Update _rnn.tex

This commit is contained in:
Luca Domeniconi
2024-10-27 17:14:34 +01:00
committed by GitHub
parent cff26da369
commit 9e8675ab9d


@@ -39,7 +39,7 @@
\begin{description}
\item[Training]
Given the predicted distribution $\hat{\vec{y}}^{(t)}$ and ground-truth $\vec{y}^{(t)}$ at step $t$, the loss is computed as the cross-entropy:
-\[ \mathcal{L}^{(t)}(\matr{\theta}) = - \sum_{v \in V} \vec{y}_v^{(t)} \log\left( \hat{\vec{y}}_w^{(t)} \right) \]
+\[ \mathcal{L}^{(t)}(\matr{\theta}) = - \sum_{v \in V} \vec{y}_v^{(t)} \log\left( \hat{\vec{y}}_v^{(t)} \right) \]
\begin{description}
\item[Teacher forcing] \marginnote{Teacher forcing}
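The fix in this hunk aligns the index of the predicted probability with the summation index. Since the ground truth $\vec{y}^{(t)}$ is a one-hot vector over the vocabulary $V$, the corrected loss collapses to the negative log-probability of the true token. A short worked step in the same notation, where $w^{(t)} \in V$ is introduced here (it is not defined in the file) to denote the ground-truth token at step $t$:

\[
    \mathcal{L}^{(t)}(\matr{\theta})
    = - \sum_{v \in V} \vec{y}_v^{(t)} \log\left( \hat{\vec{y}}_v^{(t)} \right)
    = - \log\left( \hat{\vec{y}}_{w^{(t)}}^{(t)} \right)
\]

With the old subscript $w$, the term inside the logarithm did not vary with the summation index, which is what the commit corrects.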
@@ -68,4 +68,4 @@
\item[Greedy] Select the token with the highest probability.
\item[Sampling] Randomly sample a token according to the probabilities of the output distribution.
\end{descriptionlist}
-\end{description}
+\end{description}
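As a companion to the Greedy and Sampling items in the second hunk, here is a minimal Python sketch; it is not part of the committed file and assumes the step-$t$ output distribution $\hat{\vec{y}}^{(t)}$ is available as a NumPy probability vector `probs`:

import numpy as np

def greedy_decode(probs: np.ndarray) -> int:
    # Greedy: deterministically pick the index of the most probable token.
    return int(np.argmax(probs))

def sample_decode(probs: np.ndarray, rng: np.random.Generator) -> int:
    # Sampling: draw a token index according to the output distribution.
    return int(rng.choice(len(probs), p=probs))

# Toy distribution over a 4-token vocabulary (hypothetical values).
probs = np.array([0.1, 0.6, 0.2, 0.1])
probs = probs / probs.sum()  # guard against floating-point drift in the toy values
rng = np.random.default_rng(seed=0)
print(greedy_decode(probs))       # always 1, the argmax
print(sample_decode(probs, rng))  # each index drawn in proportion to its probability

Greedy decoding is deterministic given the distribution, while sampling makes the generated token stochastic; both consume the same output distribution.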