diff --git a/src/ainotes.cls b/src/ainotes.cls
index b41a4ae..0323a80 100644
--- a/src/ainotes.cls
+++ b/src/ainotes.cls
@@ -39,8 +39,7 @@
     showspaces = false,
     showstringspaces = true,
     showtabs = false,
-    tabsize = 3,
-    belowskip = -0.8\baselineskip
+    tabsize = 3
 }
 \lstset{style=mystyle}
 \lstset{language=Python}
diff --git a/src/statistical-and-mathematical-methods-for-ai/sections/_gradient_methods.tex b/src/statistical-and-mathematical-methods-for-ai/sections/_gradient_methods.tex
index 0f1de01..2c1bb69 100644
--- a/src/statistical-and-mathematical-methods-for-ai/sections/_gradient_methods.tex
+++ b/src/statistical-and-mathematical-methods-for-ai/sections/_gradient_methods.tex
@@ -106,7 +106,7 @@ Note: descent methods usually converge to a local minimum.
     \item[Backtracking procedure] \marginnote{Backtracking procedure}
         $\alpha_k$ is chose such that it respects the Wolfe condition\footnote{\url{https://en.wikipedia.org/wiki/Wolfe_conditions}}:
-        \begin{lstlisting}[mathescape=true]
+        \begin{lstlisting}[mathescape=true, belowskip = -0.8\baselineskip]
     def backtracking($\tau$, $c_1$):
         $\alpha_k$ = 1 # Initial guess
         while $f(x_k - \alpha_k \nabla f(\vec{x}_k))$ > $f(\vec{x}_k)$ + $c_1 \alpha_k \nabla f(\vec{x}_k)^T \nabla f(\vec{x}_k)$:
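
For context, a minimal runnable sketch of the backtracking line search that the lstlisting context above begins, written as plain Python instead of mathescape pseudocode. The names backtracking, tau, c1 and the initial guess alpha = 1 are taken from the snippet; the objective f, gradient grad_f, default parameter values, and the example call are illustrative assumptions, using the standard Armijo (sufficient-decrease) form of the first Wolfe condition along the steepest-descent direction -grad_f(x_k).

    import numpy as np

    def backtracking(f, grad_f, x_k, tau=0.5, c1=1e-4, alpha0=1.0):
        """Backtracking line search for steepest descent.

        Shrinks alpha by a factor tau until the Armijo (sufficient-decrease)
        condition holds along the direction -grad_f(x_k):
            f(x_k - alpha * g) <= f(x_k) - c1 * alpha * (g @ g)
        """
        g = grad_f(x_k)
        alpha = alpha0                      # initial guess, as in the snippet
        while f(x_k - alpha * g) > f(x_k) - c1 * alpha * (g @ g):
            alpha *= tau                    # step too long: shrink and retry
        return alpha

    # Example (assumed, not from the diff): one steepest-descent step on f(x) = x^T x
    f = lambda x: x @ x
    grad_f = lambda x: 2.0 * x
    x = np.array([3.0, -2.0])
    alpha = backtracking(f, grad_f, x)
    x_next = x - alpha * grad_f(x)

For a descent direction and c1 in (0, 1), repeatedly shrinking alpha by the constant factor tau satisfies the sufficient-decrease test after finitely many iterations, so the loop terminates.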