Rearranged sections

2025-04-03 15:58:42 +02:00
parent d5496f3ef7
commit fcd890411d


@@ -586,13 +586,13 @@
\end{figure}
\end{description}
\subsection{Optimization algorithms}
\begin{remark}
Using the sum of all agents' gradients as the descent direction is not possible, since not every agent can communicate with every other agent.
\end{remark}
\subsection{Distributed gradient algorithm}
\begin{description}
\item[Distributed gradient algorithm] \marginnote{Distributed gradient algorithm}
Method in which each agent estimates a (more precise) set of parameters as a weighted sum of its neighbors' parameters (self-loop included), followed by a local gradient step (see the sketch after this definition):
@@ -606,6 +606,7 @@
&= \left(\sum_{j \in \mathcal{N}_i} a_{ij} \z_j^k\right) - \alpha^k \nabla l_i\left(\sum_{j \in \mathcal{N}_i} a_{ij} \z_j^k\right)
\end{split}
\]
\end{description}
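A minimal sketch (an illustration, not part of the original notes) of this update, assuming three agents on a path graph, a doubly stochastic weight matrix $a_{ij}$, scalar quadratic local costs $l_i(z) = \frac{1}{2}(z - t_i)^2$ and the diminishing step size $\alpha^k = 1/(k+1)$:
\begin{verbatim}
import numpy as np

# Illustrative assumptions (not from the notes): 3 agents, doubly stochastic
# weights a_ij with self-loops, local costs l_i(z) = 0.5 * (z - t_i)^2.
A = np.array([[0.5, 0.5, 0.0],
              [0.5, 0.0, 0.5],
              [0.0, 0.5, 0.5]])
t = np.array([0.0, 2.0, 4.0])

z = np.zeros(3)              # z_i^0, one scalar estimate per agent
for k in range(500):
    alpha = 1.0 / (k + 1)    # diminishing step size alpha^k
    v = A @ z                # consensus step: sum_j a_ij * z_j^k
    z = v - alpha * (v - t)  # local gradient step: grad l_i(v_i) = v_i - t_i
print(z)  # every z_i approaches the minimizer of sum_i l_i, here 2.0
\end{verbatim}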
\begin{theorem}[Distributed gradient algorithm convergence] \marginnote{Distributed gradient algorithm convergence}
Assume that:
@@ -618,6 +619,8 @@
\[ \lim_{k \rightarrow \infty} \Vert \z_i^k - \z^* \Vert = 0 \]
\end{theorem}
\begin{description}
\item[Distributed projected subgradient algorithm] \marginnote{Distributed projected subgradient algorithm}
Distributed gradient algorithm extended to the case where the $l_i$ are non-smooth convex functions and $\z$ is constrained to a closed convex set $Z \subseteq \mathbb{R}^d$. The distributed step is the following (see the sketch after this definition):
\[
@@ -627,6 +630,7 @@
\end{split}
\]
where $P_Z(\cdot)$ is the Euclidean projection onto $Z$ and $\tilde{\nabla} l_i$ is a subgradient of $l_i$.
\end{description}
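A companion sketch (again illustrative, not from the notes) of the projected step, reusing the weights above but with the non-smooth costs $l_i(z) = |z - t_i|$ and the constraint set $Z = [0, 3]$, for which $P_Z$ is a clip:
\begin{verbatim}
import numpy as np

# Illustrative assumptions (not from the notes): same 3 agents and weights,
# non-smooth costs l_i(z) = |z - t_i|, constraint set Z = [0, 3].
A = np.array([[0.5, 0.5, 0.0],
              [0.5, 0.0, 0.5],
              [0.0, 0.5, 0.5]])
t = np.array([0.0, 2.0, 4.0])

z = np.zeros(3)
for k in range(2000):
    alpha = 1.0 / (k + 1)                 # diminishing step size alpha^k
    v = A @ z                             # consensus step: sum_j a_ij * z_j^k
    g = np.sign(v - t)                    # a subgradient of |v_i - t_i|
    z = np.clip(v - alpha * g, 0.0, 3.0)  # projected step: P_Z(v_i - alpha^k * g_i)
print(z)  # every z_i approaches the constrained minimizer, here 2.0
\end{verbatim}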
\begin{theorem}[Distributed projected subgradient algorithm convergence] \marginnote{Distributed projected subgradient algorithm convergence}
Assume that:
@@ -637,9 +641,10 @@
\end{itemize}
Then, each agent converges to an optimal solution $\z^*$.
\end{theorem}
\end{description}
\subsection{Gradient tracking algorithm}
\begin{theorem}
The distributed gradient algorithm does not converge with a constant step size.