From c0b02f2d79701ef73a840cd2a68c5f22181f0c89 Mon Sep 17 00:00:00 2001 From: NotXia <35894453+NotXia@users.noreply.github.com> Date: Wed, 28 Feb 2024 21:40:00 +0100 Subject: [PATCH] Add DL expressivity --- src/deep-learning/ainotes.cls | 1 + src/deep-learning/dl.tex | 12 ++++ src/deep-learning/sections/_expressivity.tex | 58 ++++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 120000 src/deep-learning/ainotes.cls create mode 100644 src/deep-learning/dl.tex create mode 100644 src/deep-learning/sections/_expressivity.tex diff --git a/src/deep-learning/ainotes.cls b/src/deep-learning/ainotes.cls new file mode 120000 index 0000000..c22704b --- /dev/null +++ b/src/deep-learning/ainotes.cls @@ -0,0 +1 @@ +../ainotes.cls \ No newline at end of file diff --git a/src/deep-learning/dl.tex b/src/deep-learning/dl.tex new file mode 100644 index 0000000..9d5d923 --- /dev/null +++ b/src/deep-learning/dl.tex @@ -0,0 +1,12 @@ +\documentclass[11pt]{ainotes} + +\title{Deep Learning} +\date{2023 -- 2024} +\def\lastupdate{{PLACEHOLDER-LAST-UPDATE}} + +\begin{document} + + \makenotesfront + \input{./sections/_expressivity.tex} + +\end{document} \ No newline at end of file diff --git a/src/deep-learning/sections/_expressivity.tex b/src/deep-learning/sections/_expressivity.tex new file mode 100644 index 0000000..e30c960 --- /dev/null +++ b/src/deep-learning/sections/_expressivity.tex @@ -0,0 +1,58 @@ +\chapter{Neural networks expressivity} + + + +\section{Perceptron} + +Single neuron that defines a binary threshold through a hyperplane: +\[ + \begin{cases} + 1 & \sum_{i} w_i x_i + b \geq 0 \\ + 0 & \text{otherwise} + \end{cases} +\] + +\begin{description} + \item[Expressivity] \marginnote{Perceptron expressivity} + A perceptron can represent a NAND gate but not an XOR gate.
+ \begin{center} + \begin{minipage}{.2\textwidth} + \centering + \includegraphics[width=\textwidth]{img/perceptron_nand.pdf} + \tiny NAND + \end{minipage} + \begin{minipage}{.2\textwidth} + \centering + \includegraphics[width=\textwidth]{img/xor.pdf} + \tiny XOR + \end{minipage} + \end{center} + + \begin{remark} + Even though NAND is functionally complete, a perceptron in its strict definition is not a composition of NAND gates. + \end{remark} +\end{description} + + + +\section{Multi-layer perceptron} + +Composition of perceptrons. + +\begin{descriptionlist} + \item[Shallow neural network] \marginnote{Shallow NN} + Neural network with one hidden layer. + + \item[Deep neural network] \marginnote{Deep NN} + Neural network with more than one hidden layer. +\end{descriptionlist} + +\begin{description} + \item[Expressivity] \marginnote{Multi-layer perceptron expressivity} + Shallow neural networks make it possible to approximate any continuous function + \[ f\colon \mathbb{R} \rightarrow [0, 1] \] + + \begin{remark} + Still, deep neural networks make it possible to use fewer neural units. + \end{remark} +\end{description} \ No newline at end of file