Add DL expressivity
1 src/deep-learning/ainotes.cls Symbolic link
@@ -0,0 +1 @@
../ainotes.cls
12 src/deep-learning/dl.tex Normal file
@@ -0,0 +1,12 @@
\documentclass[11pt]{ainotes}

\title{Deep Learning}
\date{2023 -- 2024}
\def\lastupdate{{PLACEHOLDER-LAST-UPDATE}}

\begin{document}

\makenotesfront
\input{./sections/_expressivity.tex}

\end{document}
58 src/deep-learning/sections/_expressivity.tex Normal file
@@ -0,0 +1,58 @@
\chapter{Neural networks expressivity}



\section{Perceptron}

Single neuron that defines a binary threshold through a hyperplane:
\[
    y =
    \begin{cases}
        1 & \text{if } \sum_{i} w_i x_i + b \geq 0 \\
        0 & \text{otherwise}
    \end{cases}
\]
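
For instance, with the illustrative weights $w_1 = w_2 = -2$ and bias $b = 3$ (an example choice, not fixed by the definition), the perceptron computes NAND: the input $(1, 1)$ gives $-2 - 2 + 3 = -1 < 0$, hence output $0$, while any input containing a $0$ sums to at least $-2 + 3 = 1 \geq 0$, hence output $1$.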

\begin{description}
    \item[Expressivity] \marginnote{Perceptron expressivity}
    A perceptron can represent a NAND gate but not an XOR gate.
    \begin{center}
        \begin{minipage}{.2\textwidth}
            \centering
            \includegraphics[width=\textwidth]{img/perceptron_nand.pdf}
            \tiny NAND
        \end{minipage}
        \begin{minipage}{.2\textwidth}
            \centering
            \includegraphics[width=\textwidth]{img/xor.pdf}
            \tiny XOR
        \end{minipage}
    \end{center}
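
    The XOR claim can be verified directly (a short derivation, added for completeness): a perceptron computing XOR would require
    \[
        \begin{aligned}
            b &< 0 && (0,0) \mapsto 0 \\
            w_1 + w_2 + b &< 0 && (1,1) \mapsto 0 \\
            w_1 + b &\geq 0 && (1,0) \mapsto 1 \\
            w_2 + b &\geq 0 && (0,1) \mapsto 1
        \end{aligned}
    \]
    Summing the last two constraints gives $w_1 + w_2 + 2b \geq 0$, while summing the first two gives $w_1 + w_2 + 2b < 0$: a contradiction, so no separating hyperplane exists.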

    \begin{remark}
        Even though NAND is functionally complete, a perceptron is by strict definition a single unit, not a composition of NAND gates, so this completeness does not make XOR representable.
    \end{remark}
\end{description}



\section{Multi-layer perceptron}

Composition of perceptrons.
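
As a minimal illustration (weights chosen for the example, not from the notes), one hidden layer already recovers XOR by composing linearly separable gates, since $\mathrm{XOR}(x_1, x_2) = \mathrm{AND}(\mathrm{OR}(x_1, x_2),\, \mathrm{NAND}(x_1, x_2))$:
\[
    \begin{aligned}
        h_1 &= \mathrm{step}(x_1 + x_2 - 0.5) && \text{(OR)} \\
        h_2 &= \mathrm{step}(-x_1 - x_2 + 1.5) && \text{(NAND)} \\
        y &= \mathrm{step}(h_1 + h_2 - 1.5) && \text{(AND)}
    \end{aligned}
\]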

\begin{descriptionlist}
    \item[Shallow neural network] \marginnote{Shallow NN}
    Neural network with one hidden layer.

    \item[Deep neural network] \marginnote{Deep NN}
    Neural network with more than one hidden layer.
\end{descriptionlist}

\begin{description}
    \item[Expressivity] \marginnote{Multi-layer perceptron expressivity}
    Shallow neural networks can approximate, to arbitrary precision, any continuous function
    \[ f: \mathbb{R} \rightarrow [0, 1] \]
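
    A standard sketch of why this holds (the usual universal approximation intuition, with $\sigma$ a steep sigmoid; not spelled out in the notes): two shifted units approximate the indicator of an interval,
    \[ \sigma(k(x - a)) - \sigma(k(x - b)) \approx \mathbf{1}_{[a, b)}(x) \quad \text{for large } k, \]
    so a single hidden layer can build a piecewise-constant approximation of $f$:
    \[ f(x) \approx \sum_i f(c_i) \left[ \sigma(k(x - a_i)) - \sigma(k(x - a_{i+1})) \right], \quad c_i \in [a_i, a_{i+1}). \]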

    \begin{remark}
        Still, deep neural networks can achieve the same approximations with fewer units.
    \end{remark}
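
    A concrete depth separation (a known result due to Telgarsky, cited here from outside these notes): the triangular map
    \[ g(x) = \begin{cases} 2x & x < \frac{1}{2} \\ 2(1 - x) & x \geq \frac{1}{2} \end{cases} \]
    composed $k$ times yields a sawtooth with $2^{k-1}$ peaks; a deep network represents $g^{\circ k}$ with $O(k)$ units, while a shallow one needs exponentially many units in $k$ to approximate it well.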
\end{description}