Add missing corollary and sections reorder

@@ -79,36 +79,39 @@
\end{description}

\section{Axes-aligned rectangles over $\mathbb{R}^2_{[0, 1]}$}

Consider the instance space $X = \mathbb{R}^2_{[0, 1]}$
and the concept class $\mathcal{C}$ of concepts represented by all the points contained within an axis-aligned rectangle of arbitrary size.

\begin{figure}[H]
    \centering
    \includegraphics[width=0.2\linewidth]{./img/_learning_rectangle.pdf}
    \caption{Example of a problem instance. The gray rectangle is the target concept, red dots are positive data points and blue dots are negative data points.}
\end{figure}

An algorithm has to guess a classifier (i.e. a rectangle) without knowing the target concept or the distribution of its training data.
Let the algorithm $\mathcal{A}_\text{BFP}$ be defined as follows (a code sketch follows the list):
\begin{itemize}
    \item Take as input some data $\{ ((x_1, y_1), p_1), \dots, ((x_n, y_n), p_n) \}$ where
    $(x_i, y_i)$ are the coordinates of the $i$-th point and $p_i$ indicates whether the point lies within the target rectangle.
    \item Return the smallest rectangle that includes all the positive instances.
\end{itemize}
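
A minimal Python sketch of $\mathcal{A}_\text{BFP}$ (the name \texttt{a\_bfp} and the input layout are illustrative assumptions, not taken from these notes):
\begin{verbatim}
def a_bfp(data):
    """data: list of ((x, y), p) pairs, where p is True iff
    the point lies within the target rectangle."""
    positives = [(x, y) for (x, y), p in data if p]
    if not positives:
        # No positive instance observed: any degenerate rectangle
        # is consistent with the sample.
        return None
    xs = [x for x, _ in positives]
    ys = [y for _, y in positives]
    # Tightest axis-aligned bounding box of the positive instances,
    # as (x_min, x_max, y_min, y_max).
    return (min(xs), max(xs), min(ys), max(ys))
\end{verbatim}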

Given the rectangle $R$ predicted by $\mathcal{A}_\text{BFP}$ and the target rectangle $T$,
the probability of error when using $R$ in place of $T$ is:
\[ \text{error}_{\mathcal{D}, T}(R) = \mathcal{P}_{x \sim \mathcal{D}} [ x \in (R \smallsetminus T) \cup (T \smallsetminus R) ] \]
In other words, a point is misclassified if it is in $R$ but not in $T$, or vice versa.
\begin{remark}
    By definition of $\mathcal{A}_\text{BFP}$, it always holds that $R \subseteq T$:
    the smallest rectangle that encloses points all lying within $T$ is itself contained in $T$.
    Therefore, $(R \smallsetminus T) = \varnothing$ and the error can be rewritten as:
    \[ \text{error}_{\mathcal{D}, T}(R) = \mathcal{P}_{x \sim \mathcal{D}} [ x \in (T \smallsetminus R) ] \]
\end{remark}
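
As an illustration of this error measure, a minimal Monte Carlo sketch (assuming $\mathcal{D}$ is the uniform distribution over $[0, 1]^2$; the function names are hypothetical):
\begin{verbatim}
import random

def inside(rect, x, y):
    """rect is (x_min, x_max, y_min, y_max)."""
    x_min, x_max, y_min, y_max = rect
    return x_min <= x <= x_max and y_min <= y <= y_max

def estimate_error(R, T, n=100_000):
    """Estimate error_{D,T}(R) by sampling n uniform points:
    a point is an error iff R and T disagree on it, i.e. it
    falls in the symmetric difference of the two rectangles."""
    points = ((random.random(), random.random()) for _ in range(n))
    errors = sum(inside(R, x, y) != inside(T, x, y) for x, y in points)
    return errors / n
\end{verbatim}
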
\begin{theorem}[Axes-aligned rectangles over $\mathbb{R}^2_{[0, 1]}$ PAC learnability]
    It holds that:
    \begin{itemize}
        \item For every distribution $\mathcal{D}$,

@@ -155,5 +158,9 @@

    \textit{To be continued\dots}
\end{proof}
\end{theorem}

\begin{corollary}
    The concept class of axis-aligned rectangles over $\mathbb{R}^2_{[0, 1]}$ is efficiently PAC learnable.
\end{corollary}
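
For reference, the standard sample complexity bound for this class (a classical textbook result; the constants are an assumption here, not taken from the elided proof) states that $\mathcal{A}_\text{BFP}$ returns a rectangle with $\text{error}_{\mathcal{D}, T}(R) \leq \epsilon$ with probability at least $1 - \delta$ whenever the sample size satisfies
\[ m \geq \frac{4}{\epsilon} \ln \frac{4}{\delta} \]
which is polynomial in $\frac{1}{\epsilon}$ and $\frac{1}{\delta}$, as efficient PAC learnability requires.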