Moved LAAI2 in year1

This commit is contained in:
2023-12-22 21:47:35 +01:00
parent fa4f50df48
commit 5a8c82277d
8 changed files with 1 additions and 1 deletions

View File

@@ -0,0 +1 @@
../../../ainotes.cls

View File

@@ -0,0 +1,17 @@
\documentclass[11pt]{ainotes}
\title{Languages and Algorithms for\\Artificial Intelligence\\(Module 2)}
\date{2023 -- 2024}
\def\lastupdate{{PLACEHOLDER-LAST-UPDATE}}
\begin{document}
\makenotesfront
\input{sections/_propositional_logic.tex}
\input{sections/_first_order_logic.tex}
\input{sections/_logic_programming.tex}
\input{sections/_prolog.tex}
\input{sections/_constraint_programming.tex}
\eoc
\end{document}

View File

@@ -0,0 +1,199 @@
\chapter{Constraint programming}
\begin{description}
\item[Class of problems] \phantom{}
\begin{description}
\item[Constraint satisfaction problem (CSP)] \marginnote{Constraint satisfaction problem}
Defined by:
\begin{itemize}
\item A finite set of variables $\{ X_1, \dots, X_n \}$.
\item A domain for each variable $D(X_1), \dots, D(X_n)$.
\item A set of constraints $\{ C_1, \dots, C_m \}$.
\end{itemize}
A solution is an assignment to all the variables that satisfies all the constraints.
\item[Constraint optimization problem (COP)] \marginnote{Constraint optimization problem}
Extension of a constraint satisfaction problem with an objective function $f$ mapping assignments to values in a domain $D$:
\[ f: D(X_1) \times \dots \times D(X_n) \rightarrow D \]
A solution is a CSP solution that optimizes $f$.
\end{description}
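\begin{example}
The variables $X, Y$ with domains $D(X) = D(Y) = \{1, 2, 3\}$ and the constraint $X < Y$ form a CSP;
$\{X = 1, Y = 3\}$ is one of its solutions.
Adding the objective function $f(X, Y) = X + Y$ to maximize turns it into a COP,
whose optimal solution is $\{X = 2, Y = 3\}$.
\end{example}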
\item[Class of languages] \phantom{}
\begin{description}
\item[Constraint logic programming (CLP)] \marginnote{Constraint logic programming}
Add constraints and solvers to logic programming.
Generally more efficient than plain logic programming.
\item[Imperative languages] \marginnote{Imperative languages}
Add constraints and solvers to imperative languages.
\end{description}
\end{description}
\section{Constraint logic programming (CLP)}
\subsection{Syntax}
\begin{description}
\item[Atom] \marginnote{Atom}
$A := p(t_1, \dots, t_n)$, for $n \geq 0$. $p$ is a predicate.
\item[Constraint] \marginnote{Constraint}
$C := c(t_1, \dots, t_n) \mid C_1 \land C_2$, for $n \geq 0$. $c$ is an atomic constraint.
\item[Goal] \marginnote{Goal}
$G := \top \mid \bot \mid A \mid C \mid G_1 \land G_2$
\item[Constraint logic clause] \marginnote{Constraint logic clause}
$K := A \Leftarrow G$
\item[Constraint logic program] \marginnote{Constraint logic program}
$P := K_1 \dots K_m$, for $m \geq 0$
\end{description}
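\begin{example}
A constraint logic program with two clauses:
\[ p(X) \Leftarrow X > 0 \land q(X) \qquad\qquad q(X) \Leftarrow X < 10 \]
where $X > 0$ and $X < 10$ are atomic constraints and $q(X)$ is an atom.
\end{example}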
\subsection{Semantics}
\begin{description}
\item[Transition system] \marginnote{Transition system}
\phantom{}
\begin{description}
\item[State] Pair $\langle G, C \rangle$ where $G$ is a goal and $C$ is a constraint.
\begin{description}
\item[Initial state] $\langle G, \top \rangle$
\item[Successful final state] $\langle \top, C \rangle$ with $C \neq \bot$
\item[Failed final state] $\langle G, \bot \rangle$
\end{description}
\item[Transition]
Starting from the state $\langle A \land G, C \rangle$ of a program $P$, a transition on the atom $A$ can result in:
\begin{description}
\item[Unfold] \marginnote{Unfold}
If there exists a clause $(B \Leftarrow H)$ in $P$ and
an assignment $(B \doteq A)$ such that $((B \doteq A) \land C)$ is consistent (i.e. it does not reduce to $\bot$),
then we have a transition $\langle A \land G, C \rangle \mapsto \langle H \land G, (B \doteq A) \land C \rangle$.
In other words, we want to develop an atom $A$ while the current constraint store is $C$.
We look for a clause whose head can be equated to $A$ without making the store inconsistent.
If this is possible, we transition from solving $A$ to solving the body of the clause and
add the assignment to the set of active constraints.
\item[Failure] \marginnote{Failure}
If there is no clause $(B \Leftarrow H)$ in $P$ such that $((B \doteq A) \land C)$ is consistent,
then we have a transition $\langle A \land G, C \rangle \mapsto \langle \bot, \bot \rangle$.
\end{description}
Moreover, starting from the state $\langle C \land G, D_1 \rangle$ of a program $P$, a transition on the constraint $C$ can result in:
\begin{description}
\item[Solve] \marginnote{Solve}
If $(C \land D_1) \iff D_2$ holds,
then we have a transition $\langle C \land G, D_1 \rangle \mapsto \langle G, D_2 \rangle$.
In other words, we want to develop a constraint $C$ while the current constraint store is $D_1$.
The solver simplifies $C \land D_1$ into an equivalent constraint $D_2$, and we continue solving the rest of the goal under $D_2$.
\end{description}
\item[Non-determinism]
As in logic programming, both don't-care and don't-know non-determinism are present.
An SLD search tree is also used.
\end{description}
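\begin{example}
Consider the program with the single clause $p(X) \Leftarrow X > 2$ and the goal $p(Y) \land Y < 5$:
\[
\begin{split}
\langle p(Y) \land Y < 5,\ \top \rangle
&\mapsto \langle X > 2 \land Y < 5,\ X \doteq Y \rangle \quad \text{(unfold)} \\
&\mapsto \langle Y < 5,\ X \doteq Y \land Y > 2 \rangle \quad \text{(solve)} \\
&\mapsto \langle \top,\ X \doteq Y \land 2 < Y \land Y < 5 \rangle \quad \text{(solve)}
\end{split}
\]
The derivation is successful and the final constraint restricts the answers to $2 < Y < 5$.
\end{example}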
\item[Derivation strategies] \phantom{}
\begin{description}
\item[Generate-and-test] \marginnote{Generate-and-test}
Strategy adopted by logic programs.
Every possible assignment to the variables is generated and tested.
The workflow is the following:
\begin{enumerate}
\item Determine domain.
\item Make an assignment to each variable.
\item Test the constraints.
\end{enumerate}
\item[Constrain-and-generate] \marginnote{Constrain-and-generate}
Strategy adopted by constraint logic programs.
The constraints are exploited to reduce the search space before values are assigned.
The workflow is the following:
\begin{enumerate}
\item Determine domain.
\item Restrict the domain following the constraints.
\item Make an assignment to each variable.
\end{enumerate}
\end{description}
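\begin{example}
With $D(X) = D(Y) = \{1, \dots, 5\}$ and the constraint $X > Y + 2$,
generate-and-test enumerates and checks all $25$ assignments,
while constrain-and-generate first restricts the domains to $D(X) = \{4, 5\}$ and $D(Y) = \{1, 2\}$
and only then assigns values.
\end{example}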
\end{description}
\section{MiniZinc}
Declarative language for constraint programming.
\begin{description}
\item[Built-in types] \marginnote{Built-in types}
\texttt{bool}, \texttt{int}, \texttt{float}, \texttt{string}
\item[Logical operators] \marginnote{Logical operators}
\texttt{\char`\\/} $\cdot$ \texttt{/\char`\\} $\cdot$ \texttt{->} $\cdot$ \texttt{!}
\item[Parameter] \marginnote{Parameter}
Value provided by the user and known before solving.
\[ \texttt{<domain>: <name>} \]
\begin{example} \texttt{int: size;} \end{example}
\item[Variable] \marginnote{Variable}
Value computed by the solver.
\[ \texttt{var <domain>: <name>} \]
\begin{example} \texttt{var bool: flag;} \end{example}
\item[Set] \marginnote{Set}
For defining ranges.
\[ \texttt{set of <domain>: <name>} \]
\begin{example} \texttt{set of int: top10 = 1..10;} \end{example}
\item[Array] \marginnote{Array}
Array of parameters or variables.
\[ \texttt{array[<index range>] of <domain>: <name>} \]
\begin{example} \texttt{array[1..5] of var int: vars;} \end{example}
\item[Aggregation functions] \marginnote{Aggregation functions}
\texttt{sum}, \texttt{product}, \texttt{min}, \texttt{max}.
\begin{description}
\item[Forall]
\[
\begin{split}
&\texttt{forall(<iterators> in <domain>)}\texttt{(<conditions>) } \\
&\texttt{forall(<iterators> in <domain> where <conditions>)}\texttt{(<conditions>) }
\end{split}
\]
\begin{example} \texttt{forall(i, j in 1..3 where i < j)(arr[i] != arr[j]);} \end{example}
\item[Exists]
\[
\begin{split}
&\texttt{exists(<iterators> in <domain>)}\texttt{(<conditions>) } \\
&\texttt{exists(<iterators> in <domain> where <conditions>)}\texttt{(<conditions>) }
\end{split}
\]
\end{description}
\item[Constraints] \marginnote{Constraints}
\[ \texttt{constraint <expression>} \]
Multiple constraints are implicitly combined into a conjunction.
\begin{example}
\texttt{constraint X >= 5 /\char`\\\, X != 10;}
\end{example}
\begin{description}
\item[Global constraints] \texttt{all\_different(...)}, \texttt{all\_equal(...)} (made available by \texttt{include "globals.mzn";})
\end{description}
\item[Solver] \marginnote{Solver} \phantom{}
\begin{description}
\item[Satisfiability problem]
\[ \texttt{solve satisfy;} \]
\item[Optimization problem]
\[ \texttt{solve minimize <expression>;} \qquad \texttt{solve maximize <expression>;} \]
\end{description}
\end{description}
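A complete model combines these constructs; as a minimal sketch (identifiers are arbitrary), the following asks for an array of pairwise distinct values whose first element is smaller than its last:
\begin{example} \phantom{}\\
\texttt{include "globals.mzn"; \% provides all\_different}\\
\texttt{int: n = 4; \% parameter}\\
\texttt{array[1..n] of var 1..n: x; \% decision variables}\\
\texttt{constraint all\_different(x);}\\
\texttt{constraint x[1] < x[n];}\\
\texttt{solve satisfy;}
\end{example}
A solver returns an assignment such as \texttt{x = [1, 3, 2, 4]}.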

View File

@@ -0,0 +1,151 @@
\chapter{First-order logic}
\section{Syntax}
\marginnote{Syntax}
The symbols of first-order logic are:
\begin{descriptionlist}
\item[Constants]
Known elements of the domain. Do not represent truth values.
\item[Variables]
Unknown elements of the domain. Do not represent truth values.
\item[Function symbols]
Function $f^{(n)}$ applied to $n$ elements of the domain to obtain another element of the domain.
\item[Predicate symbols]
Function $P^{(n)}$ applied to $n$ elements of the domain to obtain a truth value.
\item[Connectives] $\forall$ $\exists$ $\land$ $\vee$ $\Rightarrow$ $\lnot$ $\Leftrightarrow$ $\top$ $\bot$ $($ $)$
\end{descriptionlist}
Using the basic syntax, the following constructs can be defined:
\begin{descriptionlist}
\item[Term] Denotes elements of the domain.
\[ t := \texttt{constant} \,|\, \texttt{variable} \,|\, f^{(n)}(t_1, \dots, t_n) \]
\item[Proposition] Denotes truth values.
\[
P := \top \,|\, \bot \,|\, P \land P \,|\, P \vee P \,|\, P \Rightarrow P \,|\, P \Leftrightarrow P \,|\,
\lnot P \,|\, \forall x. P \,|\, \exists x. P \,|\, (P) \,|\, P^{(n)}(t_1, \dots, t_n)
\]
\end{descriptionlist}
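\begin{example}
Given a constant $c$, a variable $X$, a function symbol $f^{(1)}$ and a predicate symbol $P^{(2)}$,
$c$, $X$ and $f(f(c))$ are terms, while $\forall X. P(X, f(c))$ is a proposition.
\end{example}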
\begin{description}
\item[Well-formed formula] \marginnote{Well-formed formula}
The definition of well-formed formula in first-order logic extends the one of
propositional logic by adding the following conditions:
\begin{itemize}
\item If $S$ is well-formed, then $\exists X. S$ is well-formed, where $X$ is a variable.
\item If $S$ is well-formed, then $\forall X. S$ is well-formed, where $X$ is a variable.
\end{itemize}
\item[Free variables] \marginnote{Free variables}
The universal and existential quantifiers bind their variable within their scope.
Let $\mathcal{F}_v(F)$ be the set of free variables of a formula $F$. It is defined recursively as follows:
\begin{itemize}
\item $\mathcal{F}_v(p(t_1, \dots, t_n)) = \bigcup_i \{ \text{variables of } t_i \}$
\item $\mathcal{F}_v(\top) = \mathcal{F}_v(\bot) = \varnothing$
\item $\mathcal{F}_v(\lnot F) = \mathcal{F}_v(F)$
\item $\mathcal{F}_v(F_1 \land F_2) = \mathcal{F}_v(F_1 \vee F_2) = \mathcal{F}_v(F_1 \Rightarrow F_2) = \mathcal{F}_v(F_1) \cup \mathcal{F}_v(F_2)$
\item $\mathcal{F}_v(\forall X.F) = \mathcal{F}_v(\exists X.F) = \mathcal{F}_v(F) \smallsetminus \{ X \}$
\end{itemize}
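\begin{example}
\[ \mathcal{F}_v(\forall X. p(X, Y)) = \mathcal{F}_v(p(X, Y)) \smallsetminus \{ X \} = \{ X, Y \} \smallsetminus \{ X \} = \{ Y \} \]
Hence $\forall X. p(X, Y)$ is not a sentence, while $\forall Y. \forall X. p(X, Y)$ is closed.
\end{example}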
\begin{description}
\item[Closed formula/Sentence] \marginnote{Sentence}
Proposition without free variables.
\item[Theory] \marginnote{Theory}
Set of sentences.
\item[Ground term/Ground formula] \marginnote{Ground term/Ground formula}
Term or formula without variables.
\end{description}
\end{description}
\section{Semantics}
\begin{description}
\item[Interpretation] \marginnote{Interpretation}
An interpretation $\mathcal{I}$ in first-order logic is a pair $(D, I)$ where:
\begin{itemize}
\item $D$ is the domain of the terms.
\item $I$ is the interpretation function such that:
\begin{itemize}
\item The interpretation of an $n$-ary function symbol is a function $I(f): D^n \rightarrow D$
(a constant, being a $0$-ary function symbol, is interpreted as an element of $D$).
\item The interpretation of an $n$-ary predicate symbol is a relation $I(p) \subseteq D^n$.
\end{itemize}
\end{itemize}
\item[Variable evaluation] \marginnote{Variable evaluation}
Given an interpretation $\mathcal{I} = (D, I)$ and a set of variables $\mathcal{V}$,
a variable is evaluated through $\eta: \mathcal{V} \rightarrow D$.
\item[Model] \marginnote{Model}
Given an interpretation $\mathcal{I}$ and a formula $F$,
$\mathcal{I}$ models $F$ ($\mathcal{I} \models F$)
when $\mathcal{I}, \eta \models F$ for every variable evaluation $\eta$.
A sentence $S$ is:
\begin{descriptionlist}
\item[Valid] $S$ is satisfied by every interpretation ($\forall \mathcal{I}: \mathcal{I} \models S$).
\item[Satisfiable] $S$ is satisfied by at least one interpretation ($\exists \mathcal{I}: \mathcal{I} \models S$).
\item[Falsifiable] $S$ is not satisfied by at least one interpretation ($\exists \mathcal{I}: \mathcal{I} \not\models S$).
\item[Unsatisfiable] $S$ is not satisfied by any interpretation ($\forall \mathcal{I}: \mathcal{I} \not\models S$).
\end{descriptionlist}
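\begin{example}
The sentence $\forall X. p(X, X)$ is satisfiable: it is modeled by the interpretation with $D = \{0, 1\}$ and $I(p) = \{ (0, 0), (1, 1) \}$.
It is also falsifiable: it is not modeled by the interpretation with the same domain and $I(p) = \varnothing$.
Hence, it is neither valid nor unsatisfiable.
\end{example}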
\item[Logical consequence] \marginnote{Logical consequence}
A sentence $T_1$ is a logical consequence of $T_2$ ($T_2 \models T_1$) if
every model of $T_2$ is also a model of $T_1$:
\[ \mathcal{I} \models T_2 \Rightarrow \mathcal{I} \models T_1 \]
\begin{theorem}
Determining if a first-order logic formula is a tautology is undecidable.
\end{theorem}
\item[Equivalence] \marginnote{Equivalence}
A sentence $T_1$ is equivalent to $T_2$ iff $T_1 \models T_2$ and $T_2 \models T_1$.
\end{description}
\begin{theorem}
The following statements are equivalent:
\begin{enumerate}
\item $F_1, \dots, F_n \models G$.
\item $F_1 \land \dots \land F_n \Rightarrow G$ is valid (i.e. deduction).
\item $F_1 \land \dots \land F_n \land \lnot G$ is unsatisfiable (i.e. refutation).
\end{enumerate}
\end{theorem}
\section{Substitution}
\begin{description}
\item[Substitution] \marginnote{Substitution}
A substitution $\sigma: \mathcal{V} \rightarrow \mathcal{T}$ is a mapping from variables to terms.
It is written as $\{ X_1 \mapsto t_1, \dots, X_n \mapsto t_n \}$.
The application of a substitution is the following:
\begin{itemize}
\item $p(t_1, \dots, t_n)\sigma = p(t_1\sigma, \dots, t_n\sigma)$
\item $f(t_1, \dots, t_n)\sigma = f(t_1\sigma, \dots, t_n\sigma)$
\item $\bot\sigma = \bot$ and $\top\sigma = \top$
\item $(\lnot F)\sigma = (\lnot F\sigma)$
\item $(F_1 \star F_2)\sigma = (F_1\sigma \star F_2\sigma)$ for $\star \in \{ \land, \vee, \Rightarrow \}$
\item $(\forall X.F)\sigma = \forall X'. (F \sigma[X \mapsto X'])$ where $X'$ is a fresh variable (i.e. it appears neither in $F$ nor in $\sigma$).
\item $(\exists X.F)\sigma = \exists X'. (F \sigma[X \mapsto X'])$ where $X'$ is a fresh variable.
\end{itemize}
\item[Unifier] \marginnote{Unifier}
A substitution $\sigma$ is a unifier for $e_1, \dots, e_n$ if $e_1\sigma = \dots = e_n\sigma$.
\begin{description}
\item[Most general unifier] \marginnote{Most general unifier}
A unifier $\sigma$ is the most general unifier (MGU) for $\bar{e} = e_1, \dots, e_n$ if
every unifier $\tau$ for $\bar{e}$ is an instance of $\sigma$ ($\tau$ = $\sigma\rho$ for some substitution $\rho$).
In other words, $\sigma$ is the smallest substitution to unify $\bar{e}$.
\end{description}
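\begin{example}
For $e_1 = p(X, f(Y))$ and $e_2 = p(a, f(Z))$,
both $\tau = \{ X \mapsto a, Y \mapsto b, Z \mapsto b \}$ and $\sigma = \{ X \mapsto a, Y \mapsto Z \}$ are unifiers,
but only $\sigma$ is a most general unifier: $\tau = \sigma\rho$ with $\rho = \{ Z \mapsto b \}$.
\end{example}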
\end{description}

View File

@@ -0,0 +1,120 @@
\chapter{Logic programming}
\section{Syntax}
A logic program has the following components (defined using BNF):
\begin{descriptionlist}
\item[Atom] \marginnote{Atom}
$A := p(t_1, \dots, t_n)$ for $n \geq 0$
\item[Goal] \marginnote{Goal}
$G := \top \mid \bot \mid A \mid G_1 \land G_2$
\item[Horn clause] \marginnote{Horn clause}
A clause with at most one positive literal.
\[ K := A \Leftarrow G \]
In other words, since $A \Leftarrow G \equiv A \vee \lnot G$, the head $A$ is the only positive literal and the atoms of $G$ occur negated.
\item[Program] \marginnote{Program}
$P := K_1 \dots K_m$ for $m \geq 0$
\end{descriptionlist}
\section{Semantics}
\subsection{State transition system}
\begin{description}
\item[State] \marginnote{State}
Pair $\langle G, \theta \rangle$ where $G$ is a goal and $\theta$ is a substitution.
\begin{description}
\item[Initial state] $\langle G, \varepsilon \rangle$
\item[Successful final state] $\langle \top, \theta \rangle$
\item[Failed final state] $\langle \bot, \varepsilon \rangle$
\end{description}
\item[Derivation] \marginnote{Derivation}
A sequence of states.
A derivation can be:
\begin{descriptionlist}
\item[Successful] If the final state is successful.
\item[Failed] If the final state is failed.
\item[Infinite] If there is an infinite sequence of states.
\end{descriptionlist}
Given a derivation, a goal $G$ can be:
\begin{descriptionlist}
\item[Successful] There is a successful derivation starting from $\langle G, \varepsilon \rangle$.
\item[Finitely failed] All the derivations starting from $\langle G, \varepsilon \rangle$ are failed.
\end{descriptionlist}
\item[Computed answer substitution] \marginnote{Computed answer substitution}
Given a goal $G$ and a program $P$, if there exists a successful derivation
$\langle G, \varepsilon \rangle \mapsto^* \langle \top, \theta \rangle$,
then the substitution $\theta$ is the computed answer substitution of $G$.
\item[Transition] \marginnote{Transition}
Starting from the state $\langle A \land G, \theta \rangle$ of a program $P$, a transition on the atom $A$ can result in:
\begin{descriptionlist}
\item[Unfold]
If there exists a clause $(B \Leftarrow H)$ in $P$ and
a (most general) unifier $\beta$ for $A\theta$ and $B$,
then we have a transition: $\langle A \land G, \theta \rangle \mapsto \langle H \land G, \theta\beta \rangle$.
In other words, we want to prove that $A\theta$ holds.
To do this, we search for a clause whose conclusion (head) unifies with $A\theta$ and add its premise (body) to the goals to prove.
The unifier needed to match $A\theta$ is composed with the substitution of the state.
\item[Failure]
If there are no clauses $(B \Leftarrow H)$ in $P$ with a unifier for $A\theta$ and $B$,
then we have a transition: $\langle A \land G, \theta \rangle \mapsto \langle \bot, \varepsilon \rangle$.
\end{descriptionlist}
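\begin{example}
Consider the program $\{ q(a) \Leftarrow \top, \ p(X) \Leftarrow q(X) \}$ and the goal $p(Y)$:
\[
\langle p(Y), \varepsilon \rangle
\mapsto \langle q(X), \{ X \mapsto Y \} \rangle
\mapsto \langle \top, \{ X \mapsto a, Y \mapsto a \} \rangle
\]
The derivation is successful and, restricted to the goal variable, the computed answer substitution is $\{ Y \mapsto a \}$.
\end{example}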
\begin{description}
\item[Non-determinism] A transition has two types of non-determinism:
\begin{descriptionlist}
\item[Don't-care] \marginnote{Don't-care}
Any atom in $(A \land G)$ can be chosen to determine the next state.
This affects the length of the derivation (infinite in the worst case).
\item[Don't-know] \marginnote{Don't-know}
Any clause $(B \Leftarrow H)$ in $P$ with a unifier for $A\theta$ and $B$ can be chosen.
This determines the output of the derivation.
\end{descriptionlist}
\end{description}
\item[Selective linear definite resolution] \marginnote{SLD resolution}
Approach to avoid non-determinism when constructing a derivation.
\begin{description}
\item[Selection rule] \marginnote{Selection rule}
Method to select the atom in the goal to unfold (eliminates don't-care non-determinism).
\item[SLD tree] \marginnote{SLD tree}
Search tree constructed using all the possible clauses according to a selection rule
and visited following a search strategy (eliminates don't-know non-determinism).
\end{description}
\begin{theorem}[Soundness]
Given a program $P$, a goal $G$ and a substitution $\theta$,
if $\theta$ is a computed answer substitution, then $P \models G\theta$.
\end{theorem}
\begin{theorem}[Completeness]
Given a program $P$, a goal $G$ and a substitution $\theta$,
if $P \models G\theta$, then there exists a computed answer substitution $\sigma$ such that $G\theta = G\sigma\beta$ for some substitution $\beta$.
\end{theorem}
\begin{theorem}
If a computed answer substitution can be obtained using a selection rule $r$,
then it can also be obtained using any other selection rule $r'$.
\end{theorem}
\item[Prolog SLD] \marginnote{Prolog SLD}
\begin{description}
\item[Selection rule] Select the leftmost atom.
\item[Tree search strategy] Search following the order of definition of the clauses.
\end{description}
This results in a left-to-right, depth-first search of the SLD tree.
Note that this may end up in a loop.
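\begin{example}
With the program $\{ p(X) \Leftarrow p(X), \ p(a) \Leftarrow \top \}$ and the goal $p(Y)$,
Prolog keeps unfolding with the first clause and never reaches the success offered by the second one,
even though the SLD tree contains a successful derivation.
\end{example}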
\end{description}

View File

@@ -0,0 +1,11 @@
\newpage
\lohead{\color{gray} Chapter taken from \href{\gitFAIKRTwo}{\texttt{\color{gray}Fundamentals of AI and KR (module 2)}}}
\lehead{\color{gray} Chapter taken from \href{\gitFAIKRTwo}{\texttt{\color{gray}Fundamentals of AI and KR (module 2)}}}
\graphicspath{{../../fundamentals-of-ai-and-kr/module2/}}
\input{../../fundamentals-of-ai-and-kr/module2/sections/_prolog.tex}
\graphicspath{}
\newpage
\lohead{}
\lehead{}

View File

@@ -0,0 +1,324 @@
\chapter{Propositional logic}
\section{Syntax}
\begin{description}
\item[Syntax] \marginnote{Syntax}
Rules and symbols to define well-formed sentences.
\end{description}
The symbols of propositional logic are:
\begin{descriptionlist}
\item[Proposition symbols] $p_0$, $p_1$, \dots
\item[Connectives] $\land$ $\vee$ $\Rightarrow$ $\Leftrightarrow$ $\lnot$ $\bot$ $($ $)$
\end{descriptionlist}
\begin{description}
\item[Well-formed formula] \marginnote{Well-formed formula}
The definition of a well-formed formula is recursive:
\begin{itemize}
\item An atomic proposition is a well-formed formula.
\item If $S$ is well-formed, $\lnot S$ is well-formed.
\item If $S_1$ and $S_2$ are well-formed, $S_1 \land S_2$ is well-formed.
\item If $S_1$ and $S_2$ are well-formed, $S_1 \vee S_2$ is well-formed.
\end{itemize}
Note that the implication $S_1 \Rightarrow S_2$ can be written as $\lnot S_1 \vee S_2$.
The BNF definition of a formula is:
\[
F := \texttt{atomic\_proposition} \,|\, F \land F \,|\, F \vee F \,|\,
F \Rightarrow F \,|\, F \Leftrightarrow F \,|\, \lnot F \,|\, (F)
\]
% \[
% \begin{split}
% \texttt{<formula>} :=\,\, &\texttt{atomic\_proposition} \,|\,\\
% &\lnot \texttt{<formula>} \,|\, \\
% &\texttt{<formula>} \land \texttt{<formula>} \,|\, \\
% &\texttt{<formula>} \vee \texttt{<formula>} \,|\, \\
% &\texttt{<formula>} \Rightarrow \texttt{<formula>} \,|\, \\
% &\texttt{<formula>} \Leftrightarrow \texttt{<formula>} \,|\, \\
% &(\texttt{<formula>}) \\
% \end{split}
% \]
\end{description}
\section{Semantics}
\begin{description}
\item[Semantics] \marginnote{Semantics}
Rules to associate a meaning to well-formed sentences.
\begin{descriptionlist}
\item[Model theory] What is true.
\item[Proof theory] What is provable.
\end{descriptionlist}
\end{description}
\begin{description}
\item[Interpretation] \marginnote{Interpretation}
Given a propositional formula $F$ of $n$ atoms $ \{ A_1, \dots, A_n \}$,
an interpretation $\mathcal{I}$ of $F$ is a pair $(D, I)$ where:
\begin{itemize}
\item $D$ is the domain: the set of truth values in the case of propositional logic.
\item $I$ is the interpretation mapping that assigns
to the atoms $\{ A_1, \dots, A_n \}$ an element of $D$.
\end{itemize}
Note: given a formula $F$ of $n$ distinct atoms, there are $2^n$ distinct interpretations.
\begin{description}
\item[Model] \marginnote{Model}
If $F$ is true under the interpretation $\mathcal{I}$,
we say that $\mathcal{I}$ is a model of $F$ ($\mathcal{I} \models F$).
\end{description}
\item[Valid formula] \marginnote{Valid formula}
A formula $F$ is valid (tautology) iff it is true in all the possible interpretations.
It is denoted as $\models F$.
\item[Invalid formula] \marginnote{Invalid formula}
A formula $F$ is invalid iff it is not valid {\tiny(\texttt{:o})}.
In other words, there is at least an interpretation where $F$ is false.
\item[Inconsistent formula] \marginnote{Inconsistent formula}
A formula $F$ is inconsistent (unsatisfiable) iff it is false in all the possible interpretations.
\item[Consistent formula] \marginnote{Consistent formula}
A formula $F$ is consistent (satisfiable) iff it is not inconsistent.
In other words, there is at least an interpretation where $F$ is true.
\item[Decidability] \marginnote{Decidability}
A logic is decidable if there is a terminating method to decide if a formula is valid.
Propositional logic is decidable.
\item[Truth table] \marginnote{Truth table}
Useful to define the semantics of connectives.
\begin{itemize}
\item $\lnot S$ is true iff $S$ is false.
\item $S_1 \land S_2$ is true iff $S_1$ is true and $S_2$ is true.
\item $S_1 \vee S_2$ is true iff $S_1$ is true or $S_2$ is true.
\item $S_1 \Rightarrow S_2$ is true iff $S_1$ is false or $S_2$ is true.
\item $S_1 \Leftrightarrow S_2$ is true iff $S_1 \Rightarrow S_2$ is true and $S_1 \Leftarrow S_2$ is true.
\end{itemize}
\item[Evaluation] \marginnote{Evaluation order}
The connectives of a propositional formula bind with increasing precedence in the following order
($\Leftrightarrow$ binds the loosest, $\lnot$ the tightest):
\[ \Leftrightarrow, \Rightarrow, \vee, \land, \lnot \]
Formulas in parentheses have the highest priority.
\item[Logical consequence] \marginnote{Logical consequence}
Let $\Gamma = \{F_1, \dots, F_n\}$ be a set of formulas (premises) and $G$ a formula (conclusion).
$G$ is a logical consequence of $\Gamma$ ($\Gamma \models G$)
if, in every interpretation $\mathcal{I}$ where
$F_1 \land \dots \land F_n$ is true, $G$ is also true.
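\begin{example}
$\{ P, P \Rightarrow Q \} \models Q$: the only interpretation that makes both $P$ and $P \Rightarrow Q$ true assigns true to $P$ and to $Q$.
\end{example}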
\item[Logical equivalence] \marginnote{Logical equivalence}
Two formulas $F$ and $G$ are logically equivalent ($F \equiv G$) iff the truth values of $F$ and $G$
coincide under every interpretation.
In other words, $F \equiv G \iff (F \models G \text{ and } G \models F)$.
Common equivalences are:
\begin{descriptionlist}
\footnotesize
\item[Commutativity]: $(P \land Q) \equiv (Q \land P)$ and $(P \vee Q) \equiv (Q \vee P)$
\item[Associativity]: $((P \land Q) \land R) \equiv (P \land (Q \land R))$
and $((P \vee Q) \vee R) \equiv (P \vee (Q \vee R))$
\item[Double negation elimination]: $\lnot(\lnot P) \equiv P$
\item[Contraposition]: $(P \Rightarrow Q) \equiv (\lnot Q \Rightarrow \lnot P)$
\item[Implication elimination]: $(P \Rightarrow Q) \equiv (\lnot P \vee Q)$
\item[Biconditional elimination]: $(P \Leftrightarrow Q) \equiv ((P \Rightarrow Q) \land (Q \Rightarrow P))$
\item[De Morgan]: $\lnot(P \land Q) \equiv (\lnot P \vee \lnot Q)$ and $\lnot(P \vee Q) \equiv (\lnot P \land \lnot Q)$
\item[Distributivity of $\land$ over $\vee$]: $(P \land (Q \vee R)) \equiv ((P \land Q) \vee (P \land R))$
\item[Distributivity of $\vee$ over $\land$]: $(P \vee (Q \land R)) \equiv ((P \vee Q) \land (P \vee R))$
\end{descriptionlist}
\end{description}
\subsection{Normal forms}
\begin{description}
\item[Negation normal form (NNF)] \marginnote{Negation normal form}
A formula is in negation normal form iff negations appear only in front of atoms (i.e. not in front of compound subformulas).
\item[Conjunctive normal form (CNF)] \marginnote{Conjunctive normal form}
A formula $F$ is in conjunctive normal form iff:
\begin{itemize}
\item it is in negation normal form;
\item it has the form $F = F_1 \land F_2 \land \dots \land F_n$, where each $F_i$ (clause) is a disjunction of literals.
\end{itemize}
\begin{example} \phantom{}\\
$(\lnot P \vee Q) \land (\lnot P \vee R)$ is in CNF.\\
$\lnot(P \vee Q) \land (\lnot P \vee R)$ is not in CNF (not in NNF).
\end{example}
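\begin{example}
The CNF of the previous example can be obtained from $P \Rightarrow (Q \land R)$ by
implication elimination and distributivity of $\vee$ over $\land$:
\[ P \Rightarrow (Q \land R) \equiv \lnot P \vee (Q \land R) \equiv (\lnot P \vee Q) \land (\lnot P \vee R) \]
\end{example}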
\item[Disjunctive normal form (DNF)] \marginnote{Disjunctive normal form}
A formula $F$ is in disjunctive normal form iff:
\begin{itemize}
\item it is in negation normal form;
\item it has the form $F = F_1 \vee F_2 \vee \dots \vee F_n$, where each $F_i$ is a conjunction of literals.
\end{itemize}
\end{description}
\section{Reasoning}
\begin{description}
\item[Reasoning method] \marginnote{Reasoning method}
Systems to derive conclusions by working with symbols.
Given a set of formulas $\Gamma$, a formula $F$ and a reasoning method $E$,
we denote with $\Gamma \vdash^E F$ the fact that $F$ can be deduced from $\Gamma$
using the reasoning method $E$.
\begin{description}
\item[Sound] \marginnote{Soundness}
A reasoning method $E$ is sound iff:
\[ (\Gamma \vdash^E F) \Rightarrow (\Gamma \models F) \]
\item[Complete] \marginnote{Completeness}
A reasoning method $E$ is complete iff:
\[ (\Gamma \models F) \Rightarrow (\Gamma \vdash^E F) \]
\end{description}
\item[Deduction theorem] \marginnote{Deduction theorem}
Given a set of formulas $\{ F_1, \dots, F_n \}$ and a formula $G$:
\[ (F_1 \land \dots \land F_n) \models G \,\iff\, \models (F_1 \land \dots \land F_n) \Rightarrow G \]
\begin{proof} \phantom{}
\begin{description}
\item[$\Rightarrow$]
By hypothesis $(F_1 \land \dots \land F_n) \models G$.
So, for each interpretation $\mathcal{I}$ in which $(F_1 \land \dots \land F_n)$ is true,
$G$ is also true.
Therefore, $\mathcal{I} \models (F_1 \land \dots \land F_n) \Rightarrow G$.
Moreover, for each interpretation $\mathcal{I}'$ in which $(F_1 \land \dots \land F_n)$ is false,
$(F_1 \land \dots \land F_n) \Rightarrow G$ is true.
Therefore, $\mathcal{I}' \models (F_1 \land \dots \land F_n) \Rightarrow G$.
In conclusion, $\models (F_1 \land \dots \land F_n) \Rightarrow G$.
\item[$\Leftarrow$]
By hypothesis $\models (F_1 \land \dots \land F_n) \Rightarrow G$.
Therefore, for each interpretation where $(F_1 \land \dots \land F_n)$ is true,
$G$ is also true.
In conclusion, $(F_1 \land \dots \land F_n) \models G$.
\end{description}
\end{proof}
\item[Refutation theorem] \marginnote{Refutation theorem}
Given a set of formulas $\{ F_1, \dots, F_n \}$ and a formula $G$:
\[ (F_1 \land \dots \land F_n) \models G \,\iff\, F_1 \land \dots \land F_n \land \lnot G \text{ is inconsistent} \]
Note: this theorem is not accepted in intuitionistic logic.
\begin{proof}
By definition, $(F_1 \land \dots \land F_n) \models G$ iff for every interpretation where
$(F_1 \land \dots \land F_n)$ is true, $G$ is also true.
This requires that there are no interpretations where $(F_1 \land \dots \land F_n)$ is true and $G$ false.
In other words, it requires that $(F_1 \land \dots \land F_n \land \lnot G)$ is inconsistent.
\end{proof}
\end{description}
\subsection{Natural deduction}
\begin{description}
\item[Proof theory] \marginnote{Proof theory}
Set of rules that allow conclusions to be derived from premises through syntactic manipulation.
\end{description}
\begin{description}
\item[Natural deduction] \marginnote{Natural deduction for propositional logic}
Set of rules to introduce or eliminate connectives.
We consider the subset of connectives $\{ \land, \Rightarrow, \bot \}$, which is functionally complete.
Natural deduction can be represented using a tree-like structure:
\begin{prooftree}
\AxiomC{[hypothesis]}
\noLine
\UnaryInfC{\vdots}
\noLine
\UnaryInfC{premise}
\RightLabel{rule name}\UnaryInfC{conclusion}
\end{prooftree}
The conclusion can be derived once the premise has been proved, possibly starting from the hypotheses.
Another tree can be built on top of the premises to prove them.
\begin{descriptionlist}
\item[Introduction] \marginnote{Introduction rules}
Usually used to prove the conclusion by splitting it.
Note that $\lnot \psi \equiv (\psi \Rightarrow \bot)$. \\
\begin{minipage}{.4\linewidth}
\begin{prooftree}
\AxiomC{$\psi$}
\AxiomC{$\varphi$}
\RightLabel{$\land_\text{i}$}\BinaryInfC{$\varphi \land \psi$}
\end{prooftree}
\end{minipage}
\begin{minipage}{.4\linewidth}
\begin{prooftree}
\AxiomC{[$\varphi$]}
\noLine
\UnaryInfC{\vdots}
\noLine
\UnaryInfC{$\psi$}
\RightLabel{$\Rightarrow_\text{i}$}\UnaryInfC{$\varphi \Rightarrow \psi$}
\end{prooftree}
\end{minipage}
\item[Elimination] \marginnote{Elimination rules}
Usually used to exploit hypotheses and derive a conclusion.\\
\begin{minipage}{.25\linewidth}
\begin{prooftree}
\AxiomC{$\varphi \land \psi$}
\RightLabel{$\land_\text{e}$}\UnaryInfC{$\varphi$}
\end{prooftree}
\end{minipage}
\begin{minipage}{.25\linewidth}
\begin{prooftree}
\AxiomC{$\varphi \land \psi$}
\RightLabel{$\land_\text{e}$}\UnaryInfC{$\psi$}
\end{prooftree}
\end{minipage}
\begin{minipage}{.3\linewidth}
\begin{prooftree}
\AxiomC{$\varphi$}
\AxiomC{$\varphi \Rightarrow \psi$}
\RightLabel{$\Rightarrow_\text{e}$}\BinaryInfC{$\psi$}
\end{prooftree}
\end{minipage}
\item[Ex falso sequitur quodlibet] \marginnote{Ex falso sequitur quodlibet}
From contradiction, anything follows.
This can be used when we have two contradicting hypotheses.
\begin{prooftree}
\AxiomC{$\psi$}
\AxiomC{$\lnot \psi$}
\BinaryInfC{$\bot$}
\UnaryInfC{$\varphi$}
\end{prooftree}
\item[Reductio ad absurdum] \marginnote{Reductio ad absurdum}
Assume the opposite and prove a contradiction (not accepted in intuitionistic logic).
\begin{prooftree}
\AxiomC{[$\lnot \varphi$]}
\noLine
\UnaryInfC{\vdots}
\noLine
\UnaryInfC{$\bot$}
\RightLabel{RAA}\UnaryInfC{$\varphi$}
\end{prooftree}
\end{descriptionlist}
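\begin{example}
Combining the rules, $(\varphi \land \psi) \Rightarrow (\psi \land \varphi)$ can be proved:
\begin{prooftree}
\AxiomC{[$\varphi \land \psi$]}
\RightLabel{$\land_\text{e}$}\UnaryInfC{$\psi$}
\AxiomC{[$\varphi \land \psi$]}
\RightLabel{$\land_\text{e}$}\UnaryInfC{$\varphi$}
\RightLabel{$\land_\text{i}$}\BinaryInfC{$\psi \land \varphi$}
\RightLabel{$\Rightarrow_\text{i}$}\UnaryInfC{$(\varphi \land \psi) \Rightarrow (\psi \land \varphi)$}
\end{prooftree}
The hypothesis $\varphi \land \psi$ is discharged by the final $\Rightarrow_\text{i}$ step.
\end{example}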
\end{description}