Repository: https://github.com/NotXia/unibo-ai-notes.git (mirror)
Commit: Fix typos <noupdate>
@@ -18,7 +18,7 @@ the opponent as the entity that (optimally) minimizes (\textsc{Min}) the utility
 \caption{Example of game tree with propagated scores}
 \end{figure}

-In a game tree, each level represents the actions a that single player can do.
+In a game tree, each level represents the actions that a single player can do.
 In minimax, the levels where the player plays are the \textsc{Max} levels,
 while the levels of the opponent are the \textsc{Min} levels.

@@ -70,7 +70,7 @@ an iteration of the minimax algorithm can be described as follows:
 \end{center}

 \begin{algorithm}
-\caption{Minimax algorithm}
+\caption{Minimax}
 \begin{lstlisting}[mathescape=true]
 def minimax(node, max_depth, who_is_next):
     if node.isLeaf() or max_depth == 0:
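
The hunk above edits the opening lines of the notes' minimax listing. As a minimal runnable sketch of the same score propagation (the Node class, the boolean is_max flag used in place of who_is_next, and the example tree are illustrative assumptions, not taken from the notes):

    from dataclasses import dataclass, field
    from typing import List, Optional

    @dataclass
    class Node:
        score: Optional[int] = None               # static evaluation (used at leaves / depth cutoff)
        children: List["Node"] = field(default_factory=list)

        def isLeaf(self) -> bool:
            return not self.children

    def minimax(node: Node, max_depth: int, is_max: bool) -> int:
        # Leaf or depth cutoff: return the node's evaluation.
        if node.isLeaf() or max_depth == 0:
            return node.score
        values = [minimax(c, max_depth - 1, not is_max) for c in node.children]
        # Max levels keep the largest propagated score, Min levels the smallest.
        return max(values) if is_max else min(values)

    # Max chooses between two Min nodes: max(min(3, 5), min(2, 9)) = 3.
    tree = Node(children=[Node(children=[Node(score=3), Node(score=5)]),
                          Node(children=[Node(score=2), Node(score=9)])])
    print(minimax(tree, max_depth=2, is_max=True))   # 3
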
@@ -27,13 +27,13 @@
 \item[Planner] \marginnote{Planner}
 Process to decide the actions that solve a planning problem.
 In this phase, actions are considered:
-\begin{description}
+\begin{descriptionlist}
 \item[Non decomposable]
 An action is atomic (it starts and finishes).
 Actions interact with each other by reaching sub-goals.
 \item[Reversible]
 Choices are backtrackable.
-\end{description}
+\end{descriptionlist}

 A planner can have the following properties:
 \begin{descriptionlist}
@@ -99,7 +99,7 @@ The direction of the search can be:
 \]

 \begin{example}[Moving blocks]
-Given the action \texttt{unstack(X, Y)} with:
+Given the action \texttt{UNSTACK(X, Y)} with:
 \[
 \begin{split}
 \texttt{d\_list} &= \{ \texttt{handempty}, \texttt{on(X, Y)}, \texttt{clear(X)} \} \\
@@ -109,10 +109,10 @@ The direction of the search can be:
 We have that:
 \[
 \begin{split}
-\texttt{regr[holding(b), unstack(b, Y)]} &= \texttt{true} \\
-\texttt{regr[handempty, unstack(X, Y)]} &= \texttt{false} \\
-\texttt{regr[ontable(c), unstack(X, Y)]} &= \texttt{ontable(c)} \\
-\texttt{regr[clear(c), unstack(X, Y)]} &= \begin{cases}
+\texttt{regr[holding(b), UNSTACK(b, Y)]} &= \texttt{true} \\
+\texttt{regr[handempty, UNSTACK(X, Y)]} &= \texttt{false} \\
+\texttt{regr[ontable(c), UNSTACK(X, Y)]} &= \texttt{ontable(c)} \\
+\texttt{regr[clear(c), UNSTACK(X, Y)]} &= \begin{cases}
 \texttt{true} & \text{if \texttt{Y}=\texttt{c}} \\
 \texttt{clear(c)} & \text{otherwise}
 \end{cases}
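
The regression table in this hunk follows the usual add/delete-list rule: a goal literal regressed through an action becomes true if the action adds it, false if the action deletes it, and is left unchanged otherwise. A tiny ground (variable-free) sketch of that rule; the add-list for UNSTACK is an assumption inferred from the d_list shown above, not quoted from the notes:

    def regress(literal, add_list, delete_list):
        if literal in add_list:       # achieved by the action
            return True
        if literal in delete_list:    # destroyed by the action: regression fails
            return False
        return literal                # untouched: must already hold before the action

    # Ground instance UNSTACK(b, c): assumed add-list {holding(b), clear(c)},
    # delete-list {handempty, on(b, c), clear(b)} (the d_list above with X=b, Y=c).
    add_list = {"holding(b)", "clear(c)"}
    delete_list = {"handempty", "on(b, c)", "clear(b)"}

    print(regress("holding(b)", add_list, delete_list))   # True
    print(regress("handempty", add_list, delete_list))    # False
    print(regress("ontable(c)", add_list, delete_list))   # ontable(c)
    print(regress("clear(c)", add_list, delete_list))     # True (here Y = c)
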
@@ -149,15 +149,15 @@ The main concepts are:
 \[ \texttt{pre-conditions} \rightarrow \texttt{post-conditions} \]
 Applying the equivalence $A \rightarrow B \equiv \lnot A \vee B$, actions can be described by means of disjunctions.
 \begin{example}[Moving blocks]
-The action \texttt{stack(X, Y)} has pre-conditions \texttt{holding(X)} and \texttt{clear(Y)}, and
+The action \texttt{STACK(X, Y)} has pre-conditions \texttt{holding(X)} and \texttt{clear(Y)}, and
 post-conditions \texttt{on(X, Y)}, \texttt{clear(X)} and \texttt{handfree}.
 Its representation in Green's formulation is:
 \[
 \begin{split}
 \texttt{holding(X, S)} \land \texttt{clear(Y, S)} &\rightarrow \\
-&\texttt{on(X, Y, do(stack(X, Y), s))} \land \\
-&\texttt{clear(X, do(stack(X, Y), s))} \land \\
-&\texttt{handfree(do(stack(X, Y), s))} \\
+&\texttt{on(X, Y, do(STACK(X, Y), s))} \land \\
+&\texttt{clear(X, do(STACK(X, Y), s))} \land \\
+&\texttt{handfree(do(STACK(X, Y), s))} \\
 \end{split}
 \]
 \end{example}
@@ -166,7 +166,7 @@ The main concepts are:
 Besides the effects of actions, each state also have to define for all non-changing fluents their frame axioms.
 If the problem is complex, the number of frame axioms becomes unreasonable.
 \begin{example}[Moving blocks]
-\[ \texttt{on(U, V, S)}, \texttt{diff(U, X)} \rightarrow \texttt{on(U, V, do(move(X, Y, Z), S))} \]
+\[ \texttt{on(U, V, S)} \land \texttt{diff(U, X)} \rightarrow \texttt{on(U, V, do(MOVE(X, Y, Z), S))} \]
 \end{example}
 \end{descriptionlist}

@@ -194,33 +194,35 @@ The main concepts are:
 \includegraphics[width=\linewidth]{img/_moving_block_example_green.pdf}
 \end{minipage}\\[0.5em]

-For simplicity, we only consider the action \texttt{move(X, Y, Z)} that moves \texttt{X} from \texttt{Y} to \texttt{Z}.
+For simplicity, we only consider the action \texttt{MOVE(X, Y, Z)} that moves \texttt{X} from \texttt{Y} to \texttt{Z}.
 It is defined as:
 \[
 \begin{split}
 \texttt{clear(X, S)}&, \texttt{clear(Z, S)}, \texttt{on(X, Y, S)}, \texttt{diff(X, Z)} \rightarrow \\
-&\texttt{clear(Y, do(move(X, Y, Z), S))}, \texttt{on(X, Z, do(move(X, Y, Z), S))}
+&\texttt{clear(Y, do(MOVE(X, Y, Z), S))}, \texttt{on(X, Z, do(MOVE(X, Y, Z), S))}
 \end{split}
 \]
 This action can be translated into the following effect axioms:
 \[
 \begin{split}
 \lnot\texttt{clear(X, S)} &\vee \lnot\texttt{clear(Z, S)} \vee \lnot\texttt{on(X, Y, S)} \vee \lnot\texttt{diff(X, Z)} \vee \\
-&\texttt{clear(Y, do(move(X, Y, Z), S))}
+&\texttt{clear(Y, do(MOVE(X, Y, Z), S))}
 \end{split}
 \]
 \[
 \begin{split}
 \lnot\texttt{clear(X, S)} &\vee \lnot\texttt{clear(Z, S)} \vee \lnot\texttt{on(X, Y, S)} \vee \lnot\texttt{diff(X, Z)} \vee \\
-&\texttt{on(X, Z, do(move(X, Y, Z), S))}
+&\texttt{on(X, Z, do(MOVE(X, Y, Z), S))}
 \end{split}
 \]
 \end{example}

-Given the goal \texttt{on(a, b, s1)}, we prove that $\lnot\texttt{on(a, b, s1)}$ leads to an inconsistency.
-We decide to make the following substitutions:
-\[ \{ \texttt{X}/\texttt{a}, \texttt{Z}/\texttt{b}, \texttt{s1}/\texttt{do(move(a, Y, b), S)} \} \]
-The premise of \texttt{move} leads to an inconsistency (when applying \texttt{move} its premise is false):
+Given the goal \texttt{on(a, b, s1)}, we look for an action whose effects together with $\lnot\texttt{on(a, b, s1)}$ lead to an inconsistency.
+We decide to achieve this by using the action \texttt{MOVE(a, Y, b)},
+therefore making the following substitutions:
+\[ \{ \texttt{X}/\texttt{a}, \texttt{Z}/\texttt{b}, \texttt{s1}/\texttt{do(MOVE(a, Y, b), S)} \} \]
+Using the disjunctive formulation (effect axioms), we need to show that the negated preconditions are false
+(therefore, making the action applicable):
 \begin{center}
 \begin{tabular}{c|c|c|c}
 $\lnot\texttt{clear(a, S)}$ & $\lnot\texttt{clear(b, S)}$ & $\lnot\texttt{on(a, Y, S)}$ & $\lnot\texttt{diff(a, b)}$ \\
@@ -228,7 +230,7 @@ The premise of \texttt{move} leads to an inconsistency (when applying \texttt{mo
 & False with $\{ \texttt{S}/\texttt{s0}, \texttt{Y}/\texttt{d} \}$ & False
 \end{tabular}
 \end{center}
-Therefore, the substitution $\{ \texttt{s1}/\texttt{do(move(a, Y, b), S)} \}$ defines the plan to reach the goal \texttt{on(a, b, s1)}.
+Therefore, the action \texttt{do(MOVE(a, d, b), s0)} defines the plan to reach the goal \texttt{on(a, b, s1)}.


 \subsubsection{Kowalsky's formulation}
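
Spelled out, the refutation that the two hunks above summarize runs as follows; the initial-state facts \texttt{clear(a, s0)}, \texttt{clear(b, s0)}, \texttt{on(a, d, s0)} and \texttt{diff(a, b)} are assumptions read off the figure and the substitutions reported in the table. Resolving $\lnot\texttt{on(a, b, s1)}$ with the second effect axiom under $\{ \texttt{X}/\texttt{a}, \texttt{Z}/\texttt{b}, \texttt{s1}/\texttt{do(MOVE(a, Y, b), S)} \}$ leaves
\[ \lnot\texttt{clear(a, S)} \vee \lnot\texttt{clear(b, S)} \vee \lnot\texttt{on(a, Y, S)} \vee \lnot\texttt{diff(a, b)} \]
and each disjunct is refuted by an initial-state fact under $\{ \texttt{S}/\texttt{s0}, \texttt{Y}/\texttt{d} \}$, producing the empty clause and hence the plan \texttt{do(MOVE(a, d, b), s0)}.
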
@@ -269,7 +271,7 @@ In the Kowalsky's formulation, each action requires a frame assertion (in Green'
 \end{example}

 \begin{example}[Moving blocks]
-The action \texttt{unstack(X, Y)} has:
+The action \texttt{UNSTACK(X, Y)} has:
 \begin{descriptionlist}
 \item[Pre-conditions] \texttt{on(X, Y)}, \texttt{clear(X)} and \texttt{handempty}
 \item[Effects] \phantom{}
@@ -285,20 +287,20 @@ In the Kowalsky's formulation, each action requires a frame assertion (in Green'
 \[
 \begin{split}
 \texttt{holds(on(X, Y), S)}&, \texttt{holds(clear(X), S)}, \texttt{holds(handempty, S)} \rightarrow \\
-&\texttt{pact(unstack(X, Y), S)}
+&\texttt{pact(UNSTACK(X, Y), S)}
 \end{split}
 \]

 \item[Effects] (use add-list)
-\[ \texttt{holds(holding(X), do(unstack(X, Y), S))} \]
-\[ \texttt{holds(clear(Y), do(unstack(X, Y), S))} \]
+\[ \texttt{holds(holding(X), do(UNSTACK(X, Y), S))} \]
+\[ \texttt{holds(clear(Y), do(UNSTACK(X, Y), S))} \]

 \item[Frame condition] (uses delete-list)
 \[
 \begin{split}
 \texttt{holds(V, S)}&, \texttt{V} \neq \texttt{on(X, Y)}, \texttt{V} \neq \texttt{clear(X)}, \texttt{V} \neq \texttt{handempty}
 \rightarrow \\
-& \texttt{holds(V, do(unstack(X, Y), S))}
+& \texttt{holds(V, do(UNSTACK(X, Y), S))}
 \end{split}
 \]
 \end{descriptionlist}
@@ -332,7 +334,7 @@ STRIPS uses two data structures:
 \item[Current state] Represents the forward application of the actions found using the goal stack.
 \end{descriptionlist}

-\begin{algorithm}
+\begin{algorithm}[H]
 \caption{STRIPS}
 \begin{lstlisting}[mathescape=true]
 def strips(problem):
@@ -365,17 +367,17 @@ def strips(problem):

 \begin{example}[Moving blocks]
 \begin{center}
-\includegraphics[trim={0 16cm 0 0}, clip, width=0.85\textwidth]{img/_strips_example.pdf}
+\includegraphics[trim={0 32.2cm 0 0}, clip, width=0.85\textwidth]{img/_strips_example.pdf}
 \end{center}
 \begin{center}
-\includegraphics[trim={0 0 0 33.8cm}, clip, width=0.85\textwidth]{img/_strips_example.pdf}
+\includegraphics[trim={0 0 0 17.5cm}, clip, width=0.85\textwidth]{img/_strips_example.pdf}
 \end{center}
 \end{example}

-Since there are non-deterministic choices, the search space may become very large.
-Heuristics may be used to avoid this.
+Since there are non-deterministic choices, the search space might become very large.
+Heuristics can be used to avoid this.

-Conjunction of goals are solved separately, but this could lead to the \marginnote{Sussman anomaly} \textbf{Sussman anomaly}
+Conjunction of goals are solved separately, but this can lead to the \marginnote{Sussman anomaly} \textbf{Sussman anomaly}
 where a sub-goal destroys what another sub-goal has done.
 For this reason, when a conjunction is encountered, it is not immediately popped from the goal stack
 and is left as a final check.
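
The STRIPS listing that this hunk belongs to is not fully visible here, so the following is only a compact, hedged sketch of the goal-stack loop the text describes (ground literals, no variable binding, no backtracking over the non-deterministic choice of achiever); the Action layout and the toy domain are illustrative, not taken from the notes:

    from collections import namedtuple

    Action = namedtuple("Action", ["name", "pre", "add", "delete"])

    def strips(initial_state, goals, actions):
        state = set(initial_state)
        # Push the conjunction first so it is re-checked at the end (cf. the Sussman anomaly),
        # then the individual sub-goals.
        stack = [("conj", goals)] + [("goal", g) for g in goals]
        plan = []
        while stack:
            kind, item = stack.pop()
            if kind == "conj":
                unmet = [g for g in item if g not in state]
                if unmet:                               # a sub-goal was undone: re-push it
                    stack.append(("conj", item))
                    stack.extend(("goal", g) for g in unmet)
            elif kind == "goal":
                if item in state:
                    continue
                # Non-deterministic choice: take the first action whose add-list achieves the goal.
                action = next(a for a in actions if item in a.add)
                stack.append(("apply", action))
                stack.append(("conj", action.pre))
                stack.extend(("goal", p) for p in action.pre)
            else:                                       # ("apply", action): update the current state
                state = (state - set(item.delete)) | set(item.add)
                plan.append(item.name)
        return plan

    # Toy run: move a box from l to p.
    move = Action("move(box, l, p)", pre=("at(box, l)",), add=("at(box, p)",), delete=("at(box, l)",))
    print(strips({"at(box, l)"}, ("at(box, p)",), [move]))   # ['move(box, l, p)']
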
@@ -398,7 +400,7 @@ A non-linear plan is represented by:
 between actions.
 \item[Causal links] \marginnote{Causal links}
 triplet $\langle S_i, S_j, c \rangle$ where $S_i$ and $S_j$ are actions and $c$ is a sub-goal.
-$c$ should be the effect of $S_i$ and precondition of $S_j$.
+$c$ should be the effects of $S_i$ and preconditions of $S_j$.

 Causal links represent causal relations between actions (i.e. interaction between sub-goals):
 to execute $S_j$, the effect $c$ of $S_i$ is required first.
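
A minimal data-structure sketch for the causal links described in this hunk; the field names and the threat test are illustrative (a full threat check would also consult the ordering constraints to see whether the action can actually fall between $S_i$ and $S_j$):

    from collections import namedtuple

    CausalLink = namedtuple("CausalLink", ["producer", "consumer", "condition"])   # <S_i, S_j, c>

    def deletes_condition(action_delete_list, link):
        # Necessary part of a threat: the action removes the protected condition c.
        return link.condition in action_delete_list

    link = CausalLink("STACK(a, b)", "FINISH", "on(a, b)")
    # Assumed delete-list of UNSTACK(a, b): {handempty, on(a, b), clear(a)}.
    print(deletes_condition({"handempty", "on(a, b)", "clear(a)"}, link))   # True: it would break the link
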
@@ -635,7 +637,7 @@ MTC refinement methods are:
 \begin{enumerate}
 \item Insert a new action in the plan.
 \item Add an ordering constraint.
-\item Do a variable assignement.
+\item Do a variable assignment.
 \end{enumerate}

 \item[Promotion] \marginnote{Promotion}
@@ -664,7 +666,8 @@ Different meta-level searches are executed to generate meta-level plans that are
 \marginnote{ABSTRIPS}
 In ABSTRIPS, a criticality value is assigned to each precondition based on the complexity of its achievement.

-At each level, a plan is found assuming that the preconditions corresponding to lower levels of criticality are true.
+At each level, a plan is found assuming that the preconditions corresponding to lower levels of criticality are true
+(i.e. solve harder goals first).
 At the next level, the previously found plan and its preconditions are used as starting point in the goal stack.

 \begin{algorithm}
@@ -755,7 +758,7 @@ It generates a different plan for each source of uncertainty and therefore has e
 \includegraphics[width=0.65\textwidth]{img/_conditional_planning.pdf}
 \end{center}

-When executing a sensing action, a copy of the goal is generated for each possible scenario.
+When executing a sensing action (\texttt{CHECK(tire1)}), a copy of the goal is generated for each possible scenario.
 \end{example}


@@ -968,7 +971,7 @@ def extractSolution(graph, goal):
 \item \texttt{a} and \texttt{b} are objects.
 \item \texttt{l} and \texttt{p} are locations.
 \end{itemize}
-and the initial state is:
+and the initial state:
 \begin{center}
 \texttt{at(a, l)} $\cdot$ \texttt{at(b, l)} $\cdot$ \texttt{at(r, l)} $\cdot$ \texttt{hasFuel(r)}
 \end{center}
@@ -148,7 +148,7 @@ The algorithm has the following phases:
 \end{descriptionlist}

 \begin{algorithm}
-\caption{ABC algorithm}
+\caption{ABC}
 \begin{lstlisting}[mathescape=true]
 def abcAlgorithm(problem):
     initPhase()
@@ -206,27 +206,26 @@ Each particle is described by:
 \end{itemize}

 \begin{algorithm}
-\caption{PSO algorithm}
+\caption{PSO}
 \begin{lstlisting}[mathescape=true]
-def pco(f, n_particles, $\vec{l}$, $\vec{u}$, $\omega$, $\varphi_p$, $\varphi_g$):
+def pso(f, n_particles, $\vec{l}$, $\vec{u}$, $\omega$, $\varphi_p$, $\varphi_g$):
     particles = [Particle()] * n_particles
-    global_best = None
+    gb = None # Global best
     for particle in particles:
         particle.value = randomUniform($\vec{l}$, $\vec{u}$) # Search space bounds
         particle.vel = randomUniform($-\vert \vec{u}-\vec{l} \vert$, $\vert \vec{u}-\vec{l} \vert$)
         particle.best = particle.value
-        if f(particles.best) < f(g): g = particles.best
+        if f(particles.best) < f(gb): gb = particles.best

     while not terminationConditions():
         for particle in particles:
             $r_p$, $r_g$ = randomUniform(0, 1), randomUniform(0, 1)
             $\vec{x}_i$, $\vec{p}_i$, $\vec{v}_i$ = particle.value, particle.best, particle.vel
-            $\vec{g}$ = global_best
-            particle.vel = $\omega$*$\vec{v}_i$ + $\varphi_p$*$r_p$*($\vec{p}_i$-$\vec{x}_i$) + $\varphi_g$*$r_g$*($\vec{g}$-$\vec{x}_i$)
+            particle.vel = $\omega$*$\vec{v}_i$ + $\varphi_p$*$r_p$*($\vec{p}_i$-$\vec{x}_i$) + $\varphi_g$*$r_g$*(gb-$\vec{x}_i$)
             particle.value = particle.value + particle.vel
             if f(particle.value) < f(particle.best):
                 particle.best = particle.value
-                if f(particle.best) < f(g): g = particle.best
+                if f(particle.best) < f(gb): gb = particle.best
 \end{lstlisting}
 \end{algorithm}

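
For reference, the corrected PSO listing maps directly onto a runnable NumPy version; the objective function, bounds, iteration count, and hyper-parameter defaults below are illustrative choices, not values from the notes:

    import numpy as np

    def pso(f, n_particles, lower, upper, omega=0.7, phi_p=1.5, phi_g=1.5, n_iters=200, seed=0):
        rng = np.random.default_rng(seed)
        dim = len(lower)
        x = rng.uniform(lower, upper, size=(n_particles, dim))                       # positions
        v = rng.uniform(-(upper - lower), upper - lower, size=(n_particles, dim))    # velocities
        p_best = x.copy()                                                            # per-particle bests
        p_val = np.apply_along_axis(f, 1, x)
        g_best = p_best[np.argmin(p_val)].copy()                                     # global best

        for _ in range(n_iters):
            r_p = rng.uniform(size=(n_particles, dim))
            r_g = rng.uniform(size=(n_particles, dim))
            # Velocity update: inertia + pull towards personal best + pull towards global best.
            v = omega * v + phi_p * r_p * (p_best - x) + phi_g * r_g * (g_best - x)
            x = x + v
            val = np.apply_along_axis(f, 1, x)
            improved = val < p_val
            p_best[improved], p_val[improved] = x[improved], val[improved]
            g_best = p_best[np.argmin(p_val)].copy()
        return g_best

    # Minimise a simple sphere function on [-5, 5]^2.
    sphere = lambda z: float(np.sum(z ** 2))
    print(pso(sphere, n_particles=30, lower=np.array([-5.0, -5.0]), upper=np.array([5.0, 5.0])))
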