author     Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>   2012-11-05 18:15:19 +1100
committer  Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>   2012-11-05 18:15:19 +1100
commit     4c546048ba30e97bce4dc82a1962cf76551db0d5 (patch)
tree       73297114148d1045e10c816cbead43c7ce9d8a9a
parent     6f49860389b973bf0ec58cdefc63cda41f432d1c (diff)
A bunch of contribution writing, also fix the bib.
Fix up the bib style to use [1] style citations. Add a bunch of stuff to the contribution section.
-rw-r--r--  tex/thesis/contribution/contribution.tex  | 599
-rw-r--r--  tex/thesis/references.bib                 |   1
-rw-r--r--  tex/thesis/thesis.tex                     |   2
3 files changed, 373 insertions, 229 deletions
diff --git a/tex/thesis/contribution/contribution.tex b/tex/thesis/contribution/contribution.tex
index 4a3c7b7..17a1554 100644
--- a/tex/thesis/contribution/contribution.tex
+++ b/tex/thesis/contribution/contribution.tex
@@ -7,9 +7,12 @@
\newcommand\stableSI{\mathsf{stable^{SI}}}
\newcommand\stable{\mathsf{stable}}
-\newcommand\system{\mathsf{system}}
-
+\newcommand\system{\varepsilon} %\mathsf{system}}
+\newcommand\parent{\mathsf{parent}}
+\newcommand\parents{\mathsf{parents}}
+\newcommand\old{\mathsf{old}}
+\newcommand\Operators{\mathcal{O}}
\newcommand\Systems{\mathcal{E}}
@@ -27,11 +30,11 @@
\chapter{Contribution} \label{chap:contribution}
-The main theoretical contribution of this paper is an improvement on a
-$\max$-strategy improvement algorithm for solving fixpoint equations
-over the integers with monotonic
+In this chapter the main theoretical contribution of this thesis is
+presented: an improvement on a $\max$-strategy improvement algorithm
+for solving fixpoint equations over the integers with monotonic
operators\cite{Gawlitza:2007:PFC:1762174.1762203}. The original
-algorithm is presented in Section \ref{section:basic-algorithm}. We
+algorithm is presented in Section \ref{sec:gawlitza-algorithm}. We
employ the ideas of Seidl, et al. to design an algorithm which runs in
considerably less time (in the best case, and in most practical cases)
than the existing solver.
@@ -57,12 +60,14 @@ By making use of some data-dependencies within the equation systems it
is possible to reduce the amount of work that is to be done quite
considerably.
+\section{Preliminaries}
+
In order to aid our explanation of these algorithms we will now define
-a few terms and notations. All variables are taken from the set $X$
-and all values from the set $\D$.
+a few terms and notations. We will assume a fixed, finite set $X$ of
+variables. We will also define $\CZ = \Z \cup \{-\infty, \infty\}$.
\begin{definition}
- \textbf{Variable Assignments:} $X \to \D$. A function from a
+ \textbf{Variable Assignments:} $X \to \CZ$. A function from a
variable to a value in our domain. An underlined value
(eg. $\underline{\infty}$) indicates a variable assignment mapping
everything to that value. Variable assignments may be combined with
@@ -76,28 +81,39 @@ and all values from the set $\D$.
\end{definition}
\begin{definition}
+ \textbf{Operators:} An operator is a function $o: 2^{\CZ} \to
+  \CZ$. Operators belong to the set $\Operators$.
+\end{definition}
+
+\begin{definition}
\textbf{Expressions:} For the purposes of this discussion we will
- consider expressions, $e \in E$, as $e : (X \to \D) \to \D$, a
+ consider expressions, $e \in E$, as $e : (X \to \CZ) \to \CZ$, a
mapping from a variable assignment to the expression's value in that
assignment.
+
+ Expressions are either of the form $e = o\{ e_1, e_2, ..., e_k \}$
+  (where $o \in \Operators$ and $e_1, e_2, ... e_k \in E$) or else they are of
+ the form $e \in \CZ$ (a constant value) or $e \in X$ (a variable).
- The subset of expressions of the form $\max(e_1, e_2,
- ... e_n)$, with $e_1, e_2, ..., e_n \in E$ are referred to as
- \emph{$\max$-expressions}, denoted by $E_{\max} \subset E$.
+ The subset of expressions of the form $\max\{ e_1, e_2, ... e_k \}$,
+ with $e_1, e_2, ..., e_k \in E$ are referred to as
+ \emph{$\max$-expressions}, denoted by $E_{\max} \subset E$.
+
+  For an expression $e$, the set of expressions directly containing
+  it, $\{ e_\parent \in E \mid e_\parent = o\{ e_1, e_2, ... e_k \},
+  o \in \Operators, e = e_i \text{ for some } i \}$, is denoted
+  $\parents(e)$.
\end{definition}
\begin{definition}
- \textbf{Equation System:} $\{ x = e_x \mid x \in X, e_x \in E
- \}$. The values $x \in X$ are called ``variables'' while the values
- $e_x \in E$ are called ``right-hand-sides''. The set of all equation
- systems is denoted by $\Systems$;
-
- An equation system can be considered as a function $\system : X
- \to ((X \to \D) \to \D)$; $\system[x] = e_x$, mapping from each
- variable to its right-hand-side.
+ \textbf{Equation System:} An equation system is a mapping from
+ variables to expressions. $\system : X \to E_\system$, where
+ $E_\system \subset E$. As each element of $E$ is also a function $(X
+  \to \CZ) \to \CZ$, an equation system can be considered as a
+ function $\system : X \to ((X \to \CZ) \to \CZ)$ (which is
+ equivalent to the above).
An equation system can also be considered as a function $\system
- : (X \to \D) \to (X \to \D)$, $\system(\rho)[x] = e_x(\rho)$,
+ : (X \to \CZ) \to (X \to \CZ)$, $\system(\rho)[x] = e_x(\rho)$,
taking a variable assignment $\rho$ and returning the result of each
$e_x$'s evaluation in $\rho$.
@@ -105,19 +121,35 @@ and all values from the set $\D$.
the argument order in the following way: $\system(\rho)[x] =
\system[x](\rho)$. It is only for convenience that we use both
forms.
+
+ The expressions in $E_\system$ are referred to as ``right hand
+ sides''. The set of all equation systems is denoted by $\Systems$.
+
+  An inverse mapping, $\system^{-1} : E \to 2^X$, also exists for each
+  system. This is defined as:
+ \begin{align*}
+    \system^{-1}(e) &= \{ x \mid \system(x) = e \} \cup
+    \bigcup \{ \system^{-1}(e_\parent) \mid e_\parent \in \parents(e) \}
+ \end{align*}
+ It provides a mapping from each expression to the variables for
+ which it is a `subexpression' of the variable's right hand side.
\end{definition}
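+
+As a small example of these definitions, consider an equation system
+over the variables $X = \{ x, y \}$ with
+\begin{align*}
+  \system[x] &= \max\{ 0, y \} &
+  \system[y] &= \min\{ x, 5 \}
+\end{align*}
+For the variable assignment $\rho = \{ x \mapsto 1, y \mapsto 3 \}$ we
+have $\system(\rho)[x] = \max\{ 0, \rho(y) \} = 3$ and
+$\system(\rho)[y] = \min\{ \rho(x), 5 \} = 1$, which illustrates the
+reading of $\system$ as a function $(X \to \CZ) \to (X \to \CZ)$.
+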
\begin{definition}
\textbf{Dependencies:} A variable or expression $x$ is said to
- \emph{depend on} $y$ if a change to the value of $y$ induces a
- change in the value of $x$. If $x$ depends on $y$ then $y$ is said
- to \emph{influence} $x$.
+ \emph{depend on} $y$ if $x(\rho) \ne x(\varrho)$, when
+ \begin{align*}
+ \rho(z) &= \varrho(z) & \forall z \in X \backslash \{y\} \\
+ \rho(y) &\ne \varrho(y)
+ \end{align*}
+ If $x$ depends on $y$ then $y$ is said to
+ \emph{influence} $x$.
\end{definition}
\begin{definition}
- \textbf{Local:} A solver is said be local if, for some $e_x \in E$,
- the evaluation of $e_x$ only requires the evaluation of other
- variables which $e_x$ may depend on.
+  \textbf{Local:} A solver is said to be local if, for some system
+  $\system$ and some $e \in E_\system$, it can solve for $e$ without
+  evaluating every element of $E_\system$.
\end{definition}
\section{Fixpoint Iteration}
@@ -155,7 +187,13 @@ approach which can evaluate smaller portions of the system in each
iteration would be a significant improvement.
An additional deficiency of Kleene iteration is that it is not
-guaranteed to terminate. In some cases Kleene iteration must iterate
+guaranteed to terminate for all fixpoints. An example system is
+presented in Figure \ref{fig:kleene-infinite}. In this case our first
+iteration will give us $\{ x \mapsto 0, y \mapsto 0 \}$, leading to a
+second iteration resulting in $\{ x \mapsto -1, y \mapsto 0\}$. This
+will continue without bound, with $x$ decreasing by one at each
+iteration, so the iteration never terminates.
+
+ In some cases Kleene iteration must iterate
an infinite number of times in order to reach a fixpoint. An example
system is presented as Figure \ref{fig:kleene-infinite}. In this case
$x$ will take the value of $0$ in the first iteration, then $y$ will
@@ -166,8 +204,9 @@ greatest fixpoint of $\{ x \mapsto -\infty, y \mapsto -\infty \}$.
\begin{figure}[H]
\begin{align*}
- x &= \min(0, y) \\
- y &= x - 1
+ x &= \min(0, x - 1) \\
+ y &= \min(0, \infty \oplus x) &
+ \text{where } \infty \oplus -\infty &= -\infty
\end{align*}
\caption{An example equation system for which Kleene iteration will
not terminate}
@@ -179,24 +218,25 @@ significant problem in our application, however, as it has been shown
in the work of Gawlitza
et. al. \cite{Gawlitza:2007:PFC:1762174.1762203}, that Kleene
iteration in this specific case is restricted to at most $|X|$
-iterations.
+iterations. Each of those iterations, however, requires $\Theta(|X|)$
+right hand side evaluations ($\Theta(|X|^2)$ in total), which is very
+inefficient.
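+
+To make this behaviour concrete, the following short Python sketch
+(an illustrative rendering using our own naming) performs naive Kleene
+iteration from $\underline{\infty}$ on the system of Figure
+\ref{fig:kleene-infinite}; the value of $x$ is still decreasing after
+any finite number of iterations:
+
+\begin{verbatim}
+INF = float("inf")
+
+def kleene(system, max_iterations=100):
+    """Naive Kleene iteration: re-evaluate every right hand side in
+    each round, starting from the top element (infinity)."""
+    rho = {x: INF for x in system}
+    for _ in range(max_iterations):
+        new_rho = {x: f(rho) for x, f in system.items()}
+        if new_rho == rho:      # a fixpoint has been reached
+            return rho
+        rho = new_rho
+    return rho                  # gave up; iteration has not terminated
+
+# The system of the figure, modelling the operator (+) so that
+# infinity (+) -infinity = -infinity:
+system = {
+    "x": lambda r: min(0, r["x"] - 1),
+    "y": lambda r: min(0, -INF if r["x"] == -INF else INF),
+}
+print(kleene(system))           # x is still falling after 100 rounds
+\end{verbatim}
+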
\subsection{W-DFS algorithm} \label{sec:wdfs}
-The W-DFS algorithm presented by Seidl, et al. takes into account some
-form of data-dependencies as it solves the system. This gives it the
-ability to leave portions of the system unevaluated when it is certain
-that those values have not changed. The algorithm is presented in
-Listing \ref{algo:w-dfs}.
+The W-DFS algorithm of Seidl, et al. takes into account some form of
+data-dependencies as it solves the system. This gives it the ability
+to leave portions of the system unevaluated when it is certain that
+those values have not changed. The algorithm is presented in Listing
+\ref{algo:wdfs}.
\begin{algorithm}
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- $D$ & $\in (X \to \D)$, a mapping from variables to their current
+ $D$ & $\in X \to \CZ$, a mapping from variables to their current
values \\
- $\inflFP$ & $\in (X \to 2^X)$, a mapping from a variable to the
+ $\inflFP$ & $\in X \to 2^X$, a mapping from a variable to the
variables it \emph{may} influence \\
$\stableFP$ & $\subseteq X$, a set of ``stable'' variables \\
@@ -207,14 +247,14 @@ Listing \ref{algo:w-dfs}.
\end{algorithmic}
\begin{algorithmic}
- \Function {solvefixpoint} {$s \in \Systems$}
+ \Function {init} {$s \in \Systems$}
\State $D = \underline{\infty}$
\State $\inflFP = \{x \mapsto \emptyset \mid \forall x \in X\}$
\State $\stableFP = \emptyset$
\State $\system = s$
\State \Return $\lambda x . (\solve(x); D[x])$
\EndFunction
- \label{algo:w-dfs:init}
+ \label{algo:wdfs:init}
\end{algorithmic}
\begin{algorithmic}
@@ -249,10 +289,10 @@ Listing \ref{algo:w-dfs}.
\caption{The W-DFS mentioned in \cite{DBLP:tr/trier/MI96-11} and
presented in \cite{fixpoint-slides}, modified to find
greatest-fixpoints of monotonic fixpoint equations}
- \label{algo:w-dfs}
+ \label{algo:wdfs}
\end{algorithm}
-The function $\init$ in Listing \ref{algo:w-dfs:init} is of the type
+The function $\init$ in Listing \ref{algo:wdfs:init} is of the type
$\Systems \to (X \to D)$. It takes an equation system and returns a
function to query the greatest fixpoint. Each fixpoint value is solved
on demand and will only evaluate the subset of the equation system
@@ -274,31 +314,90 @@ at which point the value is simply looked up in $D$.
Whenever a variable's value changes it will ``destabilise'' and
re-evaluate any variables which \emph{may} depend on it for their
values, as they may change their value if re-evaluated (and hence are
-not stable). If these values change then they, too, will destabilise
-their dependent variables, and so on.
+not certainly stable). If these values change then they, too, will
+destabilise their dependent variables, and so on.
The W-DFS algorithm provides us with a \emph{local},
\emph{demand-driven} solver for greatest fixpoints of monotonic
fixpoint problems.
+Because each value is only computed when requested, the solver avoids
+calculating results which are irrelevant to the final outcome. To
+provide a more tangible example, consider the equation system
+presented in Figure \ref{fig:wdfs-example}.
+\begin{figure}
+ \begin{align*}
+ x &= \min(0, y) \\
+ y &= x \\
+ a &= \min(0, b) \\
+ b &= c \\
+ c &= d \\
+ d &= e \\
+    e &= a
+ \end{align*}
+ \caption{Example equation system for a W-DFS solve}
+ \label{fig:wdfs-example}
+\end{figure}
+It is clear by inspecting this system that it could be split into two
+equation systems, one consisting of $\{x, y\}$ and the other of $\{a,
+b, c, d, e\}$. In order to find the value of a variable in either of
+these sub-systems it is necessary to evaluate the entire sub-system,
+but it is not necessary to evaluate the other sub-system. So, for
+example, to find the value of $x$ in the greatest fixpoint it is
+unnecessary to find the value of the variable $d$.
-\section{$\max$-strategy Iteration}
+W-DFS, in this case, would only evaluate the sub-system containing the
+requested variable and would leave the other sub-system entirely
+unsolved until a variable within it has its value requested.
-The $\max$-strategy iteration can be viewed as an accelerated fixpoint
-problem. We are attempting to find a strategy, $\sigma: E_{\max} \to
-E$ that will result in the greatest value for each $e \in
-E_{\max}$. Therefore if we consider our ``variables'' to be
-$\max$-expressions and our ``values'' to be their subexpressions then
-we can solve for the best $\max$-strategy using a similar approach to
-the W-DFS algorithm presented above.
+This means that W-DFS can avoid evaluating a large portion of some
+equation systems, thereby making it a \emph{local} solver.
-Because $\max$-strategy iteration is so similar to a standard fixpoint
-problem it is possible
+
+
+
+
+\section{$\max$-strategy Iteration}
+
+The problem of $\max$-strategy iteration is that of finding a mapping
+$\sigma: E_{\max} \to E$ from each $\max$-expression to its greatest
+sub-expression. (We will also use $\sigma: \Systems \to \Systems$ as a
+shorthand to denote the system obtained by replacing each
+$\max$-expression, $x$, in $\system$ with $\sigma(x)$.)
+
+In the particular case of \emph{monotonic}, \emph{expansive} operators
+it has been shown\cite{Gawlitza:2007:PFC:1762174.1762203} that this
+process will take a finite number of iterations if we ensure at each
+iteration that we make a \emph{strict} improvement on our
+strategy. This means that each time we improve our strategy we must
+make it \emph{better} in at least one $\max$-expression, and no worse
+in the others.
+
+To this end a new function, $P_{\max}: ((E_{\max} \to E), (X \to \CZ))
+\to (E_{\max} \to E)$, is used below as a ``strategy improvement
+operator''. It takes a $\max$-strategy and a variable assignment and
+returns a new $\max$-strategy which constitutes an `improvement' of
+the strategy in the variable assignment $\rho$.
+
+In more precise terms: if we have a $\max$-strategy $\sigma$, a
+variable assignment $\rho$ and $\varsigma = P_{\max}(\sigma, \rho)$,
+then:
+\begin{align*}
+ \sigma(\system)(\rho) \le \varsigma(\system)(\rho)
+\end{align*}
+
+If it is possible for $\sigma$ to be improved then $P_{\max}$ will
+return an improvement, otherwise it will return $\sigma$ unchanged.
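+
+As a small illustration of this, suppose a system contains the
+$\max$-expression $e = \max\{ 1, y \}$, that the current strategy has
+$\sigma(e) = 1$, and that the current variable assignment is $\rho =
+\{ y \mapsto 3 \}$. Since $y$ evaluates to $3 > 1$ under $\rho$, an
+improvement operator may return $\varsigma = P_{\max}(\sigma, \rho)$
+with $\varsigma(e) = y$. If instead $\rho = \{ y \mapsto 0 \}$ then no
+strict improvement is possible at $e$, and $\sigma(e) = 1$ is
+retained.
+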
\subsection{Naive approach}
+The problem of $\max$-strategy iteration is one of constantly
+improving until no further improvement can be made, so a naive
+approach is to perform a straightforward loop, improving the
+strategy at each step, until an equilibrium point is reached. This is
+the approach presented in Listing \ref{algo:naive-strategy}.
+
\begin{algorithm}[H]
\begin{algorithmic}
\Assumptions
@@ -308,16 +407,16 @@ problem it is possible
$\system$ & $\in \Systems$, an equation
system \\
- $\rho$ & $\in (X \to D)$, a variable assignment \\
+      $\rho$ & $\in X \to \CZ$, a variable assignment \\
- $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \D)) \to (E_{\max}
+ $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \CZ)) \to (E_{\max}
\to E)$, a $\max$-strategy improvement operator \\
\end{tabularx}
\EndAssumptions
\Function {solvestrategy} {$\system \in \Systems$}
\State $k = 0$
- \State $\sigma_0 = \lambda x . -\infty$
+    \State $\sigma_0 = \{ x \mapsto -\infty \mid \forall x \in E_{\max} \}$
\State $\rho_0 = \underline{-\infty}$
\Repeat
\State $\sigma_{k+1} = P_{\max}(\sigma_k, \rho_k)$
@@ -331,12 +430,25 @@ problem it is possible
\label{algo:naive-strategy}
\end{algorithm}
+This approach is quite similar to that of Kleene iteration, but from
+the results of Gawlitza et al.\cite{Gawlitza:2007:PFC:1762174.1762203}
+it is known that this iteration is guaranteed to terminate. It is
+still an inefficient way to solve this problem, however, for many of
+the same reasons as Kleene iteration: it may spend significant time
+attempting to improve strategies which cannot have changed, thereby
+performing a large amount of unnecessary work.
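+
+A Python sketch of this naive loop (using our own naming, and with the
+improvement operator and the fixpoint solver passed in as parameters)
+might look as follows:
+
+\begin{verbatim}
+def solve_strategy_naive(improve, solve_fixpoint, sigma0, rho0):
+    """improve(sigma, rho): an improved strategy, or sigma unchanged
+    if no strict improvement is possible.
+    solve_fixpoint(sigma): the fixpoint reached under sigma."""
+    sigma, rho = sigma0, rho0
+    while True:
+        new_sigma = improve(sigma, rho)
+        if new_sigma == sigma:       # equilibrium: nothing improved
+            return sigma, rho
+        sigma = new_sigma
+        rho = solve_fixpoint(sigma)  # re-solve the whole system
+\end{verbatim}
+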
\subsection{Adapted W-DFS algorithm} \label{sec:adapted-wdfs}
-This, then, allows us to use the W-DFS algorithm to re-evaluate only
-those parts of the strategy which have changed. Listing
-\ref{algo:w-dfs-max} presents this variation on W-DFS.
+The $\max$-strategy iteration can be viewed as an accelerated fixpoint
+problem. We are attempting to find a strategy, $\sigma: E_{\max} \to
+E$ that will result in the greatest value for each $e \in
+E_{\max}$. Therefore if we consider our ``variables'' to be
+$\max$-expressions and our ``values'' to be their subexpressions then
+we can solve for the best $\max$-strategy using a similar approach to
+the W-DFS algorithm presented above in Section \ref{sec:wdfs}. Listing
+\ref{algo:adapted-wdfs} presents one variation on W-DFS to allow it to
+solve $\max$-strategy iteration problems.
\begin{algorithm}[H]
\begin{algorithmic}
@@ -353,23 +465,21 @@ those parts of the strategy which have changed. Listing
$\system$ & $\in \Systems$, an equation system \\
- $P_{\max}$ & $\in (E_{\max}, E, (X \to \D)) \to E$, a function
- mapping from a $\max$-expression, the current strategy and a
- variable assignment to a better subexpression than the current
- strategy in that variable assignment \\
+ $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \CZ)) \to (E_{\max}
+ \to E)$, a $\max$-strategy improvement operator \\
\end{tabularx}
\EndGlobals
\end{algorithmic}
\begin{algorithmic}
- \Function {solvestrategy} {$s \in \Systems$}
+ \Function {init} {$s \in \Systems$}
\State $\sigma = \underline{-\infty}$
\State $\inflSI = \{x \mapsto \emptyset \mid \forall x \in E_{\max}\}$
\State $\stableSI = \emptyset$
\State $\system = s$
\State \Return $\lambda x . (\solve(x); \sigma[x])$
\EndFunction
- \label{algo:w-dfs:init}
+ \label{algo:adapted-wdfs:init}
\end{algorithmic}
\begin{algorithmic}
@@ -385,7 +495,7 @@ those parts of the strategy which have changed. Listing
\If {$x \not \in \stableSI$}
\State $\stableSI = \stableSI \cup \{x\}$
\State $\rho = \solve \fixpoint(\sigma(\system))$
- \State $v = P_{\max}(x, \sigma[x], \lambda y . \eval(x, y))$
+ \State $v = P_{\max}(\lambda y . \eval(x, y), \rho)[x]$
\If {$v \ne \sigma[x]$}
\State $\sigma = \{ x \mapsto v\} \oplus \sigma$
\State $W = \inflSI[x]$
@@ -400,9 +510,26 @@ those parts of the strategy which have changed. Listing
\end{algorithmic}
\caption{W-DFS, this time modified to find the best $\max$-strategy.}
- \label{algo:w-dfs-max}
+ \label{algo:adapted-wdfs}
\end{algorithm}
+This approach retains the benefits of the W-DFS algorithm for solving
+fixpoints, but applies them to $\max$-strategy iteration problems. It
+will take into account dependencies between the $\max$-expressions and
+only attempt to improve the strategy for a given $\max$-expression if
+some of the $\max$-expressions which may influence it have changed.
+
+For this particular algorithm to work, however, we must assume another
+property of $P_{\max}$. In Listing \ref{algo:adapted-wdfs} we take the
+value of $P_{\max}(\lambda y . \eval(x, y), \rho)[x]$. That is:
+improve the strategy, then use only the strategy for $x$. In order for
+this algorithm to be correct $P_{\max}$ must always improve the
+strategy at $x$ if it is possible to do so.
+
+Additionally, in order to remain efficient, $P_{\max}$ should not
+attempt to improve the strategy for a $\max$-expression until that
+expression is requested. This does not affect the correctness of the
+algorithm, but it significantly improves the algorithm's performance.
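+
+A single, lazily-requested improvement step of this kind might be
+sketched in Python as follows, assuming (purely for illustration) that
+each $\max$-expression object exposes a list of subexpressions and an
+\texttt{eval(rho)} method:
+
+\begin{verbatim}
+def improve_at(max_expr, sigma, rho):
+    """Return a strictly better subexpression for max_expr under rho,
+    or the current choice if no strict improvement exists."""
+    current = sigma[max_expr]
+    best = max(max_expr.subexpressions, key=lambda e: e.eval(rho))
+    if best.eval(rho) > current.eval(rho):
+        return best
+    return current
+\end{verbatim}
+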
\section{Local Demand-driven Strategy Improvement (LDSI)}
@@ -414,7 +541,8 @@ provide some channel for communication between the two W-DFS instances
to reduce the amount of work that must be done at each stage. By
supplying each other with information about which parts of the system
have changed we can jump in partway through the process of finding a
-solution, thereby avoiding repeating work that we have already done.
+fixpoint, thereby avoiding unnecessarily repeating work that we have
+already done.
The new \emph{Local Demand-driven Strategy Improvement} algorithm,
\emph{LDSI}, seeks to do this. By adding an ``invalidate'' procedure
@@ -428,9 +556,9 @@ strategy-improvement step, invalidates a portion of the current
fixpoint iteration which may depend on the changed strategy. The
fixpoint iteration then re-stabilises each variable as it is
requested, tracking which values have changed since the last time they
-iterated. The $\max$-strategy iteration then uses this list of changed
-variables to determine which portions of the current strategy must be
-re-stabilised once more. This process continues until the
+were stabilised. The $\max$-strategy iteration then uses this list of
+changed variables to determine which portions of the current strategy
+must be re-stabilised once more. This process continues until the
$\max$-strategy stabilises (at which point the fixpoint will also
stabilise).
@@ -449,13 +577,13 @@ This algorithm is presented in three parts.
Section \ref{sec:ldsi:top-level} presents the global state and entry
point to the algorithm.
-Section \ref{sec:ldsi:max-strategy} presents the fixpoint portion of
+Section \ref{sec:ldsi:fixpoint} presents the fixpoint portion of
the algorithm. This portion bears many similarities to the W-DFS
algorithm presented in Section \ref{sec:wdfs}, with a few
modifications to track changes and allow external invalidations of
parts of the solution.
-Section \ref{sec:ldsi:fixpoint} presents the $\max$-strategy portion
+Section \ref{sec:ldsi:max-strategy} presents the $\max$-strategy portion
of the algorithm. This portion is quite similar to the Adapted W-DFS
algorithm presented in Section \ref{sec:adapted-wdfs}, with some
modifications to allow for communication with the fixpoint portion of
@@ -471,10 +599,13 @@ equation systems involving $\max$-expressions.
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- $D$ & $\in (X \to \D)$, a mapping from variables to their current
- values \\
+ $D$ & $\in X \to \CZ$, a mapping from variables to their current
+ value \\
+
+ $D_\old$ & $\in X \to \CZ$, a mapping from variables to their
+ last stable value \\
- $\inflFP$ & $\in (X \to 2^X)$, a mapping from a variable to the
+ $\inflFP$ & $\in X \to 2^X$, a mapping from a variable to the
variables it \emph{may} influence \\
$\stableFP$ & $\subseteq X$, a set of ``stable'' variables \\
@@ -485,19 +616,17 @@ equation systems involving $\max$-expressions.
\\
- $\sigma$ & $\in (E_{\max} \to E)$, a mapping from
+ $\sigma$ & $\in E_{\max} \to E$, a mapping from
$\max$-expressions to a subexpression \\
- $\inflSI$ & $\in (E_{\max} \to 2^{E_{\max}}$, a mapping from a
+ $\inflSI$ & $\in E_{\max} \to 2^{E_{\max}}$, a mapping from a
$\max$-expression to the sub-expressions it influences \\
$\stableSI$ & $\subseteq E_{\max}$, the set of all
$\max$-expressions whose strategies are stable \\
- $P_{\max}$ & $\in (E_{\max}, E, (X \to \D)) \to E$, a function
- mapping from a $\max$-expression, the current strategy and a
- variable assignment to a better subexpression than the current
- strategy in that variable assignment \\
+ $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \CZ)) \to (E_{\max}
+ \to E)$, a $\max$-strategy improvement operator \\
\\
@@ -506,7 +635,7 @@ equation systems involving $\max$-expressions.
\EndGlobals
\end{algorithmic}
\caption{Global variables used by the LDSI algorithm}
- \label{algo:ldsi:fixpoint:globals}
+ \label{algo:ldsi:globals}
\end{algorithm}
These variables are mostly just a combination of the variables found
@@ -514,113 +643,100 @@ in Sections \ref{sec:wdfs} and \ref{sec:adapted-wdfs}. The only
exception is $\touchedFP$, which is a newly introduced variable to
track variables which are touched by the fixpoint iteration steps.
-
-
-\subsection{$\max$-strategy iteration} \label{sec:ldsi:max-strategy}
-\subsection{Fixpoint iteration} \label{sec:ldsi:fixpoint}
-\subsection{Correctness} \label{sec:ldsi:correctness}
-
-THIS IS THE END OF WHERE I'M UP TO! The stuff after this still needs
-to be fixed up to use the new variables and have the ``touched'' thing
-properly put in.
-
-\endinput
-
-
-Listings
-\ref{algo:ldsi:fixpoint:globals},
-\ref{algo:ldsi:fixpoint:eval},
-\ref{algo:ldsi:fixpoint:invalidate},
-\ref{algo:ldsi:fixpoint:solve} and
-\ref{algo:ldsi:fixpoint:stabilise} present the
-fixpoint-iteration portion of the algorithm, while Listing
-\ref{algo:combined-max} presents the $\max$-strategy portion. The
-correctness of this new algorithm is argued in
-\ref{sec:ldsi-correctness}.
-
\begin{algorithm}[H]
\begin{algorithmic}
- \Globals
- \begin{tabularx}{0.9\textwidth}{rX}
- $D$ & $X \to \D$ - a mapping from variables to values,
- starting at $\{ x \mapsto \infty \}$ \\
-
- $\sigma$ & $E_{\max} \to E$ - a mapping from $\max$-expressions
- to their sub-expressions (a $\max$-strategy) \\
-
- $I_{X,X}$ & $X \to X$ - a mapping from a variable to the
- variables it influences \\
-
- $I_{\max,\max}$ & $E_{\max} \to E_{\max}$ - a mapping from a
- $\max$-expression to the $\max$-expressions it influences \\
+ \Function {init} {$s \in \Systems$}
+ \State $D = \underline{-\infty}$
+ \State $D_\old = \underline{-\infty}$
+ \State $\inflFP = \{x \mapsto \emptyset \mid \forall x \in X\}$
+ \State $\stableFP = X$
+ \State $\touchedFP = \emptyset$
- $U_{X}$ & The set of all variables whose values have not
- stabilised to a final fixpoint value (unstable variables) \\
+ \State $\sigma = \underline{-\infty}$
+ \State $\inflSI = \{x \mapsto \emptyset \mid \forall x \in E_{\max}\}$
+ \State $\stableSI = \emptyset$
- $S_{\max}$ & The set of all $\max$ expressions whose strategies
- have stabilised to their final strategy (stable
- $\max$-expressions) \\
+ \State $\system = s$
- $\system$ & The equation system, a mapping from a variable
- to its associated function \\
- \end{tabularx}
- \EndGlobals
+ \State \Return $\lambda x . (\solve \strategy (x); \solve
+ \fixpoint (x) ; D[x])$
+ \EndFunction
\end{algorithmic}
- \caption{Global variables used by the LDSI algorithm}
- \label{algo:ldsi:fixpoint:globals}
+ \caption{LSDI init function and entry point}
+ \label{algo:ldsi:init}
\end{algorithm}
-A few things are of particular note for the global variables. In
-particular the difference between $U_X$ being an unstable set and
-$S_{\max}$ being a stable set. In reality these two are entirely
-equivalent, but because the fixpoint-iteration will be started as
-being entirely ``stable'' (with values of $-\infty$) it is of more
-practical benefit to avoid the extra work populating the ``stable''
-set by instead storing unstable values.
+The $\init$ function is, similarly, just a combination of the $\init$
+functions presented in Sections \ref{sec:wdfs} and
+\ref{sec:adapted-wdfs}. The $\touchedFP$ and $D_\old$ variables are
+also initialised, as they were not present in either of the previous
+$\init$ functions.
-The other variables are just the state from each of the previous
-algorithms for intelligently performing $\max$-strategy iteration and
-fixpoint iteration (as were presented in Sections \ref{sec:wdfs}
-and \ref{sec:adapted-wdfs}). $D$ and $I_{X,X}$ are taken from the
-W-DFS algorithm, while $\sigma$ and $I_{\max,\max}$ are taken from the
-Adapted W-DFS algorithm.
+It is important to note, also, that $D$ and $D_\old$ are both
+initialised to $\underline{-\infty}$ and $\stableFP$ is initially the
+entire set of variables. This is because we are beginning with a known
+strategy mapping every $\max$-expression to $-\infty$, so we already
+know that each fixpoint value must be stable at $-\infty$, and we can
+therefore save an entire fixpoint iteration step by taking this into
+account. This state corresponds to an already complete fixpoint
+iteration with the given initial strategy.
+The type of this function is also different to that of either of the
+original $\init$ functions: $\init : \Systems \to (X \to \CZ)$. The function
+returned by $\init$ performs the entire process of $\max$-strategy
+iteration and fixpoint iteration and calculates the final value of
+each variable for the least fixpoint, returning a value in $\CZ$.
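+
+In Python terms the value returned by $\init$ is simply a closure over
+the solver's state, along the lines of the following sketch (names are
+ours):
+
+\begin{verbatim}
+def ldsi_init(solve_strategy, solve_fixpoint, D):
+    """Sketch of the closure returned by init: stabilise the strategy
+    relevant to x, then x's fixpoint value, then read it from D."""
+    def query(x):
+        solve_strategy(x)
+        solve_fixpoint(x)
+        return D[x]
+    return query
+\end{verbatim}
+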
+\subsection{Fixpoint iteration} \label{sec:ldsi:fixpoint}
+
+The process of fixpoint iteration is extremely similar to the fixpoint
+iteration algorithm presented in Section \ref{sec:wdfs}. The only
+difference is that we have an $\invalidate \fixpoint$ procedure which
+allows the $\max$-strategy iteration to invalidate a portion of the
+system to ensure its value is recalculated by the fixpoint iteration
+process. The last stable value is also stored in this procedure to
+allow \emph{changed} values to be identified, not merely
+\emph{invalidated} ones.
+
\begin{algorithm}[H]
\begin{algorithmic}
\Function {evalfixpoint} {$x \in X$, $y \in X$}
\State $\solve \fixpoint(y)$
- \State $I_{X,X}[y] = I_{X,X}[y] \cup \{x\}$
+ \State $\inflFP[y] = \inflFP[y] \cup \{x\}$
\State \Return $D[y]$
\EndFunction
\end{algorithmic}
- \caption{Utility function used to track fixpoint variable dependencies.}
+ \caption{Utility function used to track fixpoint variable
+ dependencies. This function is very similar to the $\eval$
+ function presented in Listing \ref{algo:wdfs}}
\label{algo:ldsi:fixpoint:eval}
\end{algorithm}
This procedure is exactly the same as the equivalent method in the
-W-DFS algorithm. It allows us to more easily track dependencies
-between fixpoint variables by injecting this function as our
-variable-lookup function. It then both calculates a new value for the
-variable (the $\solve \fixpoint$ call) and notes the dependency
-between $x$ and $y$.
+W-DFS algorithm, except for the fact that $\solve$ has been renamed to
+$\solve \fixpoint$. It allows us to track dependencies between
+fixpoint variables by injecting this function as our variable-lookup
+function. It then both calculates a new value for the variable (the
+$\solve \fixpoint$ call) and notes the dependency between $x$ and $y$.
\begin{algorithm}[H]
\begin{algorithmic}
- \Function {invalidatefixpoint} {$x \in X$}
+ \Procedure {invalidatefixpoint} {$x \in X$}
\Comment{Invalidate a fixpoint variable}
- \If {$x \not \in U_X$}
- \State $U_X = U_X \cup \{x\}$
+ \If {$x \in \stableFP$}
+ \State $\stableFP = \stableFP \backslash \{x\}$
+      \State $D_\old[x] = D[x]$
\State $D[x] = \infty$
- \State $W = I[x]$
- \State $I[x] = \emptyset$
+ \State $W = \inflFP[x]$
+ \State $\inflFP[x] = \emptyset$
+ \State $\touchedFP = \touchedFP \cup \{ x \}$
\For {$v \in W$}
\State $\invalidate \fixpoint(v)$
\EndFor
\EndIf
- \EndFunction
+ \EndProcedure
\end{algorithmic}
\caption{Fixpoint subroutine called from the $\max$-strategy
iteration portion to invalidate fixpoint variables}
@@ -635,141 +751,166 @@ and will require re-evaluation. This makes it possible to only
re-evaluate a partial system (the solving of which is also be delayed
until requested by the $\solve \fixpoint$ procedure).
+While invalidating variables we also note their $\old$ value, and mark
+them as ``touched''. As the $\old$ value is only set in this
+procedure, and only when $x$ is stable, it gives us the ability to see
+whether a variable has stabilised to a new value, or whether it has
+merely re-stabilised to the same value as last time. This can prevent
+us from doing extra work in the $\max$-strategy iteration phase.
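+
+In Python terms this invalidation step might be sketched as follows
+(our own rendering, operating on a simple dictionary of solver state):
+
+\begin{verbatim}
+INF = float("inf")
+
+def invalidate_fixpoint(state, x):
+    """Destabilise x and, transitively, everything it may influence,
+    remembering its last stable value and marking it as touched."""
+    if x in state["stable"]:
+        state["stable"].discard(x)
+        state["D_old"][x] = state["D"][x]
+        state["D"][x] = INF
+        work = state["infl"][x]
+        state["infl"][x] = set()
+        state["touched"].add(x)
+        for v in work:
+            invalidate_fixpoint(state, v)
+\end{verbatim}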
+
\begin{algorithm}[H]
\begin{algorithmic}
- \Function {solvefixpoint} {$x$}
- \Comment{Solve a specific expression and place its value in $D$}
- \If {$x \in U_X$}
- \State $f = \system[x]$
- \State $U_X = U_X \backslash \{x\}$
+ \Procedure {solvefixpoint} {$x \in X$}
+ \If {$x \not \in \stableFP$}
+ \State $\stableFP = \stableFP \cup \{ x \}$
\State $v = \sigma(\system[x])( \lambda y . \eval \fixpoint(x, y) )$
\If {$v \ne D[x]$}
\State $D = \{ x \mapsto v \} \oplus D$
- \State $W = I_{X,X}[x]$
- \State $I_{X,X}[x] = \emptyset$
- \State $\invalidate \strategy(x)$
- \State $\stable = \stable \backslash W$
+ \State $W = \inflFP[x]$
+ \State $\inflFP[x] = \emptyset$
+ \State $\stableFP = \stableFP \backslash W$
\For {$v \in W$}
\State $\solve \fixpoint(v)$
\EndFor
\EndIf
\EndIf
- \EndFunction
+ \EndProcedure
\end{algorithmic}
\caption{The subroutine of the fixpoint iteration responsible for
- solving for each variable}
+ solving for each variable. This is very similar to the $\solve$
+ procedure in Listing \ref{algo:wdfs}}
\label{algo:ldsi:fixpoint:solve}
\end{algorithm}
+The $\solve \fixpoint$ procedure is extremely similar to the $\solve$
+procedure presented in Listing \ref{algo:wdfs}. There are two main
+differences: the self-recursive call has been renamed (to reflect the
+change in the function's name), and instead of using $\system[x]$ we
+must use $\sigma(\system[x])$. The use of $\sigma$ is to ensure that
+the fixpoint is solving the variable using the current
+$\max$-strategy. This $\max$-strategy is stable for the duration of
+the $\solve \fixpoint$ execution, so it will result in fixpoint
+iteration being performed over a stable system.
+
After an evaluation of the $\solve \fixpoint$ procedure, the variable
-supplied as its argument will have been stabilised within the current
-$\max$-strategy. This means that it will have taken on the same value
-as it would take in the greatest fixpoint of this system. Because the
-$\solve \fixpoint$ calls need not be made immediately it's possible
-for a variable to be invalidated by several $\max$-strategy
-improvements without being re-evaluated.
+$x$ will have stabilised to the value it takes in the greatest
+fixpoint of the current $\max$-strategy, and that value will be stored
+in $D[x]$. Stabilising $x$ may result in other related variables also
+being stabilised.
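+
+A sketch of this procedure in the same style as the earlier W-DFS
+sketch (again with our own naming, and assuming that
+\texttt{state["sigma"]} applies the current strategy to a right hand
+side and returns an evaluable function) is:
+
+\begin{verbatim}
+def eval_fixpoint(state, x, y):
+    solve_fixpoint(state, y)
+    state["infl"][y].add(x)           # y may influence x
+    return state["D"][y]
+
+def solve_fixpoint(state, x):
+    """Like the W-DFS solve step, but the right hand side is first
+    filtered through the current max-strategy sigma."""
+    if x not in state["stable"]:
+        state["stable"].add(x)
+        rhs = state["sigma"](state["system"][x])
+        v = rhs(lambda y: eval_fixpoint(state, x, y))
+        if v != state["D"][x]:
+            state["D"][x] = v
+            work = state["infl"][x]
+            state["infl"][x] = set()
+            state["stable"] -= work
+            for w in work:
+                solve_fixpoint(state, w)
+\end{verbatim}
+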
+\subsection{$\max$-strategy iteration} \label{sec:ldsi:max-strategy}
+
+
\begin{algorithm}[H]
\begin{algorithmic}
\Function {evalstrategy} {$x \in E_{\max}$, $y \in E_{\max}$}
- \Comment{Evaluate $y$ for its value and note that when $y$
- changes, $x$ must be re-evaluated}
\State $\solve \strategy(y)$
- \State $I_{\max,\max}[y] = I_{\max,\max}[y] \cup \{x\}$
+ \State $\inflSI[y] = \inflSI[y] \cup \{x\}$
\State \Return $\sigma[y]$
\EndFunction
+
\Function {evalstrategyfixpoint} {$x \in E_{\max}$, $y \in X$}
- \Comment{Evaluate $y$ for its value and note that when $y$
- changes, $x$ must be re-evaluated}
\State $\solve \fixpoint(y)$
- \State $I_{\max,\max}[y] = I_{\max,\max}[\system[y]] \cup \{x\}$
+ \State $\inflSI[y] = \inflSI[\system[y]] \cup \{ x \}$
+ \For {$v \in \touchedFP$}
+      \If {$v \in \stableFP$ and $D[v] \ne D_\old[v]$}
+        \For {$w \in \inflSI[\system[v]]$}
+          \State $\invalidate \strategy(w)$
+ \EndFor
+ \EndIf
+ \EndFor
+
\State \Return $D[y]$
\EndFunction
\end{algorithmic}
\label{algo:ldsi:strategy:eval}
- \caption{Evaluate a portion of the $\max$-strategy and note a
- dependency}
+ \caption{Functions which evaluate a portion of the
+ $\max$-strategy/fixpoint and note a dependency}
\end{algorithm}
-The $\eval \strategy$ function is essentially the same as the $\eval
-\fixpoint$ function, except that it notes the dependencies in a
-different variable, $I_{\max,\max}$. This is because the dependency
-information for the $\max$-strategy iteration is entirely separate to
-that of the fixpoint iteration.
+The $\eval \strategy$ function is essentially the same as the $\eval$
+function in Listing \ref{algo:adapted-wdfs}, except that it calls the
+$\solve \strategy$ procedure.
The $\solve \fixpoint$ calls in $\eval \strategy \fixpoint$ are
-top-level calls, meaning that upon their return we know that $D$
-contains the value it would take in the greatest fixpoint of the
-current strategy $\sigma$. This function, therefore, acts as a simple
-intermediate layer between the fixpoint-iteration and the
-$\max$-strategy iteration to allow for dependencies to be tracked.
+top-level fixpoint-iteration calls, meaning that upon their return we
+know that $D$ contains the value it would take in the greatest
+fixpoint of the current strategy $\sigma$. This function, therefore,
+acts as a simple intermediate layer between the fixpoint-iteration and
+the $\max$-strategy iteration to allow for dependencies to be tracked.
+
+Upon the conclusion of the $\solve \fixpoint$ we also must inspect the
+$\touchedFP$ set to determine which variables have values which may
+have changed. If their values have stabilised since being invalidated,
+and if they stabilised to a different value to their previous
+stabilisation, then we will invalidate all strategies which depend on
+them. We do not have to invalidate the variable's right hand side
+directly, but if it is dependent on its own value (as in $\{ x = x + 1
+\}$, for example), then it will be destabilised by the transitivity of
+$\invalidate \strategy$.
\begin{algorithm}[H]
- \begin{algorithmic}
- \Function {invalidatestrategy} {$x \in X$} \Comment{$x$ is a
- \emph{variable}}
- \For {$v \in I_{\max,\max}(\system[x])$} \Comment{$v$ is
- influenced by $x$}
- \State $\invalidate \strategy (v)$
- \EndFor
- \EndFunction
-
- \Function {invalidatestrategy} {$x \in E_{\max}$} \Comment{$x$ is
- a \emph{$\max$-expression}}
- \If {$x \in S_{\max}$}
- \State $S_{\max} = S_{\max} \backslash \{x\}$
- \For {$v \in I_{\max,\max}$} \Comment {$v$ is influenced by $x$}
+ \begin{algorithmic}
+ \Procedure {invalidatestrategy} {$x \in E_{\max}$}
+ \If {$x \in \stableSI$}
+ \State $\stableSI = \stableSI \backslash \{x\}$
+        \For {$v \in \inflSI[x]$}
\State $\invalidate \strategy (v)$
\EndFor
\EndIf
- \EndFunction
+ \EndProcedure
\end{algorithmic}
\label{algo:ldsi:strategy:invalidate}
  \caption{Invalidate a portion of the $\max$-strategy, destabilising
    everything which (transitively) depends upon it}
\end{algorithm}
-Invalidating the $\max$-strategy iteration is slightly more
-complicated than invalidating the fixpoint iteration stage. As the
-invalidation interface consists of variables, we must first translate
-a variable into a $\max$-expression (which is easily done by looking
-it up in the equation system). We must then invalidate the strategies
-for each variable which depends on this resultant
-$\max$-expression. The invalidation for $\max$-expressions consists of
-transitively invalidating everything which depends on the
-$\max$-expression, as well as itself.
+Invalidating the $\max$-strategy iteration is essentially the same as
+the invalidation in the fixpoint-iteration stage, except that we do
+not need to keep track of the last stable values, nor which
+$\max$-expressions we have invalidated. All we need to do is remove
+them from the $\stableSI$ set, thereby indicating that they need to be
+re-stabilised by a $\solve \strategy$ call. All $\max$-expressions which (transitively) depend on $x$ are
+themselves destabilised as well.
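+
+Sketched in the same style as the fixpoint invalidation above (names
+ours), this is simply:
+
+\begin{verbatim}
+def invalidate_strategy(state, x):
+    """Destabilise the strategy at the max-expression x and at every
+    max-expression which (transitively) depends upon it."""
+    if x in state["stable_si"]:
+        state["stable_si"].discard(x)
+        for v in state["infl_si"][x]:
+            invalidate_strategy(state, v)
+\end{verbatim}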
+
\begin{algorithm}[H]
\begin{algorithmic}
- \Function {solvestrategy} {$x \in E_{\max}$}
- \Comment{Solve a specific variable and place its value in $\sigma$}
- \If {$x \not \in S_{\max}$}
- \State $S_{\max} = S_{\max} \cup \{x\}$
- \State $\sigma_{dynamic} = \lambda y . \eval \strategy(x,y)$
- \State $e = P_{\max}(\sigma_{dynamic},
- \lambda y . \eval \strategy \fixpoint(x, y))(x)$
+ \Procedure {solvestrategy} {$x \in E_{\max}$}
+ \If {$x \not \in \stableSI$}
+      \State $\stableSI = \stableSI \cup \{x\}$
+ \State $e = P_{\max}(\lambda y . \eval \strategy(x,y),
+ \lambda y . \eval \strategy \fixpoint(x, y))[x]$
\If {$e \ne \sigma[x]$}
\State $\sigma = \{ x \mapsto e \} \oplus \sigma$
\State $\invalidate \fixpoint(\system^{-1}(x))$
- \State $S_{\max} = S_{\max} \backslash I[x]$
- \For {$v \in I[x]$}
+        \State $\stableSI = \stableSI \backslash \inflSI[x]$
+ \For {$v \in \inflSI[x]$}
        \State $\solve \strategy(v)$
\EndFor
\EndIf
\EndIf
- \EndFunction
+ \EndProcedure
\end{algorithmic}
\caption{The $\max$-strategy portion of the Combined W-DFS.}
\label{algo:ldsi:solve}
\end{algorithm}
-\subsection{Correctness} \label{sec:combined-correctness}
+
+\subsection{Correctness} \label{sec:ldsi:correctness}
+
+In order to argue that LDSI is correct we must first assume that the
+W-DFS algorithm of Seidl et al. is correct and will solve the equation
+system for its least/greatest fixpoint.
+
+
+
This algorithm relies on the correctness of the underlying W-DFS
algorithm. This algorithm was presented in
diff --git a/tex/thesis/references.bib b/tex/thesis/references.bib
index e743703..a7929fe 100644
--- a/tex/thesis/references.bib
+++ b/tex/thesis/references.bib
@@ -139,5 +139,6 @@
author = {Helmut Seidl},
title = {Lecture Slides in Program Optimisation},
url = {http://www2.in.tum.de/~seidl/Courses/WS2011/Optimierung/all2011.pdf},
+ urldate = {2012-11-04},
year = {2011}
} \ No newline at end of file
diff --git a/tex/thesis/thesis.tex b/tex/thesis/thesis.tex
index 2397833..51fa0de 100644
--- a/tex/thesis/thesis.tex
+++ b/tex/thesis/thesis.tex
@@ -13,6 +13,8 @@
%%%%%%%%%%%%
% Packages
+\usepackage[numbers]{natbib}
+
\usepackage{alltt}
\usepackage{amsfonts}
\usepackage{amsthm}