-rw-r--r--  tex/thesis/Makefile                       2
-rw-r--r--  tex/thesis/contribution/contribution.tex  329
2 files changed, 219 insertions, 112 deletions
diff --git a/tex/thesis/Makefile b/tex/thesis/Makefile
index 8609c3a..5022129 100644
--- a/tex/thesis/Makefile
+++ b/tex/thesis/Makefile
@@ -5,7 +5,7 @@ PROJ = thesis
all: pdf clean
-pdf:
+pdf: clean
pdflatex $(PROJ)
bibtex $(PROJ)
pdflatex $(PROJ)
diff --git a/tex/thesis/contribution/contribution.tex b/tex/thesis/contribution/contribution.tex
index 6d4aeca..7d12299 100644
--- a/tex/thesis/contribution/contribution.tex
+++ b/tex/thesis/contribution/contribution.tex
@@ -2,8 +2,12 @@
\newcommand\stable{\mathsf{stable}}
\newcommand\eval{\mathsf{\textsc{eval}}}
+\newcommand\stabilise{\mathsf{\textsc{stabilise}}}
\newcommand\solve{\mathsf{\textsc{solve}}}
\newcommand\system{\mathsf{system}}
+\newcommand\invalidate{\mathsf{\textsc{invalidate}}}
+\newcommand\fixpoint{\mathsf{\textsc{fixpoint}}}
+\newcommand\strategy{\mathsf{\textsc{strategy}}}
\algblockx[Globals]{Globals}{EndGlobals}{\textbf{Globals:\\}}{}
\algblockx[Assume]{Assumptions}{EndAssumptions}{\textbf{Assume:\\}}{}
@@ -16,7 +20,8 @@ over the integers with monotonic
operators\cite{Gawlitza:2007:PFC:1762174.1762203}. The original
algorithm is presented in Section \ref{section:basic-algorithm}. We
employ the ideas of Seidl, et al. to design an algorithm which runs in
-considerably less time than the existing solver.
+considerably less time than the existing solver in the best case and
+in most practical cases.
In this chapter we will begin by presenting the Work-list Depth First
Search (W-DFS) fixpoint algorithm developed by Seidl, et
@@ -62,18 +67,32 @@ and all values from the set $\D$.
consider expressions, $e \in E$, as $e : (X \to \D) \to \D$, a
mapping from a variable assignment to the expression's value in that
assignment.
+
+  The subset of expressions of the form $\max(e_1, e_2,
+  \ldots, e_n)$, with $e_1, e_2, \ldots, e_n \in E$, is referred to as
+  the set of \emph{$\max$-expressions}, denoted by $E_{\max} \subset E$.
\end{definition}
\begin{definition}
- \textbf{Equation System:} $\{ x = e_x \mid x \in X, e_x \in E \}$. An
- equation system can also be considered as a function $\varepsilon :
- (X \to D) \to (X \to D)$; $\varepsilon[\rho](x) = e_x(\rho)$.
+ \textbf{Equation System:} $\{ x = e_x \mid x \in X, e_x \in E
+ \}$. The values $x \in X$ are called ``variables'' while the values
+ $e_x \in E$ are called ``right-hand-sides''.
+
+ An equation system can also be considered as a function $\varepsilon
+  : (X \to \D) \to (X \to \D)$; $\varepsilon[\rho](x) = e_x(\rho)$.
+\end{definition}
+
+\begin{definition}
+ \textbf{Dependencies:} A variable or expression $x$ is said to
+ \emph{depend on} $y$ if a change to the value of $y$ induces a
+ change in the value of $x$. If $x$ depends on $y$ then $y$ is said
+ to \emph{influence} $x$.
\end{definition}
\begin{definition}
- \textbf{Dependencies:} A variable $x$ is said to \emph{depend on}
- $y$ if a change to the value of $y$ induces a change in the value of
- $x$.
+  \textbf{Local:} A solver is said to be local if, in order to
+  evaluate any right-hand-side $e_x \in E$, it only requires the
+  evaluation of those variables which $e_x$ may depend on.
\end{definition}
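+
+As a small illustrative example, constructed here purely for
+exposition, consider the equation system
+\[
+  x = \max(1, y) \qquad y = \max(0, \min(x, 3))
+\]
+over the variables $X = \{x, y\}$ and values drawn from the integers
+extended with $\pm\infty$. Both right-hand-sides are
+$\max$-expressions, and $x$ depends on $y$ while $y$ depends on $x$,
+so each variable influences the other.
+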
\section{Fixpoint Iteration}
@@ -86,7 +105,7 @@ reach the least/greatest solution if there is one to be found, but it
will often perform many more evaluations than are necessary. This
algorithm is presented in Listing \ref{algo:kleene}.
-\begin{algorithm}
+\begin{algorithm}[H]
\begin{algorithmic}
\Assumptions
\begin{tabularx}{0.9\textwidth}{rX}
@@ -110,9 +129,10 @@ algorithm is presented in Listing \ref{algo:kleene}.
For each iteration the entire system is evaluated, irrespective of
whether it could possibly have changed value. This results in a
-considerable inefficiency in practice and thus an approach which can
-evaluate smaller portions of the system in each iteration would be a
-sensible improvement.
+considerable inefficiency in practice, requiring the evaluation of
+$O(n^3)$ right-hand-sides, and thus an approach which can evaluate
+smaller portions of the system in each iteration would be a
+significant improvement.
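+
+For the small example system above, Kleene iteration for the greatest
+solution starts from $\rho_0 = \underline{\infty}$ and produces
+$\rho_1 = \{x \mapsto \infty, y \mapsto 3\}$ and
+$\rho_2 = \rho_3 = \{x \mapsto 3, y \mapsto 3\}$. Every iteration
+re-evaluates both right-hand-sides, even though the value of $y$ is
+already final after the first iteration.
+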
\subsection{W-DFS algorithm}
@@ -121,20 +141,24 @@ form of data-dependencies as it solves the system. This gives it the
ability to leave portions of the system unevaluated when it is certain
that those values have not changed.
-\begin{algorithm}
+\begin{algorithm}[H]
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- $D : X \to \D$ & a mapping from variables to their current values, starting
- at $\{ x \mapsto \infty | \forall x \in X \}$ \\
- I & A mapping from a variable to the variables which depend on
- it in their evaluation \\
- stable & The set of all variables whose values have stabilised \\
+ $D : X \to \D$ & a mapping from variables to their current
+      values, starting at $\{ x \mapsto \infty \mid x \in X \}$
+ \\
+
+ I & A mapping from a variable to the variables which \emph{may}
+ depend on it in their evaluation \\
+
+ stable & The set of all variables whose values have stabilised
+ \\
+
system & The equation system, a mapping from a variable to its
associated function \\
\end{tabularx}
\EndGlobals
- something something
\end{algorithmic}
\begin{algorithmic}
@@ -155,8 +179,7 @@ that those values have not changed.
\State $\stable = \stable \cup \{x\}$
\State $v = f( \lambda y . \eval(x, y) )$
\If {$v \ne D[x]$}
- \State $D = \{ x \mapsto v, \alpha \mapsto D[\alpha] \}
- \forall \alpha \ne x$
+ \State $D = \{ x \mapsto v \} \oplus D$
\State $W = I[x]$
\State $I(x) = \emptyset$
\State $\stable = \stable \backslash W$
@@ -174,27 +197,71 @@ that those values have not changed.
\label{algo:w-dfs}
\end{algorithm}
+The W-DFS algorithm over-approximates the dependencies for each
+variable, keeping a map of which variables \emph{may} depend on other
+variables.
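+
+In the small example system, for instance, evaluating the
+right-hand-side of $x$ records $x \in I[y]$. If $y$ later changes
+value then only $x$ is removed from $\stable$ and re-solved; the rest
+of the system is left untouched.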
+
+The particular variation of W-DFS presented here is designed to return
+the \emph{greatest} fixpoint of an equation system consisting of only
+\emph{monotonic} expressions.
+
\section{$\max$-strategy Iteration}
+
+The $\max$-strategy iteration can be viewed as an accelerated fixpoint
+problem. We are attempting to find a strategy, $\sigma: E_{\max} \to
+E$, that will result in the greatest value for each $e \in
+E_{\max}$. Therefore, if we consider our ``variables'' to be
+$\max$-expressions, our ``values'' to be their subexpressions and our
+``comparison'' to be carried out using the result of evaluating the
+system under each strategy, then we can solve for the best
+$\max$-strategy using a similar approach to the fixpoint iteration
+above.
+
+Because $\max$-strategy iteration is so similar to a standard fixpoint
+problem it is possible to adapt the W-DFS algorithm to improve the
+strategy on demand, re-evaluating only those $\max$-expressions whose
+choices may have been affected by a change.
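+
+For the small example system, a $\max$-strategy $\sigma$ selects one
+argument of each $\max$-expression. The strategy with
+$\sigma(\max(1, y)) = y$ and
+$\sigma(\max(0, \min(x, 3))) = \min(x, 3)$, for instance, induces the
+$\max$-free system $\{ x = y,\; y = \min(x, 3) \}$.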
+
\subsection{Naive approach}
-TODO: Explanation of the naive approach
+\begin{algorithm}[H]
+ \begin{algorithmic}
+ \Assumptions
+ \begin{tabularx}{0.9\textwidth}{rX}
+ $\sigma $:&$ E_{\max} \to E$, a $\max$ strategy \\
+
+ $\varepsilon $:&$ (X \to \D) \to (X \to \D)$, an equation
+ system \\
+
+      $\rho $:&$ (X \to \D)$, a variable assignment \\
+
+ $P_{\max} $:&$ ((E_{\max} \to E_{\max}), (X \to \D)) \to
+ (E_{\max} \to E_{\max})$, a $\max$-strategy improvement
+ operator
+ \end{tabularx}
+ \EndAssumptions
+
+ \State $n = 0$
+ \State $\sigma_0 = \lambda x . -\infty$
+ \State $\rho_0 = \underline{-\infty}$
+ \Repeat
+      \State $\sigma_{n+1} = P_{\max}(\sigma_n, \rho_n)$
+      \State $\rho_{n+1} = \sigma_{n+1}(\varepsilon)[ \rho_n ]$
+ \State $n = n + 1$
+ \Until {$\sigma_{n-1} = \sigma_n$}
+ \State \Return $\sigma_n$
+ \end{algorithmic}
+ \caption{The naive approach to strategy iteration}
+ \label{algo:naive-strategy}
+\end{algorithm}
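+
+At each step this naive approach improves the whole strategy with
+$P_{\max}$ and then re-evaluates the whole variable assignment under
+the improved strategy, repeating until the strategy no longer
+changes. For the small example system this means both
+$\max$-expressions are re-examined on every improvement step, even if
+only one of them could possibly have changed its choice.
+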
-\subsection{Adapted W-DFS algorithm}
-The $\max$-strategy iteration can be viewed as a fixpoint problem. We
-are attempting to find a strategy, $\sigma: E_{\max} \to E$ that will
-result in the greatest value for each $e \in E_{\max}$. Therefore if
-we consider our ``variables'' to be $\max$-expressions, our ``values''
-to be their subexpressions and our ``comparison'' to be carried out
-using the result of evaluating the system with that strategy.
+\subsection{Adapted W-DFS algorithm}
This, then, allows us to use the W-DFS algorithm to re-evaluate only
those parts of the strategy which have changed. Listing
\ref{algo:w-dfs-max} presents this variation on W-DFS.
-\begin{algorithm}
+\begin{algorithm}[H]
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
@@ -226,10 +293,9 @@ those parts of the strategy which have changed. Listing
\If {$x \not \in \stable$}
\State $f = \system[x]$
\State $\stable = \stable \cup \{x\}$
- \State $v = bestStrategy(x, \lambda y . \eval(x, y))$
+ \State $v = bestStrategy(f, \lambda y . \eval(x, y))$
\If {$v \ne \sigma[x]$}
- \State $\sigma = \{ x \mapsto v, \alpha \mapsto \sigma[\alpha]
- \} \forall \alpha \ne x $
+ \State $\sigma = \{ x \mapsto v\} \oplus \sigma$
\State $W = I[x]$
\State $I(x) = \emptyset$
\State $\stable = \stable \backslash W$
@@ -246,7 +312,7 @@ those parts of the strategy which have changed. Listing
\end{algorithm}
-\section{Combined W-DFS}
+\section{Local Demand-driven Strategy Improvement (LDSI)}
W-DFS can be used to speed up both the $\max$-strategy iteration and
the fixpoint iteration as two independent processes, but each
@@ -258,103 +324,143 @@ provide some information to each other about what values have changed
so that at each step only a subset of the entire system would have to
be evaluated.
-The new algorithm, \emph{Combined W-DFS} seeks to do this. By adding
-an ``invalidate'' method to both W-DFS instances we provide an
-interface for the two sides of the algorithm to indicate which values
-have changed. This gives the other side enough information to avoid
-evaluating irrelevant portions of the equation system.
-
-This algorithm is presented in two parts. Listing
-\ref{algo:combined-fixpoint} presents the fixpoint-iteration portion
-of the algorithm, while Listing \ref{algo:combined-max} presents the
-$\max$-strategy portion. The correctness of this new algorithm is
-argued in \ref{sec:combined-correctness}.
-
-
-\begin{algorithm}
+The new \emph{Local Demand-driven Strategy Improvement} algorithm,
+\emph{LDSI}, seeks to do this. By adding an ``invalidate''
+function to both W-DFS instances we provide an interface for the two
+sides of the algorithm to indicate which values have changed. The
+fixpoint-iteration operation can then be provided with a ``stabilise''
+function to solve a partially-unstable system.
+
+This essentially results in a $\max$-strategy iteration which, at each
+strategy-improvement step, invalidates a portion of the current
+fixpoint iteration. The fixpoint iteration then re-stabilises itself
+by re-evaluating the invalidated values before returning control to
+the $\max$-strategy iteration to change the system again. This back
+and forth continues until the $\max$-strategy iteration cannot
+improve the strategy any further.
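+
+In the small example system, for instance, an improvement step which
+changes the strategy for the right-hand-side of $x$ from the
+subexpression $1$ to $y$ invalidates the fixpoint value of $x$; the
+fixpoint iteration then re-stabilises $x$, along with any variables
+that $x$ influences, before control returns to the $\max$-strategy
+iteration.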
+
+This algorithm is presented in two parts. Listings
+\ref{algo:ldsi:fixpoint:globals},
+\ref{algo:ldsi:fixpoint:eval},
+\ref{algo:ldsi:fixpoint:invalidate},
+\ref{algo:ldsi:fixpoint:solve} and
+\ref{algo:ldsi:fixpoint:stabilise} present the
+fixpoint-iteration portion of the algorithm, while Listing
+\ref{algo:combined-max} presents the $\max$-strategy portion. The
+correctness of this new algorithm is argued in
+\ref{sec:ldsi-correctness}.
+
+\begin{algorithm}[H]
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- D & A mapping from variables to their current values, starting
- at $\{ x \mapsto \infty \}$ \\
- I & A mapping from a variable to the variables which depend on
- it in their evaluation \\
- stable & The set of all variables whose values have stabilised \\
- system & The equation system, a mapping from a variable to its
- associated function \\
+      $D$ & $\{X \to \D\}$ - a mapping from variables to values,
+      starting at $\{ x \mapsto \infty \mid x \in X \}$ \\
+
+ $I_{X,X}$ & $\{X \to X\}$ - a mapping from a variable to the
+ variables it influences \\
+
+ $I_{\max,\max}$ & $\{E_{\max} \to E_{\max}\}$ - a mapping from a
+ $\max$-expression to the $\max$-expressions it influences \\
+
+ $I_{X,\max}$ & $\{X \to E_{\max}\}$ - a mapping from a variable
+ to the $\max$-expressions it influences \\
+
+ $U_{X}$ & The set of all variables whose values have not
+ stabilised to a final fixpoint value (unstable variables) \\
+
+ $U_{\max}$ & The set of all $\max$ expressions whose strategies
+ have not yet stabilised to their final strategy (unstable
+ $\max$-expressions) \\
+
+ $\varepsilon$ & The equation system, a mapping from a variable
+ to its associated function \\
\end{tabularx}
\EndGlobals
+ \end{algorithmic}
+ \caption{Global variables used by the LDSI algorithm}
+ \label{algo:ldsi:fixpoint:globals}
+\end{algorithm}
- \Function {eval} {$x$, $y$}
- \Comment{Evaluate $y$ for its value and note that when $y$
- changes, $x$ must be re-evaluated}
- \State $\solve(y)$
- \State $I[y] = I[y] \cup \{x\}$
+
+
+
+\begin{algorithm}[H]
+ \begin{algorithmic}
+ \Function {evalfixpoint} {$x$, $y$}
+ \State $\solve \fixpoint(y)$
+ \State $I_{X,X}[y] = I_{X,X}[y] \cup \{x\}$
\State \Return $D[y]$
\EndFunction
+ \end{algorithmic}
+ \caption{Utility function used to track fixpoint variable dependencies.}
+ \label{algo:ldsi:fixpoint:eval}
+\end{algorithm}
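+
+In the small example system, for instance, evaluating the
+right-hand-side of $x$ calls $\eval \fixpoint(x, y)$, which solves
+$y$, records $x \in I_{X,X}[y]$ so that a later change to $y$ can be
+propagated back to $x$, and then returns $D[y]$.
+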
- \Function {invalidate} {$x$}
- \If {$x \in \stable$}
- \State $\stable = \stable \backslash \{x\}$
+\begin{algorithm}[H]
+ \begin{algorithmic}
+ \Function {invalidatefixpoint} {$x$}
+ \If {$x \not \in U_X$}
+ \State $U_X = U_X \cup \{x\}$
\State $D[x] = \infty$
-      \State $W = I[x]$
-      \State $I[x] = \emptyset$
+      \State $W = I_{X,X}[x]$
+      \State $I_{X,X}[x] = \emptyset$
\For {$v \in W$}
- invalidate(v)
+ \State $\invalidate \fixpoint(v)$
\EndFor
\EndIf
\EndFunction
+ \end{algorithmic}
+ \caption{Fixpoint subroutine called from the $\max$-strategy
+ iteration portion to invalidate fixpoint variables}
+ \label{algo:ldsi:fixpoint:invalidate}
+\end{algorithm}
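+
+In the small example system, for instance, invalidating $y$ resets
+$D[y]$ to $\infty$, marks $y$ as unstable and, because $x \in
+I_{X,X}[y]$ once $x$ has been evaluated, invalidates $x$ in turn.
+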
- \Function {solve} {$x$}
+\begin{algorithm}[H]
+ \begin{algorithmic}
+ \Function {solvefixpoint} {$x$}
\Comment{Solve a specific expression and place its value in $D$}
- \If {$x \not \in \stable$}
+ \If {$x \in U_X$}
-      \State $f = \system[x]$
+      \State $f = \varepsilon[x]$
- \State $\stable = \stable \cup \{x\}$
+ \State $U_X = U_X \backslash \{x\}$
-      \State $v = f( \lambda y . \eval(x, y) )$
+      \State $v = f( \lambda y . \eval \fixpoint(x, y) )$
\If {$v \ne D[x]$}
- \State $D = \{ x \mapsto v, \alpha \mapsto D[\alpha] \}
- \forall \alpha \ne x$
- \State $W = I[x]$
- \State $I(x) = \emptyset$
- \State strategy::invalidate($x$)
+ \State $D = \{ x \mapsto v \} \oplus D$
+ \State $W = I_{X,X}[x]$
+ \State $I_{X,X}[x] = \emptyset$
+ \State $\invalidate \strategy(x)$
-        \State $\stable = \stable \backslash W$
+        \State $U_X = U_X \cup W$
\For {$v \in W$}
- \State $\solve(v)$
+ \State $\solve \fixpoint(v)$
\EndFor
\EndIf
\EndIf
\EndFunction
\end{algorithmic}
-
- \caption{The fixpoint portion of the Combined W-DFS.}
- \label{algo:combined-fixpoint}
+ \caption{The subroutine of the fixpoint iteration responsible for
+ solving for each variable}
+ \label{algo:ldsi:fixpoint:solve}
\end{algorithm}
+After an evaluation of the $\solve \fixpoint$ procedure, the variable
+supplied as its argument will have been stabilised within the current
+$\max$-strategy.
-\begin{algorithm}
- \begin{algorithmic}
- \Globals
- \begin{tabularx}{0.9\textwidth}{rX}
- $\sigma$ & A mapping from $\max$-expressions to their current
- sub-expressions, starting by \\& mapping to the first
- sub-expression \\
- D & A \emph{dynamic} variable assignment which will stay updated
- as $\sigma$ changes \\
- $I$ & A mapping from a $\max$-expression to the sub-expressions
- which depend on it \\& in their evaluation \\
- $I_V$ & A mapping from a variable to the $\max-$ expressions which
- depend on it in their \\& evaluation \\
- stable & The set of all $\max$-expressions whose strategies have
- stabilised \\
- system & The equation system, a mapping from a variable to its
- associated function \\
- bestStrategy & A function $(E_{\max}, (X \to D)) \to E$ mapping
- from an expression and a variable \\& assignment to the greatest
- subexpression in that context
- \end{tabularx}
- \EndGlobals
+After an evaluation of the $\stabilise \fixpoint$ procedure all
+currently ``unstable'' variables in the current fixpoint will have
+been solved. This corresponds to performing an entire fixpoint
+iteration for the greatest fixpoint of the unstable portion of the
+equation system. It is intended to perform just enough fixpoint
+iteration to allow the next $\max$-strategy improvement step, and it
+relies on the $\max$-strategy solver to correctly invalidate the
+fixpoint-iteration portion of the algorithm.
- \Function {eval} {$x$, $y$}
+
+\begin{algorithm}[H]
+ \begin{algorithmic}
+ \Function {evalstrategy} {$x$, $y$}
\Comment{Evaluate $y$ for its value and note that when $y$
changes, $x$ must be re-evaluated}
-    \State $\solve(y)$
+    \State $\solve \strategy(y)$
@@ -362,28 +468,29 @@ argued in \ref{sec:combined-correctness}.
\State \Return $\sigma[y]$
\EndFunction
- \Function {invalidate} {$x \in X$} \Comment{X is vars}
- \State $\stable = \stable \backslash I[x]$
- \State $W = I_v[x]$
- \State $I_V = \emptyset$
- \For {$v \in W$}
- \State solve(v)
- \EndFor
+ \Function {invalidatestrategy} {$x \in X$} \Comment{$x$ is a
+ \emph{variable}}
+      \State $W = I_{X,\max}[x]$
+      \State $I_{X,\max}[x] = \emptyset$
+      \State $U_{\max} = U_{\max} \cup W$
\EndFunction
- \Function {solve} {$x$}
+ \Function {solvestrategy} {$x$}
\Comment{Solve a specific variable and place its value in $\sigma$}
-    \If {$x \not \in \stable$}
+    \If {$x \in U_{\max}$}
\State $f = \system[x]$
-      \State $\stable = \stable \cup \{x\}$
+      \State $U_{\max} = U_{\max} \backslash \{x\}$
- \State $v = bestStrategy(x,
- \lambda y . \eval(x, y))$
+      \State $v = bestStrategy(f, \sigma, \lambda y . \eval \strategy(x, y))$
\If {$v \ne \sigma[x]$}
- \State $\sigma = \{ x \mapsto v, \alpha \mapsto \sigma[\alpha]
- \} \forall \alpha \ne x $
+ \State $\sigma = \{ x \mapsto v \} \oplus \sigma$
-        \State $W = I[x]$
-        \State $I(x) = \emptyset$
+        \State $W = I_{\max,\max}[x]$
+        \State $I_{\max,\max}[x] = \emptyset$
- \State fixpoint::invalidate$(\mathsf{lookupVarFor}(x))$
+ \State $\invalidate \fixpoint(\mathsf{lookupVarFor}(x))$
-        \State $\stable = \stable \backslash W$
+        \State $U_{\max} = U_{\max} \cup W$
\For {$v \in W$}
-          \State $\solve(v)$
+          \State $\solve \strategy(v)$