author    Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>    2012-11-02 16:11:39 +1100
committer Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>    2012-11-02 16:11:39 +1100
commit    8c7de1fc64e2ef1994b558f223e0e77fb1395539 (patch)
tree      881e3686e3670a7c090c90c134936eb4853d9222
parent    ea660a9528cea35b7971dfc405e464cbddb2d1d0 (diff)
Change to using "touched" sets, and some writing.
-rw-r--r--  impl/MaxStrategy.hpp                       18
-rw-r--r--  impl/VariableAssignment.hpp                31
-rw-r--r--  tex/thesis/contribution/contribution.tex  405
3 files changed, 324 insertions, 130 deletions
diff --git a/impl/MaxStrategy.hpp b/impl/MaxStrategy.hpp
index 6fee921..1b471e0 100644
--- a/impl/MaxStrategy.hpp
+++ b/impl/MaxStrategy.hpp
@@ -132,7 +132,7 @@ private:
struct DependencyAssignment : public VariableAssignment<Domain>{
DependencyAssignment(DynamicMaxStrategy& strat,
- VariableAssignment<Domain>& rho,
+ DynamicVariableAssignment<Domain>& rho,
const MaxExpression<Domain>& expr)
: _strat(strat),
_rho(rho),
@@ -144,11 +144,23 @@ private:
// recursive magic!
_strat.solve(*_strat._system[var]);
_strat._influence[*_strat._system[var]].insert(_expr);
- return _rho[var];
+
+ // force evaluation to get touched variables
+ Domain value = _rho[var];
+ // invalidate the strategy for each variable touched by the evaluation
+ IdSet<Variable<Domain> > changed = _rho.get_changed();
+ for (typename IdSet<Variable<Domain> >::iterator
+ it = changed.begin(),
+ ei = changed.end();
+ it != ei;
+ ++it) {
+ _strat.invalidate(_strat._system.variable(*it));
+ }
+ return value;
}
private:
DynamicMaxStrategy& _strat;
- VariableAssignment<Domain>& _rho;
+ DynamicVariableAssignment<Domain>& _rho;
const MaxExpression<Domain>& _expr;
IdSet<Variable<Domain> > _current;
};
diff --git a/impl/VariableAssignment.hpp b/impl/VariableAssignment.hpp
index e575d60..fff00b5 100644
--- a/impl/VariableAssignment.hpp
+++ b/impl/VariableAssignment.hpp
@@ -24,9 +24,11 @@ struct DynamicVariableAssignment : public VariableAssignment<Domain> {
) : _system(system),
_strategy(strat),
_values(system.variableCount(), value),
+ _old_values(system.variableCount(), value),
_unstable(system.variableCount()),
_influence(system.variableCount(),
- IdSet<Variable<Domain> >(system.variableCount()))
+ IdSet<Variable<Domain> >(system.variableCount())),
+ _touched(system.variableCount())
{ }
const Domain operator[](const Variable<Domain>& var) {
@@ -39,6 +41,8 @@ struct DynamicVariableAssignment : public VariableAssignment<Domain> {
log::fixpoint << indent() << "Invalidating " << x << std::endl;
_unstable.insert(x);
+ _old_values[x] = _values[x];
+ _touched.insert(x);
_values[x] = infinity<Domain>();
IdSet<Variable<Domain> > infl = _influence[x];
@@ -54,6 +58,27 @@ struct DynamicVariableAssignment : public VariableAssignment<Domain> {
}
}
+ // Report the variables touched since the last call, removing them
+ // from the touched set as they are reported.
+ IdSet<Variable<Domain> > get_changed() {
+ IdSet<Variable<Domain> > changed;
+ for (typename IdSet<Variable<Domain> >::iterator
+ it = _touched.begin(),
+ ei = _touched.end();
+ it != ei;
+ ++it) {
+ Variable<Domain>& var = _system.variable(*it);
+ //if (_old_values[var] != _values[var]) {
+ changed.insert(var);
+ _touched.remove(var);
+ //}
+ }
+ //_touched.clear();
+ return changed;
+ }
+
+ const IdMap<Variable<Domain>, Domain> get_old_values() const {
+ return _old_values;
+ }
+
private:
void solve(const Variable<Domain>& x) {
@@ -72,8 +97,6 @@ private:
if (val != _values[x]) {
log::fixpoint << x << " = " << val << std::endl;
- _strategy.invalidate(x);
-
IdSet<Variable<Domain> > oldInfluence = _influence[x];
_influence[x].clear();
_values[x] = val;
@@ -111,8 +134,10 @@ private:
const EquationSystem<Domain>& _system;
DynamicMaxStrategy<Domain>& _strategy;
IdMap<Variable<Domain>, Domain> _values;
+ IdMap<Variable<Domain>, Domain> _old_values;
IdSet<Variable<Domain> > _unstable;
IdMap<Variable<Domain>,IdSet<Variable<Domain> > > _influence;
+ IdSet<Variable<Domain> > _touched;
};
#endif
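
The hunks above introduce a "touched"-set handshake between the fixpoint iteration (DynamicVariableAssignment) and the max-strategy iteration (DynamicMaxStrategy): the strategy side invalidates fixpoint variables, and later pulls the set of variables the fixpoint side has touched so that it can destabilise only the affected strategy entries. The following is a minimal standalone sketch of that handshake, using plain standard-library containers rather than the project's IdSet/IdMap/Variable types; the names ToyFixpoint and ToyStrategy are illustrative only.

#include <iostream>
#include <map>
#include <set>
#include <string>

struct ToyFixpoint {
  std::map<std::string, int> values;
  std::set<std::string> touched;

  // Analogue of DynamicVariableAssignment::invalidate(): reset the value
  // and remember that the variable was touched.
  void invalidate(const std::string& var) {
    values[var] = 0;  // stands in for infinity<Domain>()
    touched.insert(var);
  }

  // Analogue of get_changed(): report and clear the touched variables.
  std::set<std::string> get_changed() {
    std::set<std::string> changed;
    changed.swap(touched);
    return changed;
  }
};

struct ToyStrategy {
  std::set<std::string> unstable;

  // Analogue of DynamicMaxStrategy::invalidate(): mark the strategy
  // entry for a variable as needing re-evaluation.
  void invalidate(const std::string& var) { unstable.insert(var); }
};

int main() {
  ToyFixpoint rho;
  ToyStrategy strategy;

  // The strategy side invalidates some fixpoint variables...
  rho.invalidate("x");
  rho.invalidate("y");

  // ...and later pulls the touched set and destabilises the
  // corresponding strategy entries (cf. DependencyAssignment above).
  for (const std::string& var : rho.get_changed())
    strategy.invalidate(var);

  std::cout << "unstable strategy entries: " << strategy.unstable.size()
            << std::endl;  // prints 2
  return 0;
}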
diff --git a/tex/thesis/contribution/contribution.tex b/tex/thesis/contribution/contribution.tex
index cc8d3ba..4a3c7b7 100644
--- a/tex/thesis/contribution/contribution.tex
+++ b/tex/thesis/contribution/contribution.tex
@@ -1,13 +1,26 @@
\floatname{algorithm}{Listing}
+\newcommand\inflFP{\mathsf{infl^{FP}}}
+\newcommand\stableFP{\mathsf{stable^{FP}}}
+\newcommand\touchedFP{\mathsf{touched^{FP}}}
+\newcommand\inflSI{\mathsf{infl^{SI}}}
+\newcommand\stableSI{\mathsf{stable^{SI}}}
+
\newcommand\stable{\mathsf{stable}}
+\newcommand\system{\mathsf{system}}
+
+
+\newcommand\Systems{\mathcal{E}}
+
+
+\newcommand\init{\mathsf{\textsc{init}}}
\newcommand\eval{\mathsf{\textsc{eval}}}
\newcommand\stabilise{\mathsf{\textsc{stabilise}}}
\newcommand\solve{\mathsf{\textsc{solve}}}
-\newcommand\system{\mathsf{system}}
\newcommand\invalidate{\mathsf{\textsc{invalidate}}}
\newcommand\fixpoint{\mathsf{\textsc{fixpoint}}}
\newcommand\strategy{\mathsf{\textsc{strategy}}}
+
\algblockx[Globals]{Globals}{EndGlobals}{\textbf{Globals:\\}}{}
\algblockx[Assume]{Assumptions}{EndAssumptions}{\textbf{Assume:\\}}{}
@@ -70,16 +83,28 @@ and all values from the set $\D$.
The subset of expressions of the form $\max(e_1, e_2,
... e_n)$, with $e_1, e_2, ..., e_n \in E$ are referred to as
- \emph{$\max$-expressions}, denoted by $E_{\max} \subset E$.
+ \emph{$\max$-expressions}, denoted by $E_{\max} \subset E$.
\end{definition}
\begin{definition}
\textbf{Equation System:} $\{ x = e_x \mid x \in X, e_x \in E
\}$. The values $x \in X$ are called ``variables'' while the values
- $e_x \in E$ are called ``right-hand-sides''.
-
- An equation system can also be considered as a function $\varepsilon
- : (X \to D) \to (X \to D)$; $\varepsilon[\rho](x) = e_x(\rho)$.
+ $e_x \in E$ are called ``right-hand-sides''. The set of all equation
+ systems is denoted by $\Systems$.
+
+ An equation system can be considered as a function $\system : X
+ \to ((X \to \D) \to \D)$; $\system[x] = e_x$, mapping from each
+ variable to its right-hand-side.
+
+ An equation system can also be considered as a function $\system
+ : (X \to \D) \to (X \to \D)$, $\system(\rho)[x] = e_x(\rho)$,
+ taking a variable assignment $\rho$ and returning the result of each
+ $e_x$'s evaluation in $\rho$.
+
+ These two functional forms are equivalent and correspond to changing
+ the argument order in the following way: $\system(\rho)[x] =
+ \system[x](\rho)$. It is only for convenience that we use both
+ forms.
\end{definition}
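+
+As a small illustrative example (not drawn from any particular
+analysis), consider the equation system $\{ x = \max(0, y),\ y = x - 1
+\}$ over $X = \{x, y\}$. In the first form, $\system[x] = \max(0, y)$
+is itself a function of a variable assignment. In the second form,
+for $\rho = \{ x \mapsto 3, y \mapsto 5 \}$ we have $\system(\rho)[x]
+= \max(0, \rho(y)) = 5$ and $\system(\rho)[y] = \rho(x) - 1 = 2$,
+which agree with $\system[x](\rho)$ and $\system[y](\rho)$,
+respectively.
+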
\begin{definition}
@@ -107,20 +132,15 @@ algorithm is presented in Listing \ref{algo:kleene}.
\begin{algorithm}[H]
\begin{algorithmic}
- \Assumptions
- \begin{tabularx}{0.9\textwidth}{rX}
- $\rho $:&$ X \to \D$, a variable assignment \\
- $\varepsilon $:&$ (X \to \D) \to (X \to \D)$, an equation system
- \end{tabularx}
- \EndAssumptions
-
- \State $n = 0$
- \State $\rho_0 = \underline{\infty}$
- \Repeat
- \State $\rho_{n+1} = \varepsilon[ \rho_{n} ]$
- \State $n = n + 1$
- \Until {$\rho_{n-1} = \rho_n$}
- \State \Return $\rho_n$
+ \Function {solvefixpoint} {$\system \in \Systems$}
+ \State $k = 0$
+ \State $\rho_0 = \underline{\infty}$
+ \Repeat
+ \State $\rho_{k+1} = \system( \rho_{k} )$
+ \State $k = k + 1$
+ \Until {$\rho_{k-1} = \rho_k$}
+ \State \Return $\rho_k$
+ \EndFunction
\end{algorithmic}
\caption{The Kleene iteration algorithm for solving fixpoint
equations for their greatest solutions.}
@@ -146,7 +166,7 @@ greatest fixpoint of $\{ x \mapsto -\infty, y \mapsto -\infty \}$.
\begin{figure}[H]
\begin{align*}
- x &= \min(0, x) \\
+ x &= \min(0, y) \\
y &= x - 1
\end{align*}
\caption{An example equation system for which Kleene iteration will
@@ -154,76 +174,113 @@ greatest fixpoint of $\{ x \mapsto -\infty, y \mapsto -\infty \}$.
\label{fig:kleene-infinite}
\end{figure}
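+
+To illustrate (a sketch of the first few iterates only), Kleene
+iteration on the system in Figure \ref{fig:kleene-infinite} proceeds
+as follows:
+\begin{align*}
+  \rho_0 &= \{ x \mapsto \infty, y \mapsto \infty \} \\
+  \rho_1 &= \{ x \mapsto 0, y \mapsto \infty \} \\
+  \rho_2 &= \{ x \mapsto 0, y \mapsto -1 \} \\
+  \rho_3 &= \{ x \mapsto -1, y \mapsto -1 \} \\
+  \rho_4 &= \{ x \mapsto -1, y \mapsto -2 \}
+\end{align*}
+and so on, decreasing forever without reaching the greatest fixpoint
+$\{ x \mapsto -\infty, y \mapsto -\infty \}$.
+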
+This particular non-termination of Kleene iteration is not a
+significant problem in our application, however, as Gawlitza et
+al. \cite{Gawlitza:2007:PFC:1762174.1762203} have shown that, in the
+specific case we are concerned with, Kleene iteration is restricted
+to at most $|X|$ iterations.
+
\subsection{W-DFS algorithm} \label{sec:wdfs}
The W-DFS algorithm presented by Seidl, et al. takes into account some
form of data-dependencies as it solves the system. This gives it the
ability to leave portions of the system unevaluated when it is certain
-that those values have not changed.
+that those values have not changed. The algorithm is presented in
+Listing \ref{algo:w-dfs}.
-\begin{algorithm}[H]
+\begin{algorithm}
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- $D : X \to \D$ & a mapping from variables to their current
- values, starting at $\{ x \mapsto \infty | \forall x \in X \}$
- \\
+ $D$ & $\in (X \to \D)$, a mapping from variables to their current
+ values \\
+
+ $\inflFP$ & $\in (X \to 2^X)$, a mapping from a variable to the
+ variables it \emph{may} influence \\
- I & A mapping from a variable to the variables which \emph{may}
- depend on it in their evaluation \\
+ $\stableFP$ & $\subseteq X$, a set of ``stable'' variables \\
- stable & The set of all variables whose values have stabilised
- \\
-
- system & The equation system, a mapping from a variable to its
- associated function \\
+ $\system$ & $\in \Systems$, an equation system \\
\end{tabularx}
\EndGlobals
\end{algorithmic}
\begin{algorithmic}
- \Function {eval} {$x$, $y$}
- \Comment{Evaluate $y$ for its value and note that when $y$
- changes, $x$ must be re-evaluated}
+ \Function {solvefixpoint} {$s \in \Systems$}
+ \State $D = \underline{\infty}$
+ \State $\inflFP = \{ x \mapsto \emptyset \mid x \in X \}$
+ \State $\stableFP = \emptyset$
+ \State $\system = s$
+ \State \Return $\lambda x . (\solve(x); D[x])$
+ \EndFunction
+ \label{algo:w-dfs:init}
+ \end{algorithmic}
+
+ \begin{algorithmic}
+ \Function {eval} {$x \in X$, $y \in X$}
+ \Comment{Evaluate $y$ and record that $x$ depends on $y$}
\State $\solve(y)$
- \State $I[y] = I[y] \cup \{x\}$
+ \State $\inflFP[y] = \inflFP[y] \cup \{x\}$
\State \Return $D[y]$
\EndFunction
\end{algorithmic}
\begin{algorithmic}
- \Function {solve} {$x$}
- \Comment{Solve a specific variable and place its value in $D$}
- \If {$x \not \in \stable$}
- \State $f = \system[x]$
- \State $\stable = \stable \cup \{x\}$
- \State $v = f( \lambda y . \eval(x, y) )$
+ \Procedure {solve} {$x \in X$}
+ \Comment{Solve the variable $x$ and place its value in $D$}
+ \If {$x \not \in \stableFP$}
+ \State $e_x = \system[x]$
+ \State $\stableFP = \stableFP \cup \{x\}$
+ \State $v = e_x( \lambda y . \eval(x, y) )$
\If {$v \ne D[x]$}
\State $D = \{ x \mapsto v \} \oplus D$
- \State $W = I[x]$
- \State $I(x) = \emptyset$
- \State $\stable = \stable \backslash W$
+ \State $W = \inflFP[x]$
+ \State $\inflFP[x] = \emptyset$
+ \State $\stableFP = \stableFP \backslash W$
\For {$v \in W$}
\State $\solve(v)$
\EndFor
\EndIf
\EndIf
- \EndFunction
+ \EndProcedure
\end{algorithmic}
- \caption{The W-DFS alluded to in \cite{DBLP:tr/trier/MI96-11} and
+ \caption{The W-DFS algorithm mentioned in \cite{DBLP:tr/trier/MI96-11} and
presented in \cite{fixpoint-slides}, modified to find
greatest-fixpoints of monotonic fixpoint equations}
\label{algo:w-dfs}
\end{algorithm}
-The W-DFS algorithm over-approximates the dependencies for each
-variable, keeping a map of which variables \emph{may} depend on other
-variables.
+The function $\solve\fixpoint$ in Listing \ref{algo:w-dfs:init} is of
+type $\Systems \to (X \to \D)$. It takes an equation system and
+returns a function with which to query the greatest fixpoint. Each
+variable's value is computed on demand, evaluating only the subset of
+the equation system which is required to ensure a correct solution is
+found.
+
+The algorithm performs no evaluation of any variables unless their
+value is requested (whether externally or internally). Once a value is
+requested the right-hand-side is evaluated, which may in turn request
+the values of other variables (through the $\eval$ function).
+
+In the case of mutually-recursive variables this would result in
+infinite recursion, as calculating a variable's value would require
+first calculating that same value. To prevent this, a set $\stableFP$
+is maintained of the variables whose values are either currently
+being computed or have already been computed. This set provides a
+``base'' for the recursion to terminate, at which point the value is
+simply looked up in $D$.
+
+Whenever a variable's value changes, the variables which \emph{may}
+depend on it for their values are ``destabilised'' and re-evaluated,
+as their values may change if re-evaluated (and hence they are not
+stable). If their values do change then they, too, destabilise their
+dependent variables, and so on.
+
+The W-DFS algorithm provides us with a \emph{local},
+\emph{demand-driven} solver for greatest fixpoints of monotonic
+fixpoint problems.
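+
+As a small example of this locality (illustrative only), consider the
+system $\{ x = \min(0, y),\ y = x - 1,\ z = 42 \}$. Requesting the
+value of $z$ invokes $\solve(z)$, which evaluates only the constant
+right-hand-side of $z$ and returns $42$; $x$ and $y$ are never
+touched. If $x$ is later requested and its value subsequently
+changes, then every variable in $\inflFP[x]$ (here $y$, once $y$ has
+been evaluated through $\eval$) is removed from $\stableFP$ and
+re-solved.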
-The particular variation of W-DFS presented here is designed to return
-the \emph{greatest} fixpoint of an equation system consisting of only
-\emph{monotonic} expressions.
@@ -233,10 +290,9 @@ The $\max$-strategy iteration can be viewed as an accelerated fixpoint
problem. We are attempting to find a strategy, $\sigma: E_{\max} \to
E$ that will result in the greatest value for each $e \in
E_{\max}$. Therefore if we consider our ``variables'' to be
-$\max$-expressions and our ``values'' to be their subexpressions then we
-can solve for the best $\max$-strategy using a similar approach
- and our
-``comparison''
+$\max$-expressions and our ``values'' to be their subexpressions then
+we can solve for the best $\max$-strategy using a similar approach to
+the W-DFS algorithm presented above.
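+
+As a small illustration of a single improvement step (an example of
+our own, not taken from the literature), consider the
+$\max$-expression $e = \max(3, y)$. Under a variable assignment $\rho
+= \{ y \mapsto 5 \}$ the best subexpression to select is $y$, since
+$\rho(y) = 5 > 3$, whereas under $\rho = \{ y \mapsto 1 \}$ the best
+selection is the constant $3$. The improvement operator $P_{\max}$
+used below makes essentially this choice for each $\max$-expression.
+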
Because $\max$-strategy iteration is so similar to a standard fixpoint
problem it is possible
@@ -247,28 +303,29 @@ problem it is possible
\begin{algorithmic}
\Assumptions
\begin{tabularx}{0.9\textwidth}{rX}
- $\sigma $:&$ E_{\max} \to E$, a $\max$ strategy \\
+ $\sigma$ & $\in (E_{\max} \to E)$, a $\max$-strategy \\
- $\varepsilon $:&$ (X \to \D) \to (X \to \D)$, an equation
+ $\system$ & $\in \Systems$, an equation
system \\
- $\rho $:&$ (X \to D)$, a variable assignment \\
+ $\rho$ & $\in (X \to \D)$, a variable assignment \\
- $P_{\max} $:&$ ((E_{\max} \to E_{\max}), (X \to \D)) \to
- (E_{\max} \to E_{\max})$, a $\max$-strategy improvement
- operator
+ $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \D)) \to (E_{\max}
+ \to E)$, a $\max$-strategy improvement operator \\
\end{tabularx}
\EndAssumptions
- \State $n = 0$
- \State $\sigma_0 = \lambda x . -\infty$
- \State $\rho_0 = \underline{-\infty}$
- \Repeat
- \State $\sigma_{n+1} = P_{\max}(\sigma, \rho)$
- \State $\rho_{n+1} = \sigma(\varepsilon)[ \rho_{n} ]$
- \State $n = n + 1$
- \Until {$\sigma_{n-1} = \sigma_n$}
- \State \Return $\sigma_n$
+ \Function {solvestrategy} {$\system \in \Systems$}
+ \State $k = 0$
+ \State $\sigma_0 = \lambda x . -\infty$
+ \State $\rho_0 = \underline{-\infty}$
+ \Repeat
+ \State $\sigma_{k+1} = P_{\max}(\sigma_k, \rho_k)$
+ \State $\rho_{k+1} = \solve \fixpoint(\sigma_{k+1}(\system))$
+ \State $k = k + 1$
+ \Until {$\sigma_{k-1} = \sigma_k$}
+ \State \Return $\sigma_k$
+ \EndFunction
\end{algorithmic}
\caption{The naive approach to strategy iteration}
\label{algo:naive-strategy}
@@ -285,46 +342,61 @@ those parts of the strategy which have changed. Listing
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- $\sigma$ & A mapping from $\max$-expressions to their current
- sub-expressions, starting by mapping to the first
- sub-expression \\
- I & A mapping from a $\max$-expression to the sub-expressions
- which depend on it in their evaluation \\
- stable & The set of all $\max$-expressions whose strategies have
- stabilised \\
- system & The equation system, a mapping from a variable to its
- associated function \\
- bestStrategy & A function $(E_{\max}, (X \to D)) \to E$ mapping
- from an expression and a variable \\& assignment to the greatest
- subexpression in that context
+ $\sigma$ & $\in (E_{\max} \to E)$, a mapping from
+ $\max$-expressions to a subexpression \\
+
+ $\inflSI$ & $\in (E_{\max} \to 2^{E_{\max}})$, a mapping from a
+ $\max$-expression to the $\max$-expressions it \emph{may} influence \\
+
+ $\stableSI$ & $\subseteq E_{\max}$, the set of all
+ $\max$-expressions whose strategies are stable \\
+
+ $\system$ & $\in \Systems$, an equation system \\
+
+ $P_{\max}$ & $\in (E_{\max}, E, (X \to \D)) \to E$, a function
+ mapping from a $\max$-expression, the current strategy and a
+ variable assignment to a better subexpression than the current
+ strategy in that variable assignment \\
\end{tabularx}
\EndGlobals
+ \end{algorithmic}
- \Function {eval} {$x$, $y$}
- \Comment{Evaluate $y$ for its value and note that when $y$
- changes, $x$ must be re-evaluated}
+ \begin{algorithmic}
+ \Function {solvestrategy} {$s \in \Systems$}
+ \State $\sigma = \underline{-\infty}$
+ \State $\inflSI = \{ x \mapsto \emptyset \mid x \in E_{\max} \}$
+ \State $\stableSI = \emptyset$
+ \State $\system = s$
+ \State \Return $\lambda x . (\solve(x); \sigma[x])$
+ \EndFunction
+ \label{algo:adapted-wdfs:init}
+ \end{algorithmic}
+
+ \begin{algorithmic}
+ \Function {eval} {$x \in E_{\max}$, $y \in E_{\max}$}
\State $\solve(y)$
- \State $I[y] = I[y] \cup \{x\}$
+ \State $\inflSI[y] = \inflSI[y] \cup \{x\}$
\State \Return $\sigma[y]$
\EndFunction
+ \end{algorithmic}
- \Function {solve} {$x$}
- \Comment{Solve a specific expression and place its value in $\sigma$}
- \If {$x \not \in \stable$}
- \State $f = \system[x]$
- \State $\stable = \stable \cup \{x\}$
- \State $v = bestStrategy(f, \lambda y . \eval(x, y))$
+ \begin{algorithmic}
+ \Procedure {solve} {$x \in E_{\max}$}
+ \If {$x \not \in \stableSI$}
+ \State $\stableSI = \stableSI \cup \{x\}$
+ \State $\rho = \solve \fixpoint(\sigma(\system))$
+ \State $v = P_{\max}(x, \sigma[x], \lambda y . \eval(x, y))$
\If {$v \ne \sigma[x]$}
\State $\sigma = \{ x \mapsto v\} \oplus \sigma$
- \State $W = I[x]$
- \State $I(x) = \emptyset$
- \State $\stable = \stable \backslash W$
+ \State $W = \inflSI[x]$
+ \State $\inflSI[x] = \emptyset$
+ \State $\stableSI = \stableSI \backslash W$
\For {$v \in W$}
\State $\solve(v)$
\EndFor
\EndIf
\EndIf
- \EndFunction
+ \EndProcedure
\end{algorithmic}
\caption{W-DFS, this time modified to find the best $\max$-strategy.}
@@ -335,42 +407,127 @@ those parts of the strategy which have changed. Listing
\section{Local Demand-driven Strategy Improvement (LDSI)}
W-DFS can be used to speed up both the $\max$-strategy iteration and
-the fixpoint iteration as two independent processes, but each
-fixpoint-iteration step still requires at least one complete
-evaluation of the equation system per $\max$-strategy improvement
-step. Ideally we would be able to adapt the W-DFS algorithm so that
-the fixpoint-iteration and $\max$-strategy iteration steps could
-provide some information to each other about what values have changed
-so that at each step only a subset of the entire system would have to
-be evaluated.
+the fixpoint iteration as two independent processes, but each time we
+seek to improve our strategy we must re-compute at least a subset of
+the fixpoint from a base of no information. Ideally we would provide
+some channel of communication between the two W-DFS instances to
+reduce the amount of work that must be done at each stage. If each
+instance supplies the other with information about which parts of the
+system have changed, each can resume partway through the process of
+finding a solution, thereby avoiding the repetition of work that has
+already been done.
The new \emph{Local Demand-driven Strategy Improvement} algorithm,
-\emph{LDSI}, seeks to do this. By adding an ``invalidate''
-function to both W-DFS instances we provide an interface for the two
-sides of the algorithm to indicate which values have changed and
-``destabilise'' that portion of the system.
+\emph{LDSI}, seeks to do this. By adding an ``invalidate'' procedure
+to the fixpoint iteration we give the $\max$-strategy iteration a way
+to invalidate fixpoint variables, and we modify the fixpoint
+iteration to report which variables it has changed with each fixpoint
+iteration step.
This essentially results in a $\max$-strategy iteration which, at each
strategy-improvement step, invalidates a portion of the current
fixpoint iteration which may depend on the changed strategy. The
-fixpoint iteration then re-stabilises itself by evaluating what values
-have been changed, but only when such values are requested by the
-$\max$-strategy iteration. The $\max$-strategy can continue as normal,
-invalidating the fixpoint-iteration as required, and simply assume
-that the fixpoint-iteration will provide it with the correct values
-from the greatest fixpoint.
+fixpoint iteration then re-stabilises each variable as it is
+requested, tracking which values have changed since the last
+strategy-improvement step. The $\max$-strategy iteration then uses
+this set of changed variables to determine which portions of the
+current strategy must be re-stabilised. This process continues until
+the $\max$-strategy stabilises (at which point the fixpoint will also
+have stabilised).
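+
+As a concrete sketch of one round of this communication, consider the
+system $\{ x = \max(0, y),\ y = x - 1 \}$. If the strategy for
+$\max(0, y)$ is improved from the subexpression $0$ to $y$, the
+$\max$-strategy iteration invalidates $x$ in the fixpoint iteration;
+the fixpoint iteration marks $x$ as unstable, records it as
+``touched'' and destabilises any variables which may depend on $x$
+(here $y$). When a value is next requested, the fixpoint iteration
+re-stabilises the invalidated portion, recording every variable it
+touches along the way. The $\max$-strategy iteration then consumes
+this set of touched variables and destabilises only those
+$\max$-expressions which may depend on them, rather than re-examining
+the entire strategy.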
+
+This entire approach is \emph{demand driven}, and so any necessary
+evaluation of strategies or fixpoint values is delayed until the point
+when it is actually required. This means that the solver will be
+\emph{local}.
+
+The algorithm is presented in three parts, followed by an argument
+for its correctness.
+
+Section \ref{sec:ldsi:top-level} presents the global state and entry
+point to the algorithm.
+
+Section \ref{sec:ldsi:fixpoint} presents the fixpoint portion of
+the algorithm. This portion bears many similarities to the W-DFS
+algorithm presented in Section \ref{sec:wdfs}, with a few
+modifications to track changes and to allow external invalidation of
+parts of the solution.
+
+Section \ref{sec:ldsi:max-strategy} presents the $\max$-strategy portion
+of the algorithm. This portion is quite similar to the Adapted W-DFS
+algorithm presented in Section \ref{sec:adapted-wdfs}, with some
+modifications to allow for communication with the fixpoint portion of
+the algorithm.
+
+Section \ref{sec:ldsi:correctness} argues the correctness of this
+approach for finding the least fixpoints of monotonic, expansive
+equation systems involving $\max$-expressions.
+
+\subsection{Top level} \label{sec:ldsi:top-level}
+
+\begin{algorithm}[H]
+ \begin{algorithmic}
+ \Globals
+ \begin{tabularx}{0.9\textwidth}{rX}
+ $D$ & $\in (X \to \D)$, a mapping from variables to their current
+ values \\
+
+ $\inflFP$ & $\in (X \to 2^X)$, a mapping from a variable to the
+ variables it \emph{may} influence \\
+
+ $\stableFP$ & $\subseteq X$, a set of ``stable'' variables \\
+
+ $\touchedFP$ & $\subseteq X$, a set of variables which have been
+ ``touched'' by the fixpoint iteration, but not yet acknowledged
+ by the $\max$-strategy iteration \\
+
+ \\
+
+ $\sigma$ & $\in (E_{\max} \to E)$, a mapping from
+ $\max$-expressions to a subexpression \\
+
+ $\inflSI$ & $\in (E_{\max} \to 2^{E_{\max}})$, a mapping from a
+ $\max$-expression to the $\max$-expressions it \emph{may} influence \\
+
+ $\stableSI$ & $\subseteq E_{\max}$, the set of all
+ $\max$-expressions whose strategies are stable \\
+
+ $P_{\max}$ & $\in (E_{\max}, E, (X \to \D)) \to E$, a function
+ mapping from a $\max$-expression, the current strategy and a
+ variable assignment to a better subexpression than the current
+ strategy in that variable assignment \\
+
+ \\
+
+ $\system$ & $\in \Systems$, an equation system \\
+ \end{tabularx}
+ \EndGlobals
+ \end{algorithmic}
+ \caption{Global variables used by the LDSI algorithm}
+ \label{algo:ldsi:fixpoint:globals}
+\end{algorithm}
+
+These variables are mostly just a combination of the variables found
+in Sections \ref{sec:wdfs} and \ref{sec:adapted-wdfs}. The only
+exception is $\touchedFP$, which is newly introduced to track the
+variables touched by the fixpoint iteration but not yet acknowledged
+by the $\max$-strategy iteration.
+
-This entire approach is demand driven, and so any necessary evaluation
-is delayed until the point when it is actually required. Additionally,
-if it is not necessary to evaluate a particular right hand side in
-order to make a decision then the algorithm will attempt to avoid
-evaluating it.
+\subsection{$\max$-strategy iteration} \label{sec:ldsi:max-strategy}
+\subsection{Fixpoint iteration} \label{sec:ldsi:fixpoint}
+\subsection{Correctness} \label{sec:ldsi:correctness}
+\textbf{TODO:} the material after this point still needs to be
+updated to use the new global variables and to properly incorporate
+the ``touched'' set.
+\endinput
-This algorithm is presented in two parts. Listings
+Listings
\ref{algo:ldsi:fixpoint:globals},
\ref{algo:ldsi:fixpoint:eval},
\ref{algo:ldsi:fixpoint:invalidate},
@@ -404,7 +561,7 @@ correctness of this new algorithm is argued in
have stabilised to their final strategy (stable
$\max$-expressions) \\
- $\varepsilon$ & The equation system, a mapping from a variable
+ $\system$ & The equation system, a mapping from a variable
to its associated function \\
\end{tabularx}
\EndGlobals