author    Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>    2012-11-12 17:42:39 +1100
committer Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>    2012-11-12 17:42:39 +1100
commit    a080384f843f51692703585170f7282b82a7d541 (patch)
tree      20b77da8a374317d331fb538e4e97a0f65a0fc1a /tex/thesis/contribution/contribution.tex
parent    81f0f5bdc4eabc0bb5fc00b9664879c46ec54e09 (diff)
A few minor fixes to contribution.
Diffstat (limited to 'tex/thesis/contribution/contribution.tex')
-rw-r--r--    tex/thesis/contribution/contribution.tex | 30
1 file changed, 15 insertions, 15 deletions
diff --git a/tex/thesis/contribution/contribution.tex b/tex/thesis/contribution/contribution.tex
index 18d956f..b5d47db 100644
--- a/tex/thesis/contribution/contribution.tex
+++ b/tex/thesis/contribution/contribution.tex
@@ -372,7 +372,7 @@ strategy. This means that each time we improve our strategy we must
make it greater in at least one $\max$-expression, and no worse in the
others.
-To this end a new function, $P_{\max}: ((E_{\max} \to E), (X \to D))
+To this end a new function, $P_{\max}: ((E_{\max} \to E) \times (X \to D))
\to (E_{\max} \to E)$, is used below as a ``strategy improvement
operator''. $P_{\max}$ takes a $\max$-strategy and a variable
assignment and returns a new $\max$-strategy which constitutes an
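The retyped $P_{\max}$ above takes a $\max$-strategy together with a variable assignment and returns an improved strategy: greater in at least one $\max$-expression, no worse in the others. A minimal Python sketch of such an operator, assuming a hypothetical expression type with a subexpressions field and an evaluate(expr, rho) helper (both assumptions, not the thesis's code):

# Sketch of a strategy improvement operator P_max: switch each
# max-expression to a strictly better subexpression under rho, so the
# new strategy improves in at least one max-expression and is no
# worse in the others.  e.subexpressions and evaluate() are assumed.
def p_max(sigma, rho, max_exprs, evaluate):
    improved = dict(sigma)
    for e in max_exprs:
        best = evaluate(improved[e], rho)     # value of current choice
        for sub in e.subexpressions:
            if evaluate(sub, rho) > best:     # strict improvement only
                improved[e] = sub
                best = evaluate(sub, rho)
    return improved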
@@ -398,14 +398,14 @@ the approach presented in Listing \ref{algo:naive-strategy}.
\begin{algorithmic}
\Assumptions
\begin{tabularx}{0.9\textwidth}{rX}
- $\sigma$ & $\in E_{\max} \to E$, a $\max$ strategy \\
+ $\sigma$ & $: E_{\max} \to E$, a $\max$-strategy \\
$\system$ & $\in \Systems$, an equation
system \\
- $\rho$ & $\in X \to D$, a variable assignment \\
+ $\rho$ & $: X \to D$, a variable assignment \\
- $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \CZ)) \to (E_{\max}
+ $P_{\max}$ & $: ((E_{\max} \to E) \times (X \to \CZ)) \to (E_{\max}
\to E)$, a $\max$-strategy improvement operator \\
\end{tabularx}
\EndAssumptions
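Under the assumptions listed above, the naive approach of Listing \ref{algo:naive-strategy} amounts to an improve/re-solve loop. A hedged Python sketch, building on the p_max sketch above; the solve helper, which returns the least solution of the equation system induced by a fixed strategy, is an assumption:

# Sketch of naive strategy iteration: improve the max-strategy with
# p_max, re-solve the induced equation system, repeat until stable.
# solve(sigma) is an assumed helper returning a variable assignment.
def naive_strategy_iteration(sigma, rho, max_exprs, evaluate, solve):
    while True:
        improved = p_max(sigma, rho, max_exprs, evaluate)
        if improved == sigma:      # no expression can improve further
            return sigma, rho
        sigma = improved
        rho = solve(sigma)         # least solution under new strategy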
@@ -450,10 +450,10 @@ solve $\max$-strategy iteration problems.
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- $\sigma$ & $\in (E_{\max} \to E)$, a mapping from
+ $\sigma$ & $: (E_{\max} \to E)$, a mapping from
$\max$-expressions to a subexpression \\
- $\inflSI$ & $\in (E_{\max} \to 2^{E_{\max}}$, a mapping from a
+ $\inflSI$ & $: (E_{\max} \to 2^{E_{\max}})$, a mapping from a
$\max$-expression to the sub-expressions it influences \\
$\stableSI$ & $\subseteq E_{\max}$, the set of all
@@ -461,7 +461,7 @@ solve $\max$-strategy iteration problems.
$\system$ & $\in \Systems$, an equation system \\
- $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \CZ)) \to (E_{\max}
+ $P_{\max}$ & $: ((E_{\max} \to E) \times (X \to \CZ)) \to (E_{\max}
\to E)$, a $\max$-strategy improvement operator \\
\end{tabularx}
\EndGlobals
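The globals above drive the W-DFS bookkeeping on the strategy side: when a $\max$-expression's chosen subexpression changes, everything recorded against it in $\inflSI$ must lose its ``stable'' status. A rough Python sketch of that destabilisation step (names mirror the globals; the recursive shape is an assumption about the algorithm's structure, not taken from the listing):

# Sketch of destabilisation in W-DFS: when e's strategy changes,
# every max-expression it influences is removed from the stable set,
# recursively, so it will be re-examined.  Influences are dropped
# here and re-recorded the next time those expressions are solved.
def destabilise_si(e, infl_si, stable_si):
    for dep in infl_si.pop(e, set()):
        if dep in stable_si:
            stable_si.discard(dep)
            destabilise_si(dep, infl_si, stable_si)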
@@ -610,13 +610,13 @@ This algorithm is presented in three parts.
\begin{algorithmic}
\Globals
\begin{tabularx}{0.9\textwidth}{rX}
- $D$ & $\in X \to \CZ$, a mapping from variables to their current
+ $D$ & $: X \to \CZ$, a mapping from variables to their current
value \\
- $D_\old$ & $\in X \to \CZ$, a mapping from variables to their
+ $D_\old$ & $: X \to \CZ$, a mapping from variables to their
last stable value \\
- $\inflFP$ & $\in X \to 2^X$, a mapping from a variable to the
+ $\inflFP$ & $: X \to 2^X$, a mapping from a variable to the
variables it \emph{may} influence \\
$\stableFP$ & $\subseteq X$, a set of ``stable'' variables \\
@@ -627,17 +627,17 @@ This algorithm is presented in three parts.
\\
- $\sigma$ & $\in E_{\max} \to E$, a mapping from
+ $\sigma$ & $: E_{\max} \to E$, a mapping from
$\max$-expressions to a subexpression \\
- $\inflSI$ & $\in E_{\max} \to 2^{E_{\max}}$, a mapping from a
+ $\inflSI$ & $: E_{\max} \to 2^{E_{\max}}$, a mapping from a
$\max$-expression to the sub-expressions it influences \\
$\stableSI$ & $\subseteq E_{\max}$, the set of all
$\max$-expressions whose strategies are stable \\
- $P_{\max}$ & $ \in ((E_{\max} \to E), (X \to \CZ)) \to (E_{\max}
- \to E)$, a $\max$-strategy improvement operator \\
+ $P_{\max}$ & $: ((E_{\max} \to E) \times (X \to \CZ)) \to
+ (E_{\max} \to E)$, a $\max$-strategy improvement operator \\
\\
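Taken together, the globals declared in this hunk pair fixpoint-side state ($D$, $D_\old$, $\inflFP$, $\stableFP$) with strategy-side state ($\sigma$, $\inflSI$, $\stableSI$). As a plain Python transcription (field names are direct renderings of the declared globals, nothing more):

# The combined algorithm's global state, transcribed from the
# declarations above: fixpoint-side values and influences alongside
# strategy-side choices and influences.
class CombinedState:
    def __init__(self):
        self.D = {}              # X -> CZ, current variable values
        self.D_old = {}          # X -> CZ, last stable values
        self.infl_fp = {}        # X -> set(X), may-influence map
        self.stable_fp = set()   # stable variables
        self.sigma = {}          # max-expr -> chosen subexpression
        self.infl_si = {}        # max-expr -> set(max-expr) influenced
        self.stable_si = set()   # max-exprs with stable strategies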
@@ -725,7 +725,7 @@ identify \emph{changed} values, rather than only \emph{unstable} ones.
This procedure is similar to the equivalent method in the W-DFS
algorithm, except for the fact that $\solve$ has been renamed to
$\solve \fixpoint$. $\eval \fixpoint$ performs exactly the same
-function as the $\eval$ function in Figure \ref{algo:wdfs}.
+function as the $\eval$ function in Listing \ref{algo:wdfs}.
\begin{algorithm}[H]
\begin{algorithmic}
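The $\eval \fixpoint$ function described above serves the same purpose as $\eval$ in Listing \ref{algo:wdfs}: look up a variable's value on behalf of another while recording the dependency. A minimal Python sketch of that pattern (the solve_fixpoint callable and the dict/set representations are assumptions):

# Sketch of eval in the W-DFS style: solve x first so its value is
# current, record that x may influence the asking variable, then
# return x's value.  A later change to x then destabilises `asker`.
def eval_fixpoint(asker, x, D, infl_fp, solve_fixpoint):
    solve_fixpoint(x)                         # ensure D[x] is up to date
    infl_fp.setdefault(x, set()).add(asker)   # x may influence asker
    return D[x]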