author    Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-11-21 19:56:50 +1100
committer Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-11-21 19:56:50 +1100
commit    6be89e25c3d27df5eb79c077ba73f52e2048d3a1 (patch)
tree      c004f425d899184f90b918c34eefda2872c81ea5
parent    f7f83793c2d3f8f37b49bf1eab04f08f8118797e (diff)
Thesis writing! Yum yum!
-rw-r--r--  tex/thesis/contribution/contribution.tex   |  11
-rw-r--r--  tex/thesis/introduction/introduction.tex   |  39
-rw-r--r--  tex/thesis/litreview/litreview.tex         | 378
-rw-r--r--  tex/thesis/references.bib                  |  48
4 files changed, 255 insertions, 221 deletions
diff --git a/tex/thesis/contribution/contribution.tex b/tex/thesis/contribution/contribution.tex
index 3e3ed6c..48a5e73 100644
--- a/tex/thesis/contribution/contribution.tex
+++ b/tex/thesis/contribution/contribution.tex
@@ -80,11 +80,12 @@ a few terms and notations. We will assume a fixed, finite set $X$ of
variables. We will also define $\CZ = \Z \cup \{-\infty, \infty\}$.
\begin{definition}
- A \textbf{variable assignment} is a partial function, $\rho: X \to
- \CZ$ that maps variables to values. The domain of a variable
- assignment, $\rho$, is represented by $\mathsf{domain}(\rho)
- \subseteq X$. An underlined value, $\underline{a}$, indicates a
- variable assignment $\{ x \mapsto a \mid \forall x \in X \}$.
+ A \textbf{variable assignment} is a partial function $\rho:
+ X \leadsto \CZ$ that maps variables to values. The domain of a
+ variable assignment, $\rho$, is represented by
+ $\mathsf{domain}(\rho) \subseteq X$. An underlined value,
+ $\underline{a}$, indicates the variable assignment $\{ x \mapsto
+ a \mid x \in X \}$.
Variable assignments $\rho : X \to \CZ$ and $\varrho : X \to \CZ$
may be composed with the $\oplus$ operator as follows:
diff --git a/tex/thesis/introduction/introduction.tex b/tex/thesis/introduction/introduction.tex
index 453279d..f59f30f 100644
--- a/tex/thesis/introduction/introduction.tex
+++ b/tex/thesis/introduction/introduction.tex
@@ -1,4 +1,41 @@
\chapter{Introduction}
-The introduction goes in here. \newcite{clark-curran:07} did something inline. And here they did something not inline \cite{clark-curran:07}.
+\section{Motivation}
+Software bugs are costly: they waste time and money, compromise
+security and, in the worst cases, cost lives. Hailpern and Santhanam
+state that debugging, verification and testing can easily range from
+50\% to 75\% of the total development cost of a
+system~\cite{Hailpern01softwaredebugging}.
+
+\section{Contribution}
+
+In this thesis we present an implementation of the
+strategy-iteration-based static analyser of Gawlitza et
+al.~\cite{EasyChair:117}. Our implementation has several enhancements
+which significantly improve the practical performance of the analyser
+on real-world data.
+
+Theoretical contribution:
+\begin{enumerate}
+\item
+ We present a demand-driven strategy improvement algorithm for
+ solving monotonic, expansive equation systems involving $\min$ and
+ $\max$ operators.
+\end{enumerate}
+
+Systems contributions:
+\begin{enumerate}
+\item
+ We develop a solver for monotonic, expansive equation systems based
+ on the work of Gawlitza et al.~\cite{EasyChair:117}, with several
+ improvements.
+\item
+ We analyse the performance of our improved solver on a set of
+ equation systems to demonstrate the effect of our improvements.
+\item
+ We integrate our solver into the LLVM framework to perform analysis
+ over zones~\cite{mine:padoII}.
+\item
+ We analyse the performance of our LLVM analysis on various program
+ inputs.
+\end{enumerate}
diff --git a/tex/thesis/litreview/litreview.tex b/tex/thesis/litreview/litreview.tex
index 1ac34d5..1e0ef90 100644
--- a/tex/thesis/litreview/litreview.tex
+++ b/tex/thesis/litreview/litreview.tex
@@ -1,184 +1,173 @@
\newcommand{\Env}{\mathsf{Env}}
\newcommand{\PP}{\mathsf{PP}}
-\newcommand{\States}{\mathsf{States}}
+\newcommand{\states}{\mathsf{states}}
\newcommand{\state}{\mathsf{state}}
\renewcommand{\b}[1]{\mathbf{#1}}
+\newcommand{\semantics}[1]{\llb #1 \rrb}
-\chapter{Literature Review} \label{chap:litreview}
+\newcommand{\Stmt}{\mathsf{Stmt}}
+\newcommand{\Vars}{\mathsf{Vars}}
+
+\chapter{Preliminaries} \label{chap:litreview}
+
+In this chapter we will review the literature relevant to static
+analysis. We will briefly cover the semantics of programs in general
+before looking more closely at the framework of abstract
+interpretation for program analysis. The chapter will conclude with
+an explanation of one particular method of abstract interpretation
+over zones.
\section{Program Semantics}
In order to perform any static analysis of a program it is necessary
to have some notion of the \emph{semantics}, or meaning, of a
program. The semantics of a program can be captured in several ways,
-but for our purposes we will consider Denotational
-Semantics\cite{SCOTT70B}.
-
-Denotational semantics takes a view of a program as a (partial)
-function built up by the composition of many smaller functions. By
-expressing our expressions, assignments, conditionals and loops as
-smaller functions we can then compose them to determine the semantics
-of our whole program. For imperative languages we consider each
-assignment, conditional or loop as having the denotation $\llb s \rrb:
-\state \to \state$. We define their composition as $\llb s_1 ; s_2
-\rrb = \llb s_2 \rrb \circ \llb s_1 \rrb$.
-
-As an example, we'll define some denotational semantics for a simple
-language. Here our $\state$ will be a partial function $\state: I \to \N$
-\begin{figure}[H]
- \begin{minipage}{\textwidth}
- Syntax:
- \end{minipage} \\[0.1cm]
- \begin{minipage}{0.8\textwidth}
- $P ::= S$ \\
- $S ::= S_1;S_2 \mid \b{if}~ B ~\b{then}~ S_1 ~\b{else~} S_2 \mid I := E$ \\
- $E ::= E_1 + E_2 \mid I \mid N$ \\
- $B ::= E_1 = E_2 \mid \lnot B$ \\
- $I ::= $ variable identifiers \\
- $N ::= \N$
- \end{minipage}
- \\[0.5cm]
- \begin{minipage}{\textwidth}
- Semantics:
- \end{minipage} \\[0.1cm]
- \begin{minipage}{0.8\textwidth}
- $\b{P}: \state \to \state$ \\
- $\b{P}\llb S \rrb = \b{S}\llb S \rrb \{\}$ \\[0.2cm]
-
- $\b{S}: \state \to \state$ \\
- $\b{S}\llb S_1;S_2 \rrb = \lambda v . (\b{S}\llb S_2 \rrb (\b{S}\llb S_1 \rrb(v)))$ \\
- $\b{S}\llb \b{if}~ B ~\b{then}~ S_1 ~\b{else~} S_2 \rrb = \lambda v . (\b{S} \llb S_b \rrb(v))$
- where $b = \b{B}\llb B \rrb (v) + 1$ \\
- $\b{S}\llb I := E \rrb = \lambda v: (v \oplus \{ I \mapsto \b{E}\llb E \rrb(v) \})$ \\[0.2cm]
-
- $\b{E}: \state \to \N$ \\
- $\b{E}\llb E_1 + E_2 \rrb = \lambda v . (\b{E}\llb E_1 \rrb(v) + \b{E}\llb E_2 \rrb(v))$ \\
- $\b{E}\llb I \rrb = \lambda v . v(I)$ \\
- $\b{E}\llb N \rrb = \lambda v . N$ \\[0.2cm]
-
- $\b{B}: \state \to \{0,1\}$ \\
- $\b{B}\llb E_1 = E_2 \rrb = \lambda v . (\b{if}~ \b{E}\llb E_1 \rrb(v) = \b{E}\llb E_2 \rrb(v) ~\b{then}~ 1 ~\b{else}~ 0)$
- \end{minipage}
- \caption{Denotational semantics for a simple language.}
- \label{fig:denotational-semantics-example}
-\end{figure}
-
-
-
-A program's semantics can be realised by considering the possible
-values for each variable at each point in the program, which we will
-refer to as the possible \emph{states} of the program, and the
-transitions between these states performed by \emph{statements}.
-
-If we consider a finite set of control points, $N$, and a finite set
-of variables $X$ then we can capture the state of a program at a
-particular point
-
-As a formalisation of these notions we can consider these to be
-defined as follows:
+but for our purposes we will consider \emph{denotational}, or
+\emph{mathematical}, \emph{semantics}\cite{SCOTT70B}.
+
+Denotational semantics views a program as a (partial) function built
+up by the composition of many smaller functions. By expressing
+expressions, assignments, conditionals and loops as small functions
+we can compose them to determine the semantics of the whole
+program. The operator $\semantics{\cdot}$ represents the semantics of
+its argument as a mathematical function.
+
+In imperative languages, however, programs are not simply a series of
+functional transformations: each statement or expression may have
+side effects, so it is necessary to introduce some notion of
+``state'' into our semantics. By defining $\semantics{s}: \states \to
+\states$ for each statement $s$ we provide a mechanism for statements
+to perform side effects. Each element of $\states$ represents a
+possible state of the program. For example, an integral assignment
+statement $I := E$, assigning the value of an expression $E$ to a
+variable $I$, has the following denotation:
\begin{align*}
- \Env:& X \to \Z \\
- \PP:& \{ \text{program points} \} \\
- \States:& \PP \times \Env
+ \semantics{I := E} = \lambda \rho . (\rho \oplus \{I \mapsto \semantics{E}(\rho)\})
\end{align*}
+where $\rho \in \states$ and $\semantics{E}: \states \to \Z$. Our
+$\states$ in this case are functions $\rho: \Vars \to \D$ from
+variables to values.
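+
+To make this concrete, the following is a minimal sketch (in Python,
+and not part of the formal development of this thesis) of
+denotational semantics as composable state transformers: a state is a
+dictionary from variable names to integers, an expression denotes a
+function from states to integers, and a statement denotes a function
+from states to states.
+\begin{lstlisting}
+# A state is a dict from variable names to integers; an expression
+# denotes a function State -> int; a statement denotes State -> State.
+
+def assign(var, expr):
+    """Denotation of `var := expr`: an updated copy of the state."""
+    return lambda state: {**state, var: expr(state)}
+
+def seq(*stmts):
+    """Denotation of `s1; s2; ...`: composition of the denotations."""
+    def run(state):
+        for s in stmts:
+            state = s(state)
+        return state
+    return run
+
+# [[ x := 1 ; y := x + 2 ]] applied to the empty state
+program = seq(
+    assign("x", lambda s: 1),
+    assign("y", lambda s: s["x"] + 2),
+)
+print(program({}))  # {'x': 1, 'y': 3}
+\end{lstlisting}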
+
+Denotational semantics gives us a formal framework in which to
+analyse programs mathematically, but it does not by itself give us a
+way to perform automated analysis of programs. As it captures the
+semantics of a program exactly, it does not avoid the consequences of
+Rice's Theorem~\cite{Rice} or the Halting Problem~\cite{Halting},
+which preclude us from performing precise analysis of programs in
+general.
+\section{Abstract Interpretation} \label{sec:abstract-interpretation}
+In order to work around the limitations on precise static analysis in
+general, Cousot and Cousot developed a framework for performing sound
+program analysis called ``abstract
+interpretation''~\cite{CousotCousot77-1}. In this framework we
+consider a program's semantics under an abstraction. The original
+semantics we call the ``concrete semantics'' and the abstracted
+semantics we call the ``abstract semantics''. The abstract semantics
+are obtained by replacing $\D$ and $\states$ from the denotational
+semantics with ``abstracted'' structures.
+To provide us with some intuition of these concepts, consider the
+following example:
+\begin{example}
+ Let us assume that our program's state is represented by $\rho:
+ \Vars \to \Z$, with values coming from $\Z$.
-Let $X$ be the set of program variables, $D$ be the union of all
-possible domains from which variables can come, and $PP$ be the set of
-all edges in the Control Flow Graph of the program.
-
-\begin{eqnarray}
-Env: {} & X \to D \\
-Expr: {} & Env \to D \\
-States: {} & PP \times Env
-\end{eqnarray}
-
-$States$, then, contains every possible state for the program ($Env$)
-at each program point. A more interesting set for us is the set of
-reachable states, this is: states which the program can actually reach
-during execution. This concept of ``reachable states'' allows us to
-formally capture all possible execution states for a program. We can
-further define $Reach(p) = {e \in Env | \le p,e \ge \in States}$
-
-Unfortunately it is unfeasible to analyse the complete semantics of a
-program, due to the sheer number of possible reachable states which
-must be examined. In many cases there are an infinite number of states
-which would have to be considered to ensure a sound analysis of the
-program.
+ One possible abstraction would be to instead consider a variable's
+ \emph{sign}. That is, to consider $x$ as satisfying one of the
+ following (the symbols in parentheses will be used to refer to these
+ values from now on):
+ \begin{align*}
+ x &\in \Z & (\pm) \\
+ x &\ge 0 & (+) \\
+ x &\le 0 &(-) \\
+ x &= 0 &(0)
+ \end{align*}
+ From this abstraction we can then determine facts about ordinary
+ arithmetic operations in our program. We know, for instance, that
+ $(+) \times (-) = (-)$ and that $0 \times (\pm) = 0$. We also know
+ that $x / 0$ is certainly an error.
+
+ \label{example:abstract-interpretation}
+\end{example}
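+
+As an illustration only (the encoding of abstract values below is our
+own, not taken from the literature), the sign abstraction of
+Example~\ref{example:abstract-interpretation} can be implemented in a
+handful of lines of Python, with abstract multiplication defined case
+by case:
+\begin{lstlisting}
+# Abstract values: "+-" means x in Z, "+" means x >= 0,
+# "-" means x <= 0 and "0" means x = 0.
+TOP, POS, NEG, ZERO = "+-", "+", "-", "0"
+
+def abs_mul(a, b):
+    """Abstract multiplication over the sign domain."""
+    if a == ZERO or b == ZERO:
+        return ZERO                  # 0 * x = 0
+    if a == TOP or b == TOP:
+        return TOP                   # an unknown sign stays unknown
+    return POS if a == b else NEG    # (+)*(+) = (+), (+)*(-) = (-)
+
+print(abs_mul(POS, NEG))   # '-'
+print(abs_mul(ZERO, TOP))  # '0'
+\end{lstlisting}
+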
+Abstraction sacrifices precision of analysis in exchange for
+decidability. By abstracting our domain in a way which is ``sound'',
+that is, which safely over-approximates our concrete semantics, we
+ensure that any analysis we perform on the abstraction is also valid
+for the concrete semantics. The price of this abstraction is
+precision: it is possible that the analysis will identify errors
+which are not errors in the concrete semantics.
-\section{Abstract Interpretation} \label{sec:abstract-interpretation}
\subsection{General Framework}
-Cousot and Cousot developed a framework for performing sound program
-analysis called ``Abstract Interpretation''. In this framework we
-consider a program under a set of ``abstract semantics'' which, in
-some sense, simplify the program (and the analysis thereof).
-
-As a simple example one may consider the case of mathematical
-expressions over the integers. The operators \(+,\times\) perform
-operations on these integers, with their semantics being defined as
-one would expect. We may then define an abstract semantic of these
-expressions when considering only signs (and zero), for example. Then
-our abstract states contains four states: ${(+), (-), (\pm), 0}$.
-
+In order to transform between the concrete and abstract domains we
+define two functions: $\alpha$ and $\gamma$. The function $\alpha:
+2^\states \to \states_\alpha$ transforms a set of concrete states
+into an abstract state. The function $\gamma: \states_\alpha \to
+2^\states$ transforms an abstract state into a set of concrete
+states. We require these two functions to satisfy the properties of a
+Galois connection:
\begin{align*}
- ( + ) + ( + ) &= ( + ) \\
- ( + ) + ( - ) &= ( \pm ) \\
- ( - ) + ( - ) &= ( - ) \\
- x + 0 &= x \\
- ( \pm ) + x &= ( \pm ) \\
- ( + ) \times ( + ) &= ( + ) \\
- ( + ) \times ( - ) &= ( - ) \\
- ( - ) \times ( + ) &= ( - ) \\
- 0 \times x &= 0 \\
- ( \pm ) \times x &= ( \pm ), \forall x \ne 0 \\
+  \alpha(a) \le b \iff a \subseteq \gamma(b), && \forall a \subseteq
+  \states,\ b \in \states_\alpha.
\end{align*}
-
-This abstract semantic will give us sound results (any analysis of the
-abstract semantic will correspond to an equivalent analysis of the
-concrete semantic), but will not give us every opportunity for
-analysis (the abstraction may ignore relevant details).
-
-In order to ensure we have a useful abstract interpretation it's
-necessary to provide two operations to map between concrete states and
-abstract states. These operations we call ``abstraction'' (mapping
-from a concrete state to an abstract state), denoted by \(\alpha\),
-and ``concretization'' (mapping from an abstract state to a concrete
-state), denoted by \(\gamma\).
-
-\textbf{Write more about the abstraction/concretization functions}
+\noindent As a consequence of this, for any set of concrete states
+$a \subseteq \states$ the concretisation of its abstraction,
+$\gamma(\alpha(a))$, must contain $a$. In particular we are looking
+for the \emph{best} abstraction: one for which $\alpha(a)$ is as
+tight as possible for a given set of states $a$.
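+
+The following sketch checks these conditions for the sign domain of
+Example~\ref{example:abstract-interpretation}. To keep everything
+finitely enumerable we restrict the concrete values to a small set of
+integers; this restriction, and the encoding of the abstract values,
+are our own simplifications for illustration.
+\begin{lstlisting}
+from itertools import chain, combinations
+
+UNIVERSE = set(range(-3, 4))          # a toy stand-in for Z
+SIGNS = ["0", "+", "-", "+-"]         # from most to least precise
+
+def gamma(sign):
+    """Concretisation: the set of values a sign stands for."""
+    return {"0": {0},
+            "+": {v for v in UNIVERSE if v >= 0},
+            "-": {v for v in UNIVERSE if v <= 0},
+            "+-": UNIVERSE}[sign]
+
+def alpha(values):
+    """Abstraction: the most precise sign covering `values`."""
+    return next(s for s in SIGNS if values <= gamma(s))
+
+def leq(a, b):
+    """Abstract ordering: a <= b iff gamma(a) is within gamma(b)."""
+    return gamma(a) <= gamma(b)
+
+# Galois connection: alpha(A) <= b  iff  A is a subset of gamma(b).
+subsets = chain.from_iterable(
+    combinations(UNIVERSE, n) for n in range(len(UNIVERSE) + 1))
+assert all(leq(alpha(set(A)), b) == (set(A) <= gamma(b))
+           for A in subsets for b in SIGNS)
+\end{lstlisting}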
+
+Abstract interpretation follows on from the idea of denotational
+semantics, and it is possible to determine the abstract semantics of
+an operation based on its concrete semantics. We use
+$\semantics{\cdot}^\#$ to represent the abstract semantics of a
+statement, and define the transformation as $\semantics{S}^\# = \alpha
+\circ \semantics{S} \circ \gamma$.
+
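+Continuing the sketch above, and again restricting ourselves to a
+small finite set of concrete values so that $\gamma$ can be
+enumerated (a simplification of our own; a real analyser computes
+abstract transformers symbolically), a concrete operation on single
+values can be lifted to the sign domain as follows:
+\begin{lstlisting}
+UNIVERSE = set(range(-3, 4))
+GAMMA = {"0": {0},
+         "+": {v for v in UNIVERSE if v >= 0},
+         "-": {v for v in UNIVERSE if v <= 0},
+         "+-": UNIVERSE}
+
+def alpha(values):
+    return next(s for s in ["0", "+", "-", "+-"] if values <= GAMMA[s])
+
+def lift(op):
+    """Abstract transformer alpha . op . gamma for a unary operation."""
+    # The intersection clips results back into our toy universe.
+    return lambda sign: alpha({op(v) for v in GAMMA[sign]} & UNIVERSE)
+
+negate = lift(lambda x: -x)
+print(negate("+"))   # '-': negating a non-negative value
+print(negate("0"))   # '0'
+\end{lstlisting}
+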
+Much has been written on the topic of abstract domains, ranging from
+the relatively simple, such as signs and
+intervals~\cite{CousotCousot77-1}, to the more complex
+zones~\cite{mine:padoII}, octagons~\cite{mine:hosc06} and more general
+convex polyhedra~\cite{DBLP:conf/vmcai/SankaranarayananSM05}. For this
+thesis we are only concerned with \emph{intervals} and \emph{zones}.
+
+%\todo{widening}
\subsection{Intervals}
-Something about intervals.
-
-\subsection{Zones} \label{subsec:zones}
+The abstract domain of \emph{intervals} was introduced by Cousot and
+Cousot~\cite{CousotCousot77-1} along with abstract interpretation. In
+this domain we constrain each variable $x$ to lie within a range $[a,
+b] \in \CZ^2$. This domain is finer than signs, so it allows more
+information about the program to be determined. An equivalent
+formulation of intervals is that we are finding upper bounds on both
+$x$ and $-x$:
+\begin{align*}
+  x &\le b \text{ and} \\
+  -x &\le -a.
+\end{align*}
+%\noindent We are finding \emph{upper bounds} for $x$ and $-x$ at each
+%location in the program.
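+
+A minimal sketch (our own encoding, assuming an interval is a pair
+$(\mathit{lo}, \mathit{hi})$ with $\pm\infty$ standing in for missing
+bounds) of the basic interval operations:
+\begin{lstlisting}
+INF = float("inf")
+
+def join(a, b):
+    """Least upper bound: the smallest interval containing both."""
+    (alo, ahi), (blo, bhi) = a, b
+    return (min(alo, blo), max(ahi, bhi))
+
+def add(a, b):
+    """Abstract addition: [alo,ahi] + [blo,bhi] = [alo+blo, ahi+bhi]."""
+    (alo, ahi), (blo, bhi) = a, b
+    return (alo + blo, ahi + bhi)
+
+print(join((0, 5), (3, 10)))     # (0, 10)
+print(join((0, INF), (-2, 3)))   # (-2, inf)
+print(add((1, 1), (0, 9)))       # (1, 10)
+\end{lstlisting}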
-In order to perform any abstract interpretation we must first
-determine which abstract domain we will consider. Cousot and Cousot
-introduced the abstract domain of ``ranges'' in their original paper
-\cite{CousotCousot77-1}. This domain constrains variables to lie
-within a range \(x \in [a, b]\) with \(x, a, b \in \Z\).
-Further work has been done the abstract domain of zones and octagons,
-which are generalised by the template polyhedral abstract domain.
+\subsection{Zones}
-Zones provide an abstract domain in which we try to put a bound on
-either a variable's magnitude ($\pm x \le c$) or to put a bound on the
-difference between two variables ($x - y \le c$).
+\emph{Zones} are an abstract domain introduced by
+Min\'e~\cite{mine:padoII} which generalises intervals by also
+bounding the differences between variables. For each variable $x$,
+zones constrain the variable and its negation in the same way as
+intervals,
+\begin{align*}
+  x &\le b \text{ and} \\
+  -x &\le a.
+\end{align*}
+Zones additionally provide us with bounds on \emph{differences}
+between variables. So for each pair of variables $(x,y)$, zones
+provide us with a bound
+\begin{align*}
+ x - y & \le c.
+\end{align*}
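+
+Zones are commonly represented as \emph{difference-bound matrices}
+(DBMs). The sketch below (our own simplified encoding, with a special
+variable $0$ standing for the constant zero so that bounds on $x$ and
+$-x$ become difference constraints against $0$) shows how implied
+bounds can be recovered by a Floyd-Warshall-style closure:
+\begin{lstlisting}
+from itertools import product
+
+INF = float("inf")
+VARS = ["0", "x", "y"]
+
+def closure(m):
+    """Tighten bounds by propagating chains of differences."""
+    m = dict(m)
+    for v in VARS:
+        m[(v, v)] = 0                       # assume no negative cycles
+    for k, i, j in product(VARS, VARS, VARS):
+        m[(i, j)] = min(m.get((i, j), INF),
+                        m.get((i, k), INF) + m.get((k, j), INF))
+    return m
+
+# x <= 10, -x <= 0 (i.e. x >= 0) and y - x <= 3
+zone = {("x", "0"): 10, ("0", "x"): 0, ("y", "x"): 3}
+print(closure(zone)[("y", "0")])   # 13: the implied bound y <= 13
+\end{lstlisting}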
-Octagons generalise this by allowing us to vary the sign on both
-variables to derive a bound as $\pm x \pm y \le c$.
-The polyhedral abstract domain is a clear generalisation of both the
-zone and octagon abstract domains in that it determines a bound for an
-arbitrary linear combination of arbitrary program variables. For
-example, finding a bound as $c_0 x_0 + c_1 x_1 + ... + c_n x_n \le c$,
-where $c, c_i \in \R$ and $x_i$ are program variables.
\section{Min/max problem}
@@ -187,19 +176,17 @@ In \newcite{Gawlitza:2007:PFC:1762174.1762203} a conversion from a
program's Control Flow Graph (henceforth CFG) to a system of equations
involving only monotonic, expansive operators is presented. This
conversion results in an equation system which can be solved in order
-to perform Abstract Interpretation (Section
-\ref{sec:abstract-interpretation}) over Zones (Section
-\ref{subsec:zones}).
+to perform abstract interpretation over zones.
\subsection{Partial-ordering Formulation}
To begin our construction of the equation systems we first need to
formalise our notion of a CFG. For our purposes we will consider a CFG to be
-a tuple $G = (N, E, S, st)$. A finite set of \emph{control-points}
+a tuple $G = (N, E, s, st)$: a finite set of \emph{control-points}
$N$, a finite set $E \subseteq N \times N$ of \emph{edges} between
these control points (i.e.\ if $(u,v) \in E$ then an edge exists in the
-CFG from $u$ to $v$). A mapping $s: N \times N \to Stmt$
-taking each edge in the CFG to a \emph{statement}. $st \in N$ denotes
+CFG from $u$ to $v$), a mapping $s: N \times N \to \Stmt$ taking each
+edge in the CFG to a \emph{statement} in $\Stmt$, and $st \in N$, which denotes
the special \emph{start control-point}.
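+
+As a concrete (and purely illustrative) sketch, the CFG of the
+program in Figure~\ref{fig:partial-ordered-construction} below could
+be encoded as follows; the names and the string encoding of
+statements are our own:
+\begin{lstlisting}
+from collections import namedtuple
+
+CFG = namedtuple("CFG", ["nodes", "edges", "stmt", "start"])
+
+cfg = CFG(
+    nodes={"A", "B", "C"},
+    edges={("A", "B"), ("B", "B"), ("B", "C")},
+    stmt={("A", "B"): "x = 1",
+          ("B", "B"): "x <= 9: x = x + 1",
+          ("B", "C"): "x > 9"},
+    start="A",
+)
+
+# Each edge (u, v) will contribute one constraint on the abstract
+# state at v in terms of the abstract state at u.
+for (u, v) in sorted(cfg.edges):
+    print(f"V#[{v}] >= [[ {cfg.stmt[(u, v)]} ]]#( V#[{u}] )")
+\end{lstlisting}
+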
We can now begin our construction of our abstract semantics $V^\#[v],
@@ -254,19 +241,10 @@ we find that $V^\# [v] \ge \llb s_{(w,v)} \rrb ^\# (V^\# [w])$.
For the start node in the CFG this inequality is obviously
useless. The start node has no predecessors (by definition) and thus
must have a value that is not dependent on other nodes. We can,
-therefore, simply define the start node's possible abstract states to
-be at least as big as the complete set of abstract states (as in
+therefore, define the start node's possible abstract states to be at
+least as big as the complete set of abstract states (as in
\eqref{eqn:start-node}).
-We have not yet considered the effect that $s_{(u,v)}, \forall (u,v)
-\in E$ has on the state in each edge. The effect of the statement in
-the abstract domain depends on the statement's effect in the concrete
-domain, but for all statements we define $\llb s \rrb^\# := \alpha
-\circ \llb s \rrb \circ \gamma$. In essence this is just changing the
-environment in which the statement is acting, so the statement's
-effects are moved into our abstract domain with our abstraction and
-concretization functions.
-
\begin{example}
\label{example:partial-ordered-construction}
@@ -276,7 +254,7 @@ concretization functions.
\begin{minipage}{5cm}
\begin{lstlisting}
x = 1
- while x <= 99:
+ while x <= 9:
x = x + 1
\end{lstlisting}
\end{minipage}
@@ -292,23 +270,25 @@ concretization functions.
\path[every node/.style={fill=none}]
(A) edge node[right]{$x = 1$} (B)
- (B) edge [loop right] node[right]{$x \le 99: x = x + 1$} (B)
- (B) edge node[right]{$x > 99$} (C);
+ (B) edge [loop right] node[right]{$x \le 9: x = x + 1$} (B)
+ (B) edge node[right]{$x > 9$} (C);
\end{tikzpicture}
\end{minipage}
- \caption{Program and CFG for Example
- \ref{example:partial-ordered-construction}}
+ \caption{A simple program with its corresponding CFG}
\label{fig:partial-ordered-construction}
\end{figure}
We will now attempt to derive a system of inequalities from this for
- the \emph{least upper bound} of $x$:
+ the \emph{least upper bound}
\begin{align*}
- ub: {} & ~\text{\bf Vars} \to \CZ \\
- ub(x) &= \min\{v \in \CZ ~|~ x \le v\}
+ ub: {} & ~\Vars \to \CZ \\
+ ub(x) &= \min\{v \in \CZ ~|~ x \le v\}.
\end{align*}
+ We wish to find a value for $ub(x)$ at each program point in our
+ original program. We will denote these values by $ub(x_A)$,
+ $ub(x_B)$ and $ub(x_C)$.
For $A$ the value of $x$ is entirely unknown, so no useful bound can
be given. The possible values of $x$ are bounded only by the top
@@ -322,8 +302,8 @@ concretization functions.
itself, the other from $A$.
The edge from $A$ to $B$ is very simple. It has no constraint and
- simply sets the value of $x$ to $1$. It corresponds to an equation
- of the form $ub(x_B) \ge 1$.
+ sets the value of $x$ to $1$. It corresponds to an inequality of the
+ form $ub(x_B) \ge 1$.
The edge from $B$ to itself is somewhat more complex. To begin with
it changes the value of $x$ depending on the previous value of
@@ -336,7 +316,7 @@ concretization functions.
$ub(x_B)$.
\begin{align*}
ub(x_B) &\ge 1 \\
- ub(x_B) &\ge \min(ub(x_B), 99) + 1
+ ub(x_B) &\ge \min(ub(x_B), 9) + 1
\end{align*}
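+
+ As an aside (this is not how the solver described later works), the
+ least solution of these two inequalities can be found by naive
+ fixpoint iteration from $-\infty$, taking the maximum of the
+ right-hand sides at each step:
+\begin{lstlisting}
+INF = float("inf")
+
+ub_B = -INF
+while True:
+    new = max(1, min(ub_B, 9) + 1)
+    if new == ub_B:
+        break
+    ub_B = new
+
+print(ub_B)   # 10: the least upper bound for x at node B
+\end{lstlisting}
+ Note that with the original loop bound of $99$ this naive iteration
+ would take around a hundred steps; avoiding this kind of slow
+ convergence is part of the appeal of the strategy-iteration approach
+ used in this thesis.
+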
The upper bound on the value of $x$ for node $C$ is the same as the
@@ -355,7 +335,7 @@ concretization functions.
\begin{align*}
ub(x_A) &\ge \infty \\
ub(x_B) &\ge 1 \\
- ub(x_B) &\ge \min(ub(x_B), 99) + 1 \\
+ ub(x_B) &\ge \min(ub(x_B), 9) + 1 \\
ub(x_C) &\ge ub(x_B)
\end{align*}
@@ -398,37 +378,9 @@ one constraint by taking the maximum of them. This is because $x \ge
\label{example:both-bounds}
In order to determine a lower bound for $x$ at each program point it
- is sufficient to determine an \emph{upper} bound for $-x$. For our
- second example we will consider the following program fragment:
-
- \begin{figure}[here]
- \begin{minipage}{5cm}
- \begin{lstlisting}
- x = 0
- while x <= 9:
- x = x + 1
- \end{lstlisting}
- \end{minipage}
- \hfill
- \begin{minipage}{0.5\textwidth}
- \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node
- distance=2cm,main node/.style={circle,fill=blue!20,draw},every
- loop/.style={min distance=1.5cm}]
-
- \node[main node] (A) {$A$};
- \node[main node] (B) [below of=A] {$B$};
- \node[main node] (C) [below of=B] {$C$};
-
- \path[every node/.style={fill=none}]
- (A) edge node[right]{$x = 0$} (B)
- (B) edge [loop right] node[right]{$x \le 9: x = x + 1$} (B)
- (B) edge node[right]{$x > 9$} (C);
-
- \end{tikzpicture}
- \end{minipage}
- \caption{Program and CFG for Example \ref{example:both-bounds}}
- \label{fig:both-bounds}
- \end{figure}
+ is sufficient to determine an \emph{upper} bound for $-x$. We will
+ again consider the program fragment in Figure
+ \ref{fig:partial-ordered-construction}.
We will now try to derive an equation system for the \emph{upper}
and \emph{lower} bounds of $x$.
@@ -461,7 +413,7 @@ one constraint by taking the maximum of them. This is because $x \ge
The first constraint comes from the edge $A \to B$. This edge does
not do much of interest: it ignores the preceding value of $x$ and
- simply sets $x$ to $1$. This is captured in the following inequality:
+ sets $x$ to $1$. This is captured in the following inequality:
\begin{align*}
\left(\begin{array}{c}
ub(x_B) \\ -lb(x_B)
@@ -507,7 +459,7 @@ one constraint by taking the maximum of them. This is because $x \ge
\end{array}\right)
\end{align*}
- The edge $B \to C$ is quite simple as it simply provides another
+ The edge $B \to C$ is quite simple as it provides another
restriction on the value of $x$ without any further
transformation. This restriction is placed on the lower-bound of $x$
only (leaving the upper-bound unchanged) and is captured by the
@@ -621,10 +573,8 @@ one constraint by taking the maximum of them. This is because $x \ge
-\subsection{Formalities}
-
-
+\endinput
\subsection{Example solve}
\label{sec-1-3-5}
diff --git a/tex/thesis/references.bib b/tex/thesis/references.bib
index d85dea7..3993b5e 100644
--- a/tex/thesis/references.bib
+++ b/tex/thesis/references.bib
@@ -80,6 +80,19 @@
pages = {114--152},
publisher = {Springer-Verlag}
}
+@inproceedings{EasyChair:117,
+ author = {Thomas Martin Gawlitza and Helmut Seidl},
+ title = {Abstract Interpretation over Zones without Widening},
+ booktitle = {WING 2010},
+ editor = {Andrei Voronkov and Laura Kovacs and Nikolaj Bjorner},
+ series = {EPiC Series},
+ volume = {1},
+ pages = {12--43},
+ year = {2012},
+ publisher = {EasyChair},
+ bibsource = {EasyChair, http://www.easychair.org},
+ issn = {2040-557X},
+}
@inproceedings{Gawlitza:2007:PFC:1762174.1762203,
author = {Gawlitza, Thomas and Seidl, Helmut},
title = {Precise fixpoint computation through strategy iteration},
@@ -160,4 +173,37 @@
ADDRESS = {Oxford, England},
MONTH = {November},
YEAR = {1970}
-} \ No newline at end of file
+}
+@book{debugging-book,
+ author = {Zeller, Andreas},
+ title = {Why Programs Fail: A Guide to Systematic Debugging},
+ year = {2005},
+ isbn = {1558608664},
+ publisher = {Morgan Kaufmann Publishers Inc.},
+ address = {San Francisco, CA, USA},
+ pages = {21},
+}
+@ARTICLE{Hailpern01softwaredebugging,
+ author = {Brent Hailpern and Padmanabhan Santhanam},
+ title = {Software Debugging, Testing, and Verification},
+ journal = {IBM Systems Journal},
+ year = {2001},
+ volume = {41},
+ pages = {4--12}
+}
+@article{Halting,
+ author = {Turing, Alan M.},
+ title = {On Computable Numbers, with an Application to the
+Entscheidungsproblem},
+ journal = {Proceedings of the London Mathematical Society},
+ series = {2},
+ volume = {42},
+ pages = {230--265},
+ year = {1936}
+}