[1]
|
Elvira Albert, Samir Genaim, Raúl Gutiérrez, and Enrique Martin-Martin.
A Transformational Approach to Resource Analysis with
Typed-norms Inference.
Theory and Practice of Logic Programming, 20(3):310-357, 2020.
[ bibtex |
abstract |
DOI ]
In order to automatically infer the resource
consumption of programs, analyzers track how
data sizes change along the program's execution.
Typically, analyzers measure the sizes of data by
applying norms which are mappings from data
to natural numbers that represent the sizes of the
corresponding data. When norms are defined by taking
type information into account, they are named
typed-norms. This article presents a
transformational approach to resource analysis with
typed-norms that are inferred by a data-flow
analysis. The analysis is based on a transformation
of the program into an intermediate abstract
program in which each variable is abstracted with
respect to all considered norms which are valid for
its type. We also present the data-flow analysis to
automatically infer the required, useful,
typed-norms from programs. Our analysis is
formalized on a simple rule-based representation to
which programs written in different programming
paradigms (e.g., functional, logic, imperative) can
be automatically translated. Experimental results on
standard benchmarks used by other type-based
analyzers show that our approach is both efficient
and accurate in practice.
@article{AlbertGGM20,
  author             = {Elvira Albert and Samir Genaim and Ra{\'{u}}l Guti{\'{e}}rrez and Enrique Martin{-}Martin},
  title              = {A Transformational Approach to Resource Analysis with {Typed-norms} Inference},
  journal            = {Theory and Practice of Logic Programming},
  volume             = {20},
  number             = {3},
  pages              = {310--357},
  year               = {2020},
  issn               = {1471-0684},
  publisher          = {Cambridge University Press},
  doi                = {10.1017/S1471068419000401},
  projects           = {\lobass,\freetech,\forte},
  rank:acronym       = {TPLP},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {375},
  rank:jcr:q         = {2}
}
|
[2]
|
Jesús Doménech, John P. Gallagher, and Samir Genaim.
Control-Flow Refinement by Partial Evaluation, and its
Application to Termination and Cost Analysis.
Theory and Practice of Logic Programming, 19(5-6):990-1005,
2019.
[ bibtex |
abstract |
DOI ]
Control-flow refinement refers to program
transformations whose purpose is to make implicit
control-flow explicit, and is used in the context of
program analysis to increase precision. Several
techniques have been suggested for different
programming models, typically tailored to improving
precision for a particular analysis. In this paper
we explore the use of partial evaluation of Horn
clauses as a general-purpose technique for
control-flow refinement for integer transitions
systems. These are control-flow graphs where edges
are annotated with linear constraints describing
transitions between corresponding nodes, and they
are used in many program analysis tools. Using
partial evaluation for control-flow refinement has
the clear advantage over other approaches in that
soundness follows from the general properties of
partial evaluation; in particular, properties such
as termination and complexity are preserved. We use
a partial evaluation algorithm incorporating
property-based abstraction, and show how the right
choice of properties allows us to prove termination
and to infer complexity of challenging programs that
cannot be handled by state-of-the-art tools. We
report on the integration of the technique in a
termination analyzer, and its use as a preprocessing
step for several cost analyzers.
@article{DomenechGG19,
  author             = {Jes{\'{u}}s J. Dom{\'{e}}nech and John P. Gallagher and Samir Genaim},
  title              = {Control-Flow Refinement by Partial Evaluation, and its Application to Termination and Cost Analysis},
  journal            = {Theory and Practice of Logic Programming},
  volume             = {19},
  number             = {5--6},
  pages              = {990--1005},
  year               = {2019},
  issn               = {1471-0684},
  publisher          = {Cambridge University Press},
  doi                = {10.1017/S1471068419000310},
  projects           = {\lobass,\freetech,\forte},
  rank:acronym       = {TPLP},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {534},
  rank:jcr:q         = {1}
}
|
[3]
|
Elvira Albert, Antonio Flores-Montoya, Samir Genaim, and Enrique Martin-Martin.
Rely-Guarantee Termination and Cost Analyses of Loops with
Concurrent Interleavings.
Journal of Automated Reasoning, 59(1):47-85, 2017.
[ bibtex |
abstract |
DOI ]
By following a rely-guarantee style of
reasoning, we present novel termination and cost
analyses for concurrent programs that, in order to
prove termination or infer the cost of a considered
loop: (1) infer the termination/cost of each loop as
if it were a sequential one, imposing assertions on
how shared-data is modified concurrently; and then
(2) prove that these assertions cannot be violated
infinitely many times and, for cost analysis, infer
how many times they are violated. At the core of the
analysis, we use a may-happen-in-parallel
analysis to restrict the set of program points whose
execution can interleave. Interestingly, the same
kind of reasoning can be applied to prove
termination and infer upper bounds on the
number of iterations of loops with concurrent
interleavings. To the best of our knowledge, this is
the first method to automatically bound the cost of
such kind of loops. We have implemented our analysis
for an actor-based language, and showed its
accuracy and efficiency by applying it on several
typical applications for concurrent programs and on
an industrial case study.
@article{AlbertFGM17,
  author             = {Elvira Albert and Antonio Flores{-}Montoya and Samir Genaim and Enrique Martin{-}Martin},
  title              = {{Rely-Guarantee} Termination and Cost Analyses of Loops with Concurrent Interleavings},
  journal            = {Journal of Automated Reasoning},
  publisher          = {Springer},
  issn               = {0168-7433},
  volume             = {59},
  number             = {1},
  pages              = {47--85},
  year               = {2017},
  doi                = {10.1007/s10817-016-9400-6},
  projects           = {\vivac,\sicomoro,\lobass,\envisage},
  rank:acronym       = {JAR},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {585},
  rank:jcr:q         = {3},
  rank:ms:rating     = {59}
}
|
[4]
|
Elvira Albert, Richard Bubel, Samir Genaim, Reiner Hähnle, Germán Puebla, and Guillermo Román-Díez.
A Formal Verification Framework for Static Analysis - As
well as its instantiation to the resource analyzer COSTA and formal
verification tool KeY.
Software and Systems Modeling, 15(4):987-1012, 2016.
[ bibtex |
abstract |
DOI ]
Static analysis tools, such as resource analyzers,
give useful information on software systems,
especially in real-time and safety-critical
applications. Therefore, the question of the
reliability of the obtained results is highly
important. State-of-the-art static analyzers
typically combine a range of complex techniques,
make use of external tools, and evolve quickly. To
formally verify such systems is not a realistic
option. In this work we propose a different
approach whereby, instead of the tools, we formally
verify the results of the tools. The central idea of
such a formal verification framework for static
analysis is the method-wise translation of the
information about a program gathered during its
static analysis into specification contracts that
contain enough information for them to be verified
automatically. We instantiate this framework with
COSTA, a state-of-the-art static analysis system
for sequential Java programs, for producing resource
guarantees and KeY, a state-of-the-art verification
tool, for formally verifying the correctness of such
resource guarantees. Resource guarantees allow one to
be certain that programs will run within the
indicated amount of resources, which may refer to
memory consumption, number of instructions executed,
etc. Our results show that the proposed tool
cooperation can be used for automatically producing
verified resource guarantees.
@article{AlbertBGHPR16,
  author             = {Elvira Albert and Richard Bubel and Samir Genaim and Reiner H{\"{a}}hnle and Germ{\'{a}}n Puebla and Guillermo Rom{\'{a}}n{-}D{\'{\i}}ez},
  title              = {A Formal Verification Framework for Static Analysis - As well as its instantiation to the resource analyzer {COSTA} and formal verification tool {KeY}},
  journal            = {Software and Systems Modeling},
  volume             = {15},
  number             = {4},
  pages              = {987--1012},
  year               = {2016},
  doi                = {10.1007/s10270-015-0476-y},
  publisher          = {Springer},
  issn               = {1619-1366},
  projects           = {\envisage,\vivac,\sicomoro},
  rank:acronym       = {SOSYM},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {748},
  rank:jcr:q         = {2}
}
|
[5]
|
Elvira Albert, Antonio Flores-Montoya, Samir Genaim, and Enrique Martin-Martin.
May-Happen-in-Parallel Analysis for Actor-based
Concurrency.
ACM Transactions on Computational Logic, 17(2):11:1-11:39,
2016.
[ bibtex |
abstract |
DOI ]
This article presents a may-happen-in-parallel
(MHP) analysis for languages with actor-based
concurrency. In this concurrency model, actors are
the concurrency units such that, when a
method is invoked on an actor a2 from a task
executing on actor a1, statements of the current
task in a1 may run in parallel with those of the
(asynchronous) call on a2, and with those of
transitively invoked methods. The goal of the MHP
analysis is to identify pairs of statements in the
program that may run in parallel in any
execution. Our MHP analysis is formalized as a
method-level (local) analysis whose
information can be modularly composed to obtain
application-level (global) information. The
information yielded by the MHP analysis is essential
to infer more complex properties of actor-based
concurrent programs, e.g., data race detection,
deadlock freeness, termination and resource
consumption analyses can greatly benefit from the
MHP relations to increase their accuracy. We report
on MayPar, a prototypical implementation of an MHP
static analyzer for a distributed asynchronous
language.
@article{AlbertFGM16,
  author             = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim and Enrique Martin-Martin},
  title              = {{May-Happen-in-Parallel} Analysis for {Actor-based} Concurrency},
  journal            = {ACM Transactions on Computational Logic},
  year               = {2016},
  publisher          = {ACM},
  volume             = {17},
  number             = {2},
  pages              = {11:1--11:39},
  articleno          = {11},
  numpages           = {39},
  issn               = {1529-3785},
  doi                = {10.1145/2824255},
  projects           = {\envisage,\vivac,\sicomoro},
  rank:acronym       = {TOCL},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {559},
  rank:jcr:q         = {1}
}
|
[6]
|
Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Germán Puebla, and Guillermo Román-Díez.
Object-Sensitive Cost Analysis for Concurrent Objects.
Software Testing, Verification and Reliability, 25(3):218-271,
2015.
[ bibtex |
abstract |
DOI ]
This article presents a novel cost analysis framework
for concurrent objects. Concurrent objects form a
well established model for distributed concurrent
systems. In this model, objects are the concurrency
units which communicate among them via asynchronous
method calls. Cost analysis aims at automatically
approximating the resource consumption of executing
a program in terms of its input parameters. While
cost analysis for sequential programming languages
has received considerable attention, concurrency and
distribution have been notably less studied. The
main challenges of cost analysis in a concurrent
setting are: (1) Inferring precise size abstractions
for data in the program in the presence of shared
memory. This information is essential for bounding
the number of iterations of loops. (2) Distribution
suggests that analysis must infer the cost of the
diverse distributed components separately. We handle
this by means of a novel form of object-sensitive
recurrence equations which use cost centers in order
to keep the resource usage assigned to the different
components separate. We have implemented our
analysis and evaluated it on several small
applications which are classical examples of
concurrent and distributed programming.
@article{AlbertACGGPR15,
  author             = {Elvira Albert and Puri Arenas and Jes{\'u}s Correas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Germ{\'a}n Puebla and Guillermo Rom{\'a}n-D{\'\i}ez},
  title              = {Object-Sensitive Cost Analysis for Concurrent Objects},
  journal            = {Software Testing, Verification and Reliability},
  year               = {2015},
  publisher          = {Wiley},
  pages              = {218--271},
  volume             = {25},
  number             = {3},
  issn               = {0960-0833},
  doi                = {10.1002/stvr.1569},
  projects           = {\envisage,\vivac,\sicomoro},
  rank:acronym       = {STVR},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {336},
  rank:jcr:q         = {2}
}
|
[7]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
A Practical Comparator of Cost Functions and Its
Applications.
Science of Computer Programming, 111(3):483-504, 2015.
[ bibtex |
abstract |
DOI ]
Automatic cost analysis has significantly advanced in
the last few years. Nowadays, a number of cost
analyzers exist which automatically produce upper-
and/or lower-bounds on the amount of resources
required to execute a program. Cost analysis has a
number of important applications such as
resource-usage verification and program synthesis
and optimization. For such applications to be
successful, it is not sufficient to have automatic
cost analysis. It is also required to have automated
means for handling the analysis results, which are
in the form of Cost Functions (CFs for short) i.e.,
non-recursive expressions composed of a relatively
small number of types of basic expressions. In
particular, we need automated means for comparing
CFs in order to prove that a CF is smaller than or
equal to another one for all input values of
interest. General function comparison is a hard
mathematical problem. Rather than attacking the
general problem, in this work we focus on comparing
CF by exploiting their syntactic properties and we
present, to the best of our knowledge, the first
practical CF comparator which opens the door to
fully automated applications of cost analysis. We
have implemented the comparator and made its source
code available online, so that any cost analyzer can
use it.
@article{AlbertAGP15,
  author             = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
  title              = {A Practical Comparator of Cost Functions and Its Applications},
  journal            = {Science of Computer Programming},
  year               = {2015},
  publisher          = {Elsevier},
  pages              = {483--504},
  volume             = {111},
  number             = {3},
  issn               = {0167-6423},
  doi                = {10.1016/j.scico.2014.12.001},
  projects           = {\doves,\envisage,\vivac,\sicomoro},
  rank:acronym       = {SCP},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {1071},
  rank:jcr:q         = {3}
}
|
[8]
|
Damiano Zanardini and Samir Genaim.
Inference of Field-Sensitive Reachability and Cyclicity.
ACM Transactions on Computational Logic, 15(4):33:1-33:41,
September 2014.
[ bibtex |
abstract |
DOI |
arXiv ]
In heap-based languages, knowing that a variable
x points to an acyclic data structure is
useful for analyzing termination: this information
guarantees that the depth of the data structure to
which x points is greater than the depth
of the structure pointed to by
x.fld, and allows bounding the
number of iterations of a loop which traverses the
data structure on fld. In general,
proving termination needs acyclicity, unless
program-specific or non-automated reasoning is
performed. However, recent work could prove that
certain loops terminate even without inferring
acyclicity, because they traverse data structures
“acyclically”. Consider a double-linked list: if
it is possible to demonstrate that every cycle
involves both the “next” and the “prev” field,
then a traversal on “next” terminates since no
cycle will be traversed completely. This paper
develops a static analysis inferring field-sensitive
reachability and cyclicity information, which is
more general than existing approaches. Propositional
formulæ are computed, which describe which fields
may or may not be traversed by paths in the
heap. Consider a tree with edges “left” and
“right” to the left and right sub-trees, and
“parent” to the parent node: termination of a loop
traversing leaf-up cannot be guaranteed by
state-of-the-art analyses. Instead, propositional
formulæ computed by this analysis indicate that
cycles must traverse “parent” and at least one
between “left” and “right”: termination is
guaranteed as no cycle is traversed completely.
This paper defines the necessary abstract domains
and builds an abstract semantics on them. A
prototypical implementation provides the expected
result on relevant examples.
@article{ZanardiniG14,
  author             = {Damiano Zanardini and Samir Genaim},
  title              = {Inference of Field-Sensitive Reachability and Cyclicity},
  journal            = {ACM Transactions on Computational Logic},
  year               = {2014},
  month              = sep,
  volume             = {15},
  number             = {4},
  pages              = {33:1--33:41},
  articleno          = {33},
  numpages           = {41},
  issn               = {1529-3785},
  publisher          = {ACM},
  doi                = {10.1145/2629478},
  arxiv              = {http://arxiv.org/abs/1306.6526},
  projects           = {\doves,\envisage,\vivac},
  rank:acronym       = {TOCL},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {312},
  rank:jcr:q         = {2}
}
|
[9]
|
Amir M. Ben-Amram and Samir Genaim.
Ranking Functions for Linear-Constraint Loops.
Journal of the ACM, 61(4):26:1-26:55, 2014.
[ bibtex |
abstract |
DOI |
arXiv ]
In this paper we study the complexity of the problems:
given a loop, described by linear constraints over a
finite set of variables, is there a linear or
lexicographical-linear ranking function for this
loop? While existence of such functions implies
termination, these problems are not equivalent to
termination. When the variables range over the
rationals (or reals), it is known that both problems
are PTIME decidable. However, when they range over
the integers, whether for single-path or multipath
loops, the complexity has not yet been
determined. We show that both problems are
coNP-complete. However, we point out some special
cases of importance of PTIME complexity. We also
present complete algorithms for synthesizing linear
and lexicographical-linear ranking functions, both
for the general case and the special PTIME cases.
Moreover, in the rational setting, our algorithm for
synthesizing lexicographical-linear ranking
functions extends existing ones, because our class
of ranking functions is more general, yet it has
polynomial time complexity.
@article{Ben-AmramG14,
  author             = {Amir M. Ben-Amram and Samir Genaim},
  title              = {Ranking Functions for Linear-Constraint Loops},
  journal            = {Journal of the ACM},
  volume             = {61},
  number             = {4},
  pages              = {26:1--26:55},
  year               = {2014},
  issn               = {0004-5411},
  doi                = {10.1145/2629488},
  arxiv              = {http://arxiv.org/abs/1208.4041},
  projects           = {\doves,\envisage,\vivac},
  rank:acronym       = {JACM},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {4883},
  rank:jcr:q         = {2}
}
|
[10]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Guillermo Román-Díez.
Conditional Termination of Loops over Heap-allocated Data.
Science of Computer Programming, 92:2-24, 2014.
Special issue on Bytecode 2012.
[ bibtex |
abstract |
DOI ]
Static analysis which takes into account the values of
data stored in the heap is considered complex and
computationally intractable in practice. Thus, most
static analyzers do not keep track of object fields
nor of array contents, i.e., they are
heap-insensitive. In this article, we propose
locality conditions for soundly tracking
heap-allocated data in Java (bytecode) programs, by
means of ghost non-heap allocated
variables. This way, heap-insensitive analysis over
the transformed program can infer information on the
original heap-allocated data without sacrificing
efficiency. If the locality conditions cannot be
proven unconditionally, we seek to generate aliasing
preconditions which, when they hold in the initial
state, guarantee the termination of the
program. Experimental results show that we greatly
improve the accuracy w.r.t. a heap-insensitive
analysis while the overhead introduced is
reasonable.
@article{AlbertAGPG14,
  author             = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Guillermo Rom{\'a}n-D{\'\i}ez},
  title              = {Conditional Termination of Loops over {Heap-allocated} Data},
  journal            = {Science of Computer Programming},
  publisher          = {Elsevier},
  doi                = {10.1016/j.scico.2013.04.006},
  volume             = {92},
  pages              = {2--24},
  year               = {2014},
  issn               = {0167-6423},
  note               = {Special issue on Bytecode 2012},
  projects           = {\doves,\supra,\prometidos,\vivac},
  rank:acronym       = {SCP},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {911},
  rank:jcr:q         = {3}
}
|
[11]
|
Samir Genaim and Damiano Zanardini.
Reachability-based Acyclicity Analysis by Abstract
Interpretation.
Theoretical Computer Science, 474:60-79, 2013.
[ bibtex |
abstract |
DOI ]
In programming languages with dynamic use of memory,
such as Java, knowing that a reference variable x
points to an acyclic data structure is valuable for
the analysis of termination and resource usage
(e.g., execution time or memory consumption). For
instance, this information guarantees that the depth
of the data structure to which x points is greater
than the depth of the data structure pointed to by
x.f for any field f of x. This, in turn, allows
bounding the number of iterations of a loop which
traverses the structure by its depth, which is
essential in order to prove the termination or infer
the resource usage of the loop. The present paper
provides an Abstract-Interpretation-based
formalization of a static analysis for inferring
acyclicity, which works on the reduced
product of two abstract domains:
reachability, which models the property that
the location pointed to by a variable w can be
reached by dereferencing another variable v (in
this case, v is said to reach w); and
cyclicity, modeling the property that v can
point to a cyclic data structure. The analysis is
proven to be sound and optimal with
respect to the chosen abstraction.
@article{GenaimZ13,
  author             = {Samir Genaim and Damiano Zanardini},
  title              = {{Reachability-based} Acyclicity Analysis by Abstract Interpretation},
  journal            = {Theoretical Computer Science},
  volume             = {474},
  year               = {2013},
  pages              = {60--79},
  issn               = {0304-3975},
  publisher          = {Elsevier},
  doi                = {10.1016/j.tcs.2012.12.018},
  projects           = {\doves,\supra,\prometidos,\vivac},
  rank:acronym       = {TCS},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {4629},
  rank:jcr:q         = {3}
}
|
[12]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Heap Space Analysis for Garbage Collected Languages.
Science of Computer Programming, 78(9):1427-1448, 2013.
[ bibtex |
abstract |
DOI ]
Accurately predicting the dynamic memory
consumption (or heap space) of programs can be
critical during software development. It is
well-known that garbage collection (GC) complicates
such problem. The peak heap consumption of a
program is the maximum size of the data on the heap
during its execution, i.e., the minimum amount of
heap space needed to safely run the program.
Existing heap space analyses either do not take
deallocation into account or adopt specific models
of garbage collectors which do not necessarily
correspond to the actual memory usage. This paper
presents a novel static analysis for
garbage-collected imperative languages that infers
accurate upper bounds on the peak heap usage,
including exponential, logarithmic and polynomial
bounds. A unique characteristic of the analysis is
that it is parametric on the notion of object
lifetime, i.e., on when objects become collectible.
@article{AlbertGGZ13,
  author             = {Elvira Albert and Samir Genaim and Miguel G{\'o}mez-Zamalloa},
  title              = {Heap Space Analysis for Garbage Collected Languages},
  journal            = {Science of Computer Programming},
  publisher          = {Elsevier},
  volume             = {78},
  number             = {9},
  pages              = {1427--1448},
  year               = {2013},
  issn               = {0167-6423},
  doi                = {10.1016/j.scico.2012.10.008},
  projects           = {\doves,\supra,\prometidos},
  rank:acronym       = {SCP},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {664},
  rank:jcr:q         = {4}
}
|
[13]
|
Elvira Albert, Samir Genaim, and Abu Naser Masud.
On the Inference of Resource Usage Upper and Lower
Bounds.
ACM Transactions on Computational Logic, 14(3):22:1-22:35,
2013.
[ bibtex |
abstract |
DOI ]
Cost analysis aims at determining the amount of
resources required to run a program in terms of its
input data sizes. The most challenging step is to
infer the cost of executing the loops in the
program. This requires bounding the number of
iterations of each loop and finding tight bounds for
the cost of each of its iterations. This article
presents a novel approach to infer upper and lower
bounds from cost relations. These relations are an
extended form of standard recurrence equations which
can be non-deterministic, contain inexact size
constraints and have multiple arguments that
increase and/or decrease. We propose novel
techniques to automatically transform cost relations
into worst-case and best-case deterministic
one-argument recurrence relations. The solution of
each recursive relation provides a precise
upper-bound and lower-bound for executing a
corresponding loop in the program. Importantly,
since the approach is developed at the level of the
cost equations, our techniques are programming
language independent.
@article{AlbertGM13,
  author             = {Elvira Albert and Samir Genaim and Abu Naser Masud},
  title              = {On the Inference of Resource Usage Upper and Lower Bounds},
  journal            = {ACM Transactions on Computational Logic},
  year               = {2013},
  pages              = {22:1--22:35},
  volume             = {14},
  number             = {3},
  publisher          = {ACM},
  issn               = {1529-3785},
  doi                = {10.1145/2499937.2499943},
  projects           = {\doves,\supra,\prometidos},
  rank:acronym       = {TOCL},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {275},
  rank:jcr:q         = {1}
}
|
[14]
|
Amir M. Ben-Amram, Samir Genaim, and Abu Naser Masud.
On the Termination of Integer Loops.
ACM Transactions on Programming Languages and Systems,
34(4):16:1-16:24, 2012.
[ bibtex |
abstract |
DOI ]
In this paper we study the decidability of termination
of simple integer loops. We show that it is
undecidable when the body of the loop is expressed
by a set of linear inequalities where an arbitrary
irrational is allowed as a coefficient; when the
loop is a sequence of instructions, that compute
either linear expressions or the step function; or
when the loop body is a piecewise linear
deterministic update with two pieces. For integer
constraint loops with rational coefficients only, we
show that a Petri net can be simulated with such a
loop which implies some interesting lower bounds.
@article{Ben-AmramGM12a,
  author             = {Amir M. Ben-Amram and Samir Genaim and Abu Naser Masud},
  title              = {On the Termination of Integer Loops},
  journal            = {ACM Transactions on Programming Languages and Systems},
  volume             = {34},
  number             = {4},
  year               = {2012},
  pages              = {16:1--16:24},
  doi                = {10.1145/2400676.2400679},
  publisher          = {ACM},
  issn               = {0164-0925},
  projects           = {\doves,\prometidos},
  rank:acronym       = {TOPLAS},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {1055},
  rank:jcr:q         = {2}
}
|
[15]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Object-Oriented Bytecode Programs.
Theoretical Computer Science, 413(1):142-159, 2012.
[ bibtex |
abstract |
DOI ]
Cost analysis statically approximates the cost of
programs in terms of their input data size. This
paper presents, to the best of our knowledge, the
first approach to the automatic cost analysis of
Object-Oriented bytecode programs. In languages
such as Java and C#, analyzing bytecode has a much
wider application area than analyzing source code
since the latter is often not available. Cost
analysis in this context has to consider, among
others, dynamic dispatch, jumps, the operand stack,
and the heap. Our method takes a bytecode program
and a cost model specifying the resource of
interest, and generates cost relations which
approximate the execution cost of the program with
respect to such resource. We report on COSTA, an
implementation for Java bytecode which can obtain
upper bounds on cost for a large class of programs
and complexity classes. Our basic techniques can be
directly applied to infer cost relations for other
Object-Oriented imperative languages, not
necessarily in bytecode form.
@article{AlbertAGPZ12,
  author             = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
  title              = {Cost Analysis of {Object-Oriented} Bytecode Programs},
  journal            = {Theoretical Computer Science},
  volume             = {413},
  number             = {1},
  year               = {2012},
  pages              = {142--159},
  issn               = {0304-3975},
  publisher          = {Elsevier},
  doi                = {10.1016/j.tcs.2011.07.009},
  projects           = {\doves,\aiverona,\prometidos},
  rank:acronym       = {TCS},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {5177},
  rank:jcr:q         = {4}
}
|
[16]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Closed-Form Upper Bounds in Static Cost Analysis.
Journal of Automated Reasoning, 46(2):161-203, 2011.
[ bibtex |
abstract |
DOI ]
The classical approach to automatic cost analysis
consists of two phases. Given a program and some
measure of cost, the analysis first produces
cost relations (CRs), i.e., recursive
equations which capture the cost of the program in
terms of the size of its input data. Second, CRs
are converted into closed-form, i.e., without
recurrences. Whereas the first phase has received
considerable attention, with a number of cost
analyses available for a variety of programming
languages, the second phase has been comparatively
less studied. This article presents, to our
knowledge, the first practical framework for the
generation of closed-form upper bounds for CRs which
(1) is fully automatic, (2) can handle the
distinctive features of CRs originating from cost
analysis of realistic programming languages, (3) is
not restricted to simple complexity classes, and (4)
produces reasonably accurate solutions. A key idea
in our approach is to view CRs as programs, which
allows applying semantic-based static analyses and
transformations to bound them, namely our method is
based on the inference of ranking functions
and loop invariants and on the use of
partial evaluation.
@article{AlbertAGP11a,
  author             = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
  title              = {Closed-Form Upper Bounds in Static Cost Analysis},
  journal            = {Journal of Automated Reasoning},
  year               = {2011},
  publisher          = {Springer},
  issn               = {0168-7433},
  volume             = {46},
  number             = {2},
  pages              = {161--203},
  doi                = {10.1007/s10817-010-9174-1},
  projects           = {\doves,\prometidos},
  rank:acronym       = {JAR},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {274},
  rank:jcr:q         = {3}
}
|
[17]
|
Samir Genaim and Andy King.
Inferring non-Suspension Conditions for Logic Programs with
Dynamic Scheduling.
ACM Transactions on Computational Logic, 9(3):17:1-17:43,
2008.
[ bibtex |
abstract |
DOI ]
A logic program consists of a logic component and a
control component. The former is a specification in
predicate logic whereas the latter defines the order
of sub-goal selection. The order of sub-goal
selection is often controlled with delay
declarations that specify that a sub-goal is to
suspend until some condition on its arguments is
satisfied. Reasoning about delay declarations is
notoriously difficult for the programmer and it is
not unusual for a program and a goal to reduce to a
state that contains a sub-goal that suspends
indefinitely. Suspending sub-goals are usually
unintended and often indicate an error in the logic
or the control. A number of abstract interpretation
schemes have therefore been proposed for checking
that a given program and goal cannot reduce to such
a state. This paper considers a reversal of this
problem, advocating an analysis that for a given
program infers a class of goals that do not lead to
suspension. This paper shows that this more general
approach can have computational, implementational
and user-interface advantages. In terms of
user-interface, this approach leads to a lightweight
point-and-click mode of operation in which, after
directing the analyser at a file, the user merely
has to inspect the results inferred by the
analysis. In terms of implementation, the analysis
can be straightforwardly realised as two simple
fixpoint computations. In terms of computation, by
modelling n! different schedulings of n
sub-goals with a single Boolean function, it is
possible to reason about the suspension behaviour of
large programs. In particular, the analysis is fast
enough to be applied repeatedly within the program
development cycle. The paper also demonstrates that
the method is precise enough to locate bugs in
existing programs.
@article{GenaimK08,
  author             = {Samir Genaim and Andy King},
  title              = {{I}nferring non-{S}uspension {C}onditions for {L}ogic {P}rograms with {D}ynamic {S}cheduling},
  journal            = {ACM Transactions on Computational Logic},
  volume             = {9},
  number             = {3},
  pages              = {17:1--17:43},
  year               = {2008},
  issn               = {1529-3785},
  publisher          = {ACM},
  doi                = {10.1145/1352582.1352585},
  projects           = {\deter},
  rank:acronym       = {TOCL},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {374},
  rank:jcr:q         = {1}
}
|
[18]
|
Maurice Bruynooghe, Michael Codish, John P. Gallagher, Samir Genaim, and Wim Vanhoof.
Termination Analysis of Logic Programs through Combination
of Type-based Norms.
ACM Transactions on Programming Languages and Systems,
29(2):10:1-10:44, 2007.
[ bibtex |
abstract |
DOI ]
This paper makes two contributions to the work on
semantics based termination analysis for logic
programs. The first involves a novel notion
of type-based norm where for a given type a
corresponding norm is defined to count in a term the
number of sub-terms of that type. This provides a
collection of candidate norms, one for each type
defined in the program. The second enables
an analyser to base termination proofs on the
combination of several different norms. This is
useful when different norms are better suited to
justify the termination of different parts of the
program. Application of the two contributions
together consists in considering the combination of
the type-based candidate norms for a given
program. This results in a powerful and practical
technique. Both contributions have been introduced
into a working termination analyser.
Experimentation indicates that they yield
state-of-the-art results in a fully automatic
analysis tool, improving with respect to methods
that do not use both types and combined norms.
@article{BruynoogheCGGV07,
  author             = {Maurice Bruynooghe and Michael Codish and John P. Gallagher and Samir Genaim and Wim Vanhoof},
  title              = {{T}ermination {A}nalysis of {L}ogic {P}rograms through {C}ombination of {T}ype-based {N}orms},
  journal            = {ACM Transactions on Programming Languages and Systems},
  volume             = {29},
  number             = {2},
  pages              = {10:1--10:44},
  year               = {2007},
  issn               = {0164-0925},
  publisher          = {ACM},
  doi                = {10.1145/1216374.1216378},
  projects           = {},
  rank:acronym       = {TOPLAS},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {1339},
  rank:jcr:q         = {2}
}
|
[19]
|
Samir Genaim and Michael Codish.
Inferring Termination Conditions for Logic Programs using
Backwards Analysis.
Theory and Practice of Logic Programming, 5(1-2):75-91, 2005.
[ bibtex |
abstract |
DOI ]
This paper focuses on the inference of modes for which
a logic program is guaranteed to terminate. This
generalises traditional termination analysis where
an analyser tries to verify termination for a
specified mode. Our contribution is a methodology
in which components of traditional termination
analysis are combined with backwards analysis to
obtain an analyser for termination inference. We
identify a condition on the components of the
analyser which guarantees that termination inference
will infer all modes which can be checked to
terminate. The application of this methodology to
enhance a traditional termination analyser to
perform also termination inference is demonstrated.
@article{GenaimC05,
  author             = {Samir Genaim and Michael Codish},
  title              = {{I}nferring {T}ermination {C}onditions for {L}ogic {P}rograms using {B}ackwards {A}nalysis},
  journal            = {Theory and Practice of Logic Programming},
  volume             = {5},
  number             = {1--2},
  pages              = {75--91},
  year               = {2005},
  issn               = {1471-0684},
  publisher          = {Cambridge University Press},
  doi                = {10.1017/S1471068404002236},
  bibsource          = {DBLP, http://dblp.uni-trier.de},
  projects           = {},
  rank:acronym       = {TPLP},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {97},
  rank:jcr:q         = {1}
}
|
[20]
|
Samir Genaim, Jacob M. Howe, and Michael Codish.
Worst-case Groundness Analysis using Definite Boolean
Functions.
Theory and Practice of Logic Programming, 1(5):611-615, 2001.
[ bibtex |
abstract |
DOI ]
This note illustrates theoretical worst-case scenarios
for groundness analyses obtained through abstract
interpretation over the abstract domains of
definite (Def) and positive (Pos)
Boolean functions. For Def, an example is given for
which any Def-based abstract interpretation for
groundness analysis follows a chain which is
exponential in the number of argument positions as
well as in the number of clauses but sub-exponential
in the size of the program. For Pos, we strengthen
a previous result by illustrating an example for
which any Pos-based abstract interpretation for
groundness analysis follows a chain which is
exponential in the size of the program. It remains
an open problem to determine if the worst case for
Def is really as bad as that for Pos.
@article{GenaimHC01,
  author             = {Samir Genaim and Jacob M. Howe and Michael Codish},
  title              = {{W}orst-case {G}roundness {A}nalysis using {D}efinite {B}oolean {F}unctions},
  journal            = {Theory and Practice of Logic Programming},
  volume             = {1},
  number             = {5},
  pages              = {611--615},
  year               = {2001},
  issn               = {1471-0684},
  publisher          = {Cambridge University Press},
  doi                = {10.1017/S1471068401001077},
  projects           = {},
  rank:acronym       = {TPLP},
  rank:aneca:class   = {\anecamr},
  rank:jcr:totalcite = {N/A},
  rank:jcr:q         = {1}
}
|
[1]
|
Elvira Albert, Samir Genaim, Enrique Martin-Martin, Alicia Merayo, and Albert
Rubio.
Lower-Bound Synthesis Using Loop Specialization and Max-SMT.
In Alexandra Silva and K. Rustan M. Leino, editors, Computer
Aided Verification - 33rd International Conference, CAV 2021, Virtual
Event, July 20-23, 2021, Proceedings, Part II, volume 12760 of
Lecture Notes in Computer Science, pages 863-886. Springer, 2021.
[ bibtex |
abstract |
DOI |
http ]
@inproceedings{AlbertGMMR21,
  author           = {Elvira Albert and Samir Genaim and Enrique Martin{-}Martin and Alicia Merayo and Albert Rubio},
  editor           = {Alexandra Silva and K. Rustan M. Leino},
  title            = {{L}ower-{B}ound {S}ynthesis {U}sing {L}oop {S}pecialization and {M}ax-{SMT}},
  booktitle        = {Computer Aided Verification - 33rd International Conference, {CAV} 2021, Virtual Event, July 20-23, 2021, Proceedings, Part {II}},
  series           = {Lecture Notes in Computer Science},
  volume           = {12760},
  pages            = {863--886},
  publisher        = {Springer},
  issn             = {0302-9743},
  year             = {2021},
  url              = {https://doi.org/10.1007/978-3-030-81688-9_40},
  doi              = {10.1007/978-3-030-81688-9_40},
  projects         = {\freetech},
  rank:acronym     = {CAV},
  rank:aneca:class = {\anecamr},
  rank:grin:class  = {Class 1 (A+)}
}
|
[2]
|
Amir M. Ben-Amram, Jesús Doménech, and Samir Genaim.
Multiphase-Linear Ranking Functions and Their Relation to
Recurrent Sets.
In Bor-Yuh Evan Chang, editor, Proceedings of the 26th
International Symposium on Static Analysis (SAS'19), volume 11822 of
Lecture Notes in Computer Science, pages 459-480. Springer, 2019.
[ bibtex |
abstract |
DOI ]
Multiphase ranking functions (MΦRF) are
used to prove termination of loops in which the
computation progresses through a number of
phases. They consist of linear functions
f1,...,fd where fi decreases during
the ith phase. This work provides new insights
regarding MΦRFs for loops described by
a conjunction of linear constraints (SLC loops). In
particular, we consider the existence problem (does
a given SLC loop admit a MΦRF). The
decidability and complexity of the problem, in the
case that d is restricted by an input parameter,
have been settled in recent work, while in this
paper we make progress regarding the existence
problem without a given depth bound. Our new
approach, while falling short of a decision
procedure for the general case, reveals some
important insights into the structure of these
functions. Interestingly, it relates the problem of
seeking MΦRFs to that of seeking
recurrent sets (used to prove nontermination). It
also helps in identifying classes of loops for which
MΦRFs are sufficient, and thus have
linear runtime bounds. For the depth-bounded
existence problem, we obtain a new polynomial-time
procedure that can provide witnesses for
negative answers as well. To obtain this procedure
we introduce a new representation for SLC loops, the
difference polyhedron replacing the customary
transition polyhedron. We find that this
representation yields new insights on
MΦRFs and SLC loops in general, and
some results on termination and nontermination of
bounded SLC loops become straightforward.
@inproceedings{Ben-AmramDG19,
  author           = {Amir M. Ben{-}Amram and Jes{\'{u}}s J. Dom{\'{e}}nech and Samir Genaim},
  title            = {{M}ultiphase-{L}inear {R}anking {F}unctions and {T}heir {R}elation to {R}ecurrent {S}ets},
  editor           = {Bor{-}Yuh Evan Chang},
  booktitle        = {Proceedings of the 26th International Symposium on Static Analysis (SAS'19)},
  series           = {Lecture Notes in Computer Science},
  volume           = {11822},
  pages            = {459--480},
  publisher        = {Springer},
  year             = {2019},
  isbn             = {978-3-030-32303-5},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-030-32304-2_22},
  projects         = {\lobass,\freetech,\forte},
  rank:acronym     = {SAS},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A-)}
}
|
[3]
|
Amir M. Ben-Amram and Samir Genaim.
On Multiphase-Linear Ranking Functions.
In Viktor Kuncak and Rupak Majumdar, editors, Proceedings of the
29th International Conference on Computer Aided Verification (CAV'17),
volume 10427 of Lecture Notes in Computer Science, pages 601-620.
Springer, 2017.
[ bibtex |
abstract |
DOI ]
Multiphase ranking functions (MΦRF) were
proposed as a means to prove the termination of a
loop in which the computation progresses through a
number of "phases", and the progress of each phase
is described by a different linear ranking function.
Our work provides new insights regarding such
functions for loops described by a conjunction of
linear constraints (single-path loops). We provide
a complete polynomial-time solution to the problem
of existence and of synthesis of MΦRF
of bounded depth (number of phases), when variables
range over rational or real numbers; a complete
solution for the (harder) case that variables are
integer, with a matching lower-bound proof, showing
that the problem is coNP-complete; and a new theorem
which bounds the number of iterations for loops with
MΦRFs. Surprisingly, the bound is
linear, even when the variables involved change in
non-linear way. We also consider a type of
lexicographic ranking functions more
expressive than types of lexicographic functions for
which complete solutions have been given so far. We
prove that for the above type of loops,
lexicographic functions can be reduced to
MΦRFs, and thus the questions of
complexity of detection, synthesis, and iteration
bounds are also answered for this class.
@inproceedings{Ben-AmramG17,
  author           = {Amir M. Ben-Amram and Samir Genaim},
  editor           = {Viktor Kuncak and Rupak Majumdar},
  title            = {{O}n {M}ultiphase-{L}inear {R}anking {F}unctions},
  booktitle        = {Proceedings of the 29th International Conference on Computer Aided Verification (CAV'17)},
  series           = {Lecture Notes in Computer Science},
  volume           = {10427},
  pages            = {601--620},
  publisher        = {Springer},
  year             = {2017},
  isbn             = {978-3-319-63389-3},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-319-63390-9_32},
  projects         = {\vivac,\sicomoro,\lobass},
  rank:acronym     = {CAV},
  rank:aneca:class = {\anecamr},
  rank:grin:class  = {Class 1 (A+)}
}
|
[4]
|
Jesús Doménech, Samir Genaim, Einar Broch Johnsen, and Rudolf Schlatte.
EasyInterface: A Toolkit for Rapid Development of GUIs for
Research Prototype Tools.
In Marieke Huisman and Julia Rubin, editors, Proceedings of the
20th International Conference on Fundamental Approaches to Software
Engineering (FASE'17), volume 10202 of Lecture Notes in Computer
Science, pages 379-383. Springer, 2017.
[ bibtex |
abstract |
DOI ]
EasyInterface is an open-source toolkit to develop
web-based graphical user interfaces (GUIs) for
research prototype tools. This toolkit enables
researchers to make their tool prototypes available
to the community and integrating them in a common
environment, rapidly and without being familiar with
web programming or GUI libraries.
@inproceedings{DomenechGJS17,
  author           = {Jes{\'{u}}s Dom{\'{e}}nech and Samir Genaim and Einar Broch Johnsen and Rudolf Schlatte},
  editor           = {Marieke Huisman and Julia Rubin},
  title            = {EasyInterface: {A} {T}oolkit for {R}apid {D}evelopment of {GUI}s for {R}esearch {P}rototype {T}ools},
  booktitle        = {Proceedings of the 20th International Conference on Fundamental Approaches to Software Engineering (FASE'17)},
  series           = {Lecture Notes in Computer Science},
  volume           = {10202},
  pages            = {379--383},
  publisher        = {Springer},
  year             = {2017},
  isbn             = {978-3-662-54493-8},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-662-54494-5_22},
  projects         = {\vivac,\sicomoro,\lobass,\envisage},
  rank:acronym     = {FASE},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A-)}
}
|
[5]
|
Elvira Albert, Samir Genaim, and Pablo Gordillo.
May-Happen-in-Parallel Analysis with Returned Futures.
In Deepak D'Souza and K. Narayan Kumar, editors, Proceedings of
the 15th International Symposium on Automated Technology for Verification and
Analysis (ATVA'17), volume 10482 of Lecture Notes in Computer Science,
pages 42-58. Springer, 2017.
[ bibtex |
abstract |
DOI ]
May-Happen-in-Parallel (MHP) is a fundamental analysis
to reason about concurrent programs. It infers the
pairs of program points that may execute in
parallel, or interleave their execution. This
information is essential to prove, among other
things, absence of data races, deadlock freeness,
termination, and resource usage. This paper presents
an MHP analysis for asynchronous programs that use
futures as synchronization mechanism. Future
variables are available in most concurrent languages
(e.g., in the library concurrent of Java,
in the standard thread library of C++, and in Scala
and Python). The novelty of our analysis is that it
is able to infer MHP relations that involve future
variables that are returned by asynchronous
tasks. Futures are returned when a task needs to
await for another task created in an inner
scope, e.g., task t needs to await for the
termination of task p that is spawned by task q
that is spawned during the execution of t (not
necessarily by t). Thus, task p is awaited by
task t which is in an outer scope. The challenge
for the analysis is to (back)propagate the
synchronization of tasks through future variables
from inner to outer scopes.
@inproceedings{AlbertGG17,
  author           = {Elvira Albert and Samir Genaim and Pablo Gordillo},
  editor           = {Deepak D'Souza and K. Narayan Kumar},
  title            = {May-{H}appen-in-{P}arallel {A}nalysis with {R}eturned {F}utures},
  booktitle        = {Proceedings of the 15th International Symposium on Automated Technology for Verification and Analysis (ATVA'17)},
  series           = {Lecture Notes in Computer Science},
  volume           = {10482},
  pages            = {42--58},
  publisher        = {Springer},
  year             = {2017},
  isbn             = {978-3-319-68166-5},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-319-68167-2_3},
  projects         = {\sicomoro,\lobass},
  rank:acronym     = {ATVA},
  rank:grin:class  = {Class 3 (B)}
}
|
[6]
|
Elvira Albert, Antonio Flores-Montoya, and Samir Genaim.
May-Happen-in-Parallel Analysis with Condition
Synchronization.
In Marko C. J. D. van Eekelen and Ugo Dal Lago, editors,
Proceedings of the 4th International Workshop on Foundational and Practical
Aspects of Resource Analysis (FOPARA'15), Revised Selected Papers, volume
9964 of Lecture Notes in Computer Science, pages 1-19, 2016.
[ bibtex |
abstract |
DOI ]
Concurrent programs can synchronize by means of
conditions and/or message passing. In the former,
processes communicate and synchronize by means of
shared variables that several processes can read and
write. In the latter, communication is by sending,
receiving and waiting for messages. Condition
synchronization is often more efficient but also
more difficult to analyze and reason about. In this
paper, we leverage an existing
may-happen-in-parallel (MHP) analysis, which
was designed for a particular form of message
passing based on future variables, to handle
condition synchronization effectively, thus enabling
the analysis of programs that use both mechanisms.
The information inferred by an MHP has been proven
to be essential to infer both safety properties
(e.g., deadlock freedom) and liveness properties
(termination and resource boundedness) of concurrent
programs.
@inproceedings{AlbertFG16,
  author           = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim},
  editor           = {Marko C. J. D. van Eekelen and Ugo Dal Lago},
  title            = {{M}ay-{H}appen-in-{P}arallel {A}nalysis with {C}ondition {S}ynchronization},
  booktitle        = {Proceedings of the 4th International Workshop on Foundational and Practical Aspects of Resource Analysis (FOPARA'15), Revised Selected Papers},
  series           = {Lecture Notes in Computer Science},
  volume           = {9964},
  pages            = {1--19},
  publisher        = {Springer},
  year             = {2016},
  isbn             = {978-3-319-46558-6},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-319-46559-3_1},
  projects         = {},
  rank:acronym     = {FOPARA}
}
|
[7]
|
Amir M. Ben-Amram and Samir Genaim.
Complexity of Bradley-Manna-Sipma Lexicographic Ranking
Functions.
In Daniel Kroening and Corina Pasareanu, editors, Proceedings of
the 27th International Conference on Computer Aided Verification (CAV'15),
volume 9207 of Lecture Notes in Computer Science, pages 304-321.
Springer, July 2015.
[ bibtex |
abstract |
DOI ]
In this paper we turn the spotlight on a class of
lexicographic ranking functions introduced by
Bradley, Manna and Sipma in a seminal CAV 2005
paper, and establish for the first time the
complexity of some problems involving the inference
of such functions for linear-constraint loops
(without precondition). We show that finding such a
function, if one exists, can be done in polynomial
time in a way which is sound and complete when the
variables range over the rationals (or reals). We
show that when variables range over the integers,
the problem is harder — deciding the existence of a
ranking function is coNP-complete. Next, we study
the problem of minimizing the number of components
in the ranking function (a.k.a. the dimension). This
number is interesting in contexts like computing
iteration bounds and loop
parallelization. Surprisingly, and unlike the
situation for some other classes of lexicographic
ranking functions, we find that even deciding
whether a two-component ranking function exists is
harder than the unrestricted problem: NP-complete
over the rationals and ΣP2-complete over
the integers.
@inproceedings{Ben-AmramG15,
  author           = {Amir M. Ben-Amram and Samir Genaim},
  editor           = {Daniel Kroening and Corina Pasareanu},
  title            = {{C}omplexity of {B}radley-{M}anna-{S}ipma {L}exicographic {R}anking {F}unctions},
  booktitle        = {Proceedings of the 27th International Conference on Computer Aided Verification (CAV'15)},
  series           = {Lecture Notes in Computer Science},
  volume           = {9207},
  pages            = {304--321},
  publisher        = {Springer},
  month            = jul,
  year             = {2015},
  isbn             = {978-3-319-21667-6},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-319-21668-3_18},
  projects         = {\envisage,\vivac,\sicomoro},
  rank:acronym     = {CAV},
  rank:aneca:class = {\anecamr},
  rank:grin:class  = {Class 1 (A+)}
}
|
[8]
|
Pierre Ganty, Samir Genaim, Ratan Lal, and Pavithra Prabhakar.
From Non-Zenoness Verification to Termination.
In Proceedings of the 13th ACM/IEEE International Conference
on Formal Methods and Models for Codesign (MEMOCODE'2015), pages 228-237.
IEEE, 2015.
[ bibtex |
abstract |
DOI ]
We investigate the problem of verifying the absence
of zeno executions in a hybrid system. A zeno
execution is one in which there are infinitely many
discrete transitions in a finite time interval. The
presence of zeno executions poses challenges towards
implementation and analysis of hybrid control
systems. We present a simple transformation of the
hybrid system which reduces the non-zenoness
verification problem to the termination verification
problem, that is, the original system has no zeno
executions if and only if the transformed system has
no non-terminating executions. This provides both
theoretical insights and practical techniques for
non-zenoness verification. Further, it also
provides techniques for isolating parts of the
hybrid system and its initial states which do not
exhibit zeno executions. We illustrate the
feasibility of our approach by applying it on hybrid
system examples.
@inproceedings{GantyGLP15,
  author       = {Pierre Ganty and Samir Genaim and Ratan Lal and Pavithra Prabhakar},
  title        = {{F}rom {N}on-{Z}enoness {V}erification to {T}ermination},
  booktitle    = {Proceedings of the 13th {ACM/IEEE} International Conference on Formal Methods and Models for Codesign (MEMOCODE'2015)},
  pages        = {228--237},
  publisher    = {{IEEE}},
  year         = {2015},
  doi          = {10.1109/MEMCOD.2015.7340490},
  projects     = {},
  rank:acronym = {MEMOCODE}
}
|
[9]
|
Elvira Albert, Samir Genaim, and Pablo Gordillo.
May-Happen-in-Parallel Analysis for Asynchronous Programs
with Inter-Procedural Synchronization.
In Sandrine Blazy and Thomas Jensen, editors, Proceedings of the
22nd International Symposium on Static Analysis (SAS'15), volume 9291 of
Lecture Notes in Computer Science, pages 72-89. Springer, 2015.
[ bibtex |
abstract |
DOI ]
A may-happen-in-parallel (MHP) analysis
computes pairs of program points that may execute in
parallel across different distributed
components. This information has been proven to be
essential to infer both safety properties (e.g.,
deadlock freedom) and liveness properties
(termination and resource boundedness) of
asynchronous programs. Existing MHP analyses take
advantage of the synchronization points to learn
that one task has finished and thus will not happen
in parallel with other tasks that are still
active. Our starting point is an existing MHP
analysis developed for intra-procedural
synchronization, i.e., it only allows
synchronizing with tasks that have been spawned
inside the current task. This paper leverages such
MHP analysis to handle inter-procedural
synchronization, i.e., a task spawned by one task
can be awaited within a different task. This is
challenging because task synchronization goes beyond
the boundaries of methods, and thus the inference of
MHP relations requires novel extensions to capture
inter-procedural dependencies. The analysis has
been implemented and it can be tried online.
@inproceedings{AlbertGG15,
  author           = {Elvira Albert and Samir Genaim and Pablo Gordillo},
  editor           = {Sandrine Blazy and Thomas Jensen},
  title            = {{M}ay-{H}appen-in-{P}arallel {A}nalysis for {A}synchronous {P}rograms with {I}nter-{P}rocedural {S}ynchronization},
  booktitle        = {Proceedings of the 22nd International Symposium on Static Analysis (SAS'15)},
  series           = {Lecture Notes in Computer Science},
  volume           = {9291},
  pages            = {72--89},
  publisher        = {Springer},
  year             = {2015},
  isbn             = {978-3-662-48287-2},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-662-48288-9_5},
  projects         = {\envisage,\vivac,\sicomoro},
  rank:acronym     = {SAS},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A-)}
}
|
[10]
|
Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and
Guillermo Román-Díez.
Resource Analysis: From Sequential to Concurrent and
Distributed Programs.
In Nikolaj Bjørner and Frank D. de Boer, editors, Proceedings
of the 20th International Symposium on Formal Methods (FM'15), volume 9109
of Lecture Notes in Computer Science, pages 3-17. Springer, 2015.
[ bibtex |
abstract |
DOI ]
Resource analysis aims at automatically inferring
upper/lower bounds on the worst/best-case cost of
executing programs. Ideally, a resource analyzer
should be parametric on the cost model, i.e., the
type of cost that the user wants infer (e.g., number
of steps, amount of memory allocated, amount of data
transmitted, etc.). The inferred upper bounds have
important applications in the fields of program
optimization, verification and certification. In
this talk, we will review the basic techniques used
in resource analysis of sequential programs and the
new extensions needed to handle concurrent and
distributed systems.
@inproceedings{AlbertACGGMPR15,
  author           = {Elvira Albert and Puri Arenas and Jes{\'{u}}s Correas and Samir Genaim and Miguel G{\'{o}}mez{-}Zamalloa and Enrique Martin{-}Martin and Germ{\'{a}}n Puebla and Guillermo Rom{\'{a}}n{-}D{\'{\i}}ez},
  editor           = {Nikolaj Bj{\o}rner and Frank D. de Boer},
  title            = {{R}esource {A}nalysis: {F}rom {S}equential to {C}oncurrent and {D}istributed {P}rograms},
  booktitle        = {Proceedings of the 20th International Symposium on Formal Methods (FM'15)},
  series           = {Lecture Notes in Computer Science},
  volume           = {9109},
  pages            = {3--17},
  publisher        = {Springer},
  year             = {2015},
  isbn             = {978-3-319-19248-2},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-319-19249-9_1},
  projects         = {\envisage,\vivac,\sicomoro},
  rank:acronym     = {FM},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A)}
}
|
[11]
|
Erika Ábrahám, Costas Bekas, Ivona Brandic, Samir Genaim,
Einar Broch Johnsen, Ivan Kondov, Sabri Pllana, and Achim Streit.
Preparing HPC Applications for Exascale: Challenges and
Recommendations.
In Leonard Barolli, Makoto Takizawa, Hui-Huang Hsu, Tomoya Enokido,
and Fatos Xhafa, editors, Proceedings of the 18th International
Conference on Network-Based Information Systems (NBis'15), pages 401-406.
IEEE Computer Society, 2015.
[ bibtex |
abstract |
DOI ]
@inproceedings{AbrahamBBGJKPS15,
  author       = {Erika {\'{A}}brah{\'{a}}m and Costas Bekas and Ivona Brandic and Samir Genaim and Einar Broch Johnsen and Ivan Kondov and Sabri Pllana and Achim Streit},
  editor       = {Leonard Barolli and Makoto Takizawa and Hui{-}Huang Hsu and Tomoya Enokido and Fatos Xhafa},
  title        = {Preparing {HPC} Applications for Exascale: Challenges and Recommendations},
  booktitle    = {Proceedings of the 18th International Conference on Network-Based Information Systems (NBis'15)},
  pages        = {401--406},
  publisher    = {{IEEE} Computer Society},
  year         = {2015},
  doi          = {10.1109/NBiS.2015.61},
  projects     = {},
  rank:acronym = {NBis},
  abstract     = {While the HPC community is working towards the development of the first Exaflop computer (expected around 2020), after reaching the Petaflop milestone in 2008 still only few HPC applications are able to fully exploit the capabilities of Petaflop systems. In this paper we argue that efforts for preparing HPC applications for Exascale should start before such systems become available. We identify challenges that need to be addressed and recommend solutions in key areas of interest, including formal modeling, static analysis and optimization, runtime analysis and optimization, and autonomic computing. Furthermore, we outline a conceptual framework for porting HPC applications to future Exascale computing systems and propose steps for its implementation.}
}
|
[12]
|
Elvira Albert, Puri Arenas, Antonio Flores-Montoya, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and Guillermo Román-Díez.
SACO: Static Analyzer for Concurrent Objects.
In Erika Ábrahám and Klaus Havelund, editors,
Proceedings of the 20th International Conference on Tools and Algorithms for
the Construction and Analysis of Systems (TACAS'14), volume 8413 of
Lecture Notes in Computer Science, pages 562-567. Springer, 2014.
[ bibtex |
abstract |
DOI ]
We present the main concepts, usage and implementation
of SACO, a static analyzer for concurrent
objects. Interestingly, SACO is able to infer both
liveness (namely termination and resource
boundedness) and safety properties (namely deadlock
freeness) of programs based on concurrent
objects. The system integrates auxiliary analyses
such as points-to and may-happen-in-parallel, which
are essential for increasing the accuracy of the
aforementioned more complex properties. SACO
provides accurate information about the dependencies
which may introduce deadlocks, loops whose
termination is not guaranteed, and upper bounds on
the resource consumption of methods.
@inproceedings{AlbertAFGGMPR14,
  author           = {Elvira Albert and Puri Arenas and Antonio Flores-Montoya and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Enrique Martin-Martin and Germ{\'a}n Puebla and Guillermo Rom{\'a}n-D{\'\i}ez},
  editor           = {Erika {\'A}brah{\'a}m and Klaus Havelund},
  title            = {{SACO}: {S}tatic {A}nalyzer for {C}oncurrent {O}bjects},
  booktitle        = {Proceedings of the 20th International Conference on Tools and Algorithms for the Construction and Analysis of Systems (TACAS'14)},
  series           = {Lecture Notes in Computer Science},
  volume           = {8413},
  pages            = {562--567},
  publisher        = {Springer},
  year             = {2014},
  isbn             = {978-3-642-54862-8},
  issn             = {0302-9743},
  doi              = {10.1007/978-3-642-54862-8_46},
  projects         = {\doves,\envisage,\vivac},
  rank:acronym     = {TACAS},
  rank:aneca:class = {\anecamr},
  rank:grin:class  = {Class 1 (A+)}
}
|
[13]
|
Elvira Albert, Samir Genaim, and Raúl Gutiérrez.
A Transformational Approach to Resource Analysis with
Typed-Norms.
In Gopal Gupta and Ricardo Peña, editors, Proceedings of the
23rd International Symposium on Logic-based Program Synthesis and
Transformation (LOPSTR'13), volume 8901 of Lecture Notes in Computer
Science, pages 38-53. Springer, 2014.
[ bibtex |
abstract |
DOI ]
In order to automatically infer the resource
consumption of programs, analyzers track how
data sizes change along program's execution.
Typically, analyzers measure the sizes of data by
applying norms which are mappings from data
to natural numbers that represent the sizes of the
corresponding data. When norms are defined by taking
type information into account, they are named
typed-norms. The main contribution of this
extended abstract is a transformational approach to
resource analysis with typed-norms. The analysis is
based on a transformation of the program into an
intermediate abstract program in which each
variable is abstracted with respect to all
considered norms which are valid for its type. We
also sketch a simple analysis that can be used to
automatically infer the required, useful,
typed-norms from programs.
@inproceedings{AlbertGG14,
  author       = {Elvira Albert and Samir Genaim and Ra{\'u}l Guti{\'e}rrez},
  editor       = {Gupta, Gopal and Pe{\~n}a, Ricardo},
  title        = {{A} {T}ransformational {A}pproach to {R}esource {A}nalysis with {T}yped-{N}orms},
  booktitle    = {Proceedings of the 23rd International Symposium on Logic-based Program Synthesis and Transformation (LOPSTR'13)},
  series       = {Lecture Notes in Computer Science},
  volume       = {8901},
  pages        = {38--53},
  publisher    = {Springer},
  year         = {2014},
  isbn         = {978-3-319-14124-4},
  issn         = {0302-9743},
  doi          = {10.1007/978-3-319-14125-1_3},
  projects     = {\doves,\envisage,\vivac},
  rank:acronym = {LOPSTR}
}
|
[14]
|
Elvira Albert, Samir Genaim, and Enrique Martin-Martin.
May-Happen-in-Parallel Analysis for Priority-based
Scheduling.
In Ken McMillan, Aart Middeldorp, and Andrei Voronkov, editors,
Proceedings of the 19th International Conference on Logic for Programming
Artificial Intelligence and Reasoning (LPAR-19), volume 8312 of Lecture
Notes in Computer Science, pages 18-34. Springer, December 2013.
[ bibtex |
abstract |
DOI ]
A may-happen-in-parallel (MHP) analysis infers the
sets of pairs of program points that may execute in
parallel along a program's execution. This is an
essential piece of information to detect data races,
and also to infer more complex properties of
concurrent programs, e.g., deadlock freeness,
termination and resource consumption analyses can
greatly benefit from the MHP relations to increase
their accuracy. Previous MHP analyses have assumed a
worst case scenario by adopting a simplistic
(non-deterministic) task scheduler which can select
any available task. While the results of the
analysis for a non-deterministic scheduler are
obviously sound, they can lead to an overly
pessimistic result. We present an MHP analysis for
an asynchronous language with prioritized tasks
buffers. Priority-based scheduling is arguably the
most common scheduling strategy adopted in the
implementation of concurrent languages. The
challenge is to be able to take task priorities into
account at static analysis time in order to filter
out unfeasible MHP pairs.
@inproceedings{AlbertGM13a,
author = {Elvira Albert and Samir Genaim and Enrique Martin-Martin},
title = {May-{H}appen-in-{P}arallel {A}nalysis for {P}riority-based {S}cheduling},
booktitle = {Proceedings of the 19th International Conference on Logic for Programming Artificial Intelligence and Reasoning (LPAR-19)},
editor = {McMillan, Ken and Middeldorp, Aart and Voronkov, Andrei},
series = {Lecture Notes in Computer Science},
volume = {8312},
pages = {18--34},
publisher = {Springer},
month = dec,
year = {2013},
isbn = {978-3-642-45220-8},
issn = {0302-9743},
doi = {10.1007/978-3-642-45221-5_2},
projects = {\doves,\supra,\envisage,\prometidos,\vivac},
rank:acronym = {LPAR},
rank:grin:class = {Class 3 (B)}
}
|
[15]
|
Elvira Albert, Antonio Flores-Montoya, Samir Genaim, and Enrique Martin-Martin.
Termination and Cost Analysis of Loops with Concurrent Interleavings.
In Dang Van Hung and Mizuhito Ogawa, editors, Proceedings of the
11th International Symposium on Automated Technology for Verification and
Analysis (ATVA'13), volume 8172 of Lecture Notes in Computer Science,
pages 349-364. Springer, 2013.
[ bibtex |
abstract |
DOI ]
By following a rely-guarantee style of reasoning, we
present a novel termination analysis for concurrent
programs that, in order to prove termination of a
considered loop, makes the assumption that the
"shared-data that is involved in the termination
proof of the loop is modified a finite number of
times". In a subsequent step, it proves that this
assumption holds in all code whose execution might
interleave with such loop. At the core of the
analysis, we use a may-happen-in-parallel analysis
to restrict the set of program points whose
execution can interleave with the considered
loop. Interestingly, the same kind of reasoning can
be applied to infer upper bounds on the number of
iterations of loops with concurrent
interleavings. To the best of our knowledge, this is
the first method to automatically bound the cost of
such kind of loops.
@inproceedings{AlbertFGM13,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim and Enrique Martin-Martin},
title = {{T}ermination and {C}ost {A}nalysis of {L}oops with {C}oncurrent {I}nterleavings},
booktitle = {Proceedings of the 11th International Symposium on Automated Technology for Verification and Analysis (ATVA'13)},
editor = {Dang Van Hung and Mizuhito Ogawa},
pages = {349--364},
volume = {8172},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
year = {2013},
doi = {10.1007/978-3-319-02444-8_25},
isbn = {978-3-319-02443-1},
issn = {0302-9743},
projects = {\doves,\supra,\envisage,\prometidos,\vivac},
rank:acronym = {ATVA},
rank:grin:class = {Class 3 (B)}
}
|
[16]
|
Diego Esteban Alonso Blas, Puri Arenas, and Samir Genaim.
Precise Cost Analysis via Local Reasoning.
In Dang Van Hung and Mizuhito Ogawa, editors, Proceedings of the
11th International Symposium on Automated Technology for Verification and
Analysis (ATVA'13), volume 8172 of Lecture Notes in Computer Science,
pages 319-333. Springer, 2013.
[ bibtex |
abstract |
DOI ]
The classical approach to static cost analysis is
based on first transforming a given program into a
set of cost relations, and then solving them into
closed-form upper-bounds. The quality of the
upper-bounds and the scalability of such cost
analysis highly depend on the precision and
efficiency of the solving phase. Several techniques
for solving cost relations exist, some are efficient
but not precise enough, and some are very precise
but do not scale to large cost relations. In this
paper we explore the gap between these techniques,
seeking for ones that are both precise and
efficient. In particular, we propose a novel
technique that first splits the cost relation into
several atomic ones, and then uses precise local
reasoning for some and less precise but efficient
reasoning for others. For the precise local
reasoning, we propose several methods that define
the cost as a solution of a universally quantified
formula. Preliminary experiments demonstrate the
effectiveness of our approach.
@inproceedings{AlonsoAG13,
author = {Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim},
title = {{P}recise {C}ost {A}nalysis via {L}ocal {R}easoning},
booktitle = {Proceedings of the 11th International Symposium on Automated Technology for Verification and Analysis (ATVA'13)},
editor = {Dang Van Hung and Mizuhito Ogawa},
series = {Lecture Notes in Computer Science},
volume = {8172},
pages = {319--333},
publisher = {Springer},
year = {2013},
isbn = {978-3-319-02443-1},
issn = {0302-9743},
doi = {10.1007/978-3-319-02444-8_23},
projects = {\doves,\supra,\envisage,\prometidos,\vivac},
rank:acronym = {ATVA},
rank:grin:class = {Class 3 (B)}
}
|
[17]
|
Antonio Flores-Montoya, Elvira Albert, and Samir Genaim.
May-Happen-in-Parallel based Deadlock Analysis for
Concurrent Objects.
In Dirk Beyer and Michele Boreale, editors, Proceedings of the
International Conference on Formal Techniques for Distributed Systems
(FMOODS/FORTE'13), volume 7892 of Lecture Notes in Computer Science,
pages 273-288. Springer, 2013.
[ bibtex |
abstract |
DOI ]
We present a novel deadlock analysis for concurrent
objects based on the results inferred by a points-to
analysis and a may-happen-in-parallel (MHP)
analysis. Similarly to other analysis, we build a
dependency graph such that the absence of cycles in
the graph ensures deadlock freeness. An MHP analysis
provides an over-approximation of the pairs of
program points that may be running in parallel. The
crux of the method is that the analysis integrates
the MHP information within the dependency graph in
order to discard unfeasible cycles that otherwise
would lead to false positives. We argue that our
analysis is more precise and/or efficient than
previous proposals for deadlock analysis of
concurrent objects. As regards accuracy, we are able
to handle cases that other analyses have pointed out
as challenges. As regards efficiency, the complexity
of our deadlock analysis is polynomial.
@inproceedings{FloresAG13,
author = {Antonio Flores-Montoya and Elvira Albert and Samir Genaim},
title = {{M}ay-{H}appen-in-{P}arallel based {D}eadlock {A}nalysis for {C}oncurrent {O}bjects},
booktitle = {Proceedings of the International Conference on Formal Techniques for Distributed Systems (FMOODS/FORTE'13)},
editor = {Dirk Beyer and Michele Boreale},
series = {Lecture Notes in Computer Science},
volume = {7892},
pages = {273--288},
publisher = {Springer},
year = {2013},
isbn = {978-3-642-38591-9},
issn = {0302-9743},
doi = {10.1007/978-3-642-38592-6_19},
projects = {\doves,\supra,\prometidos},
rank:acronym = {FMOODS/FORTE},
rank:grin:class = {Class 3 (B-)}
}
|
[18]
|
Pierre Ganty and Samir Genaim.
Proving Termination Starting from the End.
In Natasha Sharygina and Helmut Veith, editors, Proceedings of
the 25th International Conference on Computer Aided Verification (CAV'13),
volume 8044 of Lecture Notes in Computer Science, pages 397-412.
Springer, 2013.
[ bibtex |
abstract |
DOI |
arXiv ]
We present a novel technique for proving program
termination which introduces a new dimension of
modularity. Existing techniques use the program to
incrementally construct a termination proof. While
the proof keeps changing, the program remains the
same. Our technique goes a step further. We show
how to use the current partial proof to partition
the transition relation into those behaviors known
to be terminating from the current proof, and those
whose status (terminating or not) is not known
yet. This partition enables a new and unexplored
dimension of incremental reasoning on the program
side. In addition, we show that our approach
naturally applies to conditional termination which
searches for a precondition ensuring termination.
We further report on a prototype implementation that
advances the state-of-the-art on the grounds of
termination and conditional termination.
@inproceedings{GantyG13,
author = {Pierre Ganty and Samir Genaim},
title = {{P}roving {T}ermination {S}tarting from the {E}nd},
booktitle = {Proceedings of the 25th International Conference on Computer Aided Verification (CAV'13)},
editor = {Natasha Sharygina and Helmut Veith},
series = {Lecture Notes in Computer Science},
volume = {8044},
pages = {397--412},
publisher = {Springer},
year = {2013},
isbn = {978-3-642-39798-1},
issn = {0302-9743},
doi = {10.1007/978-3-642-39799-8_27},
arxiv = {http://arxiv.org/abs/1302.4539},
projects = {\doves,\prometidos,\vivac},
rank:acronym = {CAV},
rank:aneca:class = {\anecamr},
rank:grin:class = {Class 1 (A+)}
}
|
[19]
|
Amir M. Ben-Amram and Samir Genaim.
On the Linear Ranking Problem for Integer
Linear-Constraint Loops.
In Roberto Giacobazzi and Radhia Cousot, editors, Proceedings of
the 40th Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming
Languages (POPL'13), volume 48(1) of ACM SIGPLAN NOTICES, pages
51-62. ACM, 2013.
Nominated for CACM Research Highlights
http://www.sigplan.org/Highlights/Papers/. Alternative DOI:
https://dl.acm.org/doi/10.1145/2429069.2429078.
[ bibtex |
abstract |
DOI ]
In this paper we study the complexity of the Linear
Ranking problem: given a loop, described by linear
constraints over a finite set of integer variables,
is there a linear ranking function for this loop?
While existence of such a function implies
termination, this problem is not equivalent to
termination. When the variables range over the
rationals or reals, the Linear Ranking problem is
known to be PTIME decidable. However, when they
range over the integers, whether for single-path or
multipath loops, the decidability of the Linear
Ranking problem has not yet been determined. We show
that it is decidable, but harder than the former
case: it is coNP-complete. However, we point out
some special cases of importance of PTIME
complexity. We also present complete algorithms for
synthesizing linear ranking functions, both for the
general case and the special PTIME cases.
@inproceedings{Ben-AmramG13,
author = {Amir M. Ben-Amram and Samir Genaim},
title = {{O}n the {L}inear {R}anking {P}roblem for {I}nteger {L}inear-{C}onstraint {L}oops},
booktitle = {Proceedings of the 40th Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages (POPL'13)},
editor = {Roberto Giacobazzi and Radhia Cousot},
series = {ACM SIGPLAN NOTICES},
volume = {48(1)},
pages = {51--62},
publisher = {ACM},
year = {2013},
issn = {0362-1340},
doi = {10.1145/2480359.2429078},
note = {Nominated for CACM Research Highlights \url{http://www.sigplan.org/Highlights/Papers/}. Alternative DOI: \url{https://dl.acm.org/doi/10.1145/2429069.2429078}},
projects = {\doves,\vivac},
rank:acronym = {POPL},
rank:aneca:class = {\anecamr},
rank:grin:class = {Class 1 (A++)}
}
|
[20]
|
Elvira Albert, Antonio Flores-Montoya, and Samir Genaim.
MayPar: A May-Happen-in-Parallel Analyzer for
Concurrent Objects.
In Will Tracz, Martin P. Robillard, and Tevfik Bultan, editors,
Proceedings of the 20th ACM SIGSOFT Symposium on the Foundations of Software
Engineering (FSE-20), pages 14:1-14:5. ACM, 2012.
[ bibtex |
abstract |
DOI ]
We present the concepts, usage and prototypical
implementation of MayPar, a may-happen-in-parallel
(MHP) static analyzer for a distributed asynchronous
language based on concurrent objects. Our tool
allows analyzing an application and finding out the
pairs of statements that can execute in
parallel. The information can be displayed by means
of a graphical representation of the MHP analysis
graph or, in a textual way, as a set of pairs which
identify the program points that may run in
parallel. The information yield by MayPar can be
relevant (1) to spot bugs in the program related to
fragments of code which should not run in parallel
and also (2) to improve the precision of other
analyses which infer more complex properties (e.g.,
termination and resource consumption).
@inproceedings{AlbertFG12b,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim},
title = {{M}ay{P}ar: A {M}ay-{H}appen-in-{P}arallel {A}nalyzer for {C}oncurrent {O}bjects},
booktitle = {Proceedings of the 20th ACM SIGSOFT Symposium on the Foundations of Software Engineering (FSE-20)},
editor = {Will Tracz and Martin P. Robillard and Tevfik Bultan},
pages = {14:1--14:5},
publisher = {ACM},
year = {2012},
isbn = {978-1-4503-1614-9},
doi = {10.1145/2393596.2393611},
projects = {\doves,\prometidos},
rank:acronym = {FSE},
rank:aneca:class = {\anecamr},
rank:grin:class = {Class 1 (A+)}
}
|
[21]
|
Diego Esteban Alonso Blas and Samir Genaim.
On the Limits of the Classical Approach to Cost Analysis.
In Antoine Miné and David Schmidt, editors, Proceedings of
the 19th International Symposium of Static Analysis (SAS'12), volume 7460 of
Lecture Notes in Computer Science, pages 405-421. Springer, 2012.
[ bibtex |
abstract |
DOI ]
The classical approach to static cost analysis is
based on transforming a given program into cost
relations and solving them into closed-form
upper-bounds. It is known that for some programs,
this approach infers upper-bounds that are
asymptotically less precise than the actual cost.
As yet, it was assumed that this imprecision is due
to the way cost relations are solved into
upper-bounds. In this paper: (1) we show that this
assumption is partially true, and identify the
reason due to which cost relations cannot precisely
model the cost of such programs; and (2) to overcome
this imprecision, we develop a new approach to cost
analysis, based on SMT and quantifier elimination.
Interestingly, we find a strong relation between our
approach and amortised cost analysis.
@inproceedings{AlonsoG12,
author = {Diego Esteban Alonso Blas and Samir Genaim},
title = {{O}n the {L}imits of the {C}lassical {A}pproach to {C}ost {A}nalysis},
booktitle = {Proceedings of the 19th International Symposium of Static Analysis (SAS'12)},
editor = {Antoine Min{\'e} and David Schmidt},
year = {2012},
pages = {405--421},
doi = {10.1007/978-3-642-33125-1_27},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7460},
isbn = {978-3-642-33124-4},
issn = {0302-9743},
projects = {\doves,\supra,\prometidos},
rank:acronym = {SAS},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A-)}
}
|
[22]
|
Elvira Albert, Antonio Flores-Montoya, and Samir Genaim.
Analysis of May-Happen-in-Parallel in Concurrent Objects.
In Holger Giese and Grigore Rosu, editors, Proceedings of the
International Conference on Formal Techniques for Distributed Systems
(FMOODS/FORTE'12), volume 7273 of Lecture Notes in Computer Science,
pages 35-51. Springer, 2012.
[ bibtex |
abstract |
DOI ]
This paper presents a may-happen-in-parallel
(MHP) analysis for OO languages based on
concurrent objects. In this concurrency
model, objects are the concurrency units such
that, when a method is invoked on an object o2
from a task executing on object o1, statements of
the current task in o1 may run in parallel with
those of the (asynchronous) call on o2, and with
those of transitively invoked methods. The goal of
the MHP analysis is to identify pairs of statements
in the program that may run in parallel in any
execution. Our MHP analysis is formalized as a
method-level (local) analysis whose
information can be modularly composed to obtain
application-level (global) information.
@inproceedings{AlbertFG12a,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim},
title = {{A}nalysis of {M}ay-{H}appen-in-{P}arallel in {C}oncurrent {O}bjects},
editor = {Holger Giese and Grigore Rosu},
booktitle = {Proceedings of the International Conference on Formal Techniques for Distributed Systems (FMOODS/FORTE'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7273},
year = {2012},
isbn = {978-3-642-30792-8},
issn = {0302-9743},
pages = {35--51},
doi = {10.1007/978-3-642-30793-5_3},
projects = {\doves,\supra,\prometidos},
rank:acronym = {FMOODS/FORTE},
rank:grin:class = {Class 3 (B-)}
}
|
[23]
|
Elvira Albert, Richard Bubel, Samir Genaim, Reiner Hähnle, and Guillermo Román-Díez.
Verified Resource Guarantees for Heap Manipulating
Programs.
In Juan de Lara and Andrea Zisman, editors, Proceedings of the
15th International Conference on Fundamental Approaches to Software
Engineering (FASE'12), volume 7212 of Lecture Notes in Computer
Science, pages 130-145. Springer, 2012.
[ bibtex |
abstract |
DOI ]
Program properties that are automatically inferred by
static analysis tools are generally not considered
to be completely trustworthy, unless the tool
implementation or the results are formally
verified. Here we focus on the formal verification
of resource guarantees inferred by automatic cost
analysis. Resource guarantees ensure that programs
run within the indicated amount of resources which
may refer to memory consumption, to number of
instructions executed, etc. In previous work we
studied formal verification of inferred resource
guarantees that depend only on integer data. In
realistic programs, however, resource consumption is
often bounded by the size of heap-allocated data
structures. Bounding their size requires to perform
a number of structural heap analyses. The
contributions of this paper are (i) to identify what
exactly needs to be verified to guarantee sound
analysis of heap manipulating programs, (ii) to
provide a suitable extension of the program logic
used for verification to handle structural heap
properties in the context of resource guarantees,
and (iii) to improve the underlying theorem prover
so that proof obligations can be automatically
discharged.
@inproceedings{AlbertBGHR12,
author = {Elvira Albert and Richard Bubel and Samir Genaim and Reiner H{\"a}hnle and Guillermo Rom{\'a}n-D{\'\i}ez},
title = {{V}erified {R}esource {G}uarantees for {H}eap {M}anipulating {P}rograms},
editor = {Juan de Lara and Andrea Zisman},
booktitle = {Proceedings of the 15th International Conference on Fundamental Approaches to Software Engineering (FASE'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7212},
year = {2012},
isbn = {978-3-642-28871-5},
issn = {0302-9743},
pages = {130--145},
doi = {10.1007/978-3-642-28872-2_10},
projects = {\doves,\prometidos},
rank:acronym = {FASE},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A-)}
}
|
[24]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Automatic Inference of Resource Consumption Bounds.
In Nikolaj Bjørner and Andrei Voronkov, editors, Proceedings
of the 18th International Conference on Logic for Programming, Artificial
Intelligence, and Reasoning (LPAR-18), volume 7180 of Lecture Notes in
Computer Science, pages 1-11. Springer, 2012.
[ bibtex |
abstract |
DOI ]
One of the main features of programs is the amount of
resources which are needed in order to run
them. Different resources can be taken into
consideration, such as the number of execution
steps, amount of memory allocated, number of calls
to certain methods, etc. Unfortunately, manually
determining the resource consumption of programs is
difficult and error-prone. We provide an overview of
a state of the art framework for automatically
obtaining both upper and lower bounds on the
resource consumption of programs. The bounds
obtained are functions on the size of the input
arguments to the program and are obtained
statically, i.e., without running the program. Due
to the approximations introduced, the framework can
fail to obtain (non-trivial) bounds even if they
exist. On the other hand, modulo implementation
bugs, the bounds thus obtained are valid for any
execution of the program. The framework has been
implemented in the COSTA system and can provide
useful bounds for realistic object-oriented and
actor-based concurrent programs.
@inproceedings{AlbertAGGP12b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Germ{\'a}n Puebla},
title = {{A}utomatic {I}nference of {R}esource {C}onsumption {B}ounds},
booktitle = {Proceedings of the 18th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning (LPAR-18)},
editor = {Nikolaj Bj{\o}rner and Andrei Voronkov},
series = {Lecture Notes in Computer Science},
volume = {7180},
pages = {1--11},
publisher = {Springer},
year = {2012},
isbn = {978-3-642-28716-9},
issn = {0302-9743},
doi = {10.1007/978-3-642-28717-6_1},
projects = {\doves,\prometidos},
rank:acronym = {LPAR},
rank:grin:class = {Class 3 (B)}
}
|
[25]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
COSTABS: A Cost and Termination Analyzer for ABS.
In Oleg Kiselyov and Simon Thompson, editors, Proceedings of the
ACM SIGPLAN 2012 Workshop on Partial Evaluation and Program Manipulation
(PEPM'12), pages 151-154. ACM, 2012.
[ bibtex |
abstract |
DOI ]
ABS is an abstract behavioural
specification language to model distributed
concurrent systems. Characteristic features of ABS
are that: (1) it allows abstracting from
implementation details while remaining executable: a
functional sub-language over abstract data
types is used to specify internal, sequential
computations; and (2) the imperative
sub-language provides flexible concurrency and
synchronization mechanisms by means of asynchronous
method calls, release points in method definitions,
and cooperative scheduling of method
activations. This paper presents COSTABS, a COSt and
Termination analyzer for ABS, which is able to prove
termination and obtain resource usage bounds
for both the imperative and functional fragments of
programs. The resources that COSTABS can infer
include termination, number of execution steps,
memory consumption, number of asynchronous calls,
among others. The analysis bounds provide formal
guarantees that the execution of the program will
never exceed the inferred amount of resources. The
system can be downloaded as free software from its
web site, where a repository of examples and a web
interface are also provided. To the best of our
knowledge, COSTABS is the first system able to
perform resource analysis for a concurrent language.
@inproceedings{AlbertAGGP12a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Germ{\'a}n Puebla},
title = {{COSTABS}: {A} {C}ost and {T}ermination {A}nalyzer for {ABS}},
booktitle = {Proceedings of the ACM SIGPLAN 2012 Workshop on Partial Evaluation and Program Manipulation (PEPM'12)},
editor = {Oleg Kiselyov and Simon Thompson},
pages = {151--154},
publisher = {ACM},
year = {2012},
isbn = {978-1-4503-1118-2},
doi = {10.1145/2103746.2103774},
projects = {\doves,\prometidos},
rank:acronym = {PEPM},
rank:grin:class = {Class 3 (B)}
}
|
[26]
|
Amir M. Ben-Amram, Samir Genaim, and Abu Naser Masud.
On the Termination of Integer Loops.
In Viktor Kuncak and Andrey Rybalchenko, editors, Proceedings of
the 13th International Conference on Verification, Model Checking, and
Abstract Interpretation (VMCAI'12), volume 7148 of Lecture Notes in
Computer Science, pages 72-87. Springer, 2012.
[ bibtex |
abstract |
DOI ]
In this paper we study the decidability of termination
of several variants of simple integer loops, without
branching in the loop body and with affine
constraints as the loop guard (and possibly a
precondition). We show that termination of such
loops is undecidable in some cases, in particular,
when the body of the loop is expressed by a set of
linear inequalities where the coefficients are from
Z ∪ {r} with r an arbitrary irrational; or
when the loop is a sequence of instructions, that
compute either linear expressions or the step
function. The undecidability result is proven by a
reduction from counter programs, whose termination
is known to be undecidable. For the common case of
integer constraints loops with rational coefficients
only we have not succeeded in proving decidability
nor undecidability of termination, however, this
attempt led to the result that a Petri net can be
simulated with such a loop, which implies some
interesting lower bounds. For example, termination
for a given input is at least EXPSPACE-hard.
@inproceedings{Ben-AmramGM12,
author = {Amir M. Ben-Amram and Samir Genaim and Abu Naser Masud},
title = {{O}n the {T}ermination of {I}nteger {L}oops},
booktitle = {Proceedings of the 13th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI'12)},
editor = {Viktor Kuncak and Andrey Rybalchenko},
series = {Lecture Notes in Computer Science},
volume = {7148},
pages = {72--87},
publisher = {Springer},
year = {2012},
isbn = {978-3-642-27939-3},
issn = {0302-9743},
doi = {10.1007/978-3-642-27940-9_6},
projects = {\doves,\prometidos},
rank:acronym = {VMCAI},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A-)}
}
|
[27]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and Germán
Puebla.
Cost Analysis of Concurrent OO Programs.
In Hongseok Yang, editor, Proceedings of the 9th Asian Symposium
on Programming Languages and Systems (APLAS'11), volume 7078 of Lecture
Notes in Computer Science, pages 238-254. Springer, 2011.
[ bibtex |
abstract |
DOI ]
Cost analysis aims at automatically approximating the
resource consumption (e.g., memory) of
executing a program in terms of its input
parameters. While cost analysis for sequential
programming languages has received considerable
attention, concurrency and distribution have been
notably less studied. The main challenges (and our
contributions) of cost analysis in a concurrent
setting are: (1) Inferring precise size
relations for data in the program in the presence
of shared memory. This information is essential for
bounding the number of iterations of loops. (2)
Distribution suggests that analysis must keep the
cost of the diverse distributed components
separate. We handle this by means of a novel form of
recurrence equations which are parametric on
the notion of cost center, which represents a
corresponding component. To the best of our
knowledge, our work is the first one to present a
general cost analysis framework and an
implementation for concurrent OO programs.
@inproceedings{AlbertAGGP11,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Germ{\'a}n Puebla},
title = {{C}ost {A}nalysis of {C}oncurrent {OO} {P}rograms},
editor = {Hongseok Yang},
booktitle = {Proceedings of the 9th Asian Symposium on Programming Languages and Systems (APLAS'11)},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {7078},
year = {2011},
pages = {238--254},
doi = {10.1007/978-3-642-25318-8_19},
isbn = {978-3-642-25317-1},
issn = {0302-9743},
projects = {\doves,\prometidos},
rank:acronym = {APLAS},
rank:grin:class = {Class 3 (B-)}
}
|
[28]
|
Elvira Albert, Samir Genaim, Miguel Gómez-Zamalloa, Einar Broch Johnsen,
Rudolf Schlatte, and Silvia Lizeth Tapia Tarifa.
Simulating Concurrent Behaviors with Worst-Case Cost
Bounds.
In Michael Butler and Wolfram Schulte, editors, Proceedings of
the 17th International Symposium on Formal Methods (FM'11), volume 6664 of
Lecture Notes in Computer Science, pages 353-368. Springer, 2011.
[ bibtex |
abstract |
DOI ]
Modern software systems are increasingly being
developed for deployment on a range of
architectures. For this purpose, it is interesting
to capture aspects of low-level deployment concerns
in high-level modeling languages. In this paper, an
executable object-oriented modeling language is
extended with resource-restricted deployment
components. To analyze model behavior a formal
methodology is proposed to assess resource
consumption, which balances the scalability of the
method and the reliability of the obtained
results. The approach applies to a general notion of
resource, including traditional cost measures (e.g.,
time, memory) as well as concurrency-related
measures (e.g., requests to a server, spawned
tasks). The main idea of our approach is to combine
reliable (but expensive) worst-case cost analysis of
statically predictable parts of the model with fast
(but inherently incomplete) simulations of the
concurrent aspects in order to avoid the state-space
explosion. The approach is illustrated by the
analysis of memory consumption.
@inproceedings{AlbertGGJST11,
author = {Elvira Albert and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Einar Broch Johnsen and Rudolf Schlatte and Silvia Lizeth Tapia Tarifa},
title = {{S}imulating {C}oncurrent {B}ehaviors with {W}orst-{C}ase {C}ost {B}ounds},
booktitle = {Proceedings of the 17th International Symposium on Formal Methods (FM'11)},
editor = {Michael Butler and Wolfram Schulte},
series = {Lecture Notes in Computer Science},
volume = {6664},
pages = {353--368},
publisher = {Springer},
year = {2011},
isbn = {978-3-642-21436-3},
issn = {0302-9743},
doi = {10.1007/978-3-642-21437-0_27},
projects = {\doves,\aiverona,\prometidos},
rank:acronym = {FM},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A)}
}
|
[29]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Damiano Zanardini.
Task-Level Analysis for a Language with Async-Finish
Parallelism.
In Jan Vitek and Bjorn De Sutter, editors, Proceedings of the
ACM SIGPLAN/SIGBED 2011 conference on Languages, compilers, and tools for
embedded systems (LCTES'11), volume 46(5) of ACM SIGPLAN NOTICES,
pages 21-30. ACM, 2011.
[ bibtex |
abstract |
DOI ]
The task-level of a program is the maximum number of
tasks that can be available (i.e., not finished nor
suspended) simultaneously during its execution for
any input data. Static knowledge of the task-level
is of utmost importance for understanding and
debugging parallel programs as well as for guiding
task schedulers. We present, to the best of our
knowledge, the first static analysis which infers
safe and precise approximations on the task-level
for a language with async-finish parallelism. In
parallel languages, async and finish are the basic
constructs, respectively, for spawning tasks and for
waiting until they terminate. They are the core of
modern, parallel, distributed languages like X10.
Given a (parallel) program, our analysis returns a
task-level upper bound, i.e., a function on the
program's input arguments that guarantees that the
task-level of the program will never exceed its
value along any execution. Our analysis provides a
series of useful (over)-approximations, going from
the total number of tasks spawned in the execution
up to an accurate estimation of the task-level.
@inproceedings{AlbertAGZ11,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Damiano Zanardini},
title = {Task-{L}evel {A}nalysis for a {L}anguage with {A}sync-{F}inish {P}arallelism},
booktitle = {Proceedings of the ACM SIGPLAN/SIGBED 2011 conference on Languages, compilers, and tools for embedded systems (LCTES'11)},
editor = {Jan Vitek and Bjorn De Sutter},
series = {ACM SIGPLAN NOTICES},
volume = {46(5)},
pages = {21--30},
publisher = {ACM},
year = {2011},
isbn = {978-1-4503-0555-6},
issn = {0362-1340},
doi = {10.1145/1967677.1967681},
projects = {\doves,\aiverona,\prometidos},
rank:acronym = {SIGPLAN NOTICES (LCTES'11)},
rank:aneca:class = {\anecar},
rank:jcr:totalcite = {767},
rank:jcr:q = {4},
rank:grin:class = {Class 2 (A-)}
}
|
[30]
|
Elvira Albert, Richard Bubel, Samir Genaim, Reiner Hähnle, Germán Puebla,
and Guillermo Román-Díez.
Verified Resource Guarantees using COSTA and KeY.
In Siau-Cheng Khoo and Jeremy G. Siek, editors, Proceedings of
the 2011 ACM SIGPLAN Workshop on Partial Evaluation and Program Manipulation
(PEPM'11), SIGPLAN, pages 73-76. ACM, 2011.
[ bibtex |
abstract |
DOI ]
Resource guarantees allow being certain that programs
will run within the indicated amount of resources,
which may refer to memory consumption, number of
instructions executed, etc. This information can be
very useful, especially in real-time and
safety-critical applications. Nowadays, a number of
automatic tools exist, often based on type systems
or static analysis, which produce such resource
guarantees. In spite of being based on
theoretically sound techniques, the implemented
tools may contain bugs which render the resource
guarantees thus obtained not completely trustworthy.
Performing full-blown verification of such tools is
a daunting task, since they are large and complex.
In this work we investigate an alternative approach
whereby, instead of the tools, we formally verify
the results of the tools. We have implemented this
idea using COSTA, a state-of-the-art static analysis
system, for producing resource guarantees and KeY, a
state-of-the-art verification tool, for formally
verifying the correctness of such resource
guarantees. Our preliminary results show that the
proposed tool cooperation can be used for
automatically producing verified resource
guarantees.
@inproceedings{AlbertBGHPR11,
author = {Elvira Albert and Richard Bubel and Samir Genaim and Reiner H{\"a}hnle and Germ{\'a}n Puebla and Guillermo Rom{\'a}n-D{\'\i}ez},
title = {{V}erified {R}esource {G}uarantees using {COSTA} and {KeY}},
booktitle = {Proceedings of the 2011 ACM SIGPLAN Workshop on Partial Evaluation and Program Manipulation (PEPM'11)},
editor = {Siau-Cheng Khoo and Jeremy G. Siek},
publisher = {ACM},
series = {SIGPLAN},
year = 2011,
pages = {73--76},
doi = {10.1145/1929501.1929513},
isbn = {978-1-4503-0485-6},
projects = {\doves,\prometidos},
rank:acronym = {PEPM},
rank:grin:class = {Class 3 (B)}
}
|
[31]
|
Elvira Albert, Samir Genaim, and Abu Naser Masud.
More Precise yet Widely Applicable Cost Analysis.
In Ranjit Jhala and David A. Schmidt, editors, Proceedings of
the 12th International Conference on Verification, Model Checking, and
Abstract Interpretation (VMCAI'11), volume 6538 of Lecture Notes in
Computer Science, pages 38-53. Springer, 2011.
[ bibtex |
abstract |
DOI ]
Cost analysis aims at determining the amount of
resources required to run a program in terms of its
input data sizes. Automatically inferring precise
bounds, while at the same time being able to handle
a wide class of programs, is a main challenge in
cost analysis. (1) Existing methods which rely on
computer algebra systems (CAS) to solve the
obtained cost recurrence equations (CR) are very
precise when applicable, but handle a very
restricted class of CR. (2) Specific solvers
developed for CR tend to sacrifice accuracy for
wider applicability. In this paper, we present a
novel approach to inferring precise upper and lower
bounds on CR which, when compared to (1), is
strictly more widely applicable while precision is
kept and, when compared to (2), is in practice more
precise and still keeps wide applicability. The main
novelty is that we are able to accurately bound the
worst-case/best-case cost of each iteration of the
program loops and, then, by summing the resulting
sequences, we obtain precise upper/lower
bounds. Preliminary experimental results on Java
(bytecode) programs confirm the good balance between
the accuracy of our analysis and its
applicability.
@inproceedings{AlbertGM11,
  author           = {Elvira Albert and Samir Genaim and Abu Naser Masud},
  title            = {{M}ore {P}recise yet {W}idely {A}pplicable {C}ost {A}nalysis},
  editor           = {Ranjit Jhala and David A. Schmidt},
  booktitle        = {Proceedings of the 12th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI'11)},
  series           = {Lecture Notes in Computer Science},
  volume           = {6538},
  publisher        = {Springer},
  year             = {2011},
  pages            = {38--53},
  doi              = {10.1007/978-3-642-18275-4_5},
  isbn             = {978-3-642-18274-7},
  issn             = {0302-9743},
  projects         = {\doves,\aiverona,\prometidos},
  rank:acronym     = {VMCAI},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A-)}
}
|
[32]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Diana
Ramírez.
From Object Fields to Local Variables: A Practical
Approach to Field-Sensitive Analysis.
In Radhia Cousot and Matthieu Martel, editors, Proceedings of
the 17th International Symposium on Static Analysis (SAS'10), volume 6337 of
Lecture Notes in Computer Science, pages 100-116. Springer, 2010.
[ bibtex |
abstract |
DOI ]
Static analysis which takes into account the value of
data stored in the heap is typically considered
complex and computationally intractable in
practice. Thus, most static analyzers do not keep
track of object fields (or fields for short), i.e.,
they are field-insensitive. In this paper, we
propose locality conditions for soundly converting
fields into local variables. This way,
field-insensitive analysis over the transformed
program can infer information on the original
fields. Our notion of locality is context-sensitive
and can be applied both to numeric and reference
fields. We propose then a polyvariant
transformation which actually converts object fields
meeting the locality condition into variables and
which is able to generate multiple versions of code
when this leads to increasing the amount of fields
which satisfy the locality conditions. We have
implemented our analysis within a termination
analyzer for Java bytecode. Interestingly, we can
now prove termination of programs which use
iterators without the need of a sophisticated heap
analysis.
@inproceedings{AlbertAGPR10,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram{\'\i}rez},
title = {{F}rom {O}bject {F}ields to {L}ocal {V}ariables: {A} {P}ractical {A}pproach to {F}ield-{S}ensitive {A}nalysis},
booktitle = {Proceedings of the 17th International Symposium on Static Analysis (SAS'10)},
editor = {Radhia Cousot and Matthieu Martel},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 6337,
isbn = {978-3-642-15768-4},
issn = {0302-9743},
year = 2010,
pages = {100--116},
doi = {10.1007/978-3-642-15769-1_7},
projects = {\doves,\aiverona,\prometidos},
rank:acronym = {SAS},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A-)}
}
|
[33]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Parametric Inference of Memory Requirements for Garbage
Collected Languages.
In Jan Vitek and Doug Lea, editors, Proceedings of the 9th
International Symposium on Memory Management (ISMM'10), volume 45(8) of
ACM SIGPLAN NOTICES, pages 121-130. ACM, 2010.
[ bibtex |
abstract |
DOI ]
The accurate prediction of program's memory
requirements is a critical component in software
development. Existing heap space analyses either do
not take deallocation into account or adopt specific
models of garbage collectors which do not
necessarily correspond to the actual memory usage.
We present a novel approach to inferring upper
bounds on memory requirements of Java-like programs
which is parametric on the notion of
object lifetime, i.e., on when objects become
collectable. If objects lifetimes are inferred by a
reachability analysis, then our analysis
infers accurate upper bounds on the memory
consumption for a reachability-based garbage
collector. Interestingly, if objects lifetimes are
inferred by a heap liveness analysis, then we
approximate the program minimal memory requirement,
i.e., the peak memory usage when using an optimal
garbage collector which frees objects as soon as
they become dead. The key idea is to integrate
information on objects lifetimes into the process of
generating the recurrence equations which
capture the memory usage at the different program
states. If the heap size limit is set to the memory
requirement inferred by our analysis, it is ensured
that execution will not exceed the memory limit with
the only assumption that garbage collection works
when the limit is to be reached. Experiments on Java
bytecode programs provide evidence of the
feasibility and accuracy of our analysis.
@inproceedings{AlbertGG10,
  author             = {Elvira Albert and Samir Genaim and Miguel G\'{o}mez-Zamalloa},
  title              = {{P}arametric {I}nference of {M}emory {R}equirements for {G}arbage {C}ollected {L}anguages},
  editor             = {Jan Vitek and Doug Lea},
  booktitle          = {Proceedings of the 9th International Symposium on Memory Management (ISMM'10)},
  series             = {ACM SIGPLAN NOTICES},
  volume             = {45(8)},
  publisher          = {ACM},
  year               = {2010},
  pages              = {121--130},
  doi                = {10.1145/1806651.1806671},
  isbn               = {978-1-4503-0054-4},
  issn               = {0362-1340},
  projects           = {\promesas,\doves,\cara,\aiverona},
  rank:acronym       = {SIGPLAN NOTICES (ISMM'10)},
  rank:aneca:class   = {\anecar},
  rank:jcr:totalcite = {999},
  rank:jcr:q         = {4},
  rank:grin:class    = {Class 3 (B)}
}
|
[34]
|
Michael Codish, Samir Genaim, and Peter J. Stuckey.
A Declarative Encoding of Telecommunications Feature
Subscription in SAT.
In António Porto and Francisco Javier López-Fraguas, editors,
Proceedings of the 11th International ACM SIGPLAN Conference on
Principles and Practice of Declarative Programming (PPDP'09), pages
255-266. ACM, 2009.
[ bibtex |
abstract |
DOI ]
This paper describes the encoding of a
telecommunications feature subscription
configuration problem to propositional logic and its
solution using a state-of-the-art Boolean
satisfaction solver. The transformation of a
problem instance to a corresponding propositional
formula in conjunctive normal form is obtained in a
declarative style. An experimental evaluation
indicates that our encoding is considerably faster
than previous approaches based on the use of Boolean
satisfaction solvers. The key to obtaining such a
fast solver is the careful design of the Boolean
representation and of the basic operations in the
encoding. The choice of a declarative programming
style makes the use of complex circuit designs
relatively easy to incorporate into the encoder and
to fine tune the application.
@inproceedings{CodishGS09,
  author          = {Michael Codish and Samir Genaim and Peter J. Stuckey},
  title           = {A {D}eclarative {E}ncoding of {T}elecommunications {F}eature {S}ubscription in {SAT}},
  editor          = {Ant{\'o}nio Porto and Francisco Javier L{\'o}pez-Fraguas},
  booktitle       = {Proceedings of the 11th International ACM SIGPLAN Conference on Principles and Practice of Declarative Programming (PPDP'09)},
  publisher       = {ACM},
  year            = {2009},
  pages           = {255--266},
  doi             = {10.1145/1599410.1599442},
  isbn            = {978-1-60558-568-0},
  projects        = {\mobius,\promesas,\merit,\doves,\aiverona},
  rank:acronym    = {PPDP},
  rank:grin:class = {Class 3 (B)}
}
|
[35]
|
Elvira Albert, Puri Arenas, Samir Genaim, Israel Herraiz, and Germán Puebla.
Comparing Cost Functions in Resource Analysis.
In Marko C. J. D. van Eekelen and Olha Shkaravska, editors,
Proceedings of the 1st International Workshop on Foundational and Practical
Aspects of Resource Analysis (FOPARA'09), volume 6324 of Lecture Notes
in Computer Science, pages 1-17. Springer, 2009.
[ bibtex |
abstract |
DOI ]
Cost functions provide information about the
amount of resources required to execute a program in
terms of the sizes of input arguments. They can
provide an upper-bound, a lower-bound, or the
average-case cost. Motivated by the existence of a
number of automatic cost analyzers which produce
cost functions, we propose an approach for
automatically proving that a cost function is
smaller than another one. In all applications of
resource analysis, such as resource-usage
verification, program synthesis and optimization,
etc., it is essential to compare cost
functions. This allows choosing an implementation
with smaller cost or guaranteeing that the given
resource-usage bounds are preserved. Unfortunately,
automatically generated cost functions for realistic
programs tend to be rather intricate, defined by
multiple cases, involving non-linear subexpressions
(e.g., exponential, polynomial and logarithmic) and
they can contain multiple variables, possibly
related by means of constraints. Thus, comparing
cost functions is far from trivial. Our approach
first syntactically transforms functions into
simpler forms and then applies a number of
sufficient conditions which guarantee that a set of
expressions is smaller than another expression. Our
preliminary implementation in the COSTA system
indicates that the approach can be useful in
practice.
@inproceedings{AlbertAGHP09,
  author       = {Elvira Albert and Puri Arenas and Samir Genaim and Israel Herraiz and Germ{\'a}n Puebla},
  title        = {{C}omparing {C}ost {F}unctions in {R}esource {A}nalysis},
  editor       = {Marko C. J. D. van Eekelen and Olha Shkaravska},
  booktitle    = {Proceedings of the 1st International Workshop on Foundational and Practical Aspects of Resource Analysis (FOPARA'09)},
  series       = {Lecture Notes in Computer Science},
  volume       = {6324},
  publisher    = {Springer},
  year         = {2009},
  pages        = {1--17},
  doi          = {10.1007/978-3-642-15331-0_1},
  isbn         = {978-3-642-15330-3},
  issn         = {0302-9743},
  projects     = {},
  rank:acronym = {FOPARA}
}
|
[36]
|
Elvira Albert, Diego Alonso, Puri Arenas, Samir Genaim, and Germán Puebla.
Asymptotic Resource Usage Bounds.
In Zhenjiang Hu, editor, Proceedings of the 7th Asian Symposium
on Programming Languages and Systems (APLAS'09), volume 5904 of Lecture
Notes in Computer Science, pages 294-310. Springer, 2009.
[ bibtex |
abstract |
DOI ]
When describing the resource usage of a program, it is
usual to talk in asymptotic terms, such as
the well-known “big O” notation, whereby we focus
on the behaviour of the program for large input data
and make a rough approximation by considering as
equivalent programs whose resource usage grows at
the same rate. Motivated by the existence of
non-asymptotic resource usage analyzers, in
this paper, we develop a novel transformation from a
non-asymptotic cost function (which can be produced
by multiple resource analyzers) into its asymptotic
form. Our transformation aims at producing tight
asymptotic forms which do not contain
redundant subexpressions (i.e., expressions
asymptotically subsumed by others). Interestingly,
we integrate our transformation at the heart of a
cost analyzer to generate asymptotic upper
bounds without having to first compute their
non-asymptotic counterparts. Our experimental
results show that, while non-asymptotic cost
functions become very complex, their asymptotic
forms are much more compact and manageable. This is
essential to improve scalability and to enable the
application of cost analysis in resource-aware
verification/certification.
@inproceedings{AlbertAAGP09,
  author          = {Elvira Albert and Diego Alonso and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
  title           = {{A}symptotic {R}esource {U}sage {B}ounds},
  editor          = {Zhenjiang Hu},
  booktitle       = {Proceedings of the 7th Asian Symposium on Programming Languages and Systems (APLAS'09)},
  series          = {Lecture Notes in Computer Science},
  volume          = {5904},
  publisher       = {Springer},
  year            = {2009},
  pages           = {294--310},
  doi             = {10.1007/978-3-642-10672-9_21},
  isbn            = {978-3-642-10671-2},
  issn            = {0302-9743},
  projects        = {\promesas,\doves,\aiverona},
  rank:acronym    = {APLAS},
  rank:grin:class = {Class 3 (B-)}
}
|
[37]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Field-Sensitive Value Analysis by Field-Insensitive
Analysis.
In Ana Cavalcanti and Dennis Dams, editors, Proceedings of the
16th International Symposium on Formal Methods (FM'09), volume 5850 of
Lecture Notes in Computer Science, pages 370-386. Springer, 2009.
[ bibtex |
abstract |
DOI ]
Shared mutable data structures pose major problems in
static analysis and most analyzers are unable to
keep track of the values of numeric variables
stored in the heap. In this paper, we first
identify sufficient conditions under which heap
allocated numeric variables in object oriented
programs (i.e., numeric fields) can be handled as
non-heap allocated variables. Then, we present a
static analysis to infer which numeric fields
satisfy these conditions at the level of
(sequential) bytecode. This allows
instrumenting the code with ghost variables
which make such numeric fields observable to any
field-insensitive value analysis. Our experimental
results in termination analysis show that we greatly
enlarge the class of analyzable programs with a
reasonable overhead.
@inproceedings{AlbertAGP09,
  author           = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
  title            = {{F}ield-{S}ensitive {V}alue {A}nalysis by {F}ield-{I}nsensitive {A}nalysis},
  editor           = {Ana Cavalcanti and Dennis Dams},
  booktitle        = {Proceedings of the 16th International Symposium on Formal Methods (FM'09)},
  series           = {Lecture Notes in Computer Science},
  volume           = {5850},
  publisher        = {Springer},
  year             = {2009},
  pages            = {370--386},
  doi              = {10.1007/978-3-642-05089-3_24},
  isbn             = {978-3-642-05088-6},
  issn             = {0302-9743},
  projects         = {\mobius,\promesas,\merit,\doves,\aiverona},
  rank:acronym     = {FM},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A)}
}
|
[38]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Live Heap Space Analysis for Languages with Garbage
Collection.
In Hillel Kolodner and Guy L. Steele Jr., editors, Proceedings
of the 8th International Symposium on Memory Management (ISMM'09), pages
129-138. ACM, 2009.
[ bibtex |
abstract |
DOI ]
The peak heap consumption of a program is the
maximum size of the live data on the heap
during the execution of the program, i.e., the
minimum amount of heap space needed to run the
program without exhausting the memory. It is
well-known that garbage collection (GC) makes the
problem of predicting the memory required to run a
program difficult. This paper presents, to the best of
our knowledge, the first live heap space
analysis for garbage-collected languages which
infers accurate upper bounds on the peak heap usage
of a program's execution that are not restricted to
any complexity class, i.e., we can infer
exponential, logarithmic, polynomial, etc.,
bounds. Our analysis is developed for a
(sequential) object-oriented bytecode language with
a scoped-memory manager that reclaims
unreachable memory when methods return. We also show
how our analysis can accommodate other GC schemes
which are closer to the ideal GC which
collects objects as soon as they become unreachable.
The practicality of our approach is experimentally
evaluated on a prototype implementation. We
demonstrate that it is fully automatic, reasonably
accurate and efficient by inferring live heap space
bounds for a standardized set of benchmarks, the
JOlden suite.
@inproceedings{AlbertGG09,
  author          = {Elvira Albert and Samir Genaim and Miguel G{\'o}mez-Zamalloa},
  title           = {{L}ive {H}eap {S}pace {A}nalysis for {L}anguages with {G}arbage {C}ollection},
  editor          = {Hillel Kolodner and Guy L. Steele Jr.},
  booktitle       = {Proceedings of the 8th International Symposium on Memory Management (ISMM'09)},
  publisher       = {ACM},
  year            = {2009},
  pages           = {129--138},
  doi             = {10.1145/1542431.1542450},
  isbn            = {978-1-60558-347-1},
  projects        = {\mobius,\promesas,\merit,\doves},
  rank:acronym    = {ISMM},
  rank:grin:class = {Class 3 (B)}
}
|
[39]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Automatic Inference of Upper Bounds for Recurrence
Relations in Cost Analysis.
In María Alpuente and Germán Vidal, editors,
Proceedings of the 15th International Symposium on Static Analysis (SAS'08),
volume 5079 of Lecture Notes in Computer Science, pages 221-237.
Springer, 2008.
[ bibtex |
abstract |
DOI ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
non-trivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
@inproceedings{AlbertAGP08,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{A}utomatic {I}nference of {U}pper {B}ounds for {R}ecurrence {R}elations in {C}ost {A}nalysis},
booktitle = {Proceedings of the 15th International Symposium on Static Analysis (SAS'08)},
editor = {Mar{\'\i}a Alpuente and Germ{\'a}n Vidal},
year = 2008,
pages = {221--237},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 5079,
isbn = {978-3-540-69163-1},
issn = {0302-9743},
doi = {10.1007/978-3-540-69166-2_15},
projects = {\mobius,\promesas,\merit},
rank:acronym = {SAS},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A-)}
}
|
[40]
|
Elvira Albert, Puri Arenas, Michael Codish, Samir Genaim, Germán Puebla,
and Damiano Zanardini.
Termination Analysis of Java Bytecode.
In Gilles Barthe and Frank S. de Boer, editors, Proceedings of
the 10th IFIP WG 6.1 International Conference on Formal Methods for Open
Object-Based Distributed Systems (FMOODS'08), volume 5051 of Lecture
Notes in Computer Science, pages 2-18. Springer, 2008.
[ bibtex |
abstract |
DOI ]
Termination analysis has received considerable
attention, traditionally in the context of
declarative programming, and recently also for
imperative languages. In existing approaches,
termination is performed on source
programs. However, there are many situations,
including mobile code, where only the compiled code
is available. In this work we present an automatic
termination analysis for sequential Java Bytecode
programs. Such analysis presents all of the
challenges of analyzing a low-level language as well
as those introduced by object-oriented languages.
Interestingly, given a bytecode program, we produce
a constraint logic program, CLP, whose
termination entails termination of the bytecode
program. This allows applying the large body of work
in termination of CLP programs to termination of
Java bytecode. A prototype analyzer is described
and initial experimentation is reported.
@inproceedings{AlbertACGPZ08,
  author          = {Elvira Albert and Puri Arenas and Michael Codish and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
  title           = {{T}ermination {A}nalysis of {J}ava {B}ytecode},
  editor          = {Gilles Barthe and Frank S. de Boer},
  booktitle       = {Proceedings of the 10th IFIP WG 6.1 International Conference on Formal Methods for Open Object-Based Distributed Systems (FMOODS'08)},
  series          = {Lecture Notes in Computer Science},
  volume          = {5051},
  publisher       = {Springer},
  year            = {2008},
  pages           = {2--18},
  doi             = {10.1007/978-3-540-68863-1_2},
  isbn            = {978-3-540-68862-4},
  issn            = {0302-9743},
  projects        = {\mobius,\promesas,\merit},
  rank:acronym    = {FMOODS},
  rank:grin:class = {Class 3 (B-)}
}
|
[41]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Removing Useless Variables in Cost Analysis of Java
Bytecode.
In Roger L. Wainwright and Hisham Haddad, editors, Proceedings
of the 2008 ACM Symposium on Applied Computing (SAC'08), pages 368-375.
ACM, 2008.
[ bibtex |
abstract |
DOI ]
Automatic cost analysis has interesting applications
in the context of verification and certification of
mobile code. For instance, the code receiver can
use cost information in order to decide whether to
reject mobile code which has too large cost
requirements in terms of computing resources (in
time and/or space) or billable events (SMSs sent,
bandwidth required). Existing cost analyses for a
variety of languages describe the resource
consumption of programs by means of Cost
Equation Systems (CESs), which are similar to, but
more general than recurrence equations. CESs express
the cost of a program in terms of the size of its
input data. In a further step, a closed form (i.e.,
non-recursive) solution or upper bound can sometimes
be found by using existing Computer Algebra Systems
(CASs), such as Maple and Mathematica. In this
work, we focus on cost analysis of Java bytecode, a
language which is widely used in the context of
mobile code and we study the problem of identifying
variables which are useless in the sense that
they do not affect the execution cost and therefore
can be ignored by cost analysis. We identify two
classes of useless variables and propose automatic
analysis techniques to detect them. The first class
corresponds to stack variables that can be
replaced by program variables or constant values.
The second class corresponds to variables whose
value is cost-irrelevant, i.e., does not
affect the cost of the program. We propose an
algorithm, inspired in static slicing which
safely identifies cost-irrelevant variables. The
benefits of eliminating useless variables are
two-fold: (1) cost analysis without useless
variables can be more efficient and (2) resulting
CESs are more likely to be solvable by existing
CASs.
@inproceedings{AlbertAGPZ08,
  author       = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
  title        = {{R}emoving {U}seless {V}ariables in {C}ost {A}nalysis of {J}ava {B}ytecode},
  editor       = {Roger L. Wainwright and Hisham Haddad},
  booktitle    = {Proceedings of the 2008 ACM Symposium on Applied Computing (SAC'08)},
  publisher    = {ACM},
  year         = {2008},
  pages        = {368--375},
  doi          = {10.1145/1363686.1363779},
  isbn         = {978-1-59593-753-7},
  projects     = {\mobius,\promesas,\merit},
  rank:acronym = {SAC}
}
|
[42]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Heap Space Analysis for Java Bytecode.
In Greg Morrisett and Mooly Sagiv, editors, Proceedings of the
6th International Symposium on Memory management (ISMM'07), pages 105-116.
ACM, 2007.
[ bibtex |
abstract |
DOI ]
This article presents a heap space analysis for
(sequential) Java bytecode. The analysis generates
heap space cost relations which define at
compile-time the heap consumption of a program as a
function of its data size. These relations can be
used to obtain upper bounds on the heap space
allocated during the execution of the different
methods. In addition, we describe how to refine the
cost relations, by relying on escape
analysis, in order to take into account the heap
space that can be safely deallocated by the garbage
collector upon exit from a corresponding
method. These refined cost relations are then used
to infer upper bounds on the active heap
space upon methods return. Example applications
for the analysis consider inference of constant heap
usage and heap usage proportional to the data size
(including polynomial and exponential heap
consumption). Our prototype implementation is
reported and demonstrated by means of a series of
examples which illustrate how the analysis naturally
encompasses standard data-structures like lists,
trees and arrays with several dimensions written in
object-oriented programming style.
@inproceedings{AlbertGG07,
author = {Elvira Albert and Samir Genaim and Miguel G{\'o}mez-Zamalloa},
title = {{H}eap {S}pace {A}nalysis for {J}ava {B}ytecode},
booktitle = {Proceedings of the 6th International Symposium on Memory Management (ISMM'07)},
editor = {Greg Morrisett and Mooly Sagiv},
year = 2007,
isbn = {978-1-59593-893-0},
pages = {105--116},
doi = {10.1145/1296907.1296922},
publisher = {ACM},
projects = {\mobius,\promesas,\merit},
rank:acronym = {ISMM},
rank:grin:class = {Class 3 (B)}
}
|
[43]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Java Bytecode.
In Rocco De Nicola, editor, Proceedings of the 16th European
Symposium on Programming (ESOP'07), volume 4421 of Lecture Notes in
Computer Science, pages 157-172. Springer-Verlag, 2007.
[ bibtex |
abstract |
DOI ]
Cost analysis of Java bytecode is complicated by its
unstructured control flow, the use of an operand
stack and its object-oriented programming features
(like dynamic dispatching). This paper addresses
these problems and develops a generic framework for
the automatic cost analysis of sequential Java
bytecode. Our method generates cost
relations which define at compile-time the cost of
programs as a function of their input data size. To
the best of our knowledge, this is the first
approach to the automatic cost analysis of Java
bytecode.
@inproceedings{AlbertAGPZ07,
  author           = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
  title            = {{C}ost {A}nalysis of {J}ava {B}ytecode},
  editor           = {Rocco De Nicola},
  booktitle        = {Proceedings of the 16th European Symposium on Programming (ESOP'07)},
  series           = {Lecture Notes in Computer Science},
  volume           = {4421},
  publisher        = {Springer-Verlag},
  year             = {2007},
  pages            = {157--172},
  doi              = {10.1007/978-3-540-71316-6_12},
  isbn             = {978-3-540-71314-2},
  issn             = {0302-9743},
  projects         = {\mobius,\promesas,\merit},
  rank:acronym     = {ESOP},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A)}
}
|
[44]
|
Andy King, Lunjin Lu, and Samir Genaim.
Detecting Determinacy in Prolog Programs.
In Sandro Etalle and Miroslaw Truszczynski, editors, Proceedings
of the 22nd International Conference on Logic Programming (ICLP'06), volume
4079 of Lecture Notes in Computer Science, pages 132-147.
Springer-Verlag, 2006.
[ bibtex |
abstract |
DOI ]
The aim of determinacy inference is to infer a class
of calls to a given logic program, for which
execution will generate at most one answer and
generate the answer only once. Two serious
impediments to accurate determinacy inference are:
(1) the way bindings imposed by a rightmost goal can
make a leftmost goal deterministic; (2) the way
determinacy is often enforced with cut. Rather
surprisingly, this paper shows how: problem (1) can
be tackled by recasting determinacy inference as a
problem in concurrency; problem (2) can be addressed
within this concurrency framework. Experimental
evaluation shows that the new analysis can infer
richer classes of deterministic calls for many
programs.
@inproceedings{KingLG06,
  author           = {Andy King and Lunjin Lu and Samir Genaim},
  title            = {{D}etecting {D}eterminacy in {P}rolog {P}rograms},
  editor           = {Sandro Etalle and Miroslaw Truszczynski},
  booktitle        = {Proceedings of the 22nd International Conference on Logic Programming (ICLP'06)},
  series           = {Lecture Notes in Computer Science},
  volume           = {4079},
  publisher        = {Springer-Verlag},
  year             = {2006},
  pages            = {132--147},
  doi              = {10.1007/11799573_12},
  isbn             = {978-3-540-36635-5},
  issn             = {0302-9743},
  projects         = {\deter},
  rank:acronym     = {ICLP},
  rank:aneca:class = {\anecar},
  rank:aneca:note  = {El congreso ICLP es relevante, el mejor en el area de programaci\'on l\'ogica con un \'indice de aceptaci\'on competitivo, no aparece en el ranking GII-GRIN-SCIE porque las actas del congreso se publican en revista en los \'ultimos a\~nos}
}
|
[45]
|
Samir Genaim and Fausto Spoto.
Information Flow Analysis for Java Bytecode.
In Radhia Cousot, editor, Proceedings of the 6th International
Conference on Verification, Model Checking, and Abstract Interpretation
(VMCAI'05), volume 3385 of Lecture Notes in Computer Science, pages
346-362. Springer-Verlag, 2005.
[ bibtex |
abstract |
DOI ]
We present a flow and context sensitive compositional
information flow analysis for full (mono-threaded)
Java bytecode. We base our analysis on the
transformation of the Java bytecode into a
control-flow graph of basic blocks of code
which makes explicit the complex features of the
Java bytecode. We represent information flows
through Boolean functions and hence implement an
accurate and efficient information flow analysis
through binary decision diagrams. To the best of
our knowledge, it is the first one for full Java
bytecode.
@inproceedings{GenaimS05,
  author           = {Samir Genaim and Fausto Spoto},
  title            = {{I}nformation {F}low {A}nalysis for {J}ava {B}ytecode},
  editor           = {Radhia Cousot},
  booktitle        = {Proceedings of the 6th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI'05)},
  series           = {Lecture Notes in Computer Science},
  volume           = {3385},
  publisher        = {Springer-Verlag},
  year             = {2005},
  pages            = {346--362},
  doi              = {10.1007/978-3-540-30579-8_23},
  isbn             = {978-3-540-24297-0},
  issn             = {0302-9743},
  projects         = {},
  rank:acronym     = {VMCAI},
  rank:aneca:class = {\anecar},
  rank:grin:class  = {Class 2 (A-)}
}
|
[46]
|
Samir Genaim and Andy King.
Goal-Independent Suspension Analysis for Logic Programs
with Dynamic Scheduling.
In Pierpaolo Degano, editor, Proceedings of the 12th European
Symposium on Programming (ESOP'03), volume 2618 of Lecture Notes in
Computer Science, pages 84-98. Springer-Verlag, 2003.
[ bibtex |
abstract |
DOI ]
A goal-independent suspension analysis is presented
that infers a class of goals for which a logic
program with delay can be executed without
suspension. The crucial point is that the analysis
does not verify that an (abstract) goal does not
lead to suspension but infers (abstract) goals
that do not lead to suspension. The analysis
has applications in debugging, verification and
program optimisation.
@inproceedings{GenaimK03,
author = {Samir Genaim and Andy King},
title = {{G}oal-{I}ndependent {S}uspension {A}nalysis for {L}ogic {P}rograms with {D}ynamic {S}cheduling},
editor = {Pierpaolo Degano},
booktitle = {Proceedings of the 12th European Symposium on Programming (ESOP'03)},
pages = {84--98},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
isbn = {978-3-540-00886-6},
issn = {0302-9743},
volume = {2618},
year = {2003},
doi = {10.1007/3-540-36575-3_7},
projects = {},
rank:acronym = {ESOP},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A)}
}
|
[47]
|
Maurice Bruynooghe, Michael Codish, Samir Genaim, and Wim Vanhoof.
Reuse of Results in Termination Analysis of Typed Logic
Programs.
In Manuel V. Hermenegildo and Germán Puebla, editors,
Proceedings of the 9th International Symposium on Static Analysis (SAS'02),
volume 2477 of Lecture Notes in Computer Science, pages 477-492.
Springer-Verlag, 2002.
[ bibtex |
abstract |
DOI ]
Recent works by the authors address the problem of
automating the selection of a candidate norm for the
purpose of termination analysis. These works
illustrate a powerful technique in which a
collection of simple type-based norms - one for
each data type in the program - are combined
together to provide the candidate norm. The current
paper extends these results by investigating type
polymorphism. We show that by considering
polymorphic types we reduce, without sacrificing
precision, the number of type-based norms which
should be combined to provide the candidate norm.
Moreover, we show that when a generic polymorphic
typed program component occurs in one or more
specific type contexts, we need not reanalyse
it. All of the information concerning its
termination and its effect on the termination of
other predicates in that context can be derived
directly from the context independent analysis of
that component based on norms derived from the
polymorphic types.
@inproceedings{BruynoogheCGV02,
author = {Maurice Bruynooghe and Michael Codish and Samir Genaim and Wim Vanhoof},
title = {{R}euse of {R}esults in {T}ermination {A}nalysis of {T}yped {L}ogic {P}rograms},
booktitle = {Proceedings of the 9th International Symposium on Static Analysis (SAS'02)},
editor = {Manuel V. Hermenegildo and Germ{\'a}n Puebla},
pages = {477--492},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2477},
year = {2002},
isbn = {978-3-540-44235-6},
issn = {0302-9743},
doi = {10.1007/3-540-45789-5_33},
projects = {},
rank:acronym = {SAS},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A-)}
}
|
[48]
|
Samir Genaim, Michael Codish, John P. Gallagher, and Vitaly Lagoon.
Combining Norms to Prove Termination.
In Agostino Cortesi, editor, Proceedings of the 3rd
International Workshop on Verification, Model Checking, and Abstract
Interpretation (VMCAI'02), volume 2294 of Lecture Notes in Computer
Science, pages 126-138. Springer-Verlag, 2002.
[ bibtex |
abstract |
DOI ]
Automatic termination analysers typically measure the
size of terms applying norms which are mappings from
terms to the natural numbers. This paper illustrates
how to enable the use of size functions defined as
tuples of these simpler norm functions. This
approach enables us to simplify the problem of
deriving automatically a candidate norm with which
to prove termination. Instead of deriving a single,
complex norm function, it is sufficient to determine
a collection of simpler norms, some combination of
which, leads to a proof of termination. We propose
that a collection of simple norms, one for each of
the recursive data-types in the program, is often a
suitable choice. We first demonstrate the power of
combining norm functions and then the adequacy of
combining norms based on regular types.
@inproceedings{GenaimCGL02,
author = {Samir Genaim and Michael Codish and John P. Gallagher and Vitaly Lagoon},
title = {{C}ombining {N}orms to {P}rove {T}ermination},
booktitle = {Proceedings of the 3rd International Workshop on Verification, Model Checking, and Abstract Interpretation (VMCAI'02)},
editor = {Agostino Cortesi},
pages = {126--138},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2294},
year = {2002},
isbn = {978-3-540-43631-7},
issn = {0302-9743},
doi = {10.1007/3-540-47813-2_9},
projects = {},
rank:acronym = {VMCAI},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A-)}
}
|
[49]
|
Samir Genaim and Michael Codish.
The Def-inite Approach to Dependency Analysis.
In David Sands, editor, Proceedings of the 10th European
Symposium on Programming (ESOP'01), volume 2028 of Lecture Notes in
Computer Science, pages 417-432. Springer-Verlag, 2001.
[ bibtex |
abstract |
DOI ]
We propose a new representation for the domain of
Definite Boolean functions. The key idea is to view
the set of models of a Boolean function as an
incidence relation between variables and
models. This enables two dual representations: the
usual one, in terms of models, specifying which
variables they contain; and the other in terms of
variables, specifying which models contain them. We
adopt the dual representation which provides a clean
theoretical basis for the definition of efficient
operations on Def in terms of classic
ACI1 unification theory. Our approach illustrates
in an interesting way the relation of Def
to the well-known set-Sharing domain
which can also be represented in terms of sets of
models and ACI1 unification. From the practical
side, a prototype implementation provides promising
results which indicate that this representation
supports efficient groundness analysis using
Def formula. Moreover, widening on this
representation is easily defined.
@inproceedings{GenaimC01b,
author = {Samir Genaim and Michael Codish},
title = {{T}he {D}ef-inite {A}pproach to {D}ependency {A}nalysis},
booktitle = {Proceedings of the 10th European Symposium on Programming (ESOP'01)},
editor = {David Sands},
pages = {417--432},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2028},
year = {2001},
isbn = {978-3-540-41862-7},
issn = {0302-9743},
doi = {10.1007/3-540-45309-1_28},
projects = {},
rank:acronym = {ESOP},
rank:aneca:class = {\anecar},
rank:grin:class = {Class 2 (A)}
}
|
[50]
|
Michael Codish, Samir Genaim, Harald Søndergaard, and Peter J. Stuckey.
Higher-Precision Groundness Analysis.
In Philippe Codognet, editor, Proceedings of the 17th
International Conference on Logic Programming (ICLP'01), volume 2237 of
Lecture Notes in Computer Science, pages 135-149. Springer-Verlag, 2001.
[ bibtex |
abstract |
DOI ]
Groundness analysis of logic programs using Pos-based
abstract interpretation is one of the clear success
stories of the last decade in the area of logic
program analysis. In this work we identify two
problems with the Pos domain, the multiplicity and
sign problems, that arise independently in
groundness and uniqueness analysis. We describe how
these problems can be solved using an analysis based
on a domain Size for inferring term size relations.
However this solution has its own shortcomings
because it involves a widening operator which leads
to a loss of Pos information. Inspired by Pos, Size
and the LSign domain for abstract linear arithmetic
constraints we introduce a new domain LPos, and show
how it can be used for groundness and uniqueness
analysis. The idea is to use the sign information of
LSign to improve the widening of Size so that it
does not lose Pos information. We prove that the
resulting analyses using LPos are uniformly more
precise than those using Pos.
@inproceedings{CodishGSS01,
author = {Michael Codish and Samir Genaim and Harald S{\o}ndergaard and Peter J. Stuckey},
title = {{H}igher-{P}recision {G}roundness {A}nalysis},
booktitle = {Proceedings of the 17th International Conference on Logic Programming (ICLP'01)},
editor = {Philippe Codognet},
pages = {135--149},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2237},
year = {2001},
isbn = {978-3-540-42935-7},
issn = {0302-9743},
doi = {10.1007/3-540-45635-X_17},
projects = {},
rank:acronym = {ICLP},
rank:aneca:class = {\anecar},
rank:aneca:note = {El congreso ICLP es relevante, el mejor en el area de programaci\'on l\'ogica con un \'indice de aceptaci\'on competitivo, no aparece en el ranking GII-GRIN-SCIE porque las actas del congreso se publican en revista en los \'ultimos a\~nos}
}
|
[51]
|
Samir Genaim and Michael Codish.
Inferring Termination Conditions for Logic Programs Using
Backwards Analysis.
In Robert Nieuwenhuis and Andrei Voronkov, editors, Proceedings
of the 8th International Conference on Logic for Programming, Artificial
Intelligence, and Reasoning (LPAR'01), volume 2250 of Lecture Notes in
Computer Science, pages 685-694. Springer-Verlag, 2001.
[ bibtex |
abstract |
DOI ]
This paper focuses on the inference of modes for which
a logic program is guaranteed to terminate. This
generalizes traditional termination analysis where
an analyzer tries to verify termination for a
specified mode. The contribution is a methodology
which combines traditional termination analysis and
backwards analysis to obtain termination inference.
We demonstrate the application of this methodology
to enhance a termination analyzer to perform also
termination inference.
@inproceedings{GenaimC01a,
author = {Samir Genaim and Michael Codish},
title = {{I}nferring {T}ermination {C}onditions for {L}ogic {P}rograms {U}sing {B}ackwards {A}nalysis},
booktitle = {Proceedings of the 8th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning (LPAR'01)},
editor = {Robert Nieuwenhuis and Andrei Voronkov},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = 2250,
isbn = {978-3-540-42957-9},
issn = {0302-9743},
year = 2001,
pages = {685--694},
doi = {10.1007/3-540-45653-8_47},
projects = {},
rank:acronym = {LPAR},
rank:grin:class = {Class 3 (B)}
}
|
[52]
|
Samir Genaim and Michael Codish.
Incremental Refinement of Semantic Based Program Analysis
for Logic Programs.
In Jenny Edwards, editor, Proceedings of the 22nd Australasian
Computer Science Conference (ACSC'99), volume 21(1) of Australian
Computer Science Communications, pages 348-359. Singapore ; New York :
Springer, 1999.
[ bibtex |
abstract |
DOI |
.ps ]
This paper is concerned with the incremental
refinement of semantic based program analyses. We
address the following question: How can we use the
result of a less precise program analysis to
accelerate a more precise analysis of the same
program. We describe an approach based on abstract
program specialisation in which any approximation to
the result of a (more refined) program analysis is
used to specialise the evaluation process. When
analyses are based on abstract compilation this
boils down to adding constraints in the abstract
program which narrow its search space and accelerate
fix point evaluation based on re-computation. We
illustrate several examples of incremental analysis
for logic programs.
@inproceedings{GenaimC99,
author = {Samir Genaim and Michael Codish},
title = {{I}ncremental {R}efinement of {S}emantic {B}ased {P}rogram {A}nalysis for {L}ogic {P}rograms},
booktitle = {Proceedings of the 22nd Australasian Computer Science Conference (ACSC'99)},
editor = {Jenny Edwards},
series = {Australian Computer Science Communications},
volume = {21(1)},
year = {1999},
pages = {348--359},
publisher = {Springer},
address = {Singapore; New York},
ee = {http://www.worldcat.org/title/computer-science-99-proceedings-of-the-22nd-australasian-computer-science-conference-acsc99-auckland-new-zealand-18-21-january-1999/oclc/41549430},
isbn = {9789814021548},
projects = {},
rank:acronym = {ACSC}
}
|
[1]
|
Jesús Doménech and Samir Genaim.
Termination Analysis of Programs with Multiphase
Control-Flow.
In Hossein Hojjat and Bishoksan Kafle, editors, Proceedings 8th
Workshop on Horn Clauses for Verification and Synthesis (HCVS'21), pages
13-21, 2021.
[ bibtex |
abstract |
DOI ]
In this extended abstract we explored the use of
partial evaluation as a control-flow refinement
technique in the context for termination and cost
analysis. Our preliminary experiments show that
partial evaluation can improve both analyses.
@inproceedings{DomenechG21,
author = {Jes{\'{u}}s J. Dom{\'{e}}nech and Samir Genaim},
title = {{T}ermination {A}nalysis of {P}rograms with {M}ultiphase {C}ontrol-{F}low},
booktitle = {Proceedings 8th Workshop on Horn Clauses for Verification and Synthesis (HCVS'21)},
editor = {Hossein Hojjat and Bishoksan Kafle},
pages = {13--21},
year = 2021,
doi = {10.4204/EPTCS.344.2},
projects = {\freetech},
rank:acronym = {HCVS}
}
|
[2]
|
Amir M. Ben-Amram, Jesús Doménech, and Samir Genaim.
Loops for which Multiphase-Linear Ranking Functions are
Sufficient.
In Samir Genaim, editor, Proceedings of the 17th International
Workshop on Termination (WST'21), pages 60-74, 2021.
[ bibtex |
abstract ]
In this extended abstract we explored the use of
partial evaluation as a control-flow refinement
technique in the context for termination and cost
analysis. Our preliminary experiments show that
partial evaluation can improve both analyses.
@inproceedings{Ben-AmramDG21,
author = {Amir M. Ben-Amram and Jes{\'u}s Dom{\'e}nech and Samir Genaim},
title = {{L}oops for which {M}ultiphase-{L}inear {R}anking {F}unctions are {S}ufficient},
booktitle = {Proceedings of the 17th International Workshop on Termination (WST'21)},
editor = {Samir Genaim},
pages = {60--74},
year = 2021,
projects = {\freetech},
rank:acronym = {WST}
}
|
[3]
|
Alicia Merayo and Samir Genaim.
Inference of Linear Upper-Bounds on the Expected Cost by Solving Cost
Relations.
In Salvador Lucas, editor, Proceedings of the 16th International
Workshop on Termination (WST'18), pages 60-64, 2018.
[ bibtex |
abstract ]
In this extended abstract, we report on preliminary
results in this direction implementation that can
infer linear upper-bounds on the expected cost of
simple control-flow graphs by solving corresponding
cost relations.
@inproceedings{MerayoG18,
author = {Alicia Merayo and Samir Genaim},
title = {Inference of Linear Upper-Bounds on the Expected Cost by Solving Cost Relations},
booktitle = {Proceedings of the 16th International Workshop on Termination (WST'18)},
pages = {60--64},
year = {2018},
editor = {Salvador Lucas},
projects = {},
rank:acronym = {WST}
}
|
[4]
|
Jesús Doménech, Samir Genaim, and John P. Gallagher.
Control-Flow Refinement via Partial Evaluation.
In Salvador Lucas, editor, Proceedings of the 16th International
Workshop on Termination (WST'18), pages 55-59, 2018.
[ bibtex |
abstract ]
In this extended abstract we explored the use of
partial evaluation as a control-flow refinement
technique in the context for termination and cost
analysis. Our preliminary experiments show that
partial evaluation can improve both analyses.
@inproceedings{DomenechGG18,
author = {Jes{\'u}s Dom{\'e}nech and Samir Genaim and John P. Gallagher},
title = {{C}ontrol{-}{F}low {R}efinement via {P}artial {E}valuation},
booktitle = {Proceedings of the 16th International Workshop on Termination (WST'18)},
editor = {Salvador Lucas},
pages = {55--59},
year = 2018,
projects = {},
rank:acronym = {WST}
}
|
[5]
|
Elvira Albert, Samir Genaim, and Guillermo Román-Díez.
Conditional Termination of Loops over Arrays.
In Proceedings of the 7th Workshop on Bytecode Semantics,
Verification, Analysis and Transformation (Bytecode'12), 2012.
[ bibtex |
abstract ]
This paper studies conditional termination of loops
that involve array accesses, including loops which
traverse circularly arrays, loops which use as loop
bound or as loop counter an array element, etc. This
kind of loops are commonly used, and their
termination can only be proven if array contents are
tracked. We present a new termination analysis for
sequential bytecode programs which performs three
main steps: (1) it first statically infers whether
the memory locations of arrays remain constant along
the execution of the loop and also whether there are
aliases to such locations; (2) based on the inferred
information, we provide sufficient conditions which
guarantee if array accesses can be transformed into
local variables; (3) when the conditions cannot be
proven, we generate conditional termination
assertions which, if satisfied, termination is
guaranteed.
@inproceedings{AlbertGR12,
author = {Elvira Albert and Samir Genaim and Guillermo Rom{\'a}n-D\'{\i}ez},
title = {{C}onditional {T}ermination of {L}oops over {A}rrays},
booktitle = {Proceedings of the 7th Workshop on Bytecode Semantics, Verification, Analysis and Transformation (Bytecode'12)},
year = 2012,
projects = {},
rank:acronym = {BYTECODE}
}
|
[6]
|
Diego Alonso, Puri Arenas, and Samir Genaim.
Handling non-Linear Operations in the Value Analysis of
COSTA.
Electronic Notes in Theoretical Computer Science, 279(1):3-17,
2011.
Presented at Bytecode'10.
[ bibtex |
abstract |
DOI ]
Inferring precise relations between (the values of)
program variables at different program points is
essential for termination and resource usage
analysis. In both cases, this information is used to
synthesize ranking functions that imply the
program's termination and bound the number of
iterations of its loops. For efficiency, it is
common to base value analysis on
non-disjunctive abstract domains such as Polyhedra,
Octagon, etc. While these domains are efficient and
able to infer complex relations for a wide class of
programs, they are often not sufficient for modeling
the effect of non-linear and bit arithmetic
operations. Modeling such operations precisely can
be done by using more sophisticated abstract
domains, at the price of performance overhead. In
this paper we report on the value analysis of
COSTA that is based on the idea of encoding
the disjunctive nature of non-linear operations into
the (abstract) program itself, instead of using more
sophisticated abstract domains. Our experiments
demonstrate that COSTA is able to prove
termination and infer bounds on resource consumption
for programs that could not be handled before.
@article{AlonsoAG11,
author = {Diego Alonso and Puri Arenas and Samir Genaim},
title = {{H}andling non-{L}inear {O}perations in the {V}alue {A}nalysis of {COSTA}},
journal = {Electronic Notes in Theoretical Computer Science},
editor = {Pierre Ganty and Mark Marron},
publisher = {Elsevier},
issn = {1571-0661},
volume = {279},
number = {1},
year = {2011},
pages = {3--17},
doi = {10.1016/j.entcs.2011.11.002},
note = {Presented at Bytecode'10},
projects = {},
rank:acronym = {BYTECODE}
}
|
[7]
|
Samir Genaim and Damiano Zanardini.
The Acyclicity Inference of COSTA.
In Proceedings of the 11th International Workshop on
Termination, 2010.
[ bibtex |
abstract ]
The paper summarizes a recent work about inference of
acyclicity in COSTA.
@inproceedings{GenaimZ10,
author = {Samir Genaim and Damiano Zanardini},
title = {{T}he {A}cyclicity {I}nference of {COSTA}},
booktitle = {Proceedings of the 11th International Workshop on Termination},
year = 2010,
projects = {},
rank:acronym = {WST}
}
|
[8]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, German
Puebla, Diana V. Ramírez-Deantes, Guillermo Román-Díez, and
Damiano Zanardini.
Termination and Cost Analysis with COSTA and its User Interfaces.
Electronic Notes in Theoretical Computer Science,
258(1):109-121, 2009.
Presented at PROLE'09.
[ bibtex |
abstract |
DOI ]
COSTA is a static analyzer for Java bytecode which is
able to infer cost and termination information for
large classes of programs. The analyzer takes as
input a program and a resource of interest, in the
form of a cost model, and aims at obtaining an upper
bound on the execution cost with respect to the
resource and at proving program termination. The
COSTA system has reached a considerable degree of
maturity in that (1) it includes state-of-the-art
techniques for statically estimating the resource
consumption and the termination behavior of
programs, plus a number of specialized techniques
which are required for achieving accurate results in
the context of object-oriented programs, such as
handling numeric fields in value analysis; (2) it
provides several non-trivial notions of cost
(resource consumption) including, in addition to the
number of execution steps, the amount of memory
allocated in the heap or the number of calls to some
user-specified method; (3) it provides several user
interfaces: a classical command line, a Web
interface which allows experimenting remotely with
the system without the need of installing it
locally, and a recently developed Eclipse plugin
which facilitates usage of the analyzer, even during
the development phase; (4) it can deal with both the
Standard and Micro editions of Java. In the tool
demonstration, we will show that COSTA is able to
produce meaningful results for non-trivial programs,
possibly using Java libraries. Such results can then
be used in many applications, including program
development, resource usage certification, program
optimization, etc.
@article{AlbertAGGPRRZ09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and German Puebla and Diana V. Ram\'{\i}rez-Deantes and Guillermo Rom{\'a}n-D\'{\i}ez and Damiano Zanardini},
title = {Termination and Cost Analysis with COSTA and its User Interfaces},
journal = {Electronic Notes in Theoretical Computer Science},
editor = {Paqui Lucio and Gin\'{e}s Moreno and Ricardo Pe\~{n}a},
publisher = {Elsevier},
issn = {1571-0661},
volume = {258},
number = {1},
year = {2009},
pages = {109--121},
doi = {10.1016/j.entcs.2009.12.008},
note = {Presented at PROLE'09},
projects = {},
rank:acronym = {PROLE}
}
|
[9]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Cost Relation Systems: A Language-Independent Target Language for
Cost Analysis.
Electronic Notes in Theoretical Computer Science, 248:31-46,
2009.
Presented at PROLE'08.
[ bibtex |
abstract |
DOI ]
Cost analysis aims at obtaining information about the
execution cost of programs. This paper studies
cost relation systems (CESs): the sets of
recursive equations used in cost analysis in order
to capture the execution cost of programs in terms
of the size of their input arguments. We investigate
the notion of CES from a general perspective which
is independent of the particular cost analysis
framework. Our main contributions are: we provide a
formal definition of execution cost and of CES which
is not tied to a particular programming language; we
present the notion of sound CES, i.e., which
correctly approximates the cost of the corresponding
program; we identify the differences with recurrence
relation systems, its possible applications and the
new challenges that they bring about. Our general
framework is illustrated by instantiating it to cost
analysis of Java bytecode, Haskell, and Prolog.
@article{AlbertAGP09a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {Cost Relation Systems: A Language-Independent Target Language for Cost Analysis},
journal = {Electronic Notes in Theoretical Computer Science},
editor = {Jes\'{u}s M. Almendros-Jim\'{e}nez},
publisher = {Elsevier},
issn = {1571-0661},
volume = {248},
year = {2009},
pages = {31--46},
doi = {10.1016/j.entcs.2009.07.057},
note = {Presented at PROLE'08},
projects = {},
rank:acronym = {PROLE}
}
|
[10]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, Diana Ramírez,
and Damiano Zanardini.
Upper Bounds of Resource Usage for Java Bytecode using
COSTA and its Web Interface.
In Workshop on Resource Analysis, 9th September 2008 University
of Hertfordshire, Hatfield, UK, 2008.
[ bibtex |
abstract ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
non-trivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
@inproceedings{AlbertAGPRZ08b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram\'irez and Damiano Zanardini},
title = {{U}pper {B}ounds of {R}esource {U}sage for {J}ava {B}ytecode using {COSTA} and its {W}eb {I}nterface},
booktitle = {Workshop on Resource Analysis, 9th September 2008 University of Hertfordshire, Hatfield, UK},
year = 2008,
projects = {},
rank:acronym = {WRA}
}
|
[11]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, Diana Ramírez,
and Damiano Zanardini.
The COSTA Cost and Termination Analyzer for Java Bytecode
and its Web Interface (Tool Demo).
In The 22nd European Conference on Object-Oriented Programming
(ECOOP'08), 2008.
[ bibtex |
abstract ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
non-trivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
@inproceedings{AlbertAGPRZ08a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram\'irez and Damiano Zanardini},
title = {The {COSTA} {C}ost and {T}ermination {A}nalyzer for {J}ava {B}ytecode and its {W}eb {I}nterface (Tool Demo)},
booktitle = {The 22nd European Conference on Object-Oriented Programming (ECOOP'08)},
year = 2008,
projects = {},
rank:acronym = {ECOOP}
}
|
[12]
|
Samir Genaim and Fausto Spoto.
Constancy Analysis.
In The 10th Workshop on Formal Techniques for Java-like Programs
(FTfJP'08), 2008.
[ bibtex |
abstract ]
A reference variable x is constant in a piece of
code C if the execution of C does not modify the
heap structure reachable from x. This information
lets us infer purity of method arguments, an
important ingredient during the analysis of programs
dealing with dynamically allocated data
structures. We define here an abstract domain
expressing constancy as an abstract interpretation
of concrete denotations. Then we define the induced
abstract denotational semantics for Java-like
programs and show how constancy information improves
the precision of existing static analyses such as
sharing, cyclicity and path-length.
@inproceedings{GenaimS08,
author = {Samir Genaim and Fausto Spoto},
title = {{C}onstancy {A}nalysis},
booktitle = {The 10th Workshop on Formal Techniques for Java-like Programs (FTfJP'08)},
year = 2008,
projects = {},
rank:acronym = {FTfJP}
}
|
[13]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Dealing with Numeric Fields in Termination Analysis of
Java-like Languages.
In The 10th Workshop on Formal Techniques for Java-like Programs
(FTfJP'08), 2008.
[ bibtex |
abstract ]
Termination analysis tools strive to find proofs of
termination for as wide a class of (terminating)
programs as possible. Though several tools exist
which are able to prove termination of non-trivial
programs, when one tries to apply them to realistic
programs, there are still a number of open
problems. In the case of Java-like languages, one of
such problems is to find a practical solution to
prove termination when the termination behaviour of
loops is affected by numeric fields. We have
performed statistics on the Java libraries to see
how often this happens in practice and we found that
in 12.95% of cases, the number of iterations of
loops (and therefore termination) explicitly depends
on values stored in fields and, in the vast majority
of cases, such fields are numeric. Inspired by the
examples found in the libraries, this paper
identifies a series of difficulties that need to be
solved in order to deal with numeric fields in
termination and propose some ideas towards a
lightweight analysis which is able to prove
termination of sequential Java-like programs in the
presence of numeric fields.
@inproceedings{AlbertAGP08a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{D}ealing with {N}umeric {F}ields in {T}ermination {A}nalysis of {J}ava-like {L}anguages},
booktitle = {The 10th Workshop on Formal Techniques for Java-like Programs (FTfJP'08)},
year = 2008,
projects = {},
rank:acronym = {FTfJP}
}
|
[14]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
COSTA: A Cost and Termination Analyzer for Java
Bytecode.
In The 3rd Workshop on Bytecode Semantics, Verification,
Analysis and Transformation (BYTECODE'08), Budapest, Hungary, 2008.
[ bibtex |
abstract ]
This paper describes COSTA, a cost and
termination analyzer for Java bytecode. The
system receives as input a bytecode program and a
selection of a resources of interest, and
tries to bound the resource consumption of the
program with respect to such a cost model.
COSTA provides several non-trivial notions of
resource, as the consumption of the heap, the number
of bytecode instructions executed, the number of
calls to a specific method (e.g., the library method
for sending text messages in mobile phones), etc.
The system uses the same machinery to infer
upper bounds on cost, and for proving
termination (which also implies the
boundedness of any resource consumption). The demo
will describe the architecture of COSTA and show it
on a series of programs for which interesting bounds
w.r.t. the above notions of resource can be
obtained. Also, examples for which the system
cannot obtain upper bounds, but can prove
termination, will be presented.
@inproceedings{AlbertAGPZ08a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{COSTA}: {A} {C}ost and {T}ermination {A}nalyzer for {J}ava {B}ytecode},
booktitle = {The 3rd Workshop on Bytecode Semantics, Verification, Analysis and Transformation (BYTECODE'08)},
year = 2008,
address = {Budapest, Hungary},
projects = {},
rank:acronym = {BYTECODE}
}
|
[15]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Applications of Static Slicing in Cost Analysis of Java
Bytecode.
In The 3rd International Workshop on Programming Language
Interference and Dependence (PLID'07), 2007.
[ bibtex |
abstract ]
In this abstract, we discuss several aspects of the
role of static slicing to minimize the number
of arguments which need to be taken into account in
CES. We will focus on two benefits of eliminating
the arguments which do not have an impact on the
cost of the program. On one hand, analysis can be
more efficient if we reduce the number of
variables. And also CES are more likely to be solved
by standard CAS.
@inproceedings{AlbertAGPZ07c,
  author       = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
  title        = {{A}pplications of {S}tatic {S}licing in {C}ost {A}nalysis of {J}ava {B}ytecode},
  booktitle    = {The 3rd International Workshop on Programming Language Interference and Dependence (PLID'07)},
  year         = {2007},
  projects     = {},
  rank:acronym = {PLID}
}
|
[16]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
A Generic Framework for the Cost Analysis of Java
Bytecode.
In Pimentel Ernesto, editor, Proceedings of the 2007 Spanish
Conference on Programming and Computer Languages (PROLE'07), pages 61-70,
2007.
[ bibtex |
abstract ]
Cost analysis of Java bytecode is complicated by its
unstructured control flow, the use of an operand
stack and its object-oriented programming features
(like dynamic dispatching). This paper addresses
these problems and develops a generic framework for
the automatic cost analysis of sequential Java
bytecode. Our method generates cost
relations which define at compile-time the cost of
programs as a function of their input data size. To
the best of our knowledge, this is the first
approach to the automatic cost analysis of Java
bytecode.
@inproceedings{AlbertAGPZ07b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {A {G}eneric {F}ramework for the {C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Proceedings of the 2007 Spanish Conference on Programming and Computer Languages (PROLE'07)},
year = 2007,
pages = {61--70},
editor = {Pimentel Ernesto},
isbn = {978-840-9732-599-8},
projects = {},
rank:acronym = {PROLE}
}
|
[17]
|
Elvira Albert, Puri Arenas, Michael Codish, Samir Genaim, Germán Puebla,
and Damiano Zanardini.
Termination Analysis of Java Bytecode.
In Alexander Serebrenik and Dieter Hofbauer, editors, The 9th
International Workshop on Termination (WST'07), pages 38-42, 2007.
[ bibtex |
abstract ]
In this work, we describe a termination analysis for
Java bytecode, based on a translation into a
structured intermediate representation. This
representation captures in a uniform setting all
loops (regardless of whether they originate from
recursive calls or iterative loops) and all
variables (either local variables or operand stack
elements). Given this representation we consider a
general form of the size-change graphs
principle to prove termination of the intermediate
program, which in turn implies the termination of
the original program.
@inproceedings{AlbertACGPZ07,
author = {Elvira Albert and Puri Arenas and Michael Codish and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{T}ermination {A}nalysis of {J}ava {B}ytecode},
booktitle = {The 9th International Workshop on Termination (WST'07)},
editor = {Alexander Serebrenik and Dieter Hofbauer},
pages = {38--42},
year = 2007,
projects = {},
rank:acronym = {WST}
}
|
[18]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Experiments in Cost Analysis of Java Bytecode.
Electronic Notes in Theoretical Computer Science,
190(1):67-83, 2007.
Presented at BYTECODE'07.
[ bibtex |
abstract |
DOI ]
Recently, we proposed a general framework for the cost
analysis of Java bytecode which can be used for
measuring resource usage. This analysis generates,
at compile-time, cost relations which define
the cost of programs as a function of their input
data size. The purpose of this paper is to assess
the practicality of such cost analysis by
experimentally evaluating a prototype analyzer
implemented in CIAO. With this aim, we approximate
the computational complexity of a set of selected
benchmarks, including both well-known algorithms
which have been used to evaluate existing cost
analyzers in other programming paradigms, and other
benchmarks which illustrate object-oriented
features. In our evaluation, we first study whether
the generated cost relations can be automatically
solved. Our experiments show that in some cases the
inferred cost relations can be automatically solved
by using the Mathematica system, whereas, in other
cases, some prior manipulation is required for the
equations to be solvable. Moreover, we
experimentally evaluated the running time of the
different phases of the analysis process. Overall,
we believe our experiments show that the efficiency
of our cost analysis is acceptable, and that the
obtained cost relations are useful in practice
since, at least in our experiments, it is possible
to get a closed form solution.
@article{AlbertAGPZ07a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{E}xperiments in {C}ost {A}nalysis of {J}ava {B}ytecode},
journal = {Electronic Notes in Theoretical Computer Science},
editor = {Marieke Huisman and Fausto Spoto},
publisher = {Elsevier},
issn = {1571-0661},
volume = {190},
number = {1},
year = {2007},
pages = {67--83},
doi = {10.1016/j.entcs.2007.02.061},
note = {Presented at BYTECODE'07},
projects = {},
rank:acronym = {BYTECODE}
}
|
[19]
|
Samir Genaim and Fausto Spoto.
Information Flow Analysis for Java Bytecode.
In Roberto Giacobazzi, editor, The 1st International Workshop on
Programming Language Interference and Dependence (PLID'04), 2004.
[ bibtex |
abstract ]
We present a context-sensitive compositional analysis
of information flow for full (mono-threaded) Java
bytecode. Our idea consists in transforming the
Java bytecode into a control-flow graph of
basic blocks of code such that the complex
features of the Java bytecode are made explicit. The
analysis is based on modeling the information flow
dependencies with Boolean functions which leads to
an accurate analysis and efficient implementation
which uses Binary Decision Diagrams to manipulate
Boolean functions. To the best of our knowledge, it
is the first implementation of an analysis of
information flow for the full Java bytecode. The
work is still in progress but it already supports
a quite large portion of the Java bytecode which
includes exceptions and the subroutine handling
mechanism.
@inproceedings{GenaimS04,
  author       = {Samir Genaim and Fausto Spoto},
  title        = {{I}nformation {F}low {A}nalysis for {J}ava {B}ytecode},
  booktitle    = {The 1st International Workshop on Programming Language Interference and Dependence (PLID'04)},
  editor       = {Roberto Giacobazzi},
  year         = {2004},
  projects     = {},
  rank:acronym = {PLID}
}
|
[20]
|
Samir Genaim, Roberto Giacobazzi, and Isabella Mastroeni.
Modeling Secure Information Flow with Boolean Functions.
In IFIP WG 1.7, ACM SIGPLAN and GI FoMSESS Workshop on Issues in
the Theory of Security (WITS'04), pages 55-66, 2004.
[ bibtex |
abstract ]
In this paper we describe two uses of Boolean
functions in the context of secure information flow
analysis. The first contribution concerns with
modeling information flow with Boolean functions,
which leads to an accurate information flow analysis
that captures dependencies between possible
flows. These dependencies are useful for debugging;
refining the notion of secure information flow; and
achieving efficient implementation using
sophisticated data structures like Binary Decision
Diagrams. The second contribution concerns with
analyzing dynamic security policies. We describe how
to construct a Boolean function, such that its
models describe possible non-interference sets of
program variables. This can be used to enforce
security classes dynamically, rather than
re-analyzing the program.
@inproceedings{GenaimGM04,
author = {Samir Genaim and Roberto Giacobazzi and Isabella Mastroeni},
title = {{M}odeling {S}ecure {I}nformation {F}low with {B}oolean {F}unctions},
booktitle = {IFIP WG 1.7, ACM SIGPLAN and GI FoMSESS Workshop on Issues in the Theory of Security (WITS'04)},
pages = {55--66},
year = 2004,
projects = {},
rank:acronym = {WITS}
}
|
[21]
|
Michael Codish and Samir Genaim.
Proving Termination One Loop at a Time.
In Frédéric Mesnard and Alexander Serebrenik, editors,
Proceedings of the 13th International Workshop on Logic Programming
Environments (WLPE'03), Technical Report CW371, pages 48-59. Katholieke
Universiteit Leuven, Department of Computer Science, Celestijnenlaan 200A,
B-3001 Heverlee (Belgium), 2003.
[ bibtex |
abstract ]
Classic techniques for proving termination require the
identification of a measure mapping program states
to the elements of a well founded
domain. Termination is guaranteed if this measure is
shown to decrease with each iteration of a loop in
the program. This is a global termination
condition - there exists a measure which is shown
to decrease over all of the loops in the program.
In recent years, systems based on local
termination conditions are emerging. Here,
termination is guaranteed if for every loop there
exists a measure which decreases as execution
follows through that loop. In this paper we
question the relationship between the two
approaches. Reasoning locally is more
convenient. But is the local approach really more
powerful? We show that for a large class of
termination problems, a global termination condition
can be derived by an efficient algorithm given the
corresponding local conditions. However, for the
general case, this approach does not always apply
and it is not always clear how to construct a global
condition from the local ones.
@inproceedings{CodishG03,
  author       = {Michael Codish and Samir Genaim},
  title        = {{P}roving {T}ermination {O}ne {L}oop at a {T}ime},
  editor       = {Fr{\'e}d{\'e}ric Mesnard and Alexander Serebrenik},
  booktitle    = {Proceedings of the 13th International Workshop on Logic Programming Environments (WLPE'03)},
  series       = {Technical Report CW371},
  pages        = {48--59},
  publisher    = {Katholieke Universiteit Leuven, Department of Computer Science, Celestijnenlaan 200A, B-3001 Heverlee (Belgium)},
  year         = {2003},
  projects     = {},
  rank:acronym = {WLPE}
}
|
[22]
|
Michael Codish, Samir Genaim, Maurice Bruynooghe, John P. Gallagher, and Wim Vanhoof.
One Loop at a Time.
In Albert Rubio, editor, Proceedings of the 6th International
Workshop on Termination (WST'03), Technical Report DSIC-II/15/03, pages
1-4. Departamento de Sistemas Informáticos y Computación,
Universidad Politécnica de Valencia, 2003.
[ bibtex |
abstract ]
Classic techniques for proving termination require to
identify a measure mapping program states to the
elements of a well founded domain and to show that
this measure decreases with each iteration of a loop
in the program. This is a global termination
condition - there is a single measure which must
be shown to decrease over all of the loops in the
program. In this abstract we look at systems based
on local termination conditions which allow
to involve different well founded domains and
termination measures for different loops in the
program. We illustrate the practical advantages of
applying local criteria in automated termination
proving systems and demonstrate how Ramsey's Theorem
clarifies their formal justification.
@inproceedings{CodishGBGV03,
  author       = {Michael Codish and Samir Genaim and Maurice Bruynooghe and John P. Gallagher and Wim Vanhoof},
  title        = {{O}ne {L}oop at a {T}ime},
  editor       = {Albert Rubio},
  booktitle    = {Proceedings of the 6th International Workshop on Termination (WST'03)},
  series       = {Technical Report DSIC-II/15/03},
  pages        = {1--4},
  publisher    = {Departamento de Sistemas Inform{\'a}ticos y Computaci{\'o}n, Universidad Polit{\'e}cnica de Valencia},
  year         = {2003},
  projects     = {},
  rank:acronym = {WST}
}
|
[23]
|
Samir Genaim and Michael Codish.
Inferring Termination Conditions for Logic Programs using
Backwards Analysis.
In Luís Moniz Pereira and Paulo Quaresma, editors,
Proceedings of APPIA-GULP-PRODE'01 Joint Conference on Declarative
Programming (AGP'01), pages 229-243. Departamento de Informática,
Universidade de Évora, 2001.
[ bibtex |
abstract |
.pdf ]
This paper focuses on the inference of modes for which
a logic program is guaranteed to terminate. This
generalizes traditional termination analysis where
an analyzer tries to verify termination for a
specified mode. The contribution is a methodology
which combines traditional termination analysis and
backwards analysis to obtain termination inference.
We demonstrate the application of this methodology
to enhance a termination analyzer to perform also
termination inference.
@inproceedings{GenaimC01,
  author       = {Samir Genaim and Michael Codish},
  title        = {{I}nferring {T}ermination {C}onditions for {L}ogic {P}rograms using {B}ackwards {A}nalysis},
  booktitle    = {Proceedings of APPIA-GULP-PRODE'01 Joint Conference on Declarative Programming (AGP'01)},
  editor       = {Lu{\'{\i}}s Moniz Pereira and Paulo Quaresma},
  pages        = {229--243},
  publisher    = {Departamento de Inform{\'{a}}tica, Universidade de {\'{E}}vora},
  url          = {http://www.di.uevora.pt/~pq/agp01/finals/13.pdf},
  year         = {2001},
  projects     = {},
  rank:acronym = {AGP}
}
|
[24]
|
Muhammad Abozaed, Samir Genaim, and Michael Codish.
Optimized Eager Evaluation of Fixed Points for the
Analysis of Logic Programs.
In Workshop on Fixed Points in Computer Science (FICS'01),
2001.
[ bibtex |
abstract ]
We introduce an interpreter which performs optimized
fixed point evaluation based on an eager evaluation
strategy. The interpreter is written in Prolog and
provides a core engine for the analysis of logic
programs. Eager evaluation is the preferred
strategy chosen in several recent analysis
implementations and we demonstrate that our
technique can improve the performance of these
analyzers considerably. The result is a simple and
efficient, yet lightweight approach to implementing
program analyses.
@inproceedings{AbozaedGC01,
  author       = {Muhammad Abozaed and Samir Genaim and Michael Codish},
  title        = {{O}ptimized {E}ager {E}valuation of {F}ixed {P}oints for the {A}nalysis of {L}ogic {P}rograms},
  booktitle    = {Workshop on Fixed Points in Computer Science (FICS'01)},
  year         = {2001},
  projects     = {},
  rank:acronym = {FICS}
}
|
[25]
|
Samir Genaim and Michael Codish.
The Def-inite Approach to Dependency Analysis.
In Proceedings of APPIA-GULP-PRODE'00 Joint Conference on
Declarative Programming (AGP'00), 2000.
[ bibtex |
abstract ]
We propose a new representation for the domain of
Definite Boolean functions. The key idea is to view
the set of models of a Boolean function as an
incidence relation between variables and
models. This enables two dual representations: the
usual one, in terms of models, specifying which
variables they contain; and the other in terms of
variables, specifying which models contain them. We
adopt the dual representation which provides a clean
theoretical basis for the definition of efficient
operations on Def in terms of classic ACI1
unification theory. Our approach illustrates in an
interesting way the relation of Def to the
well-known set-Sharing domain which can also be
represented in terms of sets of models and ACI1
unification. From the practical side, a prototype
implementation provides promising results which
indicate that this representation supports efficient
groundness analysis using Def formula.
@inproceedings{GenaimC00,
author = {Samir Genaim and Michael Codish},
title = {{T}he {$Def$}-inite {A}pproach to {D}ependency {A}nalysis},
booktitle = {Proceedings of APPIA-GULP-PRODE'00 Joint Conference on Declarative Programming (AGP'00)},
year = 2000,
projects = {},
rank:acronym = {AGP}
}
|