[1]
|
Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Germán Puebla, and Guillermo Román-Díez.
Object-Sensitive Cost Analysis for Concurrent Objects.
Software Testing, Verification and Reliability, 25(3):218-271,
2015.
[ bibtex |
abstract |
DOI |
PDF ]
This article presents a novel cost
analysis framework for concurrent objects.
Concurrent objects form a well established model for distributed
concurrent systems. In this model, objects are the
concurrency units which communicate among them via
asynchronous method calls. Cost analysis aims at
automatically approximating the resource consumption of
executing a program in terms of its input parameters. While cost
analysis for sequential programming languages has received
considerable attention, concurrency and distribution have been notably
less studied. The main challenges of cost analysis in a concurrent
setting are:
(1) Inferring precise size abstractions for data in the program in
the presence of shared memory. This information is essential for
bounding the number of iterations of loops.
(2) Distribution suggests that analysis must infer the cost of the
diverse distributed components separately. We handle this by means of a
novel form of object-sensitive recurrence equations which use
cost centers in order to keep the resource usage assigned to
the different components separate.
We have implemented our analysis and evaluated it
on several small applications which are
classical examples of concurrent and distributed programming.
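As a schematic illustration (not an example from the article; the method m, the objects o1 and o2, and the unit costs are hypothetical), the object-sensitive equations have the following flavour, where c(o) denotes the cost center of object o:

C_m(n)    = 2*c(o1) + C_loop(n)
C_loop(n) = 0                          for n <= 0
C_loop(n) = 3*c(o2) + C_loop(n-1)      for n > 0

Solving them yields the upper bound 2*c(o1) + 3*n*c(o2); setting c(o2) = 1 and every other cost center to 0 isolates the resource usage attributed to the component o2.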
@article{AlbertACGGPR15,
author = {Elvira Albert and
Puri Arenas and
Jes\'us Correas and
Samir Genaim and
Miguel G\'omez-Zamalloa and
Germ\'{a}n Puebla and
Guillermo Rom\'an-D\'iez},
title = {{O}bject-{S}ensitive {C}ost {A}nalysis for {C}oncurrent {O}bjects},
journal = {Software Testing, Verification and Reliability},
year = {2015},
publisher = {Wiley},
pages = {218--271},
doi = {10.1002/stvr.1569},
volume = {25},
number = {3},
issn = {0960-0833}
}
|
[2]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
A Practical Comparator of Cost Functions and Its
Applications.
Science of Computer Programming, 111(3):483-504, 2015.
[ bibtex |
abstract |
DOI |
PDF ]
Automatic cost analysis has
significantly advanced in the last few years. Nowadays, a number of
cost analyzers exist which automatically produce upper- and/or
lower-bounds on the amount of resources required to execute a
program. Cost analysis has a number of important applications such as
resource-usage verification and program synthesis and optimization.
For such applications to be successful, it is not sufficient to have
automatic cost analysis. It is also required to have automated means
for handling the analysis results, which are in the form of
Cost Functions (CFs for short), i.e., non-recursive
expressions composed of a relatively small number of types of basic
expressions. In particular, we need automated means for comparing CFs in
order to prove that a CF is smaller than or equal to
another one for all input values of interest.
General function comparison is a hard mathematical problem. Rather
than attacking the general problem, in this work we focus on
comparing CFs by exploiting their syntactic properties
and we present, to the best of our knowledge, the first
practical CF comparator which opens the door to fully automated
applications of cost analysis.
We have implemented the
comparator and made its source code
available online, so that any cost analyzer can use it.
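As a minimal sketch of the syntactic idea (a toy, single-variable, polynomial-only check written in Python; this is not the comparator described in the article), a CF can be represented as a list of (coefficient, degree) terms, and f <= g for all inputs n >= 1 can be proved by matching every term of f against a distinct term of g with at least the same coefficient and degree:

from itertools import permutations

def leq_terms(f, g):
    # f, g: lists of (coefficient, degree) pairs with non-negative coefficients.
    # Succeeds only if every term of f is matched to a distinct term of g with
    # a coefficient and a degree that are at least as large; each matched pair
    # then satisfies a*n^d <= b*n^e for n >= 1, so f(n) <= g(n) for n >= 1.
    # Returning False means "could not prove it", not "f is larger".
    if len(f) > len(g):
        return False
    for chosen in permutations(range(len(g)), len(f)):
        if all(fc <= g[j][0] and fd <= g[j][1]
               for (fc, fd), j in zip(f, chosen)):
            return True
    return False

# n + 2 versus 2*n^2 + 3: provably smaller or equal for every n >= 1
print(leq_terms([(1, 1), (2, 0)], [(2, 2), (3, 0)]))   # True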
@article{AlbertAGP15,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ\'{a}n Puebla},
title = {A {P}ractical {C}omparator of {C}ost {F}unctions and {I}ts {A}pplications},
journal = {Science of Computer Programming},
year = {2015},
publisher = {Elsevier},
pages = {483--504},
volume = {111},
number = {3},
issn = {0167-6423},
doi = {10.1016/j.scico.2014.12.001}
}
|
[3]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Guillermo Román-Díez.
Conditional Termination of Loops over Heap-allocated Data.
Science of Computer Programming, 92:2-24, 2014.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Static analysis which takes into account the values of data stored
in the heap is considered complex and computationally intractable in
practice. Thus, most static analyzers do not keep track of object
fields nor of array contents, i.e., they are
heap-insensitive. In this article, we propose locality
conditions for soundly tracking heap-allocated data in Java
(bytecode) programs, by means of ghost non-heap allocated
variables. This way, heap-insensitive analysis over the transformed
program can infer information on the original heap-allocated data
without sacrificing efficiency. If the locality conditions cannot
be proven unconditionally, we seek to generate aliasing
preconditions which, when they hold in the initial state,
guarantee the termination of the program. Experimental results show
that we greatly improve the accuracy w.r.t. a heap-insensitive
analysis while the overhead introduced is reasonable.
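As a schematic illustration of the transformation (written in Python for brevity, whereas the article targets Java bytecode; the class and method names are hypothetical), a loop whose guard reads a field can be rewritten to use a ghost local variable, so that a heap-insensitive termination analysis only has to track a plain local counter:

class Counter:
    def __init__(self, n):
        self.n = n

def count_down(c):                 # original: the loop guard depends on the heap
    while c.n > 0:
        c.n = c.n - 1

def count_down_local(c):           # transformed version using a ghost variable
    ghost_n = c.n                  # sound only if the locality condition holds,
    while ghost_n > 0:             # i.e. no alias of c writes the field c.n
        ghost_n = ghost_n - 1      # while the loop is running
    c.n = ghost_n                  # restore the final value of the field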
@article{AlbertAGPG13,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ\'{a}n Puebla and Guillermo Rom\'{a}n-D\'{i}ez},
title = {{C}onditional {T}ermination of {L}oops over {H}eap-allocated {D}ata},
journal = {Science of Computer Programming},
publisher = {Elsevier},
doi = {10.1016/j.scico.2013.04.006},
url = {http://dx.doi.org/10.1016/j.scico.2013.04.006},
issn = {0167-6423},
volume = {92},
pages = {2--24},
year = {2014}
}
|
[4]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Object-Oriented Bytecode Programs.
Theoretical Computer Science (Special Issue on Quantitative
Aspects of Programming Languages), 413(1):142-159, 2012.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Cost analysis statically approximates the
cost of programs in terms of their input data size.
This paper presents, to the best of our knowledge,
the first approach to the automatic cost analysis of
Object-Oriented bytecode programs. In languages
such as Java and C#, analyzing bytecode has a much
wider application area than analyzing source code
since the latter is often not available. Cost
analysis in this context has to consider, among
others, dynamic dispatch, jumps, the operand stack,
and the heap. Our method takes a bytecode program
and a cost model specifying the resource of
interest, and generates cost relations which
approximate the execution cost of the program with
respect to such resource. We report on COSTA, an
implementation for Java bytecode which can obtain
upper bounds on cost for a large class of programs
and complexity classes. Our basic techniques can be
directly applied to infer cost relations for other
Object-Oriented imperative languages, not
necessarily in bytecode form.
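As a schematic illustration (the program is shown in Python for brevity and the symbolic constants c0, c1, c2 are hypothetical instruction counts; the article works directly on bytecode), the analysis turns a simple loop into cost relations over the input size:

def sum_list(xs):
    total = 0
    for x in xs:        # executes n times, where n = len(xs)
        total += x
    return total

# Cost relations of the kind the analysis produces, counting executed steps:
#   C_sum(n)  = c0 + C_loop(n)
#   C_loop(0) = c1
#   C_loop(n) = c2 + C_loop(n - 1)    for n > 0
# whose closed-form upper bound is c0 + c1 + c2*n.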
@article{AlbertAGPZ12,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Cost {A}nalysis of {O}bject-{O}riented {B}ytecode {P}rograms},
journal = {Theoretical Computer Science (Special Issue on Quantitative Aspects of Programming Languages)},
volume = 413,
number = 1,
pages = {142--159},
year = {2012},
publisher = {Elsevier},
issn = {0304-3975},
doi = {10.1016/j.tcs.2011.07.009},
url = {http://www.sciencedirect.com/science/article/pii/S0304397511006190}
}
|
[5]
|
Elvira Albert, Puri Arenas, Germán Puebla, and Manuel V. Hermenegildo.
Certificate size reduction in abstraction-carrying code.
Theory and Practice of Logic Programming, 12(3):283-318, 2012.
[ bibtex |
abstract |
DOI ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for mobile code safety in which the code supplier provides a program together with an abstraction (or abstract model of the program) whose validity entails compliance with a predefined safety policy. The abstraction plays thus the role of safety certificate and its generation is carried out automatically by a fixpoint analyzer. The advantage of providing a (fixpoint) abstraction to the code consumer is that its validity is checked in a single pass (i.e., one iteration) of an abstract interpretation-based checker. A main challenge to make ACC useful in practice is to reduce the size of certificates as much as possible while at the same time not increasing checking time. The intuitive idea is to only include in the certificate information that the checker is unable to reproduce without iterating. We introduce the notion of reduced certificate which characterizes the subset of the abstraction which a checker needs in order to validate (and re-construct) the full certificate in a single pass. Based on this notion, we instrument a generic analysis algorithm with the necessary extensions in order to identify the information relevant to the checker. Interestingly, the fact that the reduced certificate omits (parts of) the abstraction has implications in the design of the checker. We provide the sufficient conditions which allow us to ensure that (1) if the checker succeeds in validating the certificate, then the certificate is valid for the program (correctness) and (2) the checker will succeed for any reduced certificate which is valid (completeness). Our approach has been implemented and benchmarked within the CiaoPP system. The experimental results show that our proposal is able to greatly reduce the size of certificates in practice.
@article{DBLP-journals-tplp-AlbertAPH12,
author = {Elvira Albert and
Puri Arenas and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Certificate size reduction in abstraction-carrying code}},
journal = {Theory and Practice of Logic Programming},
volume = {12},
number = {3},
year = {2012},
pages = {283-318},
ee = {http://dx.doi.org/10.1017/S1471068410000487},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[6]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Closed-Form Upper Bounds in Static Cost Analysis.
Journal of Automated Reasoning, 46(2):161-203, 2011.
[ bibtex |
abstract |
DOI |
PDF ]
The classical approach to automatic cost analysis
consists of two phases. Given a program and some
measure of cost, the analysis first produces
cost relations (CRs), i.e., recursive
equations which capture the cost of the program in
terms of the size of its input data. Second, CRs
are converted into closed-form, i.e., without
recurrences. Whereas the first phase has received
considerable attention, with a number of cost
analyses available for a variety of programming
languages, the second phase has been comparatively
less studied. This article presents, to our
knowledge, the first practical framework for the
generation of closed-form upper bounds for CRs which
(1) is fully automatic, (2) can handle the
distinctive features of CRs originating from cost
analysis of realistic programming languages, (3) is
not restricted to simple complexity classes, and (4)
produces reasonably accurate solutions. A key idea
in our approach is to view CRs as programs, which
allows applying semantic-based static analyses and
transformations to bound them, namely our method is
based on the inference of ranking functions
and loop invariants and on the use of
partial evaluation.
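As a schematic example (not taken from the article): for the cost relation C(n) = c + C(n-1) with base case C(0) = 0, the ranking function f(n) = n bounds the number of recursive unfoldings by n and a trivial loop invariant bounds the cost of each unfolding by the constant c, so the method would return the closed-form upper bound C(n) <= c*n.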
@article{AlbertAGP11a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ\'an Puebla},
title = {{C}losed-{F}orm {U}pper {B}ounds in {S}tatic {C}ost {A}nalysis},
journal = {Journal of Automated Reasoning},
year = {2011},
publisher = {Springer},
volume = {46},
number = {2},
pages = {161-203},
doi = {10.1007/s10817-010-9174-1},
issn = {0168-7433}
}
|
[7]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Damiano Zanardini.
Task-Level Analysis for a Language with Async-Finish
Parallelism.
ACM SIGPLAN Notices, 46(5):21-30, 2011.
[ bibtex |
abstract |
DOI |
PDF ]
The task-level of a program is the maximum
number of tasks that can be available (i.e., not
finished nor suspended) simultaneously during its
execution for any input data. Static knowledge of
the task-level is of utmost importance for
understanding and debugging parallel programs as
well as for guiding task schedulers. We present, to
the best of our knowledge, the first static analysis
which infers safe and precise approximations on the
task-level for a language with async-finish
parallelism. In parallel languages, async and
finish are the basic constructs, respectively, for
spawning tasks and for waiting until they terminate.
They are the core of modern, parallel, distributed
languages like X10. Given a (parallel) program, our
analysis returns a task-level upper bound, i.e., a
function on the program's input arguments that
guarantees that the task-level of the program will
never exceed its value along any execution. Our
analysis provides a series of useful
(over)-approximations, going from the total number
of tasks spawned in the execution up to an accurate
estimation of the task-level.
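As a schematic example (not taken from the article): for a finish block whose body spawns one async task per element of an array of length n and does not wait in between, a sound task-level upper bound is n+1 (the n spawned tasks plus the spawning task, which is only suspended once it reaches the end of the finish), while the total number of tasks spawned is n.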
@article{AlbertAGZ11,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Damiano Zanardini},
title = {Task-{L}evel {A}nalysis for a {L}anguage with {A}sync-{F}inish {P}arallelism},
journal = {ACM SIGPLAN Notices},
volume = {46},
number = {5},
publisher = {ACM Press},
year = {2011},
issn = {0362-1340},
doi = {10.1145/1967677.1967681},
pages = {21--30}
}
|
[8]
|
Puri Arenas and Mario Rodríguez-Artalejo.
A General Framework for Lazy Functional Logic Programming with
Algebraic Polymorphic Types.
Theory and Practice of Logic Programming, 1(2):185-245, 2001.
[ bibtex |
abstract |
DOI ]
We propose a general framework for first-order functional logic programming, supporting lazy functions, non-determinism and polymorphic datatypes whose data constructors obey a set C of equational axioms. On top of a given C, we specify a program as a set R of C-based conditional rewriting rules for defined functions. We argue that equational logic does not supply the proper semantics for such programs. Therefore, we present an alternative logic which includes C-based rewriting calculi and a notion of model. We get soundness and completeness for C-based rewriting w.r.t. models, existence of free models for all programs, and type preservation results. As operational semantics, we develop a sound and complete procedure for goal solving, which is based on the combination of lazy narrowing with unification modulo C. Our framework is quite expressive for many purposes, such as solving action and change problems, or realizing the GAMMA computation model.
@article{DBLP:journals/tplp/Arenas-SanchezR01,
author = {Puri Arenas and
Mario Rodr\'{\i}guez-Artalejo},
  title = {A General Framework for Lazy Functional Logic Programming
               with Algebraic Polymorphic Types},
journal = {Theory and Practice of Logic Programming},
volume = {1},
number = {2},
year = {2001},
pages = {185-245},
ee = {http://journals.cambridge.org/action/displayAbstract?aid=74981},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[9]
|
Puri Arenas and Agostino Dovier.
A Minimality Study for Set Unification.
Journal of Functional and Logic Programming, 1997(7), 1997.
[ bibtex |
abstract |
DOI ]
A unification algorithm is said to be minimal for a unification problem if it generates exactly a (minimal) complete set of most-general unifiers, without instances, and without repetitions. The aim of this paper is to present a combinatorial minimality study for a significant collection of sample problems that can be used as benchmarks for testing any set-unification algorithm. Based on this combinatorial study, a new Set-Unification Algorithm (named SUA) is also described and proved to be minimal for all the analyzed problems. Furthermore, an existing naive set-unification algorithm has also been tested to show its bad behavior for most of the sample problems.
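For instance (an illustrative problem, not necessarily one of the paper's benchmark families): for {X,Y} = {a,b} with distinct constants a and b, a minimal complete set of unifiers consists of exactly the two substitutions {X=a, Y=b} and {X=b, Y=a}; a naive algorithm may instead produce repeated or instantiated copies of these solutions, which is the kind of bad behaviour the combinatorial study quantifies.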
@article{DBLP:journals/jflp/Arenas-SanchezD97,
author = {Puri Arenas and
Agostino Dovier},
title = {A {M}inimality {S}tudy for {S}et {U}nification},
journal = {Journal of Functional and Logic Programming},
volume = {1997},
number = {7},
year = {1997},
ee = {http://danae.uni-muenster.de/lehre/kuchen/JFLP/articles/1997/A97-07/A97-07.html},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[1]
|
Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and
Guillermo Román-Díez.
Resource Analysis: From Sequential to Concurrent and
Distributed Programs.
In Nikolaj Bjørner and Frank D. de Boer, editors, FM 2015:
Formal Methods - 20th International Symposium, Oslo, Norway, June 24-26,
2015, Proceedings, volume 9109 of Lecture Notes in Computer Science,
pages 3-17. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF ]
@inproceedings{AlbertACGGMPR15,
author = {Elvira Albert and
Puri Arenas and
Jes{\'{u}}s Correas and
Samir Genaim and
Miguel G{\'{o}}mez{-}Zamalloa and
Enrique Martin{-}Martin and
Germ{\'{a}}n Puebla and
Guillermo Rom{\'{a}}n{-}D{\'{\i}}ez},
title = {{R}esource {A}nalysis: {F}rom {S}equential to {C}oncurrent and {D}istributed {P}rograms},
booktitle = {{FM} 2015: Formal Methods - 20th International Symposium, Oslo, Norway,
June 24-26, 2015, Proceedings},
pages = {3--17},
year = {2015},
doi = {10.1007/978-3-319-19249-9_1},
editor = {Nikolaj Bj{\o}rner and Frank D. de Boer},
series = {Lecture Notes in Computer Science},
isbn = {978-3-319-19248-2},
volume = {9109},
publisher = {Springer}
}
|
[2]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Test Case Generation of Actor Systems.
In 13th International Symposium on Automated Technology for
Verification and Analysis, ATVA 2015. Proceedings, volume 9364 of
Lecture Notes in Computer Science, pages 259-275. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF ]
Testing is a vital part of the software development process. It is
even more so in the context of concurrent languages, since due to
undesired task interleavings and to unexpected behaviours of the
underlying task scheduler, errors can go easily undetected. Test
case generation (TCG) is the process of automatically generating
test inputs for interesting coverage criteria, which
are then applied to the system under test. This paper presents a
TCG framework for actor systems, consisting of three main
elements, which are the original contributions of this work: (1) a
symbolic execution calculus, which allows symbolically executing the
program (i.e., executing the program for unknown input data), (2)
improved techniques to avoid performing redundant computations
during symbolic execution, (3) new termination and coverage criteria,
which ensure the termination of symbolic execution and guarantee
that the test cases provide the desired degree of code coverage.
Finally, our framework has been
implemented and evaluated within the aPET system.
@inproceedings{AlbertAGM15,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Test {C}ase {G}eneration of {A}ctor {S}ystems},
booktitle = {13th International Symposium on Automated Technology for Verification and Analysis, {ATVA} 2015. Proceedings},
publisher = {Springer},
year = {2015},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-319-24953-7_21},
pages = {259-275},
volume = {9364},
isbn = {978-3-319-24952-0}
}
|
[3]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Actor- and Task-Selection Strategies for Pruning Redundant
State-Exploration in Testing.
In Erika Ábrahám and Catuscia Palamidessi, editors, 34th
IFIP International Conference on Formal Techniques for Distributed Objects,
Components and Systems (FORTE 2014), volume 8461 of Lecture Notes in
Computer Science, pages 49-65. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF ]
Testing concurrent systems requires exploring all possible
non-deterministic interleavings that the concurrent execution may
have. This is because any of the interleavings may reveal the
erroneous behaviour. In testing of actor systems, we can distinguish
two sources of non-determinism: (1) actor-selection, the
order in which actors are explored and (2) task-selection,
the order in which the messages (or tasks) within each actor are
explored. This paper provides new strategies and heuristics for
pruning redundant state-exploration when testing actor systems by
reducing the amount of unnecessary non-determinism. First, we
propose a method and heuristics for actor-selection based on
tracking the amount and the type of interactions among actors.
Second, we can avoid further redundant interleavings in task-selection
by taking into account the access to the shared-memory that
the tasks make.
@inproceedings{AlbertAG14,
author = {Elvira Albert and
Puri Arenas and
Miguel G\'omez-Zamalloa},
title = {Actor- and {T}ask-{S}election {S}trategies for {P}runing {R}edundant {S}tate-{E}xploration in {T}esting},
booktitle = {34th IFIP International Conference on Formal Techniques for Distributed Objects, Components and Systems (FORTE 2014)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
year = {2014},
doi = {10.1007/978-3-662-43613-4_4},
pages = {49-65},
volume = {8461},
editor = {Erika {\'A}brah{\'a}m and Catuscia Palamidessi}
}
|
[4]
|
Elvira Albert, Puri Arenas, Antonio Flores-Montoya, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and Guillermo Román-Díez.
SACO: Static Analyzer for Concurrent Objects.
In Erika Ábrahám and Klaus Havelund, editors, Tools and
Algorithms for the Construction and Analysis of Systems - 20th International
Conference, TACAS 2014, volume 8413 of Lecture Notes in Computer
Science, pages 562-567. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF ]
We present the main concepts, usage and implementation of SACO,
a static analyzer for concurrent objects. Interestingly, SACO is
able to infer both liveness (namely termination and resource
boundedness) and safety properties (namely deadlock freeness)
of programs based on concurrent objects. The system integrates
auxiliary analyses such as points-to and
may-happen-in-parallel, which are essential for increasing the
accuracy of the aforementioned more complex properties. SACO
provides accurate information about the dependencies which may
introduce deadlocks, loops whose termination is not guaranteed, and
upper bounds on the resource consumption of methods.
@inproceedings{AlbertAFGGMPR14,
author = {Elvira Albert and
Puri Arenas and
Antonio Flores-Montoya and
Samir Genaim and
Miguel G\'omez-Zamalloa and
Enrique Martin-Martin and
Germ\'an Puebla and
Guillermo Rom\'an-D\'iez},
title = {{SACO}: {S}tatic {A}nalyzer for {C}oncurrent {O}bjects},
booktitle = {Tools and Algorithms for the Construction and Analysis of
Systems - 20th International Conference, TACAS 2014},
year = {2014},
editor = {Erika {\'A}brah{\'a}m and
Klaus Havelund},
pages = {562-567},
volume = {8413},
series = {Lecture Notes in Computer Science},
isbn = {978-3-642-54861-1},
publisher = {Springer},
doi = {10.1007/978-3-642-54862-8_46}
}
|
[5]
|
Elvira Albert, Puri Arenas, Miguel Gómez-Zamalloa, and Peter Y.H. Wong.
aPET: A Test Case Generation Tool for Concurrent
Objects.
In Bertrand Meyer, Luciano Baresi, and Mira Mezini, editors,
Joint Meeting of the European Software Engineering Conference and the ACM
SIGSOFT Symposium on the Foundations of Software Engineering, ESEC/FSE'13,
Saint Petersburg, Russian Federation, August 18-26, 2013, pages 595-598.
ACM, 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present the concepts, usage and prototypical implementation of
aPET, a test case generation tool for a distributed asynchronous
language based on concurrent objects. The system receives as
input a program, a selection of methods to be tested, and a set of
parameters that include a selection of a coverage criterion. It
yields as output a set of test cases which guarantee that the
selected coverage criterion is achieved.
aPET is completely integrated within the language's IDE via
Eclipse. The generated test cases can be displayed in textual mode
and, besides, it is possible to generate ABSUnit code (i.e., code
runnable in a simple framework similar to JUnit to write repeatable
tests). The information yielded by aPET can be relevant to spot
bugs during program development and also to perform regression
testing.
@inproceedings{AlbertAGZW13,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa and Peter Y.H. Wong},
title = {a{PET}: {A} {T}est {C}ase {G}eneration {T}ool for {C}oncurrent {O}bjects},
booktitle = {Joint Meeting of the European Software Engineering Conference
and the ACM SIGSOFT Symposium on the Foundations of Software
Engineering, ESEC/FSE'13, Saint Petersburg, Russian Federation,
August 18-26, 2013},
year = 2013,
publisher = {ACM},
pages = {595--598},
editor = {Bertrand Meyer and
Luciano Baresi and
Mira Mezini},
doi = {10.1145/2491411.2494590},
url = {http://dx.doi.org/10.1145/2491411.2494590}
}
|
[6]
|
Diego Esteban Alonso Blas, Puri Arenas, and Samir Genaim.
Precise Cost Analysis via Local Reasoning.
In Dang Van Hung and Mizuhito Ogawa, editors, Automated
Technology for Verification and Analysis - 11th International Symposium,
ATVA 2013, Hanoi, Vietnam, October 15-18, 2013. Proceedings, volume 8172
of Lecture Notes in Computer Science, pages 319-333. Springer, 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
The classical approach to static cost analysis is based on first
transforming a given program into a set of cost relations, and then solving
them into closed-form upper-bounds.
The quality of the upper-bounds and the scalability of such cost
analysis highly depend on the precision and efficiency of the
solving phase.
Several techniques for solving cost relations exist, some are
efficient but not precise enough, and some are very precise but do
not scale to large cost relations.
In this paper we explore the gap between these techniques, seeking
for ones that are both precise and efficient.
In particular, we propose a novel technique that first splits the
cost relation into several atomic ones, and then uses precise
local reasoning for some and less precise but efficient reasoning for
others.
For the precise local reasoning, we propose several methods that
define the cost as a solution of a universally quantified formula.
Preliminary experiments demonstrate the effectiveness of our
approach.
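As a schematic illustration (not the precise formalisation used in the paper): the cost of the atomic relation C(n) = n + C(n-1), C(0) = 0 can be characterised as any function U satisfying the universally quantified formula  forall n >= 1: U(n) >= n + U(n-1)  together with  U(0) >= 0; the candidate U(n) = n*(n+1)/2 satisfies the formula and is therefore a valid (and here exact) bound.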
@inproceedings{AlonsoAG13,
author = {Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim},
title = {Precise {C}ost {A}nalysis via {L}ocal {R}easoning},
booktitle = {Automated Technology for Verification and Analysis - 11th International
Symposium, {ATVA} 2013, Hanoi, Vietnam, October 15-18, 2013. Proceedings},
editor = {Dang Van Hung and Mizuhito Ogawa},
pages = {319-333},
volume = {8172},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
year = {2013},
doi = {10.1007/978-3-319-02444-8_23},
url = {http://dx.doi.org/10.1007/978-3-319-02444-8_23}
}
|
[7]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Towards Testing Concurrent Objects in CLP.
In Agostino Dovier and Vítor Santos Costa, editors, 28th
International Conference on Logic Programming (ICLP'12), volume 17 of
LIPIcs, pages 98-108. Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik,
2012.
[ bibtex |
abstract |
DOI |
PDF ]
Testing is a vital part of the software development process. It is
even more so in the context of concurrent languages, since due to
undesired task interleavings and to unexpected behaviours of the
underlying task scheduler, errors can go easily undetected. This
paper studies the extension of the CLP-based framework for test data
generation of sequential programs to the context of concurrent
objects, a concurrency model which constitutes a promising
solution to concurrency in OO languages.
The challenge is in developing the following required extensions entirely in CLP
(which are our main contributions):
(1) we simulate
task interleavings which could occur in an actual execution and
could lead to additional test cases for the method under test (while
interleavings that would not add new test cases are disregarded),
(2) we integrate sophisticated scheduling policies and, besides,
use different policies for the different fragments of the program
that can run in parallel,
(3) we combine standard termination and coverage
criteria used for testing sequential programs with specific criteria
which control termination and coverage from the concurrency point of
view, e.g., we can limit the number of task interleavings allowed and
the number of loop unrollings performed in each parallel component, etc.
@inproceedings{AlbertAG-Z12,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Towards Testing Concurrent Objects in CLP},
booktitle = {28th International Conference on Logic Programming (ICLP'12)},
editor = {Agostino Dovier and V\'{\i}tor Santos Costa},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik},
series = {LIPIcs},
volume = {17},
year = {2012},
doi = {10.4230/LIPIcs.ICLP.2012.98},
pages = {98-108}
}
|
[8]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and Germán Puebla.
COSTABS: A Cost and Termination Analyzer for ABS.
In Oleg Kiselyov and Simon Thompson, editors, Proceedings of the
2012 ACM SIGPLAN Workshop on Partial Evaluation and Program Manipulation,
PEPM 2012, Philadelphia, Pennsylvania, USA, January 23-24, 2012, pages
151-154. ACM Press, 2012.
[ bibtex |
abstract |
DOI ]
ABS is an abstract behavioural specification
language to model distributed concurrent systems. Characteristic
features of ABS are that: (1) it allows abstracting from
implementation details while remaining executable: a
functional sub-language over abstract data types is used to
specify internal, sequential computations; and (2) the
imperative sub-language provides flexible concurrency and
synchronization mechanisms by means of asynchronous method calls,
release points in method definitions, and cooperative scheduling of
method activations. This paper presents COSTABS, a COSt and
Termination analyzer for ABS, which is able to prove termination and
obtain resource usage bounds for both the imperative and
functional fragments of programs. The resources that COSTABS can
infer include termination, number of execution steps, memory
consumption, number of asynchronous calls, among others. The
analysis bounds provide formal guarantees that the execution of the
program will never exceed the inferred amount of resources. The
system can be downloaded as free software from its web site, where a
repository of examples and a web interface are also provided. To the
best of our knowledge, COSTABS is the first system able to perform
resource analysis for a concurrent language.
@inproceedings{AlbertAGGZP12,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G\'omez-Zamalloa and Germ\'an Puebla},
title = {{COSTABS}: A {C}ost and {T}ermination {A}nalyzer for {ABS}},
booktitle = {Proceedings of the 2012 ACM SIGPLAN Workshop on Partial
Evaluation and Program Manipulation, PEPM 2012, Philadelphia, Pennsylvania, USA,
January 23-24, 2012},
publisher = {ACM Press},
editor = {Oleg Kiselyov and Simon Thompson},
year = {2012},
isbn = {978-1-4503-1118-2},
pages = {151-154},
doi = {10.1145/2103746.2103774}
}
|
[9]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Automatic Inference of Resource Consumption Bounds.
In Logic for Programming, Artificial Intelligence, and Reasoning
- 18th International Conference, LPAR-18, Mérida, Venezuela, March 11-15,
2012. Proceedings, volume 7180 of Lecture Notes in Computer
Science, pages 1-11. Springer, 2012.
[ bibtex |
abstract |
DOI ]
One of the main features of programs is the amount of resources
which are needed in order to run them. Different resources can be
taken into consideration, such as the number of execution steps,
amount of memory allocated, number of calls to certain methods,
etc. Unfortunately, manually determining the resource consumption of
programs is difficult and error-prone. We provide an overview of a
state of the art framework for automatically obtaining both upper
and lower bounds on the resource consumption of programs. The bounds
obtained are functions on the size of the input arguments to the
program and are obtained statically, i.e., without running the
program. Due to the approximations introduced, the framework can
fail to obtain (non-trivial) bounds even if they exist. On the other
hand, modulo implementation bugs, the bounds thus obtained are valid
for any execution of the program. The framework has been
implemented in the COSTA system and can provide useful bounds for
realistic object-oriented and actor-based concurrent programs.
@inproceedings{AlbertAGGZP12a,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa and
Germ{\'a}n Puebla},
title = {Automatic {I}nference of {R}esource {C}onsumption {B}ounds},
booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning
- 18th International Conference, LPAR-18, M{\'e}rida,
                  Venezuela, March 11-15, 2012. Proceedings},
year = {2012},
pages = {1-11},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-642-28717-6},
volume = {7180}
}
|
[10]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Symbolic Execution of Concurrent Objects in CLP.
In Practical Aspects of Declarative Languages (PADL'12),
Lecture Notes in Computer Science, pages 123-137. Springer, January 2012.
[ bibtex |
abstract |
DOI |
PDF ]
In the concurrent objects model, objects have conceptually
dedicated processors and live in a distributed environment with
unordered communication by means of asynchronous method calls.
Method callers may decide at runtime when to synchronize with the reply
from a call. This paper presents a CLP-based approach to symbolic execution
of concurrent OO programs. Developing a symbolic execution engine for
concurrent objects is challenging because it needs to combine the
OO features of the language, concurrency and backtracking.
Our approach consists in, first, transforming the OO program into an
equivalent CLP program which contains calls to specific builtins that
handle the concurrency model. The builtins are implemented in CLP and
include primitives to handle asynchronous calls, synchronization
operations and scheduling policies, among others. Interestingly,
symbolic execution of the transformed programs then relies
simply on the standard sequential execution of CLP.
We report on a prototype implementation within the PET system which
shows the feasibility of our approach.
@inproceedings{AlbertAGZ12,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Symbolic {E}xecution of {C}oncurrent {O}bjects in {CLP}},
booktitle = {Practical Aspects of Declarative Languages (PADL'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
year = {2012},
month = jan,
volume = {7149},
pages = {123-137},
doi = {10.1007/978-3-642-27694-1_10}
}
|
[11]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Cost Analysis of Concurrent OO Programs.
In Hongseok Yang, editor, Programming Languages and Systems -
9th Asian Symposium, APLAS 2011, Kenting, Taiwan, December 5-7, 2011.
Proceedings, volume 7078 of Lecture Notes in Computer Science, pages
238-254. Springer, 2011.
[ bibtex |
abstract |
DOI ]
Cost analysis aims at automatically approximating the
resource consumption (e.g., memory) of
executing a program in terms of its input
parameters. While cost analysis for sequential
programming languages has received considerable
attention, concurrency and distribution have been
notably less studied. The main challenges (and our
contributions) of cost analysis in a concurrent
setting are: (1) Inferring precise size
relations for data in the program in the presence
of shared memory. This information is essential for
bounding the number of iterations of loops. (2)
Distribution suggests that analysis must keep the
cost of the diverse distributed components
separate. We handle this by means of a novel form of
recurrence equations which are parametric on
the notion of cost center, which represents a
corresponding component. To the best of our
knowledge, our work is the first one to present a
general cost analysis framework and an
implementation for concurrent OO programs.
@inproceedings{AlbertAGGP11,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G\'{o}mez-Zamalloa and Germ\'an Puebla},
editor = {Hongseok Yang},
title = {Cost {A}nalysis of {C}oncurrent {OO} Programs},
booktitle = {Programming Languages and Systems - 9th Asian Symposium,
APLAS 2011, Kenting, Taiwan, December 5-7, 2011. Proceedings},
series = {Lecture Notes in Computer Science},
year = {2011},
publisher = {Springer},
pages = {238-254},
volume = {7078},
doi = {10.1007/978-3-642-25318-8_19}
}
|
[12]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Diana
Ramírez-Deantes.
From Object Fields to Local Variables: A Practical
Approach to Field-Sensitive Analysis.
In Radhia Cousot and Matthieu Martel, editors, Static Analysis -
17th International Symposium, SAS 2010, Perpignan, France, September 14-16,
2010. Proceedings, volume 6337 of Lecture Notes in Computer Science,
pages 100-116. Springer, 2010.
[ bibtex |
abstract |
DOI |
PDF ]
Static analysis which takes into account the value of
data stored in the heap is typically considered complex
and computationally intractable in practice. Thus, most
static analyzers do not keep track of object fields (or
fields for short), i.e., they are field-insensitive.
In this paper, we propose locality conditions for
soundly converting fields into local variables.
This way, field-insensitive analysis over the
transformed program can infer information on the original
fields.
Our notion of locality is context-sensitive and can be
applied both to numeric and reference fields.
We propose then a polyvariant transformation which
actually converts object fields meeting the locality
condition into variables and which is able to generate
multiple versions of code when this leads to increasing
the amount of fields which satisfy the locality
conditions. We have implemented our analysis within a
termination analyzer for Java bytecode.
Interestingly, we can now prove termination of programs
which use iterators without the need of a sophisticated
heap analysis.
@inproceedings{AlbertAGPR10,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram\'{\i}rez-Deantes},
title = {From {O}bject {F}ields to {L}ocal {V}ariables: A {P}ractical {A}pproach
to {F}ield-{S}ensitive {A}nalysis},
booktitle = {Static Analysis - 17th International Symposium, SAS 2010,
Perpignan, France, September 14-16, 2010. Proceedings},
editor = {Radhia Cousot and
Matthieu Martel},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {6337},
year = {2010},
isbn = {978-3-642-15768-4},
pages = {100-116},
doi = {10.1007/978-3-642-15769-1_7},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[13]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, Germán Puebla, Diana Ramirez, Guillermo Román-Díez, and Damiano Zanardini.
Termination and Cost Analysis with COSTA and its User
Interfaces.
In Spanish Conference on Programming and Computer Languages
(PROLE'09), volume 258 of Electronic Notes in Theoretical Computer
Science, pages 109-121. Elsevier, September 2009.
[ bibtex |
abstract |
DOI |
PDF ]
COSTA is a static analyzer for Java
bytecode which is able to infer cost and termination
information for large classes of programs. The
analyzer takes as input a program and a resource of
interest, in the form of a cost model, and aims at
obtaining an upper bound on the execution cost with
respect to the resource and at proving program
termination. The COSTA system has reached a
considerable degree of maturity in that (1) it
includes state-of-the-art techniques for statically
estimating the resource consumption and the
termination behavior of programs, plus a number of
specialized techniques which are required for
achieving accurate results in the context of
object-oriented programs, such as handling numeric
fields in value analysis; (2) it provides several
non-trivial notions of cost (resource consumption)
including, in addition to the number of execution
steps, the amount of memory allocated in the heap or
the number of calls to some user-specified method;
(3) it provides several user interfaces: a classical
command line, a Web interface which allows
experimenting remotely with the system without the
need of installing it locally, and a recently
developed Eclipse plugin which facilitates usage of
the analyzer, even during the development phase; (4)
it can deal with both the Standard and Micro
editions of Java. In the tool demonstration, we will
show that COSTA is able to produce meaningful
results for non-trivial programs, possibly using
Java libraries. Such results can then be used in
many applications, including program development,
resource usage certification, program optimization,
etc.
@inproceedings{AlbertAGGPRRZ09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Germ{\'a}n Puebla and Diana Ramirez and Guillermo Rom\'an-D\'iez and Damiano Zanardini},
title = {{T}ermination and {C}ost {A}nalysis with {COSTA} and its {U}ser {I}nterfaces},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'09)},
series = {Electronic Notes in Theoretical Computer Science},
publisher = {Elsevier},
volume = {258},
year = {2009},
pages = {109-121},
doi = {10.1016/j.entcs.2009.12.008},
month = sep
}
|
[14]
|
Elvira Albert, Diego Esteban Alonso Blas, Puri Arenas, Samir Genaim, and
Germán Puebla.
Asymptotic Resource Usage Bounds.
In Zhenjiang Hu, editor, Programming Languages and Systems, 7th
Asian Symposium, APLAS 2009, Seoul, Korea, December 14-16, 2009.
Proceedings, volume 5904 of Lecture Notes in Computer Science, pages
294-310. Springer, 2009.
[ bibtex |
abstract |
DOI |
PDF |
slides ]
When describing the resource usage of a program, it is
usual to talk in asymptotic terms, such as
the well-known “big O” notation, whereby we focus
on the behaviour of the program for large input data
and make a rough approximation by considering as
equivalent programs whose resource usage grows at
the same rate. Motivated by the existence of
non-asymptotic resource usage analyzers, in
this paper, we develop a novel transformation from a
non-asymptotic cost function (which can be produced
by multiple resource analyzers) into its asymptotic
form. Our transformation aims at producing tight
asymptotic forms which do not contain
redundant subexpressions (i.e., expressions
asymptotically subsumed by others). Interestingly,
we integrate our transformation at the heart of a
cost analyzer to generate asymptotic upper
bounds without having to first compute their
non-asymptotic counterparts. Our experimental
results show that, while non-asymptotic cost
functions become very complex, their asymptotic
forms are much more compact and manageable. This is
essential to improve scalability and to enable the
application of cost analysis in resource-aware
verification/certification.
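As a toy, single-variable sketch of the idea (hypothetical and written in Python; the article handles general multi-variable cost functions), a cost function given as a sum of monomials can be reduced to its asymptotic form by dropping every term that is asymptotically subsumed by a term of higher degree and discarding constant factors:

def asymptotic_form(terms):
    # terms: list of (coefficient, degree) pairs describing sum(c * n**d)
    degrees = [d for c, d in terms if c > 0]
    top = max(degrees, default=0)       # every lower-degree term is subsumed
    return "O(1)" if top == 0 else "O(n^%d)" % top

# 7*n^3 + 5*n^2 + 100*n + 42  reduces to  O(n^3)
print(asymptotic_form([(7, 3), (5, 2), (100, 1), (42, 0)]))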
@inproceedings{AlbertAAGP09,
author = {Elvira Albert and Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {Asymptotic {R}esource {U}sage {B}ounds},
booktitle = {Programming Languages and Systems, 7th Asian Symposium, {APLAS} 2009,
Seoul, Korea, December 14-16, 2009. Proceedings},
year = 2009,
editor = {Zhenjiang Hu},
series = {Lecture Notes in Computer Science},
volume = {5904},
pages = {294-310},
publisher = {Springer},
doi = {10.1007/978-3-642-10672-9_21},
slides = {http://costa.ls.fi.upm.es/papers/costa/AlbertAAGP09-slides.pdf}
}
|
[15]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Field-Sensitive Value Analysis by Field-Insensitive
Analysis.
In Ana Cavalcanti and Dennis Dams, editors, 16th International
Symposium on Formal Methods, FM'09, volume 5850 of Lecture Notes in
Computer Science, pages 370-386. Springer, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
Shared mutable data structures pose major problems in
static analysis and most analyzers are unable to
keep track of the values of numeric variables
stored in the heap. In this paper, we first
identify sufficient conditions under which heap
allocated numeric variables in object oriented
programs (i.e., numeric fields) can be handled as
non-heap allocated variables. Then, we present a
static analysis to infer which numeric fields
satisfy these conditions at the level of
(sequential) bytecode. This allows
instrumenting the code with ghost variables
which make such numeric fields observable to any
field-insensitive value analysis. Our experimental
results in termination analysis show that we greatly
enlarge the class of analyzable programs with a
reasonable overhead.
@inproceedings{AlbertAGP09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{F}ield-{S}ensitive {V}alue {A}nalysis by {F}ield-{I}nsensitive {A}nalysis},
booktitle = {16th International Symposium on Formal Methods, FM'09},
year = 2009,
editor = {Ana Cavalcanti and Dennis Dams},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {5850},
pages = {370-386},
isbn = {978-3-642-05088-6},
doi = {10.1007/978-3-642-05089-3_24},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[16]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Cost Relation Systems: a Language-Independent Target
Language for Cost Analysis.
In Spanish Conference on Programming and Computer Languages
(PROLE'08), volume 248 of Electronic Notes in Theoretical Computer
Science, pages 31-46. Elsevier, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
Cost analysis aims at obtaining information about the
execution cost of programs. This paper studies
cost relation systems (CRSs): the sets of
recursive equations used in cost analysis in order
to capture the execution cost of programs in terms
of the size of their input arguments. We investigate
the notion of CRS from a general perspective which
is independent of the particular cost analysis
framework. Our main contributions are: we provide a
formal definition of execution cost and of CRS which
is not tied to a particular programming language; we
present the notion of sound CRS, i.e., which
correctly approximates the cost of the corresponding
program; we identify the differences with recurrence
relation systems, its possible applications and the
new challenges that they bring about. Our general
framework is illustrated by instantiating it to cost
analysis of Java bytecode, Haskell, and Prolog.
@inproceedings{AlbertAGP08b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{C}ost {R}elation {S}ystems: a
                 {L}anguage-{I}ndependent {T}arget {L}anguage for
{C}ost {A}nalysis},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'08)},
series = {Electronic Notes in Theoretical Computer Science},
volume = {248},
year = {2009},
pages = {31-46},
publisher = {Elsevier},
doi = {10.1016/j.entcs.2009.07.057},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[17]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Automatic Inference of Upper Bounds for Recurrence Relations in Cost
Analysis.
In María Alpuente and Germán Vidal, editors, Static
Analysis, 15th International Symposium, SAS 2008, Valencia, Spain, July
16-18, 2008. Proceedings, volume 5079 of Lecture Notes in Computer
Science, pages 221-237. Springer, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
non-trivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
@inproceedings{AlbertAGP08,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Germ{\'a}n Puebla},
title = {Automatic Inference of Upper Bounds for Recurrence Relations
in Cost Analysis},
booktitle = {Static Analysis, 15th International Symposium, SAS 2008,
Valencia, Spain, July 16-18, 2008. Proceedings},
editor = {Mar\'{\i}a Alpuente and
Germ{\'a}n Vidal},
year = {2008},
pages = {221-237},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {5079},
isbn = {978-3-540-69163-1},
doi = {10.1007/978-3-540-69166-2_15},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[18]
|
Elvira Albert, Puri Arenas, Michael Codish, Samir Genaim, Germán Puebla,
and Damiano Zanardini.
Termination Analysis of Java Bytecode.
In Gilles Barthe and Frank S. de Boer, editors, Formal Methods
for Open Object-Based Distributed Systems, 10th IFIP WG 6.1 International
Conference, FMOODS 2008, Oslo, Norway, June 4-6, 2008, Proceedings, volume
5051 of Lecture Notes in Computer Science, pages 2-18. Springer, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
Termination analysis has received
considerable attention, traditionally in the context
of declarative programming, and recently also for
imperative languages. In existing approaches,
termination is performed on source
programs. However, there are many situations,
including mobile code, where only the compiled code
is available. In this work we present an automatic
termination analysis for sequential Java Bytecode
programs. Such analysis presents all of the
challenges of analyzing a low-level language as well
as those introduced by object-oriented languages.
Interestingly, given a bytecode program, we produce
a constraint logic program, CLP, whose
termination entails termination of the bytecode
program. This allows applying the large body of work
in termination of CLP programs to termination of
Java bytecode. A prototype analyzer is described
and initial experimentation is reported.
@inproceedings{AlbertACGPZ08,
author = {Elvira Albert and Puri Arenas and Michael Codish and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Termination Analysis of Java Bytecode},
booktitle = {Formal Methods for Open Object-Based Distributed Systems, 10th IFIP WG 6.1 International Conference, FMOODS 2008, Oslo, Norway, June 4-6, 2008, Proceedings},
editor = {Gilles Barthe and Frank S. de Boer},
year = {2008},
pages = {2--18},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {5051},
isbn = {978-3-540-68862-4},
doi = {10.1007/978-3-540-68863-1_2},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[19]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Removing Useless Variables in Cost Analysis of Java
Bytecode.
In Roger L. Wainwright and Hisham Haddad, editors, Proceedings
of the 2008 ACM Symposium on Applied Computing (SAC), Fortaleza, Ceara,
Brazil, March 16-20, 2008, pages 368-375. ACM, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
Automatic cost analysis has interesting
applications in the context of verification and
certification of mobile code. For instance, the
code receiver can use cost information in order to
decide whether to reject mobile code which has too
large cost requirements in terms of computing
resources (in time and/or space) or billable events
(SMSs sent, bandwidth required). Existing cost
analyses for a variety of languages describe the
resource consumption of programs by means of
Cost Equation Systems (CESs), which are
similar to, but more general than recurrence
equations. CESs express the cost of a program in
terms of the size of its input data. In a further
step, a closed form (i.e., non-recursive) solution
or upper bound can sometimes be found by using
existing Computer Algebra Systems (CASs), such as
Maple and Mathematica. In this work, we focus on
cost analysis of Java bytecode, a language which is
widely used in the context of mobile code and we
study the problem of identifying variables which are
useless in the sense that they do not affect
the execution cost and therefore can be ignored by
cost analysis. We identify two classes of useless
variables and propose automatic analysis techniques
to detect them. The first class corresponds to
stack variables that can be replaced by
program variables or constant values. The second
class corresponds to variables whose value is
cost-irrelevant, i.e., does not affect the
cost of the program. We propose an algorithm,
inspired by static slicing, which safely
identifies cost-irrelevant variables. The benefits
of eliminating useless variables are two-fold: (1)
cost analysis without useless variables can be more
efficient and (2) resulting CESs are more likely to
be solvable by existing CASs.
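As a schematic sketch of the slicing idea for the second class (hypothetical and written in Python; this is not the algorithm defined in the paper), one can start from the variables that directly influence the cost, e.g. those tested in loop guards, and close the set backwards under data dependencies; every variable that is never reached is cost-irrelevant:

def cost_relevant(assignments, seeds):
    # assignments: list of (target, sources) pairs, one per assignment
    # seeds: variables that directly influence the cost (e.g. loop guards)
    relevant = set(seeds)
    changed = True
    while changed:
        changed = False
        for target, sources in assignments:
            if target in relevant and not set(sources) <= relevant:
                relevant |= set(sources)
                changed = True
    return relevant

# i := i + 1 ; log := log + x   with the loop guard testing i:
# x and log never flow into the guard, so they are cost-irrelevant.
print(cost_relevant([("i", ["i"]), ("log", ["log", "x"])], {"i"}))   # {'i'}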
@inproceedings{AlbertAGPZ08,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Removing {U}seless {V}ariables in {C}ost {A}nalysis of {J}ava {B}ytecode},
editor = {Roger L. Wainwright and Hisham Haddad},
booktitle = {Proceedings of the 2008 ACM Symposium on Applied Computing (SAC), Fortaleza, Ceara, Brazil, March 16-20, 2008},
year = {2008},
pages = {368--375},
publisher = {ACM},
isbn = {978-1-59593-753-7},
doi = {10.1145/1363686.1363779},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[20]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
A Generic Framework for the Cost Analysis of Java
Bytecode.
In Ernesto Pimentel, editor, Spanish Conference on Programming
and Computer Languages (PROLE'07), pages 61-70, September 2007.
[ bibtex |
abstract |
PDF ]
Cost analysis of Java bytecode is
complicated by its unstructured control flow, the
use of an operand stack and its object-oriented
programming features (like dynamic dispatching).
This paper addresses these problems and develops a
generic framework for the automatic cost analysis of
sequential Java bytecode. Our method generates
cost relations which define at compile-time
the cost of programs as a function of their input
data size. To the best of our knowledge, this is the
first approach to the automatic cost analysis of
Java bytecode.
@inproceedings{AlbertAGPZ07b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {A {G}eneric {F}ramework for the {C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'07)},
year = {2007},
pages = {61--70},
month = sep,
editor = {Ernesto Pimentel}
}
|
[21]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Java Bytecode.
In Rocco De Nicola, editor, Programming Languages and Systems,
16th European Symposium on Programming, ESOP 2007, Held as Part of the
Joint European Conferences on Theory and Practice of Software, ETAPS 2007,
Braga, Portugal, March 24 - April 1, 2007, Proceedings, volume 4421 of
Lecture Notes in Computer Science, pages 157-172. Springer-Verlag, March
2007.
[ bibtex |
abstract |
DOI |
PDF ]
Cost analysis of Java bytecode is
complicated by its unstructured control flow, the
use of an operand stack and its object-oriented
programming features (like dynamic dispatching).
This paper addresses these problems and develops a
generic framework for the automatic cost analysis of
sequential Java bytecode. Our method generates
cost relations which define at compile-time
the cost of programs as a function of their input
data size. To the best of our knowledge, this is the
first approach to the automatic cost analysis of
Java bytecode.
@inproceedings{AlbertAGPZ07,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Programming Languages and Systems, 16th European Symposium on Programming,
{ESOP} 2007, Held as Part of the Joint European Conferences on Theory
                 and Practice of Software, {ETAPS} 2007, Braga, Portugal, March 24
- April 1, 2007, Proceedings},
year = 2007,
editor = {Rocco De Nicola},
series = {Lecture Notes in Computer Science},
month = mar,
publisher = {Springer-Verlag},
volume = {4421},
pages = {157--172},
isbn = {978-3-540-71314-2},
doi = {10.1007/978-3-540-71316-6_12},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[22]
|
Elvira Albert, Puri Arenas-Sánchez, Germán Puebla, and Manuel V.
Hermenegildo.
Reduced Certificates for Abstraction-Carrying Code.
In Logic Programming, 22nd International Conference, ICLP 2006,
Seattle, WA, USA, August 17-20, 2006, Proceedings, volume 4079 of
LNCS, pages 163-178. Springer, 2006.
[ bibtex |
abstract ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for mobile code safety in which the code supplier provides a program together with an abstraction whose validity entails compliance with a predefined safety policy. The abstraction thus plays the role of safety certificate and its generation is carried out automatically by a fixed-point analyzer. The advantage of providing a (fixed-point) abstraction to the code consumer is that its validity is checked in a single pass of an abstract interpretation-based checker. A main challenge is to reduce the size of certificates as much as possible while at the same time not increasing checking time. We introduce the notion of reduced certificate which characterizes the subset of the abstraction which a checker needs in order to validate (and reconstruct) the full certificate in a single pass. Based on this notion, we instrument a generic analysis algorithm with the necessary extensions in order to identify the information relevant to the checker. We also provide a correct checking algorithm together with sufficient conditions for ensuring its completeness. The experimental results within the CiaoPP system show that our proposal is able to greatly reduce the size of certificates in practice.
@inproceedings{DBLP-conf-iclp-AlbertAPH06,
author = {Elvira Albert and
Puri Arenas-S{\'a}nchez and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Reduced Certificates for Abstraction-Carrying Code}},
booktitle = {{Logic Programming, 22nd International Conference, ICLP 2006, Seattle, WA, USA, August 17-20, 2006, Proceedings}},
year = {2006},
series = {LNCS},
volume = {4079},
pages = {163-178},
publisher = {Springer}
}
|
[23]
|
Elvira Albert, Puri Arenas, and Germán Puebla.
An Incremental Approach to Abstraction-Carrying Code.
In Logic for Programming, Artificial Intelligence, and
Reasoning, 13th International Conference, LPAR 2006, Phnom Penh, Cambodia,
November 13-17, 2006, Proceedings, volume 4246 of LNCS, pages
377-391. Springer, 2006.
[ bibtex |
abstract ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for Proof-Carrying Code (PCC) in which the code supplier provides a program together with an abstraction (or abstract model of the program) whose validity entails compliance with a predefined safety policy. Existing approaches for PCC are developed under the assumption that the consumer reads and validates the entire program w.r.t. the full certificate at once, in a non-incremental way. In the context of ACC, we propose an incremental approach to PCC for the generation of certificates and the checking of untrusted updates of a (trusted) program, i.e., when a producer provides a modified version of a previously validated program. Our proposal is that, if the consumer keeps the original (fixed-point) abstraction, it is possible to provide only the program updates and the incremental certificate (i.e., the difference of abstractions). Furthermore, it is now possible to define an incremental checking algorithm which, given the new updates and their incremental certificate, only re-checks the fixpoint for each procedure affected by the updates and the propagation of the effect of these fixpoint changes. As a consequence, both certificate transmission time and checking time can be reduced significantly.
@inproceedings{DBLP-conf-lpar-AlbertAP06,
author = {Elvira Albert and
Puri Arenas and
Germ{\'a}n Puebla},
title = {{An Incremental Approach to Abstraction-Carrying Code}},
booktitle = {{Logic for Programming, Artificial Intelligence, and Reasoning, 13th International Conference, LPAR 2006, Phnom Penh, Cambodia, November 13-17, 2006, Proceedings}},
year = {2006},
volume = {4246},
series = {LNCS},
pages = {377-391},
publisher = {Springer}
}
|
[24]
|
Puri Arenas, Francisco Javier López-Fraguas, and Mario
Rodríguez-Artalejo.
Functional Plus Logic Programming with Built-In and Symbolic
Constraints.
In Principles and Practice of Declarative Programming,
International Conference PPDP'99, Paris, France, September 29 - October 1,
1999, Proceedings, volume 1702 of Lecture Notes in Computer Science,
pages 152-169. Springer, 1999.
[ bibtex |
abstract ]
In this paper we propose a lazy functional logic language, named SETA, which makes it possible to handle multisets, built-in arithmetic constraints over the domain of real numbers, as well as various symbolic constraints over datatypes. As the main theoretical results, we have proved the existence of free term models for all SETA programs and we have developed a correct and complete goal solving mechanism.
@inproceedings{DBLP:conf/ppdp/Arenas-SanchezLR99,
author = {Puri Arenas and
Francisco Javier L{\'o}pez-Fraguas and
Mario Rodr\'{\i}guez-Artalejo},
title = {Functional Plus {L}ogic {P}rogramming with {B}uilt-In and {S}ymbolic
{C}onstraints},
booktitle = {Principles and Practice of Declarative Programming, International
Conference PPDP'99, Paris, France, September 29 - October
1, 1999, Proceedings},
year = {1999},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {1702},
pages = {152-169}
}
|
[25]
|
Puri Arenas, Francisco Javier López-Fraguas, and Mario
Rodríguez-Artalejo.
Embedding Multiset Constraints into a Lazy Functional Logic
Language.
In Principles of Declarative Programming, 10th International
Symposium, PLILP'98 Held Jointly with the 7th International Conference,
ALP'98, Pisa, Italy, September 16-18, 1998, Proceedings, volume 1490 of
Lecture Notes in Computer Science, pages 429-444. Springer, 1998.
[ bibtex |
abstract |
DOI ]
In recent works, we have proposed a general framework for lazy functional logic programming with algebraic polymorphic types, i.e., parametric datatypes whose data constructors fulfill a given set of equational axioms. The aim of this paper is to investigate implementation techniques for an extended instance of this framework, namely, lazy functional logic programming with multisets and constraints. We consider a language (named Seta) which supports a polymorphic datatype Mset(α) along with specific constraints for multisets: strict equality (already present in the general framework), disequality, membership and non-membership. We describe a quite readable Prolog-based implementation which can be executed on top of any Prolog system that provides the ability to solve simple arithmetic constraints.
@inproceedings{DBLP:conf/plilp/Arenas-SanchezLR98,
author = {Puri Arenas and
Francisco Javier L{\'o}pez-Fraguas and
Mario Rodr\'{\i}guez-Artalejo},
title = {Embedding {M}ultiset {C}onstraints into a {L}azy {F}unctional {L}ogic
{L}anguage},
booktitle = {Principles of Declarative Programming, 10th International
Symposium, PLILP'98 Held Jointly with the 7th International
Conference, ALP'98, Pisa, Italy, September 16-18, 1998,
Proceedings},
year = {1998},
pages = {429-444},
ee = {http://dx.doi.org/10.1007/BFb0056631},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {1490}
}
|
[26]
|
Puri Arenas and Mario Rodríguez-Artalejo.
A Lazy Narrowing Calculus for Functional Logic Programming with
Algebraic Polymorphic Types.
In Logic Programming, Proceedings of the 1997 International
Symposium, pages 53-67. The MIT Press, 1997.
[ bibtex |
abstract ]
In a recent work, we have proposed a semantic framework for lazy functional logic programming with algebraic polymorphic types, i.e., polymorphic types whose data constructors must obey a finite set C of equational axioms. That framework included C-based rewriting calculi and a notion of model, providing soundness and completeness of C-based rewriting w.r.t. models, existence of free models for all programs and type preservation results, but no goal solving mechanism was presented. The present paper extends the previous one by developing a sound and complete procedure for goal solving, which is based on the combination of lazy narrowing with unification modulo C. The resulting language is quite expressive for many kinds of problems, such as action and change problems.
@inproceedings{DBLP:conf/slp/Arenas-SanchezR97,
author = {Puri Arenas and
Mario Rodr\'{\i}guez-Artalejo},
title = {A Lazy Narrowing Calculus for Functional Logic Programming
with Algebraic Polymorphic Types},
booktitle = {Logic Programming, Proceedings of the 1997 International Symposium},
year = {1997},
pages = {53-67},
publisher = {The MIT Press}
}
|
[27]
|
Puri Arenas and Mario Rodríguez-Artalejo.
A Semantic Framework for Functional Logic Programming with
Algebraic Polymorphic Types.
In TAPSOFT'97: Theory and Practice of Software Development, 7th
International Joint Conference CAAP/FASE, Lille, France, April 14-18, 1997,
Proceedings, volume 1214 of Lecture Notes in Computer Science, pages
453-464. Springer, 1997.
[ bibtex |
abstract |
DOI ]
We propose a formal framework for functional logic programming, supporting lazy functions, non-determinism and polymorphic datatypes whose data constructors obey a given set C of equational axioms. On top of a given C, we specify a program as a set R of C-based conditional rewriting rules for defined functions. We argue that equational logic does not supply the proper semantics for such programs. Therefore, we present an alternative logic which includes C-based rewriting calculi and a notion of model. We get soundness and completeness for C-based rewriting w.r.t. models, existence of free models for all programs, and type preservation results.
@inproceedings{DBLP:conf/tapsoft/Arenas-SanchezR97,
author = {Puri Arenas and
Mario Rodr\'{\i}guez-Artalejo},
title = {A {S}emantic {F}ramework for {F}unctional {L}ogic {P}rogramming with
{A}lgebraic {P}olymorphic {T}ypes},
booktitle = {TAPSOFT'97: Theory and Practice of Software Development,
7th International Joint Conference CAAP/FASE, Lille, France,
April 14-18, 1997, Proceedings},
pages = {453-464},
ee = {http://dx.doi.org/10.1007/BFb0030618},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {1214},
year = {1997},
isbn = {3-540-62781-2}
}
|
[28]
|
Puri Arenas and Agostino Dovier.
Minimal Set Unification.
In Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands, September
20-22, 1995, Proceedings, volume 982 of Lecture Notes in Computer
Science, pages 397-414. Springer, 1995.
[ bibtex |
abstract |
DOI ]
A unification algorithm is said to be minimal for a unification problem if it generates exactly a complete set of minimal unifiers, without instances, without repetitions. The aim of this paper is to describe a new set unification algorithm which is minimal for a significant collection of sample problems that can be used as benchmarks for testing any set unification algorithm. To this end, a deep combinatorial study of such problems has been carried out. Moreover, an existing naïve set unification algorithm has also been tested in order to show its poor behavior on most of the sample problems.
@inproceedings{DBLP:conf/plilp/Arenas-SanchezD95,
author = {Puri Arenas and Agostino Dovier},
title = {Minimal {S}et {U}nification},
booktitle = {Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands,
September 20-22, 1995, Proceedings},
year = {1995},
pages = {397-414},
ee = {http://dx.doi.org/10.1007/BFb0026832},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {982}
}
|
[29]
|
Puri Arenas and Ana Gil-Luezas.
A Debugging Model for Lazy Narrowing.
In Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands, September
20-22, 1995, Proceedings, volume 982 of Lecture Notes in Computer
Science, pages 453-454. Springer, 1995.
[ bibtex |
abstract |
DOI ]
@inproceedings{DBLP:conf/plilp/Arenas-SanchezG95,
author = {Puri Arenas and
Ana Gil-Luezas},
title = {A {D}ebugging {M}odel for {L}azy {N}arrowing},
booktitle = {Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands,
September 20-22, 1995, Proceedings},
year = {1995},
pages = {453-454},
ee = {http://dx.doi.org/10.1007/BFb0026836},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {982}
}
|
[30]
|
Puri Arenas, Ana Gil-Luezas, and Francisco Javier López-Fraguas.
Combining Lazy Narrowing with Disequality Constraints.
In Programming Language Implementation and Logic Programming,
6th International Symposium, PLILP'94, Madrid, Spain, September 14-16, 1994,
Proceedings, volume 844 of Lecture Notes in Computer Science, pages
385-399. Springer, 1994.
[ bibtex |
abstract |
DOI ]
We investigate an extension of a lazy functional logic language, which uses term disequations both in programs and in computed answers. The semantic properties of the language are derived from the fact that it can be viewed as an instance of the CFLP(X)-scheme proposed for constraint functional logic programming. In particular, the operational semantics for CFLP(X) ― so called lazy constrained narrowing ― is a computation mechanism parameterized by a constraint solver over the underlying domain. We define a constraint solver for our language, whose properties ensure completeness. As a further step towards implementation, we present an executable Prolog specification of the operational semantics, which has been implemented on top of a Prolog system. Experimental results have shown good performance.
@inproceedings{DBLP:conf/plilp/Arenas-SanchezGL94,
author = {Puri Arenas and
Ana Gil-Luezas and
Francisco Javier L{\'o}pez-Fraguas},
title = {Combining {L}azy {N}arrowing with {D}isequality {C}onstraints},
booktitle = {Programming Language Implementation and Logic Programming,
6th International Symposium, PLILP'94, Madrid, Spain, September
14-16, 1994, Proceedings},
year = {1994},
pages = {385-399},
ee = {http://dx.doi.org/10.1007/3-540-58402-1_27},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {844},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[1]
|
Diego Esteban Alonso Blas, Puri Arenas, and Samir Genaim.
Handling Non-linear Operations in the Value Analysis of
COSTA.
In Proceedings of the Bytecode 2011 workshop, the Sixth Workshop
on Bytecode Semantics, Verification, Analysis and Transformation (Bytecode),
volume 279(1) of Electronic Notes in Theoretical Computer Science,
pages 3-17. Elsevier, 2011.
[ bibtex |
abstract |
DOI |
PDF |
slides ]
Inferring precise relations between (the values of) program
variables at different program points is essential for termination
and resource usage analysis. In both cases, this information is used
to synthesize ranking functions that imply the program's termination
and bound the number of iterations of its loops.
For efficiency, it is common to base value analysis on
non-disjunctive abstract domains such as Polyhedra, Octagon,
etc. While these domains are efficient and able to infer complex
relations for a wide class of programs, they are often not
sufficient for modeling the effect of non-linear and bit arithmetic
operations. Modeling such operations precisely can be done by using
more sophisticated abstract domains, at the price of performance
overhead.
In this paper we report on the value analysis of COSTA that
is based on the idea of encoding the disjunctive nature of
non-linear operations into the (abstract) program itself, instead of
using more sophisticated abstract domains.
Our experiments demonstrate that COSTA is able to prove
termination and infer bounds on resource consumption for programs
that could not be handled before.
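As background on the role of ranking functions mentioned
above (an illustrative sketch, not taken from the paper):
for the loop while (i < n) { i = i + 1; }, the linear
function f(i, n) = n - i is non-negative whenever the
loop condition holds and strictly decreases at each
iteration, so it proves termination and bounds the number
of iterations by max(0, n - i0), where i0 denotes the
initial value of i.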
@inproceedings{AlonsoAG11,
author = {Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim},
title = {{H}andling {N}on-linear {O}perations in the {V}alue {A}nalysis of {COSTA}},
booktitle = {Proceedings of the Bytecode 2011 workshop, the Sixth Workshop on Bytecode Semantics, Verification, Analysis and Transformation (Bytecode)},
year = {2011},
publisher = {Elsevier},
volume = {279},
number = {1},
pages = {3-17},
series = {Electronic Notes in Theoretical Computer Science},
slides = {http://costa.ls.fi.upm.es/papers/costa/AlonsoAG11-slides.pdf},
doi = {10.1016/j.entcs.2011.11.002}
}
|
[2]
|
Elvira Albert, Puri Arenas, Samir Genaim, Israel Herraiz, and Germán Puebla.
Comparing Cost Functions in Resource Analysis.
In Marko C. J. D. van Eekelen and Olha Shkaravska, editors,
Foundational and Practical Aspects of Resource Analysis - First International
Workshop, FOPARA 2009, Eindhoven, The Netherlands, November 6, 2009,
Revised Selected Papers, volume 6324 of Lecture Notes in Computer
Science, pages 1-17. Springer, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
Cost functions provide information about the
amount of resources required to execute a program in
terms of the sizes of input arguments. They can
provide an upper-bound, a lower-bound, or the
average-case cost. Motivated by the existence of a
number of automatic cost analyzers which produce
cost functions, we propose an approach for
automatically proving that a cost function is
smaller than another one. In all applications of
resource analysis, such as resource-usage
verification, program synthesis and optimization,
etc., it is essential to compare cost
functions. This allows choosing an implementation
with smaller cost or guaranteeing that the given
resource-usage bounds are preserved. Unfortunately,
automatically generated cost functions for realistic
programs tend to be rather intricate, defined by
multiple cases, involving non-linear subexpressions
(e.g., exponential, polynomial and logarithmic) and
they can contain multiple variables, possibly
related by means of constraints. Thus, comparing
cost functions is far from trivial. Our approach
first syntactically transforms functions into
simpler forms and then applies a number of
sufficient conditions which guarantee that a set of
expressions is smaller than another expression. Our
preliminary implementation in the COSTA system
indicates that the approach can be useful in
practice.
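As a small illustration of the kind of comparison
involved (an editorial example, not taken from the
paper): to show that 4*n + log(n+1) is smaller than or
equal to n^2 + 5*n for all n >= 1, it suffices to match
each summand of the first expression against a distinct
summand of the second, e.g. 4*n <= 5*n and
log(n+1) <= n^2 for n >= 1; the sufficient conditions
used in the paper are more general and also handle
multiple variables related by constraints.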
@inproceedings{AlbertAGHP09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Israel Herraiz and Germ{\'a}n Puebla},
title = {Comparing {C}ost {F}unctions in {R}esource {A}nalysis},
booktitle = {Foundational and Practical Aspects of Resource Analysis - First International Workshop, {FOPARA} 2009, Eindhoven, The Netherlands, November 6, 2009,
Revised Selected Papers},
year = 2009,
volume = 6324,
pages = {1-17},
editor = {Marko C. J. D. van Eekelen and Olha Shkaravska},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-642-15331-0_1},
publisher = {Springer}
}
|
[3]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, Diana Ramírez,
and Damiano Zanardini.
Upper Bounds of Resource Usage for Java Bytecode using
COSTA and its Web Interface.
In Workshop on Resource Analysis, September 2008.
[ bibtex |
abstract |
PDF ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
non-trivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
@inproceedings{AlbertAGPRZ08b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram\'irez and Damiano Zanardini},
title = {Upper {B}ounds of {R}esource {U}sage for {J}ava {B}ytecode using {COSTA} and its {W}eb {I}nterface},
booktitle = {Workshop on Resource Analysis},
year = 2008,
month = sep
}
|
[4]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, Diana Ramírez,
and Damiano Zanardini.
The COSTA Cost and Termination Analyzer for Java Bytecode
and its Web Interface (Tool Demo).
In Anna Philippou, editor, 22nd European Conference on
Object-Oriented Programming, July 2008.
[ bibtex |
abstract |
PDF ]
COSTA is a research prototype which
performs automatic program analysis and which is
able to infer cost and termination information about
Java bytecode programs. The system receives as input
a bytecode program and a cost model chosen from a
selection of resource descriptions, and tries to
bound the resource consumption of the program with
respect to the given cost model. COSTA provides
several non-trivial notions of resource, such as the
amount of memory allocated on the heap, the number
of bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
@inproceedings{AlbertAGPRZ08a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram\'irez and Damiano Zanardini},
title = {The {COSTA} {C}ost and {T}ermination {A}nalyzer for {J}ava {B}ytecode and its {W}eb {I}nterface ({T}ool {D}emo)},
booktitle = {22nd European Conference on Object-Oriented Programming},
year = 2008,
editor = {Anna Philippou},
month = jul
}
|
[5]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Dealing with Numeric Fields in Termination Analysis of Java-like
Languages.
In 10th Workshop on Formal Techniques for Java-like Programs,
July 2008.
[ bibtex |
abstract |
PDF ]
Termination analysis tools strive to find proofs of
termination for as wide a class of (terminating)
programs as possible. Though several tools exist
which are able to prove termination of non-trivial
programs, when one tries to apply them to realistic
programs, there are still a number of open
problems. In the case of Java-like languages, one of
such problems is to find a practical solution to
prove termination when the termination behaviour of
loops is affected by numeric fields. We have
performed statistics on the Java libraries to see
how often this happens in practice and we found that
in 12.95% of cases, the number of iterations of
loops (and therefore termination) explicitly depends
on values stored in fields and, in the vast majority
of cases, such fields are numeric. Inspired by the
examples found in the libraries, this paper
identifies a series of difficulties that need to be
solved in order to deal with numeric fields in
termination and proposes some ideas towards a
lightweight analysis which is able to prove
termination of sequential Java-like programs in the
presence of numeric fields.
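A minimal example of the phenomenon described above (a
hypothetical snippet, not taken from the paper): in a
Java-like loop while (this.count < this.limit) { this.count++; },
the number of iterations depends on the numeric fields
count and limit rather than on local variables, so a
termination argument must track how the heap contents
evolve across iterations (e.g., that limit is not
modified inside the loop).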
@inproceedings{AlbertAGP08a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {Dealing with Numeric Fields in Termination Analysis of Java-like Languages},
booktitle = {10th Workshop on Formal Techniques for Java-like Programs},
year = 2008,
month = jul
}
|
[6]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
COSTA: A Cost and Termination Analyzer for Java
Bytecode.
In Proceedings of the Workshop on Bytecode Semantics,
Verification, Analysis and Transformation (Bytecode), Electronic Notes in
Theoretical Computer Science, Budapest, Hungary, April 2008. Elsevier.
To appear.
[ bibtex |
abstract |
PDF ]
This paper describes COSTA, a cost
and termination analyzer for Java
bytecode. The system receives as input a bytecode
program and a selection of resources of
interest, and tries to bound the resource
consumption of the program with respect to such a
cost model. COSTA provides several
non-trivial notions of resource, such as the consumption
of the heap, the number of bytecode instructions
executed, the number of calls to a specific method
(e.g., the library method for sending text messages
in mobile phones), etc. The system uses the same
machinery to infer upper bounds on cost, and
for proving termination (which also implies
the boundedness of any resource consumption). The
demo will describe the architecture of COSTA and
show it on a series of programs for which
interesting bounds w.r.t. the above notions of
resource can be obtained. Also, examples for which
the system cannot obtain upper bounds, but can prove
termination, will be presented.
@inproceedings{AlbertAGPZ08a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{COSTA}: {A} {C}ost and {T}ermination {A}nalyzer for {J}ava {B}ytecode},
booktitle = {Proceedings of the Workshop on Bytecode Semantics, Verification, Analysis and Transformation (Bytecode)},
year = 2008,
series = {Electronic Notes in Theoretical Computer Science},
address = {Budapest, Hungary},
month = apr,
publisher = {Elsevier},
note = {To appear}
}
|
[7]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Experiments in Cost Analysis of Java Bytecode.
In Fausto Spoto and Marieke Huisman, editors, 2nd Workshop on
Bytecode Semantics, Verification, Analysis and Transformations, BYTECODE
2007, April 1, 2007, Braga, Portugal, volume 190 of Electronic Notes in
Theoretical Computer Science, pages 67-83. Elsevier, April 2007.
[ bibtex |
abstract |
DOI |
PDF ]
Recently, we proposed a general framework
for the cost analysis of Java bytecode which can be
used for measuring resource usage. This analysis
generates, at compile-time, cost relations
which define the cost of programs as a function of
their input data size. The purpose of this paper is
to assess the practicality of such cost analysis by
experimentally evaluating a prototype analyzer
implemented in CIAO. With this aim, we approximate
the computational complexity of a set of selected
benchmarks, including both well-known algorithms
which have been used to evaluate existing cost
analyzers in other programming paradigms, and other
benchmarks which illustrate object-oriented
features. In our evaluation, we first study whether
the generated cost relations can be automatically
solved. Our experiments show that in some cases the
inferred cost relations can be automatically solved
by using the Mathematica system, whereas, in other
cases, some prior manipulation is required for the
equations to be solvable. Moreover, we
experimentally evaluated the running time of the
different phases of the analysis process. Overall,
we believe our experiments show that the efficiency
of our cost analysis is acceptable, and that the
obtained cost relations are useful in practice
since, at least in our experiments, it is possible
to get a closed form solution.
@inproceedings{AlbertAGPZ07a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{E}xperiments in {C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {2nd Workshop on Bytecode Semantics, Verification, Analysis and Transformations, BYTECODE 2007, April 1, 2007, Braga, Portugal},
pages = {67--83},
year = 2007,
editor = {Fausto Spoto and Marieke Huisman},
volume = {190},
series = {Electronic Notes in Theoretical Computer Science},
month = apr,
doi = {10.1016/j.entcs.2007.02.061},
publisher = {Elsevier}
}
|
[8]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Applications of Static Slicing in Cost Analysis of Java
Bytecode.
In The 3rd International Workshop on Programming Language
Interference and Dependence (PLID'07), Kongens Lyngby, Denmark, 21 August
2007, 2007.
[ bibtex |
abstract |
PDF ]
In this abstract, we discuss several
aspects of the role of static slicing in
minimizing the number of arguments which need to be
taken into account in cost equation systems (CESs).
We focus on two benefits of eliminating the
arguments which do not have an impact on the cost
of the program. On the one hand, the analysis can
be more efficient if we reduce the number of
variables; on the other, the resulting CESs are
more likely to be solvable by standard computer
algebra systems (CASs).
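For instance (an editorial illustration, not taken from
the paper): in a loop for (int i = 0; i < n; i++) { sum += a[i]; },
the accumulator sum has no influence on the number of
iterations, so slicing it away yields cost equations over
i and n only, which are both smaller and easier for a
computer algebra system to solve.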
@inproceedings{AlbertAGPZ07c,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{A}pplications of {S}tatic {S}licing in {C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {The 3rd International Workshop on Programming Language Interference and Dependence (PLID'07), Kongens Lyngby, Denmark, 21 August, 2007},
year = 2007
}
|
[9]
|
Elvira Albert, Puri Arenas, Michael Codish, Samir Genaim, Germán Puebla,
and Damiano Zanardini.
Termination Analysis of Java Bytecode.
In Alexander Serebrenik and Dieter Hofbauer, editors,
Proceedings of the 9th International Workshop on Termination, WST'07 Paris,
France, June 29, 2007, 2007.
[ bibtex |
abstract |
PDF ]
In this work, we describe a termination analysis for
Java bytecode, based on a translation into a
structured intermediate representation. This
representation captures in a uniform setting all
loops (regardless of whether they originate from
recursive calls or iterative loops) and all
variables (either local variables or operand stack
elements). Given this representation we consider a
general form of the size-change graphs
principle to prove termination of the intermediate
program, which in turn implies the termination of
the original program.
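As a brief illustration of the size-change idea referred
to above (an editorial sketch, not taken from the paper):
for a recursive method m(n) that calls m(n - 1) only when
n > 0, the size-change graph of that call records that
the argument strictly decreases and is bounded from
below; since no infinite descending sequence of such
values exists, every call chain is finite and the method
terminates.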
@inproceedings{AlbertACGPZ07,
author = {Elvira Albert and Puri Arenas and Michael Codish and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{T}ermination {A}nalysis of {J}ava {B}ytecode},
booktitle = {Proceedings of the 9th International Workshop on Termination, WST'07 Paris, France, June 29, 2007},
editor = {Alexander Serebrenik and Dieter Hofbauer},
year = 2007
}
|
[10]
|
Puri Arenas, Maria Teresa Hortalá-González, Francisco Javier
López-Fraguas, and Eva Ullán.
Real Constraints within a Functional Logic Language.
In 1996 Joint Conf. on Declarative Programming,
APPIA-GULP-PRODE'96, Donostia-San Sebastian, Spain, July 15-18, 1996, pages
451-464, 1996.
[ bibtex |
abstract ]
@inproceedings{DBLP:conf/agp/Arenas-SanchezHLU96,
author = {Puri Arenas and
Maria Teresa Hortal{\'a}-Gonz{\'a}lez and
Francisco Javier L{\'o}pez-Fraguas and
Eva Ull{\'a}n},
title = {Real {C}onstraints within a {F}unctional {L}ogic {L}anguage},
booktitle = {1996 Joint Conf. on Declarative Programming, APPIA-GULP-PRODE'96,
Donostia-San Sebastian, Spain, July 15-18, 1996},
year = {1996},
pages = {451-464},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[11]
|
Puri Arenas and Agostino Dovier.
Minimal Set Unification.
In 1995 Joint Conference on Declarative Programming,
GULP-PRODE'95, Marina di Vietri, Italy, September 11-14, 1995, pages
447-458, 1995.
[ bibtex |
abstract ]
@inproceedings{DBLP:conf/agp/Arenas-SanchezD95,
author = {Puri Arenas and
Agostino Dovier},
title = {Minimal {S}et {U}nification},
booktitle = {1995 Joint Conference on Declarative Programming, GULP-PRODE'95,
Marina di Vietri, Italy, September 11-14, 1995},
year = {1995},
pages = {447-458},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[12]
|
Puri Arenas and Ana Gil-Luezas.
A Debugging Model for Lazy Functional Logic Languages.
In 1994 Joint Conference on Declarative Programming,
GULP-PRODE'94, Peñiscola, Spain, September 19-22, 1994, Volume 2, pages
117-131, 1994.
[ bibtex |
abstract ]
@inproceedings{DBLP:conf/agp/Arenas-SanchezG94,
author = {Puri Arenas and
Ana Gil-Luezas},
title = {A {D}ebugging {M}odel for {L}azy {F}unctional {L}ogic {L}anguages},
booktitle = {1994 Joint Conference on Declarative Programming, GULP-PRODE'94,
Pe{\~n}iscola, Spain, September 19-22, 1994, Volume 2},
pages = {117-131},
bibsource = {DBLP, http://dblp.uni-trier.de},
year = {1994}
}
|