[1]
|
Elvira Albert, Shelly Grossman, Noam Rinetzky, Clara
Rodríguez-Núñez, Albert Rubio, and Mooly Sagiv.
Relaxed Effective Callback Freedom: A Parametric Correctness
Condition for Sequential Modules With Callbacks.
IEEE Trans. Dependable Secur. Comput., 20(3):2256-2273,
2023.
[ bibtex |
abstract |
DOI |
http ]
Callbacks are an essential mechanism for event-driven
programming. Unfortunately, callbacks make reasoning
challenging because they introduce behaviors where
calls to the module are interleaved. We present a
parametric method that, from a particular invariant
of the program, allows reducing the problem of
verifying the invariant in the presence of
callbacks to the callback-free setting.
Intuitively, we allow callbacks to introduce
behaviors that cannot be produced by callback-free
executions, as long as they do not affect
correctness. A chief insight is that the user
is aware of the potential effect of the callbacks on
the program state. To this end, we present a
parametric verification technique which accepts this
insight as a relation between callback and
callback-free executions. We implemented our approach and
applied it successfully to a large set of real-world
programs.
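To convey the idea, here is a minimal Python sketch (ours, not the paper's formalism; the bank module, its invariant, and all names are hypothetical): the invariant is checked against executions where the external call point is replaced by a user-supplied over-approximation of what callbacks may do to the state.

# Minimal sketch, assuming a toy module with one external call point.
# callback_effect encodes the user's insight as a relation on states:
# callbacks may only withdraw, never overdraw.

def invariant(state):
    return state["balance"] >= 0

def callback_effect(state):
    # all states a callback may produce from the current one
    return [{"balance": b} for b in range(state["balance"] + 1)]

def withdraw(state, amount):
    if state["balance"] >= amount:
        return {"balance": state["balance"] - amount}
    return state

def invariant_holds_with_callbacks(initial, amount):
    # check the callback-free execution against every state the
    # abstracted callback may produce at the external call point
    return all(invariant(withdraw(s, amount))
               for s in callback_effect(initial))

assert invariant_holds_with_callbacks({"balance": 10}, 3)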
@article{AlbertGRRRS23,
author = {Elvira Albert and
Shelly Grossman and
Noam Rinetzky and
Clara Rodr{\'{\i}}guez{-}N{\'{u}}{\~{n}}ez and
Albert Rubio and
Mooly Sagiv},
title = {Relaxed Effective Callback Freedom: {A} Parametric Correctness Condition
for Sequential Modules With Callbacks},
journal = {{IEEE} Trans. Dependable Secur. Comput.},
volume = {20},
number = {3},
pages = {2256--2273},
year = {2023},
url = {https://doi.org/10.1109/TDSC.2022.3178836},
doi = {10.1109/TDSC.2022.3178836},
rank:jcr:q = {1}
}
|
[2]
|
Elvira Albert, Maria Garcia de la Banda, Miguel Gómez-Zamalloa, Miguel
Isabel, and Peter J. Stuckey.
Optimal dynamic partial order reduction with context-sensitive
independence and observers.
Journal of Systems and Software, 202:111730, 2023.
[ bibtex |
abstract |
DOI |
http ]
Dynamic Partial Order Reduction (DPOR) algorithms are used in stateless model checking of concurrent programs to avoid the exploration of equivalent execution sequences. In order to detect equivalence, DPOR relies on the notion of independence between execution steps. As this notion must be approximated, it can lose precision and thus treat execution steps as interfering when they are not. Our work is inspired by recent progress in the area that has introduced more accurate ways to exploit conditional notions of independence: Context-Sensitive DPOR considers two steps p and t independent in the current state if the states obtained by executing p⋅t and t⋅p are the same; Optimal DPOR with Observers makes their dependency conditional to the existence of future events that observe their operations. This article introduces a new algorithm, Optimal Context-Sensitive DPOR with Observers, that combines these two notions of conditional independence, and goes beyond them by exploiting their synergies. The implementation of our algorithm has been undertaken within the Nidhugg model checking tool. Our experimental evaluation, using benchmarks from the previous works, shows that our algorithm is able to effectively combine the benefits of both context-sensitive and observers-based independence and that it can produce exponential reductions over both of them.
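As an illustration of context-sensitive independence, the following minimal Python sketch (ours, not Nidhugg's implementation) checks two steps for commutativity in a concrete state; two writes of the same value are classically dependent but turn out independent in every state.

# Minimal sketch: steps are functions from state to state; p and t
# are independent in state s iff p.t and t.p yield the same state.

def independent_in(state, p, t):
    return t(p(dict(state))) == p(t(dict(state)))

write_x_1 = lambda s: {**s, "x": 1}
write_x_1_bis = lambda s: {**s, "x": 1}

# a write-write pair on x is classically dependent, yet these two
# writes commute in every state, so both orders need not be explored
assert independent_in({"x": 0}, write_x_1, write_x_1_bis)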
@article{AlbertGGIS23,
title = {Optimal dynamic partial order reduction with context-sensitive independence and observers},
journal = {Journal of Systems and Software},
volume = {202},
pages = {111730},
year = {2023},
issn = {0164-1212},
doi = {10.1016/j.jss.2023.111730},
url = {https://www.sciencedirect.com/science/article/pii/S0164121223001255},
author = {Elvira Albert and Maria Garcia {de la Banda} and Miguel G\'omez-Zamalloa and Miguel Isabel and Peter J. Stuckey},
rank:jcr:q = {2}
}
|
[3]
|
Elvira Albert, Pablo Gordillo, Alejandro Hernández-Cerezo, Albert Rubio, and
Maria A. Schett.
Super-Optimization of Smart Contracts.
ACM Transactions on Software Engineering and Methodology,
31(4):1-29, 2022.
[ bibtex |
abstract |
DOI |
http ]
Smart contracts are programs deployed on a
blockchain. They are executed for a monetary fee
paid in gas, a clear optimization target for smart
contract compilers. Because smart contracts are a
young, fast-moving field without (manually)
fine-tuned compilers, they highly benefit from
automated and adaptable approaches, especially as
smart contracts are effectively immutable and, as
such, need a high level of assurance. This makes
them an ideal domain for applying formal methods.
Super-optimization is a technique to find the best
translation of a block of instructions by trying all
possible sequences of instructions which produce the
same result. We present a framework for
super-optimizing smart contracts based on Max-SMT
with two main ingredients: (1) a stack functional
specification extracted from the basic blocks of a
smart contract, which is simplified using rules
capturing the semantics of arithmetic, bit-wise, and
relational operations, and (2) the synthesis of
optimized blocks, which finds, by means of an
efficient SMT encoding, basic blocks with minimal
gas cost whose stack functional specification is
equal (modulo commutativity) to the extracted one.
We implemented our framework in the tool
SYRUP. Through large-scale experiments on real-world
smart contracts we analyze performance improvements
for different SMT encodings, as well as trade-offs
between quality of optimizations and required
optimization time.
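The following Python sketch conveys the super-optimization idea on a toy stack machine: it enumerates shorter candidate sequences and tests them on sample stacks, whereas SYRUP obtains equivalence and optimality from a Max-SMT encoding. The instruction set and gas costs here are illustrative, not real EVM values.

# Minimal sketch of super-optimization by enumeration and testing.
from itertools import product

GAS = {"PUSH0": 2, "PUSH1": 3, "ADD": 3, "DUP1": 3, "POP": 2}

def run(code, stack):
    stack = list(stack)
    for op in code:
        if op == "PUSH0": stack.append(0)
        elif op == "PUSH1": stack.append(1)
        elif op == "DUP1": stack.append(stack[-1])
        elif op == "POP": stack.pop()
        elif op == "ADD":
            a, b = stack.pop(), stack.pop()
            stack.append(a + b)
    return stack

def superoptimize(block, samples):
    spec = [run(block, s) for s in samples]   # stack behaviour on samples
    cost = lambda c: sum(GAS[op] for op in c)
    best = list(block)
    for n in range(len(block)):               # try strictly shorter blocks
        for cand in product(GAS, repeat=n):
            try:
                ok = [run(cand, s) for s in samples] == spec
            except IndexError:                # stack underflow: discard
                continue
            if ok and cost(cand) < cost(best):
                best = list(cand)
    return best

# "x + 0" is the identity: PUSH0; ADD super-optimizes to the empty block
print(superoptimize(["PUSH0", "ADD"], [[3], [7], [0]]))   # -> []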
@article{AlbertGHRS22,
author = {Elvira Albert and
Pablo Gordillo and
Alejandro Hern\'andez-Cerezo and
Albert Rubio and
Maria A. Schett},
title = {{Super-Optimization of Smart Contracts}},
journal = {{ACM Transactions on Software Engineering and Methodology}},
year = {2022},
volume = {31},
number = {4},
articleno = {70},
pages = {1--29},
issn = {1049-331X},
doi = {10.1145/3506800},
url = {https://doi.org/10.1145/3506800},
rank:jcr:q = {1}
}
|
[4]
|
Elvira Albert, Jesús Correas, Pablo Gordillo, Guillermo Román-Díez, and
Albert Rubio.
Don't Run on Fumes - Parametric Gas Bounds for Smart Contracts.
Journal of Systems and Software, 176:110923, 2021.
[ bibtex |
abstract |
DOI |
PDF ]
Gas is the unit that measures the computational
effort of executing each replicated operation in the
Ethereum blockchain platform. Each transaction
executed by the Ethereum Virtual Machine (EVM) has
an associated gas consumption specified by
Ethereum. If a transaction exceeds the amount of gas
allotted by the user (known as the gas limit), an
out-of-gas exception is raised, interrupting the
current execution. We present, to the best of our
knowledge, the first static analysis that is able to
infer parametric gas bounds for smart contracts. The
inferred bounds are typically parametric on the
sizes of the input parameters of the functions, but
they are often also parametric on the contract
state, and sometimes even on blockchain data. The
analysis is implemented in a tool named GASTAP,
Gas-Aware Smart contracT Analysis Platform, which
takes as input a smart contract (either in EVM,
disassembled EVM, or Solidity source code) and
automatically infers sound gas upper bounds for its
public functions. GASTAP has been applied to the
entire contents of the Ethereum blockchain; in total
we have analyzed 318,397 functions and obtained gas
bounds for 91.02% of them.
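As a worked illustration of what such a bound looks like (the constants below are made up for the example, not GASTAP output): for a function that loops over a storage array of length n, the inferred gas upper bound is a function of n, and comparing it against the user's gas limit soundly predicts out-of-gas exceptions.

# Hypothetical shape of an inferred bound: a constant part plus a
# per-iteration part parametric on the contract state (array length n).
def gas_upper_bound(n):
    return 420 + 193 * n

def may_run_out_of_gas(n, gas_limit):
    # sound check: if the bound fits, no out-of-gas exception occurs
    return gas_upper_bound(n) > gas_limit

print(gas_upper_bound(100))            # 19720
print(may_run_out_of_gas(100, 21000))  # False: the transaction fits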
@article{AlbertCGRR21,
author = {Elvira Albert and
Jes\'us Correas and
Pablo Gordillo and
Guillermo Rom\'an-D\'iez and
Albert Rubio},
title = {{Don't Run on Fumes --- Parametric Gas Bounds for Smart Contracts}},
journal = {{J}ournal of {S}ystems and {S}oftware},
volume = {176},
pages = {110923},
issn = {0164-1212},
doi = {10.1016/j.jss.2021.110923},
year = {2021},
rank:jcr:q = {2}
}
|
[5]
|
Elvira Albert, Miguel Gómez-Zamalloa, Miguel Isabel, Albert Rubio,
Matteo Sammartino, and Alexandra Silva.
Actor-based model checking for Software-Defined Networks.
J. Log. Algebraic Methods Program., 118:100617, 2021.
[ bibtex |
abstract |
DOI |
PDF |
http ]
|
[6]
|
Elvira Albert, Nikolaos Bezirgiannis, Frank de Boer, and Enrique Martin-Martin.
A Formal, Resource Consumption-Preserving Translation from Actors
with Cooperative Scheduling to Haskell.
Fundamenta Informaticae, 177(3-4):203-234, 2020.
[ bibtex |
abstract |
DOI |
http ]
@article{AlbertBBM19,
author = {Elvira Albert and Nikolaos Bezirgiannis and Frank de Boer and Enrique Martin-Martin},
title = {A Formal, Resource Consumption-Preserving Translation from Actors with Cooperative Scheduling to Haskell},
journal = {Fundamenta Informaticae},
publisher = {IOS Press},
volume = {177},
number = {3-4},
pages = {203--234},
year = {2020},
url = {https://doi.org/10.3233/FI-2020-1988},
doi = {10.3233/FI-2020-1988},
rank:jcr:q = {3}
}
|
[7]
|
Elvira Albert, Samir Genaim, Raúl Gutiérrez, and Enrique Martin-Martin.
A Transformational Approach to Resource Analysis with Typed-norms
Inference.
Theory and Practice of Logic Programming, 20(3):310-357, 2020.
[ bibtex |
abstract |
DOI ]
@article{AlbertGGM19,
author = {Albert, Elvira and Genaim, Samir and Guti\'errez, Ra\'ul and Martin-Martin, Enrique},
title = {A Transformational Approach to Resource Analysis with Typed-norms Inference},
doi = {10.1017/S1471068419000401},
journal = {Theory and Practice of Logic Programming},
publisher = {Cambridge University Press},
volume = {20},
number = {3},
pages = {310--357},
year = {2020},
issn = {1471-0684},
rank:jcr:q = {2}
}
|
[8]
|
Elvira Albert, Miquel Bofill, Cristina Borralleras, Enrique Martin-Martin, and
Albert Rubio.
Resource Analysis driven by (Conditional) Termination Proofs.
Theory and Practice of Logic Programming, 19(5-6):722-739,
2019.
[ bibtex |
abstract |
DOI ]
@article{AlbertBBMR19,
author = {Elvira Albert and Miquel Bofill and Cristina Borralleras and Enrique Martin-Martin and Albert Rubio},
title = {Resource Analysis driven by (Conditional) Termination Proofs},
volume = {19},
doi = {10.1017/S1471068419000152},
number = {5-6},
journal = {Theory and Practice of Logic Programming},
publisher = {Cambridge University Press},
year = {2019},
pages = {722-739},
rank:jcr:q = {1}
}
|
[9]
|
Elvira Albert, Jesús Correas, and Guillermo Román-Díez.
Peak Resource Analysis of Concurrent Distributed Systems.
Journal of Systems and Software, 149:35-62, 2019.
[ bibtex |
abstract |
DOI ]
Traditional cost analysis frameworks have been defined
for cumulative resources which keep on increasing
along the computation. Examples of cumulative
resources are execution time, number of executed
steps, and energy consumption. Non-cumulative
resources are acquired and (possibly) released along
the execution. Examples of non-cumulative cost are
the number of connections established that are later
closed, or resources requested from a virtual host
which are released after use. We present a
static analysis framework to infer the peak cost for
non-cumulative types of resources in concurrent
distributed systems. Our framework is generic
w.r.t. the type of resource and can be instantiated
with any of the above-mentioned resources as well as
with any other resource that is measurable by
inspecting the instructions of the program. The
concurrent distributed language that we consider
allows creating distributed locations dynamically
within the program and spawning tasks that execute
concurrently at any of the existing locations. Our
analysis infers, for the different distributed
locations, the maximum (or peak) amount of resources
that they may require along any execution. This
information can be used, among other applications,
to detect bottlenecks in the system and to
efficiently dimension the processing capacity and
storage that the locations of the concurrent
distributed system require.
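The contrast between the two notions fits in a few lines of Python (a minimal sketch over an explicit trace; the article infers such peaks statically): cumulative cost only grows, while the peak of a non-cumulative resource is the high-water mark of acquisitions minus releases.

def cumulative_cost(trace):
    return sum(n for kind, n in trace if kind == "acquire")

def peak_cost(trace):
    current = peak = 0
    for kind, n in trace:
        current += n if kind == "acquire" else -n
        peak = max(peak, current)
    return peak

trace = [("acquire", 2), ("acquire", 3), ("release", 4), ("acquire", 1)]
print(cumulative_cost(trace))  # 6: connections opened in total
print(peak_cost(trace))        # 5: open simultaneously at the peak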
@article{AlbertCR18,
author = {Elvira Albert and
Jes\'us Correas and
Guillermo Rom\'an-D\'iez},
title = {{P}eak {R}esource {A}nalysis of {C}oncurrent {D}istributed {S}ystems},
journal = {{J}ournal of {S}ystems and {S}oftware},
volume = {149},
pages = {35--62},
issn = {0164-1212},
year = {2019},
doi = {10.1016/j.jss.2018.11.018},
publisher = {Elsevier},
rank:jcr:q = {2}
}
|
[10]
|
Elvira Albert, Jesús Correas, Einar Broch Johnsen, Ka I Pun, and Guillermo Román-Díez.
Parallel Cost Analysis.
ACM Transactions on Computational Logic, 19(4):1-37, 2018.
[ bibtex |
abstract |
DOI ]
This paper presents parallel cost analysis, a static
cost analysis that over-approximates the cost
of parallel execution in distributed systems. In
contrast to the standard notion of serial cost,
parallel cost captures the cost of synchronized
tasks executing in parallel by exploiting the true
concurrency available in the execution model of
distributed processing. True concurrency is
challenging for static cost analysis, because the
parallelism between tasks needs to be soundly
inferred, and the waiting and idle processor times
at the different locations need to be accounted for.
Parallel cost analysis works in three phases: (1) it
performs a block-level analysis to estimate the
serial costs of the blocks between synchronization
points in the program; (2) it then constructs a
distributed flow graph (DFG) to capture the
parallelism and the waiting and idle times at the
locations of the distributed system; and (3) the
parallel cost can finally be obtained as the path of
maximal cost in the DFG. We prove the correctness of
the proposed parallel cost analysis, and provide a
prototype implementation to perform an experimental
evaluation of the accuracy and feasibility of the
proposed analysis.
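Phase (3) amounts to a maximal-cost path computation over a DAG. A minimal Python sketch follows; the block names, serial costs, and graph are illustrative stand-ins for the results of phases (1) and (2).

from functools import lru_cache

serial_cost = {"A": 5, "B": 3, "C": 7, "D": 2}                   # phase (1)
successors = {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}  # the DFG

@lru_cache(maxsize=None)
def max_cost_from(block):
    # cost of the most expensive path starting at this block
    return serial_cost[block] + max(
        (max_cost_from(s) for s in successors[block]), default=0)

print(max_cost_from("A"))  # 14: the parallel cost, via the path A-C-D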
@article{AlbertCPJR18,
author = {Elvira Albert and
Jes\'us Correas and
Einar Broch Johnsen and
Ka I Pun and
Guillermo Rom\'an-D\'iez},
title = {{P}arallel {C}ost {A}nalysis},
journal = {ACM Transactions on Computational Logic},
issn = {1529-3785},
publisher = {ACM},
rank:jcr:q = {4},
year = {2018},
volume = {19},
pages = {1--37},
number = {4},
doi = {10.1145/3274278}
}
|
[11]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Systematic testing of actor systems.
Software Testing, Verification and Reliability, 2018.
[ bibtex |
abstract |
DOI |
http ]
Testing concurrent systems requires exploring all
possible nondeterministic interleavings that the
concurrent execution may have, as any of the
interleavings may reveal the erroneous behavior. In
testing of actor systems, we can distinguish two
sources of nondeterminism: (1) actor selection, the
order in which actors are explored, and (2) task
selection, the order in which the tasks within each
actor are explored. This article provides new
strategies and heuristics for pruning redundant
state-exploration when testing actor systems by
reducing the amount of unnecessary nondeterminism of
both types. Furthermore, we extend these techniques
to handle synchronization primitives that allow
awaiting the completion of an asynchronous
task. We report on an implementation and
experimental evaluation of the proposed techniques
in SYCO, a testing tool for actor-based
concurrency.
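The two sources of nondeterminism can be made concrete with a small Python enumeration (a brute-force baseline without the article's pruning; the actors and tasks are illustrative): all interleavings of two actors' task queues in which task order within each actor respects its queue.

from itertools import permutations

pending = {"a1": ["t1", "t2"], "a2": ["u1", "u2"]}
all_tasks = [(a, t) for a, ts in pending.items() for t in ts]

def respects_queues(order):
    # task selection within each actor must follow its queue order
    return all([t for a, t in order if a == actor] == tasks
               for actor, tasks in pending.items())

schedules = {o for o in permutations(all_tasks) if respects_queues(o)}
print(len(schedules))  # 6 interleavings of two FIFO queues of length 2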
@article{AlbertAGZ18,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Systematic testing of actor systems},
journal = {Software Testing, Verification and Reliability},
year = {2018},
publisher = {Wiley},
issn = {1099-1689},
url = {http://dx.doi.org/10.1002/stvr.1661},
doi = {10.1002/stvr.1661},
rank:jcr:q = {3}
}
|
[12]
|
Elvira Albert, Antonio Flores-Montoya, Samir Genaim, and Enrique Martin-Martin.
Rely-Guarantee Termination and Cost Analyses of Loops
with Concurrent Interleavings.
Journal of Automated Reasoning, 59(1):47-85, Jun 2017.
[ bibtex |
abstract |
DOI |
PDF |
http ]
By following a rely-guarantee style of reasoning, we present
novel termination and cost analyses for concurrent programs that, in order to
prove termination or infer the cost of a considered loop: (1) infer the
termination/cost of each loop as if it were a sequential one, imposing
assertions on how shared-data is modified concurrently; and then (2) prove
that these assertions cannot be violated infinitely many times and, for cost
analysis, infer how many times they are violated. At the core of the analysis,
we use a may-happen-in-parallel analysis to restrict the set of program
points whose execution can interleave. Interestingly, the same kind of
reasoning can be applied to prove termination and infer upper bounds on
the number of iterations of loops with concurrent interleavings. To the best
of our knowledge, this is the first method to automatically bound the cost of
such loops. We have implemented our analysis for an actor-based
language, and showed its accuracy and efficiency by applying it on several
typical applications for concurrent programs and on an industrial case study.
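A back-of-the-envelope instance of the rely-guarantee bound (our illustration, not the paper's formal system): a loop that takes x0 iterations sequentially, whose counter concurrent tasks may reset, still terminates if the interfering assertion is violated at most k times.

# illustrative bound for "while x > 0: x -= 1" when interference may
# reset x to at most x0, but at most k times (the assertion-violation
# count inferred in step (2) of the analysis)
def iteration_bound(x0, k):
    # between two interferences the loop runs at most x0 iterations
    return (k + 1) * x0

print(iteration_bound(10, 2))  # 30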
@article{AlbertFGM17,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim and Enrique Martin-Martin},
title = {{R}ely-{G}uarantee {T}ermination and {C}ost {A}nalyses of {L}oops with {C}oncurrent {I}nterleavings},
year = {2017},
month = {Jun},
volume = {59},
number = {1},
pages = {47--85},
doi = {10.1007/s10817-016-9400-6},
url = {http://dx.doi.org/10.1007/s10817-016-9400-6},
journal = {Journal of Automated Reasoning},
issn = {0168-7433},
publisher = {Springer},
rank:jcr:q = {3},
rank:ms:rating = {59}
}
|
[13]
|
Elvira Albert, Antonio Flores-Montoya, Samir Genaim, and Enrique Martin-Martin.
May-Happen-in-Parallel Analysis for Actor-based
Concurrency.
ACM Transactions on Computational Logic, 17(2):11:1-11:39,
March 2016.
[ bibtex |
abstract |
DOI |
PDF |
http ]
This article presents a may-happen-in-parallel (MHP) analysis
for languages with actor-based concurrency. In this
concurrency model, actors are the concurrency units such
that, when a method is invoked on an actor a2 from a task
executing on actor a1, statements of the current task in a1
may run in parallel with those of the (asynchronous) call on a2,
and with those of transitively invoked methods. The goal of the MHP
analysis is to identify pairs of statements in the program that may
run in parallel in any execution. Our MHP analysis is formalized as
a method-level (local) analysis whose information can be
modularly composed to obtain application-level (global)
information. The information yielded by the MHP analysis is essential
to infer more complex properties of actor-based concurrent programs,
e.g., data race detection, deadlock freeness, termination and
resource consumption analyses can greatly benefit from the MHP
relations to increase their accuracy. We report on MayPar, a
prototypical implementation of an MHP static analyzer for a
distributed asynchronous language.
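A minimal Python sketch of the property itself (far simpler than the article's compositional, method-level analysis; statement names are illustrative): after an asynchronous call, the caller's remaining statements may run in parallel with the callee's statements.

def mhp_pairs(caller_stmts, call_index, callee_stmts):
    # statements after the async call may interleave with the callee
    after_call = caller_stmts[call_index + 1:]
    return {(a, b) for a in after_call for b in callee_stmts}

caller = ["s1", "call m on a2", "s2", "s3"]   # task running on a1
callee = ["t1", "t2"]                         # body of m on a2
print(sorted(mhp_pairs(caller, 1, callee)))
# [('s2', 't1'), ('s2', 't2'), ('s3', 't1'), ('s3', 't2')]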
@article{AlbertFGM15,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim and Enrique Martin-Martin},
title = {{M}ay-{H}appen-in-{P}arallel {A}nalysis for {A}ctor-based {C}oncurrency},
journal = {ACM Transactions on Computational Logic},
volume = {17},
number = {2},
month = mar,
year = {2016},
issn = {1529-3785},
pages = {11:1--11:39},
articleno = {11},
numpages = {39},
doi = {10.1145/2824255},
url = {http://dx.doi.org/10.1145/2824255},
publisher = {ACM},
rank:jcr:q = {1},
rank:ms:rating = {38}
}
|
[14]
|
Damiano Zanardini, Elvira Albert, and Karina Villela.
Resource-Usage-Aware Configuration in Software Product
Lines.
Journal of Logical and Algebraic Methods in Programming, 85(1,
Part 2):173-199, 2016.
Formal Methods for Software Product Line Engineering.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Deriving concrete products from a product-line
infrastructure requires resolving the variability
captured in the product line, based on the company
market strategy or requirements from specific
customers. Selecting the most appropriate set of
features for a product is a complex task, especially
if quality requirements have to be
considered. Resource-usage-aware configuration aims
at providing awareness of resource-usage properties
of artifacts throughout the configuration
process. This article envisages several strategies
for resource-usage-aware configuration which feature
different performance and efficiency trade-offs. The
common idea in all strategies is the use of
resource-usage estimates obtained by an
off-the-shelf static resource-usage analyzer as a
heuristic for choosing among different candidate
configurations. We report on a prototype
implementation of the most practical strategies for
resource-usage-aware configuration and apply it on
an industrial case study.
@article{ZanardiniAV16,
author = {Damiano Zanardini and Elvira Albert and Karina Villela},
title = {Resource-{U}sage-{A}ware {C}onfiguration in {S}oftware {P}roduct {L}ines},
journal = {Journal of Logical and Algebraic Methods in Programming},
year = {2016},
volume = {85},
number = {1, Part 2},
pages = {173--199},
doi = {10.1016/j.jlamp.2015.08.003},
url = {https://doi.org/10.1016/j.jlamp.2015.08.003},
publisher = {Elsevier},
issn = {2352-2208},
rank:jcr:q = {2},
note = {Formal Methods for Software Product Line Engineering},
butype = {article}
}
|
[15]
|
Elvira Albert, Richard Bubel, Samir Genaim, Reiner Hähnle, Germán Puebla,
and Guillermo Román-Díez.
A Formal Verification Framework for Static Analysis.
Software and Systems Modeling, 15(4):987-1012, 2016.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Static analysis tools, such as resource analyzers, give useful
information on software systems, especially in real-time and
safety-critical applications. Therefore, the question of the
reliability of the obtained results is highly important.
State-of-the-art static analyzers typically combine a range of
complex techniques, make use of external tools, and evolve quickly. To
formally verify such systems is not a realistic option.
In this work we propose a different approach whereby, instead of
the tools, we formally verify the results of the
tools. The central idea of such a formal verification framework for
static analysis is the method-wise translation of the information
about a program gathered during its static analysis into
specification contracts that contain enough information for them to
be verified automatically.
We instantiate this framework with COSTA, a state-of-the-art
static analysis system for sequential Java programs, for producing
resource guarantees, and with KeY, a state-of-the-art verification
tool, for formally verifying the correctness of such resource guarantees.
Resource guarantees allow one to be certain that programs will
run within the indicated amount of resources, which may refer to
memory consumption, number of instructions executed, etc.
Our results show that the proposed tool cooperation can be used for
automatically producing verified resource guarantees.
@article{AlbertBGHPR15,
author = {Elvira Albert and
Richard Bubel and
Samir Genaim and
Reiner H\"ahnle and
Germ\'{a}n Puebla and
Guillermo Rom\'an-D\'iez},
title = {{A} {F}ormal {V}erification {F}ramework for {S}tatic {A}nalysis},
journal = {{S}oftware and {S}ystems {M}odeling},
year = {2016},
pages = {987-1012},
volume = {15},
number = {4},
publisher = {Springer},
url = {http://dx.doi.org/10.1007/s10270-015-0476-y},
doi = {10.1007/s10270-015-0476-y},
issn = {1619-1366},
rank:jcr:q = {2}
}
|
[16]
|
Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Germán Puebla, and Guillermo Román-Díez.
Object-Sensitive Cost Analysis for Concurrent Objects.
Software Testing, Verification and Reliability, 25(3):218-271,
2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
This article presents a novel cost
analysis framework for concurrent objects.
Concurrent objects form a well-established model for distributed
concurrent systems. In this model, objects are the
concurrency units, which communicate with each other via
asynchronous method calls. Cost analysis aims at
automatically approximating the resource consumption of
executing a program in terms of its input parameters. While cost
analysis for sequential programming languages has received
considerable attention, concurrency and distribution have been notably
less studied. The main challenges of cost analysis in a concurrent
setting are:
(1) Inferring precise size abstractions for data in the program in
the presence of shared memory. This information is essential for
bounding the number of iterations of loops.
(2) Distribution suggests that analysis must infer the cost of the
diverse distributed components separately. We handle this by means of a
novel form of object-sensitive recurrence equations which use
cost centers in order to keep the resource usage assigned to
the different components separate.
We have implemented our analysis and evaluated it
on several small applications which are
classical examples of concurrent and distributed programming.
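The cost-center idea can be pictured in a few lines of Python (illustrative only; the analysis attributes symbolic, not concrete, costs): every step is charged to the object executing it, so the components' resource usage stays separate.

from collections import defaultdict

cost = defaultdict(int)      # one cost center per object

def charge(obj, amount):
    cost[obj] += amount      # attribute the step to its executor

charge("server", 5); charge("client", 2); charge("server", 1)
print(dict(cost))            # {'server': 6, 'client': 2}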
@article{AlbertACGGPR15,
author = {Elvira Albert and
Puri Arenas and
Jes\'us Correas and
Samir Genaim and
Miguel G\'omez-Zamalloa and
Germ\'{a}n Puebla and
Guillermo Rom\'an-D\'iez},
title = {{O}bject-{S}ensitive {C}ost {A}nalysis for {C}oncurrent {O}bjects},
journal = {Software Testing, Verification and Reliability},
year = {2015},
publisher = {Wiley},
pages = {218--271},
url = {http://dx.doi.org/10.1002/stvr.1569},
doi = {10.1002/stvr.1569},
volume = {25},
number = {3},
issn = {0960-0833},
rank:jcr:q = {2}
}
|
[17]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
A Practical Comparator of Cost Functions and Its
Applications.
Science of Computer Programming, 111(3):483-504, 2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Automatic cost analysis has
significantly advanced in the last few years. Nowadays, a number of
cost analyzers exist which automatically produce upper- and/or
lower-bounds on the amount of resources required to execute a
program. Cost analysis has a number of important applications such as
resource-usage verification and program synthesis and optimization.
For such applications to be successful, it is not sufficient to have
automatic cost analysis. It is also required to have automated means
for handling the analysis results, which are in the form of
Cost Functions (CFs for short), i.e., non-recursive
expressions composed of a relatively small number of types of basic
expressions. In particular, we need automated means for comparing CFs in
order to prove that a CF is smaller than or equal to
another one for all input values of interest.
General function comparison is a hard mathematical problem. Rather
than attacking the general problem, in this work we focus on
comparing CFs by exploiting their syntactic properties
and we present, to the best of our knowledge, the first
practical CF comparator which opens the door to fully automated
applications of cost analysis.
We have implemented the
comparator and made its source code
available online, so that any cost analyzer can use it.
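For the common case of polynomial CFs over non-negative variables, a sufficient syntactic check fits in a few lines of Python (a minimal sketch; the paper's comparator handles a much richer class of expressions):

def poly_leq(f, g):
    # f, g map exponent -> coefficient; e.g. n^2 + 3n is {2: 1, 1: 3}.
    # Coefficient-wise dominance of g over f is sufficient (though
    # not necessary) for f(n) <= g(n) on all n >= 0.
    exps = set(f) | set(g)
    return all(g.get(e, 0) >= f.get(e, 0) for e in exps)

f = {2: 1, 1: 3}         # n^2 + 3n
g = {2: 2, 1: 3, 0: 1}   # 2n^2 + 3n + 1
print(poly_leq(f, g))    # True: g bounds f for all n >= 0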
@article{AlbertAGP15,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ\'{a}n Puebla},
title = {A {P}ractical {C}omparator of {C}ost {F}unctions and {I}ts {A}pplications},
journal = {Science of Computer Programming},
year = {2015},
publisher = {Elsevier},
pages = {483--504},
volume = {111},
number = {3},
rank:jcr:q = {3},
issn = {0167-6423},
doi = {10.1016/j.scico.2014.12.001},
url = {http://dx.doi.org/10.1016/j.scico.2014.12.001}
}
|
[18]
|
Elvira Albert, Jesús Correas, Germán Puebla, and Guillermo Román-Díez.
Quantified Abstract Configurations of Distributed Systems.
Formal Aspects of Computing, 27(4):665-699, 2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
When reasoning about distributed systems, it is essential to have
information about the different kinds of nodes that compose the
system, how many instances of each kind exist, and how nodes
communicate with
other nodes.
In this paper we present a static-analysis-based approach which is
able to provide information about the questions above.
In order to cope with an unbounded number of nodes and an unbounded
number of calls among them, the analysis
performs an abstraction of the system producing a graph whose
nodes may represent (infinitely) many concrete nodes and whose
arcs may represent (infinitely) many calls among nodes.
The crux of our approach is that the abstraction is enriched with
upper bounds inferred by resource analysis that limit the
number of concrete instances that the nodes and arcs represent and
their resource consumption.
The information available in our quantified abstract
configurations allows us to define performance indicators which
measure the quality of the system. In particular, we present
several indicators that assess the level of distribution in the
system, the amount of communication among distributed nodes
that it requires, and how balanced the load of the distributed nodes
that compose the system is. Our performance indicators are given as
functions on the input data sizes, and they can be used to automate the
comparison of different distributed settings and guide towards finding the
optimal configuration.
@article{AlbertCPR14,
author = {Elvira Albert and Jes\'us Correas and Germ\'{a}n Puebla and Guillermo Rom\'{a}n-D\'{i}ez},
title = {{Q}uantified {A}bstract {C}onfigurations of {D}istributed {S}ystems},
journal = {Formal Aspects of Computing},
year = {2015},
publisher = {Springer},
issn = {0934-5043},
volume = {27},
number = {4},
pages = {665--699},
rank:jcr:q = {4},
doi = {10.1007/s00165-014-0321-z},
url = {https://doi.org/10.1007/s00165-014-0321-z}
}
|
[19]
|
Elvira Albert, Jesús Correas, Germán Puebla, and Guillermo Román-Díez.
A Multi-Domain Incremental Analysis Engine and its
Application to Incremental Resource Analysis.
Theoretical Computer Science, 585:91-114, 2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
The aim of incremental analysis is, given a program,
its analysis results, and a series of changes to the program, to
obtain the new analysis results as efficiently as possible and,
ideally, without having to (re-)analyze fragments of code which are
not affected by the changes. Incremental analysis can significantly
reduce both the time and the memory requirements of analysis.
The first contribution of this article is a multi-domain
incremental fixed-point algorithm for a sequential Java-like
language. The algorithm is multi-domain in the sense that it
interleaves the (re-)analysis for multiple domains by taking into
account dependencies among them. Importantly, this allows the
incremental analyzer to invalidate only those analysis results
previously inferred by certain dependent domains. The second
contribution is an incremental resource usage analysis which,
in its first phase, uses the multi-domain incremental fixed-point
algorithm to carry out all global pre-analyses required to infer
cost in an interleaved way. Such resource analysis is parametric on
the cost metrics one wants to measure (e.g., number of executed
instructions, number of objects created, etc.). Besides, we present
a novel form of cost summaries which allows us to
incrementally reconstruct only those components of cost functions
affected by the changes. Experimental results in the COSTA system
show that the proposed incremental analysis
provides significant performance gains, with speedups over
non-incremental analysis ranging from 1.48x to 5.13x.
@article{AlbertCPR13-tcs,
author = {Elvira Albert and Jes\'us Correas and Germ\'{a}n Puebla and Guillermo Rom\'{a}n-D\'{i}ez},
title = {A {M}ulti-{D}omain {I}ncremental {A}nalysis {E}ngine and its {A}pplication to {I}ncremental {R}esource {A}nalysis},
journal = {Theoretical Computer Science},
year = {2015},
publisher = {Elsevier},
volume = {585},
pages = {91--114},
issn = {0304-3975},
doi = {10.1016/j.tcs.2015.03.002},
url = {https://doi.org/10.1016/j.tcs.2015.03.002},
rank:jcr:q = {3}
}
|
[20]
|
Elvira Albert, Frank S. de Boer, Reiner Hähnle, Einar Broch Johnsen, Rudolf Schlatte, S. Lizeth Tapia Tarifa, and Peter Y. H. Wong.
Formal Modeling and Analysis of Resource Management for Cloud
Architectures.
Journal of Service Oriented Computing and Applications,
8(4):323-339, 2014.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We demonstrate by a case study of an industrial distributed system
how performance, resource consumption, and deployment on the cloud
can be formally modeled and analyzed using the abstract behavioral
specification language Real-Time ABS. These non-functional aspects of the
system are integrated with an existing formal model of the
functional system behavior, achieving a separation of concerns
between the functional and non-functional aspects in the integrated
model. The resource costs associated with execution in the system
depend on the size of local data structures, which evolve over time;
we derive corresponding worst-case cost estimations by static
analysis techniques and integrate them into our resource-sensitive
model. The model is further parameterized with respect to deployment
scenarios which capture different application-level management
policies for virtualized resources. The model is validated against
the existing system's performance characteristics and used to
simulate, analyze, and compare deployment scenarios on the cloud.
@article{AlbertBHJTW-soca13,
author = {Elvira Albert and Frank S. de Boer and Reiner H\"ahnle and Einar Broch Johnsen and Rudolf Schlatte and S. Lizeth Tapia Tarifa and Peter Y. H. Wong},
title = {Formal Modeling and Analysis of Resource Management for Cloud Architectures},
journal = {Journal of Service Oriented Computing and Applications},
year = {2014},
number = {4},
volume = {8},
pages = {323-339},
publisher = {Springer},
issn = {1863-2386},
doi = {10.1007/s11761-013-0148-0},
url = {http://dx.doi.org/10.1007/s11761-013-0148-0}
}
|
[21]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Guillermo Román-Díez.
Conditional Termination of Loops over Heap-allocated Data.
Science of Computer Programming, 92:2-24, 2014.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Static analysis which takes into account the values of data stored
in the heap is considered complex and computationally intractable in
practice. Thus, most static analyzers do not keep track of object
fields nor of array contents, i.e., they are
heap-insensitive. In this article, we propose locality
conditions for soundly tracking heap-allocated data in Java
(bytecode) programs, by means of ghost non-heap allocated
variables. This way, heap-insensitive analysis over the transformed
program can infer information on the original heap-allocated data
without sacrificing efficiency. If the locality conditions cannot
be proven unconditionally, we seek to generate aliasing
preconditions which, when they hold in the initial state,
guarantee the termination of the program. Experimental results show
that we greatly improve the accuracy w.r.t. a heap-insensitive
analysis while the overhead introduced is reasonable.
@article{AlbertAGPG13,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ\'{a}n Puebla and Guillermo Rom\'{a}n-D\'{i}ez},
title = {{C}onditional {T}ermination of {L}oops over {H}eap-allocated {D}ata},
journal = {Science of Computer Programming},
publisher = {Elsevier},
doi = {10.1016/j.scico.2013.04.006},
url = {http://dx.doi.org/10.1016/j.scico.2013.04.006},
issn = {0167-6423},
volume = {92},
pages = {2--24},
year = {2014},
rank:jcr:q = {3}
}
|
[22]
|
Elvira Albert, María García de la Banda, Miguel Gómez-Zamalloa,
José Miguel Rojas, and Peter J. Stuckey.
A CLP Heap Solver for Test Case Generation.
Theory and Practice of Logic Programming, 13(4-5):721-735,
July 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
One of the main challenges to software testing today is
to efficiently handle heap-manipulating
programs. These programs often build complex,
dynamically allocated data structures during
execution and, to ensure reliability, the testing
process needs to consider all possible shapes these
data structures can take. This creates scalability
issues since high (often exponential) numbers of
shapes may be built due to the aliasing of
references. This paper presents a novel CLP
heap solver for the test case generation of
heap-manipulating programs that is more scalable
than previous proposals, thanks to the treatment of
reference aliasing by means of disjunction,
and to the use of advanced back-propagation
of heap related constraints. In addition, the heap
solver supports the use of heap assumptions
to avoid aliasing of data that, though legal, should
not be provided as input.
@article{AlbertGGZRS13,
author = {Elvira Albert and Mar{\'i}a Garc{\'i}a de la Banda and Miguel G\'{o}mez-Zamalloa and Jos\'{e} Miguel Rojas and Peter J. Stuckey},
title = {{A} {CLP} {H}eap {S}olver for {T}est {C}ase {G}eneration},
journal = {Theory and Practice of Logic Programming},
year = {2013},
month = jul,
publisher = {Cambridge U. Press},
volume = {13},
number = {4-5},
pages = {721--735},
issn = {1471-0684},
doi = {10.1017/S1471068413000458},
url = {http://journals.cambridge.org/article_S1471068413000458},
rank:jcr:q = {1}
}
|
[23]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Heap Space Analysis for Garbage Collected Languages.
Science of Computer Programming, 78(9):1427-1448, 2013.
[ bibtex |
abstract |
PDF |
http ]
Accurately predicting the dynamic memory
consumption (or heap space) of programs can be
critical during software development. It is
well-known that garbage collection (GC) complicates
such problem. The peak heap consumption of a
program is the maximum size of the data on the heap
during its execution, i.e., the minimum amount of
heap space needed to safely run the program.
Existing heap space analyses either do not take
deallocation into account or adopt specific models
of garbage collectors which do not necessarily
correspond to the actual memory usage. This paper
presents a novel static analysis for
garbage-collected imperative languages that infers
accurate upper bounds on the peak heap usage,
including exponential, logarithmic and polynomial
bounds. A unique characteristic of the analysis is
that it is parametric on the notion of object
lifetime, i.e., on when objects become collectible.
@article{AlbertGGZ12,
author = {Elvira Albert and Samir Genaim and Miguel G\'{o}mez-Zamalloa},
title = {Heap Space Analysis for Garbage Collected Languages},
journal = {Science of Computer Programming},
publisher = {Elsevier},
issn = {0167-6423},
volume = {78},
number = {9},
pages = {1427--1448},
year = {2013},
url = {http://dx.doi.org/10.1016/j.scico.2012.10.008},
rank:jcr:q = {4}
}
|
[24]
|
Elvira Albert, Samir Genaim, and Abu Naser Masud.
On the Inference of Resource Usage Upper and Lower
Bounds.
ACM Transactions on Computational Logic, 14(3):22:1-22:35,
2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Cost analysis aims at determining the amount of
resources required to run a program in terms of its
input data sizes. The most challenging step is to
infer the cost of executing the loops in the
program. This requires bounding the number of
iterations of each loop and finding tight bounds for
the cost of each of its iterations. This article
presents a novel approach to infer upper and lower
bounds from cost relations. These relations are an
extended form of standard recurrence equations which
can be non-deterministic, contain inexact size
constraints and have multiple arguments that
increase and/or decrease. We propose novel
techniques to automatically transform cost relations
into worst-case and best-case deterministic
one-argument recurrence relations. The solution of
each recursive relation provides precise
upper and lower bounds for executing a
corresponding loop in the program. Importantly,
since the approach is developed at the level of the
cost equations, our techniques are programming
language independent.
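A worked instance of the transformation (our illustration): the non-deterministic relation C(n) = c + C(n') with n-2 <= n' <= n-1 is bounded by two deterministic one-argument recurrences, whose closed forms c*n and c*ceil(n/2) are the loop's upper and lower bounds.

# the worst case always takes the slowest decrease (n-1), the best
# case the fastest (n-2); both recurrences are deterministic
def worst_case(n, c=1):   # closed form: c * n
    return 0 if n <= 0 else c + worst_case(n - 1, c)

def best_case(n, c=1):    # closed form: c * ceil(n / 2)
    return 0 if n <= 0 else c + best_case(n - 2, c)

assert worst_case(10) == 10 and best_case(10) == 5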
@article{AlbertGM12,
author = {Elvira Albert and Samir Genaim and Abu Naser Masud},
title = {On the {I}nference of {R}esource {U}sage {U}pper and {L}ower {B}ounds},
journal = {ACM Transactions on Computational Logic},
year = 2013,
pages = {22:1--22:35},
volume = {14},
number = {3},
issn = {1529-3785},
publisher = {ACM Press},
url = {http://doi.acm.org/10.1145/2499937.2499943},
doi = {10.1145/2499937.2499943},
rank:jcr:q = {1}
}
|
[25]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Object-Oriented Bytecode Programs.
Theoretical Computer Science (Special Issue on Quantitative
Aspects of Programming Languages), 413(1):142-159, 2012.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Cost analysis statically approximates the
cost of programs in terms of their input data size.
This paper presents, to the best of our knowledge,
the first approach to the automatic cost analysis of
Object-Oriented bytecode programs. In languages
such as Java and C#, analyzing bytecode has a much
wider application area than analyzing source code
since the latter is often not available. Cost
analysis in this context has to consider, among
others, dynamic dispatch, jumps, the operand stack,
and the heap. Our method takes a bytecode program
and a cost model specifying the resource of
interest, and generates cost relations which
approximate the execution cost of the program with
respect to such resource. We report on COSTA, an
implementation for Java bytecode which can obtain
upper bounds on cost for a large class of programs
and complexity classes. Our basic techniques can be
directly applied to infer cost relations for other
Object-Oriented imperative languages, not
necessarily in bytecode form.
@article{AlbertAGPZ12,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Cost {A}nalysis of {O}bject-{O}riented {B}ytecode {P}rograms},
journal = {Theoretical Computer Science (Special Issue on Quantitative Aspects of Programming Languages)},
volume = 413,
number = 1,
pages = {142--159},
year = {2012},
publisher = {Elsevier},
issn = {0304-3975},
doi = {10.1016/j.tcs.2011.07.009},
url = {http://www.sciencedirect.com/science/article/pii/S0304397511006190},
rank:jcr:q = {4}
}
|
[26]
|
Peter Y. H. Wong, Elvira Albert, Radu Muschevici, José Proença, Jan
Schäfer, and Rudolf Schlatte.
The ABS Tool Suite: Modelling, Executing and Analysing
Distributed Adaptable Object-Oriented Systems.
International Journal on Software Tools for Technology
Transfer, 14(5):567-588, 2012.
[ bibtex |
abstract |
DOI ]
Modern software systems must support a high degree of
variability to accommodate a wide range of
requirements and operating conditions. This paper
introduces the Abstract Behavioural Specification
(ABS) language and tool suite, a comprehensive
platform for developing and analysing highly
adaptable distributed concurrent software
systems. The ABS language has a hybrid functional
and object-oriented core, and comes with extensions
that support the development of systems that are
adaptable to diversified requirements, yet capable
to maintain a high level of trustworthiness. Using
ABS, system variability is consistently traceable
from the level of requirements engineering down to
object behaviour. This facilitates temporal
evolution, as changes to the required set of
features of a system are automatically reflected by
functional adaptation of the system's behaviour. The
analysis capabilities of ABS stretch from debugging,
observing and simulating to resource analysis of ABS
models and help ensure that a system will remain
dependable throughout its evolutionary lifetime. We
report on the experience of using the ABS language
and the ABS tool suite in an industrial case study.
@article{WongAMPSS12,
author = {Peter Y. H. Wong and Elvira Albert and Radu
Muschevici and Jos{\'e} Proen\c{c}a and Jan Sch\"afer and Rudolf Schlatte},
title = {The {ABS} {T}ool {S}uite: {M}odelling, {E}xecuting and {A}nalysing {D}istributed
{A}daptable {O}bject-{O}riented {S}ystems},
journal = {International Journal on Software Tools for Technology Transfer},
year = {2012},
publisher = {Springer},
issn = {1433-2779},
doi = {10.1007/s10009-012-0250-1},
volume = {14},
number = {5},
pages = {567-588}
}
|
[27]
|
Elvira Albert, Puri Arenas, Germán Puebla, and Manuel V. Hermenegildo.
Certificate size reduction in abstraction-carrying code.
TPLP, 12(3):283-318, 2012.
[ bibtex |
abstract |
DOI ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for mobile code safety in which the code supplier provides a program together with an abstraction (or abstract model of the program) whose validity entails compliance with a predefined safety policy. The abstraction plays thus the role of safety certificate and its generation is carried out automatically by a fixpoint analyzer. The advantage of providing a (fixpoint) abstraction to the code consumer is that its validity is checked in a single pass (i.e., one iteration) of an abstract interpretation-based checker. A main challenge to make ACC useful in practice is to reduce the size of certificates as much as possible while at the same time not increasing checking time. The intuitive idea is to only include in the certificate information that the checker is unable to reproduce without iterating. We introduce the notion of reduced certificate which characterizes the subset of the abstraction which a checker needs in order to validate (and re-construct) the full certificate in a single pass. Based on this notion, we instrument a generic analysis algorithm with the necessary extensions in order to identify the information relevant to the checker. Interestingly, the fact that the reduced certificate omits (parts of) the abstraction has implications in the design of the checker. We provide the sufficient conditions which allow us to ensure that (1) if the checker succeeds in validating the certificate, then the certificate is valid for the program (correctness) and (2) the checker will succeed for any reduced certificate which is valid (completeness). Our approach has been implemented and benchmarked within the CiaoPP system. The experimental results show that our proposal is able to greatly reduce the size of certificates in practice.
@article{DBLP-journals-tplp-AlbertAPH12,
author = {Elvira Albert and
Puri Arenas and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Certificate size reduction in abstraction-carrying code}},
journal = {TPLP},
volume = {12},
number = {3},
year = {2012},
pages = {283-318},
ee = {http://dx.doi.org/10.1017/S1471068410000487},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[28]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Closed-Form Upper Bounds in Static Cost Analysis.
Journal of Automated Reasoning, 46(2):161-203, 2011.
[ bibtex |
abstract |
DOI |
PDF ]
The classical approach to automatic cost analysis
consists of two phases. Given a program and some
measure of cost, the analysis first produces
cost relations (CRs), i.e., recursive
equations which capture the cost of the program in
terms of the size of its input data. Second, CRs
are converted into closed-form, i.e., without
recurrences. Whereas the first phase has received
considerable attention, with a number of cost
analyses available for a variety of programming
languages, the second phase has been comparatively
less studied. This article presents, to our
knowledge, the first practical framework for the
generation of closed-form upper bounds for CRs which
(1) is fully automatic, (2) can handle the
distinctive features of CRs originating from cost
analysis of realistic programming languages, (3) is
not restricted to simple complexity classes, and (4)
produces reasonably accurate solutions. A key idea
in our approach is to view CRs as programs, which
allows applying semantic-based static analyses and
transformations to bound them; namely, our method is
based on the inference of ranking functions
and loop invariants and on the use of
partial evaluation.
@article{AlbertAGP11a,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ\'an Puebla},
title = {{C}losed-{F}orm {U}pper {B}ounds in {S}tatic {C}ost {A}nalysis},
journal = {Journal of Automated Reasoning},
year = {2011},
publisher = {Springer},
volume = {46},
number = {2},
pages = {161-203},
doi = {10.1007/s10817-010-9174-1},
issn = {0168-7433},
rank:jcr:q = {3}
}
|
[29]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Damiano Zanardini.
Task-Level Analysis for a Language with Async-Finish
Parallelism.
ACM SIGPLAN Notices, 46(5):21-30, 2011.
[ bibtex |
abstract |
DOI |
PDF ]
The task-level of a program is the maximum
number of tasks that can be available (i.e., not
finished nor suspended) simultaneously during its
execution for any input data. Static knowledge of
the task-level is of utmost importance for
understanding and debugging parallel programs as
well as for guiding task schedulers. We present, to
the best of our knowledge, the first static analysis
which infers safe and precise approximations on the
task-level for a language with async-finish
parallelism. In parallel languages, async and
finish are the basic constructs, respectively, for
spawning tasks and for waiting until they terminate.
They are the core of modern, parallel, distributed
languages like X10. Given a (parallel) program, our
analysis returns a task-level upper bound, i.e., a
function on the program's input arguments that
guarantees that the task-level of the program will
never exceed its value along any execution. Our
analysis provides a series of useful
(over)-approximations, going from the total number
of tasks spawned in the execution up to an accurate
estimation of the task-level.
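A minimal Python sketch of the measured notion (a dynamic replay of a single trace, whereas the analysis bounds all traces statically; the event encoding is our simplification): the task-level is the peak number of simultaneously available tasks.

def task_level(events):
    # events abstract task creation ("async") and termination ("end")
    live = peak = 0
    for e in events:
        live += 1 if e == "async" else -1
        peak = max(peak, live)
    return peak

print(task_level(["async", "async", "end", "async", "end", "end"]))  # 2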
@article{AlbertAGZ11,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Damiano Zanardini},
title = {Task-{L}evel {A}nalysis for a {L}anguage with {A}sync-{F}inish {P}arallelism},
journal = {ACM SIGPLAN Notices},
volume = {46},
number = {5},
publisher = {ACM Press},
year = {2011},
issn = {0362-1340},
doi = {10.1145/1967677.1967681},
pages = {21--30},
rank:jcr:q = {4}
}
|
[30]
|
Germán Puebla, Elvira Albert, and Manuel V. Hermenegildo.
Efficient local unfolding with ancestor stacks.
Theory and Practice of Logic Programming, 11(1):1-32, 2011.
[ bibtex |
abstract |
DOI ]
The most successful unfolding rules used nowadays in the partial evaluation of logic programs are based on well quasi orders (wqo) applied over (covering) ancestors, i.e., a subsequence of the atoms selected during a derivation. Ancestor (sub)sequences are used to increase the specialization power of unfolding while still guaranteeing termination and also to reduce the number of atoms for which the wqo has to be checked. Unfortunately, maintaining the structure of the ancestor relation during unfolding introduces significant overhead. We propose an efficient, practical local unfolding rule based on the notion of covering ancestors which can be used in combination with a wqo and allows a stack-based implementation without losing any opportunities for specialization. Using our technique, certain nonleftmost unfoldings are allowed as long as local unfolding is performed, i.e., we cover depth-first strategies. To deal with practical programs, we propose assertion-based techniques which allow our approach to treat programs that include (Prolog) built-ins and external predicates in a very extensible manner, for the case of leftmost unfolding. Finally, we report on our implementation of these techniques embedded in a practical partial evaluator, which shows that our techniques, in addition to dealing with practical programs, are also significantly more efficient in time and somewhat more efficient in memory than traditional tree-based implementations.
@article{DBLP-journals-tplp-PueblaAH11,
author = {Germ{\'a}n Puebla and
Elvira Albert and
Manuel V. Hermenegildo},
title = {{Efficient local unfolding with ancestor stacks}},
journal = {Theory and Practice of Logic Programming},
volume = {11},
number = {1},
year = {2011},
issn = {1471-0684},
pages = {1-32},
publisher = {Cambridge U. Press},
ee = {http://dx.doi.org/10.1017/S1471068409990263},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[31]
|
Miguel Gómez-Zamalloa, Elvira Albert, and Germán Puebla.
Test Case Generation for Object-Oriented Imperative
Languages in CLP.
Theory and Practice of Logic Programming, 26th Int'l. Conference
on Logic Programming (ICLP'10) Special Issue, 10(4-6):659-674, July 2010.
[ bibtex |
abstract |
DOI |
PDF ]
Testing is a vital part of the software development
process. Test Case Generation (TCG) is the process
of automatically generating a collection of
test-cases which are applied to a system under test.
White-box TCG is usually performed by means of
symbolic execution, i.e., instead of executing the
program on normal values (e.g., numbers), the
program is executed on symbolic values representing
arbitrary values. When dealing with an
object-oriented (OO) imperative language, symbolic
execution becomes challenging as, among other
things, it must be able to backtrack, complex
heap-allocated data structures should be created
during the TCG process and features like
inheritance, virtual invocations and exceptions have
to be taken into account. Owing to its inherent
symbolic execution mechanism, we argue in this
paper that Constraint Logic Programming (CLP) has a
promising application field in TCG. We support
our claim by developing a fully CLP-based framework
for TCG of an OO imperative language, and by
assessing a corresponding implementation on a
set of challenging Java programs.
@article{Gomez-ZAP10,
author = {Miguel G\'{o}mez-Zamalloa and Elvira Albert and Germ{\'a}n Puebla},
title = {{T}est {C}ase {G}eneration for {O}bject-{O}riented {I}mperative {L}anguages in {CLP}},
journal = {Theory and Practice of Logic Programming, 26th Int'l. Conference on Logic Programming (ICLP'10) Special Issue},
year = {2010},
month = jul,
editor = {M.~Hermenegildo and T.~Schaub},
pages = {659--674},
volume = {10},
number = {4-6},
issn = {1471-0684},
ee = {http://dx.doi.org/10.1017/S1471068410000347},
publisher = {Cambridge U. Press},
rank:jcr:q = {2}
}
|
[32]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Parametric Inference of Memory Requirements for Garbage
Collected Languages.
ACM SIGPLAN Notices, 45(8):121-130, 2010.
[ bibtex |
abstract |
PDF ]
The accurate prediction of a program's memory requirements
is a critical component in software development.
Existing heap space analyses either do not take
deallocation into account or adopt specific models
of garbage collectors which do not necessarily
correspond to the actual memory usage. We present a
novel approach to inferring upper bounds on memory
requirements of Java-like programs which is
parametric on the notion of object
lifetime, i.e., on when objects become collectable.
If objects' lifetimes are inferred by a
reachability analysis, then our analysis
infers accurate upper bounds on the memory
consumption for a reachability-based garbage
collector. Interestingly, if objects' lifetimes are
inferred by a heap liveness analysis, then we
approximate the program's minimal memory requirement,
i.e., the peak memory usage when using an optimal
garbage collector which frees objects as soon as
they become dead. The key idea is to integrate
information on objects' lifetimes into the process of
generating the recurrence equations which
capture the memory usage at the different program
states. If the heap size limit is set to the memory
requirement inferred by our analysis, execution is
guaranteed not to exceed the memory limit, under the
sole assumption that garbage collection is triggered
when the limit is about to be reached. Experiments on Java
bytecode programs provide evidence of the
feasibility and accuracy of our analysis.
@article{AlbertGG10b,
author = {Elvira Albert and Samir Genaim and Miguel G\'{o}mez-Zamalloa},
title = {{P}arametric {I}nference of {M}emory {R}equirements for {G}arbage {C}ollected {L}anguages},
journal = {ACM SIGPLAN Notices},
volume = {45},
number = {8},
year = {2010},
pages = {121--130},
issn = {0362-1340},
publisher = {ACM Press},
rank:jcr:q = {4}
}
|
[33]
|
Miguel Gómez-Zamalloa, Elvira Albert, and Germán Puebla.
Decompilation of Java Bytecode to Prolog by Partial
Evaluation.
Information and Software Technology, 51(10):1409-1427, October
2009.
[ bibtex |
abstract |
DOI |
PDF ]
@article{Gomez-ZAP09,
author = {Miguel G\'{o}mez-Zamalloa and Elvira Albert and Germ{\'a}n Puebla},
title = {{D}ecompilation of {J}ava {B}ytecode to {P}rolog by {P}artial {E}valuation},
journal = {Information and Software Technology},
publisher = {Elsevier},
volume = {51},
number = {10},
issn = {0950-5849},
ee = {http://dx.doi.org/10.1016/j.infsof.2009.04.010},
pages = {1409--1427},
npages = {19},
month = oct,
year = {2009},
rank:jcr:q = {1}
}
|
[34]
|
Elvira Albert, John P. Gallagher, Miguel Gómez-Zamalloa, and Germán Puebla.
Type-based Homeomorphic Embedding for Online Termination.
Information Processing Letters, 109(15):879-886, July 2009.
[ bibtex |
abstract |
DOI |
PDF ]
@article{AlbertGGP09,
author = {Elvira Albert and John P. Gallagher and Miguel G\'{o}mez-Zamalloa and Germ{\'a}n Puebla},
title = {{T}ype-based {H}omeomorphic {E}mbedding for {O}nline {T}ermination},
journal = {Information Processing Letters},
publisher = {Elsevier},
volume = {109},
number = {15},
issn = {0020-0190},
ee = {http://dx.doi.org/10.1016/j.ipl.2009.04.016},
pages = {879--886},
month = jul,
year = {2009},
rank:jcr:q = {3}
}
|
[35]
|
Elvira Albert, Germán Puebla, and Manuel V. Hermenegildo.
Abstraction-Carrying Code: a Model for Mobile Code Safety.
New Generation Computing, 26(2):171-204, 2008.
[ bibtex |
abstract |
DOI ]
Proof-Carrying Code (PCC) is a general approach to mobile code safety in which programs are augmented with a certificate (or proof). The intended benefit is that the program consumer can locally validate the certificate w.r.t. the “untrusted” program by means of a certificate checker—a process which should be much simpler, efficient, and automatic than generating the original proof. The practical uptake of PCC greatly depends on the existence of a variety of enabling technologies which allow both proving programs correct and replacing a costly verification process by an efficient checking procedure on the consumer side. In this work we propose Abstraction-Carrying Code (ACC), a novel approach which uses abstract interpretation as enabling technology. We argue that the large body of applications of abstract interpretation to program verification is amenable to the overall PCC scheme. In particular, we rely on an expressive class of safety policies which can be defined over different abstract domains. We use an abstraction (or abstract model) of the program computed by standard static analyzers as a certificate. The validity of the abstraction on the consumer side is checked in a single pass by a very efficient and specialized abstract-interpreter. We believe that ACC brings the expressiveness, flexibility and automation which is inherent in abstract interpretation techniques to the area of mobile code safety.
@article{DBLP-journals-ngc-AlbertPH08,
author = {Elvira Albert and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Abstraction-Carrying Code: a Model for Mobile Code Safety}},
journal = {New Generation Computing},
publisher = {Springer},
volume = {26},
number = {2},
year = {2008},
pages = {171-204},
ee = {http://dx.doi.org/10.1007/s00354-008-0039-7},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[36]
|
Elvira Albert, Michael Hanus, Frank Huch, Javier Oliver, and Germán Vidal.
Operational semantics for declarative multi-paradigm languages.
Journal of Symbolic Computation, 40(1):795-829, 2005.
[ bibtex |
abstract |
DOI ]
Declarative multi-paradigm languages combine the most important features of functional, logic and concurrent programming. The computational model of such integrated languages is usually based on a combination of two different operational principles: narrowing and residuation. This work is motivated by the fact that a precise definition of an operational semantics including all aspects of modern multi-paradigm languages like laziness, sharing, non-determinism, equational constraints, external functions and concurrency does not exist. Therefore, in this article, we present the first rigorous operational description covering all the aforementioned features in a precise and understandable manner. We develop our operational semantics in several steps. First, we define a natural (big-step) semantics covering laziness, sharing and non-determinism. We also present an equivalent small-step semantics which additionally includes a number of practical features like equational constraints and external functions. Then, we introduce a deterministic version of the small-step semantics which makes the search strategy explicit; this is essential for profiling, tracing, debugging etc. Finally, the deterministic semantics is extended in order to cover the concurrent facilities of modern declarative multi-paradigm languages. The semantics developed provides an appropriate foundation for modeling actual declarative multi-paradigm languages like Curry. The complete operational semantics has been implemented and used with various programming tools.
@article{DBLP-journals-jsc-AlbertHHOV05,
author = {Elvira Albert and
Michael Hanus and
Frank Huch and
Javier Oliver and
Germ{\'a}n Vidal},
title = {{Operational semantics for declarative multi-paradigm languages}},
journal = {Journal of Symbolic Computation},
volume = {40},
number = {1},
issn = {0747-7171},
publisher = {{Elsevier}},
year = {2005},
pages = {795-829},
ee = {http://dx.doi.org/10.1016/j.jsc.2004.01.001},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[37]
|
Elvira Albert, Michael Hanus, and Germán Vidal.
A residualizing semantics for the partial evaluation of functional
logic programs.
Information Processing Letters, 85(1):19-25, 2003.
[ bibtex |
abstract |
DOI ]
Recent proposals for multi-paradigm declarative programming combine the most important features of functional, logic and concurrent programming into a single framework. The operational semantics of these languages is usually based on a combination of narrowing and residuation. In this paper, we introduce a non-standard, residualizing semantics for multi-paradigm declarative programs and prove its equivalence with a standard operational semantics. Our residualizing semantics is particularly relevant within the area of program transformation where it is useful, e.g., to perform computations during partial evaluation. Thus, the proof of equivalence is a crucial result to demonstrate the correctness of (existing) partial evaluation schemes.
@article{DBLP-journals-ipl-AlbertHV03,
author = {Elvira Albert and Michael Hanus and Germ{\'a}n Vidal},
title = {A residualizing semantics for the partial evaluation of functional logic programs},
journal = {Information Processing Letters},
volume = {85},
number = {1},
year = {2003},
pages = {19-25},
issn = {0020-0190},
ee = {http://dx.doi.org/10.1016/S0020-0190(02)00336-8},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[38]
|
Elvira Albert, Michael Hanus, and Germán Vidal.
A Practical Partial Evaluation Scheme for Multi-Paradigm Declarative
Languages.
Journal of Functional and Logic Programming, 2002, 2002.
[ bibtex |
abstract |
DOI ]
This paper abstracts the contents of a PhD dissertation entitled 'Partial evaluation of multi-paradigm declarative languages: foundations, control, algorithms and efficiency' which was recently defended. Partial evaluation is an automatic technique for program optimization whose range of potential applications covers a wide spectrum of problem-specific optimizations. For instance, there are successful experiences in the field of artificial intelligence, like the optimization (by partial evaluation) of simulators for neural network training. The thesis presents novel methods and techniques for the partial evaluation of multi-paradigm declarative languages, which integrate features from functional and logic programming.
@article{DBLP-journals-jflp-AlbertHV02,
author = {Elvira Albert and
Michael Hanus and
Germ{\'a}n Vidal},
title = {{A Practical Partial Evaluation Scheme for Multi-Paradigm
Declarative Languages}},
journal = {Journal of Functional and Logic Programming},
doi = {10.1.1.14.8420},
volume = {2002},
year = {2002},
ee = {http://danae.uni-muenster.de/lehre/kuchen/JFLP/articles/2002/S02-01/JFLP-A02-01.pdf},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[39]
|
Elvira Albert.
Partial evaluation of multi-paradigm declarative languages.
AI Communications, 14(4):235-237, 2001.
[ bibtex |
abstract |
DOI ]
|
[40]
|
Elvira Albert and Germán Vidal.
The Narrowing-driven Approach to Functional Logic Program
Specialization.
New Generation Computing, 20(1):3-26, 2001.
[ bibtex |
abstract |
DOI ]
Partial evaluation is a semantics-based program optimization technique which has been investigated within different programming paradigms and applied to a wide variety of languages. Recently, a partial evaluation framework for functional logic programs has been proposed. In this framework, narrowing—the standard operational semantics of integrated languages—is used to drive the partial evaluation process. This paper surveys the essentials of narrowing-driven partial evaluation.
@article{DBLP-journals-ngc-AlbertV01,
author = {Elvira Albert and
Germ{\'a}n Vidal},
title = {{The Narrowing-driven Approach to Functional Logic Program
Specialization}},
journal = {New Generation Computing},
issn = {0288-3635},
volume = {20},
publisher = {Springer},
number = {1},
year = {2001},
pages = {3-26},
ee = {http://dx.doi.org/10.1007/BF03037257},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[1]
|
Elvira Albert, Maria Garcia de la Banda, Alejandro Hernández-Cerezo,
Alexey Ignatiev, Albert Rubio, and Peter J. Stuckey.
SuperStack: Superoptimization of Stack-Bytecode via Greedy,
Constraint-based, and SAT Techniques.
In PLDI '24: 45th ACM SIGPLAN International Conference on
Programming Language Design and Implementation, Copenhagen, Denmark, June
24-28, 2024. ACM, 2024.
[ bibtex |
abstract ]
Given a loop-free sequence of instructions,
superoptimization techniques use a constraint solver
to search for an equivalent sequence that is optimal
for a desired objective. The complexity of the
search grows exponentially with the length of the
solution being constructed, and the problem becomes
intractable for large sequences of
instructions. This paper presents a new approach to
superoptimizing stack-bytecode via three novel
components: (1) a greedy algorithm to refine the
bound on the length of the optimal solution; (2) a
new representation of the optimization problem as a
set of weighted soft clauses in MaxSAT; (3) a series
of domain-specific dominance and redundant
constraints to reduce the search space for optimal
solutions. We have developed a tool, named
SuperStack, which can be used to find optimal code
translations of modern stack-based bytecode, namely
WebAssembly or Ethereum bytecode. Experimental
evaluation on more than 500,000 sequences shows the
proposed greedy, constraint-based and SAT
combination is able to greatly increase optimization
gains achieved by existing superoptimizers and to
reduce the optimization time by at least a factor of four.
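
As a hedged illustration of component (2) only, the encoding of an
optimization objective as weighted soft clauses, the following toy
MaxSAT instance uses the PySAT library (pip install python-sat); the
variables, clauses and costs are invented stand-ins, not the paper's
encoding of bytecode semantics:

from pysat.formula import WCNF
from pysat.examples.rc2 import RC2

wcnf = WCNF()
# Variables 1..4 stand for "instruction slot i is used". The hard
# clauses below are toy stand-ins for the real semantic encoding.
wcnf.append([1, 2, 3, 4])   # at least one slot must be used
wcnf.append([2])            # slot 2 is forced by the (toy) semantics
wcnf.append([-3, 2])        # using slot 3 requires slot 2
# Soft clauses: prefer every slot empty; the weight plays the role of
# the per-instruction cost, so MaxSAT minimizes the cost of used slots.
for var, cost in [(1, 3), (2, 3), (3, 5), (4, 2)]:
    wcnf.append([-var], weight=cost)

solver = RC2(wcnf)
model = solver.compute()
print(model, "cost =", solver.cost)   # only slot 2 is used, cost = 3
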
@inproceedings{AlbertGHIRS24,
author = {Elvira Albert and
Maria Garcia de la Banda and
Alejandro Hern{\'{a}}ndez{-}Cerezo and
Alexey Ignatiev and
Albert Rubio and
Peter J. Stuckey},
title = {{SuperStack}: {Superoptimization} of {Stack-Bytecode} via
{Greedy}, {Constraint-based}, and {SAT} {Techniques}},
booktitle = {{PLDI} '24: 45th {ACM} {SIGPLAN} International Conference on Programming
Language Design and Implementation, Copenhagen, Denmark, June 24-28,
2024},
year = {2024},
publisher = {{ACM}},
rank:grinscie:class = {1}
}
|
[2]
|
Elvira Albert, Samir Genaim, Daniel Kirchner, and Enrique Martin-Martin.
Formally Verified EVM Block-Optimizations.
In Computer Aided Verification - 35th International Conference,
CAV 2023, Paris, France, July 17-22, 2023, Proceedings, Part III, volume
13966 of Lecture Notes in Computer Science, pages 176-189. Springer,
2023.
[ bibtex |
abstract |
DOI ]
@inproceedings{AlbertGKMM23,
author = {Elvira Albert and
Samir Genaim and
Daniel Kirchner and
Enrique Martin-Martin},
title = {Formally Verified EVM Block-Optimizations},
booktitle = {Computer Aided Verification - 35th International Conference, {CAV}
2023, Paris, France, July 17-22, 2023, Proceedings, Part {III}},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {13966},
doi = {10.1007/978-3-031-37709-9\_9},
year = {2023},
pages = {176--189},
rank:grinscie:class = {1}
}
|
[3]
|
Elvira Albert, Jesús Correas, Pablo Gordillo, Guillermo Román-Díez, and
Albert Rubio.
Inferring Needless Write Memory Accesses on Ethereum Smart Contracts.
In 29th International Conference on Tools and Algorithms for the
Construction and Analysis of Systems, TACAS 2023. Proceedings, volume 13993
of Lecture Notes in Computer Science, pages 448-466. Springer, 2023.
[ bibtex |
abstract |
DOI |
PDF ]
Efficiency is a fundamental property of any type of
program, but it is even more so in the context of
the programs executing on the blockchain (known as
smart contracts). This is because optimizing smart
contracts has direct consequences on reducing the
costs of deploying and executing the contracts, as
there are fees to pay related to their bytes-size
and to their resource consumption (called
gas). Optimizing memory usage is considered a
challenging problem that, among other things,
requires a precise inference of the memory locations
being accessed. This is also the case for the
Ethereum Virtual Machine (EVM) bytecode generated by
the most-widely used compiler, solc, whose rather
unconventional and low-level memory usage challenges
automated reasoning. This paper presents a static
analysis, developed at the level of the EVM bytecode
generated by solc, that infers write memory accesses
that are needless and thus can be safely
removed. The application of our implementation on
more than 19,000 real smart contracts has detected
about 6,200 needless write accesses in less than 4
hours. Interestingly, many of these writes were
involved in memory usage patterns generated by solc
that can be greatly optimized by removing entire
blocks of bytecodes. To the best of our knowledge,
existing optimization tools cannot infer such
needless write accesses, and hence cannot detect
these inefficiencies that affect both the deployment
and the execution costs of Ethereum smart
contracts.
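
The following simplified sketch conveys the flavour of the property
being inferred (a toy, straight-line formulation of my own, far from
the paper's EVM-level static analysis): a write is needless if its
location is written again before being read.

def needless_writes(block):
    """block: list of ('read'|'write', location) pairs, in order."""
    needless = []
    pending = {}  # location -> index of the last write not yet read
    for i, (op, loc) in enumerate(block):
        if op == 'write':
            if loc in pending:            # previous write overwritten unread
                needless.append(pending[loc])
            pending[loc] = i
        elif op == 'read':
            pending.pop(loc, None)        # the pending write is observed
    return needless

block = [('write', 0x40), ('write', 0x60), ('write', 0x40), ('read', 0x60)]
print(needless_writes(block))  # [0]: the first write to 0x40 is dead
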
@inproceedings{AlbertCGRR23,
author = {Elvira Albert and
Jes\'us Correas and Pablo Gordillo and
Guillermo Rom\'an-D\'iez and
Albert Rubio},
title = {Inferring Needless Write Memory Accesses on Ethereum Smart Contracts},
booktitle = {29th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, TACAS 2023. Proceedings},
year = {2023},
rank:grinscie:class = {1},
volume = {13993},
pages = {448--466},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-031-30823-9_23}
}
|
[4]
|
Elvira Albert, Marta Bellés-Muñoz, Miguel Isabel, Clara
Rodríguez-Núñez, and Albert Rubio.
Distilling Constraints in Zero-Knowledge Protocols.
In Sharon Shoham and Yakir Vizel, editors, Computer Aided
Verification - 34th International Conference, CAV 2022, Haifa, Israel,
August 7-10, 2022, Proceedings, Part I, volume 13371 of Lecture Notes
in Computer Science, pages 430-443. Springer, 2022.
[ bibtex |
abstract |
DOI |
http ]
The most widely used Zero-Knowledge (ZK) protocols require provers to prove they know a solution to a computational problem expressed as a Rank-1 Constraint System (R1CS). An R1CS is essentially a system of non-linear arithmetic constraints over a set of signals, whose security level depends on its non-linear part only, as the linear (additive) constraints can be easily solved by an attacker. Distilling the essential constraints from an R1CS by removing the part that does not contribute to its security is important, not only to reduce costs (time and space) of producing the ZK proofs, but also to reveal to cryptographic programmers the real hardness of their proofs. In this paper, we formulate the problem of distilling constraints from an R1CS as the (hard) problem of simplifying constraints in the realm of non-linearity. To the best of our knowledge, it is the first time that constraint-based techniques developed in the context of formal methods are applied to the challenging problem of analysing and optimizing ZK protocols.
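
A minimal sketch of the distilling idea, under my own simplifying
assumption that a constraint <A,w>*<B,w> = <C,w> counts as linear
exactly when A or B is the zero vector (so the multiplicative part
vanishes); the encoding below is illustrative, not the paper's
algorithm:

def is_linear(constraint):
    A, B, _ = constraint
    return all(a == 0 for a in A) or all(b == 0 for b in B)

def distill(r1cs):
    """Keep only the non-linear constraints, which carry the security."""
    return [c for c in r1cs if not is_linear(c)]

# signals w = (1, x, y, z): x*y = z is non-linear; x + y = z is linear
# (encoded with A = B = 0 so the product term is identically zero).
r1cs = [
    ([0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]),   # x * y = z
    ([0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 1, -1]),  # 0 = x + y - z
]
print(len(distill(r1cs)))  # 1: only the multiplicative constraint remains
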
@inproceedings{DBLP:conf/cav/AlbertBIRR22,
author = {Elvira Albert and
Marta Bell{\'{e}}s{-}Mu{\~{n}}oz and
Miguel Isabel and
Clara Rodr{\'{\i}}guez{-}N{\'{u}}{\~{n}}ez and
Albert Rubio},
editor = {Sharon Shoham and
Yakir Vizel},
title = {Distilling Constraints in Zero-Knowledge Protocols},
booktitle = {Computer Aided Verification - 34th International Conference, {CAV}
2022, Haifa, Israel, August 7-10, 2022, Proceedings, Part {I}},
series = {Lecture Notes in Computer Science},
volume = {13371},
pages = {430--443},
publisher = {Springer},
rank:grinscie:class = {1},
year = {2022},
url = {https://doi.org/10.1007/978-3-031-13185-1\_21},
doi = {10.1007/978-3-031-13185-1\_21},
timestamp = {Sat, 10 Sep 2022 20:58:38 +0200},
biburl = {https://dblp.org/rec/conf/cav/AlbertBIRR22.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
|
[5]
|
Elvira Albert, Pablo Gordillo, Alejandro Hernández-Cerezo, Clara
Rodríguez-Núñez, and Albert Rubio.
Using Automated Reasoning Techniques for Enhancing the Efficiency and
Security of (Ethereum) Smart Contracts.
In Jasmin Blanchette, Laura Kovács, and Dirk Pattinson,
editors, Automated Reasoning - 11th International Joint Conference,
IJCAR 2022, Haifa, Israel, August 8-10, 2022, Proceedings, volume 13385 of
Lecture Notes in Computer Science, pages 3-7. Springer, 2022.
[ bibtex |
abstract |
DOI |
PDF ]
@inproceedings{AlbertGHRR22,
author = {Elvira Albert and
Pablo Gordillo and
Alejandro Hern{\'{a}}ndez{-}Cerezo and
Clara Rodr{\'{\i}}guez{-}N{\'{u}}{\~{n}}ez and
Albert Rubio},
title = {Using Automated Reasoning Techniques for Enhancing the Efficiency
and Security of (Ethereum) Smart Contracts},
booktitle = {Automated Reasoning - 11th International Joint Conference, {IJCAR}
2022, Haifa, Israel, August 8-10, 2022, Proceedings},
year = {2022},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-031-10769-6_1},
pages = {3--7},
volume = {13385},
isbn = {978-3-031-10769-6},
rank:grinscie:class = {2},
editor = {Jasmin Blanchette and
Laura Kov{\'{a}}cs and
Dirk Pattinson}
}
|
[6]
|
Elvira Albert, Pablo Gordillo, Alejandro Hernández-Cerezo, and Albert Rubio.
A Max-SMT Superoptimizer for EVM handling Memory and
Storage.
In Dana Fisman and Grigore Rosu, editors, 28th International
Conference on Tools and Algorithms for the Construction and Analysis of
Systems, TACAS 2022. Proceedings, volume 13243 of Lecture Notes in
Computer Science, pages 201-219. Springer, 2022.
[ bibtex |
abstract |
DOI |
PDF ]
Superoptimization is a compilation technique that
searches for the optimal sequence of instructions
semantically equivalent to a given (loop-free)
initial sequence. With the advent of SMT solvers, it
has been successfully applied to LLVM code (to
reduce the number of instructions) and to Ethereum
EVM bytecode (to reduce its gas consumption). Both
applications, when proven practical, have left out
memory operations and thus missed important
optimization opportunities. A main challenge to
superoptimization today is handling memory
operations while remaining scalable. We present
GASOLv2, a gas and bytes-size superoptimization tool
for Ethereum smart contracts, that leverages a
previous Max-SMT approach for only stack
optimization to optimize also w.r.t. memory and
storage. GASOLv2 can be used to optimize the size
in bytes, aligned with the optimization criterion
used by the Solidity compiler solc, and it can also
be used to optimize gas consumption. Our experiments
on 12,378 blocks from 30 randomly selected real
contracts achieve gains of 16.42% in gas w.r.t. the
previous version of the optimizer without memory
handling, and gains of 3.28% in bytes-size over
code already optimized by solc.
@inproceedings{AlbertGHR22,
author = {Elvira Albert and
Pablo Gordillo and
Alejandro Hern\'andez-Cerezo and
Albert Rubio},
title = {{A} {M}ax-{SMT} {S}uperoptimizer for {EVM}
handling {M}emory and {S}torage},
booktitle = {28th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, TACAS 2022. Proceedings},
year = {2022},
rank:grinscie:class = {1},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-030-99524-9_11},
pages = {201-219},
volume = {13243},
isbn = {978-3-030-99524-9},
editor = {Dana Fisman and
Grigore Rosu}
}
|
[7]
|
Elvira Albert, Samir Genaim, Enrique Martin-Martin, Alicia Merayo, and Albert
Rubio.
Lower-Bound Synthesis Using Loop Specialization and Max-SMT.
In Alexandra Silva and K. Rustan M. Leino, editors, Computer
Aided Verification - 33rd International Conference, CAV 2021, Virtual
Event, July 20-23, 2021, Proceedings, Part II, volume 12760 of
Lecture Notes in Computer Science, pages 863-886. Springer, 2021.
[ bibtex |
abstract |
DOI |
http ]
This paper presents a new framework to synthesize lower
bounds on the worst-case cost of non-deterministic integer loops. As
in previous approaches, the analysis searches for a metering function
that under-approximates the number of loop iterations. The key novelty
of our framework is the specialization of loops, which is achieved by
restricting their enabled transitions to a subset of the inputs combined
with the narrowing of their transition scopes. Specialization allows us
to find metering functions for complex loops that could not be handled
before or be more precise than previous approaches. Technically, it is
performed (1) by using quasi-invariants while searching for the metering
function, (2) by strengthening the loop guards, and (3) by narrowing the
space of non-deterministic choices. We also propose a Max-SMT encoding
that takes advantage of soft constraints to force the solver to look
for more accurate solutions. We show our accuracy gains on benchmarks
extracted from the 2020 Termination and Complexity Competition by
comparing our results to those obtained by the LoAT system.
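
As a hedged, concrete rendering of the metering-function idea, the
snippet below uses the z3 Python bindings to validate a candidate
metering function for the loop `while x > 0: x = x - 1`; the two proof
obligations encoded are a simplified version of the conditions used in
this line of work, not the paper's Max-SMT encoding:

from z3 import Int, Solver, Implies, And, Not, unsat

x = Int('x')
guard = x > 0
x_next = x - 1            # loop update
f, f_next = x, x_next     # candidate metering function f(x) = x

s = Solver()
# Validity of both obligations = unsatisfiability of their negation:
# (1) inside the loop, f decreases by at most 1 per iteration, and
# (2) outside the loop, f is non-positive, so f under-approximates
#     the number of iterations.
s.add(Not(And(Implies(guard, f - f_next <= 1),
              Implies(Not(guard), f <= 0))))
print("valid metering function" if s.check() == unsat else "rejected")
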
@inproceedings{AlbertGMMR21,
author = {Elvira Albert and
Samir Genaim and
Enrique Martin-Martin and
Alicia Merayo and
Albert Rubio},
editor = {Alexandra Silva and
K. Rustan M. Leino},
title = {Lower-Bound Synthesis Using Loop Specialization and Max-SMT},
booktitle = {Computer Aided Verification - 33rd International Conference, {CAV} 2021, Virtual Event, July 20-23, 2021, Proceedings, Part {II}},
series = {Lecture Notes in Computer Science},
volume = {12760},
pages = {863--886},
publisher = {Springer},
year = {2021},
url = {https://doi.org/10.1007/978-3-030-81688-9\_40},
rank:grinscie:class = {1},
doi = {10.1007/978-3-030-81688-9\_40}
}
|
[8]
|
Elvira Albert, Reiner Hähnle, Alicia Merayo, and Dominic
Steinhöfel.
Certified Abstract Cost Analysis.
In Esther Guerra and Mariëlle Stoelinga, editors,
Fundamental Approaches to Software Engineering - 24th International
Conference, FASE 2021, Held as Part of the European Joint Conferences on
Theory and Practice of Software, ETAPS 2021, Luxembourg City, Luxembourg,
March 27 - April 1, 2021, Proceedings, volume 12649 of Lecture Notes in
Computer Science, pages 24-45. Springer, 2021.
[ bibtex |
abstract |
DOI |
http ]
A program containing placeholders for unspecified
statements or expressions is called an abstract (or
schematic) program. Placeholder symbols occur
naturally in program transformation rules, as used
in refactoring, compilation, optimization, or
parallelization. We present a generalization of
automated cost analysis that can handle abstract
programs and, hence, can analyze the impact of
program transformations on cost. This kind of
relational property requires provably precise cost
bounds which are not always produced by cost
analysis. Therefore, we certify by deductive
verification that the inferred abstract cost bounds
are correct and sufficiently precise. It is the
first approach solving this problem. Both, abstract
cost analysis and certification, are based on
quantitative abstract execution (QAE) which in turn
is a variation of abstract execution, a recently
developed symbolic execution technique for abstract
programs. To realize QAE the new concept of a cost
invariant is introduced. QAE is implemented and runs
fully automatically on a benchmark set consisting of
representative optimization rules.
@inproceedings{AlbertHMS21,
author = {Elvira Albert and
Reiner H{\"{a}}hnle and
Alicia Merayo and
Dominic Steinh{\"{o}}fel},
editor = {Esther Guerra and
Mari{\"{e}}lle Stoelinga},
title = {Certified Abstract Cost Analysis},
booktitle = {Fundamental Approaches to Software Engineering - 24th International
Conference, {FASE} 2021, Held as Part of the European Joint Conferences
on Theory and Practice of Software, {ETAPS} 2021, Luxembourg City,
Luxembourg, March 27 - April 1, 2021, Proceedings},
series = {Lecture Notes in Computer Science},
volume = {12649},
rank:grinscie:class = {3},
pages = {24--45},
publisher = {Springer},
year = {2021},
url = {https://doi.org/10.1007/978-3-030-71500-7\_2},
doi = {10.1007/978-3-030-71500-7\_2}
}
|
[9]
|
Elvira Albert, Shelly Grossman, Noam Rinetzky, Clara Rodríguez-Núñez,
Albert Rubio, and Mooly Sagiv.
Taming Callbacks for Smart Contract Modularity.
In ACM SIGPLAN Conference on Object-Oriented Programming
Systems, Languages and Applications, OOPSLA 2020. Proceedings, volume 4,
pages 209:1-209:30, 2020.
[ bibtex |
abstract |
DOI ]
Callbacks are an effective programming discipline for implementing
event-driven programming, especially in environments like Ethereum which
forbid shared global state and concurrency.
Callbacks allow a callee to delegate the execution back to the caller.
Though effective, they can lead to subtle mistakes principally in open environments where
callbacks can be added in new code.
Indeed, several high-profile bugs in smart contracts exploit callbacks.
We present the first static technique ensuring modularity in
the presence of callbacks and apply it to verify prominent smart
contracts. Modularity ensures that external calls to other contracts
cannot affect the behavior of the contract. Importantly, modularity
is guaranteed without restricting programming.
In general, checking modularity is undecidable, even for programs without loops.
This paper describes an effective technique for soundly ensuring modularity by harnessing SMT solvers.
The main idea is to define a constructive version of modularity using commutativity and projection operations on program segments.
We believe that this approach is also accessible to programmers, since counterexamples to modularity can be generated automatically
by the SMT solvers, allowing programmers to understand and fix the error.
We implemented our approach in order to demonstrate the precision of
the modularity analysis and applied it to real smart contracts, including a subset of the 150 most active contracts in
Ethereum.
Our implementation decompiles bytecode programs into an intermediate representation and then implements the modularity checking
using SMT queries.
Overall, we argue that our experimental results indicate that the method can be applied to many realistic contracts,
and that it is able to prove modularity where other methods fail.
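
A toy rendering of the commutativity building block mentioned above
(my illustration only; the paper's construction also involves
projection operations on program segments): two state updates commute
iff applying them in either order yields the same state for all
inputs, which an SMT solver can check by searching for a
counterexample.

from z3 import Int, Solver, unsat

x, a = Int('x'), Int('a')
op1 = lambda s: s + a      # e.g., credit a balance by a
op2 = lambda s: s + 1      # e.g., bump a counter in the same slot
op3 = lambda s: s * 2

def commute(f, g):
    s = Solver()
    s.add(f(g(x)) != g(f(x)))          # look for a counterexample
    return s.check() == unsat

print(commute(op1, op2))  # True: two additions commute
print(commute(op1, op3))  # False: (x*2)+a != (x+a)*2 when a != 0
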
@inproceedings{AlbertGRRRS20,
author = {Elvira Albert and
Shelly Grossman and
Noam Rinetzky and
Clara Rodr\'iguez-N\'u\~{n}ez and
Albert Rubio and
Mooly Sagiv},
title = {{Taming Callbacks for Smart Contract Modularity}},
booktitle = { ACM SIGPLAN Conference on Object-Oriented Programming Systems, Languages and Applications, {OOPSLA}
2020. Proceedings},
year = {2020},
volume = {4},
pages = {209:1--209:30},
number = {OOPSLA},
rank:grinscie:class = {1},
doi = {10.1145/3428277}
}
|
[10]
|
Elvira Albert, Pablo Gordillo, Albert Rubio, and Maria A. Schett.
Synthesis of Super-Optimized Smart Contracts using Max-SMT.
In 32nd International Conference on Computer Aided Verification,
CAV 2020. Proceedings, volume 12224 of Lecture Notes in Computer
Science, pages 177-200, 2020.
[ bibtex |
abstract |
DOI |
PDF ]
With the advent of smart contracts that execute on the
blockchain ecosystem, a new mode of reasoning is
required for developers, who must pay meticulous
attention to the gas spent by their smart contracts,
as well as for optimization tools that must be
capable of effectively reducing the gas required by
the smart contracts. Super-optimization is a
technique which attempts to find the best
translation of a block of code by trying all
possible sequences of instructions that produce the
same result. This paper presents a novel approach
for super-optimization of smart contracts based on
Max-SMT which is split into two main phases: (i) the
extraction of a stack functional specification from
the basic blocks of the smart contract, which is
simplified using rules that capture the semantics of
the arithmetic, bit-wise, relational operations,
etc. (ii) the synthesis of optimized-blocks which,
by means of an efficient Max-SMT encoding, finds the
bytecode blocks with minimal gas cost whose stack
functional specification is equal (modulo
commutativity) to the extracted one. Our
experimental results are very promising: we are able
to optimize 52.78% of the blocks, and prove that
34.28% were already optimal, for more than 61,000
blocks from the most used 2,500 Ethereum contracts.
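
A bare-bones sketch of phase (i), under heavy simplifications of my
own (three opcodes, one algebraic simplification rule standing in for
the paper's rule set): symbolically executing a block over a symbolic
stack yields its stack functional specification.

def symbolic_exec(block, arity):
    stack = [f"s{i}" for i in range(arity)]  # symbolic input stack
    for op, *args in block:
        if op == "PUSH":
            stack.append(str(args[0]))
        elif op == "ADD":
            a, b = stack.pop(), stack.pop()
            # simplification rule capturing arithmetic semantics
            if a == "0":   stack.append(b)
            elif b == "0": stack.append(a)
            else:          stack.append(f"({a}+{b})")
        elif op == "SWAP1":
            stack[-1], stack[-2] = stack[-2], stack[-1]
    return stack

# ADD of a pushed 0 is simplified away, so this block's specification
# equals the identity on its single stack input:
print(symbolic_exec([("PUSH", 0), ("ADD",)], arity=1))  # ['s0']
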
@inproceedings{AlbertGRS20,
author = {Elvira Albert and
Pablo Gordillo and
Albert Rubio and
Maria A. Schett},
title = {{Synthesis of Super-Optimized Smart Contracts using Max-SMT}},
booktitle = {32nd International Conference on Computer Aided Verification, {CAV}
2020. Proceedings},
year = {2020},
series = {Lecture Notes in Computer Science},
pages = {177-200},
volume = {12224},
rank:grinscie:class = {1},
doi = {10.1007/978-3-030-53288-8_10},
isbn = {978-3-030-53288-8}
}
|
[11]
|
Elvira Albert, Jesús Correas, Pablo Gordillo, Guillermo Román-Díez, and
Albert Rubio.
GASOL: Gas Analysis and Optimization for Ethereum Smart
Contracts.
In Armin Biere and David Parker, editors, 26th International
Conference on Tools and Algorithms for the Construction and Analysis of
Systems, TACAS 2020. Proceedings, volume 12079 of Lecture Notes in
Computer Science, pages 118-125. Springer, 2020.
[ bibtex |
abstract |
DOI |
PDF ]
We present the main concepts, components, and usage of
GASOL, a Gas AnalysiS and Optimization tooL for
Ethereum smart contracts. GASOL offers a wide
variety of cost models that allow inferring
the gas consumption associated with selected types of
EVM instructions and/or inferring the number of
times that such types of bytecode instructions are
executed. Among others, we have cost models to
measure only storage opcodes, to measure a selected
family of gas-consumption opcodes following
Ethereum's classification, to estimate the cost of a
selected program line, etc. After choosing the
desired cost model and the function of interest,
GASOL returns to the user an upper bound on the
cost of this function. As the gas consumption is
often dominated by the instructions that access the
storage, GASOL uses the gas analysis to detect
under-optimized storage patterns, and includes an
(optional) automatic optimization of the selected
function. Our tool can be used within an Eclipse
plugin for Solidity which displays the gas and
instructions bounds and, when applicable, the
gas-optimized Solidity function.
@inproceedings{AlbertCGRR20,
author = {Elvira Albert and
Jes\'us Correas and
Pablo Gordillo and
Guillermo Rom\'an-D\'iez and
Albert Rubio},
title = {{GASOL}: {G}as {A}nalysis and {O}ptimization for {E}thereum {S}mart {C}ontracts},
booktitle = {26th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, TACAS 2020. Proceedings},
year = {2020},
rank:grinscie:class = {1},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
editor = {Armin Biere and
David Parker},
doi = {10.1007/978-3-030-45237-7_7},
pages = {118-125},
volume = {12079},
isbn = {978-3-030-45237-7}
}
|
[12]
|
Elvira Albert, Jesús Correas, Pablo Gordillo, Guillermo Román-Díez, and
Albert Rubio.
Smart, and also reliable and gas-efficient, contracts.
In 13th IEEE International Conference on Software Testing,
Validation and Verification, ICST 2020. Proceedings, IEEE, 2020.
[ bibtex |
abstract |
DOI |
PDF ]
@inproceedings{AlbertCGRR20-icst,
author = {Elvira Albert and
Jes\'us Correas and
Pablo Gordillo and
Guillermo Rom\'an-D\'iez and
Albert Rubio},
title = {{Smart, and also reliable and gas-efficient, contracts}},
booktitle = {13th {IEEE} International Conference on Software Testing, Validation
and Verification, {ICST} 2020. Proceedings},
year = {2020},
series = {IEEE},
rank:grinscie:class = {2},
doi = {10.1109/ICST46399.2020.00010}
}
|
[13]
|
Elvira Albert, Pablo Gordillo, Albert Rubio, and Ilya Sergey.
Running on Fumes: Preventing Out-Of-Gas Vulnerabilities
in Ethereum Smart Contracts using Static Resource Analysis.
In Pierre Ganty and Mohamed Kaâniche, editors, 13th
International Conference on Verification and Evaluation of Computer and
Communication Systems, VECoS 2019. Proceedings, volume 11847 of
Lecture Notes in Computer Science, pages 63-78. Springer, 2019.
[ bibtex |
abstract |
DOI |
PDF ]
Gas is the unit that measures the
computational effort required to execute
every single operation on the
Ethereum blockchain platform. Each instruction
executed by the Ethereum Virtual Machine (EVM) has
an associated gas consumption specified by
Ethereum. If a transaction exceeds the amount of gas
allotted by the user (known as gas limit), an
out-of-gas exception is raised. There is a wide
family of contract vulnerabilities due to out-of-gas
behaviors. We report on the design and
implementation of Gastap, a Gas-Aware Smart contracT
Analysis Platform, which takes as input a smart
contract (either in EVM, disassembled EVM, or in
Solidity source code) and automatically infers gas
upper bounds for all its public functions. Our
bounds ensure that if the gas limit paid by the user
is higher than our inferred gas bounds, the contract
is free of out-of-gas vulnerabilities.
@inproceedings{AlbertGRS19,
author = {Elvira Albert and
Pablo Gordillo and
Albert Rubio and Ilya Sergey},
title = {{R}unning on {F}umes: {P}reventing {O}ut-{O}f-{G}as {V}ulnerabilities in {E}thereum {S}mart {C}ontracts using {S}tatic {R}esource {A}nalysis},
booktitle = {13th International Conference on Verification and Evaluation of Computer and Communication Systems, {VECoS} 2019. Proceedings},
year = {2019},
publisher = {Springer},
editor = {Pierre Ganty and
Mohamed Ka\^aniche},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-030-35092-5_5},
pages = {63-78},
volume = {11847},
isbn = {978-3-030-35092-5}
}
|
[14]
|
Elvira Albert, Maria Garcia de la Banda, Miguel Gómez-Zamalloa, Miguel
Isabel, and Peter J. Stuckey.
Optimal Context-Sensitive Dynamic Partial Order
Reduction with Observers.
In Proceedings of the ACM SIGSOFT International Symposium on
Software Testing and Analysis 2019 (ISSTA'19), ACM, pages 352-362, 2019.
[ bibtex |
abstract |
DOI |
PDF ]
@inproceedings{AlbertGGIS19,
author = {Elvira Albert and
Maria Garcia de la Banda and
Miguel G\'omez-Zamalloa and
Miguel Isabel and
Peter J. Stuckey},
title = {{O}ptimal {C}ontext-{S}ensitive {D}ynamic
{P}artial {O}rder {R}eduction with {O}bservers},
booktitle = {Proceedings of the ACM SIGSOFT International Symposium on Software Testing and Analysis 2019 (ISSTA'19)},
year = {2019},
pages = {352--362},
series = {ACM},
doi = {10.1145/3293882.3330565},
rank:grinscie:class = {1}
}
|
[15]
|
Elvira Albert, Jesús Correas, Pablo Gordillo, Guillermo Román-Díez, and
Albert Rubio.
SAFEVM: A Safety Verifier for Ethereum Smart Contracts.
In Dongmei Zhang and Anders Møller, editors, ACM SIGSOFT
International Symposium on Software Testing and Analysis, ISSTA 2019.
Proceedings, ACM, pages 386-389, 2019.
[ bibtex |
abstract |
DOI |
PDF ]
Ethereum smart contracts are public, immutable and
distributed and, as such, they are prone to
vulnerabilities stemming from programming mistakes
by developers. This paper presents SAFEVM, a
verification tool for Ethereum smart contracts that
makes use of state-of-the-art verification engines
for C programs. SAFEVM takes as input an Ethereum
smart contract (provided either in Solidity source
code, or in compiled EVM bytecode), optionally with
assert and require verification annotations, and
produces in the output a report with the
verification results. Besides general safety
annotations, SAFEVM handles the verification of
array accesses: it automatically generates SV-COMP
verification assertions such that C verification
engines can prove safety of array accesses. Our
experimental evaluation has been undertaken on all
contracts pulled from etherscan.io (more than
24,000) by using as back-end verifiers CPAchecker,
SeaHorn and VeryMax.
@inproceedings{AlbertCGRR19,
author = {Elvira Albert and
Jes\'us Correas and
Pablo Gordillo and
Guillermo Rom\'an-D\'iez and
Albert Rubio},
title = {{SAFEVM}: {A} {S}afety {V}erifier for {E}thereum {S}mart {C}ontracts},
booktitle = {ACM SIGSOFT International Symposium on Software Testing and Analysis, ISSTA 2019. Proceedings},
year = {2019},
series = {ACM},
pages = {386--389},
editor = {Dongmei Zhang and
Anders M{\o}ller},
doi = {10.1145/3293882.3338999}
}
|
[16]
|
Elvira Albert, Pablo Gordillo, Benjamin Livshits, Albert Rubio, and Ilya
Sergey.
EthIR: A Framework for High-Level Analysis of Ethereum
Bytecode.
In Shuvendu Lahiri and Chao Wang, editors, 16th International
Symposium on Automated Technology for Verification and Analysis, ATVA 2018.
Proceedings, volume 11138 of Lecture Notes in Computer Science, pages
513-520. Springer, 2018.
[ bibtex |
abstract |
DOI |
PDF ]
Analyzing Ethereum bytecode, rather than the
source code from which it was generated, is a
necessity when: (1) the source code is not available
(e.g., the blockchain only stores the bytecode), (2)
the information to be gathered in the analysis is
only visible at the level of bytecode (e.g., gas
consumption is specified at the level of EVM
instructions), (3) the analysis results may be
affected by optimizations performed by the compiler
(thus the analysis should be done ideally after
compilation). This paper presents EthIR, a framework
for analyzing Ethereum bytecode, which relies on (an
extension of) OYENTE, a tool that generates CFGs;
EthIR produces, from the CFGs, a rule-based
representation (RBR) of the bytecode that enables
the application of (existing) high-level analyses to
infer properties of EVM code.
@inproceedings{AlbertGLRS18,
author = {Elvira Albert and Pablo Gordillo and Benjamin Livshits and Albert Rubio and Ilya Sergey},
title = {EthIR: A {F}ramework for {H}igh-{L}evel {A}nalysis of {E}thereum {B}ytecode},
editor = {Shuvendu Lahiri and Chao Wang},
booktitle = {16th International Symposium on Automated Technology for Verification and Analysis, {ATVA} 2018. Proceedings},
publisher = {Springer},
year = {2018},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-030-01090-4_30},
pages = {513-520},
volume = {11138},
isbn = {978-3-030-01089-8}
}
|
[17]
|
Elvira Albert, Miguel Gómez-Zamalloa, Albert Rubio, Matteo Sammartino,
and Alexandra Silva.
SDN-Actors: Modeling and Verification of SDN Programs.
In Formal Methods - 22nd International Symposium, FM 2018,
Held as Part of the Federated Logic Conference, FloC 2018, Oxford, UK, July
15-17, 2018, Proceedings, pages 550-567, 2018.
[ bibtex |
abstract |
DOI |
http ]
|
[18]
|
Elvira Albert, Miguel Gómez-Zamalloa, Miguel Isabel, and Albert Rubio.
Constrained Dynamic Partial Order Reduction.
In Computer Aided Verification - 30th International Conference,
CAV 2018, Held as Part of the Federated Logic Conference, FloC 2018,
Oxford, UK, July 14-17, 2018, Proceedings, Part II, volume 10982 of
Lecture Notes in Computer Science, pages 392-410. Springer, 2018.
[ bibtex |
abstract |
DOI |
PDF |
http ]
@inproceedings{AlbertGIR18,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Miguel Isabel and Albert Rubio},
title = {{C}onstrained {D}ynamic {P}artial {O}rder {R}eduction},
booktitle = {Computer Aided Verification - 30th International Conference, {CAV}
2018, Held as Part of the Federated Logic Conference, FloC 2018, Oxford,
UK, July 14-17, 2018, Proceedings, Part {II}},
pages = {392--410},
year = {2018},
url = {https://doi.org/10.1007/978-3-319-96142-2\_24},
doi = {10.1007/978-3-319-96142-2\_24},
timestamp = {Wed, 03 Oct 2018 12:55:01 +0200},
biburl = {https://dblp.org/rec/bib/conf/cav/AlbertGIR18},
bibsource = {dblp computer science bibliography, https://dblp.org},
series = {Lecture Notes in Computer Science},
volume = {10982},
isbn = {978-3-319-96141-5},
publisher = {Springer},
rank:grinscie:class = {1}
}
|
[19]
|
Elvira Albert, Miguel Gómez-Zamalloa, and Miguel Isabel.
Generation of Initial Contexts for Effective Deadlock Detection.
In Logic-Based Program Synthesis and Transformation: 27th
International Symposium, LOPSTR 2017, Namur, Belgium, October 10-12, 2017,
Revised Selected Papers, volume 10855 of Lecture Notes in Computer
Science, pages 3-19. Springer International Publishing, 2018.
[ bibtex |
abstract |
DOI |
PDF ]
@inproceedings{AlbertGI18,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Miguel Isabel},
title = {Generation of Initial Contexts for Effective Deadlock Detection},
booktitle = {Logic-Based Program Synthesis and Transformation: 27th International Symposium, {LOPSTR} 2017, Namur, Belgium, October 10-12, 2017, Revised Selected Papers},
year = {2018},
pages = {3--19},
publisher = {Springer International Publishing},
series = {Lecture Notes in Computer Science},
volume = {10855},
doi = {10.1007/978-3-319-94460-9_1}
}
|
[20]
|
Elvira Albert, Miguel Gómez-Zamalloa, and Miguel Isabel.
On the Generation of Initial Contexts for Effective Deadlock
Detection.
In LOPSTR 2017, October 2017.
Extended abstract.
[ bibtex |
abstract |
PDF ]
@inproceedings{AlbertGI17,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Miguel Isabel},
title = {On the Generation of Initial Contexts for Effective Deadlock Detection},
booktitle = {LOPSTR 2017},
month = oct,
year = {2017},
note = {Extended abstract}
}
|
[21]
|
Elvira Albert, Puri Arenas, María García de la Banda, Miguel Gómez-Zamalloa, and Peter J. Stuckey.
Context Sensitive Dynamic Partial Order Reduction.
In Victor Kuncak and Rupak Majumdar, editors, 29th International
Conference on Computer Aided Verification (CAV 2017), volume 10426 of
Lecture Notes in Computer Science, pages 526-543. Springer, 2017.
[ bibtex |
abstract |
DOI |
http ]
@inproceedings{AlbertAGGS17,
author = {Elvira Albert and Puri Arenas and Mar{\'i}a Garc{\'i}a de la Banda and Miguel G\'{o}mez-Zamalloa and Peter J. Stuckey},
title = {{C}ontext {S}ensitive {D}ynamic {P}artial {O}rder {R}eduction},
editor = {Victor Kuncak and Rupak Majumdar},
booktitle = {29th International Conference on Computer Aided Verification (CAV 2017)},
pages = {526--543},
year = {2017},
series = {Lecture Notes in Computer Science},
volume = {10426},
publisher = {Springer},
url = {https://doi.org/10.1007/978-3-319-63387-9_26},
doi = {10.1007/978-3-319-63387-9_26},
isbn = {978-3-319-63386-2},
rank:grinscie:class = {1}
}
|
[22]
|
Elvira Albert, Samir Genaim, and Pablo Gordillo.
May-Happen-in-Parallel Analysis with Returned Futures.
In Deepak D'Souza and K.Narayan Kumar, editors, 15th
International Symposium on Automated Technology for Verification and
Analysis, ATVA 2017. Proceedings, volume 10482 of Lecture Notes in
Computer Science, pages 42-58. Springer, 2017.
[ bibtex |
abstract |
DOI |
PDF ]
May-Happen-in-Parallel (MHP) is a fundamental analysis
to reason about concurrent programs. It infers the
pairs of program points that may execute in
parallel, or interleave their execution. This
information is essential to prove, among other
things, absence of data races, deadlock freeness,
termination, and resource usage. This paper presents
an MHP analysis for asynchronous programs that use
futures as synchronization mechanism. Future
variables are available in most concurrent languages
(e.g., in Java's concurrent library,
in the standard thread library of C++, and in Scala
and Python). The novelty of our analysis is that it
is able to infer MHP relations that involve future
variables that are returned by asynchronous
tasks. Futures are returned when a task needs to
wait for another task created in an inner
scope, e.g., task t needs to wait for the
termination of task p, which is spawned by task q,
which in turn is spawned during the execution of t (not
necessarily by t). Thus, task p is awaited by
task t, which is in an outer scope. The challenge
for the analysis is to (back)propagate the
synchronization of tasks through future variables
from inner to outer scopes.
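
A naive sketch of the basic MHP notion for a single straight-line task
(my simplification; the analysis in the paper is precisely about the
harder case of futures returned across scopes): a spawned task may
happen in parallel with every program point between its spawn and the
await on its future.

def mhp_pairs(stmts):
    """stmts: list of ('spawn', t) | ('await', t) | ('point', label)."""
    active, pairs = set(), set()
    for stmt, arg in stmts:
        if stmt == 'spawn':
            active.add(arg)
        elif stmt == 'await':
            active.discard(arg)
        else:  # a program point of the current task
            pairs.update((arg, t) for t in active)
    return pairs

stmts = [('spawn', 'p'), ('point', 'L1'), ('await', 'p'), ('point', 'L2')]
print(sorted(mhp_pairs(stmts)))  # [('L1', 'p')]: L2 cannot overlap p
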
@inproceedings{AlbertGG17,
author = {Elvira Albert and Samir Genaim and Pablo Gordillo},
title = {May-{H}appen-in-{P}arallel {A}nalysis with {R}eturned {F}utures},
editor = {Deepak D'Souza and K.Narayan Kumar},
booktitle = {15th International Symposium on Automated Technology for Verification and Analysis, {ATVA} 2017. Proceedings},
publisher = {Springer},
year = {2017},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-319-68167-2_3},
pages = {42-58},
volume = {10482},
isbn = {978-3-319-68167-2}
}
|
[23]
|
Elvira Albert, Nikolaos Bezirgiannis, Frank de Boer, and Enrique Martin-Martin.
A Formal, Resource Consumption-Preserving Translation of
Actors to Haskell.
In Manuel V Hermenegildo and Pedro Lopez-Garcia, editors,
Logic-Based Program Synthesis and Transformation: 26th International
Symposium, LOPSTR 2016, Edinburgh, UK, September 6-8, 2016, Revised Selected
Papers, volume 10184 of Lecture Notes in Computer Science, pages
21-37. Springer International Publishing, 2017.
[ bibtex |
abstract |
DOI |
http ]
@inproceedings{AlbertBBM16,
author = {Elvira Albert and Nikolaos Bezirgiannis and Frank de Boer and Enrique Martin-Martin},
editor = {Hermenegildo, Manuel V and Lopez-Garcia, Pedro},
title = {{A} {F}ormal, {R}esource {C}onsumption-{P}reserving {T}ranslation of {A}ctors to {H}askell},
booktitle = {Logic-Based Program Synthesis and Transformation: 26th International Symposium, LOPSTR 2016, Edinburgh, UK, September 6--8, 2016, Revised Selected Papers},
year = {2017},
publisher = {Springer International Publishing},
pages = {21--37},
series = {Lecture Notes in Computer Science},
volume = 10184,
isbn = {978-3-319-63139-4},
doi = {10.1007/978-3-319-63139-4_2},
url = {https://doi.org/10.1007/978-3-319-63139-4_2}
}
|
[24]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Testing of Concurrent and Imperative Software Using CLP.
In Proceedings of the 18th International Symposium on Principles
and Practice of Declarative Programming, Edinburgh, United Kingdom, September
5-7, 2016, pages 1-8. ACM, 2016.
[ bibtex |
abstract |
DOI |
PDF |
http ]
@inproceedings{AlbertAG16,
author = {Elvira Albert and
Puri Arenas and
Miguel G{\'{o}}mez{-}Zamalloa},
title = {Testing of {C}oncurrent and {I}mperative {S}oftware Using {CLP}},
booktitle = {Proceedings of the 18th International Symposium on Principles and
Practice of Declarative Programming, Edinburgh, United Kingdom, September
5-7, 2016},
pages = {1--8},
year = {2016},
publisher = {ACM},
doi = {10.1145/2967973.2968593},
url = {http://dx.doi.org/10.1145/2967973.2968593}
}
|
[25]
|
Elvira Albert, Miguel Gómez-Zamalloa, and Miguel Isabel.
Combining Static Analysis and Testing for Deadlock
Detection.
In Erika Ábrahám and Marieke Huisman, editors,
Integrated Formal Methods - 12th International Conference, IFM 2016,
Reykjavik, Iceland, June 1-5, 2016, Proceedings, volume 9681 of Lecture
Notes in Computer Science, pages 409-424. Springer, 2016.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Static deadlock analyzers might be able to verify the absence of
deadlock. However, they are usually not able to detect its presence.
Also, when they detect a potential deadlock cycle, they provide
little (or even no) information in their output. Due to the complex
flow of concurrent programs, the user might not be able to find the
source of the anomalous behaviour from the abstract information
computed by static analysis. This paper proposes the combined use
of static analysis and testing for effective deadlock detection in
asynchronous programs. When the program features a deadlock, our combined use of
analysis and testing provides an effective technique to catch deadlock
traces. If, instead, the program is deadlock-free but the
analyzer inaccurately spotted a deadlock, we might prove deadlock freedom.
@inproceedings{AlbertGI16b,
author = {Elvira Albert and Miguel G\'omez-Zamalloa and Miguel Isabel},
title = {Combining {S}tatic {A}nalysis and {T}esting for {D}eadlock {D}etection},
booktitle = {Integrated Formal Methods - 12th International Conference, {IFM} 2016,
Reykjavik, Iceland, June 1-5, 2016, Proceedings},
publisher = {Springer},
editor = {Erika {\'{A}}brah{\'{a}}m and
Marieke Huisman},
year = {2016},
pages = {409--424},
series = {Lecture Notes in Computer Science},
volume = {9681},
doi = {10.1007/978-3-319-33693-0_26},
url = {http://dx.doi.org/10.1007/978-3-319-33693-0_26}
}
|
[26]
|
Elvira Albert, Miguel Gómez-Zamalloa, and Miguel Isabel.
SYCO: A Systematic Testing Tool for Concurrent Objects.
In Ayal Zaks and Manuel V. Hermenegildo, editors, Proceedings of
the 25th International Conference on Compiler Construction, CC 2016,
Barcelona, Spain, March 12-18, 2016, pages 269-270. ACM, 2016.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present the concepts, usage and prototypical implementation of
SYCO: a SYstematic testing tool for Concurrent Objects. The
system receives as input a program, a selection of methods to be
tested, and a set of initial values for their parameters. SYCO offers
a visual web interface to carry out the testing process and
visualize the results of the different executions as well as the
sequences of tasks scheduled as a sequence diagram. Its kernel
includes state-of-the-art partial-order reduction techniques to
avoid redundant computations during testing.
In addition, SYCO incorporates an option to effectively
catch deadlock errors. In particular, it uses advanced
techniques which guide the execution towards potential deadlock
paths and discard paths that are guaranteed to be deadlock free.
@inproceedings{AlbertGI16,
editor = {Ayal Zaks and Manuel V. Hermenegildo},
author = {Elvira Albert and Miguel G\'omez-Zamalloa and Miguel Isabel},
title = {SYCO: A {S}ystematic {T}esting {T}ool for {C}oncurrent {O}bjects},
booktitle = {Proceedings of the 25th International Conference on Compiler Construction,
{CC} 2016, Barcelona, Spain, March 12-18, 2016},
year = {2016},
pages = {269--270},
publisher = {ACM},
doi = {10.1145/2892208.2892236},
url = {http://dx.doi.org/10.1145/2892208.2892236}
}
|
[27]
|
Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and
Guillermo Román-Díez.
Resource Analysis: From Sequential to Concurrent and
Distributed Programs.
In Nikolaj Bjørner and Frank D. de Boer, editors, FM 2015:
Formal Methods - 20th International Symposium, Oslo, Norway, June 24-26,
2015, Proceedings, volume 9109 of Lecture Notes in Computer Science,
pages 3-17. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
@inproceedings{AlbertACGGMPR15,
author = {Elvira Albert and
Puri Arenas and
Jes{\'{u}}s Correas and
Samir Genaim and
Miguel G{\'{o}}mez{-}Zamalloa and
Enrique Martin{-}Martin and
Germ{\'{a}}n Puebla and
Guillermo Rom{\'{a}}n{-}D{\'{\i}}ez},
title = {{R}esource {A}nalysis: {F}rom {S}equential to {C}oncurrent and {D}istributed {P}rograms},
booktitle = {{FM} 2015: Formal Methods - 20th International Symposium, Oslo, Norway,
June 24-26, 2015, Proceedings},
pages = {3--17},
year = {2015},
doi = {10.1007/978-3-319-19249-9_1},
url = {https://doi.org/10.1007/978-3-319-19249-9_1},
editor = {Nikolaj Bj{\o}rner and Frank D. de Boer},
series = {Lecture Notes in Computer Science},
isbn = {978-3-319-19248-2},
volume = {9109},
publisher = {Springer},
rank:grinscie:class = {2}
}
|
[28]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Test Case Generation of Actor Systems.
In 13th International Symposium on Automated Technology for
Verification and Analysis, ATVA 2015. Proceedings, volume 9364 of
Lecture Notes in Computer Science, pages 259-275. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Testing is a vital part of the software development process. It is
even more so in the context of concurrent languages, since due to
undesired task interleavings and to unexpected behaviours of the
underlying task scheduler, errors can easily go undetected. Test
case generation (TCG) is the process of automatically generating
test inputs for interesting coverage criteria, which
are then applied to the system under test. This paper presents a
TCG framework for actor systems, which consists of three main
elements that are the original contributions of this work: (1) a
symbolic execution calculus, which allows symbolically executing the
program (i.e., executing the program for unknown input data), (2)
improved techniques to avoid performing redundant computations
during symbolic execution, (3) new termination and coverage criteria,
which ensure the termination of symbolic execution and guarantee
that the test cases provide the desired degree of code coverage.
Finally, our framework has been
implemented and evaluated within the aPET system.
@inproceedings{AlbertAGM15,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Test {C}ase {G}eneration of {A}ctor {S}ystems},
booktitle = {13th International Symposium on Automated Technology for Verification and Analysis, {ATVA} 2015. Proceedings},
publisher = {Springer},
year = {2015},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-319-24953-7_21},
url = {http://dx.doi.org/10.1007/978-3-319-24953-7_21},
pages = {259-275},
volume = {9364},
isbn = {978-3-319-24952-0}
}
|
[29]
|
Elvira Albert, Samir Genaim, and Pablo Gordillo.
May-Happen-in-Parallel Analysis for Asynchronous Programs
with Inter-Procedural Synchronization.
In Sandrine Blazy and Thomas P. Jensen, editors, Static Analysis
- 22nd International Symposium, SAS 2015. Proceedings, volume 9291 of
Lecture Notes in Computer Science, pages 72-89. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
A may-happen-in-parallel (MHP) analysis
computes pairs of program points that may execute in
parallel across different distributed
components. This information has been proven to be
essential to infer both safety properties (e.g.,
deadlock freedom) and liveness properties
(termination and resource boundedness) of
asynchronous programs. Existing MHP analyses take
advantage of the synchronization points to learn
that one task has finished and thus will not happen
in parallel with other tasks that are still
active. Our starting point is an existing MHP
analysis developed for intra-procedural
synchronization, i.e., it only allows
synchronizing with tasks that have been spawned
inside the current task. This paper leverages such
MHP analysis to handle inter-procedural
synchronization, i.e., a task spawned by one task
can be awaited within a different task. This is
challenging because task synchronization goes beyond
the boundaries of methods, and thus the inference of
MHP relations requires novel extensions to capture
inter-procedural dependencies. The analysis has
been implemented and it can be tried online.
@inproceedings{AlbertGG15,
author = {Elvira Albert and Samir Genaim and Pablo Gordillo},
title = {May-{H}appen-in-{P}arallel {A}nalysis for {A}synchronous {P}rograms with {I}nter-{P}rocedural {S}ynchronization},
booktitle = {Static Analysis - 22nd International Symposium, {SAS} 2015. Proceedings},
editor = {Sandrine Blazy and
Thomas P. Jensen},
publisher = {Springer},
year = {2015},
rank:grinscie:class = {2},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-662-48288-9_5},
url = {http://dx.doi.org/10.1007/978-3-662-48288-9_5},
pages = {72-89},
volume = {9291},
isbn = {978-3-662-48287-2}
}
|
[30]
|
Elvira Albert, Jesús Correas, Einar Broch Johnsen, and Guillermo Román-Díez.
Parallel Cost Analysis of Distributed Systems.
In Static Analysis - 22nd International Symposium, SAS 2015.
Proceedings, volume 9291 of Lecture Notes in Computer Science, pages
275-292. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present a novel static analysis to infer the parallel cost of
distributed systems. Parallel cost differs from the standard notion
of serial cost by exploiting the truly concurrent execution model of
distributed processing to capture the cost of synchronized tasks
executing in parallel. It is challenging to infer parallel cost
because one needs to soundly infer the parallelism between tasks while
accounting for waiting and idle processor times at the different
locations. Our analysis works in three phases: (1) It first performs
a block-level analysis to estimate the serial costs of the blocks
between synchronization points in the program; (2) Next, it constructs
a distributed flow graph (DFG) to capture the parallelism, the waiting
and idle times at the locations of the distributed system; Finally,
(3) the parallel cost can be obtained as the path of maximal cost in
the DFG. A prototype implementation demonstrates the accuracy and
feasibility of the proposed analysis.
@inproceedings{AlbertCJR15,
author = {Elvira Albert and
Jes\'us Correas and
Einar Broch Johnsen and
Guillermo Rom\'an-D\'iez},
title = {{P}arallel {C}ost {A}nalysis of {D}istributed {S}ystems},
booktitle = {Static Analysis - 22nd International Symposium, {SAS} 2015. Proceedings},
publisher = {Springer},
year = {2015},
rank:grinscie:class = {2},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-662-48288-9_16},
url = {https://doi.org/10.1007/978-3-662-48288-9_16},
pages = {275-292},
volume = {9291},
isbn = {978-3-662-48287-2}
}
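As a toy illustration of phase (3), under the simplifying assumption (ours) that the DFG is acyclic and each node carries the serial cost of its block from phase (1), the parallel cost is the cost of the heaviest path:

    from functools import lru_cache

    def parallel_cost(edges, cost):
        # edges: maps each block to the blocks it enables in the DFG
        # cost: maps each block to its serial cost (phase 1)
        @lru_cache(maxsize=None)
        def heaviest(block):
            succs = edges.get(block, ())
            return cost[block] + (max(heaviest(s) for s in succs) if succs else 0)
        return max(heaviest(b) for b in cost)

    # blocks b and c run in parallel at two locations after a finishes
    assert parallel_cost({"a": ("b", "c")}, {"a": 1, "b": 5, "c": 3}) == 6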
|
[31]
|
Elvira Albert, Jesús Correas, and Guillermo Román-Díez.
Non-Cumulative Resource Analysis.
In Proceedings of 21st International Conference on Tools and
Algorithms for the Construction and Analysis of Systems (TACAS 2015), volume
9035 of Lecture Notes in Computer Science, pages 85-100. Springer,
2015.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Existing cost analysis frameworks have been defined for cumulative resources
which keep on increasing along the computation. Traditional cumulative
resources are execution time, number of executed steps, amount of memory
allocated, and energy consumption. Non-cumulative resources are acquired and
(possibly) released along the execution. Examples of non-cumulative cost are
memory usage in the presence of garbage collection, number of connections
established that are later closed, or resources requested to a virtual host
which are released after using them.
We present, to the best of our knowledge, the first generic static analysis
framework to infer an upper bound on the peak cost for
non-cumulative types of resources. Our analysis comprises several components:
(1) a pre-analysis to infer when resources are being used simultaneously, (2)
a program-point resource analysis which infers an upper bound on the
cost at the points of interest (namely the points where resources are
acquired), and (3) the elimination, from the upper bounds obtained in (2), of
those accumulated resources that are not used simultaneously. We report on a
prototype implementation of our analysis that can be used on a simple
imperative language.
@inproceedings{AlbertCR15,
author = {Elvira Albert and Jes\'us Correas and Guillermo Rom\'{a}n-D\'{i}ez},
title = {{N}on-{C}umulative {R}esource {A}nalysis},
booktitle = {Proceedings of 21st International Conference on Tools and Algorithms for the Construction and Analysis of Systems (TACAS 2015)},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
year = {2015},
doi = {10.1007/978-3-662-46681-0_6},
url = {https://doi.org/10.1007/978-3-662-46681-0_6},
pages = {85-100},
volume = {9035},
isbn = {978-3-662-46680-3},
rank:grinscie:class = {1}
}
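The quantity being over-approximated is easy to state over a concrete trace; a minimal sketch (ours), where the static analysis must bound this peak without running the program:

    def peak_usage(events):
        # events: signed resource amounts (+n acquires, -n releases)
        current = peak = 0
        for delta in events:
            current += delta
            peak = max(peak, current)
        return peak

    # three connections opened, two closed, then five more: the peak is 6
    assert peak_usage([+3, -2, +5]) == 6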
|
[32]
|
Elvira Albert, Jesús Correas, Enrique Martín-Martín, and Guillermo Román-Díez.
Static Inference of Transmission Data Sizes in
Distributed Systems.
In 6th International Symposium On Leveraging Applications of
Formal Methods, Verification and Validation (ISOLA'14), volume 8803 of
Lecture Notes in Computer Science, pages 104-119. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present a static analysis to infer the amount of data that a
distributed system may transmit. The different locations of a
distributed system communicate and coordinate their actions by
posting tasks among them. A task is posted by building a message
with the task name and the data on which such task has to be
executed. When the task completes, the result can be retrieved by
means of another message from which the result of the computation
can be obtained. Thus, the transmission data size of a distributed
system mainly depends on the amount of messages posted among the
locations of the system, and the sizes of the data transferred in
the messages. Our static analysis has two main parts: (1) we
over-approximate the sizes of the data at the program points where
tasks are spawned and where the results are received, and (2) we
over-approximate the total number of messages. Knowledge of the
transmission data sizes is essential, among other things, to predict
the bandwidth required to achieve a certain response time, or
conversely, to estimate the response time for a given bandwidth. A
prototype implementation in the SACO system demonstrates the
accuracy and feasibility of the proposed analysis.
@inproceedings{AlbertCMR14,
author = {Elvira Albert and
Jes\'us Correas and
Enrique Mart\'in-Mart\'in and
Guillermo Rom\'an-D\'iez},
title = {{S}tatic {I}nference of {T}ransmission {D}ata {S}izes in {D}istributed {S}ystems},
booktitle = {6th International Symposium On Leveraging Applications of Formal Methods, Verification and Validation (ISOLA'14)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {8803},
pages = {104--119},
year = {2014},
url = {http://dx.doi.org/10.1007/978-3-662-45231-8_8},
doi = {10.1007/978-3-662-45231-8_8}
}
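A minimal sketch (ours) of how the two over-approximations combine: a bound on the number of messages posted at each spawn point, times a bound on the size of each such message, yields a bound on the total data transmitted.

    def transmission_bound(spawn_points, msg_count, msg_size):
        # msg_count[p]: bound on messages posted at point p (part 2)
        # msg_size[p]: bound on the data size of each message at p (part 1)
        return sum(msg_count[p] * msg_size[p] for p in spawn_points)

    assert transmission_bound(["p1", "p2"],
                              {"p1": 4, "p2": 2},
                              {"p1": 10, "p2": 100}) == 240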
|
[33]
|
Elvira Albert, Jesús Correas, and Guillermo Román-Díez.
Peak Cost Analysis of Distributed Systems.
In Markus Müller-Olm and Helmut Seidl, editors, Static
Analysis - 21st International Symposium, SAS 2014, Munich, Germany,
September 11-13, 2014. Proceedings, volume 8723 of Lecture Notes in
Computer Science, pages 18-33. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present a novel static analysis to infer the peak cost of
distributed systems. The different locations of a distributed
system communicate and coordinate their actions by posting tasks
among them. Thus, the amount of work that each location has to
perform can greatly vary along the execution depending on: (1) the
amount of tasks posted to its queue, (2) their respective costs, and
(3) the fact that they may be posted in parallel and thus be pending
to execute simultaneously. The peak cost of a distributed
location refers to the maximum cost that it needs to carry out
along its execution.
Inferring the peak cost is challenging because it increases and
decreases along the execution, unlike the standard notion of
total cost which is cumulative. Our key contribution is the
novel notion of quantified queue configuration which captures
the worst-case cost of the tasks that may be simultaneously
pending to execute at each location along the execution.
Our analysis framework is developed for a generic notion of cost
that can be instantiated to infer the peak cost in terms of number
of steps, the amount of memory allocated, the number of tasks, etc.
A prototype implementation demonstrates the accuracy and
feasibility of the proposed peak cost analysis.
@inproceedings{AlbertCR14,
author = {Elvira Albert and
Jes\'us Correas and
Guillermo Rom\'an-D\'iez},
title = {{P}eak {C}ost {A}nalysis of {D}istributed {S}ystems},
editor = {Markus M{\"{u}}ller{-}Olm and
Helmut Seidl},
booktitle = {Static Analysis - 21st International Symposium, {SAS} 2014, Munich,
Germany, September 11-13, 2014. Proceedings},
publisher = {Springer},
year = {2014},
rank:grinscie:class = {2},
volume = {8723},
series = {Lecture Notes in Computer Science},
url = {http://dx.doi.org/10.1007/978-3-319-10936-7_2},
doi = {10.1007/978-3-319-10936-7_2},
isbn = {978-3-319-10935-0},
pages = {18--33}
}
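Over a concrete trace, the peak cost of a location is the maximum total cost of the tasks simultaneously pending in its queue; a minimal sketch (ours) of the semantic quantity that the quantified queue configurations bound statically:

    def location_peak(trace, task_cost):
        # trace: ('post', t) / ('done', t) events observed at one location
        pending, peak = set(), 0
        for kind, task in trace:
            if kind == 'post':
                pending.add(task)
            else:
                pending.discard(task)
            peak = max(peak, sum(task_cost[t] for t in pending))
        return peak

    trace = [('post', 'a'), ('post', 'b'), ('done', 'a'), ('post', 'c')]
    assert location_peak(trace, {'a': 4, 'b': 1, 'c': 2}) == 5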
|
[34]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Actor- and Task-Selection Strategies for Pruning Redundant
State-Exploration in Testing.
In Erika Ábrahám and Catuscia Palamidessi, editors, 34th
IFIP International Conference on Formal Techniques for Distributed Objects,
Components and Systems (FORTE 2014), volume 8461 of Lecture Notes in
Computer Science, pages 49-65. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Testing concurrent systems requires exploring all possible
non-deterministic interleavings that the concurrent execution may
have. This is because any of the interleavings may reveal the
erroneous behaviour. In testing of actor systems, we can distinguish
two sources of non-determinism: (1) actor-selection, the
order in which actors are explored and (2) task-selection,
the order in which the messages (or tasks) within each actor are
explored. This paper provides new strategies and heuristics for
pruning redundant state-exploration when testing actor systems by
reducing the amount of unnecessary non-determinism. First, we
propose a method and heuristics for actor-selection based on
tracking the amount and the type of interactions among actors.
Second, we can avoid further redundant interleavings in task-selection
by taking into account the access to the shared-memory that
the tasks make.
@inproceedings{AlbertAG14,
author = {Elvira Albert and
Puri Arenas and
Miguel G\'omez-Zamalloa},
title = {Actor- and {T}ask-{S}election {S}trategies for {P}runing {R}edundant {S}tate-{E}xploration in {T}esting},
booktitle = {34th IFIP International Conference on Formal Techniques for Distributed Objects, Components and Systems (FORTE 2014)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
year = {2014},
doi = {10.1007/978-3-662-43613-4_4},
url = {https://doi.org/10.1007/978-3-662-43613-4_4},
pages = {49-65},
volume = {8461},
editor = {Erika {\'A}brah{\'a}m and Catuscia Palamidessi}
}
|
[35]
|
Elvira Albert, Puri Arenas, Antonio Flores-Montoya, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and Guillermo Román-Díez.
SACO: Static Analyzer for Concurrent Objects.
In Erika Ábrahám and Klaus Havelund, editors, Tools and
Algorithms for the Construction and Analysis of Systems - 20th International
Conference, TACAS 2014, volume 8413 of Lecture Notes in Computer
Science, pages 562-567. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present the main concepts, usage and implementation of SACO,
a static analyzer for concurrent objects. Interestingly, SACO is
able to infer both liveness (namely termination and resource
boundedness) and safety properties (namely deadlock freeness)
of programs based on concurrent objects. The system integrates
auxiliary analyses such as points-to and
may-happen-in-parallel, which are essential for increasing the
accuracy of the aforementioned more complex properties. SACO
provides accurate information about the dependencies which may
introduce deadlocks, loops whose termination is not guaranteed, and
upper bounds on the resource consumption of methods.
@inproceedings{AlbertAFGGMPR14,
author = {Elvira Albert and
Puri Arenas and
Antonio Flores-Montoya and
Samir Genaim and
Miguel G\'omez-Zamalloa and
Enrique Martin-Martin and
Germ\'an Puebla and
Guillermo Rom\'an-D\'iez},
title = {{SACO}: {S}tatic {A}nalyzer for {C}oncurrent {O}bjects},
booktitle = {Tools and Algorithms for the Construction and Analysis of
Systems - 20th International Conference, TACAS 2014},
year = {2014},
editor = {Erika {\'A}brah{\'a}m and
Klaus Havelund},
pages = {562-567},
volume = {8413},
series = {Lecture Notes in Computer Science},
isbn = {978-3-642-54861-1},
publisher = {Springer},
doi = {10.1007/978-3-642-54862-8_46},
url = {https://doi.org/10.1007/978-3-642-54862-8_46},
rank:grinscie:class = {3}
}
|
[36]
|
Elvira Albert, Samir Genaim, and Raúl Gutiérrez.
A Transformational Approach to Resource Analysis with
Typed-Norms.
In Proc. of the 23rd International Symposium on Logic-based
Program Synthesis and Transformation (LOPSTR'13), volume 8901 of
Lecture Notes in Computer Science, pages 38-53. Springer, 2014.
[ bibtex |
abstract |
PDF |
http ]
In order to automatically infer the resource
consumption of programs, analyzers track how
data sizes change along a program's execution.
Typically, analyzers measure the sizes of data by
applying norms which are mappings from data
to natural numbers that represent the sizes of the
corresponding data. When norms are defined by taking
type information into account, they are named
typed-norms. The main contribution of this
extended abstract is a transformational approach to
resource analysis with typed-norms. The analysis is
based on a transformation of the program into an
intermediate abstract program in which each
variable is abstracted with respect to all
considered norms which are valid for its type. We
also sketch a simple analysis that can be used to
automatically infer the required, useful,
typed-norms from programs.
@inproceedings{AlbertGG13,
author = {Elvira Albert and Samir Genaim and Ra{\'u}l Guti{\'e}rrez},
title = {A {T}ransformational {A}pproach to {R}esource {A}nalysis with {T}yped-{N}orms},
booktitle = {Proc. of the 23rd International Symposium on Logic-based Program Synthesis and Transformation (LOPSTR'13)},
year = {2014},
pages = {38-53},
url = {https://doi.org/10.1007/978-3-319-14125-1_3},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {8901}
}
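A minimal sketch (ours) of two typed-norms and of abstracting a value by every norm valid for its type, mirroring the transformation described in the abstract:

    def length_norm(xs):              # a norm valid for list types
        return len(xs)

    def term_size(t):                 # a norm valid for nested-tuple terms
        return 1 + sum(term_size(s) for s in t) if isinstance(t, tuple) else 1

    value = ([1, 2, 3], (1, (2, 3)))
    # the pair is abstracted w.r.t. both norms, one size per applicable norm
    abstraction = {"length": length_norm(value[0]), "size": term_size(value[1])}
    assert abstraction == {"length": 3, "size": 5}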
|
[37]
|
Elvira Albert, Samir Genaim, and Enrique Martin-Martin.
May-Happen-in-Parallel Analysis for Priority-based
Scheduling.
In Ken McMillan, Aart Middeldorp, and Andrei Voronkov, editors,
19th International Conference on Logic for Programming, Artificial
Intelligence, and Reasoning (LPAR-19), volume 8312 of Lecture Notes in
Computer Science, pages 18-34. Springer, December 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
A may-happen-in-parallel (MHP) analysis infers the sets of
pairs of program points that may execute in parallel along a
program's execution. This is an essential piece of information to
detect data races, and also to infer more complex properties of
concurrent programs, e.g., deadlock freeness, termination and
resource consumption analyses can greatly benefit from the MHP
relations to increase their accuracy. Previous MHP analyses have
assumed a worst case scenario by adopting a simplistic
(non-deterministic) task scheduler which can select any available
task. While the results of the analysis for a non-deterministic
scheduler are obviously sound, they can lead to an overly
pessimistic result. We present an MHP analysis for an asynchronous
language with prioritized task buffers.
Priority-based scheduling is arguably the most common
scheduling strategy adopted in the implementation of concurrent
languages. The challenge is to be able to take task priorities into
account at static analysis time in order to filter out unfeasible
MHP pairs.
@inproceedings{AlbertGM13,
author = {Elvira Albert and Samir Genaim and Enrique Martin-Martin},
title = {May-{H}appen-in-{P}arallel {A}nalysis for {P}riority-based {S}cheduling},
booktitle = {19th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning (LPAR-19)},
editor = {McMillan, Ken and Middeldorp, Aart and Voronkov, Andrei},
volume = {8312},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
isbn = {978-3-642-45220-8},
month = dec,
year = {2013},
pages = {18-34},
doi = {10.1007/978-3-642-45221-5_2},
url = {http://dx.doi.org/10.1007/978-3-642-45221-5_2}
}
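The refinement step itself is plain set filtering; the paper's contribution is the judgement that declares a pair unfeasible under priority-based scheduling. A minimal skeleton (ours), with that judgement left as a parameter:

    def refine_mhp(mhp_pairs, provably_ordered):
        # provably_ordered(p, q): True when task priorities force one of
        # the two points to always run before the other, so the pair can
        # never interleave (the core judgement of the analysis)
        return {(p, q) for (p, q) in mhp_pairs if not provably_ordered(p, q)}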
|
[38]
|
Antonio Flores-Montoya, Elvira Albert, and Samir Genaim.
May-Happen-in-Parallel based Deadlock Analysis for
Concurrent Objects.
In Dirk Beyer and Michele Boreale, editors, Formal Techniques
for Distributed Systems (FMOODS/FORTE 2013), volume 7892 of Lecture
Notes in Computer Science, pages 273-288. Springer, June 2013.
[ bibtex |
abstract |
http ]
We present a novel deadlock analysis for concurrent objects based on
the results inferred by a points-to analysis and a
may-happen-in-parallel (MHP) analysis. Similarly to other analyses,
we build a dependency graph such that the absence of cycles
in the graph ensures deadlock freeness. An MHP
analysis provides an over-approximation of the pairs of program
points that may be running in parallel. The crux of the method is
that the analysis integrates the MHP information within the dependency
graph in order to discard unfeasible cycles that otherwise would
lead to false positives. We argue that our analysis is more precise
and/or efficient than previous proposals for deadlock analysis of
concurrent objects. As regards accuracy, we are able to handle cases
that other analyses have pointed out as challenges. As regards
efficiency, the complexity of our deadlock analysis is polynomial.
@inproceedings{FloresAG13,
author = {Antonio Flores-Montoya and Elvira Albert and Samir Genaim},
title = {May-{H}appen-in-{P}arallel based {D}eadlock {A}nalysis for {C}oncurrent {O}bjects},
booktitle = {Formal Techniques for Distributed Systems (FMOODS/FORTE 2013)},
editor = {Dirk Beyer and Michele Boreale},
series = {Lecture Notes in Computer Science},
month = jun,
year = {2013},
pages = {273-288},
volume = {7892},
isbn = {978-3-642-38591-9},
url = {http://dx.doi.org/10.1007/978-3-642-38592-6_19},
publisher = {Springer}
}
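A simplified variant (ours; the paper discards unfeasible cycles, while this sketch filters at the coarser edge level): keep only dependency edges whose two program points may happen in parallel, then report a potential deadlock iff a cycle survives.

    def may_deadlock(dep_edges, mhp):
        # dep_edges: iterable of (src, dst, point_src, point_dst)
        # mhp: set of program-point pairs that may run in parallel
        graph = {}
        for src, dst, p, q in dep_edges:
            if (p, q) in mhp or (q, p) in mhp:   # keep feasible edges only
                graph.setdefault(src, set()).add(dst)
        WHITE, GREY, BLACK = 0, 1, 2
        color = {}
        def dfs(n):                              # 3-colour cycle detection
            color[n] = GREY
            for s in graph.get(n, ()):
                c = color.get(s, WHITE)
                if c == GREY or (c == WHITE and dfs(s)):
                    return True
            color[n] = BLACK
            return False
        return any(color.get(n, WHITE) == WHITE and dfs(n) for n in list(graph))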
|
[39]
|
Elvira Albert, Jesús Correas, Germán Puebla, and Guillermo Román-Díez.
Quantified Abstractions of Distributed Systems.
In 10th International Conference on integrated Formal Methods
(iFM'13), volume 7940 of Lecture Notes in Computer Science, pages
285-300. Springer, June 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
When reasoning about distributed systems, it is
essential to have information about the different
kinds of nodes which compose the system, how many
instances of each kind exist, and how nodes
communicate with other nodes. In this paper we
present a static-analysis-based approach which is
able to provide information about the questions
above. In order to cope with an unbounded number of
nodes and an unbounded number of calls among them,
the analysis performs an abstraction of the
system, producing a graph whose nodes may represent
(infinitely) many concrete nodes and whose arcs may
represent (infinitely) many calls among them.
The crux of our approach is that the abstraction is
enriched with upper bounds inferred by a
resource analysis which limit the number of
concrete instances which the nodes and arcs
represent. The combined information provided by our
approach has interesting applications such as
debugging, optimizing and dimensioning distributed
systems.
@inproceedings{AlbertCPR13,
author = {Elvira Albert and Jes\'us Correas and Germ\'an Puebla and Guillermo Rom\'an-D\'iez},
title = {{Q}uantified {A}bstractions of {D}istributed {S}ystems},
booktitle = {10th International Conference on integrated Formal Methods (iFM'13)},
year = {2013},
month = jun,
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7940},
pages = {285-300},
doi = {10.1007/978-3-642-38613-8_20},
url = {http://dx.doi.org/10.1007/978-3-642-38613-8_20},
rank:grinscie:class = {3}
}
|
[40]
|
Elvira Albert, Puri Arenas, Miguel Gómez-Zamalloa, and Peter Y.H. Wong.
aPET: A Test Case Generation Tool for Concurrent
Objects.
In Bertrand Meyer, Luciano Baresi, and Mira Mezini, editors,
Joint Meeting of the European Software Engineering Conference and the ACM
SIGSOFT Symposium on the Foundations of Software Engineering, ESEC/FSE'13,
Saint Petersburg, Russian Federation, August 18-26, 2013, pages 595-598.
ACM, 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present the concepts, usage and prototypical implementation of
aPET, a test case generation tool for a distributed asynchronous
language based on concurrent objects. The system receives as
input a program, a selection of methods to be tested, and a set of
parameters that include a selection of a coverage criterion. It
yields as output a set of test cases which guarantee that the
selected coverage criterion is achieved.
aPET is completely integrated within the language's IDE via
Eclipse. The generated test cases can be displayed in textual mode
and, besides, it is possible to generate ABSUnit code (i.e., code
runnable in a simple framework similar to JUnit to write repeatable
tests). The information yielded by aPET can be relevant to spot
bugs during program development and also to perform regression
testing.
@inproceedings{AlbertAGZW13,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa and Peter Y.H. Wong},
title = {a{PET}: {A} {T}est {C}ase {G}eneration {T}ool for {C}oncurrent {O}bjects},
booktitle = {Joint Meeting of the European Software Engineering Conference
and the ACM SIGSOFT Symposium on the Foundations of Software
Engineering, ESEC/FSE'13, Saint Petersburg, Russian Federation,
August 18-26, 2013},
year = 2013,
publisher = {ACM},
pages = {595--598},
editor = {Bertrand Meyer and
Luciano Baresi and
Mira Mezini},
doi = {10.1145/2491411.2494590},
url = {http://dx.doi.org/10.1145/2491411.2494590}
}
|
[41]
|
Elvira Albert, Antonio Flores-Montoya, Samir Genaim, and Enrique Martin-Martin.
Termination and Cost Analysis of Loops with Concurrent
Interleavings.
In Dang Van Hung and Mizuhito Ogawa, editors, Automated
Technology for Verification and Analysis - 11th International Symposium, ATVA
2013, Hanoi, Vietnam, October 15-18, 2013. Proceedings, volume 8172 of
Lecture Notes in Computer Science, pages 349-364. Springer, 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
By following a rely-guarantee style of reasoning, we present
a novel termination analysis for concurrent programs that, in order to
prove termination of a considered loop, makes the assumption that the
"shared-data that is involved in the termination proof of the loop is
modified a finite number of times". In a subsequent step, it proves that
this assumption holds in all code whose execution might interleave with such
loop. At the core of the analysis, we use a may-happen-in-parallel analysis
to restrict the set of program points whose execution can interleave with
the considered loop. Interestingly, the same kind of reasoning can
be applied to infer upper bounds on the number of iterations of loops
with concurrent interleavings. To the best of our knowledge, this is the
first method to automatically bound the cost of this kind of loop.
@inproceedings{AlbertFGM13,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim and Enrique Martin-Martin},
title = {Termination and {C}ost {A}nalysis of {L}oops with {C}oncurrent {I}nterleavings},
booktitle = {Automated Technology for Verification and Analysis - 11th
International Symposium, ATVA 2013, Hanoi, Vietnam, October
15-18, 2013. Proceedings},
editor = {Dang Van Hung and Mizuhito Ogawa},
pages = {349-364},
volume = {8172},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
year = {2013},
doi = {10.1007/978-3-319-02444-8_25},
url = {http://dx.doi.org/10.1007/978-3-319-02444-8_25}
}
|
[42]
|
Elvira Albert, Antonio Flores-Montoya, and Samir Genaim.
MayPar: a may-happen-in-parallel analyzer for concurrent objects.
In Will Tracz, Martin P. Robillard, and Tevfik Bultan, editors,
20th ACM SIGSOFT Symposium on the Foundations of Software Engineering
(FSE-20), SIGSOFT/FSE'12, Cary, NC, USA - November 11 - 16, 2012, pages
1-4. ACM, November 2012.
[ bibtex |
abstract |
DOI ]
We present the concepts, usage and prototypical
implementation of MayPar, a may-happen-in-parallel
(MHP) static analyzer for a distributed asynchronous
language based on concurrent objects. Our tool
allows analyzing an application and finding out the
pairs of statements that can execute in
parallel. The information can be displayed by means
of a graphical representation of the MHP analysis
graph or, in a textual way, as a set of pairs which
identify the program points that may run in
parallel. The information yielded by MayPar can be
relevant (1) to spot bugs in the program related to
fragments of code which should not run in parallel
and also (2) to improve the precision of other
analyses which infer more complex properties (e.g.,
termination and resource consumption).
@inproceedings{AlbertFG12-FSE,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim},
title = {MayPar: a may-happen-in-parallel analyzer for concurrent objects},
editor = {Will Tracz and Martin P. Robillard and
Tevfik Bultan},
booktitle = {20th ACM SIGSOFT Symposium on the Foundations of Software
Engineering (FSE-20), SIGSOFT/FSE'12, Cary, NC, USA - November
11 - 16, 2012},
year = {2012},
month = nov,
pages = {1-4},
publisher = {ACM},
isbn = {978-1-4503-1614-9, 978-1-4503-0443-6},
ee = {http://doi.acm.org/10.1145/2393596.2393611}
}
|
[43]
|
Elvira Albert, Miguel Gómez-Zamalloa, and José Miguel Rojas.
Resource-driven CLP-based Test Case Generation.
In LOPSTR 2011 Revised Selected Papers, volume 7225 of
Lecture Notes in Computer Science, pages 25-41. Springer, July 2012.
[ bibtex |
abstract |
PDF |
http ]
Test Data Generation (TDG) aims at automatically
obtaining test inputs which can then be used
by a software testing tool to validate the
functional behaviour of the program. In this paper,
we propose resource-aware TDG, whose purpose
is to generate test cases (from which the test
inputs are obtained) with associated resource
consumptions. The framework is parametric w.r.t. the notion of resource (it can measure memory,
steps, etc.) and allows using software testing to
detect bugs related to non-functional aspects of the
program. As a further step, we introduce
resource-driven TDG whose purpose is to guide
the TDG process by taking resource consumption into
account. Interestingly, given a resource
policy, TDG is guided to generate test cases that
adhere to the policy and avoid the generation of
test cases which violate it.
@inproceedings{AlbertGR11,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Jos\'{e} Miguel Rojas},
title = {{R}esource-driven {CLP}-based {T}est {C}ase {G}eneration},
booktitle = {LOPSTR 2011 Revised Selected Papers},
series = {Lecture Notes in Computer Science},
volume = {7225},
pages = {25--41},
npages = {17},
publisher = {Springer},
month = jul,
year = {2012},
isbn = {978-3-642-32210-5},
url = {http://dx.doi.org/10.1007/978-3-642-32211-2_3}
}
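A toy illustration (ours) of a resource policy at work: resource-aware test cases carry their measured consumption, and the policy prunes those that violate it.

    def within_policy(test_cases, max_cost):
        # test_cases: (inputs, cost) pairs produced by resource-aware TDG
        return [(inputs, cost) for (inputs, cost) in test_cases if cost <= max_cost]

    cases = [(("a",), 10), (("b",), 75), (("c",), 40)]
    assert within_policy(cases, 50) == [(("a",), 10), (("c",), 40)]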
|
[44]
|
Elvira Albert, Richard Bubel, Samir Genaim, Reiner Hähnle, and Guillermo Román-Díez.
Verified Resource Guarantees for Heap Manipulating
Programs.
In Proceedings of the 15th International Conference on
Fundamental Approaches to Software Engineering, FASE 2012, Tallinn, Estonia,
March, 2012, volume 7212 of Lecture Notes in Computer Science, pages
130-145. Springer, March 2012.
[ bibtex |
abstract |
DOI |
PDF |
http ]
Program properties that are automatically inferred by static
analysis tools are generally not considered to be completely
trustworthy, unless the tool implementation or the results are
formally verified. Here we focus on the formal verification of
resource guarantees inferred by automatic cost
analysis. Resource guarantees ensure that programs run within the
indicated amount of resources which may refer to memory consumption,
to number of instructions executed, etc. In previous work we
studied formal verification of inferred resource guarantees that
depend only on integer data. In realistic programs, however,
resource consumption is often bounded by the size of
heap-allocated data structures. Bounding their size
requires performing a number of structural heap
analyses. The contributions of this paper are (i) to identify what
exactly needs to be verified to guarantee sound analysis of heap
manipulating programs, (ii) to provide a suitable extension of the
program logic used for verification to handle structural heap
properties in the context of resource guarantees, and (iii) to
improve the underlying theorem prover so that proof obligations
can be automatically discharged.
@inproceedings{AlbertBGHR12,
author = {Elvira Albert and Richard Bubel and Samir Genaim and Reiner
H\"ahnle and Guillermo Rom\'an-D\'iez},
title = {{V}erified {R}esource {G}uarantees for {H}eap {M}anipulating {P}rograms},
booktitle = {Proceedings of the 15th International Conference on Fundamental Approaches to Software Engineering, FASE 2012, Tallinn, Estonia, March, 2012},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
year = {2012},
volume = {7212},
month = mar,
pages = {130-145},
isbn = {978-3-642-28871-5},
doi = {10.1007/978-3-642-28872-2_10},
url = {http://www.springerlink.com/content/a10153194828v8k1/},
rank:grinscie:class = {3}
}
|
[45]
|
Elvira Albert, Bjarte M. Østvold, and José Miguel Rojas.
Automated Extraction of Abstract Behavioural Models from
JMS Applications.
In Formal Methods for Industrial Critical Systems (FMICS'12),
volume 7437 of Lecture Notes in Computer Science, pages 16-31.
Springer, 2012.
[ bibtex |
abstract |
PDF ]
Distributed systems are hard to program, understand and
analyze. Two key sources of complexity are the many
possible behaviors of a system, arising from the
parallel execution of its distributed nodes, and the
handling of asynchronous messages exchanged between
nodes. We show how to systematically construct
executable models of publish/subscribe systems based
on the Java Messaging Service (JMS). These models,
written in the executable Abstract Behavioural
Specification (ABS) language, capture the essential
parts of the messaging behavior of the original Java
systems, and eliminate details not related to
distribution and messages. We report on JMS2ABS, a
tool that automatically extracts ABS models from the
bytecode of JMS systems. Since the extracted models
are formal and executable, they allow us to reason
about the modeled JMS systems by means of tools
built specifically for the modeling language. For
example, we have succeeded in applying simulation,
termination and resource analysis tools developed
for ABS to, respectively, execute, prove termination
and infer the resource consumption of the original
JMS applications.
@inproceedings{AlbertOR12,
author = {Elvira Albert and Bjarte M. {\O}stvold and Jos\'{e} Miguel Rojas},
title = {{A}utomated {E}xtraction of {A}bstract {B}ehavioural {M}odels from {JMS} {A}pplications},
booktitle = {Formal Methods for Industrial Critical Systems (FMICS'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7437},
pages = {16--31},
isbn = {978-3-642-32468-0},
year = {2012}
}
|
[46]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Towards Testing Concurrent Objects in CLP.
In Agostino Dovier and Vítor Santos Costa, editors, 28th
International Conference on Logic Programming (ICLP'12), volume 17 of
LIPIcs, pages 98-108. Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik,
2012.
[ bibtex |
abstract |
DOI |
PDF ]
Testing is a vital part of the software development process. It is
even more so in the context of concurrent languages, since due to
undesired task interleavings and to unexpected behaviours of the
underlying task scheduler, errors can easily go undetected. This
paper studies the extension of the CLP-based framework for test data
generation of sequential programs to the context of concurrent
objects, a concurrency model which constitutes a promising
solution to concurrency in OO languages.
The challenge is in developing the following required extensions entirely in CLP
(which are our main contributions):
(1) we simulate
task interleavings which could occur in an actual execution and
could lead to additional test cases for the method under test (while
interleavings that would not add new test cases are disregarded),
(2) we integrate sophisticated scheduling policies and, besides,
use different policies for the different fragments of the program
that can run in parallel,
(3) we combine standard termination and coverage
criteria used for testing sequential programs with specific criteria
which control termination and coverage from the concurrency point of
view, e.g., we can limit the number of task interleavings allowed and
the number of loop unrollings performed in each parallel component, etc.
@inproceedings{AlbertAG-Z12,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Towards Testing Concurrent Objects in CLP},
booktitle = {28th International Conference on Logic Programming (ICLP'12)},
editor = {Agostino Dovier and V\'{\i}tor Santos Costa},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik},
series = {LIPIcs},
volume = {17},
year = {2012},
doi = {10.4230/LIPIcs.ICLP.2012.98},
pages = {98-108}
}
|
[47]
|
Elvira Albert, Antonio Flores-Montoya, and Samir Genaim.
Analysis of May-Happen-in-Parallel in Concurrent Objects.
In Holger Giese and Grigore Rosu, editors, Formal Techniques for
Distributed Systems - Joint 14th IFIP WG 6.1 International Conference,
FMOODS 2012 and 32nd IFIP WG 6.1 International Conference, FORTE
2012, Stockholm, Sweden, June 13-16, 2012. Proceedings, volume 7273 of
Lecture Notes in Computer Science, pages 35-51. Springer, 2012.
[ bibtex |
abstract |
DOI |
http ]
This paper presents a may-happen-in-parallel (MHP)
analysis for OO languages based on concurrent
objects. In this concurrency model, objects are the
concurrency units such that, when a method is
invoked on an object o2 from a task executing on
object o1, statements of the current task in o1
may run in parallel with those of the (asynchronous)
call on o2, and with those of transitively invoked
methods. The goal of the MHP analysis is to identify
pairs of statements in the program that may run in
parallel in any execution. Our MHP analysis is
formalized as a method-level (local) analysis
whose information can be modularly composed to
obtain application-level (global) information.
@inproceedings{AlbertFG12,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim},
title = {Analysis of {M}ay-{H}appen-in-{P}arallel in {C}oncurrent {O}bjects},
booktitle = {Formal Techniques for Distributed Systems - Joint 14th {IFIP} {WG}
6.1 International Conference, {FMOODS} 2012 and 32nd {IFIP} {WG} 6.1
International Conference, {FORTE} 2012, Stockholm, Sweden, June 13-16,
2012. Proceedings},
series = {Lecture Notes in Computer Science},
editor = {Giese, Holger and Rosu, Grigore},
publisher = {Springer},
isbn = {978-3-642-30792-8},
pages = {35-51},
volume = {7273},
url = {http://dx.doi.org/10.1007/978-3-642-30793-5_3},
doi = {10.1007/978-3-642-30793-5_3},
year = {2012}
}
|
[48]
|
Elvira Albert, Jesús Correas, Germán Puebla, and Guillermo Román-Díez.
Incremental Resource Usage Analysis.
In Proceedings of the 2012 ACM SIGPLAN Workshop on Partial
Evaluation and Program Manipulation, PEPM 2012, Philadelphia, Pennsylvania,
USA, January 23-24, 2012, pages 25-34. ACM Press, January 2012.
[ bibtex |
abstract |
DOI |
PDF |
http ]
The aim of incremental global analysis is, given a program, its
analysis results and a series of changes to the program, to obtain
the new analysis results as efficiently as possible and, ideally,
without having to (re-)analyze fragments of code which are not
affected by the changes. Incremental analysis can significantly
reduce both the time and the memory requirements of analysis. This
paper presents an incremental resource usage (a.k.a. cost) analysis
for a sequential Java-like language. Our main contributions are (1)
a multi-domain incremental fixed-point algorithm which can be used
by all global (pre-)analyses required to infer the cost (including
class, sharing, cyclicity, constancy, and size analyses) and which
takes care of propagating dependencies among such domains, and (2) a
novel form of cost summaries which allows us to incrementally
reconstruct only those components of cost functions affected by the
change. Experimental results in the COSTA system show that the
proposed incremental analysis can perform very efficiently in
practice.
@inproceedings{AlbertCPR12,
author = {Elvira Albert and Jes\'us Correas and Germ\'an Puebla and Guillermo Rom\'an-D\'iez},
title = {{I}ncremental {R}esource {U}sage {A}nalysis},
booktitle = {Proceedings of the 2012 ACM SIGPLAN Workshop on Partial
Evaluation and Program Manipulation, PEPM 2012, Philadelphia, Pennsylvania, USA,
January 23-24, 2012},
publisher = {ACM Press},
year = {2012},
month = jan,
pages = {25--34},
isbn = {978-1-4503-1118-2},
doi = {10.1145/2103746.2103754},
url = {http://dl.acm.org/citation.cfm?doid=2103746.2103754},
rank:grinscie:class = {3}
}
|
[49]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and Germán Puebla.
COSTABS: A Cost and Termination Analyzer for ABS.
In Oleg Kiselyov and Simon Thompson, editors, Proceedings of the
2012 ACM SIGPLAN Workshop on Partial Evaluation and Program Manipulation,
PEPM 2012, Philadelphia, Pennsylvania, USA, January 23-24, 2012, pages
151-154. ACM Press, 2012.
[ bibtex |
abstract |
DOI ]
ABS is an abstract behavioural specification
language to model distributed concurrent systems. Characteristic
features of ABS are that: (1) it allows abstracting from
implementation details while remaining executable: a
functional sub-language over abstract data types is used to
specify internal, sequential computations; and (2) the
imperative sub-language provides flexible concurrency and
synchronization mechanisms by means of asynchronous method calls,
release points in method definitions, and cooperative scheduling of
method activations. This paper presents COSTABS, a COSt and
Termination analyzer for ABS, which is able to prove termination and
obtain resource usage bounds for both the imperative and
functional fragments of programs. The resources that COSTABS can
infer include termination, number of execution steps, memory
consumption, number of asynchronous calls, among others. The
analysis bounds provide formal guarantees that the execution of the
program will never exceed the inferred amount of resources. The
system can be downloaded as free software from its web site, where a
repository of examples and a web interface are also provided. To the
best of our knowledge, COSTABS is the first system able to perform
resource analysis for a concurrent language.
@inproceedings{AlbertAGGZP12,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G\'omez-Zamalloa and Germ\'an Puebla},
title = {{COSTABS}: A {C}ost and {T}ermination {A}nalyzer for {ABS}},
booktitle = {Proceedings of the 2012 ACM SIGPLAN Workshop on Partial
Evaluation and Program Manipulation, PEPM 2012, Philadelphia, Pennsylvania, USA,
January 23-24, 2012},
publisher = {ACM Press},
editor = {Oleg Kiselyov and Simon Thompson},
year = {2012},
isbn = {978-1-4503-1118-2},
pages = {151-154},
doi = {10.1145/2103746.2103774}
}
|
[50]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Automatic Inference of Resource Consumption Bounds.
In Logic for Programming, Artificial Intelligence, and Reasoning
- 18th International Conference, LPAR-18, Mérida, Venezuela, March 11-15,
2012. Proceedings LPAR, volume 7180 of Lecture Notes in Computer
Science, pages 1-11. Springer, 2012.
[ bibtex |
abstract |
DOI ]
One of the main features of programs is the amount of resources
which are needed in order to run them. Different resources can be
taken into consideration, such as the number of execution steps,
amount of memory allocated, number of calls to certain methods,
etc. Unfortunately, manually determining the resource consumption of
programs is difficult and error-prone. We provide an overview of a
state of the art framework for automatically obtaining both upper
and lower bounds on the resource consumption of programs. The bounds
obtained are functions on the size of the input arguments to the
program and are obtained statically, i.e., without running the
program. Due to the approximations introduced, the framework can
fail to obtain (non-trivial) bounds even if they exist. On the other
hand, modulo implementation bugs, the bounds thus obtained are valid
for any execution of the program. The framework has been
implemented in the COSTA system and can provide useful bounds for
realistic object-oriented and actor-based concurrent programs.
@inproceedings{AlbertAGGZP12a,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa and
Germ{\'a}n Puebla},
title = {Automatic {I}nference of {R}esource {C}onsumption {B}ounds},
booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning
- 18th International Conference, LPAR-18, M{\'e}rida,
Venezuela, March 11-15, 2012. Proceedings LPAR},
year = {2012},
pages = {1-11},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-642-28717-6},
volume = {7180}
}
|
[51]
|
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Symbolic Execution of Concurrent Objects in CLP.
In Practical Aspects of Declarative Languages (PADL'12), volume 7149
of Lecture Notes in Computer Science, pages 123-137. Springer, January 2012.
[ bibtex |
abstract |
DOI |
PDF ]
In the concurrent objects model, objects have conceptually
dedicated processors and live in a distributed environment with
unordered communication by means of asynchronous method calls.
Method callers may decide at runtime when to synchronize with the reply
from a call. This paper presents a CLP-based approach to symbolic execution
of concurrent OO programs. Developing a symbolic execution engine for
concurrent objects is challenging because it needs to combine the
OO features of the language, concurrency and backtracking.
Our approach consists in, first, transforming the OO program into an
equivalent CLP program which contains calls to specific builtins that
handle the concurrency model. The builtins are implemented in CLP and
include primitives to handle asynchronous calls synchronization
operations and scheduling policies, among others. Interestingly,
symbolic execution of the transformed programs then relies
simply on the standard sequential execution of CLP.
We report on a prototype implementation within the PET system which
shows the feasibility of our approach.
@inproceedings{AlbertAGZ12,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Symbolic {E}xecution of {C}oncurrent {O}bjects in {CLP}},
booktitle = {Practical Aspects of Declarative Languages (PADL'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
year = {2012},
month = jan,
volume = {7149},
pages = {123-137},
doi = {10.1007/978-3-642-27694-1_10}
}
|
[52]
|
Elvira Albert, Jesús Correas, Germán Puebla, and Guillermo Román-Díez.
Towards Incremental Resource Usage Analysis.
In The Ninth Asian Symposium on Programming Languages and
Systems (APLAS'11), December 2011.
Poster Presentation.
[ bibtex |
abstract |
PDF ]
The aim of incremental global analysis is, given a program, its
analysis results and a series of changes to the program, to obtain
the new analysis results as efficiently as possible and, ideally,
without having to (re-)analyze fragments of code which are not
affected by the changes. Incremental analysis can significantly
reduce both the time and the memory requirements of analysis. This
paper presents an incremental resource usage (a.k.a. cost) analysis
for a sequential Java-like language. Our main contributions are (1)
a multi-domain incremental fixed-point algorithm which can be used
by all global (pre-)analyses required to infer the cost (including
class, sharing, cyclicity, constancy, and size analyses) and which
takes care of propagating dependencies among such domains, and (2) a
novel form of cost summaries which allows us to incrementally
reconstruct only those components of cost functions affected by the
change. Experimental results in the COSTA system show that the
proposed incremental analysis can perform very efficiently in
practice.
@inproceedings{AlbertCPR11,
author = {Elvira Albert and Jes\'us Correas and Germ\'an Puebla and Guillermo Rom\'an-D\'iez},
title = {{T}owards {I}ncremental {R}esource {U}sage {A}nalysis},
booktitle = {The Ninth Asian Symposium on Programming Languages and Systems (APLAS'11)},
year = {2011},
month = dec,
note = {Poster Presentation}
}
|
[53]
|
Elvira Albert, Israel Cabañas, Antonio Flores-Montoya, Miguel Gómez-Zamalloa, and Sergio Gutiérrez.
jPET: an Automatic Test-Case Generator for Java.
In 18th Working Conference on Reverse Engineering (WCRE 2011),
pages 441-442. IEEE Computer Society, October 2011.
[ bibtex |
abstract |
DOI ]
We present jPET, a white box test-case generator
(TCG) which can be used during software
development of Java applications within the Eclipse
environment. jPET builds on top of PET, a TCG
which automatically obtains test-cases from the byte
code associated with a Java program. jPET performs
reverse engineering of the test-cases obtained at
the byte code level by PET in order to yield this
information to the user at the source code
level. This allows understanding the information
gathered at the lower level and using it to test
source Java programs.
@inproceedings{AlbertCFGG11,
author = {Elvira Albert and Israel Caba{\~n}as and Antonio
Flores-Montoya and Miguel G\'{o}mez-Zamalloa and
Sergio Guti\'{e}rrez},
title = {j{PET}: an {A}utomatic {T}est-{C}ase {G}enerator for {J}ava},
booktitle = {18th Working Conference on Reverse Engineering (WCRE 2011)},
month = oct,
year = {2011},
pages = {441-442},
publisher = {IEEE Computer Society},
npages = {2},
doi = {10.1109/WCRE.2011.67}
}
|
[54]
|
Elvira Albert, Samir Genaim, Miguel Gómez-Zamalloa, Einar Broch Johnsen,
Rudolf Schlatte, and Silvia Lizeth Tapia Tarifa.
Simulating Concurrent Behaviors with Worst-Case Cost
Bounds.
In Michael Butler and Wolfram Schulte, editors, FM 2011: Formal
Methods - 17th International Symposium on Formal Methods, Limerick, Ireland,
June 20-24, 2011. Proceedings, volume 6664 of Lecture Notes in Computer
Science, pages 353-368, 2011.
[ bibtex |
abstract |
DOI ]
Modern software systems are increasingly being
developed for deployment on a range of
architectures. For this purpose, it is interesting
to capture aspects of low-level deployment concerns
in high-level modeling languages. In this paper, an
executable object-oriented modeling language is
extended with resource-restricted deployment
components. To analyze model behavior, a formal
methodology is proposed to assess resource
consumption, which balances the scalability of the
method and the reliability of the obtained
results. The approach applies to a general notion of
resource, including traditional cost measures (e.g.,
time, memory) as well as concurrency-related
measures (e.g., requests to a server, spawned
tasks). The main idea of our approach is to combine
reliable (but expensive) worst-case cost analysis of
statically predictable parts of the model with fast
(but inherently incomplete) simulations of the
concurrent aspects in order to avoid the state-space
explosion. The approach is illustrated by the
analysis of memory consumption.
@inproceedings{AlbertGGJST11,
author = {Elvira Albert and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa and
Einar Broch Johnsen and
Rudolf Schlatte and
Silvia Lizeth Tapia Tarifa},
title = {{S}imulating {C}oncurrent {B}ehaviors with
{W}orst-{C}ase {C}ost {B}ounds},
editor = {Michael Butler and Wolfram Schulte},
booktitle = {FM 2011: Formal Methods - 17th International Symposium on
Formal Methods, Limerick, Ireland, June 20-24, 2011. Proceedings},
year = 2011,
volume = {6664},
pages = {353-368},
series = {Lecture Notes in Computer Science},
ee = {http://dx.doi.org/10.1007/978-3-642-21437-0_27}
}
|
[55]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Cost Analysis of Concurrent OO Programs.
In Hongseok Yang, editor, Programming Languages and Systems -
9th Asian Symposium, APLAS 2011, Kenting, Taiwan, December 5-7, 2011.
Proceedings, volume 7078 of Lecture Notes in Computer Science, pages
238-254. Springer, 2011.
[ bibtex |
abstract |
DOI ]
Cost analysis aims at automatically approximating the
resource consumption (e.g., memory) of
executing a program in terms of its input
parameters. While cost analysis for sequential
programming languages has received considerable
attention, concurrency and distribution have been
notably less studied. The main challenges (and our
contributions) of cost analysis in a concurrent
setting are: (1) Inferring precise size
relations for data in the program in the presence
of shared memory. This information is essential for
bounding the number of iterations of loops. (2)
Distribution suggests that analysis must keep the
cost of the diverse distributed components
separate. We handle this by means of a novel form of
recurrence equations which are parametric on
the notion of cost center, which represents a
corresponding component. To the best of our
knowledge, our work is the first one to present a
general cost analysis framework and an
implementation for concurrent OO programs.
@inproceedings{AlbertAGGP11,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G\'{o}mez-Zamalloa and Germ\'an Puebla},
editor = {Hongseok Yang},
title = {Cost {A}nalysis of {C}oncurrent {OO} Programs},
booktitle = {Programming Languages and Systems - 9th Asian Symposium,
APLAS 2011, Kenting, Taiwan, December 5-7, 2011. Proceedings},
series = {Lecture Notes in Computer Science},
year = {2011},
publisher = {Springer},
pages = {238-254},
volume = {7078},
doi = {10.1007/978-3-642-25318-8_19}
}
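A minimal sketch (ours) of the cost-center idea: every operation charges the counter of its own distributed component instead of a single global accumulator, so the inferred bounds stay separate per component.

    from collections import Counter

    cost = Counter()

    def charge(center, amount):
        # attribute 'amount' to the cost center of the executing component
        cost[center] += amount

    charge("server", 5); charge("client", 2); charge("server", 3)
    assert cost == Counter({"server": 8, "client": 2})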
|
[56]
|
Elvira Albert, Richard Bubel, Samir Genaim, Reiner Hähnle, Germán Puebla,
and Guillermo Román-Díez.
Verified Resource Guarantees using COSTA and KeY.
In Siau-Cheng Khoo and Jeremy G. Siek, editors, Proceedings of
the 2011 ACM SIGPLAN Workshop on Partial Evaluation and Program Manipulation,
PEPM 2011, Austin, TX, USA, January 24-25, 2011, SIGPLAN, pages 73-76. ACM,
2011.
[ bibtex |
abstract |
DOI |
PDF ]
Resource guarantees allow being certain that programs
will run within the indicated amount of resources,
which may refer to memory consumption, number of
instructions executed, etc. This information can be
very useful, especially in real-time and
safety-critical applications. Nowadays, a number of
automatic tools exist, often based on type systems
or static analysis, which produce such resource
guarantees. In spite of being based on
theoretically sound techniques, the implemented
tools may contain bugs which render the resource
guarantees thus obtained not completely trustworthy.
Performing full-blown verification of such tools is
a daunting task, since they are large and complex.
In this work we investigate an alternative approach
whereby, instead of the tools, we formally verify
the results of the tools. We have implemented this
idea using COSTA, a state-of-the-art static analysis
system, for producing resource guarantees and KeY, a
state-of-the-art verification tool, for formally
verifying the correctness of such resource
guarantees. Our preliminary results show that the
proposed tool cooperation can be used for
automatically producing verified resource
guarantees.
@inproceedings{AlbertBGHPR11,
author = {Elvira Albert and Richard Bubel and Samir Genaim and Reiner H\"ahnle and Germ\'an Puebla and Guillermo Rom\'an-D\'iez},
title = {{V}erified {R}esource {G}uarantees using {COSTA} and {KeY}},
booktitle = {Proceedings of the 2011 ACM SIGPLAN Workshop on Partial
Evaluation and Program Manipulation, PEPM 2011, Austin,
TX, USA, January 24-25, 2011},
editor = {Siau-Cheng Khoo and Jeremy G. Siek},
publisher = {ACM},
series = {SIGPLAN},
year = {2011},
pages = {73-76},
doi = {10.1145/1929501.1929513},
ee = {http://doi.acm.org/10.1145/1929501.1929513},
rank:grinscie:class = {3}
}
|
[57]
|
Elvira Albert, Samir Genaim, and Abu Naser Masud.
More Precise yet Widely Applicable Cost Analysis.
In Ranjit Jhala and David A. Schmidt, editors, Verification,
Model Checking, and Abstract Interpretation - 12th International Conference,
VMCAI 2011, Austin, TX, USA, January 23-25, 2011. Proceedings, volume 6538
of Lecture Notes in Computer Science, pages 38-53. Springer, January
2011.
[ bibtex |
abstract |
DOI |
PDF ]
Cost analysis aims at determining the amount of
resources required to run a program in terms of its
input data sizes. Automatically inferring precise
bounds, while at the same time being able to handle
a wide class of programs, is a main challenge in
cost analysis. (1) Existing methods which rely on
computer algebra systems (CAS) to solve the
obtained cost recurrence equations (CR) are very
precise when applicable, but handle a very
restricted class of CR. (2) Specific solvers
developed for CR tend to sacrifice accuracy for
wider applicability. In this paper, we present a
novel approach to inferring precise upper and lower
bounds on CR which, when compared to (1), is
strictly more widely applicable while precision is
kept and, when compared to (2), is in practice more
precise and still keeps wide applicability. The main
novelty is that we are able to accurately bound the
worst-case/best-case cost of each iteration of the
program loops and, then, by summing the resulting
sequences, we obtain precise upper/lower
bounds. Preliminary experimental results on Java
(bytecode) programs confirm the good balance between
the accuracy of our analysis and its
applicability.
@inproceedings{AlbertGM11,
author = {Elvira Albert and Samir Genaim and Abu Naser Masud},
title = {More Precise yet Widely Applicable Cost Analysis},
booktitle = {Verification, Model Checking, and Abstract Interpretation
- 12th International Conference, VMCAI 2011, Austin, TX,
USA, January 23-25, 2011. Proceedings},
editor = {Ranjit Jhala and David A. Schmidt},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {6538},
pages = {38-53},
ee = {http://dx.doi.org/10.1007/978-3-642-18275-4_5},
year = {2011},
month = jan
}
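A worked toy example (ours) of the summation idea: bound each iteration of a loop and sum the resulting sequence. For a loop running n times whose i-th iteration costs at most c(i), the total cost is bounded by the sum of the c(i).

    def loop_upper_bound(n, c):
        # sum of per-iteration worst-case bounds c(0) + ... + c(n-1)
        return sum(c(i) for i in range(n))

    # an inner loop that shrinks with the outer index: c(i) = n - i
    n = 10
    assert loop_upper_bound(n, lambda i: n - i) == n * (n + 1) // 2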
|
[58]
|
Elvira Albert, Miguel Gómez-Zamalloa, José Miguel Rojas, and Germán Puebla.
Compositional CLP-based Test Data Generation for Imperative
Languages.
In María Alpuente, editor, LOPSTR 2010 Revised Selected
Papers, volume 6564 of Lecture Notes in Computer Science, pages
99-116. Springer-Verlag, 2011.
[ bibtex |
abstract |
DOI |
PDF ]
Glass-box test data generation (TDG) is the process of
automatically generating test input data for a
program by considering its internal structure. This
is generally accomplished by performing symbolic
execution of the program where the contents of
variables are expressions rather than concrete
values. The main idea in CLP-based TDG is to
translate imperative programs into equivalent CLP
ones and then rely on the standard evaluation
mechanism of CLP to symbolically execute the
imperative program. Performing symbolic execution on
large programs becomes quickly expensive due to the
large number and the size of paths that need to be
explored. In this paper, we propose compositional
reasoning in CLP-based TDG where large programs can
be handled by testing parts (such as components,
modules, libraries, methods, etc.) separately and
then by composing the test cases obtained for these
parts to get the required information on the whole
program. Importantly, compositional reasoning also
gives us a practical solution to handle native code,
which may be unavailable or written in a different
programming language. Namely, we can model the
behavior of a native method by means of test cases
and compositional reasoning is able to use them.
@inproceedings{AlbertGRP10,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Jos{\'e} Miguel Rojas and Germ{\'a}n Puebla},
title = {Compositional {CLP}-based {T}est {D}ata {G}eneration for {I}mperative {L}anguages},
booktitle = {LOPSTR 2010 Revised Selected Papers},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
editor = {Mar\'{i}a Alpuente},
year = {2011},
volume = {6564},
pages = {99--116},
isbn = {978-3-642-20550-7},
ee = {http://dx.doi.org/10.1007/978-3-642-20551-4_7}
}
|
[59]
|
Elvira Albert, Miguel Gómez-Zamalloa, and Germán Puebla.
PET: A Partial Evaluation-based Test Case Generation
Tool for Java Bytecode.
In ACM SIGPLAN Workshop on Partial Evaluation and
Semantics-based Program Manipulation (PEPM'10), pages 25-28. ACM Press,
2010.
[ bibtex |
abstract |
PDF ]
PET is a prototype Partial Evaluation-based Test case
generation tool for a subset of Java bytecode
programs. It performs white-box test generation by
means of two consecutive Partial Evaluations
(PE). The first PE decompiles the Java bytecode
program into an equivalent CLP (Constraint Logic
Programming) counterpart. The second PE generates a
test-case generator from the CLP program. This
generator captures interesting test coverage
criteria and it is able to generate further test
cases on demand. For the first PE, PET incorporates
an existing tool which decompiles bytecode to
CLP. The main contribution of this work is the
implementation of the second PE and the proof of
concept of the approach. This has required the
development of a partial evaluator for CLP with
appropriate control strategies to ensure the
required coverage criteria and to generate test-case
generators. PET can be downloaded as free software
from its web site, where a repository of examples
and a web interface are also provided. Though PET
has to be extended to be applicable to larger
programs, we argue that it provides some evidence
that the approach can be of practical interest.
@inproceedings{AlbertGP10,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Germ{\'a}n Puebla},
title = {{PET}: A {P}artial {E}valuation-based {T}est {C}ase {G}eneration {T}ool for {J}ava {B}ytecode},
booktitle = {ACM SIGPLAN Workshop on Partial Evaluation and Semantics-based Program Manipulation (PEPM'10)},
year = {2010},
pages = {25--28},
isbn = {978-1-60558-727-1},
publisher = {ACM Press}
}
|
[60]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Diana
Ramírez-Deantes.
From Object Fields to Local Variables: A Practical
Approach to Field-Sensitive Analysis.
In Radhia Cousot and Matthieu Martel, editors, Static Analysis -
17th International Symposium, SAS 2010, Perpignan, France, September 14-16,
2010. Proceedings, volume 6337 of Lecture Notes in Computer Science,
pages 100-116. Springer, 2010.
[ bibtex |
abstract |
DOI |
PDF ]
Static analysis which takes into account the value of
data stored in the heap is typically considered complex
and computationally intractable in practice. Thus, most
static analyzers do not keep track of object fields (or
fields for short), i.e., they are field-insensitive.
In this paper, we propose locality conditions for
soundly converting fields into local variables.
This way, field-insensitive analysis over the
transformed program can infer information on the original
fields.
Our notion of locality is context-sensitive and can be
applied both to numeric and reference fields.
We then propose a polyvariant transformation which
actually converts object fields meeting the locality
condition into variables and which is able to generate
multiple versions of code when this increases
the number of fields which satisfy the locality
conditions. We have implemented our analysis within a
termination analyzer for Java bytecode.
Interestingly, we can now prove termination of programs
which use iterators without the need of a sophisticated
heap analysis.
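The following hypothetical before/after, written in Python purely for illustration (the paper works on Java bytecode), shows the essence of the transformation: a field accessed through a single reference in the relevant scope meets the locality condition and becomes a local variable that field-insensitive analyses can track.

class Counter:
    def __init__(self, n):
        self.pos, self.n = 0, n
    def run(self):                # original: progress is hidden in a field,
        while self.pos < self.n:  # invisible to field-insensitive analysis
            self.pos += 1

def run_local(n):                 # transformed: 'pos' is a local variable,
    pos = 0                       # so a field-insensitive termination
    while pos < n:                # analysis sees that n - pos decreases
        pos += 1
    return pos

print(run_local(5))  # 5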
@inproceedings{AlbertAGPR10,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram\'{\i}rez-Deantes},
title = {From {O}bject {F}ields to {L}ocal {V}ariables: A {P}ractical {A}pproach
to {F}ield-{S}ensitive {A}nalysis},
booktitle = {Static Analysis - 17th International Symposium, SAS 2010,
Perpignan, France, September 14-16, 2010. Proceedings},
editor = {Radhia Cousot and
Matthieu Martel},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {6337},
year = {2010},
isbn = {978-3-642-15768-4},
pages = {100-116},
doi = {10.1007/978-3-642-15769-1_7},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[61]
|
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, Germán Puebla, Diana Ramirez, Guillermo Román-Díez, and Damiano Zanardini.
Termination and Cost Analysis with COSTA and its User
Interfaces.
In Spanish Conference on Programming and Computer Languages
(PROLE'09), volume 258 of Electronic Notes in Theoretical Computer
Science, pages 109-121. Elsevier, September 2009.
[ bibtex |
abstract |
DOI |
PDF ]
COSTA is a static analyzer for Java
bytecode which is able to infer cost and termination
information for large classes of programs. The
analyzer takes as input a program and a resource of
interest, in the form of a cost model, and aims at
obtaining an upper bound on the execution cost with
respect to the resource and at proving program
termination. The COSTA system has reached a
considerable degree of maturity in that (1) it
includes state-of-the-art techniques for statically
estimating the resource consumption and the
termination behavior of programs, plus a number of
specialized techniques which are required for
achieving accurate results in the context of
object-oriented programs, such as handling numeric
fields in value analysis; (2) it provides several
non-trivial notions of cost (resource consumption)
including, in addition to the number of execution
steps, the amount of memory allocated in the heap or
the number of calls to some user-specified method;
(3) it provides several user interfaces: a classical
command line, a Web interface which allows
experimenting remotely with the system without the
need of installing it locally, and a recently
developed Eclipse plugin which facilitates usage of
the analyzer, even during the development phase; (4)
it can deal with both the Standard and Micro
editions of Java. In the tool demonstration, we will
show that COSTA is able to produce meaningful
results for non-trivial programs, possibly using
Java libraries. Such results can then be used in
many applications, including program development,
resource usage certification, program optimization,
etc.
@inproceedings{AlbertAGGPRRZ09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Germ{\'a}n Puebla and Diana Ramirez and Guillermo Rom\'an-D\'iez and Damiano Zanardini},
title = {{T}ermination and {C}ost {A}nalysis with {COSTA} and its {U}ser {I}nterfaces},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'09)},
series = {Electronic Notes in Theoretical Computer Science},
publisher = {Elsevier},
volume = {258},
year = {2009},
pages = {109-121},
doi = {10.1016/j.entcs.2009.12.008},
month = sep
}
|
[62]
|
Elvira Albert, Miguel Gómez-Zamalloa, and Germán Puebla.
Test Data Generation of Bytecode by CLP Partial
Evaluation.
In 18th International Symposium on Logic-based Program Synthesis
and Transformation (LOPSTR'08), number 5438 in Lecture Notes in Computer
Science, pages 4-23. Springer-Verlag, March 2009.
[ bibtex |
abstract |
PDF ]
@inproceedings{AlbertGP08,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Germ{\'a}n Puebla},
title = {{T}est {D}ata {G}eneration of {B}ytecode by {CLP} {P}artial {E}valuation},
booktitle = {18th International Symposium on Logic-based Program Synthesis and Transformation (LOPSTR'08)},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
number = {5438},
isbn = {978-3-642-00514-5},
pages = {4--23},
month = mar,
year = {2009}
}
|
[63]
|
Elvira Albert, Diego Esteban Alonso Blas, Puri Arenas, Samir Genaim, and
Germán Puebla.
Asymptotic Resource Usage Bounds.
In Zhenjiang Hu, editor, Programming Languages and Systems, 7th
Asian Symposium, APLAS 2009, Seoul, Korea, December 14-16, 2009.
Proceedings, volume 5904 of Lecture Notes in Computer Science, pages
294-310. Springer, 2009.
[ bibtex |
abstract |
DOI |
PDF |
slides ]
When describing the resource usage of a program, it is
usual to talk in asymptotic terms, such as
the well-known “big O” notation, whereby we focus
on the behaviour of the program for large input data
and make a rough approximation by considering as
equivalent programs whose resource usage grows at
the same rate. Motivated by the existence of
non-asymptotic resource usage analyzers, in
this paper, we develop a novel transformation from a
non-asymptotic cost function (which can be produced
by multiple resource analyzers) into its asymptotic
form. Our transformation aims at producing tight
asymptotic forms which do not contain
redundant subexpressions (i.e., expressions
asymptotically subsumed by others). Interestingly,
we integrate our transformation at the heart of a
cost analyzer to generate asymptotic upper
bounds without having to first compute their
non-asymptotic counterparts. Our experimental
results show that, while non-asymptotic cost
functions become very complex, their asymptotic
forms are much more compact and manageable. This is
essential to improve scalability and to enable the
application of cost analysis in resource-aware
verification/certification.
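A toy rendering of the transformation in Python (the term representation is invented): a cost function is a sum of monomials c * n^a * log(n)^b stored as (c, a, b) triples, and a term is dropped when another term asymptotically subsumes it.

def asymptotic(terms):
    """Keep only the asymptotically maximal degrees; n^a log(n)^b
    subsumes n^a' log(n)^b' iff (a, b) > (a', b') lexicographically."""
    degrees = {(a, b) for (_, a, b) in terms}
    return [d for d in degrees if not any(e > d for e in degrees)]

# 3n^2 + 5n^2 log n + 7n + 42  ~~>  O(n^2 log n)
print(asymptotic([(3, 2, 0), (5, 2, 1), (7, 1, 0), (42, 0, 0)]))  # [(2, 1)]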
@inproceedings{AlbertAAGP09,
author = {Elvira Albert and Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {Asymptotic {R}esource {U}sage {B}ounds},
booktitle = {Programming Languages and Systems, 7th Asian Symposium, {APLAS} 2009,
Seoul, Korea, December 14-16, 2009. Proceedings},
year = 2009,
editor = {Zhenjiang Hu},
series = {Lecture Notes in Computer Science},
volume = {5904},
pages = {294-310},
publisher = {Springer},
doi = {10.1007/978-3-642-10672-9_21},
slides = {http://costa.fdi.ucm.es/papers/costa/AlbertAAGP09-slides.pdf}
}
|
[64]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Field-Sensitive Value Analysis by Field-Insensitive
Analysis.
In Ana Cavalcanti and Dennis Dams, editors, 16th International
Symposium on Formal Methods, FM'09, volume 5850 of Lecture Notes in
Computer Science, pages 370-386. Springer, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
Shared mutable data structures pose major problems in
static analysis and most analyzers are unable to
keep track of the values of numeric variables
stored in the heap. In this paper, we first
identify sufficient conditions under which heap
allocated numeric variables in object oriented
programs (i.e., numeric fields) can be handled as
non-heap allocated variables. Then, we present a
static analysis to infer which numeric fields
satisfy these conditions at the level of
(sequential) bytecode. This allows
instrumenting the code with ghost variables
which make such numeric fields observable to any
field-insensitive value analysis. Our experimental
results in termination analysis show that we greatly
enlarge the class of analyzable programs with a
reasonable overhead.
@inproceedings{AlbertAGP09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{F}ield-{S}ensitive {V}alue {A}nalysis by {F}ield-{I}nsensitive {A}nalysis},
booktitle = {16th International Symposium on Formal Methods, FM'09},
year = 2009,
editor = {Ana Cavalcanti and Dennis Dams},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {5850},
pages = {370-386},
isbn = {978-3-642-05088-6},
doi = {10.1007/978-3-642-05089-3_24},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[65]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Live heap space analysis for languages with garbage collection.
In Hillel Kolodner and Guy L. Steele Jr., editors, Proceedings
of the 8th International Symposium on Memory Management, ISMM 2009, Dublin,
Ireland, June 19-20, 2009, pages 129-138. ACM, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
The peak heap consumption of a program is the
maximum size of the live data on the heap
during the execution of the program, i.e., the
minimum amount of heap space needed to run the
program without exhausting the memory. It is
well-known that garbage collection (GC) makes the
problem of predicting the memory required to run a
program difficult. This paper presents, to the best of
our knowledge, the first live heap space
analysis for garbage-collected languages which
infers accurate upper bounds on the peak heap usage
of a program's execution that are not restricted to
any complexity class, i.e., we can infer
exponential, logarithmic, polynomial, etc.,
bounds. Our analysis is developed for a
(sequential) object-oriented bytecode language with
a scoped-memory manager that reclaims
unreachable memory when methods return. We also show
how our analysis can accommodate other GC schemes
which are closer to the ideal GC which
collects objects as soon as they become unreachable.
The practicality of our approach is experimentally
evaluated on a prototype implementation. We
demonstrate that it is fully automatic, reasonably
accurate and efficient by inferring live heap space
bounds for a standardized set of benchmarks, the
JOlden suite.
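The scoped-memory model can be made concrete with a small simulation (a sketch of the semantics, not the analysis itself): memory allocated by a method is reclaimed when it returns, and the quantity the analysis bounds is the peak of the live size.

class ScopedHeap:
    def __init__(self):
        self.frames, self.live, self.peak = [], 0, 0
    def enter(self):                    # a method call opens a scope
        self.frames.append(0)
    def alloc(self, size):
        self.frames[-1] += size
        self.live += size
        self.peak = max(self.peak, self.live)
    def ret(self):                      # a return reclaims its scope
        self.live -= self.frames.pop()

h = ScopedHeap()
h.enter(); h.alloc(8)
h.enter(); h.alloc(16); h.ret()         # callee memory reclaimed
h.enter(); h.alloc(4);  h.ret()
print(h.live, h.peak)                   # 8 24, not the 28 bytes allocated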
@inproceedings{AlbertGG09,
author = {Elvira Albert and Samir Genaim and Miguel G{\'o}mez-Zamalloa},
title = {Live heap space analysis for languages with garbage collection},
editor = {Hillel Kolodner and Guy L. Steele Jr.},
booktitle = {Proceedings of the 8th International Symposium on Memory
Management, ISMM 2009, Dublin, Ireland, June 19-20, 2009},
year = 2009,
pages = {129-138},
publisher = {ACM},
isbn = {978-1-60558-347-1},
ee = {http://doi.acm.org/10.1145/1542431.1542450},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[66]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Cost Relation Systems: a Language-Independent Target
Language for Cost Analysis.
In Spanish Conference on Programming and Computer Languages
(PROLE'08), volume 248 of Electronic Notes in Theoretical Computer
Science, pages 31-46. Elsevier, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
Cost analysis aims at obtaining information about the
execution cost of programs. This paper studies
cost relation systems (CRSs): the sets of
recursive equations used in cost analysis in order
to capture the execution cost of programs in terms
of the size of their input arguments. We investigate
the notion of CRS from a general perspective which
is independent of the particular cost analysis
framework. Our main contributions are: we provide a
formal definition of execution cost and of CRS which
is not tied to a particular programming language; we
present the notion of a sound CRS, i.e., one which
correctly approximates the cost of the corresponding
program; and we identify the differences with recurrence
relation systems, their possible applications, and the
new challenges that they bring about. Our general
framework is illustrated by instantiating it to cost
analysis of Java bytecode, Haskell, and Prolog.
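For readers unfamiliar with the notion, a cost relation system can be written down directly; the executable Python encoding below is only illustrative. The equations abstract away the program and express cost purely in terms of input size:

#   C(n) = 3              if n = 0     (cost of the base case)
#   C(n) = 5 + C(n - 1)   if n > 0     (cost per iteration plus the rest)
def C(n):
    return 3 if n == 0 else 5 + C(n - 1)

# Any concrete run on input of size n has cost C(n) = 5n + 3.
assert all(C(n) == 5 * n + 3 for n in range(50))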
@inproceedings{AlbertAGP08b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{C}ost {R}elation {S}ystems: a
{L}anguage-{I}ndependent {T}arget {L}anguage for
{C}ost {A}nalysis},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'08)},
series = {Electronic Notes in Theoretical Computer Science},
volume = {248},
year = {2009},
pages = {31-46},
publisher = {Elsevier},
doi = {10.1016/j.entcs.2009.07.057},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[67]
|
Miguel Gómez-Zamalloa, Elvira Albert, and Germán Puebla.
Modular Decompilation of Low-Level Code by Partial
Evaluation.
In 8th IEEE International Working Conference on Source Code
Analysis and Manipulation (SCAM'08), pages 239-248. IEEE Computer Society,
September 2008.
[ bibtex |
abstract |
PDF ]
@inproceedings{Gomez-ZAP08,
author = {Miguel G\'{o}mez-Zamalloa and Elvira Albert and Germ{\'a}n Puebla},
title = {{M}odular {D}ecompilation of {L}ow-{L}evel {C}ode by {P}artial {E}valuation},
booktitle = {8th IEEE International Working Conference on Source Code Analysis and Manipulation (SCAM'08)},
publisher = {IEEE Computer Society},
year = {2008},
pages = {239--248},
month = sep
}
|
[68]
|
Elvira Albert, John P. Gallagher, Miguel Gómez-Zamalloa, and Germán Puebla.
Type-based Homeomorphic Embedding and its Applications to
Online Partial Evaluation.
In 17th International Symposium on Logic-based Program Synthesis
and Transformation (LOPSTR'07), volume 4915 of Lecture Notes in
Computer Science, pages 23-42. Springer-Verlag, February 2008.
[ bibtex |
abstract |
PDF ]
@inproceedings{AlbertGGP08,
author = {Elvira Albert and John P. Gallagher and Miguel G\'{o}mez-Zamalloa and Germ{\'a}n Puebla},
title = {{T}ype-based {H}omeomorphic {E}mbedding and its
{A}pplications to {O}nline {P}artial {E}valuation},
booktitle = {17th International Symposium on Logic-based Program
Synthesis and Transformation (LOPSTR'07)},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {4915},
pages = {23--42},
isbn = {978-3-540-78768-6},
npages = 20,
year = {2008},
month = feb
}
|
[69]
|
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Automatic Inference of Upper Bounds for Recurrence Relations in Cost
Analysis.
In María Alpuente and Germán Vidal, editors, Static
Analysis, 15th International Symposium, SAS 2008, Valencia, Spain, July
16-18, 2008. Proceedings, volume 5079 of Lecture Notes in Computer
Science, pages 221-237. Springer, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
non-trivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, and the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
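As a hint of what the dedicated solver does (the equations are hypothetical, not COSTA output), consider the recurrence C(n) = 7 + C(n // 2) with C(0) = 1; unrolling it performs floor(log2 n) + 1 halvings, giving the closed-form bound checked below.

import math

def C(n):
    return 1 if n == 0 else 7 + C(n // 2)

def closed_form(n):                       # 7 * (floor(log2 n) + 1) + 1
    return 7 * (math.floor(math.log2(n)) + 1) + 1

assert all(C(n) <= closed_form(n) for n in range(1, 10_000))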
@inproceedings{AlbertAGP08,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Germ{\'a}n Puebla},
title = {Automatic Inference of Upper Bounds for Recurrence Relations
in Cost Analysis},
booktitle = {Static Analysis, 15th International Symposium, SAS 2008,
Valencia, Spain, July 16-18, 2008. Proceedings},
editor = {Mar\'{\i}a Alpuente and
Germ{\'a}n Vidal},
year = {2008},
pages = {221-237},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {5079},
isbn = {978-3-540-69163-1},
doi = {10.1007/978-3-540-69166-2_15},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[70]
|
Elvira Albert, Puri Arenas, Michael Codish, Samir Genaim, Germán Puebla,
and Damiano Zanardini.
Termination Analysis of Java Bytecode.
In Gilles Barthe and Frank S. de Boer, editors, Formal Methods
for Open Object-Based Distributed Systems, 10th IFIP WG 6.1 International
Conference, FMOODS 2008, Oslo, Norway, June 4-6, 2008, Proceedings, volume
5051 of Lecture Notes in Computer Science, pages 2-18. Springer, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
Termination analysis has received
considerable attention, traditionally in the context
of declarative programming, and recently also for
imperative languages. In existing approaches,
termination analysis is performed on source
programs. However, there are many situations,
including mobile code, where only the compiled code
is available. In this work we present an automatic
termination analysis for sequential Java Bytecode
programs. Such analysis presents all of the
challenges of analyzing a low-level language as well
as those introduced by object-oriented languages.
Interestingly, given a bytecode program, we produce
a constraint logic program (CLP) whose
termination entails termination of the bytecode
program. This allows applying the large body of work
in termination of CLP programs to termination of
Java bytecode. A prototype analyzer is described
and initial experimentation is reported.
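A schematic Python check of the underlying proof obligation (the loop and the ranking function are invented for illustration): the bytecode loop `while (i < n) i++` induces the size-abstracted transition (i, n) -> (i + 1, n) under the guard i < n, and termination follows from a ranking function that is bounded below and strictly decreases.

def check_ranking(rank, guard, step, samples):
    for s in samples:
        if guard(*s):
            t = step(*s)
            assert rank(*s) >= 0 and rank(*t) < rank(*s)
    return True

guard = lambda i, n: i < n
step  = lambda i, n: (i + 1, n)
rank  = lambda i, n: n - i              # bounded and decreasing

print(check_ranking(rank, guard, step,
                    [(i, n) for n in range(20) for i in range(20)]))  # True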
@inproceedings{AlbertACGPZ08,
author = {Elvira Albert and Puri Arenas and Michael Codish and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Termination Analysis of Java Bytecode},
booktitle = {Formal Methods for Open Object-Based Distributed Systems, 10th IFIP WG 6.1 International Conference, FMOODS 2008, Oslo, Norway, June 4-6, 2008, Proceedings},
editor = {Gilles Barthe and Frank S. de Boer},
year = {2008},
pages = {2--18},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {5051},
isbn = {978-3-540-68862-4},
doi = {10.1007/978-3-540-68863-1_2},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[71]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Removing Useless Variables in Cost Analysis of Java
Bytecode.
In Roger L. Wainwright and Hisham Haddad, editors, Proceedings
of the 2008 ACM Symposium on Applied Computing (SAC), Fortaleza, Ceara,
Brazil, March 16-20, 2008, pages 368-375. ACM, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
Automatic cost analysis has interesting
applications in the context of verification and
certification of mobile code. For instance, the
code receiver can use cost information in order to
decide whether to reject mobile code which has too
large cost requirements in terms of computing
resources (in time and/or space) or billable events
(SMSs sent, bandwidth required). Existing cost
analyses for a variety of languages describe the
resource consumption of programs by means of
Cost Equation Systems (CESs), which are
similar to, but more general than, recurrence
equations. CESs express the cost of a program in
terms of the size of its input data. In a further
step, a closed form (i.e., non-recursive) solution
or upper bound can sometimes be found by using
existing Computer Algebra Systems (CASs), such as
Maple and Mathematica. In this work, we focus on
cost analysis of Java bytecode, a language which is
widely used in the context of mobile code and we
study the problem of identifying variables which are
useless in the sense that they do not affect
the execution cost and therefore can be ignored by
cost analysis. We identify two classes of useless
variables and propose automatic analysis techniques
to detect them. The first class corresponds to
stack variables that can be replaced by
program variables or constant values. The second
class corresponds to variables whose value is
cost-irrelevant, i.e., does not affect the
cost of the program. We propose an algorithm,
inspired by static slicing, which safely
identifies cost-irrelevant variables. The benefits
of eliminating useless variables are two-fold: (1)
cost analysis without useless variables can be more
efficient and (2) resulting CESs are more likely to
be solvable by existing CASs.
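The slicing idea admits a very small model (the three-address program and variable names are made up): a variable is cost-relevant iff a loop guard transitively depends on it through def-use chains.

deps = {             # x depends on ys, from assignments x := f(ys)
    "i": {"i"},      # i := i + 1
    "s": {"s", "a"}, # s := s + a   (result value; adds no cost)
    "g": {"i", "n"}, # loop guard   i < n
}

def relevant(seeds, deps):
    todo, seen = list(seeds), set()
    while todo:
        v = todo.pop()
        if v not in seen:
            seen.add(v)
            todo += deps.get(v, ())
    return seen

# 's' and 'a' never reach a guard, so cost analysis may drop them.
print(relevant({"g"}, deps))   # {'g', 'i', 'n'}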
@inproceedings{AlbertAGPZ08,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Removing {U}seless {V}ariables in {C}ost {A}nalysis of {J}ava {B}ytecode},
editor = {Roger L. Wainwright and Hisham Haddad},
booktitle = {Proceedings of the 2008 ACM Symposium on Applied Computing (SAC), Fortaleza, Ceara, Brazil, March 16-20, 2008},
year = {2008},
pages = {368--375},
publisher = {ACM},
isbn = {978-1-59593-753-7},
doi = {10.1145/1363686.1363779},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[72]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
A Generic Framework for the Cost Analysis of Java
Bytecode.
In Pimentel Ernesto, editor, Spanish Conference on Programming
and Computer Languages (PROLE'07), pages 61-70, September 2007.
[ bibtex |
abstract |
PDF ]
Cost analysis of Java bytecode is
complicated by its unstructured control flow, the
use of an operand stack and its object-oriented
programming features (like dynamic dispatching).
This paper addresses these problems and develops a
generic framework for the automatic cost analysis of
sequential Java bytecode. Our method generates
cost relations which define at compile-time
the cost of programs as a function of their input
data size. To the best of our knowledge, this is the
first approach to the automatic cost analysis of
Java bytecode.
@inproceedings{AlbertAGPZ07b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {A {G}eneric {F}ramework for the {C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'07)},
year = {2007},
pages = {61--70},
month = sep,
editor = {Pimentel Ernesto}
}
|
[73]
|
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Java Bytecode.
In Rocco De Nicola, editor, Programming Languages and Systems,
16th European Symposium on Programming, ESOP 2007, Held as Part of the
Joint European Conferences on Theory and Practice of Software, ETAPS 2007,
Braga, Portugal, March 24 - April 1, 2007, Proceedings, volume 4421 of
Lecture Notes in Computer Science, pages 157-172. Springer-Verlag, March
2007.
[ bibtex |
abstract |
DOI |
PDF ]
Cost analysis of Java bytecode is
complicated by its unstructured control flow, the
use of an operand stack and its object-oriented
programming features (like dynamic dispatching).
This paper addresses these problems and develops a
generic framework for the automatic cost analysis of
sequential Java bytecode. Our method generates
cost relations which define at compile-time
the cost of programs as a function of their input
data size. To the best of our knowledge, this is the
first approach to the automatic cost analysis of
Java bytecode.
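One of the obstacles named above, the operand stack, is commonly removed by flattening stack positions into named variables; the mini bytecode below is invented, but the resulting three-address code shows why cost analysis can then treat stack slots like ordinary locals.

def flatten(code):
    out, h = [], 0                      # h = current stack height
    for ins in code:
        if ins[0] in ("load", "const"): # push  =>  s_h := operand
            out.append(f"s{h} := {ins[1]}"); h += 1
        elif ins[0] == "add":           # add   =>  s_{h-2} := s_{h-2} + s_{h-1}
            out.append(f"s{h-2} := s{h-2} + s{h-1}"); h -= 1
        elif ins[0] == "store":         # pop   =>  variable := s_{h-1}
            h -= 1; out.append(f"{ins[1]} := s{h}")
    return out

# x = x + 1 compiled to stack code, then flattened:
print(flatten([("load", "x"), ("const", 1), ("add",), ("store", "x")]))
# ['s0 := x', 's1 := 1', 's0 := s0 + s1', 'x := s0']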
@inproceedings{AlbertAGPZ07,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Programming Languages and Systems, 16th European Symposium on Programming,
{ESOP} 2007, Held as Part of the Joint European Conferences on Theory
and Practice of Software, {ETAPS} 2007, Braga, Portugal, March 24
- April 1, 2007, Proceedings},
year = 2007,
editor = {Rocco De Nicola},
series = {Lecture Notes in Computer Science},
month = mar,
publisher = {Springer-Verlag},
volume = {4421},
pages = {157--172},
isbn = {978-3-540-71314-2},
doi = {10.1007/978-3-540-71316-6_12},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[74]
|
Elvira Albert, Miguel Gómez-Zamalloa, Laurent Hubert, and Germán Puebla.
Verification of Java Bytecode using Analysis and
Transformation of Logic Programs.
In Ninth International Symposium on Practical Aspects of
Declarative Languages (PADL 2007), number 4354 in Lecture Notes in Computer
Science, pages 124-139. Springer-Verlag, January 2007.
[ bibtex |
abstract |
PDF ]
@inproceedings{AlbertGHP07,
author = {Elvira Albert and Miguel G\'{o}mez-Zamalloa and Laurent Hubert and Germ{\'a}n Puebla},
title = {{V}erification of {J}ava {B}ytecode using {A}nalysis and
{T}ransformation of {L}ogic {P}rograms},
booktitle = {Ninth International Symposium on Practical Aspects of
Declarative Languages (PADL 2007)},
year = 2007,
month = jan,
pages = {124--139},
isbn = {978-3-540-69608-7},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
number = {4354}
}
|
[75]
|
Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Heap Space Analysis for Java Bytecode.
In ISMM '07: Proceedings of the 6th International Symposium on
Memory Management, pages 105-116, New York, NY, USA, 2007. ACM Press.
[ bibtex |
abstract |
DOI |
PDF ]
This article presents a heap space analysis for
(sequential) Java bytecode. The analysis generates
heap space cost relations which define at
compile-time the heap consumption of a program as a
function of its data size. These relations can be
used to obtain upper bounds on the heap space
allocated during the execution of the different
methods. In addition, we describe how to refine the
cost relations, by relying on escape
analysis, in order to take into account the heap
space that can be safely deallocated by the garbage
collector upon exit from a corresponding
method. These refined cost relations are then used
to infer upper bounds on the active heap
space upon methods return. Example applications
for the analysis consider inference of constant heap
usage and heap usage proportional to the data size
(including polynomial and exponential heap
consumption). Our prototype implementation is
reported and demonstrated by means of a series of
examples which illustrate how the analysis naturally
encompasses standard data-structures like lists,
trees and arrays with several dimensions written in
object-oriented programming style.
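The escape-based refinement can be illustrated with made-up numbers: allocations that do not escape a method are deducted from the bound when it returns, so the analysis reports active heap rather than total allocation.

def active_heap_bound(allocs):
    """allocs: (size, escapes) pairs for one method's allocations."""
    total = sum(size for size, _ in allocs)
    captured = sum(size for size, esc in allocs if not esc)
    return total, total - captured   # (allocated, active upon return)

print(active_heap_bound([(16, True), (24, False), (8, False)]))  # (48, 16)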
@inproceedings{AlbertGG07,
author = {Elvira Albert and Samir Genaim and Miguel G{\'o}mez-Zamalloa},
title = {{H}eap {S}pace {A}nalysis for {J}ava {B}ytecode},
booktitle = {ISMM '07: Proceedings of the 6th International Symposium on Memory Management},
year = 2007,
pages = {105--116},
location = {Montreal, Quebec, Canada},
publisher = {ACM Press},
address = {New York, NY, USA},
isbn = {978-1-59593-893-0},
ee = {http://doi.acm.org/10.1145/1296907.1296922},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[76]
|
Elvira Albert, Puri Arenas-Sánchez, Germán Puebla, and Manuel V.
Hermenegildo.
Reduced Certificates for Abstraction-Carrying Code.
In Logic Programming, 22nd International Conference, ICLP 2006,
Seattle, WA, USA, August 17-20, 2006, Proceedings, volume 4079, pages
163-178. Springer, 2006.
[ bibtex |
abstract |
DOI ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for mobile code safety in which the code supplier provides a program together with an abstraction whose validity entails compliance with a predefined safety policy. The abstraction plays thus the role of safety certificate and its generation is carried out automatically by a fixed-point analyzer. The advantage of providing a (fixed-point) abstraction to the code consumer is that its validity is checked in a single pass of an abstract interpretation-based checker. A main challenge is to reduce the size of certificates as much as possible while at the same time not increasing checking time. We introduce the notion of reduced certificate which characterizes the subset of the abstraction which a checker needs in order to validate (and re-construct) the full certificate in a single pass. Based on this notion, we instrument a generic analysis algorithm with the necessary extensions in order to identify the information relevant to the checker. We also provide a correct checking algorithm together with sufficient conditions for ensuring its completeness. The experimental results within the CiaoPP system show that our proposal is able to greatly reduce the size of certificates in practice.
@inproceedings{DBLP-conf-iclp-AlbertAPH06,
author = {Elvira Albert and
Puri Arenas-S{\'a}nchez and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Reduced Certificates for Abstraction-Carrying Code}},
booktitle = {{Logic Programming, 22nd International Conference, ICLP 2006, Seattle, WA, USA, August 17-20, 2006, Proceedings}},
year = {2006},
pages = {163-178},
volume = {4079},
ee = {http://dx.doi.org/10.1007/11799573_14},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[77]
|
Elvira Albert, Puri Arenas, and Germán Puebla.
An Incremental Approach to Abstraction-Carrying Code.
In Logic for Programming, Artificial Intelligence, and
Reasoning, 13th International Conference, LPAR 2006, Phnom Penh, Cambodia,
November 13-17, 2006, Proceedings, volume 4246, pages 377-391. Springer,
2006.
[ bibtex |
abstract |
DOI ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for Proof-Carrying Code (PCC) in which the code supplier provides a program together with an abstraction (or abstract model of the program) whose validity entails compliance with a predefined safety policy. Existing approaches for PCC are developed under the assumption that the consumer reads and validates the entire program w.r.t. the full certificate at once, in a non incremental way. In the context of ACC, we propose an incremental approach to PCC for the generation of certificates and the checking of untrusted updates of a (trusted) program, i.e., when a producer provides a modified version of a previously validated program. Our proposal is that, if the consumer keeps the original (fixed-point) abstraction, it is possible to provide only the program updates and the incremental certificate (i.e., the difference of abstractions). Furthermore, it is now possible to define an incremental checking algorithm which, given the new updates and its incremental certificate, only re-checks the fixpoint for each procedure affected by the updates and the propagation of the effect of these fixpoint changes. As a consequence, both certificate transmission time and checking time can be reduced significantly.
@inproceedings{DBLP-conf-lpar-AlbertAP06,
author = {Elvira Albert and
Puri Arenas and
Germ{\'a}n Puebla},
title = {{An Incremental Approach to Abstraction-Carrying Code}},
booktitle = {{Logic for Programming, Artificial Intelligence, and Reasoning, 13th International Conference, LPAR 2006, Phnom Penh, Cambodia, November 13-17, 2006, Proceedings}},
year = {2006},
volume = {4246},
pages = {377-391},
ee = {http://dx.doi.org/10.1007/11916277_26},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[78]
|
Germán Puebla, Elvira Albert, and Manuel V. Hermenegildo.
Abstract Interpretation with Specialized Definitions.
In Static Analysis, 13th International Symposium, SAS 2006,
Seoul, Korea, August 29-31, 2006, Proceedings, volume 4134, pages 107-126.
Springer, 2006.
[ bibtex |
abstract |
DOI ]
The relationship between abstract interpretation and partial evaluation has received considerable attention and (partial) integrations have been proposed starting from both the partial evaluation and abstract interpretation perspectives. In this work we present what we argue is the first generic algorithm for efficient and precise integration of abstract interpretation and partial evaluation from an abstract interpretation perspective. Taking as starting point state-of-the-art algorithms for context-sensitive, polyvariant abstract interpretation and (abstract) partial evaluation of logic programs, we present an algorithm which combines the best of both worlds. Key ingredients include the accurate success propagation inherent to abstract interpretation and the powerful program transformations achievable by partial deduction. In our algorithm, the calls which appear in the analysis graph are not analyzed w.r.t. the original definition of the procedure but w.r.t. specialized definitions of these procedures. Such specialized definitions are obtained by applying both unfolding and abstract executability. Also, our framework is parametric w.r.t. different control strategies and abstract domains. Different combinations of these parameters correspond to existing algorithms for program analysis and specialization. Our approach efficiently computes strictly more precise results than those achievable by each of the individual techniques. The algorithm is one of the key components of CiaoPP, the analysis and specialization system of the Ciao compiler.
@inproceedings{DBLP-conf-sas-PueblaAH06,
author = {Germ{\'a}n Puebla and
Elvira Albert and
Manuel V. Hermenegildo},
title = {{Abstract Interpretation with Specialized Definitions}},
booktitle = {{Static Analysis, 13th International Symposium, SAS 2006, Seoul, Korea, August 29-31, 2006, Proceedings}},
year = {2006},
pages = {107-126},
publisher = {{Springer}},
ee = {http://dx.doi.org/10.1007/11823230_8},
volume = {4134},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[79]
|
Germán Puebla, Elvira Albert, and Manuel V. Hermenegildo.
A Generic Framework for the Analysis and Specialization of Logic
Programs.
In Logic Programming, 21st International Conference, ICLP 2005,
Sitges, Spain, October 2-5, 2005, Proceedings, pages 407-409. Springer,
2005.
[ bibtex |
abstract |
DOI ]
The relationship between abstract interpretation [2] and partial evaluation [5] has received considerable attention and (partial) integrations have been proposed starting from both the partial deduction (see e.g. [6] and its references) and abstract interpretation perspectives. Abstract interpretation-based analyzers (such as the CiaoPP analyzer [9,4]) generally compute a program analysis graph [1] in order to propagate (abstract) call and success information by performing fixpoint computations when needed. On the other hand, partial deduction methods [7] incorporate powerful techniques for on-line specialization including (concrete) call propagation and unfolding.
@inproceedings{DBLP-conf-iclp-PueblaAH05,
author = {Germ{\'a}n Puebla and
Elvira Albert and
Manuel V. Hermenegildo},
title = {{A Generic Framework for the Analysis and Specialization
of Logic Programs}},
booktitle = {{Logic Programming, 21st International Conference, ICLP 2005, Sitges, Spain, October 2-5, 2005, Proceedings}},
year = {2005},
pages = {407-409},
ee = {http://dx.doi.org/10.1007/11562931_32},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[80]
|
Elvira Albert, Germán Puebla, and John P. Gallagher.
Non-leftmost Unfolding in Partial Evaluation of Logic Programs with
Impure Predicates.
In Logic Based Program Synthesis and Transformation, 15th
International Symposium, LOPSTR 2005, London, UK, September 7-9, 2005,
Revised Selected Papers, pages 115-132. Springer, 2005.
[ bibtex |
abstract |
DOI ]
Partial evaluation of logic programs which contain impure predicates poses non-trivial challenges. Impure predicates include those which produce side-effects, raise errors (or exceptions), and those whose truth value varies according to the degree of instantiation of arguments. In particular, non-leftmost unfolding steps can produce incorrect results since the independence of the computation rule no longer holds in the presence of impure predicates. Existing proposals allow non-leftmost unfolding steps, but at the cost of accuracy: bindings and failure are not propagated backwards to predicates which are potentially impure. In this work we propose a partial evaluation scheme which substantially reduces the situations in which such backpropagation has to be avoided. With this aim, our partial evaluator takes into account the information about purity of predicates expressed in terms of assertions. This allows some optimizations which are not feasible using existing partial evaluation techniques. We argue that our proposal goes beyond existing ones in that it is a) accurate, since the classification of pure vs impure is done at the level of atoms instead of predicates, b) extensible, as the information about purity can be added to programs using assertions without having to modify the partial evaluator itself, and c) automatic, since (backwards) analysis can be used to automatically infer the required assertions. Our approach has been implemented in the context of CiaoPP, the abstract interpretation-based preprocessor of the Ciao logic programming system.
@inproceedings{DBLP-conf-lopstr-AlbertPG05,
author = {Elvira Albert and
Germ{\'a}n Puebla and
John P. Gallagher},
title = {{Non-leftmost Unfolding in Partial Evaluation of Logic Programs
with Impure Predicates}},
booktitle = {{Logic Based Program Synthesis and Transformation, 15th International Symposium, LOPSTR 2005, London, UK, September 7-9, 2005, Revised Selected Papers}},
year = {2005},
pages = {115-132},
ee = {http://dx.doi.org/10.1007/11680093_8},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[81]
|
John P. Gallagher, Germán Puebla, and Elvira Albert.
Converting One Type-Based Abstract Domain to Another.
In Logic Based Program Synthesis and Transformation, 15th
International Symposium, LOPSTR 2005, London, UK, September 7-9, 2005,
Revised Selected Papers, pages 147-162. Springer, 2005.
[ bibtex |
abstract |
DOI ]
The specific problem that motivates this paper is how to obtain abstract descriptions of the meanings of imported predicates (such as built-ins) that can be used when analysing a module of a logic program with respect to some abstract domain. We assume that abstract descriptions of the imported predicates are available in terms of some “standard” assertions. The first task is to define an abstract domain corresponding to the assertions for a given module and express the descriptions as objects in that domain. Following that they are automatically transformed into the analysis domain of interest. We develop a method which has been applied in order to generate call and success patterns from the CiaoPP assertions for built-ins, for any given regular type-based domain. In the paper we present the method as an instance of the more general problem of mapping elements of one abstract domain to another, with as little loss in precision as possible.
@inproceedings{DBLP-conf-lopstr-GallagherPA05,
author = {John P. Gallagher and
Germ{\'a}n Puebla and
Elvira Albert},
title = {{Converting One Type-Based Abstract Domain to Another}},
booktitle = {{Logic Based Program Synthesis and Transformation, 15th International Symposium, LOPSTR 2005, London, UK, September 7-9, 2005, Revised Selected Papers}},
year = {2005},
pages = {147-162},
ee = {http://dx.doi.org/10.1007/11680093_10},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[82]
|
Manuel V. Hermenegildo, Elvira Albert, Pedro López-García, and
Germán Puebla.
Abstraction carrying code and resource-awareness.
In Proceedings of the 7th International ACM SIGPLAN Conference
on Principles and Practice of Declarative Programming, July 11-13 2005,
Lisbon, Portugal, pages 1-11. ACM, 2005.
[ bibtex |
abstract |
DOI ]
Proof-Carrying Code (PCC) is a general approach to mobile code safety in which the code supplier augments the program with a certificate (or proof). The intended benefit is that the program consumer can locally validate the certificate w.r.t. the "untrusted" program by means of a certificate checker, a process which should be much simpler, more efficient, and more automatic than generating the original proof. Abstraction Carrying Code (ACC) is an enabling technology for PCC in which an abstract model of the program plays the role of certificate. The generation of the certificate, i.e., the abstraction, is automatically carried out by an abstract interpretation-based analysis engine, which is parametric w.r.t. different abstract domains. While the analyzer on the producer side typically has to compute a semantic fixpoint in a complex, iterative process, on the receiver it is only necessary to check that the certificate is indeed a fixpoint of the abstract semantics equations representing the program. This is done in a single pass in a much more efficient process. ACC addresses the fundamental issues in PCC and opens the door to the applicability of the large body of frameworks and domains based on abstract interpretation as enabling technology for PCC. We present an overview of ACC and describe in a tutorial fashion an application to the problem of resource-aware security in mobile code. Essentially, the information computed by a cost analyzer is used to generate cost certificates which attest to a safe and efficient use of the mobile code. A receiving side can then reject code whose cost certificates it cannot validate or whose cost requirements in terms of computing resources (in time and/or space) are too large, and accept mobile code which meets the established requirements.
@inproceedings{DBLP-conf-ppdp-HermenegildoALP05,
author = {Manuel V. Hermenegildo and
Elvira Albert and
Pedro L{\'o}pez-Garc\'{\i}a and
Germ{\'a}n Puebla},
title = {{Abstraction carrying code and resource-awareness}},
booktitle = {{Proceedings of the 7th International ACM SIGPLAN Conference on Principles and Practice of Declarative Programming, July 11-13 2005, Lisbon, Portugal}},
year = {2005},
pages = {1-11},
ee = {http://doi.acm.org/10.1145/1069774.1069775},
publisher = {{ACM}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[83]
|
Manuel V. Hermenegildo, Elvira Albert, Pedro López-García, and
Germán Puebla.
Some Techniques for Automated, Resource-Aware Distributed and Mobile
Computing in a Multi-paradigm Programming System.
In Euro-Par 2004 Parallel Processing, 10th International
Euro-Par Conference, Pisa, Italy, August 31-September 3, 2004, Proceedings,
volume 3149, pages 21-36. Springer, 2004.
[ bibtex |
abstract |
DOI ]
Distributed parallel execution systems speed up applications by splitting tasks into processes whose execution is assigned to different receiving nodes in a high-bandwidth network. On the distributing side, a fundamental problem is grouping and scheduling such tasks such that each one involves sufficient computational cost when compared to the task creation and communication costs and other such practical overheads. On the receiving side, an important issue is to have some assurance of the correctness and characteristics of the code received and also of the kind of load the particular task is going to pose, which can be specified by means of certificates. In this paper we present in a tutorial way a number of general solutions to these problems, and illustrate them through their implementation in the Ciao multi-paradigm language and program development environment. This system includes facilities for parallel and distributed execution, an assertion language for specifying complex programs properties (including safety and resource-related properties), and compile-time and run-time tools for performing automated parallelization and resource control, as well as certification of programs with resource consumption assurances and efficient checking of such certificates.
@inproceedings{DBLP-conf-europar-HermenegildoALP04,
author = {Manuel V. Hermenegildo and
Elvira Albert and
Pedro L{\'o}pez-Garc\'{\i}a and
Germ{\'a}n Puebla},
title = {{Some Techniques for Automated, Resource-Aware Distributed
and Mobile Computing in a Multi-paradigm Programming System}},
booktitle = {{Euro-Par 2004 Parallel Processing, 10th International Euro-Par Conference, Pisa, Italy, August 31-September 3, 2004, Proceedings}},
year = {2004},
pages = {21-36},
ee = {http://dx.doi.org/10.1007/978-3-540-27866-5_3},
publisher = {{Springer}},
volume = {3149},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[84]
|
Elvira Albert, Germán Puebla, and Manuel V. Hermenegildo.
Abstract Interpretation-Based Mobile Code Certification.
In Logic Programming, 20th International Conference, ICLP 2004,
Saint-Malo, France, September 6-10, 2004, Proceedings, pages 446-447.
Springer, 2004.
[ bibtex |
abstract |
DOI ]
Current approaches to mobile code safety – inspired by the technique of Proof-Carrying Code (PCC) [4] – associate safety information (in the form of a certificate) to programs. The certificate (or proof) is created by the code supplier at compile time, and packaged along with the untrusted code. The consumer who receives the code+certificate package can then run a checker which, by a straightforward inspection of the code and the certificate, is able to verify the validity of the certificate and thus compliance with the safety policy. The main practical difficulty of PCC techniques is in generating safety certificates which at the same time: i) allow expressing interesting safety properties, ii) can be generated automatically and, iii) are easy and efficient to check.
@inproceedings{DBLP-conf-iclp-AlbertPH04,
author = {Elvira Albert and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Abstract Interpretation-Based Mobile Code Certification}},
booktitle = {{Logic Programming, 20th International Conference, ICLP 2004, Saint-Malo, France, September 6-10, 2004, Proceedings}},
year = {2004},
pages = {446-447},
ee = {http://dx.doi.org/10.1007/978-3-540-27775-0_31},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[85]
|
Germán Puebla, Elvira Albert, and Manuel V. Hermenegildo.
Efficient Local Unfolding with Ancestor Stacks for Full Prolog.
In Logic Based Program Synthesis and Transformation, 14th
International Symposium, LOPSTR 2004, Verona, Italy, August 26-28, 2004,
Revised Selected Papers, pages 149-165. Springer, 2004.
[ bibtex |
abstract |
DOI ]
The integration of powerful partial evaluation methods into practical compilers for logic programs is still far from reality. This is related both to 1) efficiency issues and to 2) the complications of dealing with practical programs. Regarding efficiency, the most successful unfolding rules used nowadays are based on structural orders applied over (covering) ancestors, i.e., a subsequence of the atoms selected during a derivation. Unfortunately, maintaining the structure of the ancestor relation during unfolding introduces significant overhead. We propose an efficient, practical local unfolding rule based on the notion of covering ancestors which can be used in combination with any structural order and allows a stack-based implementation without losing any opportunities for specialization. Regarding the second issue, we propose assertion-based techniques which allow our approach to deal with real programs that include (Prolog) built-ins and external predicates in a very extensible manner. Finally, we report on our implementation of these techniques in a practical partial evaluator, embedded in a state of the art compiler which uses global analysis extensively (the Ciao compiler and, specifically, its preprocessor CiaoPP). The performance analysis of the resulting system shows that our techniques, in addition to dealing with practical programs, are also significantly more efficient in time and somewhat more efficient in memory than traditional tree-based implementations.
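A schematic version of the stack-based unfolding rule (the atoms and the "covers" test below are stand-ins; the paper uses structural orders such as homeomorphic embedding over covering ancestors): unfolding stops as soon as the selected atom covers one of its own ancestors, which is what keeps specialization terminating, and the stack holds only the current branch.

def embeds(old, new):
    # crude whistle: same predicate and the argument did not shrink
    return old[0] == new[0] and new[1] >= old[1]

def unfold(atom, step, stack=()):
    if any(embeds(a, atom) for a in stack):
        return [("residual", atom)]          # stop: emit residual call
    result = []
    for child in step(atom):                 # push atom, derive children
        result += unfold(child, step, stack + (atom,))
    return result or [("fact", atom)]

# f(n) unfolds to f(n-1) down to the base case without blowing the whistle.
step = lambda a: [("f", a[1] - 1)] if a[0] == "f" and a[1] > 0 else []
print(unfold(("f", 3), step))                # [('fact', ('f', 0))]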
@inproceedings{DBLP-conf-lopstr-PueblaAH04,
author = {Germ{\'a}n Puebla and
Elvira Albert and
Manuel V. Hermenegildo},
title = {{Efficient Local Unfolding with Ancestor Stacks for Full
Prolog}},
booktitle = {{Logic Based Program Synthesis and Transformation, 14th International Symposium, LOPSTR 2004, Verona, Italy, August 26-28, 2004, Revised Selected Papers}},
year = {2004},
pages = {149-165},
ee = {http://dx.doi.org/10.1007/11506676_10},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[86]
|
Elvira Albert, Germán Puebla, and Manuel V. Hermenegildo.
Abstraction-Carrying Code.
In Logic for Programming, Artificial Intelligence, and
Reasoning, 11th International Conference, LPAR 2004, Montevideo, Uruguay,
March 14-18, 2005, Proceedings, volume 3452, pages 380-397. Springer,
2004.
[ bibtex |
abstract |
DOI ]
Proof-Carrying Code (PCC) is a general approach to mobile code safety in which programs are augmented with a certificate (or proof). The practical uptake of PCC greatly depends on the existence of a variety of enabling technologies which allow both to prove programs correct and to replace a costly verification process by an efficient checking procedure on the consumer side. In this work we propose Abstraction-Carrying Code (ACC), a novel approach which uses abstract interpretation as enabling technology. We argue that the large body of applications of abstract interpretation to program verification is amenable to the overall PCC scheme. In particular, we rely on an expressive class of safety policies which can be defined over different abstract domains. We use an abstraction (or abstract model) of the program computed by standard static analyzers as a certificate. The validity of the abstraction on the consumer side is checked in a single-pass by a very efficient and specialized abstract-interpreter. We believe that ACC brings the expressiveness, flexibility and automation which is inherent in abstract interpretation techniques to the area of mobile code safety. We have implemented and benchmarked ACC within the Ciao system preprocessor. The experimental results show that the checking phase is indeed faster than the proof generation phase, and that the sizes of certificates are reasonable.
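A tiny model of the consumer-side check, with an invented sign-like domain over the program `x := 0; while * do x := x + 1`: the checker never iterates; it only re-evaluates each abstract equation once and confirms the shipped abstraction absorbs the results.

def join(a, b):                    # lub in the domain {"0", "+", "0+"}
    return a if a == b else "0+"

def inc(a):                        # abstract effect of x := x + 1
    return "+" if a in ("0", "+") else "0+"

def is_fixpoint(cert):
    """Single pass: x at the loop head must cover both the entry
    value "0" and the back edge inc(x)."""
    x = cert["x_at_loop"]
    return join(join("0", inc(x)), x) == x

print(is_fixpoint({"x_at_loop": "0+"}))   # True: certificate accepted
print(is_fixpoint({"x_at_loop": "0"}))    # False: rejected in one pass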
@inproceedings{DBLP-conf-lpar-AlbertPH04,
author = {Elvira Albert and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Abstraction-Carrying Code}},
booktitle = {{Logic for Programming, Artificial Intelligence, and Reasoning, 11th International Conference, LPAR 2004, Montevideo, Uruguay, March 14-18, 2005, Proceedings}},
year = {2004},
pages = {380-397},
ee = {http://dx.doi.org/10.1007/978-3-540-32275-7_25},
volume = {3452},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[87]
|
Elvira Albert, Germán Puebla, and Manuel V. Hermenegildo.
Experiments in abstract interpretation-based code certification for
pervasive systems.
In Proceedings of the IEEE International Conference on Systems,
Man & Cybernetics: The Hague, Netherlands, 10-13 October 2004, pages
1125-1130. IEEE, 2004.
[ bibtex |
abstract |
DOI ]
Proof-carrying code (PCC) is a general methodology for certifying that the execution of untrusted mobile code is safe. The basic idea is that the code supplier attaches a certificate to the mobile code which the consumer checks in order to ensure that the code is indeed safe. The potential benefit is that the consumer's task is reduced from the level of proving to the level of checking. Recently, the abstract interpretation techniques developed in logic programming have been proposed as a basis for PCC. This extended abstract reports on experiments which illustrate several issues involved in abstract interpretation-based certification. First, we describe the implementation of our system in the context of CiaoPP: the preprocessor of the Ciao multi-paradigm programming system. Then, by means of some experiments, we show how code certification is aided in the implementation of the framework. Finally, we discuss the application of our method within the area of pervasive systems.
@inproceedings{DBLP-conf-smc-AlbertPH04,
author = {Elvira Albert and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Experiments in abstract interpretation-based code certification
for pervasive systems}},
booktitle = {{Proceedings of the IEEE International Conference on Systems, Man {\&} Cybernetics: The Hague, Netherlands, 10-13 October 2004}},
year = {2004},
pages = {1125-1130},
ee = {http://dx.doi.org/10.1109/ICSMC.2004.1399773},
publisher = {{IEEE}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[88]
|
Elvira Albert, Michael Hanus, and Germán Vidal.
A Practical Partial Evaluator for a Multi-Paradigm Declarative
Language.
In Functional and Logic Programming, 5th International
Symposium, FLOPS 2001, Tokyo, Japan, March 7-9, 2001, Proceedings, volume
2024, pages 326-342. Springer, 2001.
[ bibtex |
abstract |
DOI ]
Partial evaluation is an automatic technique for program optimization which preserves program semantics. The range of its potential applications is extremely large, as witnessed by successful experiences in several fields. This paper summarizes our findings in the development of partial evaluation tools for Curry, a modern multi-paradigm declarative language which combines features from functional, logic and concurrent programming. From a practical point of view, the most promising approach appears to be a recent partial evaluation framework which translates source programs into a maximally simplified representation. We support this statement by extending the underlying method in order to design a practical partial evaluation tool for the language Curry. The process is fully automatic and can be incorporated into a Curry compiler as a source-to-source transformation on intermediate programs. An implementation of the partial evaluator has been undertaken. Experimental results confirm that our partial evaluator pays off in practice.
@inproceedings{DBLP-conf-flops-AlbertHV01,
author = {Elvira Albert and
Michael Hanus and
Germ{\'a}n Vidal},
title = {{A Practical Partial Evaluator for a Multi-Paradigm Declarative
Language}},
booktitle = {{Functional and Logic Programming, 5th International Symposium, FLOPS 2001, Tokyo, Japan, March 7-9, 2001, Proceedings}},
year = {2001},
volume = {2024},
pages = {326-342},
ee = {http://dx.doi.org/10.1007/3-540-44716-4_21},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[89]
|
Elvira Albert and Germán Vidal.
Symbolic Profiling for Multi-paradigm Declarative Languages.
In Logic Based Program Synthesis and Transformation, 11th
International Workshop, LOPSTR 2001, Paphos, Cyprus, November 28-30, 2001,
Selected Papers, pages 148-167. Springer, 2001.
[ bibtex |
abstract |
DOI ]
We present the basis of a source-level profiler for multi-paradigm declarative languages which integrate features from (lazy) functional and logic programming. Our profiling scheme is symbolic in the sense that it is independent of the particular language implementation. This is achieved by counting the number of basic operations performed during the execution of program calls, e.g., the number of unfolding steps, the number of matching operations, etc. The main contribution of this paper is the formal specification of the attribution of execution costs to cost centers, which is particularly difficult in the context of lazy languages. A prototype implementation of the symbolic profiler has been undertaken for the multi-paradigm language Curry. Preliminary results demonstrate the practicality of our approach and its applications in the field of program transformation.
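A bare-bones cost-center profiler in this spirit (the API and operation names are invented): each basic operation is charged to whichever cost center is in scope when it executes, independently of wall-clock time or implementation.

from collections import Counter, defaultdict
from contextlib import contextmanager

costs = defaultdict(Counter)
scope = ["MAIN"]

@contextmanager
def cost_center(name):
    scope.append(name)
    try:
        yield
    finally:
        scope.pop()

def charge(op, n=1):
    costs[scope[-1]][op] += n      # symbolic: count operations, not time

def rev(xs):
    charge("unfold")               # one unfolding step per call
    return rev(xs[1:]) + [xs[0]] if xs else []

with cost_center("reverse"):
    rev(list(range(10)))
print(dict(costs))                 # {'reverse': Counter({'unfold': 11})}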
@inproceedings{DBLP-conf-lopstr-AlbertV01,
author = {Elvira Albert and
Germ{\'a}n Vidal},
title = {{Symbolic Profiling for Multi-paradigm Declarative Languages}},
booktitle = {{Logic Based Program Synthesis and Transformation, 11th International Workshop, LOPSTR 2001, Paphos, Cyprus, November 28-30, 2001, Selected Papers}},
year = {2001},
pages = {148-167},
ee = {http://dx.doi.org/10.1007/3-540-45607-4_9},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[90]
|
Elvira Albert, César Ferri, Frank Steiner, and Germán Vidal.
Improving Functional Logic Programs by Difference-Lists.
In Advances in Computing Science - ASIAN 2000, 6th Asian
Computing Science Conference, Penang, Malaysia, November 25-27, 2000,
Proceedings, volume 1961 of Lecture Notes in Computer Science, pages 237-254. Springer, 2000.
[ bibtex |
abstract |
DOI ]
Modern multi-paradigm declarative languages integrate features from functional, logic, and concurrent programming. Since programs in these languages make extensive use of list-processing functions, we consider the development of list-processing optimization techniques to be of much interest. In this work, we consider the adaptation of the well-known difference-lists transformation from the logic programming paradigm to our integrated setting. Unfortunately, the use of difference-lists is impractical due to the absence of non-strict equality in lazy (call-by-name) languages. Despite this, we have developed a novel, stepwise transformation which achieves a similar effect over functional logic programs. We also show a simple and practical approach to incorporate the optimization into a real compiler. Finally, we have conducted a number of experiments which show the practicality of our proposal.
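For readers unfamiliar with the technique, the standard functional rendering of difference-lists represents a list by the function that prepends it, so concatenation becomes constant-time composition. The Haskell sketch below shows this encoding only; the paper's stepwise transformation is more involved.

  -- Minimal sketch of difference-lists in a functional setting.
  type DList a = [a] -> [a]

  fromList :: [a] -> DList a
  fromList xs = (xs ++)

  toList :: DList a -> [a]
  toList d = d []

  append :: DList a -> DList a -> DList a
  append = (.)   -- concatenation is function composition, O(1)

  -- Naive reverse is O(n^2) with (++); via DList it runs in O(n).
  rev :: [a] -> [a]
  rev = toList . go
    where go []     = id
          go (y:ys) = go ys . (y:)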
@inproceedings{DBLP-conf-asian-AlbertFSV00,
author = {Elvira Albert and
C{\'e}sar Ferri and
Frank Steiner and
Germ{\'a}n Vidal},
title = {{Improving Functional Logic Programs by Difference-Lists}},
booktitle = {{Advances in Computing Science - ASIAN 2000, 6th Asian Computing Science Conference, Penang, Malaysia, November 25-27, 2000, Proceedings}},
year = {2000},
volume = {1961},
pages = {237-254},
ee = {http://dx.doi.org/10.1007/3-540-44464-5_17},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[91]
|
Elvira Albert, Sergio Antoy, and Germán Vidal.
Measuring the Effectiveness of Partial Evaluation.
In LOPSTR, 2000.
[ bibtex |
abstract |
http ]
We introduce a framework for assessing the effectiveness of partial evaluators in functional logic languages. Our framework is based on properties of the rewrite system that models a functional logic program. Consequently, our assessment is independent of any specific language implementation or computing environment. We define several criteria for measuring the cost of a computation: number of steps, number of function applications, and pattern matching effort. Most importantly, we express the cost of each criterion by means of recurrence equations over algebraic data types, which can be automatically inferred from the partial evaluation process itself. In some cases, the equations can be solved by transforming their arguments from arbitrary data types to natural numbers. In other cases, it is possible to estimate the improvement of a partial evaluation by analyzing the associated cost recurrence equations.
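As a worked illustration in the spirit of these criteria (our own example, counting rewrite steps and abstracting lists to their length), consider list concatenation app, where n is the length of the first list:

  % Cost recurrence for app (illustrative):
  \begin{align*}
    T_{\mathit{app}}(0)     &= 1\\
    T_{\mathit{app}}(n + 1) &= 1 + T_{\mathit{app}}(n)
  \end{align*}
  % Hence T_app(n) = n + 1. The term app (app xs ys) zs then costs
  % (n + 1) + (n + m + 1) = 2n + m + 2 steps, while its partially
  % evaluated version traverses each list once: n + m + 2 steps.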
@inproceedings{DBLP-conf-lopstr-AlbertAV00,
author = {Elvira Albert and
Sergio Antoy and
Germ{\'a}n Vidal},
title = {{Measuring the Effectiveness of Partial Evaluation}},
booktitle = {LOPSTR},
year = {2000},
ee = {ftp://ftp.cs.man.ac.uk/pub/TR/UMCS-00-6-1-albert.ps.Z},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[92]
|
Elvira Albert, Sergio Antoy, and Germán Vidal.
Measuring the Effectiveness of Partial Evaluation in Functional
Logic Languages.
In Logic Based Program Synthesis and Transformation, 10th
International Workshop, LOPSTR 2000 London, UK, July 24-28, 2000, Selected
Papers, pages 103-124. Springer, 2000.
[ bibtex |
abstract |
DOI ]
We introduce a framework for assessing the effectiveness of partial evaluators in functional logic languages. Our framework is based on properties of the rewrite system that models a functional logic program. Consequently, our assessment is independent of any specific language implementation or computing environment. We define several criteria for measuring the cost of a computation: number of steps, number of function applications, and pattern matching effort. Most importantly, we express the cost of each criterion by means of recurrence equations over algebraic data types, which can be automatically inferred from the partial evaluation process itself. In some cases, the equations can be solved by transforming their arguments from arbitrary data types to natural numbers. In other cases, it is possible to estimate the improvement of a partial evaluation by analyzing the associated cost recurrence equations.
@inproceedings{DBLP-conf-lopstr-AlbertAV00a,
author = {Elvira Albert and
Sergio Antoy and
Germ{\'a}n Vidal},
title = {{Measuring the Effectiveness of Partial Evaluation in Functional
Logic Languages}},
booktitle = {{Logic Based Program Synthesis and Transformation, 10th International Workshop, LOPSTR 2000 London, UK, July 24-28, 2000, Selected Papers}},
year = {2000},
pages = {103-124},
ee = {http://dx.doi.org/10.1007/3-540-45142-0_7},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[93]
|
Elvira Albert, Michael Hanus, and Germán Vidal.
Using an Abstract Representation to Specialize Functional Logic
Programs.
In Logic for Programming and Automated Reasoning, 7th
International Conference, LPAR 2000, Reunion Island, France, November 11-12,
2000, Proceedings, volume 1955 of Lecture Notes in Computer Science, pages 381-398. Springer, 2000.
[ bibtex |
abstract |
DOI ]
This paper introduces a novel approach for the specialization of functional logic languages. We consider a maximally simplified abstract representation of programs (which still contains all the necessary information) and define a non-standard semantics for these programs. Together, these two ingredients allow us to design a simple and concise partial evaluation method for modern functional logic languages, avoiding several limitations of previous approaches. Moreover, since these languages can be automatically translated into the abstract representation, our technique is widely applicable. In order to assess the practicality of our approach, we have developed a partial evaluation tool for the multi-paradigm language Curry. The partial evaluator is written in Curry itself and has been tested on an extensive benchmark suite (even a meta-interpreter). To the best of our knowledge, this is the first purely declarative partial evaluator for a functional logic language.
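A rough Haskell sketch of such a simplified representation, where all pattern matching has been compiled into explicit case expressions (the constructors are assumptions for illustration, in the spirit of intermediate languages like FlatCurry):

  -- Minimal sketch of a "flat" program representation.
  data FlatExpr
    = FVar  String                      -- variable
    | FCons String [FlatExpr]           -- constructor application
    | FCall String [FlatExpr]           -- call to a defined function
    | FCase FlatExpr [(Pat, FlatExpr)]  -- explicit case distinction
    deriving Show

  data Pat = Pat String [String]        -- flat constructor pattern
    deriving Show

  -- e.g. the head function in flat form:
  -- head xs = case xs of { x:_ -> x }
  flatHead :: FlatExpr
  flatHead = FCase (FVar "xs") [(Pat ":" ["x", "_"], FVar "x")]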
@inproceedings{DBLP-conf-lpar-AlbertHV00,
author = {Elvira Albert and
Michael Hanus and
Germ{\'a}n Vidal},
title = {{Using an Abstract Representation to Specialize Functional
Logic Programs}},
booktitle = {{Logic for Programming and Automated Reasoning, 7th International Conference, LPAR 2000, Reunion Island, France, November 11-12, 2000, Proceedings}},
volume = {1955},
year = {2000},
pages = {381-398},
ee = {http://dx.doi.org/10.1007/3-540-44404-1_24},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[94]
|
Elvira Albert, María Alpuente, Michael Hanus, and Germán Vidal.
A Partial Evaluation Framework for Curry Programs.
In Logic Programming and Automated Reasoning, 6th International
Conference, LPAR'99, Tbilisi, Georgia, September 6-10, 1999, Proceedings,
volume 1705 of Lecture Notes in Computer Science, pages 376-395. Springer, 1999.
[ bibtex |
abstract |
DOI ]
In this work, we develop a partial evaluation technique for residuating functional logic programs, which generalize the concurrent computation models for logic programs with delays to functional logic programs. We show how to lift the nondeterministic choices from run time to specialization time. We ascertain the conditions under which the original and the transformed program have the same answer expressions for the considered class of queries as well as the same floundering behavior. All these results are relevant for program optimization in Curry, a functional logic language which is intended to become a standard in this area. Preliminary empirical evaluation of the specialized Curry programs demonstrates that our technique also works well in practice and leads to substantial performance improvements. To our knowledge, this work is the first attempt to formally define and prove correct a general scheme for the partial evaluation of functional logic programs with delays.
@inproceedings{DBLP-conf-lpar-AlbertAHV99,
author = {Elvira Albert and
Mar\'{\i}a Alpuente and
Michael Hanus and
Germ{\'a}n Vidal},
title = {{A Partial Evaluation Framework for Curry Programs}},
booktitle = {{Logic Programming and Automated Reasoning, 6th International Conference, LPAR'99, Tbilisi, Georgia, September 6-10, 1999, Proceedings}},
year = {1999},
volume = {1705},
pages = {376-395},
ee = {http://dx.doi.org/10.1007/3-540-48242-3_23},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|
[95]
|
Elvira Albert, María Alpuente, Moreno Falaschi, Pascual Julián Iranzo,
and Germán Vidal.
Improving Control in Functional Logic Program Specialization.
In Static Analysis, 5th International Symposium, SAS '98, Pisa,
Italy, September 14-16, 1998, Proceedings, volume 1503 of Lecture Notes
in Computer Science, pages 262-277. Springer, 1998.
[ bibtex |
abstract |
DOI ]
We have recently defined a framework for Narrowing-driven Partial Evaluation (NPE) of functional logic programs. This method is as powerful as partial deduction of logic programs and positive supercompilation of functional programs. Although it is possible to treat complex terms containing primitive functions (e.g., conjunctions or equations) in the NPE framework, its basic control mechanisms do not allow for effective polygenetic specialization of these complex expressions. We introduce a sophisticated unfolding rule endowed with a dynamic narrowing strategy that permits flexible scheduling of the elements (in conjunctions) which are reduced during specialization. We also present a novel abstraction operator which extends some partitioning techniques defined in the framework of conjunctive partial deduction. We provide experimental results, obtained from an implementation using the Indy system, which demonstrate that the control refinements produce better specializations.
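Generically, such control has a two-level shape: a local unfolding rule expands one call, and a global abstraction operator folds the resulting calls back into the set being specialized. The Haskell skeleton below is our own hedged rendering of that generic loop, not the paper's refined strategy for conjunctions:

  import Data.List (nub)

  -- Two-level control skeleton for partial evaluation (illustrative).
  -- Termination relies on the abstraction operator generalising calls
  -- so that only finitely many distinct abstracted calls can arise.
  pe :: Eq call
     => (call -> [call])           -- local control: unfold one call
     -> ([call] -> call -> call)   -- global control: abstraction
     -> [call] -> [call]
  pe unfold abstract = go
    where
      go calls
        | calls' == calls = calls  -- closed set: specialization done
        | otherwise       = go calls'
        where
          calls' = nub (calls ++ [ abstract calls c'
                                 | c <- calls, c' <- unfold c ])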
@inproceedings{DBLP-conf-sas-AlbertAFJV98,
author = {Elvira Albert and
Mar\'{\i}a Alpuente and
Moreno Falaschi and
Pascual Juli{\'a}n Iranzo and
Germ{\'a}n Vidal},
title = {{Improving Control in Functional Logic Program Specialization}},
booktitle = {{Static Analysis, 5th International Symposium, SAS '98, Pisa, Italy, September 14-16, 1998, Proceedings}},
year = {1998},
pages = {262-277},
volume = {1503},
ee = {http://dx.doi.org/10.1007/3-540-49727-7_16},
publisher = {{Springer}},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
|