[1]
Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and
Guillermo Román-Díez.
Resource Analysis: From Sequential to Concurrent and
Distributed Programs.
In Nikolaj Bjørner and Frank S. de Boer, editors, FM 2015:
Formal Methods - 20th International Symposium, Oslo, Norway, June 24-26,
2015, Proceedings, volume 9109 of Lecture Notes in Computer Science,
pages 3-17. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF ]
@inproceedings{AlbertACGGMPR15,
author = {Elvira Albert and
Puri Arenas and
Jes{\'{u}}s Correas and
Samir Genaim and
Miguel G{\'{o}}mez{-}Zamalloa and
Enrique Martin{-}Martin and
Germ{\'{a}}n Puebla and
Guillermo Rom{\'{a}}n{-}D{\'{\i}}ez},
title = {{R}esource {A}nalysis: {F}rom {S}equential to {C}oncurrent and {D}istributed {P}rograms},
booktitle = {{FM} 2015: Formal Methods - 20th International Symposium, Oslo, Norway,
June 24-26, 2015, Proceedings},
pages = {3--17},
year = {2015},
doi = {10.1007/978-3-319-19249-9_1},
editor = {Nikolaj Bj{\o}rner and Frank S. de Boer},
series = {Lecture Notes in Computer Science},
isbn = {978-3-319-19248-2},
volume = {9109},
publisher = {Springer}
}
[2]
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Test Case Generation of Actor Systems.
In 13th International Symposium on Automated Technology for
Verification and Analysis, ATVA 2015. Proceedings, volume 9364 of
Lecture Notes in Computer Science, pages 259-275. Springer, 2015.
[ bibtex |
abstract |
DOI |
PDF ]
Testing is a vital part of the software development process. It is
even more so in the context of concurrent languages, since due to
undesired task interleavings and to unexpected behaviours of the
underlying task scheduler, errors can go easily undetected. Test
case generation (TCG) is the process of automatically generating
test inputs for interesting coverage criteria, which
are then applied to the system under test. This paper presents a
TCG framework for actor systems whose three main elements are the
original contributions of this work: (1) a
symbolic execution calculus, which allows symbolically executing the
program (i.e., executing the program for unknown input data), (2)
improved techniques to avoid performing redundant computations
during symbolic execution, (3) new termination and coverage criteria,
which ensure the termination of symbolic execution and guarantee
that the test cases provide the desired degree of code coverage.
Finally, our framework has been
implemented and evaluated within the aPET system.
@inproceedings{AlbertAGM15,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Test {C}ase {G}eneration of {A}ctor {S}ystems},
booktitle = {13th International Symposium on Automated Technology for Verification and Analysis, {ATVA} 2015. Proceedings},
publisher = {Springer},
year = {2015},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-319-24953-7_21},
pages = {259-275},
volume = {9364},
isbn = {978-3-319-24952-0}
}
[3]
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Actor- and Task-Selection Strategies for Pruning Redundant
State-Exploration in Testing.
In Erika Ábrahám and Catuscia Palamidessi, editors, 34th
IFIP International Conference on Formal Techniques for Distributed Objects,
Components and Systems (FORTE 2014), volume 8461 of Lecture Notes in
Computer Science, pages 49-65. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF ]
Testing concurrent systems requires exploring all possible
non-deterministic interleavings that the concurrent execution may
have. This is because any of the interleavings may reveal the
erroneous behaviour. In testing of actor systems, we can distinguish
two sources of non-determinism: (1) actor-selection, the
order in which actors are explored and (2) task-selection,
the order in which the messages (or tasks) within each actor are
explored. This paper provides new strategies and heuristics for
pruning redundant state-exploration when testing actor systems by
reducing the amount of unnecessary non-determinism. First, we
propose a method and heuristics for actor-selection based on
tracking the amount and the type of interactions among actors.
Second, we can avoid further redundant interleavings in task-selection
by taking into account the accesses that the tasks make to the
shared memory.
@inproceedings{AlbertAG14,
author = {Elvira Albert and
Puri Arenas and
Miguel G\'omez-Zamalloa},
title = {Actor- and {T}ask-{S}election {S}trategies for {P}runing {R}edundant {S}tate-{E}xploration in {T}esting},
booktitle = {34th IFIP International Conference on Formal Techniques for Distributed Objects, Components and Systems (FORTE 2014)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
year = {2014},
doi = {10.1007/978-3-662-43613-4_4},
pages = {49-65},
volume = {8461},
editor = {Erika {\'A}brah{\'a}m and Catuscia Palamidessi}
}
[4]
Elvira Albert, Puri Arenas, Antonio Flores-Montoya, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and Guillermo Román-Díez.
SACO: Static Analyzer for Concurrent Objects.
In Erika Ábrahám and Klaus Havelund, editors, Tools and
Algorithms for the Construction and Analysis of Systems - 20th International
Conference, TACAS 2014, volume 8413 of Lecture Notes in Computer
Science, pages 562-567. Springer, 2014.
[ bibtex |
abstract |
DOI |
PDF ]
We present the main concepts, usage and implementation of SACO,
a static analyzer for concurrent objects. Interestingly, SACO is
able to infer both liveness (namely termination and resource
boundedness) and safety properties (namely deadlock freeness)
of programs based on concurrent objects. The system integrates
auxiliary analyses such as points-to and
may-happen-in-parallel, which are essential for increasing the
accuracy of the aforementioned more complex properties. SACO
provides accurate information about the dependencies which may
introduce deadlocks, loops whose termination is not guaranteed, and
upper bounds on the resource consumption of methods.
@inproceedings{AlbertAFGGMPR14,
author = {Elvira Albert and
Puri Arenas and
Antonio Flores-Montoya and
Samir Genaim and
Miguel G\'omez-Zamalloa and
Enrique Martin-Martin and
Germ\'an Puebla and
Guillermo Rom\'an-D\'iez},
title = {{SACO}: {S}tatic {A}nalyzer for {C}oncurrent {O}bjects},
booktitle = {Tools and Algorithms for the Construction and Analysis of
Systems - 20th International Conference, TACAS 2014},
year = {2014},
editor = {Erika {\'A}brah{\'a}m and
Klaus Havelund},
pages = {562-567},
volume = {8413},
series = {Lecture Notes in Computer Science},
isbn = {978-3-642-54861-1},
publisher = {Springer},
doi = {10.1007/978-3-642-54862-8_46}
}
[5]
Elvira Albert, Puri Arenas, Miguel Gómez-Zamalloa, and Peter Y.H. Wong.
aPET: A Test Case Generation Tool for Concurrent
Objects.
In Bertrand Meyer, Luciano Baresi, and Mira Mezini, editors,
Joint Meeting of the European Software Engineering Conference and the ACM
SIGSOFT Symposium on the Foundations of Software Engineering, ESEC/FSE'13,
Saint Petersburg, Russian Federation, August 18-26, 2013, pages 595-598.
ACM, 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
We present the concepts, usage and prototypical implementation of
aPET, a test case generation tool for a distributed asynchronous
language based on concurrent objects. The system receives as
input a program, a selection of methods to be tested, and a set of
parameters that include a selection of a coverage criterion. It
yields as output a set of test cases which guarantee that the
selected coverage criterion is achieved.
aPET is completely integrated within the language's IDE via
Eclipse. The generated test cases can be displayed in textual mode
and, in addition, ABSUnit code can be generated (i.e., code
runnable in a simple framework, similar to JUnit, for writing repeatable
tests). The information yielded by aPET can be relevant to spot
bugs during program development and also to perform regression
testing.
@inproceedings{AlbertAGZW13,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa and Peter Y.H. Wong},
title = {a{PET}: {A} {T}est {C}ase {G}eneration {T}ool for {C}oncurrent {O}bjects},
booktitle = {Joint Meeting of the European Software Engineering Conference
and the ACM SIGSOFT Symposium on the Foundations of Software
Engineering, ESEC/FSE'13, Saint Petersburg, Russian Federation,
August 18-26, 2013},
year = 2013,
publisher = {ACM},
pages = {595--598},
editor = {Bertrand Meyer and
Luciano Baresi and
Mira Mezini},
doi = {10.1145/2491411.2494590},
url = {http://dx.doi.org/10.1145/2491411.2494590}
}
[6]
Diego Esteban Alonso Blas, Puri Arenas, and Samir Genaim.
Precise Cost Analysis via Local Reasoning.
In Dang Van Hung and Mizuhito Ogawa, editors, Automated
Technology for Verification and Analysis - 11th International Symposium,
ATVA 2013, Hanoi, Vietnam, October 15-18, 2013. Proceedings, volume 8172
of Lecture Notes in Computer Science, pages 319-333. Springer, 2013.
[ bibtex |
abstract |
DOI |
PDF |
http ]
The classical approach to static cost analysis is based on first
transforming a given program into a set of cost relations, and then solving
them into closed-form upper-bounds.
The quality of the upper-bounds and the scalability of such cost
analysis highly depend on the precision and efficiency of the
solving phase.
Several techniques for solving cost relations exist: some are
efficient but not precise enough, and some are very precise but do
not scale to large cost relations.
In this paper we explore the gap between these techniques, seeking
ones that are both precise and efficient.
In particular, we propose a novel technique that first splits the
cost relation into several atomic ones, and then uses precise
local reasoning for some and less precise but efficient reasoning for
others.
For the precise local reasoning, we propose several methods that
define the cost as a solution of a universally quantified formula.
Preliminary experiments demonstrate the effectiveness of our
approach.
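As a schematic illustration of this style of local reasoning (the example is
ours, not taken from the paper), consider the cost relation

    C(n) = 0                     \text{if } n \le 0
    C(n) = n + C(n-1)            \text{if } n > 0

A candidate bound f can be certified by the universally quantified formula

    \forall n.\; (n \le 0 \Rightarrow f(n) \ge 0) \;\wedge\; (n > 0 \Rightarrow f(n) \ge n + f(n-1))

which is satisfied, for instance, by f(n) = n(n+1)/2, so that polynomial is a
valid upper bound for C.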
@inproceedings{AlonsoAG13,
author = {Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim},
title = {Precise {C}ost {A}nalysis via {L}ocal {R}easoning},
booktitle = {Automated Technology for Verification and Analysis - 11th International
Symposium, {ATVA} 2013, Hanoi, Vietnam, October 15-18, 2013. Proceedings},
editor = {Dang Van Hung and Mizuhito Ogawa},
pages = {319-333},
volume = {8172},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
year = {2013},
doi = {10.1007/978-3-319-02444-8_23},
url = {http://dx.doi.org/10.1007/978-3-319-02444-8_23}
}
[7]
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Towards Testing Concurrent Objects in CLP.
In Agostino Dovier and Vítor Santos Costa, editors, 28th
International Conference on Logic Programming (ICLP'12), volume 17 of
LIPIcs, pages 98-108. Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik,
2012.
[ bibtex |
abstract |
DOI |
PDF ]
Testing is a vital part of the software development process. It is
even more so in the context of concurrent languages, since due to
undesired task interleavings and to unexpected behaviours of the
underlying task scheduler, errors can go easily undetected. This
paper studies the extension of the CLP-based framework for test data
generation of sequential programs to the context of concurrent
objects, a concurrency model which constitutes a promising
solution to concurrency in OO languages.
The challenge is in developing the following required extensions entirely in CLP
(which are our main contributions):
(1) we simulate
task interleavings which could occur in an actual execution and
could lead to additional test cases for the method under test (while
interleavings that would not add new test cases are disregarded),
(2) we integrate sophisticated scheduling policies and, besides,
use different policies for the different fragments of the program
that can run in parallel,
(3) we combine standard termination and coverage
criteria used for testing sequential programs with specific criteria
which control termination and coverage from the concurrency point of
view, e.g., we can limit the number of task interleavings allowed and
the number of loop unrollings performed in each parallel component, etc.
@inproceedings{AlbertAG-Z12,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Towards Testing Concurrent Objects in CLP},
booktitle = {28th International Conference on Logic Programming (ICLP'12)},
editor = {Agostino Dovier and V\'{\i}tor Santos Costa},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik},
series = {LIPIcs},
volume = {17},
year = {2012},
doi = {10.4230/LIPIcs.ICLP.2012.98},
pages = {98-108}
}
[8]
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and Germán Puebla.
COSTABS: A Cost and Termination Analyzer for ABS.
In Oleg Kiselyov and Simon Thompson, editors, Proceedings of the
2012 ACM SIGPLAN Workshop on Partial Evaluation and Program Manipulation,
PEPM 2012, Philadelphia, Pennsylvania, USA, January 23-24, 2012, pages
151-154. ACM Press, 2012.
[ bibtex |
abstract |
DOI ]
ABS is an abstract behavioural specification
language to model distributed concurrent systems. Characteristic
features of ABS are that: (1) it allows abstracting from
implementation details while remaining executable: a
functional sub-language over abstract data types is used to
specify internal, sequential computations; and (2) the
imperative sub-language provides flexible concurrency and
synchronization mechanisms by means of asynchronous method calls,
release points in method definitions, and cooperative scheduling of
method activations. This paper presents COSTABS, a COSt and
Termination analyzer for ABS, which is able to prove termination and
obtain resource usage bounds for both the imperative and
functional fragments of programs. The resources that COSTABS can
infer include termination, number of execution steps, memory
consumption, number of asynchronous calls, among others. The
analysis bounds provide formal guarantees that the execution of the
program will never exceed the inferred amount of resources. The
system can be downloaded as free software from its web site, where a
repository of examples and a web interface are also provided. To the
best of our knowledge, COSTABS is the first system able to perform
resource analysis for a concurrent language.
@inproceedings{AlbertAGGZP12,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G\'omez-Zamalloa and Germ\'an Puebla},
title = {{COSTABS}: A {C}ost and {T}ermination {A}nalyzer for {ABS}},
booktitle = {Proceedings of the 2012 ACM SIGPLAN Workshop on Partial
Evaluation and Program Manipulation, PEPM 2012, Philadelphia, Pennsylvania, USA,
January 23-24, 2012},
publisher = {ACM Press},
editor = {Oleg Kiselyov and Simon Thompson},
year = {2012},
isbn = {978-1-4503-1118-2 },
pages = {151-154},
doi = {10.1145/2103746.2103774}
}
[9]
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Automatic Inference of Resource Consumption Bounds.
In Logic for Programming, Artificial Intelligence, and Reasoning
- 18th International Conference, LPAR-18, Mérida, Venezuela, March 11-15,
2012. Proceedings, volume 7180 of Lecture Notes in Computer
Science, pages 1-11. Springer, 2012.
[ bibtex |
abstract |
DOI ]
One of the main features of programs is the amount of resources
which are needed in order to run them. Different resources can be
taken into consideration, such as the number of execution steps,
amount of memory allocated, number of calls to certain methods,
etc. Unfortunately, manually determining the resource consumption of
programs is difficult and error-prone. We provide an overview of a
state of the art framework for automatically obtaining both upper
and lower bounds on the resource consumption of programs. The bounds
obtained are functions on the size of the input arguments to the
program and are obtained statically, i.e., without running the
program. Due to the approximations introduced, the framework can
fail to obtain (non-trivial) bounds even if they exist. On the other
hand, modulo implementation bugs, the bounds thus obtained are valid
for any execution of the program. The framework has been
implemented in the COSTA system and can provide useful bounds for
realistic object-oriented and actor-based concurrent programs.
@inproceedings{AlbertAGGZP12a,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa and
Germ{\'a}n Puebla},
title = {Automatic {I}nference of {R}esource {C}onsumption {B}ounds},
booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning
- 18th International Conference, LPAR-18, M{\'e}rida,
Venezuela, March 11-15, 2012. Proceedings},
year = {2012},
pages = {1-11},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-642-28717-6},
volume = {7180}
}
[10]
Elvira Albert, Puri Arenas, and Miguel Gómez-Zamalloa.
Symbolic Execution of Concurrent Objects in CLP.
In Practical Aspects of Declarative Languages (PADL'12), volume 7149 of
Lecture Notes in Computer Science, pages 123-137. Springer, January 2012.
[ bibtex |
abstract |
DOI |
PDF ]
In the concurrent objects model, objects have conceptually
dedicated processors and live in a distributed environment with
unordered communication by means of asynchronous method calls.
Method callers may decide at runtime when to synchronize with the reply
from a call. This paper presents a CLP-based approach to symbolic execution
of concurrent OO programs. Developing a symbolic execution engine for
concurrent objects is challenging because it needs to combine the
OO features of the language, concurrency and backtracking.
Our approach consists in, first, transforming the OO program into an
equivalent CLP program which contains calls to specific builtins that
handle the concurrency model. The builtins are implemented in CLP and
include primitives to handle asynchronous calls, synchronization
operations and scheduling policies, among others. Interestingly,
symbolic execution of the transformed programs then relies
simply on the standard sequential execution of CLP.
We report on a prototype implementation within the PET system which
shows the feasibility of our approach.
@inproceedings{AlbertAGZ12,
author = {Elvira Albert and Puri Arenas and Miguel G\'omez-Zamalloa},
title = {Symbolic {E}xecution of {C}oncurrent {O}bjects in {CLP}},
booktitle = {Practical Aspects of Declarative Languages (PADL'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
year = {2012},
month = jan,
volume = {7149},
pages = {123-137},
doi = {10.1007/978-3-642-27694-1_10}
}
[11]
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Cost Analysis of Concurrent OO Programs.
In Hongseok Yang, editor, Programming Languages and Systems -
9th Asian Symposium, APLAS 2011, Kenting, Taiwan, December 5-7, 2011.
Proceedings, volume 7078 of Lecture Notes in Computer Science, pages
238-254. Springer, 2011.
[ bibtex |
abstract |
DOI ]
Cost analysis aims at automatically approximating the
resource consumption (e.g., memory) of
executing a program in terms of its input
parameters. While cost analysis for sequential
programming languages has received considerable
attention, concurrency and distribution have been
notably less studied. The main challenges (and our
contributions) of cost analysis in a concurrent
setting are: (1) Inferring precise size
relations for data in the program in the presence
of shared memory. This information is essential for
bounding the number of iterations of loops. (2)
Distribution suggests that analysis must keep the
cost of the diverse distributed components
separate. We handle this by means of a novel form of
recurrence equations which are parametric on
the notion of cost center, which represents a
corresponding component. To the best of our
knowledge, our work is the first one to present a
general cost analysis framework and an
implementation for concurrent OO programs.
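To give a flavour of the cost-center idea (an illustrative bound of ours, not
one reported in the paper), for a program distributed over two components o1
and o2 the analysis could infer an expression of the form

    UB(n) = c(o1) \cdot 4n \;+\; c(o2) \cdot (2n^2 + 3)

where c(o1) and c(o2) are symbolic cost centers: instantiating c(o1) = 1 and
c(o2) = 0 yields the cost attributed to o1 alone, and symmetrically for o2.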
@inproceedings{AlbertAGGP11,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G\'{o}mez-Zamalloa and Germ\'an Puebla},
editor = {Hongseok Yang},
title = {Cost {A}nalysis of {C}oncurrent {OO} Programs},
booktitle = {Programming Languages and Systems - 9th Asian Symposium,
APLAS 2011, Kenting, Taiwan, December 5-7, 2011. Proceedings},
series = {Lecture Notes in Computer Science},
year = {2011},
publisher = {Springer},
pages = {238-254},
volume = {7078},
doi = {10.1007/978-3-642-25318-8_19}
}
[12]
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Diana
Ramírez-Deantes.
From Object Fields to Local Variables: A Practical
Approach to Field-Sensitive Analysis.
In Radhia Cousot and Matthieu Martel, editors, Static Analysis -
17th International Symposium, SAS 2010, Perpignan, France, September 14-16,
2010. Proceedings, volume 6337 of Lecture Notes in Computer Science,
pages 100-116. Springer, 2010.
[ bibtex |
abstract |
DOI |
PDF ]
Static analysis which takes into account the value of
data stored in the heap is typically considered complex
and computationally intractable in practice. Thus, most
static analyzers do not keep track of object fields (or
fields for short), i.e., they are field-insensitive.
In this paper, we propose locality conditions for
soundly converting fields into local variables.
This way, field-insensitive analysis over the
transformed program can infer information on the original
fields.
Our notion of locality is context-sensitive and can be
applied both to numeric and reference fields.
We propose then a polyvariant transformation which
actually converts object fields meeting the locality
condition into variables and which is able to generate
multiple versions of code when this leads to increasing
the number of fields which satisfy the locality
conditions. We have implemented our analysis within a
termination analyzer for Java bytecode.
Interestingly, we can now prove termination of programs
which use iterators without the need of a sophisticated
heap analysis.
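A minimal Java-level sketch of the idea (ours; the paper's transformation
actually works on bytecode and is polyvariant):

    class Counter {
        int f;                        // numeric field used as a loop counter

        void advanceTo(int n) {       // original: the loop bound depends on
            while (this.f < n) {      // the value of the field f
                this.f = this.f + 1;
            }
        }

        void advanceToLocal(int n) {  // transformed: f is local to the method,
            int f = this.f;           // so it is read once, updated as a local
            while (f < n) {           // variable and written back on exit
                f = f + 1;
            }
            this.f = f;
        }
    }

Both methods behave identically provided no alias or concurrent writer can
modify f during the loop, which is what the locality conditions are meant to
guarantee; a field-insensitive analysis can then bound the second loop by n - f.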
@inproceedings{AlbertAGPR10,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Diana Ram\'{\i}rez-Deantes},
title = {From {O}bject {F}ields to {L}ocal {V}ariables: A {P}ractical {A}pproach
to {F}ield-{S}ensitive {A}nalysis},
booktitle = {Static Analysis - 17th International Symposium, SAS 2010,
Perpignan, France, September 14-16, 2010. Proceedings},
editor = {Radhia Cousot and
Matthieu Martel},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {6337},
year = {2010},
isbn = {978-3-642-15768-4},
pages = {100-116},
doi = {10.1007/978-3-642-15769-1_7},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
[13]
Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, Germán Puebla, Diana Ramirez, Guillermo Román-Díez, and Damiano Zanardini.
Termination and Cost Analysis with COSTA and its User
Interfaces.
In Spanish Conference on Programming and Computer Languages
(PROLE'09), volume 258 of Electronic Notes in Theoretical Computer
Science, pages 109-121. Elsevier, September 2009.
[ bibtex |
abstract |
DOI |
PDF ]
COSTA is a static analyzer for Java
bytecode which is able to infer cost and termination
information for large classes of programs. The
analyzer takes as input a program and a resource of
interest, in the form of a cost model, and aims at
obtaining an upper bound on the execution cost with
respect to the resource and at proving program
termination. The COSTA system has reached a
considerable degree of maturity in that (1) it
includes state-of-the-art techniques for statically
estimating the resource consumption and the
termination behavior of programs, plus a number of
specialized techniques which are required for
achieving accurate results in the context of
object-oriented programs, such as handling numeric
fields in value analysis; (2) it provides several
non-trivial notions of cost (resource consumption)
including, in addition to the number of execution
steps, the amount of memory allocated in the heap or
the number of calls to some user-specified method;
(3) it provides several user interfaces: a classical
command line, a Web interface which allows
experimenting remotely with the system without the
need of installing it locally, and a recently
developed Eclipse plugin which facilitates usage of
the analyzer, even during the development phase; (4)
it can deal with both the Standard and Micro
editions of Java. In the tool demonstration, we will
show that COSTA is able to produce meaningful
results for non-trivial programs, possibly using
Java libraries. Such results can then be used in
many applications, including program development,
resource usage certification, program optimization,
etc.
@inproceedings{AlbertAGGPRRZ09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Miguel G{\'o}mez-Zamalloa and Germ{\'a}n Puebla and Diana Ramirez and Guillermo Rom\'an-D\'iez and Damiano Zanardini},
title = {{T}ermination and {C}ost {A}nalysis with {COSTA} and its {U}ser {I}nterfaces},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'09)},
series = {Electronic Notes in Theoretical Computer Science},
publisher = {Elsevier},
volume = {258},
year = {2009},
pages = {109-121},
doi = {10.1016/j.entcs.2009.12.008},
month = sep
}
[14]
Elvira Albert, Diego Esteban Alonso Blas, Puri Arenas, Samir Genaim, and
Germán Puebla.
Asymptotic Resource Usage Bounds.
In Zhenjiang Hu, editor, Programming Languages and Systems, 7th
Asian Symposium, APLAS 2009, Seoul, Korea, December 14-16, 2009.
Proceedings, volume 5904 of Lecture Notes in Computer Science, pages
294-310. Springer, 2009.
[ bibtex |
abstract |
DOI |
PDF |
slides ]
When describing the resource usage of a program, it is
usual to talk in asymptotic terms, such as
the well-known “big O” notation, whereby we focus
on the behaviour of the program for large input data
and make a rough approximation by considering as
equivalent programs whose resource usage grows at
the same rate. Motivated by the existence of
non-asymptotic resource usage analyzers, in
this paper, we develop a novel transformation from a
non-asymptotic cost function (which can be produced
by multiple resource analyzers) into its asymptotic
form. Our transformation aims at producing tight
asymptotic forms which do not contain
redundant subexpressions (i.e., expressions
asymptotically subsumed by others). Interestingly,
we integrate our transformation at the heart of a
cost analyzer to generate asymptotic upper
bounds without having to first compute their
non-asymptotic counterparts. Our experimental
results show that, while non-asymptotic cost
functions become very complex, their asymptotic
forms are much more compact and manageable. This is
essential to improve scalability and to enable the
application of cost analysis in resource-aware
verification/certification.
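As a small illustration of the transformation (our example, not one from the
paper), a non-asymptotic upper bound such as

    UB(n, m) = 4n^2 + 2nm + 16n + 8

has the tight asymptotic form n^2 + nm: the subexpressions 16n and 8 are
asymptotically subsumed by n^2, whereas neither n^2 nor nm subsumes the other,
since n and m can grow independently.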
@inproceedings{AlbertAAGP09,
author = {Elvira Albert and Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {Asymptotic {R}esource {U}sage {B}ounds},
booktitle = {Programming Languages and Systems, 7th Asian Symposium, {APLAS} 2009,
Seoul, Korea, December 14-16, 2009. Proceedings},
year = 2009,
editor = {Zhenjiang Hu},
series = {Lecture Notes in Computer Science},
volume = {5904},
pages = {294-310},
publisher = {Springer},
doi = {10.1007/978-3-642-10672-9_21},
slides = {http://costa.ls.fi.upm.es/papers/costa/AlbertAAGP09-slides.pdf}
}
[15]
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Field-Sensitive Value Analysis by Field-Insensitive
Analysis.
In Ana Cavalcanti and Dennis Dams, editors, 16th International
Symposium on Formal Methods, FM'09, volume 5850 of Lecture Notes in
Computer Science, pages 370-386. Springer, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
Shared mutable data structures pose major problems in
static analysis and most analyzers are unable to
keep track of the values of numeric variables
stored in the heap. In this paper, we first
identify sufficient conditions under which heap
allocated numeric variables in object oriented
programs (i.e., numeric fields) can be handled as
non-heap allocated variables. Then, we present a
static analysis to infer which numeric fields
satisfy these conditions at the level of
(sequential) bytecode. This allows
instrumenting the code with ghost variables
which make such numeric fields observable to any
field-insensitive value analysis. Our experimental
results in termination analysis show that we greatly
enlarge the class of analyzable programs with a
reasonable overhead.
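A minimal Java-level sketch of the ghost-variable instrumentation (ours; the
analysis in the paper operates on bytecode):

    class Loop {
        int i;                          // numeric field driving the loop

        void run(int n) {
            int ghost_i = this.i;       // ghost local kept equal to this.i
            while (ghost_i < n) {       // reads of the field go through the ghost
                this.i = this.i + 1;
                ghost_i = ghost_i + 1;  // every field update is mirrored
            }
        }
    }

If the identified conditions hold (no alias or other writer touches i while run
executes), ghost_i and this.i remain equal, so any field-insensitive value
analysis can bound the loop by n - ghost_i without reasoning about the heap.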
@inproceedings{AlbertAGP09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{F}ield-{S}ensitive {V}alue {A}nalysis by {F}ield-{I}nsensitive {A}nalysis},
booktitle = {16th International Symposium on Formal Methods, FM'09},
year = 2009,
editor = {Ana Cavalcanti and Dennis Dams},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {5850},
pages = {370-386},
isbn = {978-3-642-05088-6},
doi = {10.1007/978-3-642-05089-3_24},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
[16]
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Cost Relation Systems: a Language-Independent Target
Language for Cost Analysis.
In Spanish Conference on Programming and Computer Languages
(PROLE'08), volume 248 of Electronic Notes in Theoretical Computer
Science, pages 31-46. Elsevier, 2009.
[ bibtex |
abstract |
DOI |
PDF ]
Cost analysis aims at obtaining information about the
execution cost of programs. This paper studies
cost relation systems (CESs): the sets of
recursive equations used in cost analysis in order
to capture the execution cost of programs in terms
of the size of their input arguments. We investigate
the notion of CES from a general perspective which
is independent of the particular cost analysis
framework. Our main contributions are: we provide a
formal definition of execution cost and of CES which
is not tied to a particular programming language; we
present the notion of sound CES, i.e., which
correctly approximates the cost of the corresponding
program; we identify the differences with recurrence
relation systems, their possible applications and the
new challenges that they bring about. Our general
framework is illustrated by instantiating it to cost
analysis of Java bytecode, Haskell, and Prolog.
@inproceedings{AlbertAGP08b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla},
title = {{C}ost {R}elation {S}ystems: a
{L}anguage-{I}ndependent {T}arget {L}anguage for
{C}ost {A}nalysis},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'08)},
series = {Electronic Notes in Theoretical Computer Science},
volume = {248},
year = {2009},
pages = {31-46},
publisher = {Elsevier},
doi = {10.1016/j.entcs.2009.07.057},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
[17]
Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Automatic Inference of Upper Bounds for Recurrence Relations in Cost
Analysis.
In María Alpuente and Germán Vidal, editors, Static
Analysis, 15th International Symposium, SAS 2008, Valencia, Spain, July
16-18, 2008. Proceedings, volume 5079 of Lecture Notes in Computer
Science, pages 221-237. Springer, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
non-trivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., non-recursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
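As a schematic example of the kind of cost equation system that is produced
and solved (ours, not taken from the paper), a method performing a linear-cost
helper call in each of n iterations could yield

    C(n) = c_0                          \text{if } n \le 0
    C(n) = c_1 + D(n) + C(n-1)          \text{if } n > 0
    D(n) = c_2 \cdot n

whose closed-form upper bound is C(n) \le c_0 + c_1 n + c_2 \, n(n+1)/2.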
@inproceedings{AlbertAGP08,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Germ{\'a}n Puebla},
title = {Automatic Inference of Upper Bounds for Recurrence Relations
in Cost Analysis},
booktitle = {Static Analysis, 15th International Symposium, SAS 2008,
Valencia, Spain, July 16-18, 2008. Proceedings},
editor = {Mar\'{\i}a Alpuente and
Germ{\'a}n Vidal},
year = {2008},
pages = {221-237},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {5079},
isbn = {978-3-540-69163-1},
doi = {10.1007/978-3-540-69166-2_15},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
[18]
Elvira Albert, Puri Arenas, Michael Codish, Samir Genaim, Germán Puebla,
and Damiano Zanardini.
Termination Analysis of Java Bytecode.
In Gilles Barthe and Frank S. de Boer, editors, Formal Methods
for Open Object-Based Distributed Systems, 10th IFIP WG 6.1 International
Conference, FMOODS 2008, Oslo, Norway, June 4-6, 2008, Proceedings, volume
5051 of Lecture Notes in Computer Science, pages 2-18. Springer, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
Termination analysis has received
considerable attention, traditionally in the context
of declarative programming, and recently also for
imperative languages. In existing approaches,
termination analysis is performed on source
programs. However, there are many situations,
including mobile code, where only the compiled code
is available. In this work we present an automatic
termination analysis for sequential Java Bytecode
programs. Such analysis presents all of the
challenges of analyzing a low-level language as well
as those introduced by object-oriented languages.
Interestingly, given a bytecode program, we produce
a constraint logic program (CLP) whose
termination entails termination of the bytecode
program. This allows applying the large body of work
in termination of CLP programs to termination of
Java bytecode. A prototype analyzer is described
and initial experimentation is reported.
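For intuition (a schematic example of ours, not the analyzer's actual output),
a bytecode loop implementing while (i < n) i++ gives rise to constrained
clauses of roughly this shape:

    loop(i, n) \leftarrow i \ge n
    loop(i, n) \leftarrow i < n \,\wedge\, i' = i + 1 \,\wedge\, loop(i', n)

Termination of all derivations of loop(i, n), provable here with the ranking
function n - i, entails termination of the original bytecode loop.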
@inproceedings{AlbertACGPZ08,
author = {Elvira Albert and Puri Arenas and Michael Codish and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Termination Analysis of Java Bytecode},
booktitle = {Formal Methods for Open Object-Based Distributed Systems, 10th IFIP WG 6.1 International Conference, FMOODS 2008, Oslo, Norway, June 4-6, 2008, Proceedings},
editor = {Gilles Barthe and Frank S. de Boer},
year = {2008},
pages = {2--18},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {5051},
isbn = {978-3-540-68862-4},
doi = {10.1007/978-3-540-68863-1_2},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
[19]
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Removing Useless Variables in Cost Analysis of Java
Bytecode.
In Roger L. Wainwright and Hisham Haddad, editors, Proceedings
of the 2008 ACM Symposium on Applied Computing (SAC), Fortaleza, Ceara,
Brazil, March 16-20, 2008, pages 368-375. ACM, 2008.
[ bibtex |
abstract |
DOI |
PDF ]
Automatic cost analysis has interesting
applications in the context of verification and
certification of mobile code. For instance, the
code receiver can use cost information in order to
decide whether to reject mobile code which has too
large cost requirements in terms of computing
resources (in time and/or space) or billable events
(SMSs sent, bandwidth required). Existing cost
analyses for a variety of languages describe the
resource consumption of programs by means of
Cost Equation Systems (CESs), which are
similar to, but more general than recurrence
equations. CESs express the cost of a program in
terms of the size of its input data. In a further
step, a closed form (i.e., non-recursive) solution
or upper bound can sometimes be found by using
existing Computer Algebra Systems (CASs), such as
Maple and Mathematica. In this work, we focus on
cost analysis of Java bytecode, a language which is
widely used in the context of mobile code and we
study the problem of identifying variables which are
useless in the sense that they do not affect
the execution cost and therefore can be ignored by
cost analysis. We identify two classes of useless
variables and propose automatic analysis techniques
to detect them. The first class corresponds to
stack variables that can be replaced by
program variables or constant values. The second
class corresponds to variables whose value is
cost-irrelevant, i.e., does not affect the
cost of the program. We propose an algorithm,
inspired by static slicing, which safely
identifies cost-irrelevant variables. The benefits
of eliminating useless variables are two-fold: (1)
cost analysis without useless variables can be more
efficient and (2) resulting CESs are more likely to
be solvable by existing CASs.
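A minimal Java illustration of a cost-irrelevant variable (our example, not
one from the paper):

    class Acc {
        static int sumTo(int n) {
            int sum = 0;                  // cost-irrelevant: its value never
            for (int i = 0; i < n; i++) { // influences the control flow, so it
                sum += i;                 // can be ignored when building the
            }                             // cost equations; the loop is bounded
            return sum;                   // by n alone
        }

        public static void main(String[] args) {
            System.out.println(sumTo(10));  // prints 45
        }
    }

Here n and i determine the execution cost, while sum only affects the returned
value.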
@inproceedings{AlbertAGPZ08,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {Removing {U}seless {V}ariables in {C}ost {A}nalysis of {J}ava {B}ytecode},
editor = {Roger L. Wainwright and Hisham Haddad},
booktitle = {Proceedings of the 2008 ACM Symposium on Applied Computing (SAC), Fortaleza, Ceara, Brazil, March 16-20, 2008},
year = {2008},
pages = {368--375},
publisher = {ACM},
isbn = {978-1-59593-753-7},
doi = {10.1145/1363686.1363779},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
[20]
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
A Generic Framework for the Cost Analysis of Java
Bytecode.
In Ernesto Pimentel, editor, Spanish Conference on Programming
and Computer Languages (PROLE'07), pages 61-70, September 2007.
[ bibtex |
abstract |
PDF ]
Cost analysis of Java bytecode is
complicated by its unstructured control flow, the
use of an operand stack and its object-oriented
programming features (like dynamic dispatching).
This paper addresses these problems and develops a
generic framework for the automatic cost analysis of
sequential Java bytecode. Our method generates
cost relations which define at compile-time
the cost of programs as a function of their input
data size. To the best of our knowledge, this is the
first approach to the automatic cost analysis of
Java bytecode.
@inproceedings{AlbertAGPZ07b,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {A {G}eneric {F}ramework for the {C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Spanish Conference on Programming and Computer Languages (PROLE'07)},
year = {2007},
pages = {61--70},
month = sep,
editor = {Ernesto Pimentel}
}
[21]
Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Java Bytecode.
In Rocco De Nicola, editor, Programming Languages and Systems,
16th European Symposium on Programming, ESOP 2007, Held as Part of the
Joint European Conferences on Theory and Practice of Software, ETAPS 2007,
Braga, Portugal, March 24 - April 1, 2007, Proceedings, volume 4421 of
Lecture Notes in Computer Science, pages 157-172. Springer-Verlag, March
2007.
[ bibtex |
abstract |
DOI |
PDF ]
Cost analysis of Java bytecode is
complicated by its unstructured control flow, the
use of an operand stack and its object-oriented
programming features (like dynamic dispatching).
This paper addresses these problems and develops a
generic framework for the automatic cost analysis of
sequential Java bytecode. Our method generates
cost relations which define at compile-time
the cost of programs as a function of their input
data size. To the best of our knowledge, this is the
first approach to the automatic cost analysis of
Java bytecode.
@inproceedings{AlbertAGPZ07,
author = {Elvira Albert and Puri Arenas and Samir Genaim and Germ{\'a}n Puebla and Damiano Zanardini},
title = {{C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Programming Languages and Systems, 16th European Symposium on Programming,
{ESOP} 2007, Held as Part of the Joint European Conferences on Theory
and Practice of Software, {ETAPS} 2007, Braga, Portugal, March 24
- April 1, 2007, Proceedings},
year = 2007,
editor = {Rocco De Nicola},
series = {Lecture Notes in Computer Science},
month = mar,
publisher = {Springer-Verlag},
volume = {4421},
pages = {157--172},
isbn = {978-3-540-71314-2},
doi = {10.1007/978-3-540-71316-6_12},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
[22]
Elvira Albert, Puri Arenas-Sánchez, Germán Puebla, and Manuel V.
Hermenegildo.
Reduced Certificates for Abstraction-Carrying Code.
In Logic Programming, 22nd International Conference, ICLP 2006,
Seattle, WA, USA, August 17-20, 2006, Proceedings, volume 4079 of
LNCS, pages 163-178. Springer, 2006.
[ bibtex |
abstract ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for mobile code safety in which the code supplier provides a program together with an abstraction whose validity entails compliance with a predefined safety policy. The abstraction plays thus the role of safety certificate and its generation is carried out automatically by a fixed-point analyzer. The advantage of providing a (fixed-point) abstraction to the code consumer is that its validity is checked in a single pass of an abstract interpretation-based checker. A main challenge is to reduce the size of certificates as much as possible while at the same time not increasing checking time. We introduce the notion of reduced certificate which characterizes the subset of the abstraction which a checker needs in order to validate (and re-construct) the full certificate in a single pass. Based on this notion, we instrument a generic analysis algorithm with the necessary extensions in order to identify the information relevant to the checker. We also provide a correct checking algorithm together with sufficient conditions for ensuring its completeness. The experimental results within the CiaoPP system show that our proposal is able to greatly reduce the size of certificates in practice.
@inproceedings{DBLP-conf-iclp-AlbertAPH06,
author = {Elvira Albert and
Puri Arenas-S{\'a}nchez and
Germ{\'a}n Puebla and
Manuel V. Hermenegildo},
title = {{Reduced Certificates for Abstraction-Carrying Code}},
booktitle = {{Logic Programming, 22nd International Conference, ICLP 2006, Seattle, WA, USA, August 17-20, 2006, Proceedings}},
year = {2006},
series = {LNCS},
volume = {4079},
pages = {163-178},
publisher = {Springer}
}
[23]
Elvira Albert, Puri Arenas, and Germán Puebla.
An Incremental Approach to Abstraction-Carrying Code.
In Logic for Programming, Artificial Intelligence, and
Reasoning, 13th International Conference, LPAR 2006, Phnom Penh, Cambodia,
November 13-17, 2006, Proceedings, volume 4246 of LNCS, pages
377-391. Springer, 2006.
[ bibtex |
abstract ]
Abstraction-Carrying Code (ACC) has recently been proposed as a framework for Proof-Carrying Code (PCC) in which the code supplier provides a program together with an abstraction (or abstract model of the program) whose validity entails compliance with a predefined safety policy. Existing approaches for PCC are developed under the assumption that the consumer reads and validates the entire program w.r.t. the full certificate at once, in a non incremental way. In the context of ACC, we propose an incremental approach to PCC for the generation of certificates and the checking of untrusted updates of a (trusted) program, i.e., when a producer provides a modified version of a previously validated program. Our proposal is that, if the consumer keeps the original (fixed-point) abstraction, it is possible to provide only the program updates and the incremental certificate (i.e., the difference of abstractions). Furthermore, it is now possible to define an incremental checking algorithm which, given the new updates and its incremental certificate, only re-checks the fixpoint for each procedure affected by the updates and the propagation of the effect of these fixpoint changes. As a consequence, both certificate transmission time and checking time can be reduced significantly.
@inproceedings{DBLP-conf-lpar-AlbertAP06,
author = {Elvira Albert and
Puri Arenas and
Germ{\'a}n Puebla},
title = {{An Incremental Approach to Abstraction-Carrying Code}},
booktitle = {{Logic for Programming, Artificial Intelligence, and Reasoning, 13th International Conference, LPAR 2006, Phnom Penh, Cambodia, November 13-17, 2006, Proceedings}},
year = {2006},
volume = {4246},
series = {LNCS},
pages = {377-391},
publisher = {Springer}
}
[24]
Puri Arenas, Francisco Javier López-Fraguas, and Mario
Rodríguez-Artalejo.
Functional Plus Logic Programming with Built-In and Symbolic
Constraints.
In Principles and Practice of Declarative Programming,
International Conference PPDP'99, Paris, France, September 29 - October 1,
1999, Proceedings, volume 1702 of Lecture Notes in Computer Science,
pages 152-169. Springer, 1999.
[ bibtex |
abstract ]
In this paper we propose a lazy functional logic language, named SETA, which allows handling multisets, built-in arithmetic constraints over the domain of real numbers, as well as various symbolic constraints over datatypes. As our main theoretical results, we have proved the existence of free term models for all SETA programs and we have developed a correct and complete goal solving mechanism.
@inproceedings{DBLP:conf/ppdp/Arenas-SanchezLR99,
author = {Puri Arenas and
Francisco Javier L{\'o}pez-Fraguas and
Mario Rodr\'{\i}guez-Artalejo},
title = {Functional Plus {L}ogic {P}rogramming with {B}uilt-In and {S}ymbolic
{C}onstraints},
booktitle = {Principles and Practice of Declarative Programming, International
Conference PPDP'99, Paris, France, September 29 - October
1, 1999, Proceedings},
year = {1999},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {1702},
pages = {152-169}
}
[25]
Puri Arenas, Francisco Javier López-Fraguas, and Mario
Rodríguez-Artalejo.
Embedding Multiset Constraints into a Lazy Functional Logic
Language.
In Principles of Declarative Programming, 10th International
Symposium, PLILP'98 Held Jointly with the 7th International Conference,
ALP'98, Pisa, Italy, September 16-18, 1998, Proceedings, volume 1490 of
Lecture Notes in Computer Science, pages 429-444. Springer, 1998.
[ bibtex |
abstract |
DOI ]
In recent works, we have proposed a general framework for lazy functional logic programming with algebraic polymorphic types, i.e., parametric datatypes whose data constructors fulfill a given set of equational axioms. The aim of this paper is to investigate implementation techniques for an extended instance of this framework, namely, lazy functional logic programming with multisets and constraints. We consider a language (named Seta) which supports a polymorphic datatype Mset(α) along with specific constraints for multisets: strict equality (already present in the general framework), disequality, membership and non-membership. We describe a quite readable Prolog-based implementation which can be executed on top of any Prolog system that provides the ability to solve simple arithmetic constraints.
@inproceedings{DBLP:conf/plilp/Arenas-SanchezLR98,
author = {Puri Arenas and
Francisco Javier L{\'o}pez-Fraguas and
Mario Rodr\'{\i}guez-Artalejo},
title = {Embedding {M}ultiset {C}onstraints into a {L}azy {F}unctional {L}ogic
{L}anguage},
booktitle = {Principles of Declarative Programming, 10th International
Symposium, PLILP'98 Held Jointly with the 7th International
Conference, ALP'98, Pisa, Italy, September 16-18, 1998,
Proceedings},
year = {1998},
pages = {429-444},
ee = {http://dx.doi.org/10.1007/BFb0056631},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {1490}
}
[26]
Puri Arenas and Mario Rodríguez-Artalejo.
A Lazy Narrowing Calculus for Functional Logic Programming with
Algebraic Polymorphic Types.
In Logic Programming, Proceedings of the 1997 International
Symposium, pages 53-67. The MIT Press, 1997.
[ bibtex |
abstract ]
In a recent work, we have proposed a semantic framework for lazy functional logic programming with algebraic polymorphic types, i.e., polymorphic types whose data constructors must obey a finite set C of equational axioms. That framework included C-based rewriting calculi and a notion of model, providing soundness and completeness of C-based rewriting w.r.t. models, existence of free models for all programs and type preservation results, but no goal solving mechanism was presented. The present paper extends the previous one by developing a sound and complete procedure for goal solving, which is based on the combination of lazy narrowing with unification modulo C. The resulting language is quite expressive for many kinds of problems, e.g., action and change problems.
@inproceedings{DBLP:conf/slp/Arenas-SanchezR97,
author = {Puri Arenas and
Mario Rodr\'{\i}guez-Artalejo},
title = {A Lazy Narrowing Calculus for Functional Logic Programming
with Algebraic Polymorphic Types},
booktitle = {Logic Programming, Proceedings of the 1997 International Symposium},
year = {1997},
pages = {53-67},
publisher = {The MIT Press}
}
[27]
Puri Arenas and Mario Rodríguez-Artalejo.
A Semantic Framework for Functional Logic Programming with
Algebraic Polymorphic Types.
In TAPSOFT'97: Theory and Practice of Software Development, 7th
International Joint Conference CAAP/FASE, Lille, France, April 14-18, 1997,
Proceedings, volume 1214 of Lecture Notes in Computer Science, pages
453-464. Springer, 1997.
[ bibtex |
abstract |
DOI ]
We propose a formal framework for functional logic programming, supporting lazy functions, non-determinism and polymorphic datatypes whose data constructors obey a given set C of equational axioms. On top of a given C, we specify a program as a set R of C-based conditional rewriting rules for defined functions. We argue that equational logic does not supply the proper semantics for such programs. Therefore, we present an alternative logic which includes C-based rewriting calculi and a notion of model. We get soundness and completeness for C-based rewriting w.r.t. models, existence of free models for all programs, and type preservation results.
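As a concrete instance of such a set C of equational axioms (an illustrative
example of ours, in the spirit of the multiset datatype used in this line of
work), the constructor insert of a polymorphic multiset type can be required
to satisfy

    insert(x, insert(y, m)) \approx insert(y, insert(x, m))

so that the order in which elements are inserted is semantically irrelevant,
while the number of copies of each element remains observable.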
@inproceedings{DBLP:conf/tapsoft/Arenas-SanchezR97,
author = {Puri Arenas and
Mario Rodr\'{\i}guez-Artalejo},
title = {A {S}emantic {F}ramework for {F}unctional {L}ogic {P}rogramming with
{A}lgebraic {P}olymorphic {T}ypes},
booktitle = {TAPSOFT'97: Theory and Practice of Software Development,
7th International Joint Conference CAAP/FASE, Lille, France,
April 14-18, 1997, Proceedings},
pages = {453-464},
ee = {http://dx.doi.org/10.1007/BFb0030618},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {1214},
year = {1997},
isbn = {3-540-62781-2}
}
[28]
Puri Arenas and Agostino Dovier.
Minimal Set Unification.
In Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands, September
20-22, 1995, Proceedings, volume 982 of Lecture Notes in Computer
Science, pages 397-414. Springer, 1995.
[ bibtex |
abstract |
DOI ]
A unification algorithm is said to be minimal for a unification problem if it generates exactly a complete set of minimal unifiers, without instances, without repetitions. The aim of this paper is to describe a new set unification algorithm which is minimal for a significant collection of sample problems that can be used as benchmarks for testing any set unification algorithm. To this end, a deep combinatorial study of such problems has been carried out. Moreover, an existing naïve set unification algorithm has also been tested in order to show its bad behavior on most of the sample problems.
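For a concrete feel of the problem (our example, not one of the paper's
benchmarks), the set unification problem {X, Y} = {a, b}, with distinct
variables X, Y and distinct constants a, b, has exactly two minimal unifiers,
{X/a, Y/b} and {X/b, Y/a}; more generally, {X_1, ..., X_n} = {a_1, ..., a_n}
under the same distinctness assumptions has n! minimal unifiers, so a complete
and minimal set of unifiers can itself grow factorially.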
@inproceedings{DBLP:conf/plilp/Arenas-SanchezD95,
author = {Puri Arenas and Agostino Dovier},
title = {Minimal {S}et {U}nification},
booktitle = {Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands,
September 20-22, 1995, Proceedings},
year = {1995},
pages = {397-414},
ee = {http://dx.doi.org/10.1007/BFb0026832},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {982}
}
[29]
Puri Arenas and Ana Gil-Luezas.
A Debugging Model for Lazy Narrowing.
In Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands, September
20-22, 1995, Proceedings, volume 982 of Lecture Notes in Computer
Science, pages 453-454. Springer, 1995.
[ bibtex |
abstract |
DOI ]
@inproceedings{DBLP:conf/plilp/Arenas-SanchezG95,
author = {Puri Arenas and
Ana Gil-Luezas},
title = {A {D}ebugging {M}odel for {L}azy {N}arrowing},
booktitle = {Programming Languages: Implementations, Logics and Programs,
7th International Symposium, PLILP'95, Utrecht, The Netherlands,
September 20-22, 1995, Proceedings},
year = {1995},
pages = {453-454},
ee = {http://dx.doi.org/10.1007/BFb0026836},
bibsource = {DBLP, http://dblp.uni-trier.de},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {982}
}
[30]
Puri Arenas, Ana Gil-Luezas, and Francisco Javier López-Fraguas.
Combining Lazy Narrowing with Disequality Constraints.
In Programming Language Implementation and Logic Programming,
6th International Symposium, PLILP'94, Madrid, Spain, September 14-16, 1994,
Proceedings, volume 844 of Lecture Notes in Computer Science, pages
385-399. Springer, 1994.
[ bibtex |
abstract |
DOI ]
We investigate an extension of a lazy functional logic language, which uses term disequations both in programs and in computed answers. The semantic properties of the language are derived from the fact that it can be viewed as an instance of the CFLP(X)-scheme proposed for constraint functional logic programming. In particular, the operational semantics for CFLP(X) ― so called lazy constrained narrowing ― is a computation mechanism parameterized by a constraint solver over the underlying domain. We define a constraint solver for our language, whose properties ensure completeness. As a further step towards implementation, we present an executable Prolog specification of the operational semantics, which has been implemented on top of a Prolog system. Experimental results have shown good performance.
@inproceedings{DBLP:conf/plilp/Arenas-SanchezGL94,
author = {Puri Arenas and
Ana Gil-Luezas and
Francisco Javier L{\'o}pez-Fraguas},
title = {Combining {L}azy {N}arrowing with {D}isequality {C}onstraints},
booktitle = {Programming Language Implementation and Logic Programming,
6th International Symposium, PLILP'94, Madrid, Spain, September
14-16, 1994, Proceedings},
year = {1994},
pages = {385-399},
ee = {http://dx.doi.org/10.1007/3-540-58402-1_27},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {844},
bibsource = {DBLP, http://dblp.uni-trier.de}
}