@inproceedings{504276296a75464eba8d698828613896,
title = "The Modest State of Learning, Sampling, and Verifying Strategies",
abstract = "Optimal decision-making under stochastic uncertainty is a core problem tackled in artificial intelligence/machine learning (AI), planning, and verification. Planning and AI methods aim to find good or optimal strategies to maximise rewards or the probability of reaching a goal. Verification approaches focus on calculating the probability or reward, obtaining the strategy as a side effect. In this paper, we connect three strands of work on obtaining strategies implemented in the context of the Modest Toolset: statistical model checking with either lightweight scheduler sampling or deep learning, and probabilistic model checking. We compare their different goals and abilities, and show newly extended experiments on Racetrack benchmarks that highlight the tradeoffs between the methods. We conclude with an outlook on improving the existing approaches and on generalisations to continuous models, and emphasise the need for further tool development to integrate methods that find, evaluate, compare, and explain strategies.",
note = "This work was part of the MISSION (Models in Space Systems: Integration, Operation, and Networking) project, funded by the European Union{\textquoteright}s Horizon 2020 research and innovation programme under Marie Sk{\l}odowska-Curie Actions grant number 101008233.",
author = "Arnd Hartmanns and Michaela Klauck",
year = "2022",
month = oct,
day = "17",
doi = "10.1007/978-3-031-19759-8_25",
language = "English",
isbn = "978-3-031-19758-1",
volume = "13703",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "406--432",
editor = "Tiziana Margaria and Bernhard Steffen",
booktitle = "Leveraging Applications of Formal Methods, Verification and Validation. Adaptation and Learning",
address = "Cham",
}