@incollection{e6286f45667246749ce9858696c00622,
  title     = {Approximate Dynamic Programming by Practical Examples},
  abstract  = {Computing the exact solution of an MDP model is generally difficult and possibly intractable for realistically sized problem instances. A powerful technique to solve the large scale discrete time multistage stochastic control processes is Approximate Dynamic Programming (ADP). Although ADP is used as an umbrella term for a broad spectrum of methods to approximate the optimal solution of MDPs, the common denominator is typically to combine optimization with simulation, use approximations of the optimal values of the Bellman{\textquoteright}s equations, and use approximate policies. This chapter aims to present and illustrate the basics of these steps by a number of practical and instructive examples. We use three examples (1) to explain the basics of ADP, relying on value iteration with an approximation of the value functions, (2) to provide insight into implementation issues, and (3) to provide test cases for the reader to validate its own ADP implementations.},
  keywords  = {METIS-318330, IR-101811},
  author    = {Mes, Martijn R. K. and {Perez Rivera}, Arturo Eduardo},
  year      = {2017},
  month     = mar,
  day       = {11},
  doi       = {10.1007/978-3-319-47766-4_3},
  language  = {English},
  isbn      = {978-3-319-47766-4},
  series    = {International Series in Operations Research \& Management Science},
  volume    = {248},
  publisher = {Springer},
  pages     = {63--101},
  editor    = {Boucherie, Richard and van Dijk, Nico M.},
  booktitle = {Markov Decision Processes in Practice},
  address   = {Germany},
}