@Inbook{Ancona2019gradient,
  author    = "Ancona, Marco and Ceolini, Enea and {\"O}ztireli, Cengiz and Gross, Markus",
  editor    = "Samek, Wojciech and Montavon, Gr{\'e}goire and Vedaldi, Andrea and Hansen, Lars Kai and M{\"u}ller, Klaus-Robert",
  title     = "Gradient-Based Attribution Methods",
  booktitle = "Explainable AI: Interpreting, Explaining and Visualizing Deep Learning",
  year      = "2019",
  publisher = "Springer International Publishing",
  address   = "Cham",
  pages     = "169--191",
  abstract  = "The problem of explaining complex machine learning models, including Deep Neural Networks, has gained increasing attention over the last few years. While several methods have been proposed to explain network predictions, the definition itself of explanation is still debated. Moreover, only a few attempts to compare explanation methods from a theoretical perspective have been made. In this chapter, we discuss the theoretical properties of several attribution methods and show how they share the same idea of using the gradient information as a descriptive factor for the functioning of a model. Finally, we discuss the strengths and limitations of these methods and compare them with available alternatives.",
  isbn      = "978-3-030-28954-6",
  doi       = "10.1007/978-3-030-28954-6_9",
  url       = "https://doi.org/10.1007/978-3-030-28954-6_9"
}