@article{discovery10205986,
  author    = {Dramsch, Jesper S{\"o}ren and Kuglitsch, Monique M. and Fern{\'a}ndez-Torres, Miguel-{\'A}ngel and Toreti, Andrea and Albayrak, Rustem Arif and Nava, Lorenzo and Ghaffarian, Saman and Cheng, Ximeng and Ma, Jackie and Samek, Wojciech and Venguswamy, Rudy and Koul, Anirudh and Muthuregunathan, Raghavan and Hrast Essenfelder, Arthur},
  title     = {Explainability can foster trust in artificial intelligence in geoscience},
  journal   = {Nature Geoscience},
  year      = {2025},
  month     = feb,
  volume    = {18},
  number    = {2},
  pages     = {112--114},
  publisher = {Springer Science and Business Media LLC},
  doi       = {10.1038/s41561-025-01639-x},
  note      = {This version is the author accepted manuscript. For information on re-use, please refer to the publisher's terms and conditions.},
  abstract  = {Uptake of explainable artificial intelligence (XAI) methods in geoscience is currently limited. We argue that such methods that reveal the decision processes of AI models can foster trust in their results and facilitate the broader adoption of AI.},
}