@inproceedings{9f8bab0bf887488e88d2fc279df17665,
  title     = {Variance Tolerance Factors For Interpreting All Neural Networks},
  abstract  = {Black box models only provide results for deep learning tasks, and lack informative details about how these results were obtained. Knowing how input variables are related to outputs, in addition to why they are related, can be critical to translating predictions into laboratory experiments, or defending a model prediction under scrutiny. In this paper, we propose a general theory that defines a variance tolerance factor (VTF) inspired by influence function, to interpret features in the context of black box neural networks by ranking the importance of features, and construct a novel architecture consisting of a base model and feature model to explore the feature importance in a Rashomon set that contains all well-performing neural networks. Two feature importance ranking methods in the Rashomon set and a feature selection method based on the VTF are created and explored. A thorough evaluation on synthetic and benchmark datasets is provided, and the method is applied to two real world examples predicting the formation of noncrystalline gold nanoparticles and the chemical toxicity of 1793 aromatic compounds exposed to a protozoan ciliate for 40 hours.},
  keywords  = {Rashomon sets, black box model, feature importance, influence function},
  author    = {Li, Sichao and Barnard, Amanda},
  note      = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 2023 International Joint Conference on Neural Networks, IJCNN 2023 ; Conference date: 18-06-2023 Through 23-06-2023},
  year      = {2023},
  month     = jun,
  doi       = {10.1109/IJCNN54540.2023.10191646},
  language  = {English},
  series    = {Proceedings of the International Joint Conference on Neural Networks},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  booktitle = {IJCNN 2023 - International Joint Conference on Neural Networks, Proceedings},
  address   = {United States},
}