publications
2025
-
Workshop PaperProceedings of 1st Workshop on Advancing Artificial Intelligence through Theory of MindAbrini, Mouad, Abend, Omri, Acklin, Dina, Admoni, Henny, Aichinger, Gregor, Alon, Nitay, Ashktorab, Zahra, Atreja, Ashish, Auron, Moises, Aufreiter, Alexander, and al.,arXiv preprint arXiv:2505.03770 2025
This volume includes a selection of papers presented at the Workshop on Advancing Artificial Intelligence through Theory of Mind held at AAAI 2025 in Philadelphia US on 3rd March 2025. The purpose of this volume is to provide an open access and curated anthology for the ToM and AI research community.
@article{abrini2025proceeding,
  abbr        = {Workshop Paper},
  bibtex_show = {true},
  title       = {Proceedings of 1st Workshop on Advancing Artificial Intelligence through Theory of Mind},
  author      = {Abrini, Mouad and Abend, Omri and Acklin, Dina and Admoni, Henny and Aichinger, Gregor and Alon, Nitay and Ashktorab, Zahra and Atreja, Ashish and Auron, Moises and Aufreiter, Alexander and others},
  journal     = {arXiv preprint arXiv:2505.03770},
  year        = {2025},
  pub_year    = {2025},
  citation    = {arXiv preprint arXiv:2505.03770, 2025},
  html        = {https://arxiv.org/abs/2505.03770}
}
-
Workshop PaperOver-Relying on Reliance: Towards Realistic Evaluations of AI-Based Clinical Decision SupportSivaraman, Venkatesh, Morrison, Katelyn, Epperson, Will, and Perer, AdamWorkshop on Envisioning the Future of Interactive Health 2025
As AI-based clinical decision support (AI-CDS) is introduced in more and more aspects of healthcare services, HCI research plays an increasingly important role in designing for complementarity between AI and clinicians. However, current evaluations of AI-CDS often fail to capture when AI is and is not useful to clinicians. This position paper reflects on our work and influential AI-CDS literature to advocate for moving beyond evaluation metrics like Trust, Reliance, Acceptance, and Performance on the AI’s task (what we term the "trap" of human-AI collaboration). Although these metrics can be meaningful in some simple scenarios, we argue that optimizing for them ignores important ways that AI falls short of clinical benefit, as well as ways that clinicians successfully use AI. As the fields of HCI and AI in healthcare develop new ways to design and evaluate CDS tools, we call on the community to prioritize ecologically valid, domain-appropriate study setups that measure the emergent forms of value that AI can bring to healthcare professionals.
@article{sivaraman2025overrelyin,
  abbr        = {Workshop Paper},
  bibtex_show = {true},
  title       = {Over-Relying on Reliance: Towards Realistic Evaluations of {AI-Based} Clinical Decision Support},
  author      = {Sivaraman, Venkatesh and Morrison, Katelyn and Epperson, Will and Perer, Adam},
  journal     = {Workshop on Envisioning the Future of Interactive Health},
  year        = {2025},
  pub_year    = {2025},
  html        = {https://arxiv.org/pdf/2504.07423}
}
-
Conference PaperToward Interpretable 3D Diffusion in Radiology: Token-Wise Attribution for Text-to-CT SynthesisBradshaw, Aidan, Morrison, Katelyn, Mathur, Arpit, Dai, Weicheng, Eslami, Motahhare, and Perer, Adam2025
Diffusion-based generative models have emerged as powerful tools for synthesizing anatomically realistic computed tomography (CT) scans from free-text prompts but remain opaque when delineating token influence on the conditioned CT volume. This lack of interpretability limits their clinical applicability, trustworthiness, and adoption across diagnostic and decision-support scenarios. We present a token-wise voxel attribution method for 3D text-to-image diffusion models that leverages cross-attention in U-Net–based architectures to extract individual token attention maps for synthetic CT scans. Our method visualizes individual, joint, or aggregated token-level voxel attributions during CT synthesis, helping to alleviate concerns about model transparency. This lays the groundwork for practical methods and structured explanations illustrating what aspects of attribution work well, where current limitations lie, and how researchers might approach explainable AI for 3D text-to-image diffusion models in radiology moving forward.
@misc{bradshaw2025towardinte,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {Toward Interpretable {3D} Diffusion in Radiology: Token-Wise Attribution for {Text-to-CT} Synthesis},
  author      = {Bradshaw, Aidan and Morrison, Katelyn and Mathur, Arpit and Dai, Weicheng and Eslami, Motahhare and Perer, Adam},
  year        = {2025},
  pub_year    = {2025},
  citation    = {Medical Imaging with Deep Learning-Short Papers, 2025},
  conference  = {Medical Imaging with Deep Learning-Short Papers},
  pdf         = {https://openreview.net/pdf?id=DTYFRzRPQn}
}
-
Conference PaperA Human-Centered Approach to Identifying Promises, Risks, & Challenges of Text-to-Image Generative AI in RadiologyMorrison, Katelyn, Mathur, Arpit, Bradshaw, Aidan, Wartmann, Tom, Lundi, Steven, Zandifar, Afrooz, Dai, Weichang, Batmanghelich, Kayhan, Eslami, Motahhare, and Perer, AdamProceedings of the AAAI/ACM Conference on AI, Ethics, and Society 2025
As text-to-image generative models rapidly improve, AI researchers are making significant advances in developing domain-specific models capable of generating complex medical imagery from text prompts. Despite this, these technical advancements have overlooked whether and how medical professionals would benefit from and use text-to-image generative AI (GenAI) in practice. By developing domain-specific GenAI without involving stakeholders, we risk the potential of building models that are either not useful or even more harmful than helpful. In this paper, we adopt a human-centered approach to responsible model development by involving stakeholders in evaluating and reflecting on the promises, risks, and challenges of a novel text-to-CT Scan GenAI model. Through exploratory model prompting activities, we uncover the perspectives of medical students, radiology trainees, and radiologists on the role that text-to-CT Scan GenAI can play across medical education, training, and practice. This human-centered approach additionally enabled us to surface technical challenges and domain-specific risks of generating synthetic medical images. We conclude by reflecting on the implications of medical text-to-image GenAI.
@article{morrison2025ahumancent,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  selected    = {true},
  title       = {A Human-Centered Approach to Identifying Promises, Risks, \& Challenges of Text-to-Image Generative {AI} in Radiology},
  author      = {Morrison, Katelyn and Mathur, Arpit and Bradshaw, Aidan and Wartmann, Tom and Lundi, Steven and Zandifar, Afrooz and Dai, Weichang and Batmanghelich, Kayhan and Eslami, Motahhare and Perer, Adam},
  journal     = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society},
  volume      = {8},
  number      = {2},
  pages       = {1758--1770},
  year        = {2025},
  pub_year    = {2025},
  citation    = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society 8 (2 ..., 2025},
  pdf         = {https://ojs.aaai.org/index.php/AIES/article/download/36672/38810}
}
-
Journal PaperImperfections of XAI: Phenomena Influencing AI-Assisted Decision-MakingSpitzer, Philipp, Morrison, Katelyn, Turri, Violet, Feng, Michelle, Perer, Adam, and Kühl, NiklasACM Transactions on Interactive Intelligent Systems 2025
With the increasing use of AI, recent research in human–computer interaction explores Explainable AI (XAI) to make AI advice more interpretable. While research addresses the effects of incorrect AI advice on AI-assisted decision-making, the impact of incorrect explanations is neglected so far. Additionally, recent work shows that not only different explanation modalities impact decision-makers, but also human factors play a critical role. To analyze relevant phenomena influencing AI-assisted decision-making, this work explores the impacting factors by conceptualizing theories of appropriate reliance and taking the first steps toward empirical evidence. We show that humans’ reliance on AI and the human–AI team performance are impacted by imperfect XAI in a study with 136 participants. Additionally, we find that cognitive styles affect decision-making in different explanation modalities. Hence, we shed light on ...
@article{spitzer2025imperfecti,
  abbr        = {Journal Paper},
  bibtex_show = {true},
  title       = {Imperfections of {XAI}: Phenomena Influencing {AI-Assisted} Decision-Making},
  author      = {Spitzer, Philipp and Morrison, Katelyn and Turri, Violet and Feng, Michelle and Perer, Adam and Kühl, Niklas},
  journal     = {ACM Transactions on Interactive Intelligent Systems},
  publisher   = {ACM},
  volume      = {15},
  number      = {3},
  pages       = {1--40},
  year        = {2025},
  pub_year    = {2025},
  citation    = {ACM Transactions on Interactive Intelligent Systems 15 (3), 1-40, 2025},
  html        = {https://dl.acm.org/doi/pdf/10.1145/3750052}
}
-
Workshop PaperEstablishing the Cooperative Game Wavelength as a Testbed to Explore Mutual Theory of MindMorrison, Katelyn, Ashktorab, Zahra, Bouneffouf, Djallel, and Enrique, GabrielToM4AI 2025 2025
Machine learning (ML) and human-centered AI (HCAI) researchers have considered numerous methods to evaluate Theory of Mind (ToM)-like capabilities in artificial intelligence (AI). These methods have independently captured multiple aspects of ToM capabilities (i.e., beliefs, knowledge). Recent research has proposed exploring Mutual Theory of Mind (MToM) as a way to understand how a human's mental model and an AI's user model can be mutually shaped to benefit future interactions. However, there is a lack of methods for understanding the development and impact of MToM-like capabilities in human-AI teams. We propose using a collaborative party game called Wavelength as a testbed to explore the complexities of MToM-like capabilities in human-AI teams. We compare Wavelength to other methods (i.e., Overcooked, Hanabi) and discuss how game mechanics help players mutually construct, recognize, and revise their models of their teammates. Lastly, we briefly suggest how future work can explore MToM with Wavelength.
@article{morrison2025establishi,
  abbr        = {Workshop Paper},
  bibtex_show = {true},
  title       = {Establishing the Cooperative Game {Wavelength} as a Testbed to Explore Mutual Theory of Mind},
  author      = {Morrison, Katelyn and Ashktorab, Zahra and Bouneffouf, Djallel and Enrique, Gabriel},
  journal     = {ToM4AI 2025},
  pages       = {54},
  year        = {2025},
  pub_year    = {2025},
  citation    = {ToM4AI 2025, 54, 2025},
  html        = {https://bit.ly/4op0ui8}
}
-
WorkshopHybrid Automation Experiences–Communication, Coordination, and Collaboration within Human-AI TeamsSpitzer, Philipp, Baldauf, Matthias, Palanque, Philippe, Roto, Virpi, Morrison, Katelyn, Zipperling, Domenique, and Holstein, Joshua2025
Automated systems and AI-assisted workflows are evolving from support tools to sophisticated collaborators in so-called “hybrid” teams with human and AI-based members. This evolution presents new challenges and opportunities in the field of “Automation Experience”, particularly in how these team members communicate, coordinate, and collaborate. This workshop aims to contribute to the design of automated systems that augment humans’ capabilities, maintain human autonomy, and create positive automation experiences in human-AI teams. Through presentations, a keynote talk, discussions, and interactive collaborative activities, researchers and practitioners from different fields will address challenges regarding communication, coordination, and collaboration within hybrid human-AI teams. The workshop aims to spark innovative research directions and foster collaborative interdisciplinary initiatives that ...
@misc{spitzer2025hybridauto,
  abbr        = {Workshop},
  bibtex_show = {true},
  title       = {Hybrid Automation Experiences -- Communication, Coordination, and Collaboration within {Human-AI} Teams},
  author      = {Spitzer, Philipp and Baldauf, Matthias and Palanque, Philippe and Roto, Virpi and Morrison, Katelyn and Zipperling, Domenique and Holstein, Joshua},
  pages       = {1--6},
  year        = {2025},
  pub_year    = {2025},
  citation    = {Proceedings of the Extended Abstracts of the CHI Conference on Human Factors ..., 2025},
  html        = {https://dl.acm.org/doi/abs/10.1145/3706599.3706738}
}
-
Journal PaperDon’t be Fooled: The Misinformation Effect of Explanations in Human-AI CollaborationSpitzer, Philipp, Holstein, Joshua, Morrison, Katelyn, Holstein, Kenneth, Satzger, Gerhard, and Kühl, NiklasInternational Journal of Human–Computer Interaction 2025
Across various applications, humans increasingly use black-box artificial intelligence (AI) systems without insight into these systems’ reasoning. To counter this opacity, explainable AI (XAI) methods promise enhanced transparency and interpretability. While recent studies have explored how XAI affects human-AI collaboration, few have examined the potential pitfalls caused by incorrect explanations. The implications for humans can be far-reaching but have not been explored extensively. To investigate this, we ran a study (n=160) on AI-assisted decision-making in which humans were supported by XAI. Our findings reveal a misinformation effect when incorrect explanations accompany correct AI advice with implications post-collaboration. This effect causes humans to infer flawed reasoning strategies, hindering task execution and demonstrating impaired procedural knowledge. Additionally, incorrect explanations compromise human-AI team-performance during collaboration. With our work, we contribute to HCI by providing empirical evidence for the negative consequences of incorrect explanations on humans post-collaboration and outlining guidelines for designers of AI.
@article{spitzer2024dontbefool,
  abbr        = {Journal Paper},
  bibtex_show = {true},
  title       = {Don't be Fooled: The Misinformation Effect of Explanations in {Human-AI} Collaboration},
  author      = {Spitzer, Philipp and Holstein, Joshua and Morrison, Katelyn and Holstein, Kenneth and Satzger, Gerhard and Kühl, Niklas},
  journal     = {International Journal of Human--Computer Interaction},
  year        = {2025},
  pub_year    = {2025},
  html        = {https://arxiv.org/pdf/2409.12809}
}
2024
-
Conference PaperMedSyn: Text-guided Anatomy-aware Synthesis of High-Fidelity 3D CT ImagesXu, Yanwu, Sun, Li, Peng, Wei, Jia, Shuyue, Morrison, Katelyn, Perer, Adam, Zandifar, Afrooz, Visweswaran, Shyam, Eslami, Motahhare, and Batmanghelich, KayhanIEEE Transactions on Medical Imaging 2024
This paper introduces an innovative methodology for producing high-quality 3D lung CT images guided by textual information. While diffusion-based generative models are increasingly used in medical imaging, current state-of-the-art approaches are limited to low-resolution outputs and underutilize radiology reports’ abundant information. The radiology reports can enhance the generation process by providing additional guidance and offering fine-grained control over the synthesis of images. Nevertheless, expanding text-guided generation to high-resolution 3D images poses significant memory and anatomical detail-preserving challenges. Addressing the memory issue, we introduce a hierarchical scheme that uses a modified UNet architecture. We start by synthesizing low-resolution images conditioned on the text, serving as a foundation for subsequent generators for complete volumetric data. To ensure the ...
@article{xu2024medsyntext,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {{MedSyn}: Text-guided Anatomy-aware Synthesis of High-Fidelity {3D} {CT} Images},
  author      = {Xu, Yanwu and Sun, Li and Peng, Wei and Jia, Shuyue and Morrison, Katelyn and Perer, Adam and Zandifar, Afrooz and Visweswaran, Shyam and Eslami, Motahhare and Batmanghelich, Kayhan},
  journal     = {IEEE Transactions on Medical Imaging},
  publisher   = {IEEE},
  year        = {2024},
  pub_year    = {2024},
  citation    = {IEEE Transactions on Medical Imaging, 2024},
  pdf         = {https://arxiv.org/pdf/2310.03559}
}
-
Conference PaperThe Impact of Imperfect XAI on Human-AI Decision-MakingMorrison, Katelyn, Spitzer, Philipp, Turri, Violet, Feng, Michelle, Kühl, Niklas, and Perer, AdamACM SIGCHI Conference on Computer-Supported Cooperative Work & Social Computing (CSCW) 2024
Explainability techniques are rapidly being developed to improve human-AI decision-making across various cooperative work settings. Consequently, previous research has evaluated how decision-makers collaborate with imperfect AI by investigating appropriate reliance and task performance with the aim of designing more human-centered computer-supported collaborative tools. Several human-centered explainable AI (XAI) techniques have been proposed in hopes of improving decision-makers’ collaboration with AI; however, these techniques are grounded in findings from previous studies that primarily focus on the impact of incorrect AI advice. Few studies acknowledge the possibility of the explanations being incorrect even if the AI advice is correct. Thus, it is crucial to understand how imperfect XAI affects human-AI decision-making. In this work, we contribute a robust, mixed-methods user study with 136 ...
@article{morrison2024theimpacto,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  selected    = {true},
  title       = {The Impact of Imperfect {XAI} on {Human-AI} Decision-Making},
  author      = {Morrison, Katelyn and Spitzer, Philipp and Turri, Violet and Feng, Michelle and Kühl, Niklas and Perer, Adam},
  journal     = {ACM SIGCHI Conference on Computer-Supported Cooperative Work \& Social Computing (CSCW)},
  year        = {2024},
  pub_year    = {2024},
  citation    = {ACM SIGCHI Conference on Computer-Supported Cooperative Work & Social ..., 2024},
  html        = {https://arxiv.org/pdf/2307.13566.pdf}
}
-
Conference PaperTransparency in the wild: Navigating transparency in a deployed AI system to broaden need-finding approachesTurri, Violet, Morrison, Katelyn, Robinson, Katherine-Marie, Abidi, Collin, Perer, Adam, Forlizzi, Jodi, and Dzombak, Rachel2024
Transparency is a critical component when building artificial intelligence (AI) decision-support tools, especially for contexts in which AI outputs impact people or policy. Effectively identifying and addressing user transparency needs in practice remains a challenge. While a number of guidelines and processes for identifying transparency needs have emerged, existing methods tend to approach need-finding with a limited focus that centers around a narrow set of stakeholders and transparency techniques. To broaden this perspective, we employ numerous need-finding methods to investigate transparency mechanisms in a widely deployed AI-decision support tool developed by a wildlife conservation non-profit. Throughout our 5-month case study, we conducted need-finding through semi-structured interviews with end-users, analysis of the tool’s community forum, experiments with their ML model, and analysis of ...
@misc{turri2024transparen,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {Transparency in the wild: Navigating transparency in a deployed {AI} system to broaden need-finding approaches},
  author      = {Turri, Violet and Morrison, Katelyn and Robinson, Katherine-Marie and Abidi, Collin and Perer, Adam and Forlizzi, Jodi and Dzombak, Rachel},
  pages       = {1494--1514},
  year        = {2024},
  pub_year    = {2024},
  citation    = {Proceedings of the 2024 ACM Conference on Fairness, Accountability, and ..., 2024},
  pdf         = {https://dl.acm.org/doi/pdf/10.1145/3630106.3658985}
}
-
Conference PaperAI-Powered Reminders for Collaborative Tasks: Experiences and FuturesProceedings of the ACM on Human-Computer Interaction 2024
Email continues to serve as a central medium for managing collaborations. While unstructured email messaging is lightweight and conducive to coordination, it is easy to overlook commitments and requests for collaborations that are embedded in the text of free-flowing communications. Twenty-one years ago, Bellotti et al. proposed TaskMaster with the goal of redesigning the email interface to have explicit task management capabilities. Recently, AI-based task recognition and reminder services have been introduced in major email systems as one approach to managing asynchronous collaborations. While these services have been provided to millions of people around the world, there is little understanding of how people interact with and benefit from them. We explore knowledge workers’ experiences with Microsoft’s Viva Daily Briefing Email to better understand how AI-powered reminders can support ...
@article{morrison2024aipoweredr,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {{AI-Powered} Reminders for Collaborative Tasks: Experiences and Futures},
  author      = {Morrison, Katelyn and Iqbal, Shamsi T and Horvitz, Eric},
  journal     = {Proceedings of the ACM on Human-Computer Interaction},
  publisher   = {ACM},
  volume      = {8},
  number      = {CSCW1},
  pages       = {1--20},
  year        = {2024},
  pub_year    = {2024},
  citation    = {Proceedings of the ACM on Human-Computer Interaction 8 (CSCW1), 1-20, 2024}
}
-
Workshop PaperImperfect Natural Language Explanations in Human-AI Decision-MakingMorrison, K., Spitzer, P., Turri, V., Feng, M., Kühl, N., and Perer, A.TREW Workshop at ACM CHI 2024 2024
@article{morrison2024trew,
  bibtex_show = {true},
  abbr        = {Workshop Paper},
  title       = {Imperfect Natural Language Explanations in {Human-AI} Decision-Making},
  author      = {Morrison, Katelyn and Spitzer, Philipp and Turri, Violet and Feng, Michelle and Kühl, Niklas and Perer, Adam},
  journal     = {TREW Workshop at ACM CHI 2024},
  year        = {2024},
  pub_year    = {2024},
  pdf         = {24_Chi_Trew_ImperfectXAI.pdf}
}
2023
-
Conference PaperEvaluating the Impact of Human Explanation Strategies on Human-AI Visual Decision-Making2023
Artificial intelligence (AI) is increasingly being deployed in high-stakes domains, such as disaster relief and radiology, to aid practitioners during the decision-making process. Explainable AI techniques have been developed and deployed to provide users insights into why the AI made certain predictions. However, recent research suggests that these techniques may confuse or mislead users. We conducted a series of two studies to uncover strategies that humans use to explain decisions and then understand how those explanation strategies impact visual decision-making. In our first study, we elicit explanations from humans when assessing and localizing damaged buildings after natural disasters from satellite imagery and identify four core explanation strategies that humans employed. We then follow up by studying the impact of these explanation strategies by framing the explanations from Study 1 as if they were ...
@misc{morrison2023evaluating,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {Evaluating the Impact of Human Explanation Strategies on {Human-AI} Visual Decision-Making},
  author      = {Morrison, Katelyn and Shin, Donghoon and Holstein, Kenneth and Perer, Adam},
  year        = {2023},
  pub_year    = {2023},
  citation    = {ACM SIGCHI Conference on Computer-Supported Cooperative Work & Social ..., 2023},
  conference  = {ACM SIGCHI Conference on Computer-Supported Cooperative Work \& Social Computing (CSCW)},
  pdf         = {23_cscw_explanation_characteristics.pdf}
}
-
Conference PaperEye into AI: Evaluating the Interpretability of Explainable AI Techniques through a Game With a PurposeACM SIGCHI Conference on Computer-Supported Cooperative Work & Social Computing (CSCW) 2023
Recent developments in explainable AI (XAI) aim to improve the transparency of black-box models. However, empirically evaluating the interpretability of these XAI techniques is still an open challenge. The most common evaluation method is algorithmic performance, but such an approach may not accurately represent how interpretable these techniques are to people. A less common but growing evaluation strategy is to leverage crowd-workers to provide feedback on multiple XAI techniques to compare them. However, these tasks often feel like work and may limit participation. We propose a novel, playful, human-centered method for evaluating XAI techniques: a Game With a Purpose (GWAP), Eye into AI, that allows researchers to collect human evaluations of XAI at scale. We provide an empirical study demonstrating how our GWAP supports evaluating and comparing the agreement between three popular XAI ...
@article{morrison2023eyeintoaie,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {Eye into {AI}: Evaluating the Interpretability of Explainable {AI} Techniques through a Game With a Purpose},
  author      = {Morrison, Katelyn and Jain, Mayank and Hammer, Jessica and Perer, Adam},
  journal     = {ACM SIGCHI Conference on Computer-Supported Cooperative Work \& Social Computing (CSCW)},
  year        = {2023},
  pub_year    = {2023},
  citation    = {ACM SIGCHI Conference on Computer-Supported Cooperative Work & Social ..., 2023}
}
-
Conference PaperShared interest... sometimes: Understanding the alignment between human perception, vision architectures, and saliency map techniquesMorrison, Katelyn, Mehra, Ankita, and Perer, Adam2023
Empirical studies have shown that attention-based architectures outperform traditional convolutional neural networks (CNN) in terms of accuracy and robustness. As a result, attention-based architectures are increasingly used in high-stakes domains such as radiology and wildlife conservation to aid in decision-making. However, understanding how attention-based architectures compare to CNNs regarding alignment with human perception is still under-explored. Previous studies exploring how vision architectures align with human perception evaluate a single architecture with multiple explainability techniques or multiple architectures with a single explainability technique. Through an empirical analysis, we investigate how two attention-based architectures and two CNNs for two saliency map techniques align with the ground truth for human perception on 100 images from an interpretability benchmark dataset. Using the Shared Interest metrics, we found that CNNs align more with human perception when using the XRAI saliency map technique. However, we found the opposite for Grad-CAM. We discuss the implications of our analysis for human-centered explainable AI and introduce directions for future work.
@misc{morrison2023sharedinte,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {Shared interest... sometimes: Understanding the alignment between human perception, vision architectures, and saliency map techniques},
  author      = {Morrison, Katelyn and Mehra, Ankita and Perer, Adam},
  pages       = {3776--3781},
  year        = {2023},
  pub_year    = {2023},
  citation    = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern ..., 2023},
  conference  = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pdf         = {23_XAI4CV_cvpr.pdf}
}
2022
-
Workshop PaperTowards Generating Human-Centered Saliency Maps without Significantly Sacrificing AccuracyAswal, Vivek, Kao, Gore, Kim, Seo Young, and Morrison, KatelynNeuroVision 2022 Workshop at CVPR 2022
As deep neural networks make significant advances in computer vision tasks, they are being deployed in several high-stakes domains. However, these models are not always semantically meaningful to humans as traditional interpretability techniques are quantitatively driven. Therefore, we explore how to generate saliency maps that are more similar to human attention without significantly sacrificing the model performance. We conduct an empirical study to understand how current object detection models compare to human-centered saliency maps. Additionally, we present different data augmentation techniques such as Selective Erasing and Selective Inpainting along with the prevalent non-trivial transforms to evaluate the impact of human-centered data augmentation. With less than 3% mAP difference, we observe that data augmentations that are derived from predicted human attention improve the MAE and IoU between the model saliency and predicted attention. Visualization and more details are available here.
@article{aswal2022towardsgen,
  bibtex_show = {true},
  abbr        = {Workshop Paper},
  author      = {Aswal, Vivek and Kao, Gore and Kim, Seo Young and Morrison, Katelyn},
  title       = {Towards Generating Human-Centered Saliency Maps without Significantly Sacrificing Accuracy},
  journal     = {NeuroVision 2022 Workshop at CVPR},
  year        = {2022},
  pub_year    = {2022},
  citation    = {NeuroVision 2022 Workshop at CVPR, 2022},
  html        = {http://katelyn98.github.io/blog/2022/vlr-project/},
  pdf         = {19_CameraReady_CVPR2022_NeuroVision_SHORT_CameraReady.pdf},
  slides      = {CVPR_Lightning_Talk.pdf}
}
2021
-
Workshop PaperExploring Corruption Robustness: Inductive Biases in Vision Transformers and MLP-MixersMorrison, Katelyn, Gilby, Benjamin, Lipchak, Colton, Mattioli, Adam, and Kovashka, Adriana2021
Recently, vision transformers and MLP-based models have been developed in order to address some of the prevalent weaknesses in convolutional neural networks. Due to the novelty of transformers being used in this domain along with the self-attention mechanism, it remains unclear to what degree these architectures are robust to corruptions. Despite some works proposing that data augmentation remains essential for a model to be robust against corruptions, we propose to explore the impact that the architecture has on corruption robustness. We find that vision transformer architectures are inherently more robust to corruptions than the ResNet-50 and MLP-Mixers. We also find that vision transformers with 5 times fewer parameters than a ResNet-50 have more shape bias. Our code is available to reproduce.
@misc{morrison2021exploringc,
  abbr        = {Workshop Paper},
  bibtex_show = {true},
  title       = {Exploring Corruption Robustness: Inductive Biases in {Vision Transformers} and {MLP-Mixers}},
  author      = {Morrison, Katelyn and Gilby, Benjamin and Lipchak, Colton and Mattioli, Adam and Kovashka, Adriana},
  year        = {2021},
  pub_year    = {2021},
  citation    = {Uncertainty and Robustness in Deep Learning Workshop at ICML 2021, 2021},
  conference  = {Uncertainty and Robustness in Deep Learning Workshop at ICML 2021},
  html        = {https://arxiv.org/abs/2106.13122},
  pdf         = {exploring_corruption.pdf},
  code        = {https://github.com/katelyn98/CorruptionRobustness}
}
2020
-
Conference PaperUsing Object Tracking Techniques to Non-Invasively Measure Thoracic Rotation Range of MotionAdjunct Proceedings of the ACM International Conference on Multimodal Interaction (ICMI 2020), Utrecht, the Netherlands 2020
Different measuring instruments, such as a goniometer, have been used by clinicians to measure a patient’s ability to rotate their thoracic spine. Despite the simplicity of goniometers, this instrument requires the user to decipher the resulting measurement properly. The correctness of these measurements are imperative for clinicians to properly identify and evaluate injuries or help athletes enhance their overall performance. This paper introduces a goniometer-free, noninvasive measuring technique using a Raspberry Pi, a Pi Camera module, and software for clinicians to measure a subject’s thoracic rotation range of motion (ROM) when administering the seated rotation technique with immediate measurement feedback. Determining this measurement is achieved by applying computer vision object tracking techniques on a live video feed from the Pi Camera that is secured on the ceiling above the subject ...
@article{morrison2020usingobjec,
  abbr        = {Conference Paper},
  bibtex_show = {true},
  title       = {Using Object Tracking Techniques to Non-Invasively Measure Thoracic Rotation Range of Motion},
  author      = {Morrison, Katelyn and Yates, Daniel and Roman, Maya and Clark, William W},
  journal     = {Adjunct Proceedings of the ACM International Conference on Multimodal Interaction (ICMI 2020), Utrecht, the Netherlands},
  year        = {2020},
  pub_year    = {2020},
  citation    = {Adjunct Proceedings of the ACM International Conference on Multimodal ..., 2020}
}
-
Workshop PaperReducing Discrimination in Learning Algorithms for Social Good in Sociotechnical SystemsMorrison, KatelynAI for Social Good Workshop at IJCAI-PRICAI 2020 2020
Sociotechnical systems within cities are now equipped with machine learning algorithms in hopes to increase efficiency and functionality by modeling and predicting trends. Machine learning algorithms have been applied in these domains to address challenges such as balancing the distribution of bikes throughout a city and identifying demand hotspots for ride sharing drivers. However, these algorithms applied to challenges in sociotechnical systems have exacerbated social inequalities due to previous bias in data sets or the lack of data from marginalized communities. In this paper, I will address how smart mobility initiatives in cities use machine learning algorithms to address challenges. I will also address how these algorithms unintentionally discriminate against features such as socioeconomic status to motivate the importance of algorithmic fairness. Using the bike sharing program in Pittsburgh, PA, I will present a position on how discrimination can be eliminated from the pipeline using Bayesian Optimization.
@article{morrison2020reducingdi,
  bibtex_show = {true},
  abbr        = {Workshop Paper},
  author      = {Morrison, Katelyn},
  title       = {Reducing Discrimination in Learning Algorithms for Social Good in Sociotechnical Systems},
  journal     = {AI for Social Good Workshop at IJCAI-PRICAI 2020},
  year        = {2020},
  pub_year    = {2020},
  citation    = {AI for Social Good Workshop at IJCAI-PRICAI 2020, 2020},
  html        = {https://arxiv.org/abs/2011.13988}
}