{"id":448,"date":"2026-04-06T15:46:23","date_gmt":"2026-04-06T19:46:23","guid":{"rendered":"https:\/\/charm.seas.harvard.edu\/?page_id=448"},"modified":"2026-05-07T10:52:54","modified_gmt":"2026-05-07T14:52:54","slug":"research","status":"publish","type":"page","link":"https:\/\/charm.seas.harvard.edu\/?page_id=448","title":{"rendered":"Research"},"content":{"rendered":"\n<!-- RESEARCH PAGE HERO -->\n<section class=\"charm-research-hero\">\n  <p class=\"charm-section-kicker\">Research<\/p>\n  <h1>Publications<\/h1>\n  <p class=\"charm-research-hero-tagline\">\n    Research from CHARM&#8217;s faculty, students, and collaborators across\n    human\u2011centered AI, interaction design, and machine learning.\n  <\/p>\n<\/section>\n\n<!-- RESEARCH LIST -->\n<section class=\"charm-pubs-band\">\n  <div class=\"charm-pubs-inner\">\n\n    <!-- Optional: year group heading -->\n    <h2 class=\"charm-pubs-year\">2026<\/h2>\n\n    <ol class=\"charm-pubs-list\">\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2604.13305\"\n               target=\"_blank\" rel=\"noopener\">\nBias at the End of the Score\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSalma Abdel Magid, Grace Guo, Esin Tureci, Amaya Dharmasiri, Vikram V. 
Ramaswamy, Hanspeter Pfister, Olga Russakovsky\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE CVPR 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2411.05174\"\n               target=\"_blank\" rel=\"noopener\">\nInverse Transition Learning: Learning Dynamics from Demonstrations\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nLeo Benac, Abhishek Sharma, Sonali Parbhoo, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\nAISTATS 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3772318.3791337\"\n               target=\"_blank\" rel=\"noopener\">\nVidmento: Creating Video Stories through Context-Aware Expansion with Generative Video \n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nCatherine Yeh, Anh Truong, Mira Dontcheva, Bryan Wang\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/dl.acm.org\/doi\/10.1145\/3772363.3799137\"\n               target=\"_blank\" rel=\"noopener\">\nMnemoMaker: Creator, Curator, or Something Else? Exploring Human-AI Mnemonic Co-Creation\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nOlivia Seow, Elena Sajno, Dongho Shin, Pattie Maes, Samantha W. T. 
Chan\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026 Extended Abstracts (Interactive Demo)\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/notationsCHI26.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nHow Notations Evolve: A Historical Analysis with Implications for Supporting User-Defined Abstractions\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nJingyue Zhang, J.D. Zamfirescu-Pereira, Elena L. Glassman, Damien Masson, Ian Arawjo\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/ownershipCHI26.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nA Paradigm for Creative Ownership\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nTejaswi Polimetla, Katy Ilonka Gero, Elena L. Glassman\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/meta_HCI_CHI26meetup.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nMeta-HCI: Practising Reflection in HCI Research\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nAnnika Kaltenhauser, James Peter Arn\u00e9ra, Amelie Unger, Sophia Ppali, Niels van Berkel, Benjamin Tag, Elena L. 
Glassman, Phoebe Sengers, Simo Hosio, Jonas Oppenlaender\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026 Extended Abstracts (Meetup)\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/star_workshop_chi26.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nScience and Technology for Augmenting Reading (STAR)\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nTal August, Andrew Head, Alexa Siu, Elena L. Glassman, Jonathan K. Kummerfeld, Joseph Chee Chang, Lucy Lu Wang, Marti A. Hearst\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026 Extended Abstracts (Workshop)\n          <\/p>\n      <\/li>\n\n       <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-09-113426.png\"\n            alt=\"Overview of the BRIDGE system\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2602.23288\"\n               target=\"_blank\" rel=\"noopener\">\n              BRIDGE: Borderless Reconfiguration for Inclusive and Diverse Gameplay Experience via Embodiment Transformation\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Hayato Saiki, Chunggi Lee, Hikari Takahashi, Tica Lin, Hidetada Kishi, Kaori Tachibana, Yasuhiro Suzuki, Hanspeter Pfister, Kenji Suzuki\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            CHI 2026 <b>Best Paper Award<\/b>\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n            Training resources for parasports are limited, reducing opportunities 
for athletes and coaches to engage with sport-specific movements and tactical coordination. To address this gap, we developed BRIDGE, a system that integrates a reconstruction pipeline, which detects and tracks players from broadcast video to generate 3D play sequences, with an embodiment-aware visualization framework that decomposes head, trunk, and wheelchair base orientations to represent attention, intent, and mobility. We evaluated BRIDGE in two controlled studies with 20 participants (10 national wheelchair basketball team players and 10 amateur players). The results showed that BRIDGE significantly enhanced the perceived naturalness of player postures and made tactical intentions easier to understand. In addition, it supported functional classification by realistically conveying players\u2019 capabilities, which in turn improved participants\u2019 sense of self-efficacy. This work advances inclusive sports learning and accessible coaching practices, contributing to more equitable access to tactical resources in parasports.\n          <\/p>\n        <\/div>\n      <\/li>\n\n       <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/02\/Screenshot-2026-02-20-094733.png\"\n            alt=\"Funding AI for Good paper thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/abs\/2509.12455\"\n               target=\"_blank\" rel=\"noopener\">\n              Funding AI for Good: A Call for Meaningful Engagement\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Lin, Hongjin; Kawakami, Anna; D&#8217;Ignazio, Catherine; Holstein, Kenneth; Gajos, Krzysztof Z\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            CHI 2026\n    
      <\/p>\n          <p class=\"charm-pub-abstract\">\n            Artificial Intelligence for Social Good (AI4SG) is a growing area that\n            explores AI&#8217;s potential to address social issues, such as public health.\n            Yet prior work has shown limited evidence of its tangible benefits for\n            intended communities, and projects frequently face real\u2011world deployment\n            and sustainability challenges. We conducted a reflexive thematic analysis\n            of 35 funding documents, representing about $410 million USD in total\n            investments.\n          <\/p>\n        <\/div>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/abs\/2603.04613\"\n               target=\"_blank\" rel=\"noopener\">\nBeyond Anthropomorphism: a Spectrum of Interface Metaphors for LLMs\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nJianna So, Connie Cheng, Sonia Krishna Murthy\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/abs\/2603.04613\"\n               target=\"_blank\" rel=\"noopener\">\nNonvisual Support for Understanding and Reasoning about Data Structures\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nBrianna L. Wimer, Ritesh Kanchi, Kaija Frierson, Venkatesh Potluri, Ronald Metoyer, Jennifer Mankoff, Miya Natsuhara, Matt X. 
Wang\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/abs\/2603.04613\"\n               target=\"_blank\" rel=\"noopener\">\n&#8220;It just requires so much more creativity&#8221;: Barriers and Workarounds to Gathering Information for AI Contestation\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSohini Upadhyay, Dasha Pruss, Alicia DeVrio, Krzysztof Z. Gajos, Naveena Karusala\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026\n          <\/p>\n<\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/humanfactors.jmir.org\/2026\/1\/e70402\"\n               target=\"_blank\" rel=\"noopener\">\nNovel Web-Based Technology to Promote Goal-Setting in Complex Chronic Illness: Randomized Controlled Trial\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nLin, Jody; Huber, Bernd; Amir, Ofra; Assis-Hassid, Shiri; Gehrmann, Sebastian; Gajos, Krzysztof; Grosz, Barbara; Sanders, Lee\n          <\/p>\n          <p class=\"charm-pub-venue\">\nJMIR Hum Factors, vol. 13, pp. 
e70402, 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2602.22077\"\n               target=\"_blank\" rel=\"noopener\">\nViSTAR: Virtual Skill Training with Augmented Reality with 3D Avatars and LLM coaching agent.\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nChunggi Lee, Hayato Saiki, Tica Lin, Eiji Ikeda, Kenji Suzuki, Chen Zhu-Tian, Hanspeter Pfister\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2506.13150\"\n               target=\"_blank\" rel=\"noopener\">\nFederated ADMM from Bayesian Duality\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nThomas M\u00f6llenhoff, Siddharth Swaroop, Finale Doshi-Velez, Mohammad Emtiyaz Khan\n          <\/p>\n          <p class=\"charm-pub-venue\">\nICLR 2026\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2508.14681\"\n               target=\"_blank\" rel=\"noopener\">\nVirtual Multiplex Staining for Histological Images Using a Marker-wise Conditioned Diffusion Model\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nHyun-Jic Oh, Junsik Kim, Zhiyi Shi, Yichen Wu, Yu-An Chen, Peter K. 
Sorger, Hanspeter Pfister, and Won-Ki Jeong\n          <\/p>\n          <p class=\"charm-pub-venue\">\nAAAI 2026\n          <\/p>\n      <\/li>\n    <\/ol>\n \n<h2 class=\"charm-pubs-year\">2025<\/h2>\n\n    <ol class=\"charm-pubs-list\">\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2507.07136\"\n               target=\"_blank\" rel=\"noopener\">\nLangSplatV2: High-dimensional 3D Language Gaussian Splatting with 450+ FPS\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nWanhua Li, Yujie Zhao, Minghan Qin, Liu Y, Cai Y, Chuang Gan, Hanspeter Pfister\n          <\/p>\n          <p class=\"charm-pub-venue\">\nNeurIPS 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2307.06541\"\n               target=\"_blank\" rel=\"noopener\">\nOn the Effective Horizon of Inverse Reinforcement Learning\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nYiqing Xu, Finale Doshi-Velez, David Hsu\n          <\/p>\n          <p class=\"charm-pub-venue\">\nAAMAS 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2512.17446\"\n               target=\"_blank\" rel=\"noopener\">\nVAIR: Visual Analytics for Injury Risk Exploration in Sports\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nChunggi Lee, Ut Gong, Tica Lin, Stefanie Zollmann, Scott A Epsley, Adam Petway, Hanspeter Pfister\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE VIS 16th Workshop on Visual Analytics in Healthcare (VAHC), 2025.\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n       
   <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/aisel.aisnet.org\/icis2025\/impl_adopt\/impl_adopt\/9\/\"\n               target=\"_blank\" rel=\"noopener\">\nTensions of Occupational Identity and Patterns of Identity Protection: Preliminary Insights on Generative AI in the Software Engineering Domain\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSchmitt, Anuschka; Gajos, Krzysztof Z.; Mokryn, Osnat\n          <\/p>\n          <p class=\"charm-pub-venue\">\nICIS 2025 Proceedings\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/ieeexplore.ieee.org\/document\/10887085\"\n               target=\"_blank\" rel=\"noopener\">\nEstimating Upper Extremity Fugl-Meyer Assessment Scores From Reaching Motions Using Wearable Sensors\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nYu Meng Zhou, Nihal Raman, Tommaso Proietti, James Arnold, Prabhat Pathak, David Pont-Esteban, Kristin Nuckolsand, Kelly Rishe, Finale Doshi-Velez, David Lin, Conor Walsh\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/www.biorxiv.org\/content\/10.1101\/2025.07.02.662847v1.full.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nMoMo &#8211; Combining Neuron Morphology and Connectivity for Interactive Motif Analysis in Connectomes\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nShewarega MF, Troidl J, Rodriguez OA, Dindoost M, Harth P, Haberkern H, Stegmaier J, Bader D, Pfister H\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE VIS 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 
class=\"charm-pub-title\">\n            <a href=\"https:\/\/www.biorxiv.org\/content\/10.1101\/2025.08.09.669342v1.full.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nSynAnno: Interactive Guided Proofreading of Synaptic Annotations\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nLeander Lauenburg, Jakob Troidl, Adam Gohain, Zudi Lin, Hanspeter Pfister, Donglai Wei\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE VIS 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-09-103902.png\"\n            alt=\"Creative Writers\u2019 Attitudes on Writing as Training Data for Large Language Models thumbnail\"\n          \/>\n        <\/div>\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/gero_LIL_chi25.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nCreative Writers\u2019 Attitudes on Writing as Training Data for Large Language Models\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nKaty Ilonka Gero, Meera Desai, Carly Schnitzler, Nayun Eom, Jack Cushman, Elena L. Glassman\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2025 <b>Best Paper Award<\/b>\n          <\/p>\n <p class=\"charm-pub-abstract\">\n            The use of creative writing as training data for large language models (LLMs) is highly contentious and many writers have expressed outrage at the use of their work without consent or compensation. In this paper, we seek to understand how creative writers reason about the real or hypothetical use of their writing as training data. 
We interviewed 33 writers with variation across genre, method of publishing, degree of professionalization, and attitudes toward and engagement with LLMs. We report on core principles that writers express (support of the creative chain, respect for writers and writing, and the human element of creativity) and how these principles can be at odds with their realistic expectations of the world (a lack of control, industry-scale impacts, and interpretation of scale). Collectively these findings demonstrate that writers have a nuanced understanding of LLMs and are more concerned with power imbalances than the technology itself. \n          <\/p>\n        <\/div>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-09-103045.png\"\n            alt=\"Supporting Co-Adaptive Machine Teaching through Human Concept Learning and Cognitive Theories thumbnail\"\n          \/>\n        <\/div>\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/mocha_chi25.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nSupporting Co-Adaptive Machine Teaching through Human Concept Learning and Cognitive Theories\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSimret Araya Gebreegziabher, Yukun Yang, Elena L. Glassman, Toby Jia-Jun Li\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCHI 2025 <b>Best Paper Award<\/b>\n          <\/p>\n <p class=\"charm-pub-abstract\">\n            An important challenge in interactive machine learning, particularly in subjective or ambiguous domains, is fostering bi-directional alignment between humans and models. 
Users teach models their concept definition through data labeling, while refining their own understandings throughout the process. To facilitate this, we introduce Mocha, an interactive machine learning tool informed by two theories of human concept learning and cognition. First, it utilizes a neuro-symbolic pipeline to support Variation Theory based counterfactual data generation. By asking users to annotate counterexamples that are syntactically and semantically similar to already-annotated data but predicted to have different labels, the system can learn more effectively while helping users understand the model and reflect on their own label definitions. Second, Mocha uses Structural Alignment Theory to present groups of counterexamples, helping users comprehend alignable differences between data items and annotate them in batch. We validated Mocha\u2019s effectiveness and usability through a lab study with 18 participants.\n          <\/p>\n        <\/div>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/www.biorxiv.org\/content\/10.1101\/2025.07.19.665696v4.full.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nSEAL: Spatially-resolved Embedding Analysis with Linked Imaging Data\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSimon Warchol, Grace Guo, Johannes Knittel, Dan Freeman, Usha Bhalla, Jeremy L Muhlich, Peter K. 
Sorger, Hanspeter Pfister\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE VIS 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/www.biorxiv.org\/content\/10.1101\/2024.09.07.611785v2.full.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nniiv: Interactive Self-supervised Neural Implicit Isotropic Volume Reconstruction\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nJakob Troidl, Yiqing Liang, Johanna Beyer, Mojtaba Tavakoli, Johann Danzl, Markus Hadwiger, Hanspeter Pfister, James Tompkin\n          <\/p>\n          <p class=\"charm-pub-venue\">\nMICCAI Workshop on Efficient Medical AI (EMA), 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/dl.acm.org\/doi\/10.1145\/3757512\"\n               target=\"_blank\" rel=\"noopener\">\nTo Recommend or Not to Recommend: Designing and Evaluating AI-Enabled Decision Support for Time-Critical Medical Events\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nMastrianni, Angela; Kim, Mary Suhyun; Sullivan, Travis M.; Sippel, Genevieve Jayne; Burd, Randall S.; Gajos, Krzysztof Z.; Sarcevic, Aleksandra\n          <\/p>\n          <p class=\"charm-pub-venue\">\nProc. ACM Hum.-Comput. Interact, vol. 9, iss. 
CSCW2, 2025.\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2410.23880\"\n               target=\"_blank\" rel=\"noopener\">\nTransparent Trade-offs between Properties of Explanations\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nHiwot Belay Tadesse, Alihan H\u00fcy\u00fck, Yaniv Yacoby, Weiwei Pan, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\nUAI 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2404.14435\"\n               target=\"_blank\" rel=\"noopener\">\nFrenet-Serret Frame-based Decomposition for Part Segmentation of 3D Curvilinear Structures\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nLeslie Gu, Jason Ken Adhinarta, Mikhail Bessmeltsev, Jiancheng Yang, Yongjie Jessica Zhang, Wenjie Yin, Daniel Berger, Jeff Lichtman, Hanspeter Pfister, Donglai Wei\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE Transactions on Medical Imaging, 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/pmc.ncbi.nlm.nih.gov\/articles\/PMC12580505\/pdf\/nihms-2114702.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nThe State of Single-Cell Atlas Data Visualization in the Biological Literature\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nMark S Keller, Eric M\u00f6rth, Thomas C Smits, Simon Warchol, Grace Guo, Qianwen Wang, Robert Krueger, Hanspeter Pfister, Nils Gehlenborg\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE Computer Graphics and Applications, 2025\n          <\/p>\n      <\/li>\n\n<li 
class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/iis.seas.harvard.edu\/papers\/swaroop2025personalising.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nPersonalising AI assistance based on overreliance rate in AI-assisted decision making\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSiddharth Swaroop, Zana Bu\u00e7inca, Krzysztof Z. Gajos, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIUI 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/www.biorxiv.org\/content\/10.1101\/2025.06.10.658982v1.full.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nA connectomic resource for neural cataloguing and circuit dissection of the larval zebrafish brain\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nMariela D. Petkova, Micha\u0142 Januszewski, Tim Blakely, Kristian J. Herrera, Gregor F.P. Schuhknecht, Robert Tiller, Jinhan Choi, Richard L. Schalek, Jonathan Boulanger-Weill, Adi Peleg, Yuelong Wu, Shuohong Wang, Jakob Troidl, Sumit Kumar Vohra, Donglai Wei, Zudi Lin, Armin Bahl, Juan Carlos Tapia, Nirmala Iyer, Zachary T. Miller, Kathryn B. Hebert, Elisa C. Pavarino, Milo Taylor, Zixuan Deng, Moritz Stingl, Dana Hockling, Alina Hebling, Ruohong C. Wang, Lauren L. Zhang, Sam Dvorak, Zainab Faik, Kareem I. King Jr., Pallavi Goel, Julian Wagner-Carena, David Aley, Selimzhan Chalyshkan, Dominick Contreas, Xiong Li, Akila V. Muthukumar, Marina S. Vernaglia, Teodoro Tapia Carrasco, Sofia Melnychuck, TingTing Yan, Ananya Dalal, James M. DiMartino, Sam Brown, Nana Safo-Mensa, Ethan Greenberg, Michael Cook, Samantha Finley-May, Miriam A. Flynn, Gary Patrick Hopkins, Julie Kovalyak, Meghan Leonard, Alanna Lohff, Christopher Ordish, Ashley L. 
Scott, Satoko Takemura, Claire Walsh, John J. Walsh, Daniel R. Berger, Hanspeter Pfister, Stuart Berg, Christopher Knecht, Geoffrey W. Meissner, Wyatt Korff, Misha B. Ahrens, Viren Jain, Jeff W. Lichtman, Florian Engert\n          <\/p>\n          <p class=\"charm-pub-venue\">\nbioRxiv, 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2503.10437\"\n               target=\"_blank\" rel=\"noopener\">\n4D LangSplat: 4D Language Gaussian Splatting via Multimodal Large Language Models\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nWanhua Li, Renping Zhou, Jiawei Zhou, Y. Song, J. Herter, M. Qin, Gao Huang, Hanspeter Pfister\n          <\/p>\n          <p class=\"charm-pub-venue\">\nCVPR 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2410.09361\"\n               target=\"_blank\" rel=\"noopener\">\nDecision-Point Guided Safe Policy Improvement\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nAbhishek Sharma, Leo Benac, Sonali Parbhoo, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\nAISTATS 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/iis.seas.harvard.edu\/papers\/hagen2025toward.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nToward Accounting for the Effects of Gender Socialization in Quantitative Research in Human-Computer Interaction\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nHagen, Nazeli; Miratrix, Luke W.; Gajos, Krzysztof Z.\n          <\/p>\n          <p class=\"charm-pub-venue\">\nInteracting with 
Computers, 2025.\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2401.13961\"\n               target=\"_blank\" rel=\"noopener\">\nTriSAM: Tri-Plane SAM for zero-shot cortical blood vessel segmentation in VEM images\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nJia Wan, Wanhua Li, Jason Ken Adhinarta, Atmadeep Banerjee, Evelina Sjostedt, Jingpeng Wu, Jeff Lichtman, Hanspeter Pfister, Donglai Wei\n          <\/p>\n          <p class=\"charm-pub-venue\">\nIEEE Journal of Biomedical and Health Informatics, 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2505.18306\"\n               target=\"_blank\" rel=\"noopener\">\nCTRL-GS: Cascaded Temporal Residue Learning for 4D Gaussian Splatting\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nKarly Hou, Wanhua Li, Hanspeter Pfister\n          <\/p>\n          <p class=\"charm-pub-venue\">\n4D Vision Workshop @ CVPR 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/abstractexplorer.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nAbstractExplorer: Leveraging Structure-Mapping Theory to Enhance Comparative Close Reading at Scale\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nZiwei Gu, Joyce Zhou, Nina Lei, Jonathan Kummerfeld, Mahmood Jasim, Narges Mahyar, Elena L. 
Glassman\n          <\/p>\n          <p class=\"charm-pub-venue\">\nUIST 2025\n          <\/p>\n      <\/li>\n\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\nIntegrated Gradients Provides Faithful Language Model Attributions for In-Context Learning\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nTheo Datta, Erik Wang, Kayla Huang, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\nICLR 2025 Workshop Building Trust\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/www.nature.com\/articles\/s41592-024-02426-z\"\n               target=\"_blank\" rel=\"noopener\">\nCAVE: Connectome Annotation Versioning Engine\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSven Dorkenwald, Casey M. Schneider-Mizell, Derrick Brittain, Akhilesh Halageri, Chris Jordan, Nico Kemnitz, Manual A. Castro, William Silversmith, Jeremy Maitin-Shephard, Jakob Troidl, Hanspeter Pfister, Valentin Gillet, Daniel Xenes, J. Alexander Bae, Agnes L. Bodor, JoAnn Buchanan, Daniel J. Bumbarger, Leila Elabbady, Zhen Jia, Daniel Kapner, Sam Kinn, Sam Kinn, Kisuk Lee, Kai Li, Ran Lu, Thomas Macrina, Gayathri Mahalingam, Eric Mitchell, Shanka Subhra Mondal, Shang Mu, Barak Nehoran, Sergiy Popovych, Marc Takeno, Russel Torres, Nicholas L. Turner, William Wong, Jingpeng Wu, Wenjing Yin, Szi-chieh Yu, R. Clay Reid, Nuno Ma\u00e7arico da Costa, H. 
Sebastian Seung, Forrest Collman\n          <\/p>\n          <p class=\"charm-pub-venue\">\nNature Methods, 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/www.biorxiv.org\/content\/10.1101\/2024.11.24.625067v1.full.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nGlobal Neuron Shape Reasoning with Point Affinity Transformers\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nJakob Troidl, Johannes Knittel, Wanhua Li, Fangneng Zhan, Hanspeter Pfister, Srinivas Turaga\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            bioRxiv, 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/semantic_commit.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nSemantic Commit: Helping Users Update Intent Specifications for AI Memory at Scale\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nPriyan Vaithilingam, Munyeong Kim, Frida-Cecilia Acosta-Parenteau, Daniel Lee, Amine Mhedhbi, Elena L Glassman, Ian Arawjo\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            UIST 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/febs.onlinelibrary.wiley.com\/doi\/10.1002\/1878-0261.13783\"\n               target=\"_blank\" rel=\"noopener\">\nAddressing persistent challenges in digital image analysis of cancer tissue: resources developed from a hackathon\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSandhya Prabhakaran, Clarence Yapp, Gregory J Baker, Johanna Beyer, Young Hwan Chang, Allison L Creason, Robert Krueger, Jeremy 
Muhlich, Nathan Heath Patterson, Kevin Sidak, Damir Sudar, Adam J Taylor, Luke Ternes, Jakob Troidl, Xie Yubin, Artem Sokolov, Darren R Tyson\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            Molecular Oncology, 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/iis.seas.harvard.edu\/papers\/bucinca2025contrastive.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nContrastive Explanations That Anticipate Human Misconceptions Can Improve Human Decision-Making Skills\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nZana Bu\u00e7inca, Siddharth Swaroop, Amanda E. Paluch, Krzysztof Z. Gajos, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            CHI 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2502.08621\"\n               target=\"_blank\" rel=\"noopener\">\n               SportsBuddy: Designing and Evaluating an AI-Powered Sports Video Storytelling Tool Through Real-World Deployment\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Tica Lin, Ruxun Xiang, Gardenia Liu, Divyanshu Tiwari, Meng-Chia Chiang, Chenjiayi Ye, Hanspeter Pfister, Chen Zhu-Tian\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            IEEE PacificVis 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n               Extending reinforcement Learning-Driven Personalized Health Interventions to Multiple Health Behavioral Change Goals\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Samantha Marks, Michelle Chang, Weiwei Pan, Susan Murphy, 
Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            MOSS Workshop @ ICML 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2501.13198\"\n               target=\"_blank\" rel=\"noopener\">\n               SD-LoRA: Scalable Decoupled Low-Rank Adaptation for Class Incremental Learning\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Yichen Wu, H.M. Piao, L.K. Huang, R.Z. Wang, Wanhua Li, Hanspeter Pfister, D.Y. Meng, K.D. Ma, Y. Wei\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            ICLR 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/openreview.net\/pdf?id=Z8E2JQW3QG1\"\n               target=\"_blank\" rel=\"noopener\">\n               Understanding the Relationship between Prompts and Response Uncertainty in Large Language Models\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Ze Yu Zhang, Arun Verma, Finale Doshi-Velez, Bryan Kian Hsiang Low\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            ICLR 2025 Workshop: Quantify Uncertainty and Hallucination in Foundation Models: The Next Frontier in Reliable AI\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2410.11201\"\n               target=\"_blank\" rel=\"noopener\">\n               Tree of Attributes Prompt Learning for Vision-Language Models\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Tong Ding, Wanhua Li, Zhongqi Miao, Hanspeter Pfister\n          <\/p>\n          <p 
class=\"charm-pub-venue\">\n            ICLR 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2409.01035v1\"\n               target=\"_blank\" rel=\"noopener\">\n               Unleashing the Power of Task-Specific Directions in Parameter Efficient Fine-tuning\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Chongjie Si, Zhiyi Shi, Shifan Zhang, Xiaokang Yang, Hanspeter Pfister, Wei Shen\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            ICLR 2025\n          <\/p>\n      <\/li>\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2501.17325\"\n               target=\"_blank\" rel=\"noopener\">\n               Connecting Federated ADMM to Bayes\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Siddharth Swaroop, Mohammad Emtiyaz Khan, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            ICLR 2025\n          <\/p>\n      <\/li>\n\n<li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/arxiv.org\/pdf\/2501.17325\"\n               target=\"_blank\" rel=\"noopener\">\n               Connecting Federated ADMM to Bayes\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Siddharth Swaroop, Mohammad Emtiyaz Khan, Finale Doshi-Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            ICLR 2025\n          <\/p>\n      <\/li>\n\n <li class=\"charm-pub-item\">\n   <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/papers.ssrn.com\/sol3\/papers.cfm?abstract_id=5387231\"\n               
target=\"_blank\" rel=\"noopener\">\n               Law is vulnerable to AI influence; interface design can help\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Aileen Nielsen, Chelse Swoopes, Elena L. Glassman\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            SSRN Preprint 2025\n          <\/p>\n      <\/li>\n\n <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-01-144857.png\"\n            alt=\"Bridging Ontologies of Neurological Conditions: Towards Patient-centered Data Practices in Digital Phenotyping Research and Design thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/iis.seas.harvard.edu\/papers\/so2025bridging.pdf\"\n               target=\"_blank\" rel=\"noopener\">\n               Bridging Ontologies of Neurological Conditions: Towards Patient-centered Data Practices in Digital Phenotyping Research and Design\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Jianna So, Faye Yang, Krzysztof Z. Gajos, Naveena Karusala, Anoopum S. Gupta \n          <\/p>\n          <p class=\"charm-pub-venue\">\n            Proceedings of the ACM on Human-Computer Interaction, Honorable Mention\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n            Amidst the increasing datafication of healthcare, deep digital phenotyping is being explored in clinical research to gather comprehensive data that can improve understanding of neurological conditions. However, participants currently do not have access to this data due to researchers\u2019 apprehension around whether such data is interpretable or useful. 
This study focuses on patient perspectives on the potential of deep digital phenotyping data to benefit people with neurodegenerative diseases, such as ataxias, Parkinson\u2019s disease, and multiple system atrophy. We present an interview study (n=12) to understand how people with these conditions currently track their symptoms and how they envision interacting with their deep digital phenotyping data. We describe how participants envision the utility of this deep digital phenotyping data in relation to multiple stages of disease and stakeholders, especially its potential to bridge different and sometimes conflicting understandings of their condition. Looking towards a future in which patients have increased agency over their data and can use it to inform their care, we contribute implications for shaping patient-driven clinical research practices and deep digital phenotyping tools that serve a multiplicity of patient needs.\n          <\/p>\n        <\/div>\n      <\/li>\n \n <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-01-141517.png\"\n            alt=\"Leveraging Variation Theory in Counterfactual Data Augmentation for Optimized Active Learning thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/leveragingVT_acl.pdf\"\n               target=\"_blank\" rel=\"noopener\">\nLeveraging Variation Theory in Counterfactual Data Augmentation for Optimized Active Learning\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\nSimret Araya Gebreegziabher, Kuangshi Ai, Zheng Zhang, Elena L. 
Glassman*, Toby Jia-Jun Li*\n          <\/p>\n          <p class=\"charm-pub-venue\">\nACL 2025\n          <\/p>\n          <p class=\"charm-pub-abstract\">\nActive Learning (AL) allows models to learn interactively from user feedback. However, only annotating existing samples may hardly benef it the model\u2019s generalization. Moreover, AL commonly faces a cold start problem due to insufficient annotated data for effective sample selection. To address this, we introduce a counterfactual data augmentation approach inspired by Variation Theory, a theory of human concept learning that emphasizes the essential features of a concept by focusing on what stays the same and what changes. We use a neuro-symbolic pipeline to pinpoint key conceptual dimensions and use a large language model (LLM) to generate targeted variations along those dimensions. Through a text classification experiment, we show that our approach achieves significantly higher performance when there are fewer annotated data, showing its capability to address the cold start problem in AL. We also find that as the annotated training data gets larger, the impact of the generated data starts to diminish. 
This work demonstrates the value of incorporating human learning theories into the design and optimization of AL.\n          <\/p>\n        <\/div>\n      <\/li>\n\n  <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-01-141404.png\"\n            alt=\"Designing a Dashboard for Transparency and Control of Conversational AI thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/iis.seas.harvard.edu\/papers\/upadhyay2025counterfactual.pdf\"\n               target=\"_blank\" rel=\"noopener\">\n               Designing a Dashboard for Transparency and Control of Conversational AI\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n              Yida Chen, Aoyu Wu, Trevor DePodesta, Catherine Yeh, Lena Armstrong, Kenneth Li, Nicholas Castillo Marin, Oam Patel, Jan Riecke, Shivam Raval, Olivia Seow, Martin Wattenberg, Fernanda Vi\u00e9gas\n          <\/p>\n          <p class=\"charm-pub-venue\">\n             ICML 2025\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n      Conversational LLMs function as black box systems, leaving users guessing about why they see the output they do. This lack of transparency is potentially problematic, especially given concerns around bias and truthfulness. To address this issue, we present an end-to-end prototype\u2014connecting interpretability techniques with user experience design\u2014that seeks to make chatbots more transparent. We begin by showing evidence that a prominent open-source LLM has a \u201cuser model\u201d: examining the internal state of the system, we can extract data related to a user\u2019s age, gender, educational level, and socioeconomic status. 
Next, we describe the design of a dashboard that accompanies the chatbot interface, displaying this user model in real time. The dashboard can also be used to control the user model and the system\u2019s behavior. Finally, we discuss a study in which users conversed with the instrumented system. Our results suggest that users appreciate seeing internal states, which helped them expose biased behavior and increased their sense of control. Participants also made valuable suggestions that point to future directions for both design and machine learning research. The project page and video demo of our TalkTuner system are available at \n<a href=\"https:\/\/bit.ly\/talktuner-project-page\"\n               target=\"_blank\">\nbit.ly\/talktuner-project-page\n            <\/a>.\n          <\/p>\n        <\/div>\n      <\/li>\n\n  <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-01-124907.png\"\n            alt=\"Counterfactual Explanations May Not Be the Best Algorithmic Recourse Approach thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/iis.seas.harvard.edu\/papers\/upadhyay2025counterfactual.pdf\"\n               target=\"_blank\" rel=\"noopener\">\n              Counterfactual Explanations May Not Be the Best Algorithmic Recourse Approach\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n           Sohini Upadhyay, Himabindu Lakkaraju, Krzysztof Z. 
Gajos\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            IUI 2025\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n      Algorithmic recourse is a rapidly developing subfield in explainable AI (XAI) concerned with providing individuals subject to adverse high-stakes algorithmic outcomes with explanations indicating how to reverse said outcomes. While XAI research in the machine learning community doesn\u2019t confine itself to counterfactual explanations, its algorithmic recourse subfield does, adopting the assumption that the optimal way to provide recourse is through counterfactual explanations. Though there has been extensive human-AI interaction research on explanations, translating these findings to the algorithmic recourse setting is non-obvious due to meaningful problem setting differences, leaving the question of whether counterfactuals are the most optimal explanation paradigm for recourse unanswered. While intuitively satisfying, the prescriptive nature of counterfactuals makes them vulnerable to poor outcomes when circumstances unknown to the decision-making and explanation generating algorithms affect re-application strategies. With these concerns in mind, we designed a series of experiments comparing different explanation methods in the recourse setting, explicitly incorporating scenarios where circumstances unknown to the decision-making and explanation   algorithms affect re-application strategies. In Experiment 1, we compared counterfactuals with reason codes, a simple feature-based explanation, finding that they both yield comparable re-application success, and that reason codes led to better user outcomes when unknown circumstances had a high impact on re-application strategies. In Experiment 2, we sought to improve on reason code outcomes, comparing them to feature attributions, a more informative feature-based explanation, but found no improvements. 
Finally, in Experiment 3, we aimed to improve on reason code outcomes with a multiple counterfactual explanation condition, finding that multiple counterfactuals led to higher re-application success but still resulted in comparatively worse user outcomes in the face of high impact unknown circumstances. Taken together, these findings call into question whether the standard counterfactual paradigm is the best approach for the algorithmic recourse problem setting. \n          <\/p>\n        <\/div>\n      <\/li>\n \n  <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2026\/04\/Screenshot-2026-04-01-121523.png\"\n            alt=\"Bayesian Hypothesis Testing Policy Regularization thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/openreview.net\/pdf?id=yAL3BmW7WH\"\n               target=\"_blank\" rel=\"noopener\">\n              Bayesian Hypothesis Testing Policy Regularization\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Sarah Rathnam, Finale Doshi-Velez, Susan Murphy\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            RLC 2025, ICML 2025\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n        In reinforcement learning (RL), sparse feedback makes it difficult to target long-term outcomes, often resulting in high-variance policies. Real-world interventions instead rely on prior study data, expert input, or short-term proxies to guide exploration. In this work, we propose Bayesian Hypothesis Testing Policy Regularization (BHTPR), a method that integrates a previously-learned policy with a policy learned online to speed up learning in such settings. 
BHTPR applies the inductive bias that the prior study data matches the current study environment in some states but is incorrect in others. We use Bayesian hypothesis testing to determine, state by state, when to transfer the prior policy and when to rely on online learning.\n          <\/p>\n        <\/div>\n      <\/li>\n\n  <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2025\/08\/Screenshot-2025-08-07-091255.png\"\n            alt=\"CorpusStudio paper thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/glassmanlab.seas.harvard.edu\/papers\/corpusstudio.pdf\"\n               target=\"_blank\" rel=\"noopener\">\n              CorpusStudio: Surfacing Emergent Patterns In A Corpus Of Prior Work While Writing\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Hai Dang, Chelse Swoopes, Daniel Buschek, Elena L. Glassman\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            CHI &#8217;25 \u00b7 April 26\u2013May 01, 2025 \u00b7 Yokohama, Japan\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n            Many communities, including the scientific community, develop implicit\n            writing norms. Understanding them is crucial for effective communication\n            with that community. Writers gradually develop an implicit understanding\n            of norms by reading papers and receiving feedback on their writing.\n            However, it is difficult to both externalize this knowledge and apply it\n            to one&#8217;s own writing. 
We propose two new writing support concepts that\n            reify document and sentence\u2011level patterns in a given text corpus.\n          <\/p>\n        <\/div>\n      <\/li>\n     \n      <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2025\/08\/Screenshot-2025-08-07-092649.png\"\n            alt=\"ChatGPT paper thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/iis.seas.harvard.edu\/papers\/ma2025chatgpt.pdf\"\n               target=\"_blank\" rel=\"noopener\">\n              &#8220;ChatGPT, Don&#8217;t Tell Me What to Do&#8221;: Designing AI for Context Analysis\n              in Humanitarian Frontline Negotiations\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Zilin Ma, Yiyang Mei, Claude Bruderlein, Krzysztof Z. Gajos, Weiwei Pan\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            CHIWORK \u00b7 ACM Press \u00b7 Forthcoming\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n            Frontline humanitarian negotiators are increasingly exploring ways to use\n            AI tools in their workflows. However, current AI tools in negotiation\n            primarily focus on outcomes, neglecting crucial aspects of the negotiation\n            process. 
Through iterative user\u2011centric design with experienced frontline\n            negotiators (n=32), we found that flexible tools that enable contextualizing\n            cases and exploring options are more effective than those providing direct\n            recommendations.\n          <\/p>\n        <\/div>\n      <\/li>\n\n      <li class=\"charm-pub-item\">\n        <div class=\"charm-pub-thumb-wrap\">\n          <img decoding=\"async\"\n            class=\"charm-pub-thumb\"\n            src=\"https:\/\/charm.seas.harvard.edu\/wp-content\/uploads\/2025\/08\/Screenshot-2025-08-07-095850.png\"\n            alt=\"Optimizing Explanations paper thumbnail\"\n          \/>\n        <\/div>\n        <div class=\"charm-pub-body\">\n          <h3 class=\"charm-pub-title\">\n            <a href=\"https:\/\/openreview.net\/pdf?id=HUq8YbDpFt\"\n               target=\"_blank\" rel=\"noopener\">\n              Optimizing Explanations: Nuances Matter When Evaluation Metrics\n              Become Loss Functions\n            <\/a>\n          <\/h3>\n          <p class=\"charm-pub-authors\">\n            Jonas B. Raedler, Hiwot Belay Tadesse, Weiwei Pan, Finale Doshi\u2011Velez\n          <\/p>\n          <p class=\"charm-pub-venue\">\n            MOSS Workshop @ ICML 2025\n          <\/p>\n          <p class=\"charm-pub-abstract\">\n            Recent work has introduced a framework that allows users to directly\n            optimize explanations for desired properties and their trade\u2011offs.\n            While powerful in principle, this method repurposes evaluation metrics\n            as loss functions. 
We study how different robustness metrics influence\n            the outcome of explanation optimization, and find that the choice of\n            metric can lead to highly divergent explanations, particularly in\n            higher\u2011dimensional settings.\n          <\/p>\n        <\/div>\n      <\/li>\n\n    <\/ol>\n\n    <!-- Add more year groups below as needed -->\n    <!-- <h2 class=\"charm-pubs-year\">2024<\/h2> -->\n    <!-- <ol class=\"charm-pubs-list\"> ... <\/ol> -->\n\n  <\/div>\n<\/section>\n\n<style>\n:root {\n  --charm-purple: #5C3C99;\n  --charm-orange: #F28E2B;\n  --charm-orange-light: #FFF6EB;\n  --charm-feature-bg: #f7f2ff;\n  --charm-ink: #241634;\n  --charm-body: \"DM Sans\", system-ui, -apple-system, BlinkMacSystemFont,\n                \"SF Pro Text\", \"Helvetica Neue\", Arial, sans-serif;\n  --charm-heading: \"Space Grotesk\", system-ui, -apple-system, BlinkMacSystemFont,\n                   \"SF Pro Text\", \"Helvetica Neue\", Arial, sans-serif;\n}\n\n\/* ========== HERO ========== *\/\n\n.charm-research-hero {\n  width: 100%;\n  padding: 3rem 1.5rem 2.5rem;\n  text-align: center;\n  background: linear-gradient(180deg, #ffffff 0%, var(--charm-orange-light) 100%);\n  margin: 0;\n  box-sizing: border-box;\n}\n\n.charm-research-hero .charm-section-kicker {\n  font-family: var(--charm-heading);\n  font-size: 0.8rem;\n  letter-spacing: 0.18em;\n  text-transform: uppercase;\n  font-weight: 600;\n  color: var(--charm-purple);\n  margin: 0 0 0.4rem;\n  display: block;\n}\n\n.charm-research-hero h1 {\n  font-family: var(--charm-heading);\n  font-size: clamp(2rem, 3.5vw, 2.6rem);\n  font-weight: 700;\n  letter-spacing: 0.04em;\n  margin: 0 0 0.6rem;\n  color: #111;\n}\n\n.charm-research-hero-tagline {\n  font-family: var(--charm-body);\n  font-size: 1.05rem;\n  line-height: 1.75;\n  max-width: 44rem;\n  margin: 0 auto;\n  color: #3e324a;\n}\n\n\/* ========== PUBLICATIONS BAND ========== *\/\n\n.charm-pubs-band {\n  width: 100%;\n  margin: 0;\n  
background: linear-gradient(\n    to bottom,\n    var(--charm-orange-light) 0%,\n    var(--charm-feature-bg) 40%,\n    #e8f0ff 100%\n  );\n  padding: 0;\n  box-sizing: border-box;\n}\n\n.charm-pubs-inner {\n  max-width: 1400px;\n  margin: 0 auto;\n  padding: 3rem 1.5rem 4rem;\n  box-sizing: border-box;\n  font-family: var(--charm-body);\n  color: var(--charm-ink);\n}\n\n\/* Year headings *\/\n.charm-pubs-year {\n  font-family: var(--charm-heading);\n  font-size: 1.4rem;\n  font-weight: 700;\n  letter-spacing: 0.1em;\n  color: var(--charm-purple);\n  margin: 0 0 1.5rem;\n  padding-bottom: 0.5rem;\n  border-bottom: 2px solid rgba(92, 60, 153, 0.15);\n}\n\n\/* List reset *\/\n.charm-pubs-list {\n  list-style: none;\n  margin: 0 0 3rem;\n  padding: 0;\n  display: flex;\n  flex-direction: column;\n  gap: 1.2rem;\n}\n\n\/* ========== INDIVIDUAL PAPER ITEM ========== *\/\n\n.charm-pub-item {\n  display: flex;\n  flex-direction: row;\n  align-items: flex-start;\n  gap: 1.4rem;\n  background: #ffffff;\n  border-radius: 14px;\n  padding: 1.2rem 1.3rem;\n  border: 1px solid rgba(0, 0, 0, 0.04);\n  box-shadow: 0 4px 16px rgba(0, 0, 0, 0.05);\n  position: relative;\n  overflow: hidden;\n  transition: box-shadow 0.2s ease, transform 0.18s ease;\n}\n\n\/* Hover lift *\/\n.charm-pub-item:hover {\n  box-shadow: 0 10px 28px rgba(0, 0, 0, 0.09);\n  transform: translateY(-2px);\n}\n\n\/* Accent bar on left edge *\/\n.charm-pub-item::before {\n  content: \"\";\n  position: absolute;\n  top: 0;\n  left: 0;\n  bottom: 0;\n  width: 4px;\n  background: linear-gradient(to bottom, var(--charm-purple), var(--charm-orange));\n  border-radius: 14px 0 0 14px;\n}\n\n\/* ========== THUMBNAIL ========== *\/\n\n.charm-pub-thumb-wrap {\n  flex: 0 0 320px;\n  width: 320px;\n}\n\n.charm-pub-thumb {\n  width: 320px;\n  height: 200px;\n  object-fit: contain;\n  object-position: center; \n  border-radius: 8px;\n  display: block;\n  background: #ffffff;\n}\n\n\/* ========== PAPER TEXT ========== 
*\/\n\n.charm-pub-body {\n  flex: 1 1 0;\n  min-width: 0;\n}\n\n\/* Paper title + link *\/\n.charm-pub-title {\n  font-family: var(--charm-heading);\n  font-size: 1.02rem;\n  font-weight: 600;\n  letter-spacing: 0.01em;\n  line-height: 1.4;\n  margin: 0 0 0.3rem;\n}\n\n.charm-pub-title a {\n  color: var(--charm-ink);\n  text-decoration: underline;\n  text-decoration-color: rgba(92, 60, 153, 0.4);\n  text-underline-offset: 0.18em;\n  text-decoration-thickness: 0.09em;\n  transition: color 0.15s ease, text-decoration-color 0.15s ease;\n}\n\n.charm-pub-title a:hover {\n  color: var(--charm-purple);\n  text-decoration-color: var(--charm-purple);\n}\n\n\/* Authors *\/\n.charm-pub-authors {\n  margin: 0 0 0.2rem;\n  font-family: var(--charm-body);\n  font-size: 0.9rem;\n  line-height: 1.55;\n  color: #4a3b63;\n  font-weight: 500;\n}\n\n\/* Venue \/ conference *\/\n.charm-pub-venue {\n  margin: 0 0 0.55rem;\n  font-family: var(--charm-heading);\n  font-size: 0.78rem;\n  letter-spacing: 0.12em;\n  text-transform: uppercase;\n  color: var(--charm-purple);\n}\n\n\/* Abstract \u2013 clamped to ~6 lines, smooth expand\/collapse *\/\n.charm-pub-abstract {\n  margin: 0;\n  font-family: var(--charm-body);\n  font-size: 0.88rem;\n  line-height: 1.75;\n  color: #4a3a5e;\n  overflow: hidden;\n  position: relative;\n\n  \/* ~6 lines: 0.88rem \u00d7 1.75 \u00d7 6 \u2248 9.24rem *\/\n  max-height: 9.5rem;\n\n  \/* Smooth expand and collapse *\/\n  transition: max-height 2.5s ease;\n}\n\n\/* When expanded: allow full height, fade out the gradient *\/\n.charm-pub-abstract.is-expanded {\n  max-height: 1000px;  \/* large enough for any abstract *\/\n}\n\n.charm-pub-abstract.is-expanded::after {\n  opacity: 0;\n}\n\/* Toggle button *\/\n.charm-abstract-toggle {\n  display: flex;          \/* changed from inline-flex *\/\n  align-items: center;\n  justify-content: center; \/* centers the text + arrow inside the button *\/\n  gap: 0.3em;\n  margin: 0.6rem auto 0;  \/* auto left\/right centers 
the block itself *\/\n  width: fit-content;     \/* shrinks to content width so auto margins work *\/\n  background: none;\n  border: none;\n  padding: 0;\n  cursor: pointer;\n  font-family: var(--charm-heading);\n  font-size: 0.78rem;\n  font-weight: 600;\n  letter-spacing: 0.1em;\n  text-transform: uppercase;\n  color: var(--charm-purple);\n  transition: color 0.15s ease;\n}\n\n.charm-abstract-toggle:hover {\n  color: var(--charm-orange);\n}\n\n\/* Arrow icon that rotates when expanded *\/\n.charm-abstract-toggle .charm-toggle-arrow {\n  display: inline-block;\n  font-size: 0.7rem;\n  transition: transform 0.25s ease;\n  line-height: 1;\n}\n\n.charm-abstract-toggle.is-expanded .charm-toggle-arrow {\n  transform: rotate(180deg);\n}\n\n\/* ========== RESPONSIVE ========== *\/\n\n@media (max-width: 900px) {\n  .charm-pubs-inner {\n    padding: 2.5rem 1.25rem 3rem;\n  }\n\n  .charm-pub-thumb-wrap {\n    flex: 0 0 150px;\n    width: 150px;\n  }\n\n  .charm-pub-thumb {\n    width: 150px;\n    height: 100px;\n  }\n\n  .charm-pub-title {\n    font-size: 0.98rem;\n  }\n}\n\n@media (max-width: 640px) {\n  \/* Stack image above text on phones *\/\n  .charm-pub-item {\n    flex-direction: column;\n    gap: 0.9rem;\n  }\n\n  .charm-pub-item::before {\n    top: 0;\n    left: 0;\n    right: 0;\n    bottom: auto;\n    width: auto;\n    height: 4px;\n    border-radius: 14px 14px 0 0;\n  }\n\n  .charm-pub-thumb-wrap {\n    flex: none;\n    width: 100%;\n  }\n\n  .charm-pub-thumb {\n    width: 100%;\n    height: 160px;\n    object-fit: fill;\n  }\n\n  .charm-research-hero {\n    padding: 2.5rem 1.25rem 2rem;\n  }\n}\n<\/style>\n\n<script>\n(function () {\n  \/\/ Use window.load (not DOMContentLoaded) so fonts and layout\n  \/\/ are fully calculated before we measure heights\n  window.addEventListener('load', function () {\n\n    var abstracts = document.querySelectorAll('.charm-pub-abstract');\n\n    abstracts.forEach(function (abstract) {\n\n      \/\/ With overflow:hidden + 
a max-height clamp, scrollHeight returns\n      \/\/ the FULL content height while clientHeight returns the VISIBLE height.\n      \/\/ If they differ, the text is being truncated and we need a button.\n      if (abstract.scrollHeight > abstract.clientHeight + 1) {\n\n        var btn = document.createElement('button');\n        btn.type = 'button';\n        btn.className = 'charm-abstract-toggle';\n        btn.innerHTML = 'Show more <span class=\"charm-toggle-arrow\">&#9662;<\/span>';\n        btn.setAttribute('aria-expanded', 'false');\n\n        btn.addEventListener('click', function () {\n          var isExpanded = abstract.classList.contains('is-expanded');\n\n          if (isExpanded) {\n            abstract.classList.remove('is-expanded');\n            btn.classList.remove('is-expanded');\n            btn.innerHTML = 'Show more <span class=\"charm-toggle-arrow\">&#9662;<\/span>';\n            btn.setAttribute('aria-expanded', 'false');\n          } else {\n            abstract.classList.add('is-expanded');\n            btn.classList.add('is-expanded');\n            btn.innerHTML = 'Show less <span class=\"charm-toggle-arrow\">&#9662;<\/span>';\n            btn.setAttribute('aria-expanded', 'true');\n          }\n        });\n\n        \/\/ Insert button immediately after the abstract paragraph\n        abstract.parentNode.insertBefore(btn, abstract.nextSibling);\n      }\n    });\n  });\n})();\n<\/script>\n","protected":false},"excerpt":{"rendered":"<p>Research Publications Research from CHARM&#8217;s faculty, students, and collaborators across human\u2011centered AI, interaction design, and machine learning. 2026 Bias at the End of the Score Salma Abdel Magid, Grace Guo, Esin Tureci, Amaya Dharmasiri, Vikram V. 
Ramaswamy, Hanspeter Pfister, Olga Russakovsky IEEE CVPR 2026 Inverse Transition Learning: Learning Dynamics from Demonstrations Leo Benac, Abhishek Sharma, [&hellip;]<\/p>\n","protected":false},"author":2,"featured_media":0,"parent":0,"menu_order":0,"comment_status":"closed","ping_status":"closed","template":"page-no-title","meta":{"footnotes":""},"class_list":["post-448","page","type-page","status-publish","hentry"],"yoast_head":"<!-- This site is optimized with the Yoast SEO plugin v27.4 - https:\/\/yoast.com\/product\/yoast-seo-wordpress\/ -->\n<title>Research - Center for Human-driven AI Research and Methods (CHARM) at Harvard<\/title>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/charm.seas.harvard.edu\/?page_id=448\" \/>\n<meta property=\"og:locale\" content=\"en_US\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"Research - Center for Human-driven AI Research and Methods (CHARM) at Harvard\" \/>\n<meta property=\"og:description\" content=\"Research Publications Research from CHARM&#8217;s faculty, students, and collaborators across human\u2011centered AI, interaction design, and machine learning. 2026 Bias at the End of the Score Salma Abdel Magid, Grace Guo, Esin Tureci, Amaya Dharmasiri, Vikram V. Ramaswamy, Hanspeter Pfister, Olga Russakovsky IEEE CVPR 2026 Inverse Transition Learning: Learning Dynamics from Demonstrations Leo Benac, Abhishek Sharma, [&hellip;]\" \/>\n<meta property=\"og:url\" content=\"https:\/\/charm.seas.harvard.edu\/?page_id=448\" \/>\n<meta property=\"og:site_name\" content=\"Center for Human-driven AI Research and Methods (CHARM) at Harvard\" \/>\n<meta property=\"article:modified_time\" content=\"2026-05-07T14:52:54+00:00\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:label1\" content=\"Est. 
reading time\" \/>\n\t<meta name=\"twitter:data1\" content=\"19 minutes\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\\\/\\\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/?page_id=448\",\"url\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/?page_id=448\",\"name\":\"Research - Center for Human-driven AI Research and Methods (CHARM) at Harvard\",\"isPartOf\":{\"@id\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/#website\"},\"datePublished\":\"2026-04-06T19:46:23+00:00\",\"dateModified\":\"2026-05-07T14:52:54+00:00\",\"breadcrumb\":{\"@id\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/?page_id=448#breadcrumb\"},\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\\\/\\\/charm.seas.harvard.edu\\\/?page_id=448\"]}]},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/?page_id=448#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"Home\",\"item\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"Research\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/#website\",\"url\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/\",\"name\":\"Center for Human-driven AI Research and Methods (CHARM) at Harvard\",\"description\":\"\",\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\\\/\\\/charm.seas.harvard.edu\\\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"en-US\"}]}<\/script>\n<!-- \/ Yoast SEO plugin. 
-->","yoast_head_json":{"title":"Research - Center for Human-driven AI Research and Methods (CHARM) at Harvard","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/charm.seas.harvard.edu\/?page_id=448","og_locale":"en_US","og_type":"article","og_title":"Research - Center for Human-driven AI Research and Methods (CHARM) at Harvard","og_description":"Research Publications Research from CHARM&#8217;s faculty, students, and collaborators across human\u2011centered AI, interaction design, and machine learning. 2026 Bias at the End of the Score Salma Abdel Magid, Grace Guo, Esin Tureci, Amaya Dharmasiri, Vikram V. Ramaswamy, Hanspeter Pfister, Olga Russakovsky IEEE CVPR 2026 Inverse Transition Learning: Learning Dynamics from Demonstrations Leo Benac, Abhishek Sharma, [&hellip;]","og_url":"https:\/\/charm.seas.harvard.edu\/?page_id=448","og_site_name":"Center for Human-driven AI Research and Methods (CHARM) at Harvard","article_modified_time":"2026-05-07T14:52:54+00:00","twitter_card":"summary_large_image","twitter_misc":{"Est. 
reading time":"19 minutes"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/charm.seas.harvard.edu\/?page_id=448","url":"https:\/\/charm.seas.harvard.edu\/?page_id=448","name":"Research - Center for Human-driven AI Research and Methods (CHARM) at Harvard","isPartOf":{"@id":"https:\/\/charm.seas.harvard.edu\/#website"},"datePublished":"2026-04-06T19:46:23+00:00","dateModified":"2026-05-07T14:52:54+00:00","breadcrumb":{"@id":"https:\/\/charm.seas.harvard.edu\/?page_id=448#breadcrumb"},"inLanguage":"en-US","potentialAction":[{"@type":"ReadAction","target":["https:\/\/charm.seas.harvard.edu\/?page_id=448"]}]},{"@type":"BreadcrumbList","@id":"https:\/\/charm.seas.harvard.edu\/?page_id=448#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"Home","item":"https:\/\/charm.seas.harvard.edu\/"},{"@type":"ListItem","position":2,"name":"Research"}]},{"@type":"WebSite","@id":"https:\/\/charm.seas.harvard.edu\/#website","url":"https:\/\/charm.seas.harvard.edu\/","name":"Center for Human-driven AI Research and Methods (CHARM) at 
Harvard","description":"","potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/charm.seas.harvard.edu\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"en-US"}]}},"_links":{"self":[{"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=\/wp\/v2\/pages\/448","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=\/wp\/v2\/pages"}],"about":[{"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=\/wp\/v2\/types\/page"}],"author":[{"embeddable":true,"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=\/wp\/v2\/users\/2"}],"replies":[{"embeddable":true,"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=448"}],"version-history":[{"count":34,"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=\/wp\/v2\/pages\/448\/revisions"}],"predecessor-version":[{"id":726,"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=\/wp\/v2\/pages\/448\/revisions\/726"}],"wp:attachment":[{"href":"https:\/\/charm.seas.harvard.edu\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=448"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}