publications

2022

  • T. Draws, O. Inel, N. Tintarev, C. Baden, and B. Timmermans, “Comprehensive Viewpoint Representations for a Deeper Understanding of User Interactions With Debated Topics,” in Proceedings of the 2022 ACM SIGIR Conference on Human Information Interaction and Retrieval, New York, NY, USA, 2022. doi:10.1145/3498366.3505812
    [BibTeX] [Abstract] [Download PDF]

    Research in the area of human information interaction (HII) typically represents viewpoints on debated topics in a binary fashion, as either against or in favor of a given topic (e.g., the feminist movement). This simple taxonomy, however, greatly reduces the latent richness of viewpoints and thereby limits the potential of research and practical applications in this field. Work in the communication sciences has already demonstrated that viewpoints can be represented in much more comprehensive ways, which could enable a deeper understanding of users’ interactions with debated topics online. For instance, a viewpoint’s stance usually has a degree of strength (e.g., mild or strong), and, even if two viewpoints support or oppose something to the same degree, they may use different logics of evaluation (i.e., underlying reasons). In this paper, we draw from communication science practice to propose a novel, two-dimensional way of representing viewpoints that incorporates a viewpoint’s stance degree as well as its logic of evaluation. We show in a case study of tweets on debated topics how our proposed viewpoint label can be obtained via crowdsourcing with acceptable reliability. By analyzing the resulting data set and conducting a user study, we further show that the two-dimensional viewpoint representation we propose allows for more meaningful analyses and diversification interventions compared to current approaches. Finally, we discuss what this novel viewpoint label implies for HII research and how obtaining it may be made cheaper in the future.

    @inproceedings{draws2022ComprehensiveViewpointRepresentations,
    title = {Comprehensive {{Viewpoint Representations}} for a {{Deeper Understanding}} of {{User Interactions With Debated Topics}}},
    booktitle = {Proceedings of the 2022 {{ACM SIGIR Conference}} on {{Human Information Interaction}} and {{Retrieval}}},
    author = {Draws, Tim and Inel, Oana and Tintarev, Nava and Baden, Christian and Timmermans, Benjamin},
    year = {2022},
    series = {{{CHIIR}} '22},
    publisher = {{ACM}},
    address = {{New York, NY, USA}},
    doi = {10.1145/3498366.3505812},
    url = {https://drive.google.com/file/d/1cMUzKX9QkAGfTAM8WaDKRK7y23auzNn5/view?usp=sharing},
    abstract = {Research in the area of human information interaction (HII) typically represents viewpoints on debated topics in a binary fashion, as either against or in favor of a given topic (e.g., the feminist movement). This simple taxonomy, however, greatly reduces the latent richness of viewpoints and thereby limits the potential of research and practical applications in this field. Work in the communication sciences has already demonstrated that viewpoints can be represented in much more comprehensive ways, which could enable a deeper understanding of users' interactions with debated topics online. For instance, a viewpoint's stance usually has a degree of strength (e.g., mild or strong), and, even if two viewpoints support or oppose something to the same degree, they may use different logics of evaluation (i.e., underlying reasons). In this paper, we draw from communication science practice to propose a novel, two-dimensional way of representing viewpoints that incorporates a viewpoint's stance degree as well as its logic of evaluation. We show in a case study of tweets on debated topics how our proposed viewpoint label can be obtained via crowdsourcing with acceptable reliability. By analyzing the resulting data set and conducting a user study, we further show that the two-dimensional viewpoint representation we propose allows for more meaningful analyses and diversification interventions compared to current approaches. Finally, we discuss what this novel viewpoint label implies for HII research and how obtaining it may be made cheaper in the future.},
    langid = {english},
    file = {/Users/tim/Zotero/storage/YSQ8BIRZ/2022 - Comprehensive Viewpoint Representations for a Deep.pdf}
    }
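
    A minimal sketch of how such a two-dimensional viewpoint label could be represented in code; the stance-degree scale and the set of logics of evaluation below are illustrative assumptions, not the paper's taxonomy:

    from dataclasses import dataclass
    from enum import Enum

    class LogicOfEvaluation(Enum):
        """Placeholder logics of evaluation; the paper derives its own set
        from communication science practice."""
        MORAL = "moral"
        ECONOMIC = "economic"
        SCIENTIFIC = "scientific"

    @dataclass(frozen=True)
    class ViewpointLabel:
        """Two-dimensional viewpoint label: stance degree plus logic of evaluation."""
        stance_degree: int        # assumed scale: -3 (strongly opposing) to 3 (strongly supporting)
        logic: LogicOfEvaluation  # the underlying reason for the stance

        def __post_init__(self):
            if not -3 <= self.stance_degree <= 3:
                raise ValueError("stance degree must lie in [-3, 3]")

    # Two tweets may support a topic to the same degree for different reasons.
    a = ViewpointLabel(stance_degree=2, logic=LogicOfEvaluation.MORAL)
    b = ViewpointLabel(stance_degree=2, logic=LogicOfEvaluation.ECONOMIC)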

  • T. Draws, D. La Barbera, M. Soprano, K. Roitero, D. Ceolin, A. Checco, and S. Mizzaro, “The Effects of Crowd Workers Biases in Fact-Checking Tasks,” in Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency, Seoul, South Korea, 2022.
    [BibTeX] [Abstract]

    Due to the increasing amount of information shared online every day, the need for a sound and reliable way to distinguish between trustworthy and non-trustworthy information is as present as ever. One technique for performing fact-checking at scale is to employ human intelligence in the form of crowd workers. Although earlier work has suggested that crowd workers can reliably identify misinformation, cognitive biases of crowd workers may decrease the quality of truthfulness judgments in this context. We performed a systematic exploratory analysis of publicly available crowdsourced data to identify a set of potential systematic biases that may occur in fact-checking tasks performed by crowd workers. Following this exploratory study, we collected a novel dataset of crowdsourced truthfulness judgments to validate our hypotheses. Our findings suggest that workers generally overestimate the truthfulness of statements and that different cognitive biases (i.e., the affect heuristic and overconfidence) can affect their annotations. Exploratory findings furthermore show a relationship between crowd workers’ belief in science and self-reported confidence and their ability to judge the truthfulness of statements accurately. Interestingly, we find that, depending on general judgment tendencies of workers, their biases may sometimes lead to more accurate judgments.

    @inproceedings{draws2022EffectsCrowdWorkers,
    title = {The {{Effects}} of {{Crowd Workers Biases}} in {{Fact-Checking Tasks}}},
    booktitle = {Proceedings of the 2022 {{ACM Conference}} on {{Fairness}}, {{Accountability}}, and {{Transparency}}},
    author = {Draws, Tim and La Barbera, David and Soprano, Michael and Roitero, Kevin and Ceolin, Davide and Checco, Alessandro and Mizzaro, Stefano},
    year = {2022},
    series = {{{FAccT}} '22},
    publisher = {{Association for Computing Machinery}},
    address = {{Seoul, South Korea}},
    abstract = {Due to the increasing amount of information shared online every day, the need for a sound and reliable way to distinguish between trustworthy and non-trustworthy information is as present as ever. One technique for performing fact-checking at scale is to employ human intelligence in the form of crowd workers. Although earlier work has suggested that crowd workers can reliably identify misinformation, cognitive biases of crowd workers may decrease the quality of truthfulness judgments in this context. We performed a systematic exploratory analysis of publicly available crowdsourced data to identify a set of potential systematic biases that may occur in fact-checking tasks performed by crowd workers. Following this exploratory study, we collected a novel dataset of crowdsourced truthfulness judgments to validate our hypotheses. Our findings suggest that workers generally overestimate the truthfulness of statements and that different cognitive biases (i.e., the affect heuristic and overconfidence) can affect their annotations. Exploratory findings furthermore show a relationship between crowd workers' belief in science and self-reported confidence and their ability to judge the truthfulness of statements accurately. Interestingly, we find that, depending on general judgment tendencies of workers, their biases may sometimes lead to more accurate judgments.}
    }
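
    The paper's bias analysis rests on comparing crowd judgments against ground-truth labels. Below is a toy illustration of quantifying systematic overestimation of truthfulness; the data, scale, and effect size are invented:

    import numpy as np

    # Hypothetical data: worker judgments and expert ground truth on the same
    # six-point truthfulness scale.
    rng = np.random.default_rng(42)
    ground_truth = rng.integers(1, 7, size=500)
    judgments = np.clip(ground_truth + rng.normal(0.8, 1.0, size=500).round(), 1, 6)

    # A positive mean signed error indicates that workers systematically
    # overestimate truthfulness.
    signed_error = judgments - ground_truth
    print(f"mean signed error: {signed_error.mean():.2f}")
    print(f"share of overestimates: {(signed_error > 0).mean():.1%}")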

  • A. Sarafoglou, A. van der Heijden, T. Draws, J. Cornelisse, E. Wagenmakers, and M. Marsman, “Combine Statistical Thinking With Open Scientific Practice: A Protocol of a Bayesian Research Project,” Psychology Learning & Teaching, p. 1–13, 2022. doi:10.1177/14757257221077307
    [BibTeX] [Abstract] [Download PDF]

    Current developments in the statistics community suggest that modern statistics education should be structured holistically, that is, by allowing students to work with real data and to answer concrete statistical questions, but also by educating them about alternative frameworks, such as Bayesian inference. In this article, we describe how we incorporated such a holistic structure in a Bayesian research project on ordered binomial probabilities. The project was conducted with a group of three undergraduate psychology students who had basic knowledge of Bayesian statistics and programming, but lacked formal mathematical training. The research project aimed to (1) convey the basic mathematical concepts of Bayesian inference; (2) have students experience the entire empirical cycle, including collection, analysis, and interpretation of data; and (3) teach students open science practices.

    @article{sarafoglou2022CombineStatisticalThinking,
    title = {Combine {{Statistical Thinking With Open Scientific Practice}}: {{A Protocol}} of a {{Bayesian Research Project}}},
    shorttitle = {Combine {{Statistical Thinking With Open Scientific Practice}}},
    author = {Sarafoglou, Alexandra and {van der Heijden}, Anna and Draws, Tim and Cornelisse, Joran and Wagenmakers, Eric-Jan and Marsman, Maarten},
    year = {2022},
    month = feb,
    journal = {Psychology Learning \& Teaching},
    pages = {1--13},
    issn = {1475-7257, 2057-3022},
    doi = {10.1177/14757257221077307},
    url = {http://journals.sagepub.com/doi/10.1177/14757257221077307},
    urldate = {2022-02-18},
    abstract = {Current developments in the statistics community suggest that modern statistics education should be structured holistically, that is, by allowing students to work with real data and to answer concrete statistical questions, but also by educating them about alternative frameworks, such as Bayesian inference. In this article, we describe how we incorporated such a holistic structure in a Bayesian research project on ordered binomial probabilities. The project was conducted with a group of three undergraduate psychology students who had basic knowledge of Bayesian statistics and programming, but lacked formal mathematical training. The research project aimed to (1) convey the basic mathematical concepts of Bayesian inference; (2) have students experience the entire empirical cycle including collection, analysis, and interpretation of data and (3) teach students open science practices.},
    langid = {english},
    file = {/Users/tim/Zotero/storage/78ZUADBF/Sarafoglou et al. - 2022 - Combine Statistical Thinking With Open Scientific .pdf}
    }
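
    The project's statistical centerpiece, ordered binomial probabilities, can be illustrated with a conjugate Beta-Binomial model and a Monte Carlo estimate of the posterior probability of an order restriction; the counts below are invented for illustration:

    import numpy as np

    rng = np.random.default_rng(2022)

    # Invented data: successes and trials in two conditions.
    k1, n1 = 18, 40
    k2, n2 = 27, 40

    # Under uniform Beta(1, 1) priors, the posteriors are Beta(k + 1, n - k + 1).
    theta1 = rng.beta(k1 + 1, n1 - k1 + 1, size=100_000)
    theta2 = rng.beta(k2 + 1, n2 - k2 + 1, size=100_000)

    # Posterior probability of the order restriction theta1 < theta2.
    print(f"P(theta1 < theta2 | data) ~ {(theta1 < theta2).mean():.3f}")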

  • C. J. Van Lissa, W. Stroebe, M. vanDellen, P. Leander, M. Agostini, B. Gutzkow, J. Kreienkamp, J. Belanger, T. Draws, A. Grygoryshyn, C. S. Vetter, and the PsyCorona Team, “Using Machine Learning to Identify Important Predictors of COVID-19 Infection Prevention Behaviors During the Early Phase of the Pandemic,” Patterns, 2022.
    [BibTeX] [Abstract] [Download PDF]

    In the early phase of the COVID-19 pandemic, before vaccines became available, a set of infection prevention behaviors constituted the primary means to mitigate the virus spread. Our study aimed to identify important predictors of this set of behaviors – information essential for guiding follow-up research and interventions. Whereas social and health psychological theories suggest a limited set of predictors, machine learning analyses can identify correlates of health behaviors from a larger pool of candidate predictors. We used random forests, a machine learning algorithm, to rank 115 candidate correlates of infection prevention behavior in a study of 56,072 participants across 28 countries, administered in March-May 2020. Results indicated that the two most important predictors related to individual-level injunctive norms – beliefs that people in the community should engage in social distancing and self-isolation, followed by endorsement of restrictive containment measures. The machine-learning model predicted 52% of the variance in infection prevention behavior in a separate test sample – exceeding the performance of psychological models of health behavior. Illustrating how data-driven methods can complement theory, some of the most important predictors were not derived from theories of health behavior – and some theoretically-derived predictors did not turn out to be important.

    @article{vanlissa2022UsingMachineLearning,
    title = {Using {{Machine Learning}} to {{Identify Important Predictors}} of {{COVID-19 Infection Prevention Behaviors During}} the {{Early Phase}} of the {{Pandemic}}},
    author = {Van Lissa, Caspar J. and Stroebe, Wolfgang and {vanDellen}, Michelle and Leander, Pontus and Agostini, Maximilian and Gutzkow, Ben and Kreienkamp, Jannis and Belanger, Jocelyn and Draws, Tim and Grygoryshyn, Andrii and Vetter, Clara S. and {team}, PsyCorona},
    year = {2022},
    journal = {Patterns},
    url = {https://pure.hw.ac.uk/ws/portalfiles/portal/52989806/AAM.pdf},
    abstract = {In the early phase of the COVID-19 pandemic, before vaccines became available, a set of infection prevention behaviors constituted the primary means to mitigate the virus spread. Our study aimed to identify important predictors of this set of behaviors\textendash information essential for guiding follow-up research and interventions. Whereas social and health psychological theories suggest a limited set of predictors, machine learning analyses can identify correlates of health behaviors from a larger pool of candidate predictors. We used random forests, a machine learning algorithm, to rank 115 candidate correlates of infection prevention behavior in a study of 56,072 participants across 28 countries, administered in March-May 2020. Results indicated that the two most important predictors related to individual-level injunctive norms\textemdash beliefs that people in the community should engage in social distancing and self-isolation, followed by endorsement of restrictive containment measures. The machine-learning model predicted 52\% of the variance in infection prevention behavior in a separate test sample\textemdash exceeding the performance of psychological models of health behavior. Illustrating how data-driven methods can complement theory, some of the most important predictors were not derived from theories of health behavior\textemdash and some theoretically-derived predictors did not turn out to be important.},
    langid = {english},
    file = {/Users/tim/Zotero/storage/H58NGCBE/Van Lissa et al. - 2020 - Early Indicators of COVID-19 Infection Prevention .pdf}
    }
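
    A compact sketch of the paper's general recipe: fit a random forest, rank candidate predictors by importance, and check predictive performance on a held-out test sample. The data here are synthetic stand-ins, not the survey data used in the study:

    import numpy as np
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.model_selection import train_test_split

    rng = np.random.default_rng(0)
    n, p = 2_000, 115  # the study ranked 115 candidate predictors
    X = rng.normal(size=(n, p))
    # Synthetic outcome driven by a few informative predictors plus noise.
    y = 0.8 * X[:, 0] + 0.5 * X[:, 1] + rng.normal(scale=0.5, size=n)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    forest = RandomForestRegressor(n_estimators=300, random_state=0).fit(X_train, y_train)

    print(f"R^2 on held-out test sample: {forest.score(X_test, y_test):.2f}")
    ranking = np.argsort(forest.feature_importances_)[::-1]
    print("top-5 predictors by importance:", ranking[:5])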

2021

  • J. van Doorn, D. van den Bergh, U. Böhm, F. Dablander, K. Derks, T. Draws, A. Etz, N. J. Evans, Q. F. Gronau, J. M. Haaf, M. Hinne, Š. Kucharský, A. Ly, M. Marsman, D. Matzke, A. K. N. R. Gupta, A. Sarafoglou, A. Stefan, J. G. Voelkel, and E. Wagenmakers, “The JASP Guidelines for Conducting and Reporting a Bayesian Analysis,” Psychonomic Bulletin & Review, vol. 28, iss. 3, p. 813–826, 2021. doi:10.3758/s13423-020-01798-5
    [BibTeX] [Abstract] [Download PDF]

    Despite the increasing popularity of Bayesian inference in empirical research, few practical guidelines provide detailed recommendations for how to apply Bayesian procedures and interpret the results. Here we offer specific guidelines for four different stages of Bayesian statistical reasoning in a research setting: planning the analysis, executing the analysis, interpreting the results, and reporting the results. The guidelines for each stage are illustrated with a running example. Although the guidelines are geared towards analyses performed with the open-source statistical software JASP, most guidelines extend to Bayesian inference in general.

    @article{vandoornJASPGuidelinesConducting2021,
    title = {The {{JASP}} Guidelines for Conducting and Reporting a {{Bayesian}} Analysis},
    author = {{van Doorn}, Johnny and {van den Bergh}, Don and B{\"o}hm, Udo and Dablander, Fabian and Derks, Koen and Draws, Tim and Etz, Alexander and Evans, Nathan J. and Gronau, Quentin F. and Haaf, Julia M. and Hinne, Max and Kucharsk{\'y}, {\v S}imon and Ly, Alexander and Marsman, Maarten and Matzke, Dora and Gupta, Akash R. Komarlu Narendra and Sarafoglou, Alexandra and Stefan, Angelika and Voelkel, Jan G. and Wagenmakers, Eric-Jan},
    year = {2021},
    month = jun,
    journal = {Psychonomic Bulletin \& Review},
    volume = {28},
    number = {3},
    pages = {813--826},
    issn = {1531-5320},
    doi = {10.3758/s13423-020-01798-5},
    url = {https://doi.org/10.3758/s13423-020-01798-5},
    urldate = {2021-07-28},
    abstract = {Despite the increasing popularity of Bayesian inference in empirical research, few practical guidelines provide detailed recommendations for how to apply Bayesian procedures and interpret the results. Here we offer specific guidelines for four different stages of Bayesian statistical reasoning in a research setting: planning the analysis, executing the analysis, interpreting the results, and reporting the results. The guidelines for each stage are illustrated with a running example. Although the guidelines are geared towards analyses performed with the open-source statistical software JASP, most guidelines extend to Bayesian inference in general.},
    langid = {english},
    file = {/Users/tim/Zotero/storage/CZNSIHR5/van Doorn et al. - 2021 - The JASP guidelines for conducting and reporting a.pdf}
    }
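
    As a small illustration of the kind of Bayesian reasoning the guidelines cover, here is a hand-computed Bayes factor for a binomial test (H0: theta = 0.5 against H1: theta ~ Beta(1, 1)). JASP computes such quantities through its graphical interface, so this standalone sketch is only an analogue:

    import numpy as np
    from scipy.special import betaln

    def bf10_binomial(k: int, n: int) -> float:
        """Bayes factor for H1: theta ~ Beta(1, 1) against H0: theta = 0.5.

        Both marginal likelihoods share the binomial coefficient, which cancels:
        m(H1) = B(k + 1, n - k + 1) and m(H0) = 0.5 ** n.
        """
        log_m1 = betaln(k + 1, n - k + 1)
        log_m0 = n * np.log(0.5)
        return float(np.exp(log_m1 - log_m0))

    # 62 successes in 100 trials: BF10 of roughly 2, i.e., only weak evidence
    # for H1, which the guidelines would recommend reporting as such.
    print(f"BF10 = {bf10_binomial(62, 100):.2f}")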

  • F. Barile, S. Najafian, T. Draws, O. Inel, A. Rieger, R. Hada, and N. Tintarev, “Toward Benchmarking Group Explanations: Evaluating the Effect of Aggregation Strategies versus Explanation,” Proceedings of the Perspectives on the Evaluation of Recommender Systems Workshop (PERSPECTIVES 2021), 2021.
    [BibTeX] [Abstract] [Download PDF]

    In the context of group recommendations, explanations have been claimed to be useful for finding a satisfactory choice for all the group members and helping them agree on a common decision, improving perceived fairness, perceived consensus, and satisfaction. In this work, we present a preregistered evaluation of the impact of using social choice-based explanations for group recommendations (i.e., explanations that intuitively describe the strategy used to generate the recommendation). Our objective is to conceptually replicate a previous study and investigate whether (a) the aggregation strategy used or (b) the explanation most affected users’ fairness perception, consensus perception, and satisfaction. Our results show that the participants are able to discriminate between the different strategies, assigning worse evaluations to the Most Pleasure strategy (which chooses the item with the highest of the individual evaluations). In addition to a condition with no (natural language) explanation, we introduce a more detailed social choice-based explanation, evaluating whether additional information about the strategy has a positive impact on the evaluation of the group recommendation. However, surprisingly, we found no effect of the level of explanation, either as a main effect or as an interaction effect with the aggregation strategy. Overall, our results suggest that users’ perceptions of fairness, consensus, and satisfaction are primarily formed based on the social choice aggregation strategies for the studied group scenario. Our work also highlights the challenges of replication studies in recommender systems and discusses some of the design choices that may influence results when attempting to benchmark findings for the effectiveness of group explanations.

    @article{barile2021BenchmarkingGroupExplanations,
    title = {Toward {{Benchmarking Group Explanations}}: {{Evaluating}} the {{Effect}} of {{Aggregation Strategies}} versus {{Explanation}}},
    author = {Barile, Francesco and Najafian, Shabnam and Draws, Tim and Inel, Oana and Rieger, Alisa and Hada, Rishav and Tintarev, Nava},
    year = {2021},
    journal = {Proceedings of the Perspectives on the Evaluation of Recommender Systems Workshop (PERSPECTIVES 2021)},
    url = {http://ceur-ws.org/Vol-2955/paper11.pdf},
    abstract = {In the context of group recommendations, explanations have been claimed to be useful for finding a satisfactory choice for all the group members and helping them agree on a common decision, improving perceived fairness, perceived consensus, and satisfaction. In this work, we present a preregistered evaluation of the impact of using social choice-based explanations for group recommendations (i.e., explanations that intuitively describe the strategy used to generate the recommendation). Our objective is to conceptually replicate a previous study and investigate whether a) the used aggregation strategy or b) the explanation affected the most users' fairness perception, consensus perception, and satisfaction. Our results show that the participants are able to discriminate between the different strategies, assigning worse evaluations to the Most Pleasure strategy (which chooses the item with the highest of the individual evaluations). In addition to a condition with no (natural language) explanation, we introduce a more detailed social choice-based explanation, evaluating whether additional information about the strategy has a positive impact on the evaluation of the group recommendation. However, we surprisingly found no effect of level of explanations, either as a main effect or as an interaction effect with the aggregation strategy. Overall, our results suggest that users' perceptions of fairness, consensus, and satisfaction are primarily formed based on the social choice aggregation strategies for the studied group scenario. Our work also highlights the challenges of replication studies in recommender systems and discusses some of the design choices that may influence results when attempting to benchmark findings for the effectiveness of group explanations.},
    langid = {english},
    file = {/Users/tim/Zotero/storage/UIVYAYW2/Najafian et al. - Toward Benchmarking Group Explanations Evaluating.pdf}
    }
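
    The social choice aggregation strategies the study compares can be stated in a few lines. The trio below (Average, Least Misery, Most Pleasure) is a common set in group recommendation; only Most Pleasure is explicitly described in the abstract, and the ratings are made up:

    import numpy as np

    # Hypothetical ratings: rows are group members, columns are candidate items.
    ratings = np.array([
        [4, 2, 5],
        [3, 5, 1],
        [5, 4, 2],
    ])

    strategies = {
        "average": ratings.mean(axis=0),       # balance everyone's preferences
        "least_misery": ratings.min(axis=0),   # avoid items anyone strongly dislikes
        "most_pleasure": ratings.max(axis=0),  # use the highest individual evaluation
    }

    for name, scores in strategies.items():
        print(f"{name}: recommend item {int(scores.argmax())} (scores {scores})")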

  • T. Draws, N. Tintarev, U. Gadiraju, A. Bozzon, and B. Timmermans, “Assessing Viewpoint Diversity in Search Results Using Ranking Fairness Metrics,” ACM SIGKDD Explorations Newsletter, vol. 23, iss. 1, p. 50–58, 2021. doi:10.1145/3468507.3468515
    [BibTeX] [Abstract] [Download PDF]

    The way pages are ranked in search results influences whether the users of search engines are exposed to more homogeneous, or rather to more diverse viewpoints. However, this viewpoint diversity is not trivial to assess. In this paper, we use existing and novel ranking fairness metrics to evaluate viewpoint diversity in search result rankings. We conduct a controlled simulation study that shows how ranking fairness metrics can be used for viewpoint diversity, how their outcome should be interpreted, and which metric is most suitable depending on the situation. This paper lays out important groundwork for future research to measure and assess viewpoint diversity in real search result rankings.

    @article{draws2021AssessingViewpointDiversity,
    title = {Assessing {{Viewpoint Diversity}} in {{Search Results Using Ranking Fairness Metrics}}},
    author = {Draws, Tim and Tintarev, Nava and Gadiraju, Ujwal and Bozzon, Alessandro and Timmermans, Benjamin},
    year = {2021},
    month = may,
    journal = {ACM SIGKDD Explorations Newsletter},
    volume = {23},
    number = {1},
    pages = {50--58},
    issn = {1931-0145, 1931-0153},
    doi = {10.1145/3468507.3468515},
    url = {https://dl.acm.org/doi/10.1145/3468507.3468515},
    urldate = {2021-07-13},
    abstract = {The way pages are ranked in search results influences whether the users of search engines are exposed to more homogeneous, or rather to more diverse viewpoints. However, this viewpoint diversity is not trivial to assess. In this paper, we use existing and novel ranking fairness metrics to evaluate viewpoint diversity in search result rankings. We conduct a controlled simulation study that shows how ranking fairness metrics can be used for viewpoint diversity, how their outcome should be interpreted, and which metric is most suitable depending on the situation. This paper lays out important groundwork for future research to measure and assess viewpoint diversity in real search result rankings.},
    copyright = {All rights reserved},
    langid = {english},
    file = {/Users/tim/Zotero/storage/KN52NKB8/Draws et al. - 2021 - Assessing Viewpoint Diversity in Search Results Us.pdf}
    }
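
    One metric of the kind the paper repurposes is normalized discounted difference (rND; Yang & Stoyanovich, 2017), which compares the share of one viewpoint in each ranking prefix to its overall share. A minimal sketch, with the viewpoint labels invented and the normalization simplified:

    import math

    def rnd(ranking: list[int], step: int = 10) -> float:
        """Normalized discounted difference for a binary-labeled ranking.

        `ranking` holds 1 for one viewpoint and 0 for the other. Values near 0
        mean the viewpoint's share in every prefix tracks its overall share.
        """
        n = len(ranking)
        overall = sum(ranking) / n

        def raw(r: list[int]) -> float:
            return sum(
                abs(sum(r[:i]) / i - overall) / math.log2(i)
                for i in range(step, n + 1, step)
            )

        # Simplified normalizer: the case where all 1-labeled items rank last.
        z = raw(sorted(ranking))
        return raw(ranking) / z if z > 0 else 0.0

    # 20 results, viewpoint-balanced overall, but one viewpoint ranked on top.
    biased = [1] * 10 + [0] * 10
    alternating = [1, 0] * 10
    print(f"rND biased: {rnd(biased):.2f}, rND alternating: {rnd(alternating):.2f}")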

  • T. Draws, A. Rieger, O. Inel, U. Gadiraju, and N. Tintarev, “A Checklist to Combat Cognitive Biases in Crowdsourcing,” Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, vol. 9, iss. 1, p. 48–59, 2021.
    [BibTeX] [Abstract] [Download PDF]

    Recent research has demonstrated that cognitive biases such as the confirmation bias or the anchoring effect can negatively affect the quality of crowdsourced data. In practice, however, such biases go unnoticed unless specifically assessed or controlled for. Task requesters need to ensure that task workflow and design choices do not trigger workers’ cognitive biases. Moreover, to facilitate the reuse of crowdsourced data collections, practitioners can benefit from understanding whether and which cognitive biases may be associated with the data. To this end, we propose a 12-item checklist adapted from business psychology to combat cognitive biases in crowdsourcing. We demonstrate the practical application of this checklist in a case study on viewpoint annotations for search results. Through a retrospective analysis of relevant crowdsourcing research that has been published at HCOMP in 2018, 2019, and 2020, we show that cognitive biases may often affect crowd workers but are typically not considered as potential sources of poor data quality. The checklist we propose is a practical tool that requesters can use to improve their task designs and appropriately describe potential limitations of collected data. It contributes to a body of efforts towards making human-labeled data more reliable and reusable.

    @article{draws2021ChecklistCombatCognitive,
    title = {A {{Checklist}} to {{Combat Cognitive Biases}} in {{Crowdsourcing}}},
    author = {Draws, Tim and Rieger, Alisa and Inel, Oana and Gadiraju, Ujwal and Tintarev, Nava},
    year = {2021},
    journal = {Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
    series = {{{HCOMP}} '21},
    volume = {9},
    number = {1},
    pages = {48--59},
    url = {https://ojs.aaai.org/index.php/HCOMP/article/view/18939},
    abstract = {Recent research has demonstrated that cognitive biases such as the confirmation bias or the anchoring effect can negatively affect the quality of crowdsourced data. In practice, however, such biases go unnoticed unless specifically assessed or controlled for. Task requesters need to ensure that task workflow and design choices do not trigger workers' cognitive biases. Moreover, to facilitate the reuse of crowdsourced data collections, practitioners can benefit from understanding whether and which cognitive biases may be associated with the data. To this end, we propose a 12-item checklist adapted from business psychology to combat cognitive biases in crowdsourcing. We demonstrate the practical application of this checklist in a case study on viewpoint annotations for search results. Through a retrospective analysis of relevant crowdsourcing research that has been published at HCOMP in 2018, 2019, and 2020, we show that cognitive biases may often affect crowd workers but are typically not considered as potential sources of poor data quality. The checklist we propose is a practical tool that requesters can use to improve their task designs and appropriately describe potential limitations of collected data. It contributes to a body of efforts towards making human-labeled data more reliable and reusable.},
    langid = {english},
    file = {/Users/tim/Zotero/storage/S8DEMAUD/Draws et al. - A Checklist to Combat Cognitive Biases in Crowdsou.pdf}
    }

  • T. Draws, Z. Szlávik, B. Timmermans, N. Tintarev, K. R. Varshney, and M. Hind, “Disparate Impact Diminishes Consumer Trust Even for Advantaged Users,” in Persuasive Technology, Cham, 2021, p. 135–149. doi:10.1007/978-3-030-79460-6_11
    [BibTeX] [Abstract] [Download PDF]

    Systems aiming to aid consumers in their decision-making (e.g., by implementing persuasive techniques) are more likely to be effective when consumers trust them. However, recent research has demonstrated that the machine learning algorithms that often underlie such technology can act unfairly towards specific groups (e.g., by making more favorable predictions for men than for women). An undesired disparate impact resulting from this kind of algorithmic unfairness could diminish consumer trust and thereby undermine the purpose of the system. We studied this effect by conducting a between-subjects user study investigating how (gender-related) disparate impact affected consumer trust in an app designed to improve consumers’ financial decision-making. Our results show that disparate impact decreased consumers’ trust in the system and made them less likely to use it. Moreover, we find that trust was affected to the same degree across consumer groups (i.e., advantaged and disadvantaged users) despite both of these consumer groups recognizing their respective levels of personal benefit. Our findings highlight the importance of fairness in consumer-oriented artificial intelligence systems.

    @inproceedings{draws2021DisparateImpactDiminishes,
    title = {Disparate {{Impact Diminishes Consumer Trust Even}} for {{Advantaged Users}}},
    booktitle = {Persuasive {{Technology}}},
    author = {Draws, Tim and Szl{\'a}vik, Zolt{\'a}n and Timmermans, Benjamin and Tintarev, Nava and Varshney, Kush R. and Hind, Michael},
    editor = {Ali, Raian and Lugrin, Birgit and Charles, Fred},
    year = {2021},
    volume = {12684},
    pages = {135--149},
    publisher = {{Springer International Publishing}},
    address = {{Cham}},
    doi = {10.1007/978-3-030-79460-6_11},
    url = {https://link.springer.com/10.1007/978-3-030-79460-6_11},
    urldate = {2021-07-13},
    abstract = {Systems aiming to aid consumers in their decision-making (e.g., by implementing persuasive techniques) are more likely to be effective when consumers trust them. However, recent research has demonstrated that the machine learning algorithms that often underlie such technology can act unfairly towards specific groups (e.g., by making more favorable predictions for men than for women). An undesired disparate impact resulting from this kind of algorithmic unfairness could diminish consumer trust and thereby undermine the purpose of the system. We studied this effect by conducting a between-subjects user study investigating how (gender-related) disparate impact affected consumer trust in an app designed to improve consumers' financial decision-making. Our results show that disparate impact decreased consumers' trust in the system and made them less likely to use it. Moreover, we find that trust was affected to the same degree across consumer groups (i.e., advantaged and disadvantaged users) despite both of these consumer groups recognizing their respective levels of personal benefit. Our findings highlight the importance of fairness in consumer-oriented artificial intelligence systems.},
    copyright = {All rights reserved},
    isbn = {978-3-030-79459-0 978-3-030-79460-6},
    langid = {english},
    file = {/Users/tim/Zotero/storage/V9VIEUEH/Draws et al. - 2021 - Disparate Impact Diminishes Consumer Trust Even fo.pdf}
    }
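
    The fairness notion manipulated in the study, disparate impact, is commonly quantified as the ratio of favorable-outcome rates between groups. A sketch with simulated predictions; the 0.8 threshold is the conventional "four-fifths rule", not a value from the paper:

    import numpy as np

    def disparate_impact_ratio(favorable: np.ndarray, group: np.ndarray) -> float:
        """P(favorable | disadvantaged) / P(favorable | advantaged); `group` is 1
        for the advantaged group. Values below 0.8 are often read as adverse impact."""
        return favorable[group == 0].mean() / favorable[group == 1].mean()

    rng = np.random.default_rng(7)
    group = rng.integers(0, 2, size=1_000)
    # Simulate a system whose favorable predictions skew toward group 1.
    favorable = rng.random(1_000) < np.where(group == 1, 0.6, 0.4)
    print(f"disparate impact ratio: {disparate_impact_ratio(favorable, group):.2f}")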

  • T. Draws, N. Tintarev, U. Gadiraju, A. Bozzon, and B. Timmermans, “This Is Not What We Ordered: Exploring Why Biased Search Result Rankings Affect User Attitudes on Debated Topics,” in Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, New York, NY, USA, 2021, p. 295–305. doi:10.1145/3404835.3462851
    [BibTeX] [Abstract] [Download PDF]

    In web search on debated topics, algorithmic and cognitive biases strongly influence how users consume and process information. Recent research has shown that this can lead to a search engine manipulation effect (SEME): when search result rankings are biased towards a particular viewpoint, users tend to adopt this favored viewpoint. To better understand the mechanisms underlying SEME, we present a pre-registered, 5 × 3 factorial user study investigating whether order effects (i.e., users adopting the viewpoint pertaining to higher-ranked documents) can cause SEME. For five different debated topics, we evaluated attitude change after exposing participants with mild pre-existing attitudes to search results that were overall viewpoint-balanced but reflected one of three levels of algorithmic ranking bias. We found that attitude change did not differ across levels of ranking bias and did not vary based on individual user differences. Our results thus suggest that order effects may not be an underlying mechanism of SEME. Exploratory analyses lend support to the presence of exposure effects (i.e., users adopting the majority viewpoint among the results they examine) as a contributing factor to users’ attitude change. We discuss how our findings can inform the design of user bias mitigation strategies.

    @inproceedings{draws2021ThisNotWhat,
    title = {This {{Is Not What We Ordered}}: {{Exploring Why Biased Search Result Rankings Affect User Attitudes}} on {{Debated Topics}}},
    shorttitle = {This {{Is Not What We Ordered}}},
    booktitle = {Proceedings of the 44th {{International ACM SIGIR Conference}} on {{Research}} and {{Development}} in {{Information Retrieval}}},
    author = {Draws, Tim and Tintarev, Nava and Gadiraju, Ujwal and Bozzon, Alessandro and Timmermans, Benjamin},
    year = {2021},
    month = jul,
    series = {{{SIGIR}} '21},
    pages = {295--305},
    publisher = {{Association for Computing Machinery}},
    address = {{New York, NY, USA}},
    doi = {10.1145/3404835.3462851},
    url = {https://dl.acm.org/doi/10.1145/3404835.3462851},
    urldate = {2021-07-13},
    abstract = {In web search on debated topics, algorithmic and cognitive biases strongly influence how users consume and process information. Recent research has shown that this can lead to a search engine manipulation effect (SEME): when search result rankings are biased towards a particular viewpoint, users tend to adopt this favored viewpoint. To better understand the mechanisms underlying SEME, we present a pre-registered, 5 \texttimes{} 3 factorial user study investigating whether order effects (i.e., users adopting the viewpoint pertaining to higher-ranked documents) can cause SEME. For five different debated topics, we evaluated attitude change after exposing participants with mild pre-existing attitudes to search results that were overall viewpoint-balanced but reflected one of three levels of algorithmic ranking bias. We found that attitude change did not differ across levels of ranking bias and did not vary based on individual user differences. Our results thus suggest that order effects may not be an underlying mechanism of SEME. Exploratory analyses lend support to the presence of exposure effects (i.e., users adopting the majority viewpoint among the results they examine) as a contributing factor to users' attitude change. We discuss how our findings can inform the design of user bias mitigation strategies.},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8037-9},
    langid = {english},
    file = {/Users/tim/Zotero/storage/EFI7ZWH3/Draws et al. - 2021 - This Is Not What We Ordered Exploring Why Biased .pdf}
    }
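
    To make "levels of algorithmic ranking bias" concrete, here is one way to generate search result lists that are viewpoint-balanced overall while a bias parameter pushes one viewpoint toward the top. The construction is our illustration, not the paper's stimulus-generation procedure:

    import numpy as np

    def biased_balanced_ranking(n: int = 10, bias: float = 0.0, seed: int = 0) -> list[str]:
        """Rank n/2 'pro' and n/2 'con' documents; bias in [0, 1] boosts pro
        documents toward the top while the result set stays balanced."""
        rng = np.random.default_rng(seed)
        labels = np.array(["pro", "con"] * (n // 2))
        scores = rng.random(n) + np.where(labels == "pro", bias, 0.0)
        return [str(v) for v in labels[np.argsort(-scores)]]

    for bias in (0.0, 0.5, 1.0):  # three levels of ranking bias
        print(bias, biased_balanced_ranking(bias=bias))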

  • T. Draws, “Understanding How Algorithmic and Cognitive Biases in Web Search Affect User Attitudes on Debated Topics,” in Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, New York, NY, USA, 2021, p. 2709. doi:10.1145/3404835.3463273
    [BibTeX] [Abstract] [Download PDF]

    Web search increasingly provides a platform for users to seek advice on important personal decisions [6] but may be biased in several different ways [1]. One result of such biases is the search engine manipulation effect (SEME): when a list of search results relates to a debated topic (e.g., veganism) and promotes documents pertaining to a particular viewpoint (e.g., by ranking them higher), users tend to adopt this advantaged viewpoint [5]. However, the detection and mitigation of SEME are complicated by the current lack of empirical understanding of its underlying mechanisms. This dissertation aims to investigate which (and to what degree) algorithmic and cognitive biases play a role in SEME concerning debated topics.

    @inproceedings{draws2021UnderstandingHowAlgorithmic,
    title = {Understanding {{How Algorithmic}} and {{Cognitive Biases}} in {{Web Search Affect User Attitudes}} on {{Debated Topics}}},
    booktitle = {Proceedings of the 44th {{International ACM SIGIR Conference}} on {{Research}} and {{Development}} in {{Information Retrieval}}},
    author = {Draws, Tim},
    year = {2021},
    series = {{{SIGIR}} '21},
    pages = {2709},
    publisher = {{Association for Computing Machinery}},
    address = {{New York, NY, USA}},
    doi = {10.1145/3404835.3463273},
    url = {http://timdraws.net/files/papers/Understanding_How_Algorithmic_and_Cognitive_Biases_in_Web_Search_Affect_User_Attitudes_on_Debated_Topics.pdf},
    abstract = {Web search increasingly provides a platform for users to seek advice on important personal decisions [6] but may be biased in several different ways [1]. One result of such biases is the search engine manipulation effect (SEME): when a list of search results relates to a debated topic (e.g., veganism) and promotes documents pertaining to a particular viewpoint (e.g., by ranking them higher), users tend to adopt this advantaged viewpoint [5]. However, the detection and mitigation of SEME are complicated by the current lack of empirical understanding of its underlying mechanisms. This dissertation aims to investigate which (and to what degree) algorithmic and cognitive biases play a role in SEME concerning debated topics.},
    isbn = {978-1-4503-8037-9},
    langid = {english},
    keywords = {Computer Science - Computation and Language},
    file = {/Users/tim/Zotero/storage/ES7AQ8LN/Draws et al. - 2020 - Helping users discover perspectives Enhancing opi.pdf}
    }

  • F. Giunchiglia, S. Kleanthous, J. Otterbacher, and T. Draws, “Transparency Paths – Documenting the Diversity of User Perceptions,” in Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization, Utrecht, Netherlands, 2021, p. 415–420. doi:10.1145/3450614.3463292
    [BibTeX] [Abstract] [Download PDF]

    We are living in an era of global digital platforms, eco-systems of algorithmic processes that serve users worldwide. However, the increasing exposure to diversity online – of information and users – has led to important considerations of bias. A given platform, such as the Google search engine, may demonstrate behaviors that deviate from what users expect, or what they consider fair, relative to their own context and experiences. In this exploratory work, we put forward the notion of transparency paths, a process by which we document our position, choices, and perceptions when developing and/or using algorithmic platforms. We conducted a self-reflection exercise with seven researchers, who collected and analyzed two sets of images; one depicting an everyday activity, “washing hands,” and a second depicting the concept of “home.” Participants had to document their process and choices, and in the end, compare their work to others. Finally, participants were asked to reflect on the definitions of bias and diversity. The exercise revealed the range of perspectives and approaches taken, underscoring the need for future work that will refine the transparency paths methodology.

    @inproceedings{giunchigliaTransparencyPathsDocumenting2021,
    title = {Transparency {{Paths}} - {{Documenting}} the {{Diversity}} of {{User Perceptions}}},
    booktitle = {Adjunct {{Proceedings}} of the 29th {{ACM Conference}} on {{User Modeling}}, {{Adaptation}} and {{Personalization}}},
    author = {Giunchiglia, Fausto and Kleanthous, Styliani and Otterbacher, Jahna and Draws, Tim},
    year = {2021},
    month = jun,
    series = {{{UMAP}} '21},
    pages = {415--420},
    publisher = {{ACM}},
    address = {{Utrecht Netherlands}},
    doi = {10.1145/3450614.3463292},
    url = {https://dl.acm.org/doi/10.1145/3450614.3463292},
    urldate = {2021-07-13},
    abstract = {We are living in an era of global digital platforms, eco-systems of algorithmic processes that serve users worldwide. However, the increasing exposure to diversity online \textendash{} of information and users \textendash{} has led to important considerations of bias. A given platform, such as the Google search engine, may demonstrate behaviors that deviate from what users expect, or what they consider fair, relative to their own context and experiences. In this exploratory work, we put forward the notion of transparency paths, a process by which we document our position, choices, and perceptions when developing and/or using algorithmic platforms. We conducted a self-reflection exercise with seven researchers, who collected and analyzed two sets of images; one depicting an everyday activity, ``washing hands," and a second depicting the concept of ``home." Participants had to document their process and choices, and in the end, compare their work to others. Finally, participants were asked to reflect on the definitions of bias and diversity. The exercise revealed the range of perspectives and approaches taken, underscoring the need for future work that will refine the transparency paths methodology.},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8367-7},
    langid = {english},
    file = {/Users/tim/Zotero/storage/QGKH2KJQ/Giunchiglia et al. - 2021 - Transparency Paths - Documenting the Diversity of .pdf}
    }

  • S. Najafian, T. Draws, F. Barile, M. Tkalcic, J. Yang, and N. Tintarev, “Exploring User Concerns about Disclosing Location and Emotion Information in Group Recommendations,” in Proceedings of the 32nd ACM Conference on Hypertext and Social Media, New York, NY, USA, 2021, p. 155–164. doi:10.1145/3465336.3475104
    [BibTeX] [Abstract] [Download PDF]

    Recent research has shown that explanations serve as an important means to increase transparency in group recommendations while also increasing users’ privacy concerns. However, it is currently unclear what personal and contextual factors affect users’ privacy concerns about various types of personal information. This paper studies the effect of users’ personality traits and preference scenarios – having a majority or minority preference – on their privacy concerns regarding location and emotion information. To create natural scenarios of group decision-making where users can control the amount of information disclosed, we develop TouryBot, a chat-bot agent that generates natural language explanations to help group members explain their arguments for suggestions to the group in the tourism domain. We conducted a user study in which we instructed 541 participants to convince the group to either visit or skip a recommended place. Our results show that users generally have a larger concern regarding the disclosure of emotion compared to location information. However, we found no evidence that personality traits or preference scenarios affect privacy concerns in our task. Further analyses revealed that task design (i.e., the pressure on users to convince the group) had an effect on participants’ emotion-related privacy concerns. Our study also highlights the utility of providing users with the option of partial disclosure of personal information, which appeared to be popular among the participants.

    @inproceedings{najafian2021ExploringUserConcerns,
    title = {Exploring {{User Concerns}} about {{Disclosing Location}} and {{Emotion Information}} in {{Group Recommendations}}},
    booktitle = {Proceedings of the 32nd {{ACM Conference}} on {{Hypertext}} and {{Social Media}}},
    author = {Najafian, Shabnam and Draws, Tim and Barile, Francesco and Tkalcic, Marko and Yang, Jie and Tintarev, Nava},
    year = {2021},
    series = {{{HT}} '21},
    pages = {155--164},
    publisher = {{Association for Computing Machinery}},
    address = {{New York, NY, USA}},
    doi = {10.1145/3465336.3475104},
    url = {https://doi.org/10.1145/3465336.3475104},
    abstract = {Recent research has shown that explanations serve as an important means to increase transparency in group recommendations while also increasing users' privacy concerns. However, it is currently unclear what personal and contextual factors affect users' privacy concerns about various types of personal information. This paper studies the effect of users' personality traits and preference scenarios \textemdash having a majority or minority preference\textemdash{} on their privacy concerns regarding location and emotion information. To create natural scenarios of group decision-making where users can control the amount of information disclosed, we develop TouryBot, a chat-bot agent that generates natural language explanations to help group members explain their arguments for suggestions to the group in the tourism domain. We conducted a user study in which we instructed 541 participants to convince the group to either visit or skip a recommended place. Our results show that users generally have a larger concern regarding the disclosure of emotion compared to location information. However, we found no evidence that personality traits or preference scenarios affect privacy concerns in our task. Further analyses revealed that task design (i.e., the pressure on users to convince the group) had an effect on participants' emotion-related privacy concerns. Our study also highlights the utility of providing users with the option of partial disclosure of personal information, which appeared to be popular among the participants.},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8551-0},
    langid = {english},
    file = {/Users/tim/Zotero/storage/5G743D5R/Exploring_User_Concerns_about_Disclosing_Location_and_Emotion_Information_in_Group_Recommendations.pdf}
    }

  • A. Rieger, T. Draws, N. Tintarev, and M. Theune, “This Item Might Reinforce Your Opinion: Obfuscation and Labeling of Search Results to Mitigate Confirmation Bias,” in Proceedings of the 32nd ACM Conference on Hypertext and Social Media, New York, NY, USA, 2021, p. 189–199. doi:10.1145/3465336.3475101
    [BibTeX] [Abstract] [Download PDF]

    During online information search, users tend to select search results that confirm previous beliefs and ignore competing possibilities. This systematic pattern in human behavior is known as confirmation bias. In this paper, we study the effect of obfuscation (i.e., hiding the result unless the user clicks on it) with warning labels and the effect of task on interaction with attitude-confirming search results. We conducted a preregistered between-subjects user study (n = 328) comparing six groups: three levels of obfuscation (targeted, random, none) and two levels of task (joint, two separate) for four debated topics. We found that both types of obfuscation influence user interactions, and in particular that targeted obfuscation helps decrease interaction with attitude-confirming search results. Future work is needed to understand how much of the observed effect is due to the strong influence of obfuscation, versus the warning label or the task design. We discuss design guidelines concerning system goals such as decreasing consumption of attitude-confirming search results, versus nudging users toward a more analytical mode of information processing. We also discuss implications for future work, such as the effects of such interventions over repeated exposure. We conclude with a strong word of caution: measures such as obfuscations should only be used for the benefit of the user, e.g., when they explicitly consent to mitigating their own biases.

    @inproceedings{rieger2021ThisItemMight,
    title = {This {{Item Might Reinforce Your Opinion}}: {{Obfuscation}} and {{Labeling}} of {{Search Results}} to {{Mitigate Confirmation Bias}}},
    booktitle = {Proceedings of the 32nd {{ACM Conference}} on {{Hypertext}} and {{Social Media}}},
    author = {Rieger, Alisa and Draws, Tim and Tintarev, Nava and Theune, Mariet},
    year = {2021},
    series = {{{HT}} '21},
    pages = {189--199},
    publisher = {{Association for Computing Machinery}},
    address = {{New York, NY, USA}},
    doi = {10.1145/3465336.3475101},
    url = {https://doi.org/10.1145/3465336.3475101},
    abstract = {During online information search, users tend to select search results that confirm previous beliefs and ignore competing possibilities. This systematic pattern in human behavior is known as confirmation bias. In this paper, we study the effect of obfuscation (i.e., hiding the result unless the user clicks on it) with warning labels and the effect of task on interaction with attitude-confirming search results. We conducted a preregistered between-subjects user study ({$\mathsl{n}$}=328) comparing six groups: three levels of obfuscation (targeted, random, none) and two levels of task (joint, two separate) for four debated topics. We found that both types of obfuscation influence user interactions, and in particular that targeted obfuscation helps decrease interaction with attitude-confirming search results. Future work is needed to understand how much of the observed effect is due to the strong influence of obfuscation, versus the warning label or the task design. We discuss design guidelines concerning system goals such as decreasing consumption of attitude-confirming search results, versus nudging users toward a more analytical mode of information processing. We also discuss implications for future work, such as the effects of such interventions over repeated exposure. We conclude with a strong word of caution: measures such as obfuscations should only be used for the benefit of the user, e.g., when they explicitly consent to mitigating their own biases.},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8551-0},
    langid = {english},
    file = {/Users/tim/Zotero/storage/A7Y6CPG2/This_Item_Might_Reinforce_Your_Opinion.pdf}
    }

2020

  • D. van den Bergh, J. van Doorn, M. Marsman, T. Draws, E. van Kesteren, K. Derks, F. Dablander, Q. F. Gronau, Š. Kucharský, A. K. N. R. Gupta, A. Sarafoglou, J. G. Voelkel, A. Stefan, M. Hinne, D. Matzke, and E. Wagenmakers, “A Tutorial on Conducting and Interpreting a Bayesian ANOVA in JASP,” Annee Psychologique, vol. 120, iss. 1, p. 73–96, 2020. doi:10.3917/anpsy1.201.0073
    [BibTeX] [Abstract] [Download PDF]

    Analysis of variance (ANOVA) is the standard procedure for statistical inference in factorial designs. Typically, ANOVAs are executed using frequentist statistics, where p-values determine statistical significance in an all-or-none fashion. In recent years, the Bayesian approach to statistics is increasingly viewed as a legitimate alternative to the p-value. However, the broad adoption of Bayesian statistics-and Bayesian ANOVA in particular-is frustrated by the fact that Bayesian concepts are rarely taught in applied statistics courses. Consequently, practitioners may be unsure how to conduct a Bayesian ANOVA and interpret the results. Here we provide a guide for executing and interpreting a Bayesian ANOVA with JASP, an open-source statistical software program with a graphical user interface. We explain the key concepts of the Bayesian ANOVA using two empirical examples.

    @article{vandenberghTutorialConductingInterpreting2020,
    title = {A {{Tutorial}} on {{Conducting}} and {{Interpreting}} a {{Bayesian ANOVA}} in {{JASP}}},
    author = {{van den Bergh}, Don and {van Doorn}, Johnny and Marsman, Maarten and Draws, Tim and {van Kesteren}, Erik-Jan and Derks, Koen and Dablander, Fabian and Gronau, Quentin Frederik and Kucharsk{\'y}, {\v S}imon and Gupta, Akash R. Komarlu Narendra and Sarafoglou, Alexandra and Voelkel, Jan G. and Stefan, Angelika and Hinne, Max and Matzke, Dora and Wagenmakers, Eric-Jan},
    year = {2020},
    month = feb,
    journal = {Annee Psychologique},
    volume = {120},
    number = {1},
    pages = {73--96},
    issn = {00035033},
    doi = {10.3917/anpsy1.201.0073},
    url = {https://ir.cwi.nl/pub/29560},
    urldate = {2021-07-28},
    abstract = {Analysis of variance (ANOVA) is the standard procedure for statistical inference in factorial designs. Typically, ANOVAs are executed using frequentist statistics, where p-values determine statistical significance in an all-or-none fashion. In recent years, the Bayesian approach to statistics is increasingly viewed as a legitimate alternative to the p-value. However, the broad adoption of Bayesian statistics-and Bayesian ANOVA in particular-is frustrated by the fact that Bayesian concepts are rarely taught in applied statistics courses. Consequently, practitioners may be unsure how to conduct a Bayesian ANOVA and interpret the results. Here we provide a guide for executing and interpreting a Bayesian ANOVA with JASP, an open-source statistical software program with a graphical user interface. We explain the key concepts of the Bayesian ANOVA using two empirical examples.},
    langid = {english},
    keywords = {Analysis of Variance,Bayes Factor,Hypothesis Test,JASP,Posterior distribution,Tutorial},
    file = {/Users/tim/Zotero/storage/NCXFKCR2/D. van den Bergh (Don) et al. - 2020 - A tutorial on conducting and interpreting a bayesi.pdf}
    }
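
    JASP runs the Bayesian ANOVA through its graphical interface; as a rough scripted analogue, the BIC approximation BF10 ≈ exp((BIC0 - BIC1) / 2) (Wagenmakers, 2007) compares a null model against a one-factor model. A sketch with simulated data; JASP's default priors will generally give a different value:

    import numpy as np
    import pandas as pd
    import statsmodels.formula.api as smf

    rng = np.random.default_rng(1)
    df = pd.DataFrame({
        "group": np.repeat(["a", "b", "c"], 50),
        "y": np.concatenate([rng.normal(m, 1, 50) for m in (0.0, 0.4, 0.8)]),
    })

    null_model = smf.ols("y ~ 1", data=df).fit()
    full_model = smf.ols("y ~ C(group)", data=df).fit()

    # BIC-based approximation to the Bayes factor for the group effect.
    bf10 = np.exp((null_model.bic - full_model.bic) / 2)
    print(f"approximate BF10: {bf10:.1f}")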

  • T. Draws, J. Liu, and N. Tintarev, “Helping Users Discover Perspectives: Enhancing Opinion Mining with Joint Topic Models,” in 2020 International Conference on Data Mining Workshops (ICDMW), Sorrento, Italy, 2020, p. 23–30. doi:10.1109/ICDMW51313.2020.00013
    [BibTeX] [Abstract] [Download PDF]

    Support or opposition with respect to a debated claim such as “abortion should be legal” can have different underlying reasons, which we call perspectives. This paper explores how opinion mining can be enhanced with joint topic modeling, to identify distinct perspectives within the topic, providing an informative overview from unstructured text. We evaluate four joint topic models (TAM, JST, VODUM, and LAM) in a user study assessing human understandability of the extracted perspectives. Based on the results, we conclude that joint topic models such as TAM can discover perspectives that align with human judgments. Moreover, our results suggest that users are not influenced by their pre-existing stance on the topic of abortion when interpreting the output of topic models.

    @inproceedings{drawsHelpingUsersDiscover2020,
    title = {Helping Users Discover Perspectives: {{Enhancing}} Opinion Mining with Joint Topic Models},
    shorttitle = {Helping Users Discover Perspectives},
    booktitle = {2020 {{International Conference}} on {{Data Mining Workshops}} ({{ICDMW}})},
    author = {Draws, Tim and Liu, Jody and Tintarev, Nava},
    year = {2020},
    month = nov,
    pages = {23--30},
    publisher = {{IEEE}},
    address = {{Sorrento, Italy}},
    doi = {10.1109/ICDMW51313.2020.00013},
    url = {https://ieeexplore.ieee.org/document/9346407/},
    urldate = {2021-07-13},
    abstract = {Support or opposition with respect to a debated claim such as abortion should be legal can have different underlying reasons, which we call perspectives. This paper explores how opinion mining can be enhanced with joint topic modeling, to identify distinct perspectives within the topic, providing an informative overview from unstructured text. We evaluate four joint topic models (TAM, JST, VODUM, and LAM) in a user study assessing human understandability of the extracted perspectives. Based on the results, we conclude that joint topic models such as TAM can discover perspectives that align with human judgments. Moreover, our results suggest that users are not influenced by their pre-existing stance on the topic of abortion when interpreting the output of topic models.},
    copyright = {All rights reserved},
    isbn = {978-1-72819-012-9},
    langid = {english},
    file = {/Users/tim/Zotero/storage/444RTM63/Draws et al. - 2020 - Helping users discover perspectives Enhancing opi.pdf}
    }
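
    TAM, JST, VODUM, and LAM are specialized joint topic models without mainstream library implementations. As a rough baseline illustration of topic modeling on opinionated text, a plain LDA can be fit with scikit-learn; note that plain LDA lacks the stance/aspect dimension that makes the joint models perspective-aware, and the corpus below is invented:

    from sklearn.decomposition import LatentDirichletAllocation
    from sklearn.feature_extraction.text import CountVectorizer

    docs = [
        "abortion should be legal because bodily autonomy matters",
        "the law must protect unborn life at every stage",
        "legal abortion protects health and personal freedom",
        "life begins at conception and deserves protection",
    ]

    vectorizer = CountVectorizer(stop_words="english")
    counts = vectorizer.fit_transform(docs)

    lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(counts)

    terms = vectorizer.get_feature_names_out()
    for k, weights in enumerate(lda.components_):
        top_terms = terms[weights.argsort()[::-1][:4]]
        print(f"topic {k}: {' '.join(top_terms)}")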
