publications

2021

  • J. van Doorn, D. van den Bergh, U. Böhm, F. Dablander, K. Derks, T. Draws, A. Etz, N. J. Evans, Q. F. Gronau, J. M. Haaf, M. Hinne, Š. Kucharský, A. Ly, M. Marsman, D. Matzke, A. K. N. R. Gupta, A. Sarafoglou, A. Stefan, J. G. Voelkel, and E. Wagenmakers, “The JASP guidelines for conducting and reporting a Bayesian analysis,” Psychonomic Bulletin & Review, vol. 28, iss. 3, p. 813–826, 2021. doi:10.3758/s13423-020-01798-5
    [BibTeX] [Abstract] [Download PDF]

    Despite the increasing popularity of Bayesian inference in empirical research, few practical guidelines provide detailed recommendations for how to apply Bayesian procedures and interpret the results. Here we offer specific guidelines for four different stages of Bayesian statistical reasoning in a research setting: planning the analysis, executing the analysis, interpreting the results, and reporting the results. The guidelines for each stage are illustrated with a running example. Although the guidelines are geared towards analyses performed with the open-source statistical software JASP, most guidelines extend to Bayesian inference in general.

    @article{vandoornJASPGuidelinesConducting2021,
    title = {The {JASP} guidelines for conducting and reporting a {Bayesian} analysis},
    volume = {28},
    issn = {1531-5320},
    doi = {10.3758/s13423-020-01798-5},
    abstract = {Despite the increasing popularity of Bayesian inference in empirical research, few practical guidelines provide detailed recommendations for how to apply Bayesian procedures and interpret the results. Here we offer specific guidelines for four different stages of Bayesian statistical reasoning in a research setting: planning the analysis, executing the analysis, interpreting the results, and reporting the results. The guidelines for each stage are illustrated with a running example. Although the guidelines are geared towards analyses performed with the open-source statistical software JASP, most guidelines extend to Bayesian inference in general.},
    language = {en},
    number = {3},
    journal = {Psychonomic Bulletin \& Review},
    author = {van Doorn, Johnny and van den Bergh, Don and Böhm, Udo and Dablander, Fabian and Derks, Koen and Draws, Tim and Etz, Alexander and Evans, Nathan J. and Gronau, Quentin F. and Haaf, Julia M. and Hinne, Max and Kucharský, Šimon and Ly, Alexander and Marsman, Maarten and Matzke, Dora and Gupta, Akash R. Komarlu Narendra and Sarafoglou, Alexandra and Stefan, Angelika and Voelkel, Jan G. and Wagenmakers, Eric-Jan},
    month = jun,
    year = {2021},
    pages = {813--826},
    file = {Springer Full Text PDF:/Users/tim/Zotero/storage/CZNSIHR5/van Doorn et al. - 2021 - The JASP guidelines for conducting and reporting a.pdf:application/pdf},
    }

  • F. Giunchiglia, S. Kleanthous, J. Otterbacher, and T. Draws, “Transparency Paths – Documenting the Diversity of User Perceptions,” in Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization, Utrecht, Netherlands, 2021, p. 415–420. doi:10.1145/3450614.3463292
    [BibTeX] [Abstract] [Download PDF]

    We are living in an era of global digital platforms, eco-systems of algorithmic processes that serve users worldwide. However, the increasing exposure to diversity online – of information and users – has led to important considerations of bias. A given platform, such as the Google search engine, may demonstrate behaviors that deviate from what users expect, or what they consider fair, relative to their own context and experiences. In this exploratory work, we put forward the notion of transparency paths, a process by which we document our position, choices, and perceptions when developing and/or using algorithmic platforms. We conducted a self-reflection exercise with seven researchers, who collected and analyzed two sets of images; one depicting an everyday activity, “washing hands,” and a second depicting the concept of “home.” Participants had to document their process and choices, and in the end, compare their work to others. Finally, participants were asked to reflect on the definitions of bias and diversity. The exercise revealed the range of perspectives and approaches taken, underscoring the need for future work that will refine the transparency paths methodology.

    @inproceedings{giunchigliaTransparencyPathsDocumenting2021,
    address = {Utrecht, Netherlands},
    series = {{UMAP} '21},
    title = {Transparency {Paths} - {Documenting} the {Diversity} of {User} {Perceptions}},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8367-7},
    doi = {10.1145/3450614.3463292},
    abstract = {We are living in an era of global digital platforms, eco-systems of algorithmic processes that serve users worldwide. However, the increasing exposure to diversity online – of information and users – has led to important considerations of bias. A given platform, such as the Google search engine, may demonstrate behaviors that deviate from what users expect, or what they consider fair, relative to their own context and experiences. In this exploratory work, we put forward the notion of transparency paths, a process by which we document our position, choices, and perceptions when developing and/or using algorithmic platforms. We conducted a self-reflection exercise with seven researchers, who collected and analyzed two sets of images; one depicting an everyday activity, “washing hands,” and a second depicting the concept of “home.” Participants had to document their process and choices, and in the end, compare their work to others. Finally, participants were asked to reflect on the definitions of bias and diversity. The exercise revealed the range of perspectives and approaches taken, underscoring the need for future work that will refine the transparency paths methodology.},
    language = {en},
    booktitle = {Adjunct {Proceedings} of the 29th {ACM} {Conference} on {User} {Modeling}, {Adaptation} and {Personalization}},
    publisher = {Association for Computing Machinery},
    author = {Giunchiglia, Fausto and Kleanthous, Styliani and Otterbacher, Jahna and Draws, Tim},
    month = jun,
    year = {2021},
    pages = {415--420},
    file = {Giunchiglia et al. - 2021 - Transparency Paths - Documenting the Diversity of .pdf:/Users/tim/Zotero/storage/QGKH2KJQ/Giunchiglia et al. - 2021 - Transparency Paths - Documenting the Diversity of .pdf:application/pdf},
    }

  • A. Sarafoglou, A. van der Heijden, T. Draws, J. Cornelisse, E. Wagenmakers, and M. Marsman, “Combine Statistical Thinking With Open Scientific Practice: A Protocol of a Bayesian Research Project For Undergraduate Students,” arXiv preprint arXiv:1810.07496, p. 28, 2021.
    [BibTeX] [Abstract] [Download PDF]

    Current developments in the statistics community suggest that modern statistics education should be structured holistically, i.e., by allowing students to work with real data and answer concrete statistical questions, but also by educating them about alternative statistical frameworks, such as Bayesian statistics. In this article, we describe how we incorporated such a holistic structure in a Bayesian thesis project on ordered binomial probabilities. The project was targeted at undergraduate students in psychology with basic knowledge in Bayesian statistics and programming, but no formal mathematical training. The thesis project aimed to (1) convey the basic mathematical concepts of Bayesian inference, (2) let students experience the entire empirical cycle including the collection, analysis, and interpretation of data, and (3) teach students open science practices.

    @article{sarafoglou2021CombineStatisticalThinking,
    title = {Combine {Statistical} {Thinking} {With} {Open} {Scientific} {Practice}: {A} {Protocol} of a {Bayesian} {Research} {Project} {For} {Undergraduate} {Students}},
    url = {http://arxiv.org/abs/1810.07496},
    eprint = {1810.07496},
    archiveprefix = {arXiv},
    abstract = {Current developments in the statistics community suggest that modern statistics education should be structured holistically, i.e., by allowing students to work with real data and answer concrete statistical questions, but also by educating them about alternative statistical frameworks, such as Bayesian statistics. In this article, we describe how we incorporated such a holistic structure in a Bayesian thesis project on ordered binomial probabilities. The project was targeted at undergraduate students in psychology with basic knowledge in Bayesian statistics and programming, but no formal mathematical training. The thesis project aimed to (1) convey the basic mathematical concepts of Bayesian inference, (2) let students experience the entire empirical cycle including the collection, analysis, and interpretation of data, and (3) teach students open science practices.},
    language = {en},
    journal = {arXiv preprint arXiv:1810.07496},
    author = {Sarafoglou, Alexandra and van der Heijden, Anna and Draws, Tim and Cornelisse, Joran and Wagenmakers, Eric-Jan and Marsman, Maarten},
    year = {2021},
    pages = {28},
    file = {Sarafoglou et al. - Combine Statistical Thinking With Open Scientific P.pdf:/Users/tim/Zotero/storage/AX9ANERM/Sarafoglou et al. - Combine Statistical Thinking With Open Scientific P.pdf:application/pdf},
    }

  • T. Draws, N. Tintarev, U. Gadiraju, A. Bozzon, and B. Timmermans, “Assessing Viewpoint Diversity in Search Results Using Ranking Fairness Metrics,” ACM SIGKDD Explorations Newsletter, vol. 23, iss. 1, p. 50–58, 2021. doi:10.1145/3468507.3468515
    [BibTeX] [Abstract] [Download PDF]

    The way pages are ranked in search results influences whether the users of search engines are exposed to more homogeneous, or rather to more diverse viewpoints. However, this viewpoint diversity is not trivial to assess. In this paper, we use existing and novel ranking fairness metrics to evaluate viewpoint diversity in search result rankings. We conduct a controlled simulation study that shows how ranking fairness metrics can be used for viewpoint diversity, how their outcome should be interpreted, and which metric is most suitable depending on the situation. This paper lays out important groundwork for future research to measure and assess viewpoint diversity in real search result rankings.

    @article{draws2021AssessingViewpointDiversity,
    title = {Assessing {Viewpoint} {Diversity} in {Search} {Results} {Using} {Ranking} {Fairness} {Metrics}},
    volume = {23},
    copyright = {All rights reserved},
    issn = {1931-0145, 1931-0153},
    doi = {10.1145/3468507.3468515},
    abstract = {The way pages are ranked in search results influences whether the users of search engines are exposed to more homogeneous, or rather to more diverse viewpoints. However, this viewpoint diversity is not trivial to assess. In this paper, we use existing and novel ranking fairness metrics to evaluate viewpoint diversity in search result rankings. We conduct a controlled simulation study that shows how ranking fairness metrics can be used for viewpoint diversity, how their outcome should be interpreted, and which metric is most suitable depending on the situation. This paper lays out important groundwork for future research to measure and assess viewpoint diversity in real search result rankings.},
    language = {en},
    number = {1},
    journal = {ACM SIGKDD Explorations Newsletter},
    address = {New York, NY, USA},
    author = {Draws, Tim and Tintarev, Nava and Gadiraju, Ujwal and Bozzon, Alessandro and Timmermans, Benjamin},
    month = may,
    year = {2021},
    pages = {50--58},
    file = {Draws et al. - 2021 - Assessing Viewpoint Diversity in Search Results Us.pdf:/Users/tim/Zotero/storage/KN52NKB8/Draws et al. - 2021 - Assessing Viewpoint Diversity in Search Results Us.pdf:application/pdf},
    }

  • T. Draws, Z. Szlávik, B. Timmermans, N. Tintarev, K. R. Varshney, and M. Hind, “Disparate Impact Diminishes Consumer Trust Even for Advantaged Users,” in Persuasive Technology, Cham, 2021, p. 135–149. doi:10.1007/978-3-030-79460-6_11
    [BibTeX] [Abstract] [Download PDF]

    Systems aiming to aid consumers in their decision-making (e.g., by implementing persuasive techniques) are more likely to be effective when consumers trust them. However, recent research has demonstrated that the machine learning algorithms that often underlie such technology can act unfairly towards specific groups (e.g., by making more favorable predictions for men than for women). An undesired disparate impact resulting from this kind of algorithmic unfairness could diminish consumer trust and thereby undermine the purpose of the system. We studied this effect by conducting a between-subjects user study investigating how (gender-related) disparate impact affected consumer trust in an app designed to improve consumers’ financial decision-making. Our results show that disparate impact decreased consumers’ trust in the system and made them less likely to use it. Moreover, we find that trust was affected to the same degree across consumer groups (i.e., advantaged and disadvantaged users) despite both of these consumer groups recognizing their respective levels of personal benefit. Our findings highlight the importance of fairness in consumer-oriented artificial intelligence systems.

    @inproceedings{drawsDisparateImpactDiminishes2021,
    address = {Cham},
    series = {Lecture Notes in Computer Science},
    title = {Disparate {Impact} {Diminishes} {Consumer} {Trust} {Even} for {Advantaged} {Users}},
    volume = {12684},
    copyright = {All rights reserved},
    isbn = {978-3-030-79460-6},
    doi = {10.1007/978-3-030-79460-6_11},
    abstract = {Systems aiming to aid consumers in their decision-making (e.g., by implementing persuasive techniques) are more likely to be effective when consumers trust them. However, recent research has demonstrated that the machine learning algorithms that often underlie such technology can act unfairly towards specific groups (e.g., by making more favorable predictions for men than for women). An undesired disparate impact resulting from this kind of algorithmic unfairness could diminish consumer trust and thereby undermine the purpose of the system. We studied this effect by conducting a between-subjects user study investigating how (gender-related) disparate impact affected consumer trust in an app designed to improve consumers’ financial decision-making. Our results show that disparate impact decreased consumers’ trust in the system and made them less likely to use it. Moreover, we find that trust was affected to the same degree across consumer groups (i.e., advantaged and disadvantaged users) despite both of these consumer groups recognizing their respective levels of personal benefit. Our findings highlight the importance of fairness in consumer-oriented artificial intelligence systems.},
    language = {en},
    booktitle = {Persuasive {Technology}},
    publisher = {Springer International Publishing},
    author = {Draws, Tim and Szlávik, Zoltán and Timmermans, Benjamin and Tintarev, Nava and Varshney, Kush R. and Hind, Michael},
    editor = {Ali, Raian and Lugrin, Birgit and Charles, Fred},
    year = {2021},
    pages = {135--149},
    file = {Draws et al. - 2021 - Disparate Impact Diminishes Consumer Trust Even fo.pdf:/Users/tim/Zotero/storage/V9VIEUEH/Draws et al. - 2021 - Disparate Impact Diminishes Consumer Trust Even fo.pdf:application/pdf},
    }

  • T. Draws, N. Tintarev, U. Gadiraju, A. Bozzon, and B. Timmermans, “This Is Not What We Ordered: Exploring Why Biased Search Result Rankings Affect User Attitudes on Debated Topics,” in Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, New York, NY, USA, 2021, p. 295–305. doi:10.1145/3404835.3462851
    [BibTeX] [Abstract] [Download PDF]

    In web search on debated topics, algorithmic and cognitive biases strongly influence how users consume and process information. Recent research has shown that this can lead to a search engine manipulation effect (SEME): when search result rankings are biased towards a particular viewpoint, users tend to adopt this favored viewpoint. To better understand the mechanisms underlying SEME, we present a pre-registered, 5 × 3 factorial user study investigating whether order effects (i.e., users adopting the viewpoint pertaining to higher-ranked documents) can cause SEME. For five different debated topics, we evaluated attitude change after exposing participants with mild pre-existing attitudes to search results that were overall viewpoint-balanced but reflected one of three levels of algorithmic ranking bias. We found that attitude change did not differ across levels of ranking bias and did not vary based on individual user differences. Our results thus suggest that order effects may not be an underlying mechanism of SEME. Exploratory analyses lend support to the presence of exposure effects (i.e., users adopting the majority viewpoint among the results they examine) as a contributing factor to users’ attitude change. We discuss how our findings can inform the design of user bias mitigation strategies.

    @inproceedings{draws2021ThisNotWhat,
    address = {New York, NY, USA},
    series = {{SIGIR} '21},
    title = {This {Is} {Not} {What} {We} {Ordered}: {Exploring} {Why} {Biased} {Search} {Result} {Rankings} {Affect} {User} {Attitudes} on {Debated} {Topics}},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8037-9},
    shorttitle = {This {Is} {Not} {What} {We} {Ordered}},
    doi = {10.1145/3404835.3462851},
    abstract = {In web search on debated topics, algorithmic and cognitive biases strongly influence how users consume and process information. Recent research has shown that this can lead to a search engine manipulation effect (SEME): when search result rankings are biased towards a particular viewpoint, users tend to adopt this favored viewpoint. To better understand the mechanisms underlying SEME, we present a pre-registered, 5 × 3 factorial user study investigating whether order effects (i.e., users adopting the viewpoint pertaining to higher-ranked documents) can cause SEME. For five different debated topics, we evaluated attitude change after exposing participants with mild pre-existing attitudes to search results that were overall viewpoint-balanced but reflected one of three levels of algorithmic ranking bias. We found that attitude change did not differ across levels of ranking bias and did not vary based on individual user differences. Our results thus suggest that order effects may not be an underlying mechanism of SEME. Exploratory analyses lend support to the presence of exposure effects (i.e., users adopting the majority viewpoint among the results they examine) as a contributing factor to users’ attitude change. We discuss how our findings can inform the design of user bias mitigation strategies.},
    language = {en},
    booktitle = {Proceedings of the 44th {International} {ACM} {SIGIR} {Conference} on {Research} and {Development} in {Information} {Retrieval}},
    publisher = {Association for Computing Machinery},
    author = {Draws, Tim and Tintarev, Nava and Gadiraju, Ujwal and Bozzon, Alessandro and Timmermans, Benjamin},
    month = jul,
    year = {2021},
    pages = {295--305},
    file = {Draws et al. - 2021 - This Is Not What We Ordered Exploring Why Biased .pdf:/Users/tim/Zotero/storage/EFI7ZWH3/Draws et al. - 2021 - This Is Not What We Ordered Exploring Why Biased .pdf:application/pdf},
    }

  • A. Rieger, T. Draws, N. Tintarev, and M. Theune, “This Item Might Reinforce Your Opinion: Obfuscation and Labeling of Search Results to Mitigate Confirmation Bias,” in Proceedings of the 32nd ACM Conference on Hypertext and Social Media, New York, NY, USA, 2021, p. 189–199. doi:10.1145/3465336.3475101
    [BibTeX] [Abstract] [Download PDF]

    During online information search, users tend to select search results that confirm previous beliefs and ignore competing possibilities. This systematic pattern in human behavior is known as confirmation bias. In this paper, we study the effect of obfuscation (i.e., hiding the result unless the user clicks on it) with warning labels and the effect of task on interaction with attitude-confirming search results. We conducted a preregistered between-subjects user study (𝑛=328) comparing six groups: three levels of obfuscation (targeted, random, none) and two levels of task (joint, two separate) for four debated topics. We found that both types of obfuscation influence user interactions, and in particular that targeted obfuscation helps decrease interaction with attitude-confirming search results. Future work is needed to understand how much of the observed effect is due to the strong influence of obfuscation, versus the warning label or the task design. We discuss design guidelines concerning system goals such as decreasing consumption of attitude-confirming search results, versus nudging users toward a more analytical mode of information processing. We also discuss implications for future work, such as the effects of such interventions over repeated exposure. We conclude with a strong word of caution: measures such as obfuscations should only be used for the benefit of the user, e.g., when they explicitly consent to mitigating their own biases.

    @inproceedings{rieger2021ThisItemMight,
    address = {New York, NY, USA},
    series = {{HT} '21},
    title = {This {Item} {Might} {Reinforce} {Your} {Opinion}: {Obfuscation} and {Labeling} of {Search} {Results} to {Mitigate} {Confirmation} {Bias}},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8551-0},
    doi = {10.1145/3465336.3475101},
    abstract = {During online information search, users tend to select search results that confirm previous beliefs and ignore competing possibilities. This systematic pattern in human behavior is known as confirmation bias. In this paper, we study the effect of obfuscation (i.e., hiding the result unless the user clicks on it) with warning labels and the effect of task on interaction with attitude-confirming search results. We conducted a preregistered between-subjects user study ($n=328$) comparing six groups: three levels of obfuscation (targeted, random, none) and two levels of task (joint, two separate) for four debated topics. We found that both types of obfuscation influence user interactions, and in particular that targeted obfuscation helps decrease interaction with attitude-confirming search results. Future work is needed to understand how much of the observed effect is due to the strong influence of obfuscation, versus the warning label or the task design. We discuss design guidelines concerning system goals such as decreasing consumption of attitude-confirming search results, versus nudging users toward a more analytical mode of information processing. We also discuss implications for future work, such as the effects of such interventions over repeated exposure. We conclude with a strong word of caution: measures such as obfuscations should only be used for the benefit of the user, e.g., when they explicitly consent to mitigating their own biases.},
    language = {en},
    booktitle = {Proceedings of the 32nd {ACM} {Conference} on {Hypertext} and {Social} {Media}},
    publisher = {Association for Computing Machinery},
    author = {Rieger, Alisa and Draws, Tim and Tintarev, Nava and Theune, Mariet},
    year = {2021},
    pages = {189--199},
    file = {This_Item_Might_Reinforce_Your_Opinion.pdf:/Users/tim/Zotero/storage/A7Y6CPG2/This_Item_Might_Reinforce_Your_Opinion.pdf:application/pdf},
    }

  • S. Najafian, T. Draws, F. Barile, M. Tkalcic, J. Yang, and N. Tintarev, “Exploring User Concerns about Disclosing Location and Emotion Information in Group Recommendations,” in Proceedings of the 32nd ACM Conference on Hypertext and Social Media, New York, NY, USA, 2021, p. 155–164. doi:10.1145/3465336.3475104
    [BibTeX] [Abstract] [Download PDF]

    Recent research has shown that explanations serve as an important means to increase transparency in group recommendations while also increasing users’ privacy concerns. However, it is currently unclear what personal and contextual factors affect users’ privacy concerns about various types of personal information. This paper studies the effect of users’ personality traits and preference scenarios—having a majority or minority preference—on their privacy concerns regarding location and emotion information. To create natural scenarios of group decision-making where users can control the amount of information disclosed, we develop TouryBot, a chat-bot agent that generates natural language explanations to help group members explain their arguments for suggestions to the group in the tourism domain. We conducted a user study in which we instructed 541 participants to convince the group to either visit or skip a recommended place. Our results show that users generally have a larger concern regarding the disclosure of emotion compared to location information. However, we found no evidence that personality traits or preference scenarios affect privacy concerns in our task. Further analyses revealed that task design (i.e., the pressure on users to convince the group) had an effect on participants’ emotion-related privacy concerns. Our study also highlights the utility of providing users with the option of partial disclosure of personal information, which appeared to be popular among the participants.

    @inproceedings{najafian2021ExploringUserConcerns,
    address = {New York, NY, USA},
    series = {{HT} '21},
    title = {Exploring {User} {Concerns} about {Disclosing} {Location} and {Emotion} {Information} in {Group} {Recommendations}},
    copyright = {All rights reserved},
    isbn = {978-1-4503-8551-0},
    doi = {10.1145/3465336.3475104},
    abstract = {Recent research has shown that explanations serve as an important means to increase transparency in group recommendations while also increasing users’ privacy concerns. However, it is currently unclear what personal and contextual factors affect users’ privacy concerns about various types of personal information. This paper studies the effect of users’ personality traits and preference scenarios—having a majority or minority preference—on their privacy concerns regarding location and emotion information. To create natural scenarios of group decision-making where users can control the amount of information disclosed, we develop TouryBot, a chat-bot agent that generates natural language explanations to help group members explain their arguments for suggestions to the group in the tourism domain. We conducted a user study in which we instructed 541 participants to convince the group to either visit or skip a recommended place. Our results show that users generally have a larger concern regarding the disclosure of emotion compared to location information. However, we found no evidence that personality traits or preference scenarios affect privacy concerns in our task. Further analyses revealed that task design (i.e., the pressure on users to convince the group) had an effect on participants’ emotion-related privacy concerns. Our study also highlights the utility of providing users with the option of partial disclosure of personal information, which appeared to be popular among the participants.},
    language = {en},
    booktitle = {Proceedings of the 32nd {ACM} {Conference} on {Hypertext} and {Social} {Media}},
    publisher = {Association for Computing Machinery},
    author = {Najafian, Shabnam and Draws, Tim and Barile, Francesco and Tkalcic, Marko and Yang, Jie and Tintarev, Nava},
    year = {2021},
    pages = {155--164},
    file = {Exploring_User_Concerns_about_Disclosing_Location_and_Emotion_Information_in_Group_Recommendations.pdf:/Users/tim/Zotero/storage/5G743D5R/Exploring_User_Concerns_about_Disclosing_Location_and_Emotion_Information_in_Group_Recommendations.pdf:application/pdf},
    }

  • T. Draws, “Understanding How Algorithmic and Cognitive Biases in Web Search Affect User Attitudes on Debated Topics,” in Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, New York, NY, USA, 2021, p. 2709. doi:10.1145/3404835.3463273
    [BibTeX] [Abstract] [Download PDF]

    Web search increasingly provides a platform for users to seek advice on important personal decisions [6] but may be biased in several different ways [1]. One result of such biases is the search engine manipulation effect (SEME): when a list of search results relates to a debated topic (e.g., veganism) and promotes documents pertaining to a particular viewpoint (e.g., by ranking them higher), users tend to adopt this advantaged viewpoint [5]. However, the detection and mitigation of SEME are complicated by the current lack of empirical understanding of its underlying mechanisms. This dissertation aims to investigate which (and to what degree) algorithmic and cognitive biases play a role in SEME concerning debated topics.

    @inproceedings{draws2021UnderstandingHowAlgorithmic,
    address = {New York, NY, USA},
    series = {{SIGIR} '21},
    title = {Understanding {How} {Algorithmic} and {Cognitive} {Biases} in {Web} {Search} {Affect} {User} {Attitudes} on {Debated} {Topics}},
    isbn = {978-1-4503-8037-9},
    url = {http://timdraws.net/files/papers/Understanding_How_Algorithmic_and_Cognitive_Biases_in_Web_Search_Affect_User_Attitudes_on_Debated_Topics.pdf},
    doi = {10.1145/3404835.3463273},
    abstract = {Web search increasingly provides a platform for users to seek advice on important personal decisions [6] but may be biased in several different ways [1]. One result of such biases is the search engine manipulation effect (SEME): when a list of search results relates to a debated topic (e.g., veganism) and promotes documents pertaining to a particular viewpoint (e.g., by ranking them higher), users tend to adopt this advantaged viewpoint [5]. However, the detection and mitigation of SEME are complicated by the current lack of empirical understanding of its underlying mechanisms. This dissertation aims to investigate which (and to what degree) algorithmic and cognitive biases play a role in SEME concerning debated topics.},
    language = {en},
    booktitle = {Proceedings of the 44th {International} {ACM} {SIGIR} {Conference} on {Research} and {Development} in {Information} {Retrieval}},
    publisher = {Association for Computing Machinery},
    author = {Draws, Tim},
    year = {2021},
    internal-note = {review: attached PDF path below points to a different 2020 paper ("Helping users discover perspectives") -- verify attachment},
    pages = {2709},
    file = {Draws - 2021 - Understanding How Algorithmic and Cognitive Biases in Web Search Affect User Attitudes on Debated Topics.pdf:/Users/tim/Zotero/storage/ES7AQ8LN/Draws et al. - 2020 - Helping users discover perspectives Enhancing opi.pdf:application/pdf},
    }

  • T. Draws, A. Rieger, O. Inel, U. Gadiraju, and N. Tintarev, “A Checklist to Combat Cognitive Biases in Crowdsourcing,” Proceedings of the Ninth AAAI Conference on Human Computation and Crowdsourcing, 2021.
    [BibTeX] [Abstract] [Download PDF]

    Recent research has demonstrated that cognitive biases such as the confirmation bias or the anchoring effect can negatively affect the quality of crowdsourced data. In practice, however, such biases go unnoticed unless specifically assessed or controlled for. Task requesters need to ensure that task workflow and design choices do not trigger workers’ cognitive biases. Moreover, to facilitate the reuse of crowdsourced data collections, practitioners can benefit from understanding whether and which cognitive biases may be associated with the data. To this end, we propose a 12-item checklist adapted from business psychology to combat cognitive biases in crowdsourcing. We demonstrate the practical application of this checklist in a case study on viewpoint annotations for search results. Through a retrospective analysis of relevant crowdsourcing research that has been published at HCOMP in 2018, 2019, and 2020, we show that cognitive biases may often affect crowd workers but are typically not considered as potential sources of poor data quality. The checklist we propose is a practical tool that requesters can use to improve their task designs and appropriately describe potential limitations of collected data. It contributes to a body of efforts towards making human-labeled data more reliable and reusable.

    @inproceedings{draws2021ChecklistCombatCognitive,
    series = {{HCOMP} '21},
    title = {A {Checklist} to {Combat} {Cognitive} {Biases} in {Crowdsourcing}},
    url = {https://timdraws.net/files/papers/A_Checklist_to_Combat_Cognitive_Biases_in_Crowdsourcing.pdf},
    abstract = {Recent research has demonstrated that cognitive biases such as the confirmation bias or the anchoring effect can negatively affect the quality of crowdsourced data. In practice, however, such biases go unnoticed unless specifically assessed or controlled for. Task requesters need to ensure that task workflow and design choices do not trigger workers’ cognitive biases. Moreover, to facilitate the reuse of crowdsourced data collections, practitioners can benefit from understanding whether and which cognitive biases may be associated with the data. To this end, we propose a 12-item checklist adapted from business psychology to combat cognitive biases in crowdsourcing. We demonstrate the practical application of this checklist in a case study on viewpoint annotations for search results. Through a retrospective analysis of relevant crowdsourcing research that has been published at HCOMP in 2018, 2019, and 2020, we show that cognitive biases may often affect crowd workers but are typically not considered as potential sources of poor data quality. The checklist we propose is a practical tool that requesters can use to improve their task designs and appropriately describe potential limitations of collected data. It contributes to a body of efforts towards making human-labeled data more reliable and reusable.},
    language = {en},
    booktitle = {Proceedings of the Ninth {AAAI} Conference on Human Computation and Crowdsourcing},
    author = {Draws, Tim and Rieger, Alisa and Inel, Oana and Gadiraju, Ujwal and Tintarev, Nava},
    year = {2021},
    file = {Draws et al. - A Checklist to Combat Cognitive Biases in Crowdsou.pdf:/Users/tim/Zotero/storage/S8DEMAUD/Draws et al. - A Checklist to Combat Cognitive Biases in Crowdsou.pdf:application/pdf},
    }

  • S. Najafian, T. Draws, O. Inel, A. Rieger, F. Barile, R. Hada, and N. Tintarev, “Toward Benchmarking Group Explanations: Evaluating the Effect of Aggregation Strategies versus Explanation,” Perspectives Workshop at RecSys ’21, 2021.
    [BibTeX] [Abstract]

    In the context of group recommendations, explanations have been claimed to be useful for finding a satisfactory choice for all the group members and helping them agree on a common decision, improving perceived fairness, perceived consensus, and satisfaction. In this work, we present a preregistered evaluation of the impact of using social choice-based explanations for group recommendations (i.e., explanations that intuitively describe the strategy used to generate the recommendation). Our objective is to conceptually replicate a previous study and investigate whether a) the used aggregation strategy or b) the explanation affected the most users’ fairness perception, consensus perception, and satisfaction. Our results show that the participants are able to discriminate between the different strategies, assigning worse evaluations to the Most Pleasure strategy (which chooses the item with the highest of the individual evaluations). In addition to a condition with no (natural language) explanation, we introduce a more detailed social choice-based explanation, evaluating whether additional information about the strategy has a positive impact on the evaluation of the group recommendation. However, we surprisingly found no effect of level of explanations, either as a main effect or as an interaction effect with the aggregation strategy. Overall, our results suggest that users’ perceptions of fairness, consensus, and satisfaction are primarily formed based on the social choice aggregation strategies for the studied group scenario. Our work also highlights the challenges of replication studies in recommender systems and discusses some of the design choices that may influence results when attempting to benchmark findings for the effectiveness of group explanations.

    @inproceedings{najafian2021BenchmarkingGroupExplanations,
    title = {Toward {Benchmarking} {Group} {Explanations}: {Evaluating} the {Effect} of {Aggregation} {Strategies} versus {Explanation}},
    abstract = {In the context of group recommendations, explanations have been claimed to be useful for finding a satisfactory choice for all the group members and helping them agree on a common decision, improving perceived fairness, perceived consensus, and satisfaction. In this work, we present a preregistered evaluation of the impact of using social choice-based explanations for group recommendations (i.e., explanations that intuitively describe the strategy used to generate the recommendation). Our objective is to conceptually replicate a previous study and investigate whether a) the used aggregation strategy or b) the explanation affected the most users’ fairness perception, consensus perception, and satisfaction. Our results show that the participants are able to discriminate between the different strategies, assigning worse evaluations to the Most Pleasure strategy (which chooses the item with the highest of the individual evaluations). In addition to a condition with no (natural language) explanation, we introduce a more detailed social choice-based explanation, evaluating whether additional information about the strategy has a positive impact on the evaluation of the group recommendation. However, we surprisingly found no effect of level of explanations, either as a main effect or as an interaction effect with the aggregation strategy. Overall, our results suggest that users’ perceptions of fairness, consensus, and satisfaction are primarily formed based on the social choice aggregation strategies for the studied group scenario. Our work also highlights the challenges of replication studies in recommender systems and discusses some of the design choices that may influence results when attempting to benchmark findings for the effectiveness of group explanations.},
    language = {en},
    booktitle = {Perspectives Workshop at {RecSys} '21},
    author = {Najafian, Shabnam and Draws, Tim and Inel, Oana and Rieger, Alisa and Barile, Francesco and Hada, Rishav and Tintarev, Nava},
    year = {2021},
    file = {Najafian et al. - Toward Benchmarking Group Explanations Evaluating.pdf:/Users/tim/Zotero/storage/UIVYAYW2/Najafian et al. - Toward Benchmarking Group Explanations Evaluating.pdf:application/pdf},
    }

2020

  • C. J. Van Lissa, W. Stroebe, M. vanDellen, P. Leander, M. Agostini, B. Gutzkow, J. Kreienkamp, J. Belanger, T. Draws, A. Grygoryshyn, C. S. Vetter, and the PsyCorona team, “Early Indicators of COVID-19 Infection Prevention Behaviors: Machine Learning Identifies Personal and Country-Level Factors,” PsyArXiv, preprint , 2020. doi:10.31234/osf.io/whjsb
    [BibTeX] [Abstract] [Download PDF]

    The Coronavirus is highly infectious and potentially deadly. In the absence of a cure or a vaccine, the infection prevention behaviors recommended by the World Health Organization constitute the only measure that is presently available to combat the pandemic. The unprecedented impact of this pandemic calls for swift identification of factors most important for predicting infection prevention behavior. In this paper, we used a machine learning approach to assess the relative importance of potential indicators of personal infection prevention behavior in a global psychological survey we conducted between March-May 2020 (N = 56,072 across 28 countries). The survey data were enriched with society-level variables relevant to the pandemic. Results indicated that the two most important indicators of self-reported infection prevention behavior were individual-level injunctive norms—beliefs that people in the community should engage in social distancing and self-isolation, followed by endorsement of restrictive containment measures (e.g., mandatory vaccination). Society-level factors (e.g., national healthcare infrastructure, confirmed infections) also emerged as important indicators. Social attitudes and norms were more important than personal factors considered most important by theories of health behavior. The model accounted for 52\% of the variance in infection prevention behavior in a separate test sample—above the performance of psychological models of health behavior. These results suggest that individuals are intuitively aware that this pandemic constitutes a social dilemma situation, where their own infection risk is partly dependent on the behaviors of others. If everybody engaged in infection prevention behavior, the virus could be defeated even without a vaccine.

    @techreport{vanlissaEarlyIndicatorsCOVID192020,
    type = {preprint},
    title = {Early {Indicators} of {COVID}-19 {Infection} {Prevention} {Behaviors}: {Machine} {Learning} {Identifies} {Personal} and {Country}-{Level} {Factors}},
    shorttitle = {Early {Indicators} of {COVID}-19 {Infection} {Prevention} {Behaviors}},
    url = {https://osf.io/whjsb},
    abstract = {The Coronavirus is highly infectious and potentially deadly. In the absence of a cure or a vaccine, the infection prevention behaviors recommended by the World Health Organization constitute the only measure that is presently available to combat the pandemic. The unprecedented impact of this pandemic calls for swift identification of factors most important for predicting infection prevention behavior. In this paper, we used a machine learning approach to assess the relative importance of potential indicators of personal infection prevention behavior in a global psychological survey we conducted between March-May 2020 (N = 56,072 across 28 countries). The survey data were enriched with society-level variables relevant to the pandemic. Results indicated that the two most important indicators of self-reported infection prevention behavior were individual-level injunctive norms—beliefs that people in the community should engage in social distancing and self-isolation, followed by endorsement of restrictive containment measures (e.g., mandatory vaccination). Society-level factors (e.g., national healthcare infrastructure, confirmed infections) also emerged as important indicators. Social attitudes and norms were more important than personal factors considered most important by theories of health behavior. The model accounted for 52\% of the variance in infection prevention behavior in a separate test sample—above the performance of psychological models of health behavior. These results suggest that individuals are intuitively aware that this pandemic constitutes a social dilemma situation, where their own infection risk is partly dependent on the behaviors of others. If everybody engaged in infection prevention behavior, the virus could be defeated even without a vaccine.},
    language = {en},
    urldate = {2021-07-28},
    institution = {PsyArXiv},
    author = {Van Lissa, Caspar J. and Stroebe, Wolfgang and vanDellen, Michelle and Leander, Pontus and Agostini, Maximilian and Gutzkow, Ben and Kreienkamp, Jannis and Belanger, Jocelyn and Draws, Tim and Grygoryshyn, Andrii and Vetter, Clara S. and {{PsyCorona team}}},
    month = nov,
    year = {2020},
    doi = {10.31234/osf.io/whjsb},
    file = {Van Lissa et al. - 2020 - Early Indicators of COVID-19 Infection Prevention .pdf:/Users/tim/Zotero/storage/H58NGCBE/Van Lissa et al. - 2020 - Early Indicators of COVID-19 Infection Prevention .pdf:application/pdf},
    }

  • T. Draws, J. Liu, and N. Tintarev, “Helping users discover perspectives: Enhancing opinion mining with joint topic models,” in 2020 International Conference on Data Mining Workshops (ICDMW), Sorrento, Italy, 2020, p. 23–30. doi:10.1109/ICDMW51313.2020.00013
    [BibTeX] [Abstract] [Download PDF]

    Support or opposition with respect to a debated claim such as abortion should be legal can have different underlying reasons, which we call perspectives. This paper explores how opinion mining can be enhanced with joint topic modeling, to identify distinct perspectives within the topic, providing an informative overview from unstructured text. We evaluate four joint topic models (TAM, JST, VODUM, and LAM) in a user study assessing human understandability of the extracted perspectives. Based on the results, we conclude that joint topic models such as TAM can discover perspectives that align with human judgments. Moreover, our results suggest that users are not influenced by their pre-existing stance on the topic of abortion when interpreting the output of topic models.

    @inproceedings{drawsHelpingUsersDiscover2020,
    author = {Draws, Tim and Liu, Jody and Tintarev, Nava},
    title = {Helping users discover perspectives: {Enhancing} opinion mining with joint topic models},
    shorttitle = {Helping users discover perspectives},
    booktitle = {2020 {International} {Conference} on {Data} {Mining} {Workshops} ({ICDMW})},
    publisher = {IEEE},
    address = {Sorrento, Italy},
    month = nov,
    year = {2020},
    pages = {23--30},
    doi = {10.1109/ICDMW51313.2020.00013},
    isbn = {978-1-72819-012-9},
    url = {https://ieeexplore.ieee.org/document/9346407/},
    urldate = {2021-07-13},
    copyright = {All rights reserved},
    language = {en},
    abstract = {Support or opposition with respect to a debated claim such as abortion should be legal can have different underlying reasons, which we call perspectives. This paper explores how opinion mining can be enhanced with joint topic modeling, to identify distinct perspectives within the topic, providing an informative overview from unstructured text. We evaluate four joint topic models (TAM, JST, VODUM, and LAM) in a user study assessing human understandability of the extracted perspectives. Based on the results, we conclude that joint topic models such as TAM can discover perspectives that align with human judgments. Moreover, our results suggest that users are not influenced by their pre-existing stance on the topic of abortion when interpreting the output of topic models.},
    file = {Draws et al. - 2020 - Helping users discover perspectives Enhancing opi.pdf:/Users/tim/Zotero/storage/444RTM63/Draws et al. - 2020 - Helping users discover perspectives Enhancing opi.pdf:application/pdf},
    }

  • D. van den Bergh, J. van Doorn, M. Marsman, T. Draws, E. van Kesteren, K. Derks, F. Dablander, Q. F. Gronau, Š. Kucharský, A. K. N. R. Gupta, A. Sarafoglou, J. G. Voelkel, A. Stefan, M. Hinne, D. Matzke, and E. Wagenmakers, “A Tutorial on Conducting and Interpreting a Bayesian ANOVA in JASP,” Année Psychologique, vol. 120, iss. 1, p. 73–96, 2020. doi:10.3917/anpsy1.201.0073
    [BibTeX] [Abstract] [Download PDF]

    Analysis of variance (ANOVA) is the standard procedure for statistical inference in factorial designs. Typically, ANOVAs are executed using frequentist statistics, where p-values determine statistical significance in an all-or-none fashion. In recent years, the Bayesian approach to statistics is increasingly viewed as a legitimate alternative to the p-value. However, the broad adoption of Bayesian statistics-and Bayesian ANOVA in particular-is frustrated by the fact that Bayesian concepts are rarely taught in applied statistics courses. Consequently, practitioners may be unsure how to conduct a Bayesian ANOVA and interpret the results. Here we provide a guide for executing and interpreting a Bayesian ANOVA with JASP, an open-source statistical software program with a graphical user interface. We explain the key concepts of the Bayesian ANOVA using two empirical examples.

    @article{vandenberghTutorialConductingInterpreting2020,
    title = {A {Tutorial} on {Conducting} and {Interpreting} a {Bayesian} {ANOVA} in {JASP}},
    volume = {120},
    issn = {0003-5033},
    url = {https://ir.cwi.nl/pub/29560},
    doi = {10.3917/anpsy1.201.0073},
    abstract = {Analysis of variance (ANOVA) is the standard procedure for statistical inference in factorial designs. Typically, ANOVAs are executed using frequentist statistics, where p-values determine statistical significance in an all-or-none fashion. In recent years, the Bayesian approach to statistics is increasingly viewed as a legitimate alternative to the p-value. However, the broad adoption of Bayesian statistics-and Bayesian ANOVA in particular-is frustrated by the fact that Bayesian concepts are rarely taught in applied statistics courses. Consequently, practitioners may be unsure how to conduct a Bayesian ANOVA and interpret the results. Here we provide a guide for executing and interpreting a Bayesian ANOVA with JASP, an open-source statistical software program with a graphical user interface. We explain the key concepts of the Bayesian ANOVA using two empirical examples.},
    language = {en},
    number = {1},
    urldate = {2021-07-28},
    journal = {Ann{\'e}e Psychologique},
    author = {van den Bergh, Don and van Doorn, Johnny and Marsman, Maarten and Draws, Tim and van Kesteren, Erik-Jan and Derks, Koen and Dablander, Fabian and Gronau, Quentin Frederik and Kucharský, Šimon and Gupta, Akash R. Komarlu Narendra and Sarafoglou, Alexandra and Voelkel, Jan G. and Stefan, Angelika and Hinne, Max and Matzke, Dora and Wagenmakers, Eric-Jan},
    month = feb,
    year = {2020},
    keywords = {Analysis of Variance, Bayes Factor, Hypothesis Test, JASP, Posterior distribution, Tutorial},
    pages = {73--96},
    file = {Full Text:/Users/tim/Zotero/storage/NCXFKCR2/D. van den Bergh (Don) et al. - 2020 - A tutorial on conducting and interpreting a bayesi.pdf:application/pdf},
    }

privacy