Orphanou, Kalia; Otterbacher, Jahna; Kleanthous, Styliani; Batsuren, Khuyagbaatar; Giunchiglia, Fausto; Bogina, Veronika; Tal, Avital Shulner; Hartman, Alan; Kuflik, Tsvi
Mitigating Bias in Algorithmic Systems - A Fish-Eye View Journal Article
In: ACM Comput. Surv., 2022, ISSN: 0360-0300, (Just Accepted).
@article{10.1145/3527152,
  title     = {Mitigating Bias in Algorithmic Systems - A Fish-Eye View},
  author    = {Kalia Orphanou and Jahna Otterbacher and Styliani Kleanthous and Khuyagbaatar Batsuren and Fausto Giunchiglia and Veronika Bogina and Avital Shulner Tal and Alan Hartman and Tsvi Kuflik},
  url       = {https://doi.org/10.1145/3527152},
  doi       = {10.1145/3527152},
  issn      = {0360-0300},
  year      = {2022},
  date      = {2022-03-01},
  journal   = {ACM Comput. Surv.},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Mitigating bias in algorithmic systems is a critical issue drawing attention across communities within the information and computer sciences. Given the complexity of the problem and the involvement of multiple stakeholders – including developers, end users and third-parties – there is a need to understand the landscape of the sources of bias, and the solutions being proposed to address them, from a broad, cross-domain perspective. This survey provides a “fish-eye view,” examining approaches across four areas of research. The literature describes three steps toward a comprehensive treatment – bias detection, fairness management and explainability management – and underscores the need to work from within the system as well as from the perspective of stakeholders in the broader context.},
  note      = {Just Accepted},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Barlas, Pinar; Krahn, Maximilian; Kleanthous, Styliani; Kyriakou, Kyriakos; Otterbacher, Jahna
Shifting Our Awareness, Taking Back Tags: Temporal Changes in Computer Vision Services’ Social Behaviors Inproceedings
In: 2022.
@inproceedings{barlas2022shifting,
  title         = {Shifting Our Awareness, Taking Back Tags: Temporal Changes in Computer Vision Services' Social Behaviors},
  author        = {Pinar Barlas and Maximilian Krahn and Styliani Kleanthous and Kyriakos Kyriakou and Jahna Otterbacher},
  url           = {https://static1.squarespace.com/static/5d0fdbbd14ec31000174c179/t/624e9a00b1569047830154d2/1649318405330/Barlas+et+al.+-+2022+-+Shifting+Our+Awareness+Taking+Back+Tags.pdf},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  internal-note = {NOTE(review): required booktitle/venue is missing from this export -- TODO: confirm and add the conference or workshop name},
  abstract      = {Much attention has been on the behaviors of computer vision services when describing images of people. Audits have revealed rampant biases that could lead to harm when services are used by developers and researchers. We focus on temporal auditing, replicating experiments originally conducted three years ago. We document the changes observed over time, relating this to the growing awareness of structural oppression and the need to align technology with social values. While we document some positive changes in the services’ behaviors, such as increased accuracy in the use of gender-related tags overall, we also replicate findings concerning larger error rates for images of Black individuals. In addition, we find cases of increased use of inferential tags (e.g., emotions), which are often sensitive. The analysis underscores the difficulty in following changes in services’ behaviors over time, and the need for more oversight of such services.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Kleanthous, Styliani; Kasinidou, Maria; Barlas, Pınar; Otterbacher, Jahna
Perception of fairness in algorithmic decisions: Future developers' perspective Journal Article
In: Patterns, vol. 3, no. 1, pp. 100380, 2022, ISSN: 2666-3899.
@article{KLEANTHOUS2022100380,
  title     = {Perception of fairness in algorithmic decisions: Future developers' perspective},
  author    = {Styliani Kleanthous and Maria Kasinidou and Pınar Barlas and Jahna Otterbacher},
  url       = {https://www.sciencedirect.com/science/article/pii/S2666389921002476},
  doi       = {10.1016/j.patter.2021.100380},
  issn      = {2666-3899},
  year      = {2022},
  date      = {2022-01-01},
  journal   = {Patterns},
  volume    = {3},
  number    = {1},
  pages     = {100380},
  abstract  = {In this work, we investigate how students in fields adjacent to algorithms development perceive fairness, accountability, transparency, and ethics in algorithmic decision-making. Participants (N = 99) were asked to rate their agreement with statements regarding six constructs that are related to facets of fairness and justice in algorithmic decision-making using scenarios, in addition to defining algorithmic fairness and providing their view on possible causes of unfairness, transparency approaches, and accountability. The findings indicate that “agreeing” with a decision does not mean that the person “deserves the outcome,” perceiving the factors used in the decision-making as “appropriate” does not make the decision of the system “fair,” and perceiving a system's decision as “not fair” is affecting the participants' “trust” in the system. Furthermore, fairness is most likely to be defined as the use of “objective factors,” and participants identify the use of “sensitive attributes” as the most likely cause of unfairness.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Pavlidou, Veronika; Otterbacher, Jahna; Kleanthous, Styliani
User Perception of Algorithmic Digital Marketing in Conditions of Scarcity Inproceedings
In: Themistocleous, Marinos; Papadaki, Maria (Ed.): Information Systems, pp. 319–332, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-95947-0.
@inproceedings{10.1007/978-3-030-95947-0_22,
  title     = {User Perception of Algorithmic Digital Marketing in Conditions of Scarcity},
  author    = {Veronika Pavlidou and Jahna Otterbacher and Styliani Kleanthous},
  editor    = {Marinos Themistocleous and Maria Papadaki},
  isbn      = {978-3-030-95947-0},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {Information Systems},
  pages     = {319--332},
  publisher = {Springer International Publishing},
  address   = {Cham},
  abstract  = {Digital Marketing, and specifically, targeted marketing online is flourishing in recent years, and is becoming evermore precise and easy to implement, given the rise of big data and algorithmic processes. This study assesses users' perceptions regarding the fairness in algorithmic targeted marketing, in conditions of scarcity. This is increasingly important because as more decisions are made by data-driven algorithms, the potential for consumers to be treated unfairly by marketers grows. Awareness of users' perceptions helps to create a more open, understandable and fair digital world without negative influences. Also, it may help both marketers and consumers to communicate effectively.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Orphanou, K.; Christoforou, E.; Otterbacher, J.; Paramita, M. L.; Hopfgartner, F.
Preserving the memory of the first wave of COVID-19 pandemic: Crowdsourcing a collection of image search queries Miscellaneous
2021, (Copyright 2021 for this paper by its authors. Use permitted under Creative Commons License Attribution 4.0 International (CC BY 4.0). See: http://creativecommons.org/licenses/by/4.0.).
@misc{wrro180974,
  title     = {Preserving the memory of the first wave of COVID-19 pandemic: Crowdsourcing a collection of image search queries},
  author    = {Kalia Orphanou and Evgenia Christoforou and Jahna Otterbacher and Monica L. Paramita and Frank Hopfgartner},
  url       = {https://eprints.whiterose.ac.uk/180974/},
  year      = {2021},
  date      = {2021-11-01},
  booktitle = {Third symposium on Biases in Human Computation and Crowdsourcing (BHCC 2021)},
  publisher = {CEUR Workshop Proceedings},
  abstract  = {The unprecedented events of the COVID-19 pandemic have generated an enormous amount of information and populated the Web with new content relevant to the pandemic and its implications. Visual information such as images has been shown to be crucial in the context of scientific communication. Images are often interpreted as being closer to the truth as compared to other forms of communication, because of their physical representation of an event such as the COVID-19 pandemic. In this work, we ask crowdworkers across four regions of Europe that were severely affected by the first wave of pandemic, to provide us with image search queries related to COVID-19 pandemic. The goal of this study is to understand the similarities/differences of the aspects that are most important to users across different locations regarding the first wave of COVID-19 pandemic. Through a content analysis of their queries, we discovered five common themes of concern to all, although the frequency of use differed across regions.},
  note      = {Copyright 2021 for this paper by its authors. Use permitted under Creative Commons License Attribution 4.0 International (CC BY 4.0). See: http://creativecommons.org/licenses/by/4.0.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {misc}
}
Kleanthous, Styliani; Otterbacher, Jahna; Bates, Jo; Giunchiglia, Fausto; Hopfgartner, Frank; Kuflik, Tsvi; Orphanou, Kalia; Paramita, Monica L.; Rovatsos, Michael; Shulner-Tal, Avital
Report on the CyCAT Winter School on Fairness, Accountability, Transparency and Ethics (FATE) in AI Journal Article
In: SIGIR Forum, vol. 55, no. 1, 2021, ISSN: 0163-5840.
@article{10.1145/3476415.3476419,
  title     = {Report on the CyCAT Winter School on Fairness, Accountability, Transparency and Ethics (FATE) in AI},
  author    = {Styliani Kleanthous and Jahna Otterbacher and Jo Bates and Fausto Giunchiglia and Frank Hopfgartner and Tsvi Kuflik and Kalia Orphanou and Monica L. Paramita and Michael Rovatsos and Avital Shulner-Tal},
  url       = {https://doi.org/10.1145/3476415.3476419},
  doi       = {10.1145/3476415.3476419},
  issn      = {0163-5840},
  year      = {2021},
  date      = {2021-07-01},
  journal   = {SIGIR Forum},
  volume    = {55},
  number    = {1},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {The first FATE Winter School, organized by the Cyprus Center for Algorithmic Transparency (CyCAT) provided a forum for both students as well as senior researchers to examine the complex topic of Fairness, Accountability, Transparency and Ethics (FATE). Through a program that included two invited keynotes, as well as sessions led by CyCAT partners across Europe and Israel, participants were exposed to a range of approaches on FATE, in a holistic manner. During the Winter School, the team also organized a hands-on activity to evaluate a tool-based intervention where participants interacted with eight prototypes of bias-aware search engines. Finally, participants were invited to join one of four collaborative projects coordinated by CyCAT, thus furthering common understanding and interdisciplinary collaboration on this emerging topic.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kyriakou, Kyriakos; Barlas, Pinar; Kleanthous, Styliani; Christoforou, Evgenia; Otterbacher, Jahna
Crowdsourcing Human Oversight on Image Tagging Algorithms: An initial study of image diversity Working paper
2021.
@workingpaper{kyriakou2021crowdsourcing,
  title     = {Crowdsourcing Human Oversight on Image Tagging Algorithms: An initial study of image diversity},
  author    = {Kyriakos Kyriakou and Pinar Barlas and Styliani Kleanthous and Evgenia Christoforou and Jahna Otterbacher},
  url       = {https://www.humancomputation.com/assets/wips_demos/HCOMP_2021_paper_104.pdf},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  abstract  = {Various stakeholders have called for human oversight of algorithmic processes, as a means to mitigate the possibility for automated discrimination and other social harms. This is even more crucial in light of the democratization of AI, where data and algorithms, such as Cognitive Services, are deployed into various applications and socio-cultural contexts. Inspired by previous work proposing human-in-the-loop governance mechanisms, we run a feasibility study involving image tagging services. Specifically, we ask whether micro-task crowd-sourcing can be an effective means for collecting a diverse pool of data for evaluating fairness in a hypothetical scenario of analyzing professional profile photos in a later phase. In this work-in-progress paper, we present our proposed over- sight approach and framework for analyzing the diversity of the images provided. Given the subjectivity of fairness judgments, we first aimed to recruit a diverse crowd from three distinct regions. This study lays the groundwork for expanding the approach, to offer developers a means to evaluate Cognitive Services before and/or during deployment.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {workingpaper}
}
Barlas, Pinar; Kyriakou, Kyriakos; Guest, Olivia; Kleanthous, Styliani; Otterbacher, Jahna
To "See" is to Stereotype: Image Tagging Algorithms, Gender Recognition, and the Accuracy-Fairness Trade-Off Inproceedings
In: Proc. ACM Hum.-Comput. Interact., vol. 4, no. CSCW3, Association for Computing Machinery, New York, NY, USA, 2021.
@article{10.1145/3432931,
  title     = {To "See" is to Stereotype: Image Tagging Algorithms, Gender Recognition, and the Accuracy-Fairness Trade-Off},
  author    = {Pinar Barlas and Kyriakos Kyriakou and Olivia Guest and Styliani Kleanthous and Jahna Otterbacher},
  url       = {https://doi.org/10.1145/3432931},
  doi       = {10.1145/3432931},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  journal   = {Proc. ACM Hum.-Comput. Interact.},
  volume    = {4},
  number    = {CSCW3},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Machine-learned computer vision algorithms for tagging images are increasingly used by developers and researchers, having become popularized as easy-to-use "cognitive services." Yet these tools struggle with gender recognition, particularly when processing images of women, people of color and non-binary individuals. Socio-technical researchers have cited data bias as a key problem; training datasets often over-represent images of people and contexts that convey social stereotypes. The social psychology literature explains that people learn social stereotypes, in part, by observing others in particular roles and contexts, and can inadvertently learn to associate gender with scenes, occupations and activities. Thus, we study the extent to which image tagging algorithms mimic this phenomenon. We design a controlled experiment, to examine the interdependence between algorithmic recognition of context and the depicted person's gender. In the spirit of auditing to understand machine behaviors, we create a highly controlled dataset of people images, imposed on gender-stereotyped backgrounds. Our methodology is reproducible and our code publicly available. Evaluating five proprietary algorithms, we find that in three, gender inference is hindered when a background is introduced. Of the two that "see" both backgrounds and gender, it is the one whose output is most consistent with human stereotyping processes that is superior in recognizing gender. We discuss the accuracy--fairness trade-off, as well as the importance of auditing black boxes in better understanding this double-edged sword.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Barlas, Pinar; Kyriakou, Kyriakos; Kleanthous, Styliani; Otterbacher, Jahna
Person, Human, Neither: The Dehumanization Potential of Automated Image Tagging Inproceedings
In: Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society, pp. 357–367, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 9781450384735.
@inproceedings{10.1145/3461702.3462567,
  title     = {Person, Human, Neither: The Dehumanization Potential of Automated Image Tagging},
  author    = {Pinar Barlas and Kyriakos Kyriakou and Styliani Kleanthous and Jahna Otterbacher},
  url       = {https://doi.org/10.1145/3461702.3462567},
  doi       = {10.1145/3461702.3462567},
  isbn      = {9781450384735},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society},
  pages     = {357--367},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Following the literature on dehumanization via technology, we audit six proprietary image tagging algorithms (ITAs) for their potential to perpetuate dehumanization. We examine the ITAs' outputs on a controlled dataset of images depicting a diverse group of people for tags that indicate the presence of a human in the image. Through an analysis of the (mis)use of these tags, we find that there are some individuals whose 'humanness' is not recognized by an ITA, and that these individuals are often from marginalized social groups. Finally, we compare these findings with the use of the 'face' tag, which can be used for surveillance, revealing that people's faces are often recognized by an ITA even when their 'humanness' is not. Overall, we highlight the subtle ways in which ITAs may inflict widespread, disparate harm, and emphasize the importance of considering the social context of the resulting application.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kasinidou, Maria; Kleanthous, Styliani; Otterbacher, Jahna
`Expected Most of the Results, but Some Others...Surprised Me': Personality Inference in Image Tagging Services Inproceedings
In: Fogli, Daniela; Tetteroo, Daniel; Barricelli, Barbara Rita; Borsci, Simone; Markopoulos, Panos; Papadopoulos, George A. (Ed.): End-User Development, pp. 187–195, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-79840-6.
@inproceedings{10.1007/978-3-030-79840-6_12,
  title     = {`Expected Most of the Results, but Some Others...Surprised Me': Personality Inference in Image Tagging Services},
  author    = {Maria Kasinidou and Styliani Kleanthous and Jahna Otterbacher},
  editor    = {Daniela Fogli and Daniel Tetteroo and Barbara Rita Barricelli and Simone Borsci and Panos Markopoulos and George A. Papadopoulos},
  isbn      = {978-3-030-79840-6},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {End-User Development},
  pages     = {187--195},
  publisher = {Springer International Publishing},
  address   = {Cham},
  abstract  = {Image tagging APIs, offered as Cognitive Services in the movement to democratize AI, have become popular in applications that need to provide a personalized user experience. Developers can easily incorporate these services into their applications; however, little is known concerning their behavior under specific circumstances. We consider how two such services behave when predicting elements of the Big-Five personality traits from users' profile images. We found that personality traits are not equally represented in the APIs' output tags, with tags focusing mostly on Extraversion. The inaccurate personality prediction and the lack of vocabulary for the equal representation of all personality traits, could result in unreliable implicit user modeling, resulting in sub-optimal -- or even undesirable -- user experience in the application.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Christoforou, Evgenia; Barlas, Pinar; Otterbacher, Jahna
It’s About Time: A View of Crowdsourced Data Before and During the Pandemic Inproceedings
In: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, Yokohama, Japan, 2021, ISBN: 9781450380966.
@inproceedings{10.1145/3411764.3445317,
  title     = {It's About Time: A View of Crowdsourced Data Before and During the Pandemic},
  author    = {Evgenia Christoforou and Pinar Barlas and Jahna Otterbacher},
  url       = {https://doi.org/10.1145/3411764.3445317},
  doi       = {10.1145/3411764.3445317},
  isbn      = {9781450380966},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {CHI '21},
  abstract  = {Data attained through crowdsourcing have an essential role in the development of computer vision algorithms. Crowdsourced data might include reporting biases, since crowdworkers usually describe what is “worth saying” in addition to images’ content. We explore how the unprecedented events of 2020, including the unrest surrounding racial discrimination, and the COVID-19 pandemic, might be reflected in responses to an open-ended annotation task on people images, originally executed in 2018 and replicated in 2020. Analyzing themes of Identity and Health conveyed in workers’ tags, we find evidence that supports the potential for temporal sensitivity in crowdsourced data. The 2020 data exhibit more race-marking of images depicting non-Whites, as well as an increase in tags describing Weight. We relate our findings to the emerging research on crowdworkers’ moods. Furthermore, we discuss the implications of (and suggestions for) designing tasks on proprietary platforms, having demonstrated the possibility for additional, unexpected variation in crowdsourced data due to significant events.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kasinidou, Maria; Kleanthous, Styliani; Orphanou, Kalia; Otterbacher, Jahna
Educating Computer Science Students about Algorithmic Fairness, Accountability, Transparency and Ethics Book Chapter
In: Proceedings of the 26th ACM Conference on Innovation and Technology in Computer Science Education V. 1, pp. 484–490, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 9781450382144.
@inbook{10.1145/3430665.3456311,
  title     = {Educating Computer Science Students about Algorithmic Fairness, Accountability, Transparency and Ethics},
  author    = {Maria Kasinidou and Styliani Kleanthous and Kalia Orphanou and Jahna Otterbacher},
  url       = {https://doi.org/10.1145/3430665.3456311},
  doi       = {10.1145/3430665.3456311},
  isbn      = {9781450382144},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Proceedings of the 26th ACM Conference on Innovation and Technology in Computer Science Education V. 1},
  pages     = {484--490},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Professionals are increasingly relying on algorithmic systems for decision making however, algorithmic decisions occasionally perceived as biased or not just. Prior work has provided evidences that education can make a difference on the perception of young developers on algorithmic fairness. In this paper, we investigate computer science students' perception of FATE in algorithmic decision-making and whether their views on FATE can be changed by attending a seminar on FATE topics. Participants attended a seminar on FATE in algorithmic decision-making and they were asked to respond to two online questionnaires to measure their pre- and post-seminar perception on FATE. Results show that a short seminar can make a difference in understanding and perception as well as the attitude of the students towards FATE in algorithmic decision support. CS curricula need to be updated and include FATE topics if we want algorithmic decision support systems to be just for all.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inbook}
}
Kasinidou, Maria; Kleanthous, Styliani; Barlas, Pinar; Otterbacher, Jahna
I Agree with the Decision, but They Didn't Deserve This: Future Developers' Perception of Fairness in Algorithmic Decisions Inproceedings
In: Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, pp. 690–700, Association for Computing Machinery, Virtual Event, Canada, 2021, ISBN: 9781450383097.
@inproceedings{10.1145/3442188.3445931,
  title     = {I Agree with the Decision, but They Didn't Deserve This: Future Developers' Perception of Fairness in Algorithmic Decisions},
  author    = {Maria Kasinidou and Styliani Kleanthous and Pinar Barlas and Jahna Otterbacher},
  url       = {https://doi.org/10.1145/3442188.3445931},
  doi       = {10.1145/3442188.3445931},
  isbn      = {9781450383097},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency},
  pages     = {690--700},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {FAccT '21},
  abstract  = {While professionals are increasingly relying on algorithmic systems for making a decision, on some occasions, algorithmic decisions may be perceived as biased or not just. Prior work has looked into the perception of algorithmic decision-making from the user's point of view. In this work, we investigate how students in fields adjacent to algorithm development perceive algorithmic decisionmaking. Participants (N=99) were asked to rate their agreement with statements regarding six constructs that are related to facets of fairness and justice in algorithmic decision-making in three separate scenarios. Two of the three scenarios were independent of each other, while the third scenario presented three different outcomes of the same algorithmic system, demonstrating perception changes triggered by different outputs. Quantitative analysis indicates that a) 'agreeing' with a decision does not mean the person 'deserves the outcome', b) perceiving the factors used in the decision-making as 'appropriate' does not make the decision of the system 'fair' and c) perceiving a system's decision as 'not fair' is affecting the participants' 'trust' in the system. In addition, participants found proportional distribution of benefits more fair than other approaches. Qualitative analysis provides further insights into that information the participants find essential to judge and understand an algorithmic decision-making system's fairness. Finally, the level of academic education has a role to play in the perception of fairness and justice in algorithmic decision-making.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Giunchiglia, Fausto; Kleanthous, Styliani; Otterbacher, Jahna; Draws, Tim
Transparency Paths - Documenting the Diversity of User Perceptions Book Chapter
In: Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization, pp. 415–420, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 9781450383677.
@inbook{10.1145/3450614.3463292,
  title     = {Transparency Paths - Documenting the Diversity of User Perceptions},
  author    = {Fausto Giunchiglia and Styliani Kleanthous and Jahna Otterbacher and Tim Draws},
  url       = {https://doi.org/10.1145/3450614.3463292},
  doi       = {10.1145/3450614.3463292},
  isbn      = {9781450383677},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization},
  pages     = {415--420},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {We are living in an era of global digital platforms, eco-systems of algorithmic processes that serve users worldwide. However, the increasing exposure to diversity online – of information and users – has led to important considerations of bias. A given platform, such as the Google search engine, may demonstrate behaviors that deviate from what users expect, or what they consider fair, relative to their own context and experiences. In this exploratory work, we put forward the notion of transparency paths, a process by which we document our position, choices, and perceptions when developing and/or using algorithmic platforms. We conducted a self-reflection exercise with seven researchers, who collected and analyzed two sets of images; one depicting an everyday activity, “washing hands,” and a second depicting the concept of “home.” Participants had to document their process and choices, and in the end, compare their work to others. Finally, participants were asked to reflect on the definitions of bias and diversity. The exercise revealed the range of perspectives and approaches taken, underscoring the need for future work that will refine the transparency paths methodology.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inbook}
}
Paramita, Monica Lestari; Orphanou, Kalia; Christoforou, Evgenia; Otterbacher, Jahna; Hopfgartner, Frank
Do you see what I see? Images of the COVID-19 pandemic through the lens of Google Journal Article
In: Information Processing & Management, vol. 58, no. 5, pp. 102654, 2021, ISSN: 0306-4573.
@article{PARAMITA2021102654,
  title     = {Do you see what I see? Images of the COVID-19 pandemic through the lens of Google},
  author    = {Monica Lestari Paramita and Kalia Orphanou and Evgenia Christoforou and Jahna Otterbacher and Frank Hopfgartner},
  url       = {https://www.sciencedirect.com/science/article/pii/S0306457321001424},
  doi       = {10.1016/j.ipm.2021.102654},
  issn      = {0306-4573},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Information Processing & Management},
  volume    = {58},
  number    = {5},
  pages     = {102654},
  abstract  = {During times of crisis, information access is crucial. Given the opaque processes behind modern search engines, it is important to understand the extent to which the “picture” of the Covid-19 pandemic accessed by users differs. We explore variations in what users “see” concerning the pandemic through Google image search, using a two-step approach. First, we crowdsource a search task to users in four regions of Europe, asking them to help us create a photo documentary of Covid-19 by providing image search queries. Analysing the queries, we find five common themes describing information needs. Next, we study three sources of variation – users’ information needs, their geo-locations and query languages – and analyse their influences on the similarity of results. We find that users see the pandemic differently depending on where they live, as evidenced by the 46% similarity across results. When users expressed a given query in different languages, there was no overlap for most of the results. Our analysis suggests that localisation plays a major role in the (dis)similarity of results, and provides evidence of the diverse “picture” of the pandemic seen through Google.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Nisiotis, Louis; Kleanthous, S.
Lessons learned using a virtual world to support collaborative learning in the classroom Journal Article
In: Journal of Universal Computer Science, vol. 26, no. 8, pp. 858–879, 2020.
@article{shu27651,
  title     = {Lessons learned using a virtual world to support collaborative learning in the classroom},
  author    = {Louis Nisiotis and Styliani Kleanthous},
  url       = {http://shura.shu.ac.uk/27651/},
  year      = {2020},
  date      = {2020-08-01},
  journal   = {Journal of Universal Computer Science},
  volume    = {26},
  number    = {8},
  pages     = {858--879},
  abstract  = {Using technology in education is crucial to support learning, and Virtual Worlds (VWs) are one of the technologies used by many educators to support their teaching objectives. VWs enable students to connect, synchronously interact, and participate in immersive learning activities. Such VW has been developed at Sheffield Hallam University (UK), and is used to support the teaching of a specific module, as well as for conducting empirical research around the topics of Transactive Memory Systems (TMS) and Students Engagement. TMS is a phenomenon representing the collective awareness of a group's specialisation, coordination, and credibility with interesting results. This paper presents the lessons learned while using the VW over the past few years at a higher education institution to support collaborative learning within working groups. A review of these empirical findings is presented, together with the results of a follow up study conducted to further investigate TMS and student Engagement, as well as students perceived Motivation to use a VW for learning, and their Learning Outcomes. The findings of this study are corroborating and contributing to previous results, suggesting that a VW is an effective tool to support collaborative learning activities, allowing students to engage in the learning process, motivate them to participate in activities, and contribute to their overall learning experience.},
  note      = {Copyright 2020, IICM. All rights reserved.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kyriakou, Kyriakos; Barlas, Pinar; Kleanthous, Styliani; Otterbacher, Jahna
OpenTag: Understanding Human Perceptions of Image Tagging Algorithms Working paper
2020.
@workingpaper{kyriakou2020opentag,
title = {OpenTag: Understanding Human Perceptions of Image Tagging Algorithms},
author = {Kyriakos Kyriakou and Pinar Barlas and Styliani Kleanthous and Jahna Otterbacher},
url = {https://www.humancomputation.com/2020/assets/2020/wip_demos/HCOMP_2020_paper_76.pdf},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
booktitle = {Proceedings of the 8th AAAI Conference on Human Computation and Crowdsourcing},
abstract = {Image Tagging Algorithms (ITAs) are extensively used in our information ecosystem, from facilitating the retrieval of images in social platforms to learning about users and their preferences. However, audits performed on ITAs have demonstrated that their behaviors often exhibit social biases, especially when analyzing images depicting people. We present OpenTag, a platform that fuses the auditing process with a crowdsourcing approach. Users can upload an image, which is then analyzed by various ITAs, resulting in multiple sets of descriptive tags. With OpenTag, the user can observe and compare the output of multiple ITAs simultaneously, while researchers can study the manner in which users perceive this output. Finally, using the collected data, further audits can be performed on ITAs.},
keywords = {},
pubstate = {published},
tppubtype = {workingpaper}
}
Chrysanthou, Antrea; Barlas, Pinar; Kyriakou, Kyriakos; Kleanthous, Styliani; Otterbacher, Jahna
Bursting the Bubble: Tool for Awareness and Research about Overpersonalization in Information Access Systems Inproceedings
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 112–113, Association for Computing Machinery, Cagliari, Italy, 2020, ISBN: 9781450375139.
@inproceedings{10.1145/3379336.3381863,
title = {Bursting the Bubble: Tool for Awareness and Research about Overpersonalization in Information Access Systems},
author = {Antrea Chrysanthou and Pinar Barlas and Kyriakos Kyriakou and Styliani Kleanthous and Jahna Otterbacher},
url = {https://doi.org/10.1145/3379336.3381863},
doi = {10.1145/3379336.3381863},
isbn = {9781450375139},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {112--113},
publisher = {Association for Computing Machinery},
address = {Cagliari, Italy},
series = {IUI '20},
abstract = {Modern information access systems extensively use personalization, automatically filtering and/or ranking content based on the user profile, to guide users to the most relevant material. However, this can also lead to unwanted effects such as the "filter bubble." We present an interactive demonstration system, designed as an educational and research tool, which imitates a search engine, personalizing the search results returned for a query based on the user's characteristics. The system can be tailored to suit any type of audience and context, as well as enabling the collection of responses and interaction data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Barlas, Pinar; Kyriakou, Kyriakos; Chrysanthou, Antrea; Kleanthous, Styliani; Otterbacher, Jahna
OPIAS: Over-Personalization in Information Access Systems Inproceedings
In: Adjunct Publication of the 28th ACM Conference on User Modeling, Adaptation and Personalization, pp. 103–104, Association for Computing Machinery, Genoa, Italy, 2020, ISBN: 9781450379502.
@inproceedings{10.1145/3386392.3397607,
title = {OPIAS: Over-Personalization in Information Access Systems},
author = {Pinar Barlas and Kyriakos Kyriakou and Antrea Chrysanthou and Styliani Kleanthous and Jahna Otterbacher},
url = {https://doi.org/10.1145/3386392.3397607},
doi = {10.1145/3386392.3397607},
isbn = {9781450379502},
year = {2020},
date = {2020-01-01},
booktitle = {Adjunct Publication of the 28th ACM Conference on User Modeling, Adaptation and Personalization},
pages = {103--104},
publisher = {Association for Computing Machinery},
address = {Genoa, Italy},
series = {UMAP '20 Adjunct},
abstract = {"Filter bubbles," a phenomenon in which users become caught in an information space with low diversity, can have various negative effects. Several tools have been created to monitor the users' actions to make them aware of their own filter bubbles, but these tools have disadvantages (e.g., infringement on privacy). We propose a standalone demo that does not require any personal data. It emulates Facebook, a well-known and popular social network. We demonstrate how each user interaction may affect the selection of subsequent posts, sometimes resulting in the creation of a 'filter bubble.' The administrator (researcher) can tailor the demo for any context, changing the topics and points of view used in the demo. Data collection via surveys before and after the demo is facilitated so that the demo can be used for research, in addition to education.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kyriakou, Kyriakos; Kleanthous, Styliani; Otterbacher, Jahna; Papadopoulos, George A.
Emotion-Based Stereotypes in Image Analysis Services Inproceedings
In: Adjunct Publication of the 28th ACM Conference on User Modeling, Adaptation and Personalization, pp. 252–259, Association for Computing Machinery, Genoa, Italy, 2020, ISBN: 9781450379502.
@inproceedings{10.1145/3386392.3399567,
title = {Emotion-Based Stereotypes in Image Analysis Services},
author = {Kyriakos Kyriakou and Styliani Kleanthous and Jahna Otterbacher and George A. Papadopoulos},
url = {https://doi.org/10.1145/3386392.3399567},
doi = {10.1145/3386392.3399567},
isbn = {9781450379502},
year = {2020},
date = {2020-01-01},
booktitle = {Adjunct Publication of the 28th ACM Conference on User Modeling, Adaptation and Personalization},
pages = {252--259},
publisher = {Association for Computing Machinery},
address = {Genoa, Italy},
series = {UMAP '20 Adjunct},
abstract = {Vision-based cognitive services (CogS) have become crucial in a wide range of applications, from real-time security and social networks to smartphone applications. Many services focus on analyzing people images. When it comes to facial analysis, these services can be misleading or even inaccurate, raising ethical concerns such as the amplification of social stereotypes. We analyzed popular Image Tagging CogS that infer emotion from a person's face, considering whether they perpetuate racial and gender stereotypes concerning emotion. By comparing both CogS and Human-generated descriptions on a set of controlled images, we highlight the need for transparency and fairness in CogS. In particular, we document evidence that CogS may actually be more likely than crowdworkers to perpetuate the stereotype of the "angry black man" and often attribute black race individuals with "emotions of hostility".},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Christoforou, Evgenia; Barlas, Pinar; Otterbacher, Jahna
Crowdwork as a Snapshot in Time: Image Annotation Tasks during a Pandemic Journal Article
In: 2020.
@article{christoforou2020crowdwork,
title = {Crowdwork as a Snapshot in Time: Image Annotation Tasks during a Pandemic},
author = {Evgenia Christoforou and Pinar Barlas and Jahna Otterbacher},
year = {2020},
date = {2020-01-01},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Smith-Renner, Alison; Kleanthous, Styliani; Lim, Brian; Kuflik, Tsvi; Stumpf, Simone; Otterbacher, Jahna; Sarkar, Advait; Dugan, Casey; Shulner, Avital
ExSS-ATEC: Explainable Smart Systems for Algorithmic Transparency in Emerging Technologies 2020 Inproceedings
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 7–8, Association for Computing Machinery, Cagliari, Italy, 2020, ISBN: 9781450375139.
@inproceedings{10.1145/3379336.3379361,
title = {ExSS-ATEC: Explainable Smart Systems for Algorithmic Transparency in Emerging Technologies 2020},
author = {Alison Smith-Renner and Styliani Kleanthous and Brian Lim and Tsvi Kuflik and Simone Stumpf and Jahna Otterbacher and Advait Sarkar and Casey Dugan and Avital Shulner},
url = {https://doi.org/10.1145/3379336.3379361},
doi = {10.1145/3379336.3379361},
isbn = {9781450375139},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {7--8},
publisher = {Association for Computing Machinery},
address = {Cagliari, Italy},
series = {IUI '20},
abstract = {Smart systems that apply complex reasoning to make decisions and plan behavior, such as decision support systems and personalized recommendations, are difficult for users to understand. Algorithms allow the exploitation of rich and varied data sources, in order to support human decision-making and/or taking direct actions; however, there are increasing concerns surrounding their transparency and accountability, as these processes are typically opaque to the user. Transparency and accountability have attracted increasing interest to provide more effective system training, better reliability and improved usability. This workshop will provide a venue for exploring issues that arise in designing, developing and evaluating intelligent user interfaces that provide system transparency or explanations of their behavior. In addition, our goal is to focus on approaches to mitigate algorithmic biases that can be applied by researchers, even without access to a given system's inter-workings, such as awareness, data provenance, and validation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Otterbacher, Jahna; Barlas, Pınar; Kleanthous, Styliani; Kyriakou, Kyriakos
How Do We Talk about Other People? Group (Un)Fairness in Natural Language Image Descriptions Inproceedings
In: pp. 106-114, 2019.
@inproceedings{Otterbacher_Barlas_Kleanthous_Kyriakou_2019,
title = {How Do We Talk about Other People? Group (Un)Fairness in Natural Language Image Descriptions},
author = {Jahna Otterbacher and Pınar Barlas and Styliani Kleanthous and Kyriakos Kyriakou},
url = {https://ojs.aaai.org/index.php/HCOMP/article/view/5267},
year = {2019},
date = {2019-10-01},
urldate = {2019-10-01},
booktitle = {Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
volume = {7},
number = {1},
pages = {106--114},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kyriakou, Kyriakos; Barlas, Pınar; Kleanthous, Styliani; Otterbacher, Jahna
Fairness in Proprietary Image Tagging Algorithms: A Cross-Platform Audit on People Images Inproceedings
In: pp. 313-322, 2019.
@inproceedings{Kyriakou_Barlas_Kleanthous_Otterbacher_2019,
title = {Fairness in Proprietary Image Tagging Algorithms: A Cross-Platform Audit on People Images},
author = {Kyriakos Kyriakou and Pınar Barlas and Styliani Kleanthous and Jahna Otterbacher},
url = {https://ojs.aaai.org/index.php/ICWSM/article/view/3232},
year = {2019},
date = {2019-07-01},
urldate = {2019-07-01},
booktitle = {Proceedings of the International AAAI Conference on Web and Social Media},
volume = {13},
number = {01},
pages = {313--322},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Barlas, Pınar; Kyriakou, Kyriakos; Kleanthous, Styliani; Otterbacher, Jahna
Social B(eye)as: Human and Machine Descriptions of People Images Inproceedings
In: pp. 583-591, 2019.
@inproceedings{Barlas_Kyriakou_Kleanthous_Otterbacher_2019,
title = {Social B(eye)as: Human and Machine Descriptions of People Images},
author = {Pınar Barlas and Kyriakos Kyriakou and Styliani Kleanthous and Jahna Otterbacher},
url = {https://ojs.aaai.org/index.php/ICWSM/article/view/3255},
year = {2019},
date = {2019-07-01},
urldate = {2019-07-01},
booktitle = {Proceedings of the International AAAI Conference on Web and Social Media},
volume = {13},
number = {01},
pages = {583--591},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tal, Avital Shulner; Batsuren, Khuyagbaatar; Bogina, Veronika; Giunchiglia, Fausto; Hartman, Alan; Loizou, Styliani Kleanthous; Kuflik, Tsvi; Otterbacher, Jahna
“End to End” Towards a Framework for Reducing Biases and Promoting Transparency of Algorithmic Systems Inproceedings
In: 2019 14th International Workshop on Semantic and Social Media Adaptation and Personalization (SMAP), pp. 1-6, 2019.
@inproceedings{8864914,
title = {“End to End” Towards a Framework for Reducing Biases and Promoting Transparency of Algorithmic Systems},
author = {Avital Shulner Tal and Khuyagbaatar Batsuren and Veronika Bogina and Fausto Giunchiglia and Alan Hartman and Styliani Kleanthous Loizou and Tsvi Kuflik and Jahna Otterbacher},
doi = {10.1109/SMAP.2019.8864914},
year = {2019},
date = {2019-06-01},
booktitle = {2019 14th International Workshop on Semantic and Social Media Adaptation and Personalization (SMAP)},
pages = {1--6},
abstract = {Algorithms play an increasing role in our everyday lives. Recently, the harmful potential of biased algorithms has been recognized by researchers and practitioners. We have also witnessed a growing interest in ensuring the fairness and transparency of algorithmic systems. However, so far there is no agreed upon solution and not even an agreed terminology. The proposed research defines the problem space, solution space and a prototype of comprehensive framework for the detection and reducing biases in algorithmic systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Barlas, Pinar; Kleanthous, Styliani; Kyriakou, Kyriakos; Otterbacher, Jahna
What Makes an Image Tagger Fair? Inproceedings
In: Proceedings of the 27th ACM Conference on User Modeling, Adaptation and Personalization, pp. 95–103, Association for Computing Machinery, New York, NY, USA, 2019, ISBN: 9781450360210.
@inproceedings{10.1145/3320435.3320442,
title = {What Makes an Image Tagger Fair?},
author = {Pinar Barlas and Styliani Kleanthous and Kyriakos Kyriakou and Jahna Otterbacher},
url = {https://doi.org/10.1145/3320435.3320442},
doi = {10.1145/3320435.3320442},
isbn = {9781450360210},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the 27th ACM Conference on User Modeling, Adaptation and Personalization},
pages = {95--103},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Image analysis algorithms have been a boon to personalization in digital systems and are now widely available via easy-to-use APIs. However, it is important to ensure that they behave fairly in applications that involve processing images of people, such as dating apps. We conduct an experiment to shed light on the factors influencing the perception of "fairness." Participants are shown a photo along with two descriptions (human- and algorithm-generated). They are then asked to indicate which is "more fair" in the context of a dating site, and explain their reasoning. We vary a number of factors, including the gender, race and attractiveness of the person in the photo. While participants generally found human-generated tags to be more fair, API tags were judged as being more fair in one setting - where the image depicted an "attractive," white individual. In their explanations, participants often mention accuracy, as well as the objectivity/subjectivity of the tags in the description. We relate our work to the ongoing conversation about fairness in opaque tools like image tagging APIs, and their potential to result in harm.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Matsangidou, Maria; Otterbacher, Jahna
What Is Beautiful Continues to Be Good Inproceedings
In: Lamas, David; Loizides, Fernando; Nacke, Lennart; Petrie, Helen; Winckler, Marco; Zaphiris, Panayiotis (Ed.): Human-Computer Interaction -- INTERACT 2019, pp. 243–264, Springer International Publishing, Cham, 2019, ISBN: 978-3-030-29390-1.
@inproceedings{10.1007/978-3-030-29390-1_14,
title = {What Is Beautiful Continues to Be Good},
author = {Maria Matsangidou and Jahna Otterbacher},
editor = {David Lamas and Fernando Loizides and Lennart Nacke and Helen Petrie and Marco Winckler and Panayiotis Zaphiris},
doi = {10.1007/978-3-030-29390-1_14},
isbn = {978-3-030-29390-1},
year = {2019},
date = {2019-01-01},
booktitle = {Human-Computer Interaction -- INTERACT 2019},
pages = {243--264},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {Image recognition algorithms that automatically tag or moderate content are crucial in many applications but are increasingly opaque. Given transparency concerns, we focus on understanding how algorithms tag people images and their inferences on attractiveness. Theoretically, attractiveness has an evolutionary basis, guiding mating behaviors, although it also drives social behaviors. We test image-tagging APIs as to whether they encode biases surrounding attractiveness. We use the Chicago Face Database, containing images of diverse individuals, along with subjective norming data and objective facial measurements. The algorithms encode biases surrounding attractiveness, perpetuating the stereotype that ``what is beautiful is good.'' Furthermore, women are often misinterpreted as men. We discuss the algorithms' reductionist nature, and their potential to infringe on users' autonomy and well-being, as well as the ethical and legal considerations for developers. Future services should monitor algorithms' behaviors given their prevalence in the information ecosystem and influence on media.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spiel, Katta; Keyes, Os; Walker, Ashley Marie; DeVito, Michael A.; Birnholtz, Jeremy; Brulé, Emeline; Light, Ann; Barlas, Pinar; Hardy, Jean; Ahmed, Alex; Rode, Jennifer A.; Brubaker, Jed R.; Kannabiran, Gopinaath
Queer(Ing) HCI: Moving Forward in Theory and Practice Inproceedings
In: Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems, pp. 1–4, Association for Computing Machinery, Glasgow, Scotland Uk, 2019, ISBN: 9781450359719.
@inproceedings{10.1145/3290607.3311750,
title = {Queer(Ing) HCI: Moving Forward in Theory and Practice},
author = {Katta Spiel and Os Keyes and Ashley Marie Walker and Michael A. DeVito and Jeremy Birnholtz and Emeline Brulé and Ann Light and Pinar Barlas and Jean Hardy and Alex Ahmed and Jennifer A. Rode and Jed R. Brubaker and Gopinaath Kannabiran},
url = {https://doi.org/10.1145/3290607.3311750},
doi = {10.1145/3290607.3311750},
isbn = {9781450359719},
year = {2019},
date = {2019-01-01},
booktitle = {Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems},
pages = {1--4},
publisher = {Association for Computing Machinery},
address = {Glasgow, Scotland Uk},
series = {CHI EA '19},
abstract = {The increasing corpus on queer research within HCI, which started by focusing on sites such as location-based dating apps, has begun to expand to other topics such as identity formation, mental health and physical well-being. This Special Interest Group (SIG) aims to create a space for discussion, connection and camaraderie for researchers working with queer populations, queer people in research, and those using queer theory to inform their work. We aim to facilitate a broad-ranging, inclusive discussion of where queer HCI research goes next.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spiel, Katta; Keyes, Os; Barlas, Pinar
Patching Gender: Non-Binary Utopias in HCI Inproceedings
In: Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems, pp. 1–11, Association for Computing Machinery, Glasgow, Scotland Uk, 2019, ISBN: 9781450359719.
@inproceedings{10.1145/3290607.3310425,
title = {Patching Gender: Non-Binary Utopias in HCI},
author = {Katta Spiel and Os Keyes and Pinar Barlas},
url = {https://doi.org/10.1145/3290607.3310425},
doi = {10.1145/3290607.3310425},
isbn = {9781450359719},
year = {2019},
date = {2019-01-01},
booktitle = {Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems},
pages = {1--11},
publisher = {Association for Computing Machinery},
address = {Glasgow, Scotland Uk},
series = {CHI EA '19},
abstract = {Non-binary people are rarely considered by technologies or technologists, and often subsumed under binary trans experiences on the rare occasions when we are discussed. In this paper we share our own experiences and explore potential alternatives - utopias, impossible places, as our lived experience of technologies' obsessive gender binarism seems near-insurmountable. Our suggestions on how to patch these gender bugs appear trivial while at the same time revealing seemingly insurmountable barriers. We illustrate the casual violence technologies present to non-binary people, as well as the on-going marginalisations we experience as HCI researchers. We write this paper primarily as an expression of self-empowerment that can function as a first step towards raising awareness towards the complexities at stake.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kleanthous, Styliani; Bielikova, Maria; Steichen, Ben
UMAP 2019 Demo and Late-Breaking Results - Chairs' Preface Inproceedings
In: Adjunct Publication of the 27th Conference on User Modeling, Adaptation and Personalization, pp. 19, Association for Computing Machinery, Larnaca, Cyprus, 2019, ISBN: 9781450367110.
@inproceedings{10.1145/3314183.3324969,
title = {UMAP 2019 Demo and Late-Breaking Results - Chairs' Preface},
author = {Styliani Kleanthous and Maria Bielikova and Ben Steichen},
url = {https://doi.org/10.1145/3314183.3324969},
doi = {10.1145/3314183.3324969},
isbn = {9781450367110},
year = {2019},
date = {2019-01-01},
booktitle = {Adjunct Publication of the 27th Conference on User Modeling, Adaptation and Personalization},
pages = {19},
publisher = {Association for Computing Machinery},
address = {Larnaca, Cyprus},
series = {UMAP'19 Adjunct},
abstract = {It is our great pleasure to welcome you to the UMAP 2019 LBR and Demo Track, in conjunction with the 27th Conference on User Modelling, Adaptation and Personalization, held in Larnaca, Cyprus on June 9-12th, 2019. This track encompasses two categories: (i) Demos, which showcase research prototypes and commercially available products of UMAP-based systems, (ii) Late-breaking Results (LBR), which contain original and unpublished accounts of innovative research ideas, preliminary results, industry showcases, and system prototypes, addressing both the theory and practice of UMAP. The submissions spanned a wide scope of topics, ranging from novel techniques for user and group modeling, to adaptation and personalization implementations across different application scenarios. We received 46 LBR and 4 Demo submissions. Each submission was carefully reviewed by members of the Demo and LBR program committee, which consisted of 89 members. Each submission was reviewed by at least 3 PC members. Out of this total of 50 submissions, 15 LBR and 3 Demos were deemed of good quality by the reviewers, and were consequently accepted (36% overall acceptance rate). They were presented in the UMAP poster sessions, which collectively showcased the wide spectrum of novel ideas and latest results in user modelling, adaptation and personalization.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}