2025
Erle, Lukas; Timm, Lara; Eimler, Sabrina C.; Straßmann, Carolin
Perceive, React, Act – Exploring Bias Experiences, Blame Attributions, and Coping with Algorithmic Bias through Diverse Sampling Conference Forthcoming
2025 34th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), Forthcoming.
@conference{perceivereactact2025,
title = {Perceive, React, Act – Exploring Bias Experiences, Blame Attributions, and Coping with Algorithmic Bias through Diverse Sampling},
author = {Lukas Erle and Lara Timm and Sabrina C. Eimler and Carolin Straßmann},
year = {2025},
date = {2025-10-31},
booktitle = {2025 34th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
keywords = {},
pubstate = {forthcoming},
tppubtype = {conference}
}
Helgert, André; Erle, Lukas; Dittmann, Andre; Eimler, Sabrina C.; Straßmann, Carolin
Lost in Transparency? Exploring Uni- and Multimodal Transparency Declarations in Human-Robot Interaction Conference Forthcoming
2025 34th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), Forthcoming.
@conference{lostintransparency2025,
title = {Lost in Transparency? Exploring Uni- and Multimodal Transparency Declarations in Human-Robot Interaction},
author = {André Helgert and Lukas Erle and Andre Dittmann and Sabrina C. Eimler and Carolin Straßmann},
year = {2025},
date = {2025-10-31},
booktitle = {2025 34th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
keywords = {},
pubstate = {forthcoming},
tppubtype = {conference}
}
Erle, Lukas; Eimler, Sabrina C.; Fulantelli, Giovanni; Handmann, Uwe; Hernández-Leo, Davinia; Koyuturk, Cansu; Niemann, Berit; Ognibene, Dimitri; Papadimitriou, Achileas; Sironi, Giulia; Taibi, Davide; Theophilou, Emily; Zarifis, George K.
AI EU-phoria? Exploring International Sentiments on the Use of AI in Higher Education Conference
Short Paper Proceedings of the D-SAIL Workshop on Transformative Curriculum Design – Digitalisation, Sustainability, and AI Literacy for 21st Century Learning co-located with 26th International Conference on Artificial Intelligence in Education (AIED 2025), vol. 4051, no. 3, CEUR Workshop Proceedings, 2025.
@conference{aieuphoria2025,
title = {AI EU-phoria? Exploring International Sentiments on the Use of AI in Higher Education},
author = {Lukas Erle and Sabrina C. Eimler and Giovanni Fulantelli and Uwe Handmann and Davinia Hernández-Leo and Cansu Koyuturk and Berit Niemann and Dimitri Ognibene and Achileas Papadimitriou and Giulia Sironi and Davide Taibi and Emily Theophilou and George K. Zarifis},
editor = {Martin Ruskov and Dimitri Ognibene and Davinia Hernández-Leo and Davide Taibi and Yannis Dimitriadis and Giovanni Fulantelli and Uwe Handmann and George K. Zarifis and Yan Wu and Paolo Maria Ferri},
url = {https://ceur-ws.org/Vol-4051/paper3.pdf},
urn = {urn:nbn:de:0074-4051-X},
year = {2025},
date = {2025-10-05},
urldate = {2025-07-19},
booktitle = {Short Paper Proceedings of the D-SAIL Workshop on Transformative Curriculum Design - Digitalisation, Sustainability, and AI Literacy for 21st Century Learning co-located with 26th International Conference on Artificial Intelligence in Education (AIED 2025)},
volume = {4051},
number = {3},
pages = {26-31},
publisher = {CEUR Workshop Proceedings},
series = {CEUR Workshop Proceedings},
abstract = {The onset of artificial intelligence (AI) tools has affected various areas in business and society. AI has also begun
changing the way higher education institutions carry out their work, leading educators, students, and university
staff to adapt to the peculiarities of AI. While research has begun investigating the impact of AI tools on higher
education, these studies largely focus on specific countries, lacking the integration of international perspectives.
Since there is a need for information on available resources and cultural differences, this integration should prove
highly beneficial to help countries develop strategies to enable a safe, inclusive, and sustainable use of AI in higher
education. We conducted five focus group interviews with N = 38 participants from three EU countries (Germany,
Spain, and Greece), combining international experiences in the use of AI in higher education, possible benefits
and challenges, and requirements for a sustainable use. Using a collection of different methodical approaches, we
fostered an open exchange with teachers, students, and staff from higher education. Our preliminary findings
offer a cross-national perspective on the use of AI in higher education.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Erle, Lukas; Helgert, André; Dittmann, Andre; Straßmann, Carolin; Eimler, Sabrina C.
Tolerance for Technological Hiccups: Personality Predispositions and Responses to Erroneous Robotic Systems Conference Forthcoming
14th Conference of the Media Psychology Division (DGPs), Forthcoming.
@conference{tolerancehiccups2025,
title = {Tolerance for Technological Hiccups: Personality Predispositions and Responses to Erroneous Robotic Systems},
author = {Lukas Erle and André Helgert and Andre Dittmann and Carolin Straßmann and Sabrina C. Eimler},
year = {2025},
date = {2025-09-10},
urldate = {2025-09-10},
booktitle = {14th Conference of the Media Psychology Division (DGPs)},
keywords = {},
pubstate = {forthcoming},
tppubtype = {conference}
}
Helgert, André; Erle, Lukas; Dittmann, Andre; Straßmann, Carolin; Eimler, Sabrina C.
When Robots Spill the Beans: Exploring Transparency Declarations in Human-Robot Interaction Conference Forthcoming
14th Conference of the Media Psychology Division (DGPs), Forthcoming.
@conference{robotsspillbeans2025,
title = {When Robots Spill the Beans: Exploring Transparency Declarations in Human-Robot Interaction},
author = {André Helgert and Lukas Erle and Andre Dittmann and Carolin Straßmann and Sabrina C. Eimler},
year = {2025},
date = {2025-09-10},
urldate = {2025-09-10},
booktitle = {14th Conference of the Media Psychology Division (DGPs)},
keywords = {},
pubstate = {forthcoming},
tppubtype = {conference}
}
Timm, Lara; Erle, Lukas; Eimler, Sabrina C.; Straßmann, Carolin
Creating Visibility: Challenges of Sample Diversity and Representativity Conference Forthcoming
14th Conference of the Media Psychology Division (DGPs), Forthcoming.
@conference{creatingvisibility2025,
title = {Creating Visibility: Challenges of Sample Diversity and Representativity},
author = {Lara Timm and Lukas Erle and Sabrina C. Eimler and Carolin Straßmann},
year = {2025},
date = {2025-09-10},
urldate = {2025-09-10},
booktitle = {14th Conference of the Media Psychology Division (DGPs)},
keywords = {},
pubstate = {forthcoming},
tppubtype = {conference}
}
Kumar, Rosika; Erdogan, Ceyda; Azizi, Mohammad; Erle, Lukas; Straßmann, Carolin; Eimler, Sabrina C.
Can you Change my Mind? On the Role of Robot Appearance on Decision Making in a Moral Dilemma Conference Forthcoming
14th Conference of the Media Psychology Division (DGPs), Forthcoming.
@conference{changemymind2025,
title = {Can you Change my Mind? On the Role of Robot Appearance on Decision Making in a Moral Dilemma},
author = {Rosika Kumar and Ceyda Erdogan and Mohammad Azizi and Lukas Erle and Carolin Straßmann and Sabrina C. Eimler},
year = {2025},
date = {2025-09-10},
urldate = {2025-09-10},
booktitle = {14th Conference of the Media Psychology Division (DGPs)},
keywords = {},
pubstate = {forthcoming},
tppubtype = {conference}
}
Erle, Lukas; Hoss, Thomas; Peltzer, Isabel; Eimler, Sabrina C.
Opportunities and Challenges of Generative AI in Education through the Eyes of Students and Educators: A Qualitative Interview Approach Conference
Artificial Intelligence in Education. AIED 2025. Lecture Notes in Computer Science, vol. 15882, Springer, Cham, 2025, ISBN: 978-3-031-98465-5.
@conference{genaieducation2025,
title = {Opportunities and Challenges of Generative AI in Education through the Eyes of Students and Educators: A Qualitative Interview Approach},
author = {Lukas Erle and Thomas Hoss and Isabel Peltzer and Sabrina C. Eimler},
editor = {A.I. Cristea and E. Walker and Y. Lu and O.C. Santos and S. Isotani},
doi = {10.1007/978-3-031-98465-5_40},
isbn = {978-3-031-98465-5},
year = {2025},
date = {2025-07-20},
urldate = {2025-07-22},
booktitle = {Artificial Intelligence in Education. AIED 2025. Lecture Notes in Computer Science},
volume = {15882},
pages = {316-323},
publisher = {Springer, Cham},
abstract = {The onset of Generative Artificial Intelligence (GenAI) tools has caused a surge in academic applications: Students can use these tools to support their learning processes, while educators enrich their lectures with GenAI. The integration of these tools, however, is tied to challenges like misinformation and algorithmic bias. Few institutions have introduced uniform guidelines regarding GenAI, and little research has examined the usage patterns in higher education. To gain a comprehensive understanding of what opportunities and challenges stakeholders in higher education institutions encounter when adopting these tools, we conducted N = 30 interviews with students and academic staff. Our findings reveal that interviewees see largely similar opportunities (e.g., a more individualized teaching support for students) and challenges (e.g., fair evaluations of written submissions becoming more difficult). We contribute to research by highlighting the perspectives of students and academic staff, as well as offering suggestions for guidelines.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Finkel, Marcel; Timm, Lara; Erle, Lukas; Arntz, Alexander; Helgert, André; Straßmann, Carolin; Eimler, Sabrina C.
Robot or Employee? Exploring People’s Choice for or Against an Interaction with a Social Robot Conference
Social Robotics. ICSR + AI 2024., vol. 15561, Lecture Notes in Computer Science Springer, Singapore, 2025, ISBN: 978-981-96-3522-1.
@conference{robotoremployee2025,
title = {Robot or Employee? Exploring People’s Choice for or Against an Interaction with a Social Robot},
author = {Marcel Finkel and Lara Timm and Lukas Erle and Alexander Arntz and André Helgert and Carolin Straßmann and Sabrina C. Eimler},
url = {https://link.springer.com/chapter/10.1007/978-981-96-3522-1_38},
doi = {10.1007/978-981-96-3522-1_38},
isbn = {978-981-96-3522-1},
year = {2025},
date = {2025-03-25},
urldate = {2025-03-25},
booktitle = {Social Robotics. ICSR + AI 2024.},
volume = {15561},
publisher = {Springer},
address = {Singapore},
series = {Lecture Notes in Computer Science},
abstract = {Employing social robots in public spaces to support employees at work is a frequently discussed scenario. However, the success of robotic systems often depends on people’s willingness to initiate interactions with them. This makes understanding people’s usage decisions crucial, yet only limited research has been done on why people select publicly accessible social robots over alternatives, such as human employees. Amongst various factors, people’s diversity characteristics are likely to influence this decision, such as people’s locus of control when using technology and their self-efficacy in human-robot interaction. To investigate this choice for or against using a robot, a field study (N = 65) was conducted in two public libraries in the Ruhr area (Germany). Participants had to decide to interact with a robot or an employee and were subsequently asked to explain their decision via a questionnaire and an interview. Results reveal that the decision could neither be explained by people’s locus of control when using technology nor by other diversity characteristics. Furthermore, no significant differences in self-efficacy in human-robot interaction between users who chose the robot instead of the human employee were found. Finally, the qualitative findings point to general interest in robots and people’s differences in dealing with novelty as reasons for their choice. Overall, our findings offer insights into the decision for or against the usage of a robot, which are relevant to both research and the deployment of social robots in public spaces.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2024
Helgert, André; Erle, Lukas; Langer, Sabrina; Straßmann, Carolin; Eimler, Sabrina C.
Towards Understandable Transparency in Human-Robot-Interactions in Public Spaces Conference
2024 33rd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), 2024.
@conference{understandabletransparency2024,
title = {Towards Understandable Transparency in Human-Robot-Interactions in Public Spaces},
author = {André Helgert and Lukas Erle and Sabrina Langer and Carolin Straßmann and Sabrina C. Eimler},
editor = {Institute of Electrical and Electronics Engineers (IEEE)},
url = {https://ieeexplore.ieee.org/document/10731175},
doi = {10.1109/RO-MAN60168.2024.10731175},
year = {2024},
date = {2024-10-30},
urldate = {2024-10-30},
booktitle = {2024 33rd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {1162-1169},
abstract = {The deployment of social robots in public spaces has received increased interest over the past years. These robots need to process a wide array of personal data to offer services that are tailored to users’ requirements. While much research has been carried out regarding the creation of explainable content, little research has dealt with how data transparency - as a way to address uncertainty and concerns regarding the handling of personal data - is conveyed to users. To examine the impact of different transparency declarations on trust, performance, and robot perception, we conducted a virtual reality (VR) supported laboratory experiment with N = 53 participants who interacted with a robot in a public setting (a library). The interaction between users and robots was accompanied by information on the handling of users’ personal data using three different modalities (via posters, the robot’s tablet, or verbally). The results imply that, while all modalities are understandable and perceived as useful, there is no preference for any modality. Our findings contribute to HRI research by examining different modalities for transparency declarations, in an effort to foster understandable and transparent processing of data.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Erle, Lukas; Timm, Lara; Kleinhaus, Lara; Straßmann, Carolin; Eimler, Sabrina C.
Towards a Human Diversity Wheel for Human Robot Interaction Conference
2024 33rd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), 2024.
@conference{diversitywheel2024,
title = {Towards a Human Diversity Wheel for Human Robot Interaction},
author = {Lukas Erle and Lara Timm and Lara Kleinhaus and Carolin Straßmann and Sabrina C. Eimler},
editor = {Institute of Electrical and Electronics Engineers (IEEE)},
year = {2024},
date = {2024-10-30},
urldate = {2024-10-30},
booktitle = {2024 33rd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
abstract = {While extant research on human-robot-interactions (HRI) has dealt with the examination of different user characteristics, quantifying and describing the various characteristics of human diversity remains a challenge for HRI research. This in turn often leads to a disregard of human diversity in the design of HRI, homogeneous study samples, and differences in technology access. Addressing these challenges, we conducted a systematic synthesis of existing models on human diversity, culminating in the development of a model we coined the Human Diversity Wheel for Robotic Interactions (HDWRI). The goal of this model is to provide designers and researchers in HRI with an analytical lens to ensure their work considers different human characteristics. To achieve this, we started to conduct expert interviews with HRI researchers to put our model into a practical context. This paper presents the development of our model, preliminary findings of our first interviews, and an outline of future steps.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Erle, Lukas; Hoss, Thomas; Peltzer, Isabel; Eimler, Sabrina C.
Generative AI in Higher Education – Perspectives from Students and Teaching Staff Conference
6th International Conference on Higher Education Learning Methodologies and Technologies Online (HELMeTO 2024), 2024.
@conference{genaihighered2024,
title = {Generative AI in Higher Education - Perspectives from Students and Teaching Staff},
author = {Lukas Erle and Thomas Hoss and Isabel Peltzer and Sabrina C. Eimler},
year = {2024},
date = {2024-09-27},
urldate = {2024-09-27},
booktitle = {6th International Conference on Higher Education Learning Methodologies and Technologies Online (HELMeTO 2024)},
abstract = {As a result of a massive increase in user numbers of Generative AI (GAI) like ChatGPT, members of higher education institutions have started to integrate such tools into their work and studies, for example as part of their lecture follow-ups. Students can use GAI to summarize lectures, while lecturers can use various tools to support the didactic design of their lectures and offer students more options to repeat learning materials. However, GAI does not come without risks, such as the spread of false information or a reduction of mental engagement with the courses. Especially for written assignments, like bachelor or master theses, GAI tools pose a challenge: AI detector tools work unreliably at best when attempting to identify AI-generated texts, so lecturers have no reliable tool to differentiate between human and AI submissions, which negatively affects fair evaluations. Beyond that, there are concerns regarding algorithmic bias and a subsequent increase of discrimination amongst students. Despite these risks, few institutions have established clear guidelines on how GAI tools should be used by staff and students, leading to various risks for education. There have been calls for more research on how GAI tools can safely be integrated into universities, and little research has considered the experiences and opinions of students and lecturers. We address this gap with semi-structured interviews among students and teaching staff.
We conducted a total of N = 30 interviews with n = 16 students and n = 14 staff from different German universities. The interviews were conducted and recorded via a video conferencing tool and lasted an average of 30.33 minutes (SD = 7.81). After transcription, a content analysis was carried out using open coding.
For these interviews, we created a guide for the student and university staff interviewees respectively. These guides were largely similar, differing with regard to the interviewees’ roles at their university. After gathering demographic data (such as age, study field, and academic experience) we asked participants what GAI tools they are using (as part of their studies or course preparation respectively). If participants did use such tools, we further asked what benefits they expect for themselves and how they deal with the responses (e.g., whether they check the results for correctness). In the second block of questions, we asked interviewees what chances and challenges they see in using GAI tools for educational purposes, and whether they were worried regarding their careers in the future. The third question block dealt with how such tools affect the fairness of evaluations (e.g., when some students use these tools and others do not) and discrimination. Besides open questions we also presented sample scenarios of cheating using GAI tools or unfair evaluations to gather interviewees’ opinions and whether they had already experienced similar situations. In the fourth thematic section of the interview guide, we asked interviewees whether they felt that GAI tools needed regulation in the university context, what rules they would like to see implemented, and who should be responsible for these rules. The fifth question block allowed students to state whether they had specific questions for their lecturers, with common questions being integrated into the university staff interview guide. At the end of the interview, interviewees had the option to voice any additional comments not yet covered by the questions. Students were acquired through lectures, university-wide communication channels, and personal contacts. For the university’s perspective, both administrative and teaching staff were contacted and asked for their participation. We attempted a balanced distribution of gender, study fields, and academic experience for participants. Regarding university employees, we focused on full-time teaching staff (i.e., lecturers and professors) as well as administrative employees who are responsible for planning and evaluating teaching (e.g., staff of the university didactics department).
Preliminary results reveal individual differences amongst both groups, while the general tone implies many similarities: Interviewees agree that GAI tools force universities to rethink written assignments and possibly shift to more oral exams. GAI offers many chances for more interactive teaching, with many interviewees stressing that GAI tools can especially help weaker students. Some lecturers reject GAI tools (both their own and students’ use), while many others actively assess the impact of such tools on their courses, and some even create interactive studying companions, trained on their course materials. Many students actively use or have used such tools, with few rejecting their use. Most interviewees agree that their respective universities should clearly state which uses of GAI are and are not permitted, while also agreeing that implementing a ban is neither sensible nor possible. We aim to use these findings to aid universities in understanding their members’ preferences and suggest possible guidelines for safe GAI use.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2023
Mirbabaie, Milad; Marx, Julian; Erle, Lukas
Digital Nudge Stacking and Backfiring: Understanding Sustainable E-Commerce Purchase Decisions Journal Article
In: Pacific Asia Journal of the Association for Information Systems (PAJAIS), vol. 15, iss. 3, 2023.
@article{nudgestacking2023,
title = {Digital Nudge Stacking and Backfiring: Understanding Sustainable E-Commerce Purchase Decisions},
author = {Milad Mirbabaie and Julian Marx and Lukas Erle},
url = {https://aisel.aisnet.org/pajais/vol15/iss3/3},
doi = {10.17705/1pais.15303},
year = {2023},
date = {2023-09-27},
urldate = {2023-09-27},
journal = {Pacific Asia Journal of the Association for Information Systems (PAJAIS)},
volume = {15},
issue = {3},
abstract = {Background: The consumption of ‘fast fashion’, which is expedited by cost-effective e-commerce systems, represents one of the major factors contributing to the acceleration of climate change. An emerging approach to steer consumers in the direction of more sustainable purchase decisions is digital nudging. This paper explores digital nudging in the context of green fashion e-commerce by testing the effectiveness of two nudging strategies on the decision to choose green fashion products (GFP) over regular fashion items.
Method: This study was conducted as a between-subject online experiment (n=320) with four conditions simulating an e-commerce scenario. The participants were presented with different products: one was ecologically friendly, and another was the regular option. Depending on their randomized group allocation, the participants experienced a default nudge, a social norm nudge, a combination of both strategies, or no nudge. In addition, we conducted 10 qualitative interviews to gain a deeper understanding of consumers’ decision process.
Results: Our experiment failed to demonstrate statistically significant relationships between the various nudging strategies and GFP purchase decisions. However, additional explorative analyses confirmed a backfire effect for the combination of nudging strategies. This reveals the previously overlooked influence of participants’ identification on the effectiveness of digital nudging strategies. In addition, qualitative interviews revealed individual factors that influence sustainable e-commerce purchase decisions.
Conclusion: This study contributes to information systems research by explaining the differences in the effectiveness of different nudging strategies regarding high-involvement compared to low-involvement products. Moreover, it provides empirical evidence of a backfire effect resulting from a combination of digital nudging strategies (i.e., digital nudge stacking). Finally, the study underscores the leverage that individual factors have on both GFP purchase decision and the effectiveness of nudges.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hofeditz, Lennart; Erle, Lukas; Timm, Lara; Mirbabaie, Milad
How Virtuous are Virtual Influencers? – A Qualitative Analysis of Virtual Actors’ Virtues on Instagram Conference
Proceedings of the 56th Hawaii International Conference on System Sciences (HICSS), 2023.
@conference{virtualinfluencers2023,
title = {How Virtuous are Virtual Influencers? – A Qualitative Analysis of Virtual Actors’ Virtues on Instagram},
author = {Lennart Hofeditz and Lukas Erle and Lara Timm and Milad Mirbabaie},
url = {https://hdl.handle.net/10125/103051},
doi = {10.24251/HICSS.2023.420},
year = {2023},
date = {2023-01-06},
urldate = {2023-01-06},
booktitle = {Proceedings of the 56th Hawaii International Conference on System Sciences (HICSS)},
abstract = {Recently, virtual influencers (VIs) have become a more frequent alternative to human influencers (HIs). VIs can be described as non-human agents who behave in a human-like pattern. Big enterprises such as Prada, Porsche, Samsung, or Ikea have already collaborated with VIs in the past. Even though it should be clear to users that VIs cannot practice values and virtues in the real world, VIs seem to express certain virtues. This research paper focuses on identifying virtues conveyed by VIs and the effect of expressing virtues on follower engagement by conducting a qualitative content analysis of social media posts. Furthermore, we checked on VIs being abused by companies to convey a more favorable image. Our findings suggest that conveying certain virtues seems to have a positive effect on the engagement. In addition, some VIs were used by companies for virtue signaling without being noticed by their followers.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2022
Hofeditz, Lennart; Mirbabaie, Milad; Erle, Lukas; Knoßalla, Eileen; Timm, Lara
Automating Crisis Communication in Public Institutions – Towards Ethical Conversational Agents That Support Trust Management Conference
Wirtschaftsinformatik 2022 Proceedings, vol. 3, 2022.
@conference{crisiscommunication2022,
title = {Automating Crisis Communication in Public Institutions - Towards Ethical Conversational Agents That Support Trust Management},
author = {Lennart Hofeditz and Milad Mirbabaie and Lukas Erle and Eileen Knoßalla and Lara Timm},
url = {https://aisel.aisnet.org/cgi/viewcontent.cgi?article=1077&context=wi2022},
year = {2022},
date = {2022-02-28},
urldate = {2022-02-28},
booktitle = {Wirtschaftsinformatik 2022 Proceedings},
volume = {3},
abstract = {To improve disaster relief and crisis communication, public institutions (PIs) such as administrations rely on automation and technology. As one example, the use of conversational agents (CAs) has increased. To ensure that information and advisories are taken up seriously, it is important for PIs to be perceived as a trusted source and a trustworthy point of contact. In this study, we therefore examine how CAs can be applied by PIs to, on the one hand, automate their crisis communication and, on the other hand, maintain or even increase their perceived trustworthiness. We developed two CAs – one equipped with ethical cues in order to be perceived as more trustworthy and one without such cues – and started to conduct an online experiment to evaluate the effects. Our first results indicate that applying ethical principles such as fairness, transparency, security and accountability has a positive effect on the perceived trustworthiness of the CA.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Forthcoming
Finkel, Marcel; Erle, Lukas; Timm, Lara; Arntz, Alexander; Helgert, André; Straßmann, Carolin; Eimler, Sabrina C.
Need Satisfaction in Human-Robot Interaction in Public Spaces – A Positive Computing Approach Journal Article Forthcoming
In: International Journal of Social Robotics, Forthcoming.
@article{needsatisfactionhri,
title = {Need Satisfaction in Human-Robot Interaction in Public Spaces – A Positive Computing Approach},
author = {Marcel Finkel and Lukas Erle and Lara Timm and Alexander Arntz and André Helgert and Carolin Straßmann and Sabrina C. Eimler},
journal = {International Journal of Social Robotics},
keywords = {},
pubstate = {forthcoming},
tppubtype = {article}
}
Eimler, Sabrina C.; Erle, Lukas; Straßmann, Carolin
Gender, Embodiment, and Recommendation Timing in Robot-Supported Decision-Making Processes Journal Article Forthcoming
In: Computers in Human Behavior: Artificial Humans, Forthcoming.
@article{recommendationtiming,
title = {Gender, Embodiment, and Recommendation Timing in Robot-Supported Decision-Making Processes},
author = {Sabrina C. Eimler and Lukas Erle and Carolin Straßmann},
journal = {Computers in Human Behavior: Artificial Humans},
keywords = {},
pubstate = {forthcoming},
tppubtype = {article}
}