Commit b5d5a34 (1 parent: 0bf72c5)
Showing 4 changed files with 42 additions and 36 deletions.
@@ -1,16 +1,19 @@
-@inproceedings{bleick-etal-2024-german,
-abstract = {},
-address = {Tokyo, Japan},
-author = {Bleick, Maximilian and
-Feldhus, Nils and
-Burchardt, Aljoscha and
-M\{"o}ller, Sebastian},
-booktitle = {Proceedings of the 17th International Natural Language Generation Conference},
-doi = {},
-month = {September},
-pages = {},
-publisher = {Association for Computational Linguistics},
-title = {German Voter Personas can Radicalize LLM Chatbots via the Echo Chamber Effect},
-url = {},
-year = {2024}
+@inproceedings{bleick-etal-2024-german-voter,
+title = "{G}erman Voter Personas Can Radicalize {LLM} Chatbots via the Echo Chamber Effect",
+author = {Bleick, Maximilian and
+Feldhus, Nils and
+Burchardt, Aljoscha and
+M{\"o}ller, Sebastian},
+editor = "Mahamood, Saad and
+Minh, Nguyen Le and
+Ippolito, Daphne",
+booktitle = "Proceedings of the 17th International Natural Language Generation Conference",
+month = sep,
+year = "2024",
+address = "Tokyo, Japan",
+publisher = "Association for Computational Linguistics",
+url = "https://aclanthology.org/2024.inlg-main.13",
+pages = "153--164",
+abstract = {We investigate the impact of LLMs on political discourse with a particular focus on the influence of generated personas on model responses. We find an echo chamber effect from LLM chatbots when provided with German-language biographical information of politicians and voters in German politics, leading to sycophantic responses and the reinforcement of existing political biases. Findings reveal that personas of certain political party, such as those of the {`}Alternative f{\"u}r Deutschland{'} party, exert a stronger influence on LLMs, potentially amplifying extremist views. Unlike prior studies, we cannot corroborate a tendency for larger models to exert stronger sycophantic behaviour. We propose that further development should aim at reducing sycophantic behaviour in LLMs across all sizes and diversifying language capabilities in LLMs to enhance inclusivity.},
}
content/publication/gabryszak-etal-2024-enhancing/cite.bib (35 changes: 19 additions & 16 deletions)
@@ -1,17 +1,20 @@
-@inproceedings{gabryszak-etal-2024-enhancing,
-abstract = {In this paper, we investigate the use of large language models (LLMs) to enhance the editorial process of rewriting customer help pages. We introduce a German-language dataset comprising Frequently Asked Question-Answer pairs, presenting both raw drafts and their revisions by professional editors. On this dataset, we evaluate the performance of four large language models (LLM) through diverse prompts tailored for the rewriting task. We conduct automatic evaluations of content and text quality using ROUGE, BERTScore, and ChatGPT. Furthermore, we let professional editors assess the helpfulness of automatically generated FAQ revisions for editorial enhancement. Our findings indicate that LLMs can produce FAQ reformulations beneficial to the editorial process. We observe minimal performance discrepancies among LLMs for this task, and our survey on helpfulness underscores the subjective nature of editors' perspectives on editorial refinement.},
-address = {Tokyo, Japan},
-author = {Gabryszak, Aleksandra and
-R\{"o}der, Daniel and
-Binder, Arne and
-Sion, Luca and
-Hennig, Leonhard},
-booktitle = {Proceedings of the 17th International Natural Language Generation Conference},
-doi = {},
-month = {September},
-pages = {},
-publisher = {Association for Computational Linguistics},
-title = {Enhancing Editorial Tasks: A Case Study on Rewriting Customer Help Page Contents Using Large Language Models},
-url = {},
-year = {2024}
+@inproceedings{gabryszak-etal-2024-enhancing-editorial,
+title = "Enhancing Editorial Tasks: A Case Study on Rewriting Customer Help Page Contents Using Large Language Models",
+author = {Gabryszak, Aleksandra and
+R{\"o}der, Daniel and
+Binder, Arne and
+Sion, Luca and
+Hennig, Leonhard},
+editor = "Mahamood, Saad and
+Minh, Nguyen Le and
+Ippolito, Daphne",
+booktitle = "Proceedings of the 17th International Natural Language Generation Conference",
+month = sep,
+year = "2024",
+address = "Tokyo, Japan",
+publisher = "Association for Computational Linguistics",
+url = "https://aclanthology.org/2024.inlg-main.33",
+pages = "402--411",
+abstract = "In this paper, we investigate the use of large language models (LLMs) to enhance the editorial process of rewriting customer help pages. We introduce a German-language dataset comprising Frequently Asked Question-Answer pairs, presenting both raw drafts and their revisions by professional editors. On this dataset, we evaluate the performance of four large language models (LLM) through diverse prompts tailored for the rewriting task. We conduct automatic evaluations of content and text quality using ROUGE, BERTScore, and ChatGPT. Furthermore, we let professional editors assess the helpfulness of automatically generated FAQ revisions for editorial enhancement. Our findings indicate that LLMs can produce FAQ reformulations beneficial to the editorial process. We observe minimal performance discrepancies among LLMs for this task, and our survey on helpfulness underscores the subjective nature of editors{'} perspectives on editorial refinement.",
}
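
Aside (not part of the commit): both rewritten entries follow the ACL Anthology BibTeX export, which brace-protects umlauts (M{\"o}ller, R{\"o}der) instead of the malformed M\{"o}ller / R\{"o}der forms, uses BibTeX's predefined month macro (month = sep), and fills in the previously empty url and pages fields. A minimal, hypothetical LaTeX sketch of how one of the updated entries could be cited with biblatex; the file name references.bib and the biblatex setup are assumptions, not part of this repository:

% Hypothetical usage sketch, not from this repository: assumes the updated
% bleick-etal-2024-german-voter entry has been copied into references.bib.
\documentclass{article}
\usepackage[backend=biber, style=authoryear]{biblatex}
\addbibresource{references.bib}

\begin{document}
% The style resolves month = sep; {\"o} in the .bib file prints as an o-umlaut.
Echo chamber effects in LLM chatbots are reported by
\textcite{bleick-etal-2024-german-voter}.
\printbibliography
\end{document}

Compiling with latexmk -pdf runs biber automatically and produces the formatted reference.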