MohammadHossein Rezaei and Eduardo Blanco. 2024. Paraphrasing in Affirmative Terms Improves Negation Understanding. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), Bangkok, Thailand. Association for Computational Linguistics. Accepted, To Appear.
@comment{Fixed: key was a duplicate of the SemEval-2024 entry below and named the
  wrong venue (this is an ACL 2024 main-conference paper, not SemEval); month now
  uses the standard macro; empty pages/url fields removed (they only trigger
  warnings); the "Accepted, To Appear" status moved into a note field.}
@inproceedings{rezaei-blanco-2024-paraphrasing,
  author    = {Rezaei, MohammadHossein and Blanco, Eduardo},
  title     = {Paraphrasing in Affirmative Terms Improves Negation Understanding},
  booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  note      = {Accepted, to appear},
  abstract  = {Negation is a common linguistic phenomenon. Yet language models face challenges with negation in many natural language understanding tasks such as question answering and natural language inference. In this paper, we experiment with seamless strategies that incorporate affirmative interpretations (i.e., paraphrases without negation) to make models more robust against negation. Crucially, our affirmative interpretations are obtained automatically. We show improvements with CondaQA, a large corpus requiring reasoning with negation, and five natural language understanding tasks.},
}
MohammadHossein Rezaei, Yeaeun Kwon, Reza Sanayei, Abhyuday Singh, and Steven Bethard. 2024. CLULab-UofA at SemEval-2024 Task 8: Detecting Machine-Generated Text Using Triplet-Loss-Trained Text Similarity and Text Classification. In Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024), pages 1509-1515, Mexico City, Mexico. Association for Computational Linguistics.
@comment{Fixed: month now uses the standard three-letter macro (quoted month names
  are not portable across styles); the CLULab-UofA and SemEval acronyms in the
  title are brace-protected so sentence-casing styles cannot lowercase them.}
@inproceedings{rezaei-EtAl:2024:SemEval2024,
  author    = {Rezaei, MohammadHossein and Kwon, Yeaeun and Sanayei, Reza and Singh, Abhyuday and Bethard, Steven},
  title     = {{CLULab-UofA} at {SemEval}-2024 Task 8: Detecting Machine-Generated Text Using Triplet-Loss-Trained Text Similarity and Text Classification},
  booktitle = {Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)},
  month     = jun,
  year      = {2024},
  address   = {Mexico City, Mexico},
  publisher = {Association for Computational Linguistics},
  pages     = {1509--1515},
  url       = {https://aclanthology.org/2024.semeval2024-1.213},
  abstract  = {Detecting machine-generated text is a critical task in the era of large language models. In this paper, we present our systems for SemEval-2024 Task 8, which focuses on multi-class classification to discern between human-written and maching-generated texts by five state-of-the-art large language models. We propose three different systems: unsupervised text similarity, triplet-loss-trained text similarity, and text classification. We show that the triplet-loss trained text similarity system outperforms the other systems, achieving 80\% accuracy on the test set and surpassing the baseline model for this subtask. Additionally, our text classification system, which takes into account sentence paraphrases generated by the candidate models, also outperforms the unsupervised text similarity system, achieving 74\% accuracy.},
}
Reza Sanayei, Abhyuday Singh, MohammadHossein Rezaei, and Steven Bethard. 2024. MARiA at SemEval-2024 Task 6: Hallucination Detection Through LLMs and MNLI and Cosine Similarity. In Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024), Mexico City, Mexico. Association for Computational Linguistics.
Zijie Wang, Md Hossain, Shivam Mathur, Terry Melo, Kadir Ozler, Keun Park, Jacob Quintero, MohammadHossein Rezaei, Shreya Shakya, Md Uddin, and Eduardo Blanco. 2023. Interpreting Indirect Answers to Yes-No Questions in Multiple Languages. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 2210-2227, Singapore. Association for Computational Linguistics.
@comment{ACL Anthology export reformatted to the file's conventional layout:
  one field per line, brace-delimited values, aligned equals signs. All field
  values are unchanged.}
@inproceedings{wang-etal-2023-interpreting,
  author    = {Wang, Zijie and Hossain, Md and Mathur, Shivam and Melo, Terry and Ozler, Kadir and Park, Keun and Quintero, Jacob and Rezaei, MohammadHossein and Shakya, Shreya and Uddin, Md and Blanco, Eduardo},
  editor    = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
  title     = {Interpreting Indirect Answers to Yes-No Questions in Multiple Languages},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2023},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  pages     = {2210--2227},
  doi       = {10.18653/v1/2023.findings-emnlp.146},
  url       = {https://aclanthology.org/2023.findings-emnlp.146},
  abstract  = {Yes-no questions expect a yes or no for an answer, but people often skip polar keywords. Instead, they answer with long explanations that must be interpreted. In this paper, we focus on this challenging problem and release new benchmarks in eight languages. We present a distant supervision approach to collect training data, and demonstrate that direct answers (i.e., with polar keywords) are useful to train models to interpret indirect answers (i.e., without polar keywords). We show that monolingual fine-tuning is beneficial if training data can be obtained via distant supervision for the language of interest (5 languages). Additionally, we show that cross-lingual fine-tuning is always beneficial (8 languages).},
}