2020
|
Ryzhova, Margarita; Demberg, Vera Processing particularized pragmatic inferences under load Inproceedings Proceedings of the 42nd Annual Meeting of the Cognitive Science Society (CogSci 2020), 2020. Abstract | Links | BibTeX @inproceedings{Ryzhova2020,
title = {Processing particularized pragmatic inferences under load},
author = {Margarita Ryzhova and Vera Demberg},
url = {http://www.sfb1102.uni-saarland.de/wp/wp-content/uploads/2020/08/cogsci20_proceedings_ryzhova.pdf},
internal-note = {NOTE(review): secondary link moved out of the url field (one URL per field): https://cognitivesciencesociety.org/cogsci-2020/},
year = {2020},
booktitle = {Proceedings of the 42nd Annual Meeting of the Cognitive Science Society (CogSci 2020)},
abstract = {A long-standing question in language understanding is whether pragmatic inferences are effortful or whether they happen seamlessly without measurable cognitive effort. We here measure the strength of particularized pragmatic inferences in a setting with high vs. low cognitive load. Cognitive load is induced by a secondary dot tracking task. If this type of pragmatic inference comes at no cognitive processing cost, inferences should be similarly strong in both the high and the low load condition. If they are effortful, we expect a smaller effect size in the dual tasking condition. Our results show that participants who have difficulty in dual tasking (as evidenced by incorrect answers to comprehension questions) exhibit a smaller pragmatic effect when they were distracted with a secondary task in comparison to the single task condition. This finding supports the idea that pragmatic inferences are effortful.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
A long-standing question in language understanding is whether pragmatic inferences are effortful or whether they happen seamlessly without measurable cognitive effort. We here measure the strength of particularized pragmatic inferences in a setting with high vs. low cognitive load. Cognitive load is induced by a secondary dot tracking task. If this type of pragmatic inference comes at no cognitive processing cost, inferences should be similarly strong in both the high and the low load condition. If they are effortful, we expect a smaller effect size in the dual tasking condition. Our results show that participants who have difficulty in dual tasking (as evidenced by incorrect answers to comprehension questions) exhibit a smaller pragmatic effect when they were distracted with a secondary task in comparison to the single task condition. This finding supports the idea that pragmatic inferences are effortful. |
2019
|
Zhai, Fangzhou; Demberg, Vera; Shkadzko, Pavel; Shi, Wei; Sayeed, Asad A Hybrid Model for Globally Coherent Story Generation Inproceedings Proceedings of the Second Workshop on Storytelling, Association for Computational Linguistics, Florence, 2019. BibTeX @inproceedings{Fangzhou2019,
title = {A Hybrid Model for Globally Coherent Story Generation},
author = {Fangzhou Zhai and Vera Demberg and Pavel Shkadzko and Wei Shi and Asad Sayeed},
year = {2019},
booktitle = {Proceedings of the Second Workshop on Storytelling},
publisher = {Association for Computational Linguistics},
address = {Florence},
internal-note = {NOTE(review): apparent duplicate of entry zhai2019hybrid (same title, authors, booktitle); consider merging},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Ostermann, Simon; Roth, Michael; Pinkal, Manfred MCScript2.0: A Machine Comprehension Corpus Focused on Script Events and Participants Inproceedings Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (* SEM 2019), pp. 103-117, 2019. Links | BibTeX @inproceedings{ostermann2019mcscript2,
title = {{MCScript2.0}: A Machine Comprehension Corpus Focused on Script Events and Participants},
author = {Simon Ostermann and Michael Roth and Manfred Pinkal},
url = {https://www.aclweb.org/anthology/S19-1012},
year = {2019},
booktitle = {Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*SEM 2019)},
pages = {103--117},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Zhai, Fangzhou; Demberg, Vera; Shkadzko, Pavel; Shi, Wei; Sayeed, Asad A Hybrid Model for Globally Coherent Story Generation Inproceedings Proceedings of the Second Workshop on Storytelling, pp. 34-45, 2019. BibTeX @inproceedings{zhai2019hybrid,
title = {A Hybrid Model for Globally Coherent Story Generation},
author = {Fangzhou Zhai and Vera Demberg and Pavel Shkadzko and Wei Shi and Asad Sayeed},
year = {2019},
booktitle = {Proceedings of the Second Workshop on Storytelling},
pages = {34--45},
internal-note = {NOTE(review): apparent duplicate of entry Fangzhou2019 (same title, authors, booktitle); consider merging},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Wanzare, Lilian D A Script acquisition : a crowdsourcing and text mining approach PhD Thesis Saarland University, 2019. Abstract | Links | BibTeX @phdthesis{Wanzare2019,
title = {Script acquisition: a crowdsourcing and text mining approach},
author = {Lilian D. A. Wanzare},
editor = {Manfred Pinkal},
internal-note = {NOTE(review): Pinkal is the academic supervisor ("Akademische Betreuung"), not an editor in the bibliographic sense; role annotation removed from the editor field},
url = {http://nbn-resolving.de/urn:nbn:de:bsz:291--ds-301634},
doi = {10.22028/D291-30163},
year = {2019},
address = {Saarbr\"{u}cken},
school = {Saarland University},
abstract = {According to Grice’s (1975) theory of pragmatics, people tend to omit basic information when participating in a conversation (or writing a narrative) under the assumption that left out details are already known or can be inferred from commonsense knowledge by the hearer (or reader). Writing and understanding of texts makes particular use of a specific kind of common-sense knowledge, referred to as script knowledge. Schank and Abelson (1977) proposed Scripts as a model of human knowledge represented in memory that stores the frequent habitual activities, called scenarios, (e.g. eating in a fast food restaurant, etc.), and the different courses of action in those routines.
This thesis addresses measures to provide a sound empirical basis for high-quality script models. We work on three key areas related to script modeling: script knowledge acquisition, script induction and script identification in text. We extend the existing repository of script knowledge bases in two different ways. First, we crowdsource a corpus of 40 scenarios with 100 event sequence descriptions (ESDs) each, thus going beyond the size of previous script collections. Second, the corpus is enriched with partial alignments of ESDs, done by human annotators. The crowdsourced partial alignments are used as prior knowledge to guide the semi-supervised script-induction algorithm proposed in this dissertation. We further present a semi-supervised clustering approach to induce script structure from crowdsourced descriptions of event sequences by grouping event descriptions into paraphrase sets and inducing their temporal order. The proposed semi-supervised clustering model better handles order variation in scripts and extends script representation formalism, Temporal Script graphs, by incorporating "arbitrary order" equivalence classes in order to allow for the flexible event order inherent in scripts.
In the third part of this dissertation, we introduce the task of scenario detection, in which we identify references to scripts in narrative texts. We curate a benchmark dataset of annotated narrative texts, with segments labeled according to the scripts they instantiate. The dataset is the first of its kind. The analysis of the annotation shows that one can identify scenario references in text with reasonable reliability. Subsequently, we propose a benchmark model that automatically segments and identifies text fragments referring to given scenarios. The proposed model achieved promising results, and therefore opens up research on script parsing and wide coverage script acquisition.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
According to Grice’s (1975) theory of pragmatics, people tend to omit basic information when participating in a conversation (or writing a narrative) under the assumption that left out details are already known or can be inferred from commonsense knowledge by the hearer (or reader). Writing and understanding of texts makes particular use of a specific kind of common-sense knowledge, referred to as script knowledge. Schank and Abelson (1977) proposed Scripts as a model of human knowledge represented in memory that stores the frequent habitual activities, called scenarios, (e.g. eating in a fast food restaurant, etc.), and the different courses of action in those routines.
This thesis addresses measures to provide a sound empirical basis for high-quality script models. We work on three key areas related to script modeling: script knowledge acquisition, script induction and script identification in text. We extend the existing repository of script knowledge bases in two different ways. First, we crowdsource a corpus of 40 scenarios with 100 event sequence descriptions (ESDs) each, thus going beyond the size of previous script collections. Second, the corpus is enriched with partial alignments of ESDs, done by human annotators. The crowdsourced partial alignments are used as prior knowledge to guide the semi-supervised script-induction algorithm proposed in this dissertation. We further present a semi-supervised clustering approach to induce script structure from crowdsourced descriptions of event sequences by grouping event descriptions into paraphrase sets and inducing their temporal order. The proposed semi-supervised clustering model better handles order variation in scripts and extends script representation formalism, Temporal Script graphs, by incorporating "arbitrary order" equivalence classes in order to allow for the flexible event order inherent in scripts.
In the third part of this dissertation, we introduce the task of scenario detection, in which we identify references to scripts in narrative texts. We curate a benchmark dataset of annotated narrative texts, with segments labeled according to the scripts they instantiate. The dataset is the first of its kind. The analysis of the annotation shows that one can identify scenario references in text with reasonable reliability. Subsequently, we propose a benchmark model that automatically segments and identifies text fragments referring to given scenarios. The proposed model achieved promising results, and therefore opens up research on script parsing and wide coverage script acquisition. |
Ostermann, Simon Script knowledge for natural language understanding PhD Thesis Saarland University, 2019. Abstract | Links | BibTeX @phdthesis{Ostermann2019,
title = {Script knowledge for natural language understanding},
author = {Simon Ostermann},
editor = {Manfred Pinkal},
internal-note = {NOTE(review): Pinkal is the academic supervisor ("Akademische Betreuung"), not an editor in the bibliographic sense; role annotation removed from the editor field},
url = {http://nbn-resolving.de/urn:nbn:de:bsz:291--ds-313016},
doi = {10.22028/D291-31301},
year = {2019},
address = {Saarbr\"{u}cken},
school = {Saarland University},
abstract = {While people process text, they make frequent use of information that is assumed to be common ground and left implicit in the text. One important type of such commonsense knowledge is script knowledge, which is the knowledge about the events and participants in everyday activities such as visiting a restaurant. Due to its implicitness, it is hard for machines to exploit such script knowledge for natural language processing (NLP). This dissertation addresses the role of script knowledge in a central field of NLP, natural language understanding (NLU). In the first part of this thesis, we address script parsing. The idea of script parsing is to align event and participant mentions in a text with an underlying script representation. This makes it possible for a system to leverage script knowledge for downstream tasks. We develop the first script parsing model for events that can be trained on a large scale on crowdsourced script data. The model is implemented as a linear-chain conditional random field and trained on sequences of short event descriptions, implicitly exploiting the inherent event ordering information. We show that this ordering information plays a crucial role for script parsing. Our model provides an important first step towards facilitating the use of script knowledge for NLU. In the second part of the thesis, we move our focus to an actual application in the area of NLU, i.e. machine comprehension. For the first time, we provide data sets for the systematic evaluation of the contribution of script knowledge for machine comprehension. We create MCScript, a corpus of narrations about everyday activities and questions on the texts. By collecting questions based on a scenario rather than a text, we aimed at creating challenging questions which require script knowledge for finding the correct answer.
Based on the findings of a shared task carried out with the data set, which indicated that script knowledge is not relevant for good performance on our corpus, we revised the data collection process and created a second version of the data set.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
While people process text, they make frequent use of information that is assumed to be common ground and left implicit in the text. One important type of such commonsense knowledge is script knowledge, which is the knowledge about the events and participants in everyday activities such as visiting a restaurant. Due to its implicitness, it is hard for machines to exploit such script knowledge for natural language processing (NLP). This dissertation addresses the role of script knowledge in a central field of NLP, natural language understanding (NLU). In the first part of this thesis, we address script parsing. The idea of script parsing is to align event and participant mentions in a text with an underlying script representation. This makes it possible for a system to leverage script knowledge for downstream tasks. We develop the first script parsing model for events that can be trained on a large scale on crowdsourced script data. The model is implemented as a linear-chain conditional random field and trained on sequences of short event descriptions, implicitly exploiting the inherent event ordering information. We show that this ordering information plays a crucial role for script parsing. Our model provides an important first step towards facilitating the use of script knowledge for NLU. In the second part of the thesis, we move our focus to an actual application in the area of NLU, i.e. machine comprehension. For the first time, we provide data sets for the systematic evaluation of the contribution of script knowledge for machine comprehension. We create MCScript, a corpus of narrations about everyday activities and questions on the texts. By collecting questions based on a scenario rather than a text, we aimed at creating challenging questions which require script knowledge for finding the correct answer. 
Based on the findings of a shared task carried out with the data set, which indicated that script knowledge is not relevant for good performance on our corpus, we revised the data collection process and created a second version of the data set. |
2018
|
Ostermann, Simon; Roth, Michael; Modi, Ashutosh; Thater, Stefan; Pinkal, Manfred SemEval-2018 Task 11: Machine Comprehension using Commonsense Knowledge Inproceedings Proceedings of International Workshop on Semantic Evaluation
, New Orleans, LA, USA, 2018. BibTeX @inproceedings{SemEval2018Task11,
title = {{SemEval}-2018 Task 11: Machine Comprehension using Commonsense Knowledge},
author = {Simon Ostermann and Michael Roth and Ashutosh Modi and Stefan Thater and Manfred Pinkal},
year = {2018},
booktitle = {Proceedings of International Workshop on Semantic Evaluation},
address = {New Orleans, LA, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Ostermann, Simon; Modi, Ashutosh; Roth, Michael; Thater, Stefan; Pinkal, Manfred MCScript: A Novel Dataset for Assessing Machine Comprehension Using Script Knowledge Inproceedings Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018)
, Miyazaki, Japan, 2018. BibTeX @inproceedings{MCScript,
title = {{MCScript}: A Novel Dataset for Assessing Machine Comprehension Using Script Knowledge},
author = {Simon Ostermann and Ashutosh Modi and Michael Roth and Stefan Thater and Manfred Pinkal},
year = {2018},
booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018)},
address = {Miyazaki, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Ostermann, Simon; Seitz, Hannah; Thater, Stefan; Pinkal, Manfred Mapping Text to Scripts: An Entailment Study Inproceedings Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018)
, Miyazaki, Japan, 2018. BibTeX @inproceedings{MCScriptb,
title = {Mapping Text to Scripts: An Entailment Study},
author = {Simon Ostermann and Hannah Seitz and Stefan Thater and Manfred Pinkal},
year = {2018},
booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018)},
address = {Miyazaki, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Hong, Xudong; Sayeed, Asad; Demberg, Vera Learning Distributed Event Representations with a Multi-Task Approach Inproceedings 7th Joint Conference on Lexical and Computational Semantics (SEM 2018), New Orleans, USA, 2018. Links | BibTeX @inproceedings{Hong2018,
title = {Learning Distributed Event Representations with a Multi-Task Approach},
author = {Xudong Hong and Asad Sayeed and Vera Demberg},
url = {http://www.aclweb.org/anthology/S18-2002},
doi = {10.18653/v1/S18-2002},
year = {2018},
booktitle = {7th Joint Conference on Lexical and Computational Semantics (SEM 2018)},
address = {New Orleans, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
2017
|
Nguyen, Dai Quoc; Nguyen, Dat Quoc; Chu, Cuong Xuan; Thater, Stefan; Pinkal, Manfred Sequence to Sequence Learning for Event Prediction Inproceedings Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pp. 37-42, Asian Federation of Natural Language Processing, Taipei, Taiwan, 2017. Abstract | Links | BibTeX @inproceedings{nguyen-EtAl:2017:I17-2,
title = {Sequence to Sequence Learning for Event Prediction},
author = {Dai Quoc Nguyen and Dat Quoc Nguyen and Cuong Xuan Chu and Stefan Thater and Manfred Pinkal},
url = {http://www.aclweb.org/anthology/I17-2007},
year = {2017},
month = nov,
booktitle = {Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 2: Short Papers)},
pages = {37--42},
publisher = {Asian Federation of Natural Language Processing},
address = {Taipei, Taiwan},
abstract = {This paper presents an approach to the task of predicting an event description from a preceding sentence in a text. Our approach explores sequence-to-sequence learning using a bidirectional multi-layer recurrent neural network. Our approach substantially outperforms previous work in terms of the BLEU score on two datasets derived from WikiHow and DeScript respectively. Since the BLEU score is not easy to interpret as a measure of event prediction, we complement our study with a second evaluation that exploits the rich linguistic annotation of gold paraphrase sets of events.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper presents an approach to the task of predicting an event description from a preceding sentence in a text. Our approach explores sequence-to-sequence learning using a bidirectional multi-layer recurrent neural network. Our approach substantially outperforms previous work in terms of the BLEU score on two datasets derived from WikiHow and DeScript respectively. Since the BLEU score is not easy to interpret as a measure of event prediction, we complement our study with a second evaluation that exploits the rich linguistic annotation of gold paraphrase sets of events. |
Nguyen, Dai Quoc; Nguyen, Dat Quoc; Modi, Ashutosh; Thater, Stefan; Pinkal, Manfred A Mixture Model for Learning Multi-Sense Word Embeddings Inproceedings Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017), pp. 121-127, Association for Computational Linguistics, Vancouver, Canada, 2017. Abstract | Links | BibTeX @inproceedings{nguyen-EtAl:2017:starSEM,
title = {A Mixture Model for Learning Multi-Sense Word Embeddings},
author = {Dai Quoc Nguyen and Dat Quoc Nguyen and Ashutosh Modi and Stefan Thater and Manfred Pinkal},
url = {http://www.aclweb.org/anthology/S17-1015},
year = {2017},
month = aug,
booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
pages = {121--127},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Word embeddings are now a standard technique for inducing meaning representations for words. For getting good representations, it is important to take into account different senses of a word. In this paper, we propose a mixture model for learning multi-sense word embeddings. Our model generalizes the previous works in that it allows to induce different weights of different senses of a word. The experimental results show that our model outperforms previous models on standard evaluation tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Word embeddings are now a standard technique for inducing meaning representations for words. For getting good representations, it is important to take into account different senses of a word. In this paper, we propose a mixture model for learning multi-sense word embeddings. Our model generalizes the previous works in that it allows to induce different weights of different senses of a word. The experimental results show that our model outperforms previous models on standard evaluation tasks. |
Ostermann, Simon; Roth, Michael; Thater, Stefan; Pinkal, Manfred Aligning Script Events with Narrative Texts Inproceedings Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017), pp. 128-134, Association for Computational Linguistics, Vancouver, Canada, 2017. Abstract | Links | BibTeX @inproceedings{ostermann-EtAl:2017:starSEM,
title = {Aligning Script Events with Narrative Texts},
author = {Simon Ostermann and Michael Roth and Stefan Thater and Manfred Pinkal},
url = {http://www.aclweb.org/anthology/S17-1016},
year = {2017},
month = aug,
booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
pages = {128--134},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Script knowledge plays a central role in text understanding and is relevant for a variety of downstream tasks. In this paper, we consider two recent datasets which provide a rich and general representation of script events in terms of paraphrase sets. We introduce the task of mapping event mentions in narrative texts to such script event types, and present a model for this task that exploits rich linguistic representations as well as information on temporal ordering. The results of our experiments demonstrate that this complex task is indeed feasible.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Script knowledge plays a central role in text understanding and is relevant for a variety of downstream tasks. In this paper, we consider two recent datasets which provide a rich and general representation of script events in terms of paraphrase sets. We introduce the task of mapping event mentions in narrative texts to such script event types, and present a model for this task that exploits rich linguistic representations as well as information on temporal ordering. The results of our experiments demonstrate that this complex task is indeed feasible. |
2016
|
Zarcone, Alessandra; van Schijndel, Marten; Vogels, Jorrig; Demberg, Vera Salience and attention in surprisal-based accounts of language processing Journal Article Frontiers in Psychology, 7 (844), 2016, ISSN: 1664-1078. Links | BibTeX @article{10.3389/fpsyg.2016.00844,
title = {Salience and attention in surprisal-based accounts of language processing},
author = {Alessandra Zarcone and Marten van Schijndel and Jorrig Vogels and Vera Demberg},
url = {http://www.frontiersin.org/language_sciences/10.3389/fpsyg.2016.00844/abstract},
doi = {10.3389/fpsyg.2016.00844},
issn = {1664-1078},
year = {2016},
journal = {Frontiers in Psychology},
volume = {7},
number = {844},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|
Modi, Ashutosh; Anikina, Tatjana; Ostermann, Simon; Pinkal, Manfred InScript: Narrative texts annotated with script information Inproceedings Calzolari, Nicoletta; Choukri, Khalid; Declerck, Thierry; Grobelnik, Marko; Maegaard, Bente; Mariani, Joseph; Moreno, Asuncion; Odijk, Jan; Piperidis, Stelios (Ed.): Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), European Language Resources Association (ELRA), Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1. Links | BibTeX @inproceedings{MODI16.352,
title = {{InScript}: Narrative texts annotated with script information},
author = {Ashutosh Modi and Tatjana Anikina and Simon Ostermann and Manfred Pinkal},
editor = {Nicoletta Calzolari and Khalid Choukri and Thierry Declerck and Marko Grobelnik and Bente Maegaard and Joseph Mariani and Asuncion Moreno and Jan Odijk and Stelios Piperidis},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/352_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
publisher = {European Language Resources Association (ELRA)},
address = {Portoro\v{z}, Slovenia},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Kravtchenko, Ekaterina; Demberg, Vera Informational status of redundant event mentions mediates pragmatic interpretation Miscellaneous Poster presented at Events in Language and Cognition workshop at 29th CUNY Conference on Human Sentence Processing, University of Florida, March 2016, 2016. BibTeX @misc{kravtchenko:CUNY2016,
title = {Informational status of redundant event mentions mediates pragmatic interpretation},
author = {Ekaterina Kravtchenko and Vera Demberg},
year = {2016},
howpublished = {Poster presented at Events in Language and Cognition workshop at 29th CUNY Conference on Human Sentence Processing, University of Florida, March 2016},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
|
Modi, Ashutosh Event Embeddings for Semantic Script Modeling Inproceedings Proceedings of the Conference on Computational Natural Language Learning (CoNLL), Berlin, Germany, 2016. BibTeX @inproceedings{modi:CONLL2016,
title = {Event Embeddings for Semantic Script Modeling},
author = {Ashutosh Modi},
year = {2016},
booktitle = {Proceedings of the Conference on Computational Natural Language Learning (CoNLL)},
address = {Berlin, Germany},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Modi, Ashutosh; Titov, Ivan; Demberg, Vera; Sayeed, Asad; Pinkal, Manfred Modeling Semantic Expectations: Using Script Knowledge for Referent Prediction Journal Article Transactions of ACL, 2016. BibTeX @article{ashutoshTacl2016,
title = {Modeling Semantic Expectations: Using Script Knowledge for Referent Prediction},
author = {Ashutosh Modi and Ivan Titov and Vera Demberg and Asad Sayeed and Manfred Pinkal},
year = {2016},
journal = {Transactions of the Association for Computational Linguistics},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|
Tilk, Ottokar; Demberg, Vera; Sayeed, Asad; Klakow, Dietrich; Thater, Stefan Event participation modelling with neural networks Inproceedings Conference on Empirical Methods in Natural Language Processing, Austin, Texas, USA, 2016. BibTeX @inproceedings{Tilk2016,
title = {Event participation modelling with neural networks},
author = {Ottokar Tilk and Vera Demberg and Asad Sayeed and Dietrich Klakow and Stefan Thater},
year = {2016},
booktitle = {Conference on Empirical Methods in Natural Language Processing},
address = {Austin, Texas, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
2015
|
White, Michael; Howcroft, David M Inducing Clause-Combining Rules: A Case Study with the SPaRKy Restaurant Corpus Inproceedings Proc. of the 15th European Workshop on Natural Language Generation, Association for Computational Linguistics, Brighton, England, UK, 2015. Links | BibTeX @inproceedings{white:howcroft:enlg-2015,
title = {Inducing Clause-Combining Rules: A Case Study with the {SPaRKy} Restaurant Corpus},
author = {Michael White and David M. Howcroft},
url = {http://www.aclweb.org/anthology/W15-4704},
year = {2015},
booktitle = {Proceedings of the 15th European Workshop on Natural Language Generation},
publisher = {Association for Computational Linguistics},
address = {Brighton, England, UK},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Rudinger, Rachel; Demberg, Vera; Modi, Ashutosh; Durme, Benjamin Van; Pinkal, Manfred Learning to Predict Script Events from Domain-Specific Text Journal Article Lexical and Computational Semantics (* SEM 2015), pp. 205-210, 2015. Links | BibTeX @article{rudinger2015learning,
title = {Learning to Predict Script Events from Domain-Specific Text},
author = {Rachel Rudinger and Vera Demberg and Ashutosh Modi and Van Durme, Benjamin and Manfred Pinkal},
url = {http://www.aclweb.org/anthology/S15-1024},
year = {2015},
journal = {Lexical and Computational Semantics (*SEM 2015)},
pages = {205--210},
internal-note = {NOTE(review): *SEM is a conference, not a journal; consider @inproceedings with booktitle instead of @article/journal},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|
Batiukova, Olga; Bertinetto, Pier Marco; Lenci, Alessandro; Zarcone, Alessandra Identifying Actional Features Through Semantic Priming: Cross-Romance Comparison Incollection Taming the TAME systems. Cahiers Chronos 27, pp. 161-187, Rodopi, Amsterdam/Philadelphia, 2015. Links | BibTeX @incollection{batiukova2015identifying,
title = {Identifying Actional Features Through Semantic Priming: Cross-Romance Comparison},
author = {Olga Batiukova and Pier Marco Bertinetto and Alessandro Lenci and Alessandra Zarcone},
url = {https://www.academia.edu/20493945/Identifying_actional_features_through_semantic_priming_Cross-Romance_comparison},
year = {2015},
booktitle = {Taming the TAME systems. Cahiers Chronos 27},
pages = {161--187},
publisher = {Rodopi},
address = {Amsterdam/Philadelphia},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
|
Kravtchenko, Ekaterina; Demberg, Vera Semantically Underinformative Utterances Trigger Pragmatic Inferences Proceeding Annual Conference of the Cognitive Science Society, CogSci Mind, Technology, and Society Pasadena Convention Center, 2015. Links | BibTeX @inproceedings{kravtchenko:demberg,
title = {Semantically Underinformative Utterances Trigger Pragmatic Inferences},
author = {Ekaterina Kravtchenko and Vera Demberg},
url = {http://www.sfb1102.uni-saarland.de/wp/wp-content/uploads/2016/02/kravtchenko1.pdf},
year = {2015},
booktitle = {Annual Conference of the Cognitive Science Society (CogSci)},
address = {Pasadena Convention Center, Pasadena, USA},
internal-note = {NOTE(review): originally typed @proceedings, but this is a single paper at CogSci 2015 ("Mind, Technology, and Society" theme); retyped as @inproceedings — verify venue details},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Demberg, Vera; Hoffmann, Jörg; Howcroft, David M; Klakow, Dietrich; Torralba, Álvaro Search Challenges in Natural Language Generation with Complex Optimization Objectives Journal Article KI - Künstliche Intelligenz, 2015, ISSN: 1610-1987. Links | BibTeX @article{demberg:hoffmann:ki-2015,
title = {Search Challenges in Natural Language Generation with Complex Optimization Objectives},
author = {Vera Demberg and J\"{o}rg Hoffmann and David M. Howcroft and Dietrich Klakow and \'{A}lvaro Torralba},
url = {https://fai.cs.uni-saarland.de/hoffmann/papers/ki16.pdf},
issn = {1610-1987},
year = {2015},
journal = {KI - K\"{u}nstliche Intelligenz},
publisher = {Springer Berlin Heidelberg},
series = {Special Issue on Companion Technologies},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|
Zarcone, Alessandra; Padó, Sebastian; Lenci, Alessandro Same Same but Different: Type and Typicality in a Distributional Model of Complement Coercion Inproceedings Word Structure and Word Usage. Proceedings of the NetWordS Final Conference. Pisa, March 30-April 1, 2015, pp. 91-94, Pisa, Italy, 2015. Links | BibTeX @inproceedings{zarcone2015same,
title = {Same Same but Different: Type and Typicality in a Distributional Model of Complement Coercion},
author = {Alessandra Zarcone and Sebastian Pad\'{o} and Alessandro Lenci},
url = {http://ceur-ws.org/Vol-1347/paper19.pdf},
year = {2015},
booktitle = {Word Structure and Word Usage. Proceedings of the NetWordS Final Conference. Pisa, March 30-April 1, 2015},
pages = {91--94},
address = {Pisa, Italy},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|