2020
|
Mosbach, Marius; Degaetano-Ortlieb, Stefania; Krielke, Marie-Pauline; Abdullah, Badr M; Klakow, Dietrich A Closer Look at Linguistic Knowledge in Masked Language Models: The Case of Relative Clauses in American English Inproceedings Proceedings of the 28th International Conference on Computational Linguistics, pp. 771-787, 2020. Abstract | Links | BibTeX @inproceedings{Mosbach2020,
title = {A Closer Look at Linguistic Knowledge in Masked Language Models: The Case of Relative Clauses in American English},
author = {Marius Mosbach and Stefania Degaetano-Ortlieb and Marie-Pauline Krielke and Badr M. Abdullah and Dietrich Klakow},
url = {https://www.aclweb.org/anthology/2020.coling-main.67/
https://www.aclweb.org/anthology/2020.coling-main.67.pdf
},
year = {2020},
date = {2020-12-00},
booktitle = {Proceedings of the 28th International Conference on Computational Linguistics},
pages = {771--787},
abstract = {Transformer-based language models achieve high performance on various tasks, but we still lack understanding of the kind of linguistic knowledge they learn and rely on. We evaluate three models (BERT, RoBERTa, and ALBERT), testing their grammatical and semantic knowledge by sentence-level probing, diagnostic cases, and masked prediction tasks. We focus on relative clauses (in American English) as a complex phenomenon needing contextual information and antecedent identification to be resolved. Based on a naturalistic dataset, probing shows that all three models indeed capture linguistic knowledge about grammaticality, achieving high performance. Evaluation on diagnostic cases and masked prediction tasks considering fine-grained linguistic knowledge, however, shows pronounced model-specific weaknesses especially on semantic knowledge, strongly impacting models' performance. Our results highlight the importance of (a) model comparison in evaluation task and (b) building up claims of model performance and the linguistic knowledge they capture beyond purely probing-based evaluations.
},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Transformer-based language models achieve high performance on various tasks, but we still lack understanding of the kind of linguistic knowledge they learn and rely on. We evaluate three models (BERT, RoBERTa, and ALBERT), testing their grammatical and semantic knowledge by sentence-level probing, diagnostic cases, and masked prediction tasks. We focus on relative clauses (in American English) as a complex phenomenon needing contextual information and antecedent identification to be resolved. Based on a naturalistic dataset, probing shows that all three models indeed capture linguistic knowledge about grammaticality, achieving high performance. Evaluation on diagnostic cases and masked prediction tasks considering fine-grained linguistic knowledge, however, shows pronounced model-specific weaknesses especially on semantic knowledge, strongly impacting models’ performance. Our results highlight the importance of (a) model comparison in evaluation task and (b) building up claims of model performance and the linguistic knowledge they capture beyond purely probing-based evaluations.
|
Adelani*, David Ifeoluwa; Hedderich*, Michael A; Zhu*, Dawei; van den Berg, Esther; Klakow, Dietrich Distant Supervision and Noisy Label Learning for Low Resource Named Entity Recognition: A Study on Hausa and Yorùbá Miscellaneous https://arxiv.org/abs/2003.08370, 2020, (* equal contribution). Abstract | Links | BibTeX @misc{Adelani2020,
title = {Distant Supervision and Noisy Label Learning for Low Resource Named Entity Recognition: A Study on Hausa and Yor{\`u}b{\'a}},
author = {David Ifeoluwa Adelani* and Michael A. Hedderich* and Dawei Zhu* and Esther van den Berg and Dietrich Klakow},
url = {https://arxiv.org/abs/2003.08370
https://arxiv.org/pdf/2003.08370.pdf
https://africanlp-workshop.github.io/
https://pml4dc.github.io/iclr2020/},
year = {2020},
date = {2020-03-00},
abstract = {The lack of labeled training data has limited the development of natural language processing tools, such as named entity recognition, for many languages spoken in developing countries. Techniques such as distant and weak supervision can be used to create labeled data in a (semi-) automatic way. Additionally, to alleviate some of the negative effects of the errors in automatic annotation, noise-handling methods can be integrated. Pretrained word embeddings are another key component of most neural named entity classifiers. With the advent of more complex contextual word embeddings, an interesting trade-off between model size and performance arises. While these techniques have been shown to work well in high-resource settings, we want to study how they perform in low-resource scenarios. In this work, we perform named entity recognition for Hausa and Yor{\`u}b{\'a}, two languages that are widely spoken in several developing countries. We evaluate different embedding approaches and show that distant supervision can be successfully leveraged in a realistic low-resource scenario where it can more than double a classifier's performance.},
howpublished = {https://arxiv.org/abs/2003.08370},
note = {* equal contribution},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
The lack of labeled training data has limited the development of natural language processing tools, such as named entity recognition, for many languages spoken in developing countries. Techniques such as distant and weak supervision can be used to create labeled data in a (semi-) automatic way. Additionally, to alleviate some of the negative effects of the errors in automatic annotation, noise-handling methods can be integrated. Pretrained word embeddings are another key component of most neural named entity classifiers. With the advent of more complex contextual word embeddings, an interesting trade-off between model size and performance arises. While these techniques have been shown to work well in high-resource settings, we want to study how they perform in low-resource scenarios. In this work, we perform named entity recognition for Hausa and Yorùbá, two languages that are widely spoken in several developing countries. We evaluate different embedding approaches and show that distant supervision can be successfully leveraged in a realistic low-resource scenario where it can more than double a classifier's performance. |
2019
|
Lange, Lukas; Hedderich, Michael A; Klakow, Dietrich Feature-Dependent Confusion Matrices for Low-Resource NER Labeling with Noisy Labels Inproceedings Inui, Kentaro; Jiang, Jing; Ng, Vincent; Wan, Xiaojun (Ed.): Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3552-3557, Association for Computational Linguistics, Hong Kong, China, 2019. Abstract | Links | BibTeX @inproceedings{lange-etal-2019-feature,
title = {Feature-Dependent Confusion Matrices for Low-Resource NER Labeling with Noisy Labels},
author = {Lukas Lange and Michael A. Hedderich and Dietrich Klakow},
editor = {Kentaro Inui and Jing Jiang and Vincent Ng and Xiaojun Wan},
url = {https://www.aclweb.org/anthology/D19-1362/
https://github.com/uds-lsv/noise-matrix-ner},
doi = {10.18653/v1/D19-1362},
year = {2019},
date = {2019-11-00},
booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
pages = {3552--3557},
publisher = {Association for Computational Linguistics},
address = {Hong Kong, China},
abstract = {In low-resource settings, the performance of supervised labeling models can be improved with automatically annotated or distantly supervised data, which is cheap to create but often noisy. Previous works have shown that significant improvements can be reached by injecting information about the confusion between clean and noisy labels in this additional training data into the classifier training. However, for noise estimation, these approaches either do not take the input features (in our case word embeddings) into account, or they need to learn the noise modeling from scratch which can be difficult in a low-resource setting. We propose to cluster the training data using the input features and then compute different confusion matrices for each cluster. To the best of our knowledge, our approach is the first to leverage feature-dependent noise modeling with pre-initialized confusion matrices. We evaluate on low-resource named entity recognition settings in several languages, showing that our methods improve upon other confusion-matrix based methods by up to 9%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
In low-resource settings, the performance of supervised labeling models can be improved with automatically annotated or distantly supervised data, which is cheap to create but often noisy. Previous works have shown that significant improvements can be reached by injecting information about the confusion between clean and noisy labels in this additional training data into the classifier training. However, for noise estimation, these approaches either do not take the input features (in our case word embeddings) into account, or they need to learn the noise modeling from scratch which can be difficult in a low-resource setting. We propose to cluster the training data using the input features and then compute different confusion matrices for each cluster. To the best of our knowledge, our approach is the first to leverage feature-dependent noise modeling with pre-initialized confusion matrices. We evaluate on low-resource named entity recognition settings in several languages, showing that our methods improve upon other confusion-matrix based methods by up to 9%. |
Mosbach, Marius; Stenger, Irina; Avgustinova, Tania; Klakow, Dietrich incom.py - A Toolbox for Calculating Linguistic Distances and Asymmetries between Related Languages Inproceedings Angelova, Galia; Mitkov, Ruslan; Nikolova, Ivelina; Temnikova, Irina (Ed.): Proceedings of Recent Advances in Natural Language Processing, RANLP 2019, Varna, Bulgaria, 2-4 September 2019, pp. 811-819, Varna, Bulgaria, 2019. Links | BibTeX @inproceedings{Mosbach2019,
title = {incom.py - A Toolbox for Calculating Linguistic Distances and Asymmetries between Related Languages},
author = {Marius Mosbach and Irina Stenger and Tania Avgustinova and Dietrich Klakow},
editor = {Galia Angelova and Ruslan Mitkov and Ivelina Nikolova and Irina Temnikova},
url = {https://acl-bg.org/proceedings/2019/RANLP%202019/pdf/RANLP094.pdf},
doi = {10.26615/978-954-452-056-4_094},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of Recent Advances in Natural Language Processing, RANLP 2019, Varna, Bulgaria, 2-4 September 2019},
pages = {811--819},
address = {Varna, Bulgaria},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Grosse, Kathrin; Trost, Thomas A; Mosbach, Marius; Backes, Michael; Klakow, Dietrich Adversarial Initialization -- when your network performs the way I want Journal Article arXiv, Cornell University, 2019. Links | BibTeX @article{Grosse2019,
title = {Adversarial Initialization -- when your network performs the way I want},
author = {Kathrin Grosse and Thomas A. Trost and Marius Mosbach and Michael Backes and Dietrich Klakow},
url = {https://arxiv.org/abs/1902.03020},
year = {2019},
date = {2019-02-08},
journal = {arXiv, Cornell University},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|
Biswas, Rajarshi; Mogadala, Aditya; Barz, Michael; Sonntag, Daniel; Klakow, Dietrich Automatic Judgement of Neural Network-Generated Image Captions Inproceedings 7th International Conference on Statistical Language and Speech Processing (SLSP2019), Ljubljana, Slovenia, 2019. BibTeX @inproceedings{Biswas2019,
  title     = {Automatic Judgement of Neural Network-Generated Image Captions},
  author    = {Rajarshi Biswas and Aditya Mogadala and Michael Barz and Daniel Sonntag and Dietrich Klakow},
  year      = {2019},
  date      = {2019-00-00},
  booktitle = {7th International Conference on Statistical Language and Speech Processing (SLSP2019)},
  address   = {Ljubljana, Slovenia},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|
2017
|
Trost, Thomas; Klakow, Dietrich Parameter Free Hierarchical Graph-Based Clustering for Analyzing Continuous Word Embeddings. Inproceedings In Workshop Proceedings of TextGraphs-11: Graph-based
Methods for Natural Language Processing (Workshop at ACL 2017), 2017. BibTeX @inproceedings{TroKla2017,
title = {Parameter Free Hierarchical Graph-Based Clustering for Analyzing Continuous Word Embeddings},
author = {Thomas Trost and Dietrich Klakow},
year = {2017},
date = {2017-00-00},
booktitle = {Workshop Proceedings of TextGraphs-11: Graph-based Methods for Natural Language Processing (Workshop at ACL 2017)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Oualil, Youssef; Klakow, Dietrich A batch noise contrastive estimation approach for training large vocabulary language models Inproceedings 18th Annual Conference of the International Speech Communication Association (INTERSPEECH), 2017. BibTeX @inproceedings{Oualil2017,
title = {A batch noise contrastive estimation approach for training large vocabulary language models},
author = {Youssef Oualil and Dietrich Klakow},
year = {2017},
date = {2017-00-00},
booktitle = {18th Annual Conference of the International Speech Communication Association (INTERSPEECH)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Singh, Mittul; Oualil, Youssef; Klakow, Dietrich Approximated and domain-adapted LSTM language models for first-pass decoding in speech recognition Inproceedings 18th Annual Conference of the International Speech Communication Association (INTERSPEECH), Stockholm, Sweden, 2017. BibTeX @inproceedings{Singh2017,
title = {Approximated and domain-adapted LSTM language models for first-pass decoding in speech recognition},
author = {Mittul Singh and Youssef Oualil and Dietrich Klakow},
year = {2017},
date = {2017-00-00},
booktitle = {18th Annual Conference of the International Speech Communication Association (INTERSPEECH)},
address = {Stockholm, Sweden},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Oualil, Youssef; Klakow, Dietrich A neuronal network approach for mixing language models Inproceedings ICASSP 2017 2017. BibTeX @inproceedings{Oualil2017b,
title = {A neuronal network approach for mixing language models},
author = {Youssef Oualil and Dietrich Klakow},
year = {2017},
date = {2017-00-00},
booktitle = {ICASSP 2017},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Oualil, Youssef Sequential estimation techniques and application to multiple speaker tracking and language modeling PhD Thesis Saarland University, 2017. Abstract | Links | BibTeX @phdthesis{Oualil2017c,
title = {Sequential estimation techniques and application to multiple speaker tracking and language modeling},
author = {Youssef Oualil},
editor = {Dietrich Klakow},
url = {http://nbn-resolving.de/urn:nbn:de:bsz:291-scidok-ds-272280},
doi = {10.22028/D291-27228},
year = {2017},
date = {2017-00-00},
address = {Saarbr{\"u}cken},
school = {Saarland University},
note = {Akademische Betreuung: Dietrich Klakow},
abstract = {For many real-world applications, the considered data is given as a time sequence that becomes available in an orderly fashion, where the order incorporates important information about the entities of interest. The work presented in this thesis deals with two such cases by introducing new sequential estimation solutions. More precisely, we introduce a: I. Sequential Bayesian estimation framework to solve the multiple speaker localization, detection and tracking problem. This framework is a complete pipeline that includes 1) new observation estimators, which extract a fixed number of potential locations per time frame; 2) new unsupervised Bayesian detectors, which classify these estimates into noise/speaker classes and 3) new Bayesian filters, which use the speaker class estimates to track multiple speakers. This framework was developed to tackle the low overlap detection rate of multiple speakers and to reduce the number of constraints generally imposed in standard solutions. II. Sequential neural estimation framework for language modeling, which overcomes some of the shortcomings of standard approaches through merging of different models in a hybrid architecture. That is, we introduce two solutions that tightly merge particular models and then show how a generalization can be achieved through a new mixture model. In order to speed-up the training of large vocabulary language models, we introduce a new extension of the noise contrastive estimation approach to batch training.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
For many real-world applications, the considered data is given as a time sequence that becomes available in an orderly fashion, where the order incorporates important information about the entities of interest. The work presented in this thesis deals with two such cases by introducing new sequential estimation solutions. More precisely, we introduce a: I. Sequential Bayesian estimation framework to solve the multiple speaker localization, detection and tracking problem. This framework is a complete pipeline that includes 1) new observation estimators, which extract a fixed number of potential locations per time frame; 2) new unsupervised Bayesian detectors, which classify these estimates into noise/speaker classes and 3) new Bayesian filters, which use the speaker class estimates to track multiple speakers. This framework was developed to tackle the low overlap detection rate of multiple speakers and to reduce the number of constraints generally imposed in standard solutions. II. Sequential neural estimation framework for language modeling, which overcomes some of the shortcomings of standard approaches through merging of different models in a hybrid architecture. That is, we introduce two solutions that tightly merge particular models and then show how a generalization can be achieved through a new mixture model. In order to speed-up the training of large vocabulary language models, we introduce a new extension of the noise contrastive estimation approach to batch training. |
2016
|
Singh, Mittul; Greenberg, Clayton; Oualil, Youssef; Klakow, Dietrich Sub-Word Similarity based Search for Embeddings: Inducing Rare-Word Embeddings for Word Similarity Tasks and Language Modelling Inproceedings Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pp. 2061-2070, The COLING 2016 Organizing Committee, Osaka, Japan, 2016. Abstract | Links | BibTeX @inproceedings{singh-EtAl:2016:COLING1,
title = {Sub-Word Similarity based Search for Embeddings: Inducing Rare-Word Embeddings for Word Similarity Tasks and Language Modelling},
author = {Mittul Singh and Clayton Greenberg and Youssef Oualil and Dietrich Klakow},
url = {http://aclweb.org/anthology/C16-1194},
year = {2016},
date = {2016-12-01},
booktitle = {Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
pages = {2061--2070},
publisher = {The COLING 2016 Organizing Committee},
address = {Osaka, Japan},
abstract = {Training good word embeddings requires large amounts of data. Out-of-vocabulary words will still be encountered at test-time, leaving these words without embeddings. To overcome this lack of embeddings for rare words, existing methods leverage morphological features to generate embeddings. While the existing methods use computationally-intensive rule-based (Soricut and Och, 2015) or tool-based (Botha and Blunsom, 2014) morphological analysis to generate embeddings, our system applies a computationally-simpler sub-word search on words that have existing embeddings. Embeddings of the sub-word search results are then combined using string similarity functions to generate rare word embeddings. We augmented pre-trained word embeddings with these novel embeddings and evaluated on a rare word similarity task, obtaining up to 3 times improvement in correlation over the original set of embeddings. Applying our technique to embeddings trained on larger datasets led to on-par performance with the existing state-of-the-art for this task. Additionally, while analysing augmented embeddings in a log-bilinear language model, we observed up to 50% reduction in rare word perplexity in comparison to other more complex language models.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Training good word embeddings requires large amounts of data. Out-of-vocabulary words will still be encountered at test-time, leaving these words without embeddings. To overcome this lack of embeddings for rare words, existing methods leverage morphological features to generate embeddings. While the existing methods use computationally-intensive rule-based (Soricut and Och, 2015) or tool-based (Botha and Blunsom, 2014) morphological analysis to generate embeddings, our system applies a computationally-simpler sub-word search on words that have existing embeddings. Embeddings of the sub-word search results are then combined using string similarity functions to generate rare word embeddings. We augmented pre-trained word embeddings with these novel embeddings and evaluated on a rare word similarity task, obtaining up to 3 times improvement in correlation over the original set of embeddings. Applying our technique to embeddings trained on larger datasets led to on-par performance with the existing state-of-the-art for this task. Additionally, while analysing augmented embeddings in a log-bilinear language model, we observed up to 50% reduction in rare word perplexity in comparison to other more complex language models. |
Varjokallio, Matti; Klakow, Dietrich Unsupervised morph segmentation and statistical language models for vocabulary expansion Inproceedings Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 175-180, Association for Computational Linguistics, Berlin, Germany, 2016. Links | BibTeX @inproceedings{varjokallio-klakow:2016:P16-2,
title = {Unsupervised morph segmentation and statistical language models for vocabulary expansion},
author = {Matti Varjokallio and Dietrich Klakow},
url = {http://anthology.aclweb.org/P16-2029},
year = {2016},
date = {2016-08-01},
booktitle = {Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages = {175--180},
publisher = {Association for Computational Linguistics},
address = {Berlin, Germany},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Sayeed, Asad; Greenberg, Clayton; Demberg, Vera Thematic fit evaluation: an aspect of selectional preferences Inproceedings Proceedings of the 1st Workshop on Evaluating Vector Space Representations for NLP, pp. 99-105, 2016, ISBN: 9781945626142. BibTeX @inproceedings{Sayeed2016,
title = {Thematic fit evaluation: an aspect of selectional preferences},
author = {Asad Sayeed and Clayton Greenberg and Vera Demberg},
isbn = {9781945626142},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 1st Workshop on Evaluating Vector Space Representations for NLP},
pages = {99--105},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Schneegass, Stefan; Oualil, Youssef; Bulling, Andreas SkullConduct: Biometric User Identification on Eyewear Computers Using Bone Conduction Through the Skull Inproceedings Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems, pp. 1379-1384, ACM, New York, NY, USA, 2016, ISBN: 978-1-4503-3362-7. Links | BibTeX @inproceedings{Schneegass:2016:SBU:2858036.2858152,
title = {SkullConduct: Biometric User Identification on Eyewear Computers Using Bone Conduction Through the Skull},
author = {Stefan Schneegass and Youssef Oualil and Andreas Bulling},
url = {http://doi.acm.org/10.1145/2858036.2858152},
doi = {10.1145/2858036.2858152},
isbn = {978-1-4503-3362-7},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
pages = {1379--1384},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI '16},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Oualil, Youssef; Greenberg, Clayton; Singh, Mittul; Klakow, Dietrich Sequential recurrent neural networks for language modeling Inproceedings Interspeech 2016, pp. 3509-3513, 2016. BibTeX @inproceedings{oualil2016sequential,
title = {Sequential recurrent neural networks for language modeling},
author = {Youssef Oualil and Clayton Greenberg and Mittul Singh and Dietrich Klakow},
year = {2016},
date = {2016-01-01},
booktitle = {Interspeech 2016},
pages = {3509--3513},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Singh, Mittul; Greenberg, Clayton; Klakow, Dietrich The Custom Decay Language Model for Long Range Dependencies Book Chapter Sojka, Petr; Horák, Aleš; Kopeček, Ivan; Pala, Karel (Ed.): Text, Speech, and Dialogue: 19th International Conference, TSD 2016, Brno, Czech Republic, September 12-16, 2016, Proceedings, pp. 343-351, Springer International Publishing, Cham, 2016, ISBN: 978-3-319-45510-5. Links | BibTeX @inbook{Singh2016,
title = {The Custom Decay Language Model for Long Range Dependencies},
author = {Mittul Singh and Clayton Greenberg and Dietrich Klakow},
editor = {Petr Sojka and Ale{\v{s}} Hor{\'{a}}k and Ivan Kope{\v{c}}ek and Karel Pala},
url = {http://dx.doi.org/10.1007/978-3-319-45510-5_39},
doi = {10.1007/978-3-319-45510-5_39},
isbn = {978-3-319-45510-5},
year = {2016},
date = {2016-01-01},
booktitle = {Text, Speech, and Dialogue: 19th International Conference, TSD 2016, Brno, Czech Republic, September 12-16, 2016, Proceedings},
pages = {343--351},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
|
Oualil, Youssef; Singh, Mittul; Greenberg, Clayton; Klakow, Dietrich Long-short range context neural networks for language models Inproceedings EMNLP 2016 2016. BibTeX @inproceedings{Oualil2016,
title = {Long-short range context neural networks for language models},
author = {Youssef Oualil and Mittul Singh and Clayton Greenberg and Dietrich Klakow},
year = {2016},
date = {2016-00-00},
booktitle = {EMNLP 2016},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
2015
|
Greenberg, Clayton; Demberg, Vera; Sayeed, Asad Verb Polysemy and Frequency Effects in Thematic Fit Modeling Inproceedings Proceedings of the 6th Workshop on Cognitive Modeling and Computational Linguistics, pp. 48-57, Association for Computational Linguistics, Denver, Colorado, 2015. Links | BibTeX @inproceedings{greenberg-demberg-sayeed:2015:CMCL,
title = {Verb Polysemy and Frequency Effects in Thematic Fit Modeling},
author = {Clayton Greenberg and Vera Demberg and Asad Sayeed},
url = {http://www.aclweb.org/anthology/W15-1106},
year = {2015},
date = {2015-06-01},
booktitle = {Proceedings of the 6th Workshop on Cognitive Modeling and Computational Linguistics},
pages = {48--57},
publisher = {Association for Computational Linguistics},
address = {Denver, Colorado},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Greenberg, Clayton; Sayeed, Asad; Demberg, Vera Improving Unsupervised Vector-Space Thematic Fit Evaluation via Role-Filler Prototype Clustering Inproceedings Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 21-31, Association for Computational Linguistics, Denver, Colorado, 2015. Links | BibTeX @inproceedings{greenberg-sayeed-demberg:2015:NAACL-HLT,
title = {Improving Unsupervised Vector-Space Thematic Fit Evaluation via Role-Filler Prototype Clustering},
author = {Clayton Greenberg and Asad Sayeed and Vera Demberg},
url = {http://www.aclweb.org/anthology/N15-1003},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
pages = {21--31},
publisher = {Association for Computational Linguistics},
address = {Denver, Colorado},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Oualil, Youssef; Schulder, Marc; Helmke, Hartmut; Schmidt, Anna; Klakow, Dietrich Real-Time Integration of Dynamic Context Information for Improving Automatic Speech Recognition Inproceedings INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, 2015. Links | BibTeX @inproceedings{youalil_interspeech_2015,
  title     = {Real-Time Integration of Dynamic Context Information for Improving Automatic Speech Recognition},
  author    = {Youssef Oualil and Marc Schulder and Hartmut Helmke and Anna Schmidt and Dietrich Klakow},
  url       = {https://core.ac.uk/display/31018097},
  year      = {2015},
  date      = {2015-01-01},
  booktitle = {INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|