|
{ |
|
"paper_id": "R11-1035", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:04:18.375876Z" |
|
}, |
|
"title": "Highly Multilingual Coreference Resolution Exploiting a Mature Entity Repository", |
|
"authors": [ |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jenya", |
|
"middle": [], |
|
"last": "Belyaeva", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Crawley", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Leonida", |
|
"middle": [], |
|
"last": "Della-Rocca", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Ebrahim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Maud", |
|
"middle": [], |
|
"last": "Ehrmann", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mijail", |
|
"middle": [], |
|
"last": "Kabadjov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Van Der Goot", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EC Joint Research Centre 21027", |
|
"location": { |
|
"settlement": "Ispra", |
|
"region": "VA", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper we present an approach to large-scale coreference resolution for an ample set of human languages, with a particular emphasis on time performance and precision. One of the distinctive features of our approach is the use of a mature multilingual named entity repository (persons and organizations) gradually compiled over the past few years. Our experiments show promising results-an overall precision of 94% tested on seven different languages. We also present an extrinsic evaluation on seven languages in the context of summarization where we gauge the contribution of the coreference resolver towards the end summarization performance.", |
|
"pdf_parse": { |
|
"paper_id": "R11-1035", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper we present an approach to large-scale coreference resolution for an ample set of human languages, with a particular emphasis on time performance and precision. One of the distinctive features of our approach is the use of a mature multilingual named entity repository (persons and organizations) gradually compiled over the past few years. Our experiments show promising results-an overall precision of 94% tested on seven different languages. We also present an extrinsic evaluation on seven languages in the context of summarization where we gauge the contribution of the coreference resolver towards the end summarization performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Recent work on coreference resolution has been largely dominated by machine learning approaches and predominantly for the English language (Ng and Cardie, 2002; Ponzetto and Strube, 2006; Luo, 2007) . This is in great part due to the availability of annotated corpora such as MUC-6/7 (Hirschman, 1998) , ACE-2/3/4/5 (NIST, 2004) , GNOME (Poesio et al., 2004) and large-scale crowdsourcing efforts like Phrase Detectives. 1 One of the big advantages of machine learning approaches is that they are reasonably easy to reproduce given that the set of input features are documented well, since there are many good open-source platforms for machine learning (e.g., WEKA 2 ) and machine-learning-based coreference (e.g., BART 3 (Versley et al., 2008) ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 160, |
|
"text": "(Ng and Cardie, 2002;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 187, |
|
"text": "Ponzetto and Strube, 2006;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 198, |
|
"text": "Luo, 2007)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 301, |
|
"text": "(Hirschman, 1998)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 328, |
|
"text": "(NIST, 2004)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 358, |
|
"text": "(Poesio et al., 2004)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 422, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 722, |
|
"end": 744, |
|
"text": "(Versley et al., 2008)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, intrinsic evaluations can pose problems. As pointed out by (Stoyanov et al., 2009) there is too much variation in reported results across data sets to be able to draw robust conclusions on the state-of-the-art in the area for which they proposed a method for reporting results on a data set that makes it easier to predict performance on other data sets (by breaking down results into names, types of pronouns, nominals etc.). Also, intrinsic evaluations can be highly sensitive to preprocessing (Mitkov, 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 91, |
|
"text": "(Stoyanov et al., 2009)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 519, |
|
"text": "(Mitkov, 2002)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There is agreement in the community on the level of resolution difficulty on major types of coreferential expressions. For instance, proper names are considered to be the easiest to resolve, followed by pronouns, in turn followed by common nouns. One of the main reasons why common noun coreference is challenging is because they often share little or no surface linguistic features with their antecedents and require world or encyclopedic knowledge for their resolution (see (Kabadjov, 2007) for a study for English). For instance, Ponzetto and Strube (2006) proposed to use WordNet and Wikipedia to address the problem of bringing in world and/or encyclopedic knowledge into their system for coreference resolution in English reporting improvements for common noun resolution.", |
|
"cite_spans": [ |
|
{ |
|
"start": 476, |
|
"end": 492, |
|
"text": "(Kabadjov, 2007)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 559, |
|
"text": "Ponzetto and Strube (2006)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work we address two important remaining gaps in coreference resolution. Firstly, we are interested in highly multilingual coreference. Secondly, we address the problem of common noun coreference by exploiting a large lexical resource, the named entity database, compiled over the past few years by automatically extracting names from hundreds of thousands of online news articles in twenty languages (and subsequently cleaning the most frequent names by a human moderator). The coreference resolver we present is designed to work as part of the Europe Media Monitor (EMM) system 4 for online news analysis and aggregation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to evaluate the effectiveness of our approach we carry out two separate evaluations: one intrinsic and one extrinsic in the context of summarization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows: in the next section ( \u00a72) we describe our named entity database which is the backbone of our approach; in \u00a73, we present our approach to coreference followed by a discussion of experimental results in \u00a74. Then, in \u00a75 we briefly survey related work on coreference resolution and finally conclude and give pointers to future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The historical repository of EMM's person and organization titles is a by-product of the Named Entity Recognition (NER) process, which has been applied daily to tens of thousands of multilingual news articles per day since 2004. Titles are parts of the name recognition patterns, and each time a name is found, EMM keeps track of the titles found next to the name. The result is a large multilingual repository of titles and other attributes about names. In this section, we thus try to give an overview of the NER process and hence information about the title repository. EMM's NER is performed by applying language-independent recognition patterns to text. The hand-written language-independent recognition patterns use slots to make reference to various language-specific lists of words, phrases and regular expressions. By doing this, the system is modular and a new language can simply be plugged in by adding the language-specific parameter file, containing the relevant word lists for each slot. Pouliquen and R. Steinberger (2009) describe the types of slots and list a number of patterns. A typical and simple pattern is the one that requires that uppercase words adjacent to any title are likely to be person or organization names (e.g., President Upper Upper). As the strings indicating that neighboring uppercase words in a name are not necessarily titles, we refer to them more generally as Trigger Words. The trigger word list of elements thus contains conventional titles (e.g., Dr., Mr., President), professions and occupations (e.g., spokeswoman, artist, playboy, tennis player), roles inside teams (secretary, defense player, short-stop), adjectives referring to countries, regions, locations, ethnic groups or religions (e.g., Iraqi, Latin-American, Parisian, Berber, Catholic), and a variety of other strings that may indicate that the adjacent uppercase words are a person (e.g., XX-year-old, has declared, deceased). These lists are mostly produced using empirical methods or machine learning, but they are always manually verified. The rules are partially cascaded and allow for large combinations of trigger words, e.g., to recognize the uppercase words in the following apposition construction as a name: Upper Upper, former 56-year-old Afghan Foreign Minister.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1003, |
|
"end": 1038, |
|
"text": "Pouliquen and R. Steinberger (2009)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multilingual Named Entity Database", |
|
"sec_num": "2" |
|
}, |
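
{

"text": "What follows is a small illustrative sketch (ours, not the EMM implementation) of how a language-independent pattern shape such as Trigger Upper Upper can be instantiated with a language-specific trigger-word slot; the tiny trigger list, the regular expression and the function name are all simplifying assumptions:\n\nimport re\n\n# Hypothetical, tiny language-specific trigger-word list (one slot value set).\nTRIGGERS = [\"President\", \"Dr.\", \"Mr.\", \"Minister\", \"spokeswoman\"]\n\n# Language-independent pattern shape 'Trigger Upper Upper', instantiated by\n# filling the trigger slot with the language-specific list above.\nPATTERN = re.compile(r\"\\b(?:\" + \"|\".join(map(re.escape, TRIGGERS)) + r\")\\s+([A-Z]\\w+(?:\\s+[A-Z]\\w+)+)\")\n\ndef guess_names(text):\n    # Return the uppercase word sequences found adjacent to a trigger word.\n    return PATTERN.findall(text)\n\nprint(guess_names(\"President Barack Obama met Dr. Angela Merkel.\"))\n# ['Barack Obama', 'Angela Merkel']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Multilingual Named Entity Database",

"sec_num": null

},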
|
{ |
|
"text": "As the patterns exist and are applied to twenty languages, the list of trigger words contains words in all these languages. Some of these trigger words are not suitable so we remove them from the lists. Age expressions such as XX-year-old or verbal phrases such as has declared were thus manually removed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multilingual Named Entity Database", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Patterns to recognize organizations have different shapes and the trigger words are usually part of the organization name (e.g., Bank and Club in Chartered Bank or Motor Sport Club). These typical organization name parts are also used for the co-reference resolution task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multilingual Named Entity Database", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "3 Coreference Algorithm", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multilingual Named Entity Database", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The coreference resolution module is built for inclusion in a larger pipeline architecture, where an input text document undergoes several processing phases during which the source is augmented with layers of meta data such as named entities. The data interchange format between processing phases is RSS, a light-weight type of XML typically used by on-line news providers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Architecture", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Known entities are entities that have been found in at least five different news clusters in the past in the EMM system. For all known entities morphological or other spelling variants are automatically generated according to hand-written rules. For example, for Angela Merkel, the genitive version Merkels will be pre-generated and recognized, and Arabic names using the infix al will be pre-generated with and without al, as well as with and without linking hyphens (Moussab al-Zarqawi, Moussab al Zarqawi, Moussab Zarqawi). For the actual lookup, a finite state tool that allows patterns and partial case sensitivity is used, employing entity information that has been gathered over a number of years from the EMM production system to recognize known entities within the text (currently, there are over 1.2 million distinct entities in the named entity repository). The RSS is then marked up with additional meta information about the entities found (see (Crawley and Wagner, 2010) for more details).", |
|
"cite_spans": [ |
|
{ |
|
"start": 958, |
|
"end": 984, |
|
"text": "(Crawley and Wagner, 2010)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lookup of Known Named Entities", |
|
"sec_num": "3.2" |
|
}, |
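
{

"text": "Below is a minimal sketch (ours) of the variant pre-generation just described, hard-coding only the two rules mentioned above (a naive genitive 's' and the Arabic 'al' infix); rule coverage and the function name are illustrative:\n\ndef variants(name):\n    # Pre-generate simple spelling variants for a known name (sketch).\n    out = {name}\n    # Naive genitive variant, as in Angela Merkel -> Merkels.\n    out.add(name + \"s\")\n    # Arabic 'al' infix: with and without 'al', with and without a linking hyphen.\n    if \" al-\" in name or \" al \" in name:\n        first, last = name.replace(\" al-\", \" al \").rsplit(\" al \", 1)\n        out.update({first + \" al-\" + last, first + \" al \" + last, first + \" \" + last})\n    return out\n\nprint(sorted(variants(\"Moussab al-Zarqawi\")))\n# ['Moussab Zarqawi', 'Moussab al Zarqawi', 'Moussab al-Zarqawi', 'Moussab al-Zarqawis']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Lookup of Known Named Entities",

"sec_num": null

},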
|
{ |
|
"text": "As we are interested in grounding name references to real-life entities and we thus need to disambiguate between people having the same surname (or first name), we only look for entities consisting of at least two name parts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Guessing", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The entity guessing comprises two parts, the first is a parallel lexical tokenization of the text, using classifying tokenizers, gazetteers, pattern matchers and simple tokenizers as well as any previously defined entities from further up the processing chain. The second part is a sequence of finite state grammars that pick and choose appropriate tokens for a given rule from the parallel token streams passing the output on to the next grammar in the sequence building ever more complex constructs and disambiguating on the way.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Guessing", |
|
"sec_num": "3.3" |
|
}, |
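
{

"text": "The following rough sketch (ours, heavily simplified) illustrates the two parts just described: parallel annotation streams over one shared token sequence, and a cascade of rules that combine annotated tokens into larger constructs; all names and the single example rule are illustrative assumptions:\n\ndef gazetteer_stream(tokens, gazetteer, label):\n    # One parallel stream: (start, end, label) spans over the shared token sequence.\n    return [(i, i + 1, label) for i, t in enumerate(tokens) if t in gazetteer]\n\ndef person_rule(tokens, anns):\n    # Example grammar step: TITLE followed by two capitalised tokens -> PERSON span.\n    out = []\n    for s, e, l in anns:\n        if l == \"TITLE\" and e + 1 < len(tokens) and tokens[e][0].isupper() and tokens[e + 1][0].isupper():\n            out.append((s, e + 2, \"PERSON\"))\n    return out\n\ndef cascade(tokens, streams, rules):\n    # Each grammar reads the annotations produced so far and may add larger ones.\n    anns = [a for stream in streams for a in stream]\n    for rule in rules:\n        anns += rule(tokens, anns)\n    return anns\n\ntokens = \"President Barack Obama spoke\".split()\nstreams = [gazetteer_stream(tokens, {\"President\"}, \"TITLE\")]\nprint(cascade(tokens, streams, [person_rule]))\n# [(0, 1, 'TITLE'), (0, 3, 'PERSON')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Entity Guessing",

"sec_num": null

},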
|
{ |
|
"text": "The entity normalization takes place once the entities have been discovered and is used as a means of merging entities with newly found aliases, such as when an existing entity is written in a script we have not seen it in before or has been slightly misspelt. This is done by transliterating the name from any unicode range into the Latin unicode range using a statistical matrix for ngram substitutions. Some normalization may be performed and vowels are removed to create a consonant signature which is then used to perform a lookup for the most likely candidates with the list of known entities. This is to reduce the number of values for eventual comparison using a string similarity metric. The closest match is then selected and, if within a fine-grained tolerance, the value is assigned as a new alias. Otherwise it is assumed a new entity and assigned a new id.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Merging of NE Variants", |
|
"sec_num": "3.4" |
|
}, |
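
{

"text": "A minimal sketch (ours) of the consonant-signature lookup just described, with difflib.SequenceMatcher standing in for the string similarity metric; the toy entity index, the tolerance value and all names are assumptions, and the transliteration step is not shown:\n\nimport difflib, re\n\n# Toy signature -> known-name index (in reality built from the entity repository).\nKNOWN = {\"mssb lzrqw\": [\"Moussab al-Zarqawi\"]}\n\ndef signature(name):\n    # Lower-case, strip non-letters, then drop vowels to get a consonant signature.\n    s = re.sub(r\"[^a-z ]\", \"\", name.lower())\n    return re.sub(r\"[aeiou]\", \"\", s)\n\ndef closest_alias(name, tolerance=0.8):\n    # The signature lookup narrows the candidate set before string similarity is applied.\n    candidates = KNOWN.get(signature(name), [])\n    best = max(candidates, key=lambda c: difflib.SequenceMatcher(None, name, c).ratio(), default=None)\n    if best and difflib.SequenceMatcher(None, name, best).ratio() >= tolerance:\n        return best   # close enough: merge as a new alias of the existing entity\n    return None       # otherwise: assume a new entity and assign a new id\n\nprint(closest_alias(\"Moussab al-Zarqaawi\"))\n# Moussab al-Zarqawi",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Merging of NE Variants",

"sec_num": null

},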
|
{ |
|
"text": "When an RSS file reaches the coreference resolution module, it already contains the list of known and guessed entities. The resolution is run only over the known entities. The resolver module does the following for each article:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "1. Loads all known and guessed entities 2. For each known entity it searches the resources for its possible references (titles from the entity-title table, name parts directly from the entity mention).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "3. The reference-entity map is created; it associates each possible reference (step 2) to a known entity. 5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "4. The matcher component finds all possible mentions of any entity (i.e., name parts 6 , titles) in the text. 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "5. The resolver links mentions (step 4) to entities using the reference-entity map, given that the following conditions are met:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "(a) The entity has been already introduced. 8 (b) The entity reference is not a constituent of a known or guessed entity mention (or their title).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "6. The resolved mentions are merged in order to create a non-overlapping sequence of entity mentions with the following rules:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "(a) If the mention is part of a longer mention leave only the longer one (e.g., 'former US president' would outweigh 'president'). (b) If the mentions are next to each other and they are assigned to the same entity they are concatenated. (c) If the mentions are next to each other and they are assigned to a different entity a name part will outweigh a title (probably an incorrect title). (d) Otherwise consider only the latter mention.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolver", |
|
"sec_num": "3.5" |
|
}, |
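
{

"text": "The sketch below (ours) spells out one possible reading of merging rules (a)-(d); the mention tuples, token-index adjacency and tie-breaking details are our assumptions rather than the production implementation:\n\ndef merge_mentions(mentions):\n    # mention = (start, end, entity_id, kind), kind in {\"name\", \"title\"};\n    # token offsets, so start == previous end means the mentions are adjacent.\n    mentions = sorted(mentions, key=lambda m: (m[0], -(m[1] - m[0])))\n    merged = []\n    for m in mentions:\n        if merged:\n            p = merged[-1]\n            if m[1] <= p[1]:\n                continue                          # (a) inside a longer mention: keep the longer one\n            if m[0] < p[1]:\n                merged[-1] = m                    # (d) otherwise consider only the latter mention\n                continue\n            if m[0] == p[1]:\n                if m[2] == p[2]:\n                    merged[-1] = (p[0], m[1], p[2], \"name\")   # (b) adjacent, same entity: concatenate\n                    continue\n                if p[3] == \"title\" and m[3] == \"name\":\n                    merged[-1] = m                # (c) a name part outweighs a (probably wrong) title\n                    continue\n                if p[3] == \"name\" and m[3] == \"title\":\n                    continue                      # (c) ... and the adjacent title is dropped\n        merged.append(m)\n    return merged\n\nprint(merge_mentions([(0, 1, \"e1\", \"title\"), (1, 3, \"e1\", \"name\")]))\n# [(0, 3, 'e1', 'name')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Coreference Resolver",

"sec_num": null

},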
|
{ |
|
"text": "We carry out a precision-focused intrinsic evaluation over EMM data and an extrinsic evaluation in the context of summarization where we measure the contribution of coreference towards summarization performance. We describe each in turn below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In order to evaluate our coreference system we compiled a corpus of news articles in seven different languages: English, German, Italian, Spanish, French, Russian and Arabic, thus, covering a ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intrinsic Evaluation: EMM Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We ran each news article through the EMM pipeline. After that we asked native speakers of the seven languages to go over the news articles and mark whether each highlighted mention points to the correct entity or not, whereby measuring precision. 10 A highlighted mention could be one of three things: a known named entity recognized by the named entity disambiguation system, a mention of an entity guessed by the named entity guesser, or a mention recognized and attached to a coreference chain by the coreference resolver. The human subjects marked each entity mention via a simple HTML interface.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus and Quick Annotation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "We present separate performance results for named entity disambiguation (table 2) and for coreference resolution (table 3). In both cases we report precision.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "Overall, the named entity disambiguation precision was high; 95% of the 2631 named entities recognized by the system were correct (see table 2 ). The recognition precision of person names in Arabic was the lowest, 81.7%. We discuss the possible reasons for that in our detailed error analysis below. The type of entities entailed by the category 'Others' is mostly mentions to organizations, but also some other prominent named entities such 9 In principle, since the coreference method we propose builds on the named entity repository ( \u00a72), it can be straightforwardly applied to all the languages covered by the repository (currently 20).", |
|
"cite_spans": [ |
|
{ |
|
"start": 443, |
|
"end": 444, |
|
"text": "9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 143, |
|
"text": "table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "10 As pointed out earlier, we are interested in precision and not in recall, since the large volume of news articles passing through the EMM pipeline makes up for potential loss in recall. as events (e.g., Woodstock Festival). We present the coreference performance in three distinct categories: person name parts, person titles and organization head nouns (see table 3 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 362, |
|
"end": 370, |
|
"text": "table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "Not surprisingly, the overall coreference resolution of proper names yields high precision (98%), since resolution difficulty increases as folows: proper names << pronouns << common noun phrases, in particular definite descriptions. Perhaps more notably, these results provide evidence that this is also the case across languages, with Arabic being lowest with 92.9%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "What is more significant, however, is the performance on person titles, which entail mostly refer- ences by means of definite descriptions not sharing a head noun with the antecedent, where the system surpasses the 70% threshold (with the exception of French with 61.2%). It is worth pointing out that these are largely regarded as among the most challenging to resolve, mainly because their resolution requires real-world knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "It should be noted also that our system is an end-to-end system, whose input is free text akin to (Mitkov, 2002; Kabadjov, 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 112, |
|
"text": "(Mitkov, 2002;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 128, |
|
"text": "Kabadjov, 2007)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "In what follows we discuss several representative examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "Arabic. In the following example the system recognizes (Pope) as the correct reference to the preceding recognized person (Benedikt XVI), because our resources capture that Pope is one of the titles of Benedikt XVI ( , 'Benedikt XVI' \u2190\u2212 , 'Pope'):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "English. And here is a similar example in English:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "(2) Bruce, who has until 31 December to respond to the FA's request, had asked [Andre Mariner] to look at Turner's red card again... \"I hope [the referee] looks at it again. I doubt it, though.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "And finally an example in Russian (\u041c\u0430\u0445\u043c\u0443\u0434 \u0410\u0445\u043c\u0430\u0434\u0438\u043d\u0435\u0436\u0430\u0434, 'Mahmoud Ahmadinejad' \u2190\u2212 \u043b\u0438\u0434\u0435\u0440, 'leader'):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Russian.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "( ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Russian.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section we discuss the most prominent types of errors and give illustrative examples for Arabic and French. 11 We adopt a precisionfocused error analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 118, |
|
"text": "11", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Error Analysis", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "Precision-focused analysis of errors. We have grouped system errors into five major categories (see table 4): indefinite noun phrases (the system wrongly links an indefinite noun phrase to an antecedent), resource sparseness (errors due to incomplete database of names and/or titles), different part-of-speech (the system assumes a wrong part-of-speech, e.g., official as adjective or noun), error propagation (errors at the named entity lookup stage propagate on to the coreference resolution) and a general category Other for all the remaining errors. To illustrate these error types, we give a few representative examples next.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Error Analysis", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "Arabic. While working on Arabic articles we were faced with some difficulties related to issues of ambiguity, propagation of errors from the NER module and a relative lack of resources compared to other languages. Ambiguity of Arabic person and organization names is mainly due to the relatively high polysemy of Arabic words, the widespread omission of diacritic vowels in written text and the lack of capitalization in the Arabic writing system. For example, some of the very common person names in Arabic like", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Error Analysis", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "Ramdan , Shaban and Ragab also stand for month names, so if we have an Entity called Mohamed Ramdan and at a later distance in text the word Ramdan, it is difficult to decide if this is a reference to the previous entity or if it is the name of a month. Moreover, the lack of diacritic vowels increases the number for possible readings for a given word, if we have for example the name Sayad Amr and the name part Amr in a non vocalized text, the word Amr could have four different meanings,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Error Analysis", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "whereas if we had the word in the vocalized form Umar, the only possible meaning would be that of a proper name. A different kind of ambiguity results from the fact that in most Arabic countries there is no real distinction between first and last names. So, the reference to a person's full name could be done by any of the parts of the name, that is, usually in news articles references to \"Saddam Hussein\" would use the first part of his name, whereas references to \"Muhammad Husni Mubarak\" would use the third part of the name. French. There were several errors due to incorrect recognition of named entity boundaries (i.e., error propagation). For instance, in the following example (example 4), the reference to Ligue 2 has been wrongly recognized as Ligue and subsequently identified as coreferential with Ligue 1:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Error Analysis", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "(4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Error Analysis", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "Neuf des dix matches de cette 20e journ\u00e9e de [Ligue 1] sont programm\u00e9s ce soir\u00e0 21h, avec notamment un int\u00e9ressant Lille-PSG. En bas de tableau, le match de la peur oppose Grenoble, quasiment assur\u00e9 de descendre en [Ligue] 2,\u00e0 Saint-Etienne, 18e et premier rel\u00e9gable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Error Analysis", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "Project-Syndicate Data Kabadjov (2007) argued that Summarization is a suitable task for evaluating extrinsically coreference resolution systems. Here, we take on their proposal and in this section we discuss experiments with an LSA-based summarizer integrated with the coreference resolver described above on a publicly available corpus 12 for evaluating multidocument multilingual 13 summarization systems (Turchi et al., 2010 ). 14 Our approach for integrating a coreference resolver into an LSA-based summarization system draws on the method put forward by (Steinberger et al., 2007) . The intuition behind this choice is that in addition to capturing pure lexical cooccurrence the extended system is also capable of capturing entity co-occurrence which takes the summarization process to a more semanticallyaware level.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 38, |
|
"text": "Kabadjov (2007)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 427, |
|
"text": "(Turchi et al., 2010", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 586, |
|
"text": "(Steinberger et al., 2007)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation via Summarization:", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The experimental results are presented in table 5. Each summary score is computed by first calculating the intersection of sentences selected by the 12 This is different from the dataset used for the intrinsic evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.2.1" |
|
}, |
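
{

"text": "As a concrete illustration, here is a minimal sketch (ours) of the score just defined; the sentence identifiers and the helper name are purely illustrative:\n\nfrom collections import Counter\n\ndef summary_score(system_sents, annotator_selections):\n    # |system sentences also selected by at least two annotators| / |system sentences|\n    votes = Counter(s for sel in annotator_selections for s in set(sel))\n    agreed = {s for s, c in votes.items() if c >= 2}\n    return len(set(system_sents) & agreed) / len(system_sents)\n\nprint(summary_score([1, 4, 7], [[1, 2, 4], [4, 5], [1, 9]]))\n# sentences 1 and 4 are each selected by two annotators -> 2/3 = 0.666...",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Results",

"sec_num": null

},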
|
{ |
|
"text": "13 Seven languages: English, French, German, Spanish, Russian, Arabic and Czech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "14 Data publicly available for download at: http://langtech.jrc.ec.europa.eu/JRC_ Resources.html. summarizer with those selected by at least two annotators divided by the number of sentences in the system summary. 15 The first thing we observe is that overall (see bottom part of table 5) for target summaries of size three sentences or smaller incorporating cross-document coreference works better than the baseline LSA case and both perform better than two baseline summarizers: one selecting the first sentence of each document in the cluster (labeled 'Lead' in table 5) and another one selecting random sentences (labeled 'Random'). One possible reason for that is that by adopting a more semantically-aware representation the summarization machinery is able to produce succinct summaries of better quality than the LSA-only method, but as soon as the summarization compression rate is relaxed the benefit of including entities becomes less visible (and even in some cases yields worse results).", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 216, |
|
"text": "15", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "The variation in summarization performance across languages can be in part explained by the inconsistent performance of the coreference resolver due to lack of or noisy resources for the languages. For instance, for languages like English and German we have good coreference resolution performance which also translates into decent summarization performance, whereas for Czech the performance is notably lower.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "Representatives of machine learning work on coreference are (Ng and Cardie, 2002; Luo, 2007) for supervised learning and (Haghighi and Klein, 2007) for unsupervised.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 81, |
|
"text": "(Ng and Cardie, 2002;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 82, |
|
"end": 92, |
|
"text": "Luo, 2007)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 147, |
|
"text": "(Haghighi and Klein, 2007)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In more recent work, (Stoyanov et al., 2009 ) provides a comprehensive discussion of the state of the art coupled with extensive experiments on the standard corpora for English: MUC-6, MUC-7, ACE-2, ACE-3, ACE-4 and ACE-5. Recasens and Hovy (2010) explore the impact on coreference resolution performance by varying several prominent contextual factors; they measure performance across corpora, languages, annotation schemes and preprocessing. However, their set of languages consisted of English and Spanish only.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 43, |
|
"text": "(Stoyanov et al., 2009", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 247, |
|
"text": "Recasens and Hovy (2010)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The most closely related experiment to ours is that of the SemEval-2010 task 1 , which covered coreference evaluation on six languages. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper we presented an approach to largescale coreference resolution for a broad spectrum of human languages with precision and efficiency in mind. The backbone of our algorithm is a mature multilingual named entity database semiautomatically compiled over the past few years. We reported an overall precision of 94% tested on seven different languages and presented a detailed error analysis with illustrative examples from our corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We performed an extrinsic evaluation on seven languages in the context of the task of summarization. We concluded that producing short informative summaries (from one to three sentences) is better achieved by bringing in cross-document coreference than without it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In future work, we intend to carry out a comprehensive extrinsic evaluations in the context of endgoal tasks like Sentiment Analysis and Quotation extraction. We also plan to perform an additional intrinsic evaluation on the SemEval'10 corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "http://www.phrasedetectives.org. 2 http://www.cs.waikato.ac.nz/ml/weka/. 3 http://www.bart-coref.org/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://emm.newsbrief.eu/overview.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ambiguous references are ignored (e.g., title 'president' is not considered as a coreference candidate in the case of an article in which two entities carry the title 'president').6 We are also aware of names with infixes like 'de la Vega'.7 Because of efficiency reasons it uses lists of all possible name parts and titles, not only those found in the article -the resources are loaded during the matcher's initialization.8 The candidate mention appears after the first mention of the entity identified by the name recognition module.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We left out examples for other languages due to space contraints.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For a discussion on how this evaluation metric compares with ROUGE see(Turchi et al., 2010).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Desktop text mining for law enforcement", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Crawley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Wagner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of IEEE ISI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "138--140", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J.B. Crawley and G. Wagner. 2010. Desktop text min- ing for law enforcement. In Proceedings of IEEE ISI, pages 138-140.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Unsupervised coreference resolution in a nonparametric bayesian model", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "848--855", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Haghighi and D. Klein. 2007. Unsupervised coreference resolution in a nonparametric bayesian model. In Proceedings of ACL, pages 848-855.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "MUC-7 coreference task definition, version 3.0", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Hirschman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of MUC. NIST", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Hirschman. 1998. MUC-7 coreference task defini- tion, version 3.0. In Proceedings of MUC. NIST.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A Comprehensive Evaluation of Anaphora Resolution and Discourse-new Recognition", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kabadjov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Kabadjov. 2007. A Comprehensive Evaluation of Anaphora Resolution and Discourse-new Recogni- tion. Ph.D. thesis, Department of Computer Sci- ence, University of Essex, December.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Coreference or not: A twin model for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Luo. 2007. Coreference or not: A twin model for coreference resolution. In Proceedings of NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Anaphora Resolution", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mitkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Mitkov. 2002. Anaphora Resolution. Longman.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Improving machine learning approaches to coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Ng and C. Cardie. 2002. Improving machine learn- ing approaches to coreference resolution. In Pro- ceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The ace evaluation plan", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nist", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "NIST. 2004. The ace evaluation plan.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Centering: A parametric theory and its instantiations", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Stevenson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"Di" |
|
], |
|
"last": "Eugenio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Hitzeman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Computational Linguistics", |
|
"volume": "30", |
|
"issue": "3", |
|
"pages": "309--363", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Poesio, R. Stevenson, B. Di Eugenio, and J. M. Hitzeman. 2004. Centering: A parametric theory and its instantiations. Computational Linguistics, 30(3):309-363.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Exploiting semantic role labeling, WordNet and Wikipedia for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--199", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S.P. Ponzetto and M. Strube. 2006. Exploiting seman- tic role labeling, WordNet and Wikipedia for coref- erence resolution. In Proceedings of HLT-NAACL, pages 192-199.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automatic construction of multilingual name dictionaries", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Pouliquen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Learning Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "NIPS se-- ries", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Pouliquen and R. Steinberger. 2009. Automatic construction of multilingual name dictionaries. In Learning Machine Translation. MIT Press, NIPS se- ries.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Coreference resolution across corpora: Languages, Coding schemes, and Preprocessing Information", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1423--1432", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Recasens and E. Hovy. 2010. Coreference reso- lution across corpora: Languages, Coding schemes, and Preprocessing Information. In Proceedings of ACL, pages 1423-1432.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "SemEval-2010 Task 1: Coreference Resolution in Multiple Languages", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sapena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Mart\u00ed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Taul\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Versley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Recasens, L. Marquez, E. Sapena, M.A. Mart\u00ed, M. Taul\u00e9, V. Hoste, M. Poesio, and Y. Versley. 2010. SemEval-2010 Task 1: Coreference Resolution in Multiple Languages. In Proceedings of ACL, pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Two uses of anaphora resolution in summarization. Information Processing and Management", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kabadjov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Jezek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "1663--1680", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Steinberger, M. Poesio, M. Kabadjov, and K. Jezek. 2007. Two uses of anaphora resolution in summa- rization. Information Processing and Management, 43(6):1663-1680.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Conundrums in noun phrase coreference resolution: Making sense of the state-of-the-art", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Riloff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of ACL-IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Stoyanov, N. Gilbert, C. Cardie, and E. Riloff. 2009. Conundrums in noun phrase coreference resolution: Making sense of the state-of-the-art. In Proceedings of ACL-IJCNLP.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Using parallel corpora for multilingual (multi-document) summarisation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Turchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kabadjov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of CLEF", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "52--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Turchi, J. Steinberger, M. Kabadjov, and R. Stein- berger. 2010. Using parallel corpora for multi- lingual (multi-document) summarisation evaluation. In Proceedings of CLEF, pages 52-63.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "BART: A modular toolkit for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Versley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Eidelman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Jern", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Moschitti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Versley, S.P. Ponzetto, M. Poesio, V. Eidelman, A. Jern, J. Smith, X. Yang, and A. Moschitti. 2008. BART: A modular toolkit for coreference resolution. In Proceedings of LREC.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"text": "Corpus statistics.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"4\">Language News articles Words Words per art.</td></tr><tr><td>English</td><td>149</td><td>56891</td><td>382</td></tr><tr><td>German</td><td>45</td><td>18213</td><td>405</td></tr><tr><td>Italian</td><td>117</td><td>14082</td><td>120</td></tr><tr><td>Spanish</td><td>94</td><td>18772</td><td>200</td></tr><tr><td>French</td><td>96</td><td>35046</td><td>365</td></tr><tr><td>Russian</td><td>149</td><td>24435</td><td>164</td></tr><tr><td>Arabic</td><td>67</td><td>24400</td><td>364</td></tr><tr><td>Overall</td><td>717</td><td>191839</td><td>268</td></tr><tr><td colspan=\"4\">diverse set of language family branches as are Ger-</td></tr><tr><td colspan=\"3\">manic, Romance, Slavic and Semitic. 9</td><td/></tr><tr><td colspan=\"4\">Statistics about the corpus are shown in table 1.</td></tr><tr><td colspan=\"4\">Overall, we gathered 717 news articles containing</td></tr><tr><td colspan=\"2\">almost 200k words.</td><td/><td/></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"text": "Quality of named entity recognition in the analyzed languages. Values correspond to: Precision (Correct/Recognized).", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Language</td><td>Persons</td><td>Others</td><td>All</td></tr><tr><td>English</td><td>97.0%</td><td>89.5%</td><td>94.0%</td></tr><tr><td/><td>(419/432)</td><td>(256/286)</td><td>(675/718)</td></tr><tr><td>German</td><td>97.5%</td><td>100.0%</td><td>97.9%</td></tr><tr><td/><td>(230/236)</td><td>(46/46)</td><td>(276/282)</td></tr><tr><td>Italian</td><td>92.1%</td><td>100.0%</td><td>94.6%</td></tr><tr><td/><td>(151/164)</td><td>(76/76)</td><td>(227/240)</td></tr><tr><td>Spanish</td><td>95.7%</td><td>96.0%</td><td>95.8%</td></tr><tr><td/><td>(180/188)</td><td>(72/75)</td><td>(252/263)</td></tr><tr><td>French</td><td>98.4%</td><td>97.2%</td><td>97.9%</td></tr><tr><td/><td>(432/439)</td><td>(278/286)</td><td>(710/725)</td></tr><tr><td>Russian</td><td>97.7%</td><td>100.0%</td><td>98.2%</td></tr><tr><td/><td>(130/133)</td><td>(35/35)</td><td>(165/168)</td></tr><tr><td>Arabic</td><td>81.7%</td><td>100.0%</td><td>88.1%</td></tr><tr><td/><td>(125/153)</td><td>(82/82)</td><td>(207/235)</td></tr><tr><td>Overall</td><td>95.5%</td><td>95.4%</td><td>95.5%</td></tr><tr><td/><td colspan=\"3\">(1667/1745) (845/886) (2512/2631)</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"text": "Quality of coreference resolution. Values correspond to: Precision (Correct/Recognized).", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Person</td><td>Person</td><td>Organiz.</td><td>All</td></tr><tr><td>Language</td><td>name</td><td>titles</td><td>head</td><td/></tr><tr><td/><td>parts</td><td/><td>nouns</td><td/></tr><tr><td>English</td><td>99.2%</td><td>72.7%</td><td>94.4%</td><td>94.2%</td></tr><tr><td/><td>237/239</td><td>40/55</td><td>34/36</td><td>311/330</td></tr><tr><td>German</td><td>99.0%</td><td>86.7%</td><td>100.0%</td><td>97.5%</td></tr><tr><td/><td>104/105</td><td>13/15</td><td>1/1</td><td>118/121</td></tr><tr><td>Italian</td><td>94.1%</td><td>75.0%</td><td>100.0%</td><td>86.8%</td></tr><tr><td/><td>16/17</td><td>9/12</td><td>1/1</td><td>26/30</td></tr><tr><td>Spanish</td><td>100.0%</td><td>72.7%</td><td>100.0%</td><td>91.0%</td></tr><tr><td/><td>41/41</td><td>16/22</td><td>4/4</td><td>61/67</td></tr><tr><td>French</td><td>98.1%</td><td>61.2%</td><td>13.3%</td><td>69.1%</td></tr><tr><td/><td>51/52</td><td>52/85</td><td>2/15</td><td>105/152</td></tr><tr><td>Russian</td><td>100.0%</td><td>100.0%</td><td>-</td><td>100.0%</td></tr><tr><td/><td>45/45</td><td>7/7</td><td>0/0</td><td>52/52</td></tr><tr><td>Arabic</td><td>92.9%</td><td>100.0%</td><td>40.0%</td><td>90.6%</td></tr><tr><td/><td>92/99</td><td>2/2</td><td>2/5</td><td>96/106</td></tr><tr><td>Overall</td><td>98.0%</td><td>70.2%</td><td>71.0%</td><td>89.6%</td></tr><tr><td/><td colspan=\"2\">586/598 139/198</td><td>44/62</td><td>769/858</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"text": "Types of errors.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"4\">Person Person Organiz. All</td></tr><tr><td>Type of error</td><td>name</td><td>titles</td><td>head</td><td/></tr><tr><td/><td>parts</td><td/><td>nouns</td><td/></tr><tr><td>Indefinite NP</td><td/><td>18</td><td>13</td><td>32</td></tr><tr><td>Res. sparseness</td><td/><td>11</td><td>3</td><td>14</td></tr><tr><td>Different POS</td><td/><td>18</td><td>1</td><td>20</td></tr><tr><td>Error propag.</td><td>9</td><td/><td/><td>9</td></tr><tr><td>Other</td><td>3</td><td>12</td><td>1</td><td>16</td></tr><tr><td>Overall</td><td>12</td><td>59</td><td>18</td><td>89</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"text": "Summarization Results.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"7\">Summarizers Summary Size (number of sentences)</td></tr><tr><td/><td>1</td><td>3</td><td>5</td><td>10</td><td>15</td><td>20</td></tr><tr><td/><td/><td/><td/><td>English</td><td/><td/></tr><tr><td>LSA+Coref</td><td colspan=\"2\">1.0 .67</td><td>.6</td><td>.6</td><td>.5</td><td>.43</td></tr><tr><td>LSA</td><td>0</td><td>.67</td><td>.6</td><td>.6</td><td>.47</td><td>.45</td></tr><tr><td/><td/><td/><td/><td>French</td><td/><td/></tr><tr><td>LSA+Coref</td><td>.5</td><td>.67</td><td>.6</td><td colspan=\"2\">.55 .47</td><td>.43</td></tr><tr><td>LSA</td><td>0</td><td>.5</td><td>.6</td><td colspan=\"2\">.45 .47</td><td>.4</td></tr><tr><td/><td/><td/><td colspan=\"2\">German</td><td/><td/></tr><tr><td>LSA+Coref</td><td colspan=\"2\">1.0 .83</td><td>.7</td><td colspan=\"2\">.55 .47</td><td>.35</td></tr><tr><td>LSA</td><td>.5</td><td>.5</td><td>.7</td><td colspan=\"2\">.55 .43</td><td>.38</td></tr><tr><td/><td/><td/><td/><td>Spanish</td><td/><td/></tr><tr><td>LSA+Coref</td><td colspan=\"2\">1.0 .83</td><td>.7</td><td colspan=\"2\">.45 .37</td><td>.4</td></tr><tr><td>LSA</td><td>.5</td><td>.67</td><td>.5</td><td>.5</td><td>.37</td><td>.43</td></tr><tr><td/><td/><td/><td/><td>Russian</td><td/><td/></tr><tr><td>LSA+Coref</td><td colspan=\"2\">1.0 .67</td><td>.6</td><td colspan=\"2\">.65 .53</td><td>.6</td></tr><tr><td>LSA</td><td colspan=\"2\">1.0 .67</td><td>.6</td><td>.5</td><td>.57</td><td>.6</td></tr><tr><td/><td/><td/><td/><td>Arabic</td><td/><td/></tr><tr><td>LSA+Coref</td><td>0</td><td>.5</td><td>.7</td><td colspan=\"2\">.55 .47</td><td>.5</td></tr><tr><td>LSA</td><td>.5</td><td>.67</td><td>.5</td><td>.6</td><td>.53</td><td>.53</td></tr><tr><td/><td/><td/><td/><td>Czech</td><td/><td/></tr><tr><td>LSA+Coref</td><td>0</td><td>.67</td><td>.6</td><td>.5</td><td>.43</td><td>.48</td></tr><tr><td>LSA</td><td>.5</td><td>.67</td><td>.7</td><td>.7</td><td>.53</td><td>.48</td></tr><tr><td/><td/><td/><td/><td>Overall</td><td/><td/></tr><tr><td>LSA+Coref</td><td colspan=\"5\">.64 .69 .64 .55 .46</td><td>.45</td></tr><tr><td>LSA</td><td colspan=\"2\">.43 .62</td><td>.6</td><td colspan=\"2\">.56 .48</td><td>.46</td></tr><tr><td>Lead</td><td>-</td><td>-</td><td>.3</td><td colspan=\"2\">.25 .26</td><td>.25</td></tr><tr><td>Random</td><td colspan=\"5\">.22 .22 .22 .22 .22</td><td>.22</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |