{ "paper_id": "2020", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T01:59:20.681679Z" }, "title": "GAIA: A Fine-grained Multimedia Knowledge Extraction System", "authors": [ { "first": "Manling", "middle": [], "last": "Li", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Illinois at Urbana-Champaign", "location": {} }, "email": "manling2@illinois.edu" }, { "first": "Alireza", "middle": [], "last": "Zareian", "suffix": "", "affiliation": { "laboratory": "", "institution": "Columbia University", "location": {} }, "email": "" }, { "first": "Ying", "middle": [], "last": "Lin", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Illinois at Urbana-Champaign", "location": {} }, "email": "" }, { "first": "Xiaoman", "middle": [], "last": "Pan", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Illinois at Urbana-Champaign", "location": {} }, "email": "" }, { "first": "Spencer", "middle": [], "last": "Whitehead", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Illinois at Urbana-Champaign", "location": {} }, "email": "" }, { "first": "Brian", "middle": [], "last": "Chen", "suffix": "", "affiliation": { "laboratory": "", "institution": "Columbia University", "location": {} }, "email": "" }, { "first": "Bo", "middle": [], "last": "Wu", "suffix": "", "affiliation": { "laboratory": "", "institution": "Columbia University", "location": {} }, "email": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Illinois at Urbana-Champaign", "location": {} }, "email": "hengji@illinois.edu" }, { "first": "Shih-Fu", "middle": [], "last": "Chang", "suffix": "", "affiliation": { "laboratory": "", "institution": "Columbia University", "location": {} }, "email": "" }, { "first": "Clare", "middle": [], "last": "Voss", "suffix": "", "affiliation": { "laboratory": "US Army Research Laboratory", "institution": "", "location": {} }, "email": "" }, { "first": "Daniel", "middle": [], "last": "Napierski", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Marjorie", "middle": [], "last": "Freedman", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Russian", "middle": [], "last": "English", "suffix": "", "affiliation": {}, "email": "" }, { "first": "", "middle": [], "last": "Ukrainian", "suffix": "", "affiliation": {}, "email": "" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "We present the first comprehensive, open source multimedia knowledge extraction system that takes a massive stream of unstructured, heterogeneous multimedia data from various sources and languages as input, and creates a coherent, structured knowledge base, indexing entities, relations, and events, following a rich, fine-grained ontology. Our system, GAIA 1 , enables seamless search of complex graph queries, and retrieves multimedia evidence including text, images and videos. GAIA achieves top performance at the recent NIST TAC SM-KBP2019 evaluation 2. 
The system is publicly available at GitHub 3 and DockerHub 4 , with complete documentation 5 .", "pdf_parse": { "paper_id": "2020", "_pdf_hash": "", "abstract": [ { "text": "We present the first comprehensive, open source multimedia knowledge extraction system that takes a massive stream of unstructured, heterogeneous multimedia data from various sources and languages as input, and creates a coherent, structured knowledge base, indexing entities, relations, and events, following a rich, fine-grained ontology. Our system, GAIA 1 , enables seamless search of complex graph queries, and retrieves multimedia evidence including text, images and videos. GAIA achieves top performance at the recent NIST TAC SM-KBP2019 evaluation 2. The system is publicly available at GitHub 3 and DockerHub 4 , with complete documentation 5 .", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Knowledge Extraction (KE) aims to find entities, relations and events involving those entities from unstructured data, and link them to existing knowledge bases. Open source KE tools are useful for many real-world applications including disaster monitoring (Zhang et al., 2018a) , intelligence analysis (Li et al., 2019a) and scientific knowledge mining (Luan et al., 2017; Wang et al., 2019) . Recent years have witnessed the great success and wide usage of open source Natural Language Processing tools (Manning et al., 2014; Fader et al., 2011; Daniel Khashabi, 2018; Honnibal and Montani, 2017) , but there is no comprehensive open source system for KE. We release a new comprehensive KE system, GAIA, that advances the state of the art in two aspects: (1) it extracts and integrates knowledge across multiple languages and modalities, and (2) it classifies knowledge elements into fine-grained types, as shown in Table 1 . We also release the pretrained models 6 and provide a script to retrain them for any ontology.", "cite_spans": [ { "start": 257, "end": 278, "text": "(Zhang et al., 2018a)", "ref_id": "BIBREF50" }, { "start": 303, "end": 321, "text": "(Li et al., 2019a)", "ref_id": "BIBREF20" }, { "start": 354, "end": 373, "text": "(Luan et al., 2017;", "ref_id": "BIBREF26" }, { "start": 374, "end": 392, "text": "Wang et al., 2019)", "ref_id": "BIBREF47" }, { "start": 505, "end": 527, "text": "(Manning et al., 2014;", "ref_id": "BIBREF27" }, { "start": 528, "end": 547, "text": "Fader et al., 2011;", "ref_id": "BIBREF9" }, { "start": 548, "end": 570, "text": "Daniel Khashabi, 2018;", "ref_id": null }, { "start": 571, "end": 598, "text": "Honnibal and Montani, 2017)", "ref_id": "BIBREF13" }, { "start": 966, "end": 967, "text": "6", "ref_id": null } ], "ref_spans": [ { "start": 918, "end": 925, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "GAIA has been inherently designed for multimedia, which is rapidly replacing text-only data in many domains. We extract complementary knowledge from text as well as related images or video frames, and integrate the knowledge across modalities. Taking Figure 1 as an example, the text entity extraction system extracts the nominal mention troops, but is unable to link it or relate it to other entities due to the vague textual context. From the image, the entity linking system recognizes the flag as Ukrainian and represents it as a NationalityCitizen relation in the knowledge base. It can be tentatively deduced that the detected people are Ukrainian. 
Meanwhile, our cross-media fusion system grounds the troops to the people detected in the image. This establishes a connection between the knowledge [Figure 2: User-facing views of knowledge networks constructed with events automatically extracted from multimedia, multilingual news reports. We display the event arguments, type, summary, similar events, as well as visual knowledge extracted from the corresponding image and video.]", "cite_spans": [], "ref_spans": [ { "start": 251, "end": 259, "text": "Figure 1", "ref_id": "FIGREF0" }, { "start": 795, "end": 803, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "extracted from the two modalities, allowing us to infer that the troops are Ukrainian and that They refers to the Ukrainian government.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Compared to the coarse-grained event types of previous work (Li et al., 2019a) , we follow a richer ontology to extract fine-grained types, which are crucial to scenario understanding and event prediction. For example, an event of type Movement.TransportPerson involving an entity of type PER.Politician.HeadOfGovernment differs in implications from the same event type involving a PER.Combatant.Sniper entity (i.e., a political trip versus a military deployment). Similarly, it is far more likely that an event of type Conflict.Attack.Invade will lead to a Contact.Negotiate.Meet event, while a Conflict.Attack.Hanging event is more likely to be followed by an event of type Contact.FuneralVigil.Meet. [Table 1: Compared to the coarse-grained knowledge extraction of previous work, GAIA can support fine-grained entity, relation, and event extraction with types that are a superset of the previous coarse-grained types.]", "cite_spans": [ { "start": 56, "end": 74, "text": "(Li et al., 2019a)", "ref_id": "BIBREF20" } ], "ref_spans": [ { "start": 699, "end": 706, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The knowledge base extracted by GAIA can support various applications, such as multimedia news event understanding and recommendation. We use the Russia-Ukraine conflicts of 2014-2015 as a case study, and develop a knowledge exploration interface that recommends events related to the user's ongoing search based on previously-selected attribute values and dimensions of events being viewed 7 , as shown in Figure 2 . Thus, this system automatically provides the user with a more comprehensive exposure to collected events, their importance, and their interconnections. Extensions of this system to real-time applications would be particularly useful for tracking current events, providing alerts, and predicting possible changes in topics related to ongoing incidents.", "cite_spans": [], "ref_spans": [ { "start": 403, "end": 411, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The architecture of our multimedia knowledge extraction system is illustrated in Figure 3 . The system pipeline consists of a Text Knowledge Extraction (TKE) branch and a Visual Knowledge Extraction (VKE) branch (Sections 3 and 4 respectively). Each branch takes the same set of documents as input, and initially creates a separate knowledge base (KB) that encodes the information from its respective modality. 
Both output knowledge bases make use of the same types from the DARPA AIDA ontology 8 , as referred to in Table 1 . Therefore, while the branches both encode their modality-specific extractions into their KBs, they do so with types defined in the same semantic space. This shared space allows us to fuse the two KBs into a single, coherent multimedia KB through the Cross-Media Knowledge Fusion module (Section 5). Our user-facing system demo accesses one such resulting KB, where attack events have been extracted from multimedia documents related to the 2014-2015 Russia-Ukraine conflict scenario. In response to user queries, the system recommends information around a primary event and its connected events from the knowledge graph (screenshot in Figure 2 ).", "cite_spans": [], "ref_spans": [ { "start": 81, "end": 89, "text": "Figure 3", "ref_id": "FIGREF1" }, { "start": 519, "end": 526, "text": "Table 1", "ref_id": null }, { "start": 1164, "end": 1172, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Overview", "sec_num": "2" }, { "text": "As shown in Figure 3 , the Text Knowledge Extraction (TKE) system extracts entities, relations, and events from input documents. Then it clusters identical entities through entity linking and coreference, and clusters identical events using event coreference.", "cite_spans": [], "ref_spans": [ { "start": 12, "end": 20, "text": "Figure 3", "ref_id": "FIGREF1" } ], "eq_spans": [], "section": "Text Knowledge Extraction", "sec_num": "3" }, { "text": "8 https://tac.nist.gov/tracks/SM-KBP/2019/ontologies/LDCOntology", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Text Knowledge Extraction", "sec_num": "3" }, { "text": "Coarse-grained Mention Extraction We extract coarse-grained named and nominal entity mentions using an LSTM-CRF model. We use pretrained ELMo (Peters et al., 2018) word embeddings as input features for English, and pretrain Word2Vec (Le and Mikolov, 2014) models on Wikipedia data to generate Russian and Ukrainian word embeddings. Entity Linking and Coreference We seek to link the entity mentions to pre-existing entities in the background KBs (Pan et al., 2015) , including Freebase (LDC2015E42) and GeoNames (LDC2019E43). For mentions that are linkable to the same Freebase entity, coreference information is added. For name mentions that cannot be linked to the KB, we apply heuristic rules (Li et al., 2019b) to same-named mentions within each document to form NIL clusters. A NIL cluster is a cluster of entity mentions that refer to the same entity but have no corresponding KB entries (Ji et al., 2014) . Fine-grained Entity Typing We develop an attentive fine-grained type classification model with latent type representation. It takes as input a mention with its context sentence and predicts the most likely fine-grained type. We obtain the YAGO (Suchanek et al., 2008) fine-grained types from the results of Freebase entity linking, and map these types to the DARPA AIDA ontology. For mentions with identified, coarse-grained GPE and LOC types, we further determine their fine-grained types using the GeoNames attributes feature class and feature code from the GeoNames entity linking result. Given that most nominal mentions are descriptions and thus do not link to entries in Freebase or GeoNames, we develop a nominal keyword list (Li et al., 2019b) for each type to incorporate these mentions into the entity analyses. 
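To make this mapping step concrete, the following is a minimal sketch of the rule-based refinement described above; the mapping tables and field names are illustrative stand-ins, not GAIA's actual YAGO-to-AIDA or GeoNames-to-AIDA tables.

```python
# Hypothetical mapping tables; the real system maps YAGO types and
# GeoNames feature class/code pairs to the DARPA AIDA ontology.
YAGO_TO_AIDA = {
    "wordnet_politician": "PER.Politician",
    "wordnet_sniper": "PER.Combatant.Sniper",
}
GEONAMES_TO_AIDA = {
    ("P", "PPLC"): "GPE.UrbanArea.City",
    ("S", "BLDG"): "FAC.Building",
}
NOMINAL_KEYWORDS = {"troops": "PER.MilitaryPersonnel"}

def refine_type(mention):
    """Return the most specific fine-grained type supported by the evidence."""
    for yago_type in mention.get("yago_types", []):   # Freebase -> YAGO evidence
        if yago_type in YAGO_TO_AIDA:
            return YAGO_TO_AIDA[yago_type]
    if mention.get("coarse_type") in ("GPE", "LOC"):  # GeoNames evidence
        key = (mention.get("feature_class"), mention.get("feature_code"))
        if key in GEONAMES_TO_AIDA:
            return GEONAMES_TO_AIDA[key]
    head = mention.get("head")                        # nominal keyword list
    if head in NOMINAL_KEYWORDS:
        return NOMINAL_KEYWORDS[head]
    return mention.get("coarse_type")                 # fall back to the coarse type

print(refine_type({"coarse_type": "GPE", "feature_class": "P",
                   "feature_code": "PPLC"}))          # -> GPE.UrbanArea.City
```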
Entity Salience Ranking To better distill the information, we assign each entity a salience score in each document. We rank the entities in terms of the weighted sum of all mentions, with higher weights for name mentions. If one entity appears only in nominal and pronoun mentions, we reduce its salience score so that it is ranked below other entities with name mentions. The salience score is normalized over all entities in each document.", "cite_spans": [ { "start": 445, "end": 463, "text": "(Pan et al., 2015)", "ref_id": "BIBREF31" }, { "start": 695, "end": 713, "text": "(Li et al., 2019b)", "ref_id": null }, { "start": 896, "end": 913, "text": "(Ji et al., 2014)", "ref_id": "BIBREF17" }, { "start": 1161, "end": 1184, "text": "(Suchanek et al., 2008)", "ref_id": "BIBREF44" }, { "start": 1646, "end": 1664, "text": "(Li et al., 2019b)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Text Entity Extraction and Coreference", "sec_num": "3.1" }, { "text": "For fine-grained relation extraction, we first apply a language-independent CNN-based model (Shi et al., 2018) to extract coarse-grained relations from English, Russian and Ukrainian documents. Then we apply entity type constraints and dependency patterns to these detected relations and re-categorize them into fine-grained types (Li et al., 2019b) . To extract dependency paths for these relations in the three languages, we run the corresponding language's Universal Dependency parser (Nivre et al., 2016) . For types without coarse-grained type training data in ACE/ERE, we design dependency path-based patterns instead and implement a rule-based system to detect their fine-grained relations directly from the text (Li et al., 2019b) .", "cite_spans": [ { "start": 92, "end": 110, "text": "(Shi et al., 2018)", "ref_id": "BIBREF42" }, { "start": 331, "end": 349, "text": "(Li et al., 2019b)", "ref_id": null }, { "start": 488, "end": 508, "text": "(Nivre et al., 2016)", "ref_id": "BIBREF29" }, { "start": 719, "end": 737, "text": "(Li et al., 2019b)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Text Relation Extraction", "sec_num": "3.2" }, { "text": "We start by extracting coarse-grained events and arguments using a Bi-LSTM CRF model and a CNN-based model (Zhang et al., 2018b) for three languages, and then detect the fine-grained event types by applying verb-based rules, context-based rules, and argument-based rules (Li et al., 2019b) . We also extract FrameNet frames (Chen et al., 2010) in English corpora to enrich the fine-grained events.", "cite_spans": [ { "start": 106, "end": 127, "text": "(Zhang et al., 2018b)", "ref_id": "BIBREF52" }, { "start": 270, "end": 288, "text": "(Li et al., 2019b)", "ref_id": null }, { "start": 323, "end": 342, "text": "(Chen et al., 2010)", "ref_id": "BIBREF5" } ], "ref_spans": [], "eq_spans": [], "section": "Text Event Extraction and Coreference", "sec_num": "3.3" }, { "text": "We apply a graph-based algorithm (Al-Badrashiny et al., 2017) for our language-independent event coreference resolution. For each event type, we cast the event mentions as nodes in a graph, so that the undirected, weighted edges between these nodes represent coreference confidence scores between their corresponding events. 
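A minimal sketch of this graph construction and the clustering step described next, assuming a hypothetical pairwise scorer score(m1, m2) that returns a coreference confidence in [0, 1] (GAIA derives these confidences from learned features of the mention pairs):

```python
from itertools import combinations
import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform

def cluster_event_mentions(mentions, score, threshold=0.5):
    """Build the weighted coreference graph, then cluster its nodes."""
    if len(mentions) < 2:
        return [list(mentions)]
    n = len(mentions)
    dist = np.zeros((n, n))
    for i, j in combinations(range(n), 2):
        # Edge weight = coreference confidence; clustering expects a distance.
        dist[i, j] = dist[j, i] = 1.0 - score(mentions[i], mentions[j])
    tree = linkage(squareform(dist), method="average")
    labels = fcluster(tree, t=1.0 - threshold, criterion="distance")
    clusters = {}
    for mention, label in zip(mentions, labels):
        clusters.setdefault(label, []).append(mention)
    return list(clusters.values())
```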
We then apply hierarchical clustering to obtain event clusters and train a Maximum Entropy binary classifier on the cluster features (Li et al., 2019b) .", "cite_spans": [ { "start": 458, "end": 476, "text": "(Li et al., 2019b)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Text Event Extraction and Coreference", "sec_num": "3.3" }, { "text": "The Visual Knowledge Extraction (VKE) branch of GAIA takes images and video key frames as input and creates a single, coherent (visual) knowledge base, relying on the same ontology as GAIA's Text Knowledge Extraction (TKE) branch. Similar to TKE, the VKE consists of entity extraction, linking, and coreference modules. Our VKE system also extracts some events and relations.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Visual Knowledge Extraction", "sec_num": "4" }, { "text": "We use an ensemble of visual object detection and concept localization models to extract entities and some events from a given image. To detect generic objects such as person and vehicle, we employ two off-the-shelf Faster R-CNN models (Ren et al., 2015) trained on the Microsoft Common Objects in COntext (MS COCO) (Lin et al., 2014) and Open Images (Kuznetsova et al., 2018) datasets. To detect scenario-specific entities and events, we train a Class Activation Map (CAM) model (Zhou et al., 2016) in a weakly supervised manner using a combination of Open Images with image-level labels and Google image search.", "cite_spans": [ { "start": 316, "end": 334, "text": "(Lin et al., 2014)", "ref_id": "BIBREF23" }, { "start": 351, "end": 376, "text": "(Kuznetsova et al., 2018)", "ref_id": "BIBREF18" }, { "start": 480, "end": 499, "text": "(Zhou et al., 2016)", "ref_id": "BIBREF53" } ], "ref_spans": [], "eq_spans": [], "section": "Visual Entity Extraction", "sec_num": "4.1" }, { "text": "Given an image, each R-CNN model produces a set of labeled bounding boxes, and the CAM model produces a set of labeled heat maps, which are then thresholded to produce bounding boxes. The union of all bounding boxes is then post-processed by a set of heuristic rules to remove duplicates and ensure quality. We separately apply a face detector, MTCNN (Zhang et al., 2016) , and add the results to the pool of detected objects as additional person entities. Finally, we represent each detected bounding box as an entity in the visual knowledge base. Since the CAM model includes some event types, we create event entries (instead of entity entries) for bounding boxes classified as events.", "cite_spans": [ { "start": 350, "end": 370, "text": "(Zhang et al., 2016)", "ref_id": "BIBREF51" } ], "ref_spans": [], "eq_spans": [], "section": "Visual Entity Extraction", "sec_num": "4.1" }, { "text": "Once entities are added into the (visual) knowledge base, we try to link each entity to real-world entities in a curated background knowledge base. Due to the complexity of this task, we develop distinct models for each coarse-grained entity type. For the type person, we train a FaceNet model (Schroff et al., 2015) that takes each cropped human face (detected by the MTCNN model as mentioned in Section 4.1) and classifies it as one of the predetermined identities or none. We compile a list of recognizable and scenario-relevant identities by automatically searching for each person name in the background KB via Google Image Search, collecting top retrieved results that contain a face, training a binary classifier on half of the results, and evaluating on the other half. 
If the accuracy is higher than a threshold, we include that person name in our list of recognizable identities. For example, the visual entity in Figure 4 (a) is linked to the Wikipedia entry Rudy Giuliani 9 .", "cite_spans": [ { "start": 300, "end": 322, "text": "(Schroff et al., 2015)", "ref_id": "BIBREF41" } ], "ref_spans": [ { "start": 930, "end": 942, "text": "Figure 4 (a)", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Visual Entity Linking", "sec_num": "4.2" }, { "text": "To recognize location, facility, and organization entities, we use a DELF model (Noh et al., 2017) , pre-trained on Google Landmarks, to match each image that contains detected buildings against a predetermined list. We use a similar approach as mentioned above to create a list of recognizable, scenario-relevant landmarks, such as buildings and other types of structures that identify a specific location, facility, or organization. For example, the visual entity in Figure 4 (b) is linked to the Wikipedia entry Maidan Square 10 . Finally, to recognize geopolitical entities, we train a CNN to classify flags into a predetermined list of entities, such as all the nations in the world, for detection in our system. Take Figure 4 (c) as an example. The flags of Ukraine, the US and Russia are linked to the Wikipedia entries of the corresponding countries. Once a flag in an image is recognized, we apply a set of heuristic rules to create a nationality affiliation relationship in the knowledge base between some entities in the scene and the detected country. For instance, a person who is holding a Ukrainian flag would be affiliated with the country Ukraine. 9 https://en.wikipedia.org/wiki/Rudy_Giuliani 10 https://en.wikipedia.org/wiki/Maidan_Nezalezhnosti", "cite_spans": [ { "start": 80, "end": 98, "text": "(Noh et al., 2017)", "ref_id": "BIBREF30" } ], "ref_spans": [ { "start": 456, "end": 464, "text": "Figure 4", "ref_id": "FIGREF2" }, { "start": 708, "end": 720, "text": "Figure 4 (c)", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Visual Entity Linking", "sec_num": "4.2" }, { "text": "While we cast each detected bounding box as an entity node in the output knowledge base, we resolve potential coreferential links between them, since one unique real-world entity can be detected multiple times. Cross-image coreference resolution aims to identify the same entity appearing in multiple images, where the entities appear in different poses and from different angles. Take Figure 5 as an example. The red bounding boxes in these two images refer to the same person, so they are coreferential and are put into the same NIL cluster. Within-image coreference resolution requires the detection of duplicates, such as the duplicates in a collage image. To resolve entity coreference, we train an instance-matching CNN on the YouTube-BB dataset (Real et al., 2017) , where we ask the model to match an object bounding box to the same object in a different video frame, rather than to a different object. We use this model to extract features for each detected bounding box and run the DBSCAN (Ester et al., 1996) clustering algorithm on the box features across all images. The entities in the same cluster are coreferential, and are represented using a NIL cluster in the output (visual) KB. Similarly, we use a pretrained FaceNet (Schroff et al., 2015) model followed by DBSCAN to cluster face features. We also define heuristic rules to complement the aforementioned procedure in special cases. 
For example, if the entity linking process (Section 4.2) links multiple entities to the same real-world entity, we consider them coreferential. In addition, since face detection and person detection each produce an entity for the same person instance, we use their bounding box intersection to merge them into a single entity.", "cite_spans": [ { "start": 745, "end": 764, "text": "(Real et al., 2017)", "ref_id": "BIBREF37" }, { "start": 992, "end": 1012, "text": "(Ester et al., 1996)", "ref_id": "BIBREF8" }, { "start": 1231, "end": 1253, "text": "(Schroff et al., 2015)", "ref_id": "BIBREF41" } ], "ref_spans": [ { "start": 379, "end": 387, "text": "Figure 5", "ref_id": "FIGREF3" } ], "eq_spans": [], "section": "Visual Entity Coreference", "sec_num": "4.3" }, { "text": "Given a set of multimedia documents which consist of textual data, such as written articles and transcribed speech, as well as visual data, such as images and video key frames, the TKE and VKE branches of the system take their respective modality data as input, extract knowledge elements, and create separate knowledge bases. These textual and visual knowledge bases rely on the same ontology, but contain complementary information. Some knowledge elements in a document may not be explicitly mentioned in the text, but will appear visually, such as the Ukrainian flag in Figure 1 . Even coreferential knowledge elements that exist in both knowledge bases are not completely redundant, since each modality has its own unique granularity. For example, the word troops in text could be considered coreferential to the individuals in military uniform detected in the image, but the uniforms being worn may provide additional visual features useful in identifying the military ranks, organizations and nationalities of the individuals.", "cite_spans": [], "ref_spans": [ { "start": 573, "end": 581, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Cross-Media Knowledge Fusion", "sec_num": "5" }, { "text": "To exploit the complementary nature of the two modalities, we combine the two modality-specific knowledge bases into a single, coherent, multimedia knowledge base, where each knowledge element could be grounded in either or both modalities. To fuse the two bases, we develop a state-of-the-art visual grounding system (Akbari et al., 2019) to resolve entity coreference across modalities. More specifically, for each entity mention extracted from text, we feed its text along with the whole sentence into an ELMo model (Peters et al., 2018) that extracts contextualized features for the entity mention, and then we compare these features with CNN feature maps of the surrounding images. This leads to a relevance score for each image, as well as a granular relevance map (heatmap) within each image. For images that are relevant enough, we threshold the heatmap to obtain a bounding box, compare that box content with known visual entities, and assign it to the entity with the most overlapping match. If no overlapping entity is found, we create a new visual entity with the heatmap bounding box. Then we link the matching textual and visual entities using a NIL cluster. Additionally, with visual linking (Section 4.2), we corefer cross-modal entities that are linked to the same background KB node. 
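A schematic sketch of this grounding decision follows; the thresholds, box format, and entity records are simplified placeholders rather than GAIA's tuned values.

```python
def iou(a, b):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    union = area(a) + area(b) - inter
    return inter / union if union else 0.0

def ground_mention(mention_box, image_relevance, visual_entities,
                   rel_thresh=0.5, iou_thresh=0.3):
    """Attach a text mention's grounded box to a visual entity, or mint a new one."""
    if image_relevance < rel_thresh:          # image not relevant to this mention
        return None
    best = max(visual_entities,
               key=lambda e: iou(mention_box, e["box"]), default=None)
    if best is not None and iou(mention_box, best["box"]) >= iou_thresh:
        return best                           # corefer with an existing visual entity
    new_entity = {"box": mention_box}         # otherwise create a new visual entity
    visual_entities.append(new_entity)
    return new_entity
```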
The components are trained on publicly available datasets, including CoNLL-2003 (Sang and De Meulder, 2003) , ACE (Walker et al., 2006) , ERE (Song et al., 2015) , AIDA (LDC2018E01:AIDA Seedling Corpus V2.0), MS COCO (Lin et al., 2014) , FDDB (Jain and Learned-Miller, 2010) , LFW (Huang et al., 2008) , Oxf105k (Philbin et al., 2007) , YouTube-BB (Real et al., 2017) , and Flickr30k (Plummer et al., 2015).", "cite_spans": [ { "start": 1288, "end": 1303, "text": "-2003 (Sang and", "ref_id": "BIBREF40" }, { "start": 1304, "end": 1321, "text": "De Meulder, 2003)", "ref_id": "BIBREF40" }, { "start": 1328, "end": 1349, "text": "(Walker et al., 2006)", "ref_id": "BIBREF46" }, { "start": 1356, "end": 1375, "text": "(Song et al., 2015)", "ref_id": "BIBREF43" }, { "start": 1430, "end": 1448, "text": "(Lin et al., 2014)", "ref_id": "BIBREF23" }, { "start": 1456, "end": 1487, "text": "(Jain and Learned-Miller, 2010)", "ref_id": "BIBREF16" }, { "start": 1490, "end": 1514, "text": "LFW (Huang et al., 2008)", "ref_id": null }, { "start": 1525, "end": 1547, "text": "(Philbin et al., 2007)", "ref_id": "BIBREF34" }, { "start": 1560, "end": 1579, "text": "(Real et al., 2017)", "ref_id": "BIBREF37" } ], "ref_spans": [], "eq_spans": [], "section": "Cross-Media Knowledge Fusion", "sec_num": "5" }, { "text": "The performance of each component is shown in Table 2 . To evaluate the end-to-end performance, we participated with our system in the TAC SM-KBP 2019 evaluation 11 . The input corpus contains 1999 documents (756 English, 537 Russian, 703 Ukrainian), 6194 images, and 322 videos. We populated a multimedia, multilingual knowledge base with 457,348 entities, 67,577 relations, and 38,517 events. The system performance was evaluated based on its responses to class queries and graph queries 12 , and GAIA was awarded first place. Class queries evaluated cross-lingual, cross-modal, fine-grained entity extraction and coreference, where the query is an entity type, such as FAC.Building.GovernmentBuilding, and the result is a ranked list of entities of the given type. Our entity ranking is generated from the entity salience scores described in Section 3.1. The evaluation metric was Average Precision (AP): AP-B breaks ties by ranking all Right responses above all Wrong responses, AP-W breaks ties by ranking all Wrong responses above all Right responses, and AP-T breaks ties as in TREC Eval 13 . Graph queries evaluated cross-lingual, cross-modal, fine-grained relation extraction, event extraction and coreference, where the query is an argument role type of an event (e.g., Victim of Life.Die.DeathCausedByViolentEvents) or relation (e.g., Parent of PartWhole.Subsidiary) and the result is a list of entities with that role. The evaluation metrics were Precision, Recall, and F1.", "cite_spans": [], "ref_spans": [ { "start": 46, "end": 53, "text": "Table 2", "ref_id": "TABREF2" } ], "eq_spans": [], "section": "Quantitative Performance", "sec_num": "6.1" }, { "text": "To demonstrate the system, we have selected Ukraine-Russia Relations in 2014-2015 for a case study to visualize attack events, as extracted from the topic-related corpus released by LDC 14 . The system displays recommended events related to the user's ongoing search based on their previously-selected attribute values and dimensions of events being viewed, such as the fine-grained type, place, time, attacker, target, and instrument. 
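The following is a simplified, hypothetical sketch of this attribute-based recommendation; the event fields and scoring are illustrative, and the demo's actual ranking logic is richer.

```python
ATTRS = ("type", "place", "time", "attacker", "target", "instrument")

def recommend(current, events, selected_attrs=ATTRS, k=5):
    """Rank other events by how many selected attribute values they share."""
    def overlap(event):
        return sum(event.get(a) is not None and event.get(a) == current.get(a)
                   for a in selected_attrs)
    ranked = sorted((e for e in events if e is not current),
                    key=overlap, reverse=True)
    return ranked[:k]
```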
The demo is publicly available 15 with a user interface as shown in Figure 2 , displaying extracted text entities and events across languages, visual entities, visual entity linking and coreference results from face, landmark and flag recognition, and the results of grounding text entities to visual entities.", "cite_spans": [], "ref_spans": [ { "start": 503, "end": 511, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Qualitative Analysis", "sec_num": "6.2" }, { "text": "Existing knowledge extraction systems mainly focus on text (Manning et al., 2014; Fader et al., 2011; Daniel Khashabi, 2018; Honnibal and Montani, 2017; Li et al., 2019a) , and do not readily support fine-grained knowledge extraction. Visual knowledge extraction is typically limited to atomic, everyday concepts that have distinctive visual features (Ren et al., 2015; Schroff et al., 2015; Fern\u00e1ndez et al., 2017; Gu et al., 2018; Lin et al., 2014) , and so lacks more complex concepts, making the extracted elements challenging to integrate with text. Existing multimedia systems overlook the connections and distinctions between modalities (Yazici et al., 2018) . Our system makes use of a multi-modal ontology with concepts from real-world, newsworthy topics, resulting in rich cross-modal as well as intra-modal connectivity.", "cite_spans": [ { "start": 59, "end": 81, "text": "(Manning et al., 2014;", "ref_id": "BIBREF27" }, { "start": 82, "end": 101, "text": "Fader et al., 2011;", "ref_id": "BIBREF9" }, { "start": 102, "end": 124, "text": "Daniel Khashabi, 2018;", "ref_id": null }, { "start": 125, "end": 152, "text": "Honnibal and Montani, 2017;", "ref_id": "BIBREF13" }, { "start": 153, "end": 170, "text": "Li et al., 2019a)", "ref_id": "BIBREF20" }, { "start": 355, "end": 373, "text": "(Ren et al., 2015;", "ref_id": "BIBREF38" }, { "start": 374, "end": 395, "text": "Schroff et al., 2015;", "ref_id": "BIBREF41" }, { "start": 396, "end": 419, "text": "Fern\u00e1ndez et al., 2017;", "ref_id": "BIBREF10" }, { "start": 420, "end": 436, "text": "Gu et al., 2018;", "ref_id": "BIBREF12" }, { "start": 437, "end": 454, "text": "Lin et al., 2014)", "ref_id": "BIBREF23" }, { "start": 644, "end": 665, "text": "(Yazici et al., 2018)", "ref_id": "BIBREF49" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "7" }, { "text": "Innovations in technology often face the ethical dilemma of dual use: the same advance may offer potential benefits and harms (Ehni, 2008; Hovy and Spruit, 2016; Brundage et al., 2018) . We first discuss dual use 17 as it relates to this demo in particular, and then discuss two other considerations for applying this technology: data bias and privacy.", "cite_spans": [ { "start": 126, "end": 138, "text": "(Ehni, 2008;", "ref_id": "BIBREF7" }, { "start": 139, "end": 161, "text": "Hovy and Spruit, 2016;", "ref_id": "BIBREF14" }, { "start": 162, "end": 184, "text": "Brundage et al., 2018)", "ref_id": "BIBREF3" } ], "ref_spans": [], "eq_spans": [], "section": "Ethical Considerations 16", "sec_num": "8" }, { "text": "For our demo, the distinction between beneficial use and harmful use depends, in part, on the data. Proper use of the technology requires that input documents/images are legally and ethically obtained. Regulation and standards (e.g. GDPR 18 ) provide a legal framework for ensuring that such data is properly used and that any individual whose data is used has the right to request its removal. 
In the absence of such regulation, society relies on those who apply technology to ensure that data is used in an ethical way.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Ethical Considerations 16", "sec_num": "8" }, { "text": "Even if the data itself is obtained legally and ethically, the technology, when used for unintended purposes, can result in harmful outcomes. This demo organizes multimedia information to aid in navigating and understanding international events described in multiple sources. We have also applied the underlying technology to data that would aid natural disaster relief efforts (Zhang et al., 2018a) 19 and we are currently exploring the application of the models (with different data) to scientific literature and drug discovery (Wang et al., 2020) 20 .", "cite_spans": [ { "start": 375, "end": 399, "text": "(Zhang et al., 2018a) 19", "ref_id": null }, { "start": 527, "end": 546, "text": "(Wang et al., 2020)", "ref_id": null }, { "start": 547, "end": 549, "text": "20", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Ethical Considerations 16", "sec_num": "8" }, { "text": "One potential for harm could come if the technology were used for surveillance, especially in the context of targeting private citizens. Advances in technology require increased care when balancing potential benefits that come from preventing harmful activities (e.g. preventing human trafficking, preventing terrorism) against the potential for harm, such as when surveillance is applied too broadly (e.g. limiting speech, targeting vulnerable groups) or when system error could lead to false accusations. An additional potential harm could come from the output of the system being used in ways that magnify system errors or biases in its training data. Our demo is intended for human interpretation. Incorporating the system's output into an automatic decision-making system (forecasting, profiling, etc.) could be harmful.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Ethical Considerations 16", "sec_num": "8" }, { "text": "Training and assessment data is often biased in ways that limit system accuracy on less well-represented populations and in new domains, for example causing disparities in performance across subpopulations defined by ethnic, racial, gender, and other attributes (Buolamwini and Gebru, 2018; Rudinger et al., 2018) . Furthermore, trained systems degrade when used on new data that is distant from their training data. The performance of our system components as reported in Table 2 is based on the specific benchmark datasets, which could be affected by such data biases. Thus, questions concerning generalizability and fairness should be carefully considered. In our system, the linking of an entity to an external source (entity linking and facial recognition) is limited to entities in Wikipedia and the publicly available background knowledge bases (KBs) provided by LDC (LDC2015E42 and LDC2019E43). These sources introduce their own form of bias, which limits the demo's applicability in both the original and new contexts. Within the research community, addressing data bias requires a combination of new data sources, research that mitigates the impact of bias, and, as done in (Mitchell et al., 2019) , auditing data and models. 
Sections 3-5 cite data sources used for training to support future auditing.", "cite_spans": [ { "start": 265, "end": 293, "text": "(Buolamwini and Gebru, 2018;", "ref_id": "BIBREF4" }, { "start": 294, "end": 316, "text": "Rudinger et al., 2018)", "ref_id": "BIBREF39" }, { "start": 1186, "end": 1209, "text": "(Mitchell et al., 2019)", "ref_id": "BIBREF28" } ], "ref_spans": [ { "start": 476, "end": 483, "text": "Table 2", "ref_id": "TABREF2" } ], "eq_spans": [], "section": "Ethical Considerations 16", "sec_num": "8" }, { "text": "To understand, organize, and recommend information, our system aggregates information about people as reported in its input sources. For example, in addition to external KB linking, the system performs coreference on named people and uses text-visual grounding to link images to the surrounding context. Privacy concerns thus merit attention (Tucker, 2019) . The demo relies on publicly available, online sources released by the LDC 21 . When applying our system to other sources, care should be paid to privacy with respect to the intended application and the data that it uses. More generally, end-to-end algorithmic auditing should be conducted before the deployment of our software (Raji et al., 2020) .", "cite_spans": [ { "start": 343, "end": 357, "text": "(Tucker, 2019)", "ref_id": "BIBREF45" }, { "start": 434, "end": 436, "text": "21", "ref_id": "BIBREF1" }, { "start": 687, "end": 706, "text": "(Raji et al., 2020)", "ref_id": "BIBREF36" } ], "ref_spans": [], "eq_spans": [], "section": "Ethical Considerations 16", "sec_num": "8" }, { "text": "A general approach to ensure proper, rather than malicious, application of dual-use technology should: incorporate ethics considerations as first-order principles in every step of the system design, maintain a high degree of transparency and interpretability of data, algorithms, models, and functionality throughout the system, make software available as open source for public verification and auditing, and explore countermeasures to protect vulnerable groups.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Ethical Considerations 16", "sec_num": "8" }, { "text": "We demonstrate a state-of-the-art multimedia, multilingual knowledge extraction and event recommendation system. This system enables the user to readily search a knowledge network of extracted, linked, and summarized complex events from multimedia, multilingual sources (e.g., text, images, videos, speech and OCR).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "9" }, { "text": "Pretrained models: http://blender.cs.illinois.edu/resources/gaia.html", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "Event recommendation demo: http://blender.cs.illinois.edu/demo/video_recommendation/index_attack_dark.html", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "http://tac.nist.gov/2019/SM-KBP/index.html 12 http://tac.nist.gov/2019/SM-KBP/guidelines.html", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "https://trec.nist.gov/trec_eval/ 14 LDC2018E01, LDC2018E52, LDC2018E63, LDC2018E76, LDC2019E77 15 http://blender.cs.illinois.edu/demo/video_recommendation/index_attack_dark.html", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "This section was added after the conference. 
17 Dual-use items are goods, software, and technology that can be used for \"both civilian and military applications, and more broadly, toward beneficial and harmful ends\" (Brundage et al., 2018). 18 The General Data Protection Regulation of the European Union: https://gdpr.eu/what-is-gdpr/. 19 http://159.89.180.81:3300/elisa_ie/heatmap 20 http://blender.cs.illinois.edu/covid19/", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "This research is based upon work supported in part by U.S. DARPA AIDA Program No. FA8750-18-2-0014 and KAIROS Program No. FA8750-19-2-1004. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies, either expressed or implied, of DARPA, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for governmental purposes notwithstanding any copyright annotation therein.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgement", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Multi-level multimodal common semantic space for image-phrase grounding", "authors": [ { "first": "Hassan", "middle": [], "last": "Akbari", "suffix": "" }, { "first": "Svebor", "middle": [], "last": "Karaman", "suffix": "" }, { "first": "Surabhi", "middle": [], "last": "Bhargava", "suffix": "" }, { "first": "Brian", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Carl", "middle": [], "last": "Vondrick", "suffix": "" }, { "first": "Shih-Fu", "middle": [], "last": "Chang", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", "volume": "", "issue": "", "pages": "12476--12486", "other_ids": {}, "num": null, "urls": [], "raw_text": "Hassan Akbari, Svebor Karaman, Surabhi Bhargava, Brian Chen, Carl Vondrick, and Shih-Fu Chang. 2019. Multi-level multimodal common semantic space for image-phrase grounding. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 12476-12486.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "LDC2018E64, available to participants in NIST's TAC SM-KBP evaluation", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "LDC2018E64, available to participants in NIST's TAC SM-KBP evaluation: https://tac.nist.gov/2018/SM-KBP/data.html", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Tinkerbell: Cross-lingual cold-start knowledge base construction", "authors": [ { "first": "Mohamed", "middle": [], "last": "Al-Badrashiny", "suffix": "" }, { "first": "Jason", "middle": [], "last": "Bolton", "suffix": "" }, { "first": "Arun", "middle": [], "last": "Tejasvi Chaganty", "suffix": "" }, { "first": "Kevin", "middle": [], "last": "Clark", "suffix": "" }, { "first": "Craig", "middle": [], "last": "Harman", "suffix": "" }, { "first": "Lifu", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Matthew", "middle": [], "last": "Lamm", "suffix": "" }, { "first": "Jinhao", "middle": [], "last": "Lei", "suffix": "" }, { "first": "Di", "middle": [], "last": "Lu", "suffix": "" }, { "first": "Xiaoman", "middle": [], "last": "Pan", "suffix": "" } ], "year": 2017, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Mohamed Al-Badrashiny, Jason Bolton, Arun Tejasvi Chaganty, Kevin Clark, Craig Harman, Lifu Huang, Matthew Lamm, Jinhao Lei, Di Lu, Xiaoman Pan, et al. 2017. Tinkerbell: Cross-lingual cold-start knowledge base construction. In TAC.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "The malicious use of artificial intelligence: Forecasting, prevention, and mitigation", "authors": [ { "first": "Miles", "middle": [], "last": "Brundage", "suffix": "" }, { "first": "Shahar", "middle": [], "last": "Avin", "suffix": "" }, { "first": "Jack", "middle": [], "last": "Clark", "suffix": "" }, { "first": "Helen", "middle": [], "last": "Toner", "suffix": "" }, { "first": "Peter", "middle": [], "last": "Eckersley", "suffix": "" }, { "first": "Ben", "middle": [], "last": "Garfinkel", "suffix": "" }, { "first": "Allan", "middle": [], "last": "Dafoe", "suffix": "" }, { "first": "Paul", "middle": [], "last": "Scharre", "suffix": "" }, { "first": "Thomas", "middle": [], "last": "Zeitzoff", "suffix": "" }, { "first": "Bobby", "middle": [], "last": "Filar", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1802.07228" ] }, "num": null, "urls": [], "raw_text": "Miles Brundage, Shahar Avin, Jack Clark, Helen Toner, Peter Eckersley, Ben Garfinkel, Allan Dafoe, Paul Scharre, Thomas Zeitzoff, Bobby Filar, Hyrum Anderson, Heather Roff, Gregory C. Allen, Jacob Steinhardt, Carrick Flynn, Se\u00e1n \u00d3 h\u00c9igeartaigh, Simon Beard, Haydn Belfield, Sebastian Farquhar, Clare Lyle, Rebecca Crootof, Owain Evans, Michael Page, Joanna Bryson, Roman Yampolskiy, and Dario Amodei. 2018. The malicious use of artificial intelligence: Forecasting, prevention, and mitigation. 
arXiv:1802.07228.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Gender shades: Intersectional accuracy disparities in commercial gender classification", "authors": [ { "first": "Joy", "middle": [], "last": "Buolamwini", "suffix": "" }, { "first": "Timnit", "middle": [], "last": "Gebru", "suffix": "" } ], "year": 2018, "venue": "Conference on fairness, accountability and transparency", "volume": "", "issue": "", "pages": "77--91", "other_ids": {}, "num": null, "urls": [], "raw_text": "Joy Buolamwini and Timnit Gebru. 2018. Gender shades: Intersectional accuracy disparities in commercial gender classification. In Conference on fairness, accountability and transparency, pages 77-91.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Semafor: Frame argument resolution with log-linear models", "authors": [ { "first": "Desai", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Nathan", "middle": [], "last": "Schneider", "suffix": "" }, { "first": "Dipanjan", "middle": [], "last": "Das", "suffix": "" }, { "first": "Noah A", "middle": [], "last": "Smith", "suffix": "" } ], "year": 2010, "venue": "Proceedings of the 5th international workshop on semantic evaluation", "volume": "", "issue": "", "pages": "264--267", "other_ids": {}, "num": null, "urls": [], "raw_text": "Desai Chen, Nathan Schneider, Dipanjan Das, and Noah A Smith. 2010. Semafor: Frame argument resolution with log-linear models. In Proceedings of the 5th international workshop on semantic evaluation, pages 264-267. Association for Computational Linguistics.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Cogcompnlp: Your swiss army knife for nlp", "authors": [], "year": 2018, "venue": "11th Language Resources and Evaluation Conference", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ben Zhou Tom Redman Christos Christodoulopoulos Vivek Srikumar Nicholas Rizzolo Lev Ratinov Guanheng Luo Quang Do Chen-Tse Tsai Subhro Roy Stephen Mayhew Zhili Feng John Wieting Xiaodong Yu Yangqiu Song Shashank Gupta Shyam Upadhyay Naveen Arivazhagan Qiang Ning Shaoshi Ling Dan Roth Daniel Khashabi, Mark Sammons. 2018. Cogcompnlp: Your swiss army knife for nlp. In 11th Language Resources and Evaluation Conference.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Dual use and the ethical responsibility of scientists", "authors": [ { "first": "Hans-J\u00f6rg", "middle": [], "last": "Ehni", "suffix": "" } ], "year": 2008, "venue": "Archivum immunologiae et therapiae experimentalis", "volume": "56", "issue": "3", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Hans-J\u00f6rg Ehni. 2008. Dual use and the ethical responsibility of scientists. Archivum immunologiae et therapiae experimentalis, 56(3):147.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "A density-based algorithm for discovering clusters in large spatial databases with noise", "authors": [ { "first": "Martin", "middle": [], "last": "Ester", "suffix": "" }, { "first": "Hans-Peter", "middle": [], "last": "Kriegel", "suffix": "" }, { "first": "J\u00f6rg", "middle": [], "last": "Sander", "suffix": "" }, { "first": "Xiaowei", "middle": [], "last": "Xu", "suffix": "" } ], "year": 1996, "venue": "KDD", "volume": "96", "issue": "", "pages": "226--231", "other_ids": {}, "num": null, "urls": [], "raw_text": "Martin Ester, Hans-Peter Kriegel, J\u00f6rg Sander, Xiaowei Xu, et al. 1996. A density-based algorithm for discovering clusters in large spatial databases with noise. 
In KDD, volume 96, pages 226-231.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Identifying relations for open information extraction", "authors": [ { "first": "Anthony", "middle": [], "last": "Fader", "suffix": "" }, { "first": "Stephen", "middle": [], "last": "Soderland", "suffix": "" }, { "first": "Oren", "middle": [], "last": "Etzioni", "suffix": "" } ], "year": 2011, "venue": "Proceedings of the Conference of Empirical Methods in Natural Language Processing (EMNLP '11)", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Anthony Fader, Stephen Soderland, and Oren Etzioni. 2011. Identifying relations for open information extraction. In Proceedings of the Conference of Empirical Methods in Natural Language Processing (EMNLP '11), Edinburgh, Scotland, UK.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "Vits: video tagging system from massive web multimedia collections", "authors": [ { "first": "Delia", "middle": [], "last": "Fern\u00e1ndez", "suffix": "" }, { "first": "David", "middle": [], "last": "Varas", "suffix": "" }, { "first": "Joan", "middle": [], "last": "Espadaler", "suffix": "" }, { "first": "Issey", "middle": [], "last": "Masuda", "suffix": "" }, { "first": "Jordi", "middle": [], "last": "Ferreira", "suffix": "" }, { "first": "Alejandro", "middle": [], "last": "Woodward", "suffix": "" }, { "first": "David", "middle": [], "last": "Rodr\u00edguez", "suffix": "" }, { "first": "Xavier", "middle": [], "last": "Gir\u00f3-I Nieto", "suffix": "" }, { "first": "Juan", "middle": [ "Carlos" ], "last": "Riveiro", "suffix": "" }, { "first": "Elisenda", "middle": [], "last": "Bou", "suffix": "" } ], "year": 2017, "venue": "Proceedings of the IEEE International Conference on Computer Vision Workshops", "volume": "", "issue": "", "pages": "337--346", "other_ids": {}, "num": null, "urls": [], "raw_text": "Delia Fern\u00e1ndez, David Varas, Joan Espadaler, Issey Masuda, Jordi Ferreira, Alejandro Woodward, David Rodr\u00edguez, Xavier Gir\u00f3-i Nieto, Juan Carlos Riveiro, and Elisenda Bou. 2017. Vits: video tagging system from massive web multimedia collections. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 337-346.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Allennlp: A deep semantic natural language processing platform", "authors": [ { "first": "Matt", "middle": [], "last": "Gardner", "suffix": "" }, { "first": "Joel", "middle": [], "last": "Grus", "suffix": "" }, { "first": "Mark", "middle": [], "last": "Neumann", "suffix": "" }, { "first": "Oyvind", "middle": [], "last": "Tafjord", "suffix": "" }, { "first": "Pradeep", "middle": [], "last": "Dasigi", "suffix": "" }, { "first": "Nelson", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Matthew", "middle": [], "last": "Peters", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Schmitz", "suffix": "" }, { "first": "Luke", "middle": [], "last": "Zettlemoyer", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1803.07640" ] }, "num": null, "urls": [], "raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson Liu, Matthew Peters, Michael Schmitz, and Luke Zettlemoyer. 2018. Allennlp: A deep semantic natural language processing platform. 
arXiv preprint arXiv:1803.07640.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Ava: A video dataset of spatio-temporally localized atomic visual actions", "authors": [ { "first": "Chunhui", "middle": [], "last": "Gu", "suffix": "" }, { "first": "Chen", "middle": [], "last": "Sun", "suffix": "" }, { "first": "A", "middle": [], "last": "David", "suffix": "" }, { "first": "Carl", "middle": [], "last": "Ross", "suffix": "" }, { "first": "Caroline", "middle": [], "last": "Vondrick", "suffix": "" }, { "first": "Yeqing", "middle": [], "last": "Pantofaru", "suffix": "" }, { "first": "Sudheendra", "middle": [], "last": "Li", "suffix": "" }, { "first": "George", "middle": [], "last": "Vijayanarasimhan", "suffix": "" }, { "first": "Susanna", "middle": [], "last": "Toderici", "suffix": "" }, { "first": "Rahul", "middle": [], "last": "Ricco", "suffix": "" }, { "first": "", "middle": [], "last": "Sukthankar", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", "volume": "", "issue": "", "pages": "6047--6056", "other_ids": {}, "num": null, "urls": [], "raw_text": "Chunhui Gu, Chen Sun, David A Ross, Carl Vondrick, Caroline Pantofaru, Yeqing Li, Sudheendra Vijayanarasimhan, George Toderici, Susanna Ricco, Rahul Sukthankar, et al. 2018. Ava: A video dataset of spatio-temporally localized atomic visual actions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6047-6056.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "spacy 2: Natural language understanding with bloom embeddings, convolutional neural networks and incremental parsing", "authors": [ { "first": "Matthew", "middle": [], "last": "Honnibal", "suffix": "" }, { "first": "Ines", "middle": [], "last": "Montani", "suffix": "" } ], "year": 2017, "venue": "", "volume": "7", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Matthew Honnibal and Ines Montani. 2017. spacy 2: Natural language understanding with bloom embeddings, convolutional neural networks and incremental parsing. To appear, 7(1).", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "The social impact of natural language processing", "authors": [ { "first": "Dirk", "middle": [], "last": "Hovy", "suffix": "" }, { "first": "Shannon", "middle": [ "L" ], "last": "Spruit", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", "volume": "2", "issue": "", "pages": "591--598", "other_ids": { "DOI": [ "10.18653/v1/P16-2096" ] }, "num": null, "urls": [], "raw_text": "Dirk Hovy and Shannon L. Spruit. 2016. The social impact of natural language processing. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 591-598, Berlin, Germany. 
Association for Computational Linguistics.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Labeled faces in the wild: A database for studying face recognition in unconstrained environments", "authors": [ { "first": "B", "middle": [], "last": "Gary", "suffix": "" }, { "first": "Marwan", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Tamara", "middle": [], "last": "Mattar", "suffix": "" }, { "first": "Eric", "middle": [], "last": "Berg", "suffix": "" }, { "first": "", "middle": [], "last": "Learned-Miller", "suffix": "" } ], "year": 2008, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Gary B Huang, Marwan Mattar, Tamara Berg, and Eric Learned-Miller. 2008. Labeled faces in the wild: A database for studying face recognition in unconstrained environments.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "Fddb: A benchmark for face detection in unconstrained settings", "authors": [ { "first": "Vidit", "middle": [], "last": "Jain", "suffix": "" }, { "first": "Erik", "middle": [], "last": "Learned-Miller", "suffix": "" } ], "year": 2010, "venue": "UMass Amherst technical report", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Vidit Jain and Erik Learned-Miller. 2010. Fddb: A benchmark for face detection in unconstrained settings. Technical report, UMass Amherst technical report.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "Overview of tac-kbp2014 entity discovery and linking tasks", "authors": [ { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" }, { "first": "Joel", "middle": [], "last": "Nothman", "suffix": "" }, { "first": "Ben", "middle": [], "last": "Hachey", "suffix": "" } ], "year": 2014, "venue": "Proc. Text Analysis Conference (TAC2014)", "volume": "", "issue": "", "pages": "1333--1339", "other_ids": {}, "num": null, "urls": [], "raw_text": "Heng Ji, Joel Nothman, Ben Hachey, et al. 2014. Overview of tac-kbp2014 entity discovery and linking tasks. In Proc. Text Analysis Conference (TAC2014), pages 1333-1339.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale", "authors": [ { "first": "Alina", "middle": [], "last": "Kuznetsova", "suffix": "" }, { "first": "Hassan", "middle": [], "last": "Rom", "suffix": "" }, { "first": "Neil", "middle": [], "last": "Alldrin", "suffix": "" }, { "first": "Jasper", "middle": [], "last": "Uijlings", "suffix": "" }, { "first": "Ivan", "middle": [], "last": "Krasin", "suffix": "" }, { "first": "Jordi", "middle": [], "last": "Pont-Tuset", "suffix": "" }, { "first": "Shahab", "middle": [], "last": "Kamali", "suffix": "" }, { "first": "Stefan", "middle": [], "last": "Popov", "suffix": "" }, { "first": "Matteo", "middle": [], "last": "Malloci", "suffix": "" }, { "first": "Tom", "middle": [], "last": "Duerig", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1811.00982" ] }, "num": null, "urls": [], "raw_text": "Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Tom Duerig, et al. 2018. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. 
arXiv preprint arXiv:1811.00982.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Distributed representations of sentences and documents", "authors": [ { "first": "Quoc", "middle": [], "last": "Le", "suffix": "" }, { "first": "Tomas", "middle": [], "last": "Mikolov", "suffix": "" } ], "year": 2014, "venue": "International conference on machine learning", "volume": "", "issue": "", "pages": "1188--1196", "other_ids": {}, "num": null, "urls": [], "raw_text": "Quoc Le and Tomas Mikolov. 2014. Distributed representations of sentences and documents. In International conference on machine learning, pages 1188-1196.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Multilingual entity, relation, event and human value extraction", "authors": [ { "first": "Manling", "middle": [], "last": "Li", "suffix": "" }, { "first": "Ying", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Joseph", "middle": [], "last": "Hoover", "suffix": "" }, { "first": "Spencer", "middle": [], "last": "Whitehead", "suffix": "" }, { "first": "Clare", "middle": [], "last": "Voss", "suffix": "" }, { "first": "Morteza", "middle": [], "last": "Dehghani", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", "volume": "", "issue": "", "pages": "110--115", "other_ids": {}, "num": null, "urls": [], "raw_text": "Manling Li, Ying Lin, Joseph Hoover, Spencer Whitehead, Clare Voss, Morteza Dehghani, and Heng Ji. 2019a. Multilingual entity, relation, event and human value extraction. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 110-115.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "Gaia at sm-kbp 2019 - a multi-media multi-lingual knowledge extraction and hypothesis generation system", "authors": [ { "first": "", "middle": [], "last": "Zareian", "suffix": "" }, { "first": "Hassan", "middle": [], "last": "Akbari", "suffix": "" }, { "first": "Brian", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Bo", "middle": [], "last": "Wu", "suffix": "" }, { "first": "Emily", "middle": [], "last": "Allaway", "suffix": "" }, { "first": "Shih-Fu", "middle": [], "last": "Chang", "suffix": "" }, { "first": "Kathleen", "middle": [], "last": "McKeown", "suffix": "" }, { "first": "Yixiang", "middle": [], "last": "Yao", "suffix": "" }, { "first": "Jennifer", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Eric", "middle": [], "last": "Berquist", "suffix": "" }, { "first": "Kexuan", "middle": [], "last": "Sun", "suffix": "" }, { "first": "Xujun", "middle": [], "last": "Peng", "suffix": "" }, { "first": "Ryan", "middle": [], "last": "Gabbard", "suffix": "" }, { "first": "Marjorie", "middle": [], "last": "Freedman", "suffix": "" }, { "first": "Pedro", "middle": [], "last": "Szekely", "suffix": "" }, { "first": "T", "middle": [ "K", "Satish" ], "last": "Kumar", "suffix": "" }, { "first": "Arka", "middle": [], "last": "Sadhu", "suffix": "" }, { "first": "Ram", "middle": [], "last": "Nevatia", "suffix": "" }, { "first": "Miguel", "middle": [], "last": "Rodriguez", "suffix": "" }, { "first": "Yifan", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Yang", "middle": [], "last": "Bai", "suffix": "" }, { "first": "Ali", "middle": [], "last": "Sadeghian", "suffix": "" }, { "first": "Daisy", "middle": [ "Zhe" ], "last": "Wang", "suffix": "" } ], "year": 2019, "venue": "Proceedings of TAC KBP 2019, the 26th International Conference on Computational Linguistics: Technical Papers", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Zareian, Hassan Akbari, Brian Chen, Bo Wu, Emily Allaway, Shih-Fu Chang, Kathleen McKeown, Yixiang Yao, Jennifer Chen, Eric Berquist, Kexuan Sun, Xujun Peng, Ryan Gabbard, Marjorie Freedman, Pedro Szekely, T.K.
Satish Kumar, Arka Sadhu, Ram Nevatia, Miguel Rodriguez, Yifan Wang, Yang Bai, Ali Sadeghian, and Daisy Zhe Wang. 2019b. Gaia at sm-kbp 2019 - a multi-media multi-lingual knowledge extraction and hypothesis generation system. In Proceedings of TAC KBP 2019, the 26th International Conference on Computational Linguistics: Technical Papers.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Microsoft coco: Common objects in context", "authors": [ { "first": "Tsung-Yi", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Maire", "suffix": "" }, { "first": "Serge", "middle": [], "last": "Belongie", "suffix": "" }, { "first": "James", "middle": [], "last": "Hays", "suffix": "" }, { "first": "Pietro", "middle": [], "last": "Perona", "suffix": "" }, { "first": "Deva", "middle": [], "last": "Ramanan", "suffix": "" }, { "first": "Piotr", "middle": [], "last": "Doll\u00e1r", "suffix": "" }, { "first": "C Lawrence", "middle": [], "last": "Zitnick", "suffix": "" } ], "year": 2014, "venue": "European conference on computer vision", "volume": "", "issue": "", "pages": "740--755", "other_ids": {}, "num": null, "urls": [], "raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "An attentive fine-grained entity typing model with latent type representation", "authors": [ { "first": "Ying", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", "volume": "", "issue": "", "pages": "6198--6203", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ying Lin and Heng Ji. 2019. An attentive fine-grained entity typing model with latent type representation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 6198-6203.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Reliability-aware dynamic feature composition for name tagging", "authors": [ { "first": "Ying", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Liyuan", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" }, { "first": "Dong", "middle": [], "last": "Yu", "suffix": "" }, { "first": "Jiawei", "middle": [], "last": "Han", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "165--174", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ying Lin, Liyuan Liu, Heng Ji, Dong Yu, and Jiawei Han. 2019. Reliability-aware dynamic feature composition for name tagging.
In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 165-174.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Scientific information extraction with semi-supervised neural tagging", "authors": [ { "first": "Yi", "middle": [], "last": "Luan", "suffix": "" }, { "first": "Mari", "middle": [], "last": "Ostendorf", "suffix": "" }, { "first": "Hannaneh", "middle": [], "last": "Hajishirzi", "suffix": "" } ], "year": 2017, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1708.06075" ] }, "num": null, "urls": [], "raw_text": "Yi Luan, Mari Ostendorf, and Hannaneh Hajishirzi. 2017. Scientific information extraction with semi-supervised neural tagging. arXiv preprint arXiv:1708.06075.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "The stanford corenlp natural language processing toolkit", "authors": [ { "first": "Christopher", "middle": [ "D" ], "last": "Manning", "suffix": "" }, { "first": "Mihai", "middle": [], "last": "Surdeanu", "suffix": "" }, { "first": "John", "middle": [], "last": "Bauer", "suffix": "" }, { "first": "Jenny", "middle": [ "Rose" ], "last": "Finkel", "suffix": "" }, { "first": "Steven", "middle": [], "last": "Bethard", "suffix": "" }, { "first": "David", "middle": [], "last": "McClosky", "suffix": "" } ], "year": 2014, "venue": "Proceedings of 52nd annual meeting of the association for computational linguistics: system demonstrations", "volume": "", "issue": "", "pages": "55--60", "other_ids": {}, "num": null, "urls": [], "raw_text": "Christopher D Manning, Mihai Surdeanu, John Bauer, Jenny Rose Finkel, Steven Bethard, and David McClosky. 2014. The stanford corenlp natural language processing toolkit. In Proceedings of 52nd annual meeting of the association for computational linguistics: system demonstrations, pages 55-60.", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "Model cards for model reporting", "authors": [ { "first": "Margaret", "middle": [], "last": "Mitchell", "suffix": "" }, { "first": "Simone", "middle": [], "last": "Wu", "suffix": "" }, { "first": "Andrew", "middle": [], "last": "Zaldivar", "suffix": "" }, { "first": "Parker", "middle": [], "last": "Barnes", "suffix": "" }, { "first": "Lucy", "middle": [], "last": "Vasserman", "suffix": "" }, { "first": "Ben", "middle": [], "last": "Hutchinson", "suffix": "" }, { "first": "Elena", "middle": [], "last": "Spitzer", "suffix": "" }, { "first": "Inioluwa", "middle": [ "Deborah" ], "last": "Raji", "suffix": "" }, { "first": "Timnit", "middle": [], "last": "Gebru", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the Conference on Fairness, Accountability, and Transparency", "volume": "", "issue": "", "pages": "220--229", "other_ids": {}, "num": null, "urls": [], "raw_text": "Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru. 2019. Model cards for model reporting.
In Proceedings of the Conference on Fairness, Accountability, and Transparency, pages 220-229.", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "Universal dependencies v1: A multilingual treebank collection", "authors": [ { "first": "Joakim", "middle": [], "last": "Nivre", "suffix": "" }, { "first": "Marie-Catherine", "middle": [], "last": "De Marneffe", "suffix": "" }, { "first": "Filip", "middle": [], "last": "Ginter", "suffix": "" }, { "first": "Yoav", "middle": [], "last": "Goldberg", "suffix": "" }, { "first": "Jan", "middle": [], "last": "Hajic", "suffix": "" }, { "first": "Christopher", "middle": [ "D" ], "last": "Manning", "suffix": "" }, { "first": "Ryan", "middle": [], "last": "McDonald", "suffix": "" }, { "first": "Slav", "middle": [], "last": "Petrov", "suffix": "" }, { "first": "Sampo", "middle": [], "last": "Pyysalo", "suffix": "" }, { "first": "Natalia", "middle": [], "last": "Silveira", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", "volume": "", "issue": "", "pages": "1659--1666", "other_ids": {}, "num": null, "urls": [], "raw_text": "Joakim Nivre, Marie-Catherine De Marneffe, Filip Ginter, Yoav Goldberg, Jan Hajic, Christopher D Manning, Ryan McDonald, Slav Petrov, Sampo Pyysalo, Natalia Silveira, et al. 2016. Universal dependencies v1: A multilingual treebank collection. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 1659-1666.", "links": null }, "BIBREF30": { "ref_id": "b30", "title": "Large-scale image retrieval with attentive deep local features", "authors": [ { "first": "Hyeonwoo", "middle": [], "last": "Noh", "suffix": "" }, { "first": "Andre", "middle": [], "last": "Araujo", "suffix": "" }, { "first": "Jack", "middle": [], "last": "Sim", "suffix": "" }, { "first": "Tobias", "middle": [], "last": "Weyand", "suffix": "" }, { "first": "Bohyung", "middle": [], "last": "Han", "suffix": "" } ], "year": 2017, "venue": "Proceedings of the IEEE international conference on computer vision", "volume": "", "issue": "", "pages": "3456--3465", "other_ids": {}, "num": null, "urls": [], "raw_text": "Hyeonwoo Noh, Andre Araujo, Jack Sim, Tobias Weyand, and Bohyung Han. 2017. Large-scale image retrieval with attentive deep local features. In Proceedings of the IEEE international conference on computer vision, pages 3456-3465.", "links": null }, "BIBREF31": { "ref_id": "b31", "title": "Unsupervised entity linking with abstract meaning representation", "authors": [ { "first": "Xiaoman", "middle": [], "last": "Pan", "suffix": "" }, { "first": "Taylor", "middle": [], "last": "Cassidy", "suffix": "" }, { "first": "Ulf", "middle": [], "last": "Hermjakob", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" }, { "first": "Kevin", "middle": [], "last": "Knight", "suffix": "" } ], "year": 2015, "venue": "Proceedings of the 2015 conference of the north american chapter of the association for computational linguistics: Human language technologies", "volume": "", "issue": "", "pages": "1130--1139", "other_ids": {}, "num": null, "urls": [], "raw_text": "Xiaoman Pan, Taylor Cassidy, Ulf Hermjakob, Heng Ji, and Kevin Knight. 2015. Unsupervised entity linking with abstract meaning representation.
In Proceedings of the 2015 conference of the north american chapter of the association for computational linguistics: Human language technologies, pages 1130-1139.", "links": null }, "BIBREF32": { "ref_id": "b32", "title": "Crosslingual name tagging and linking for 282 languages", "authors": [ { "first": "Xiaoman", "middle": [], "last": "Pan", "suffix": "" }, { "first": "Boliang", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Jonathan", "middle": [], "last": "May", "suffix": "" }, { "first": "Joel", "middle": [], "last": "Nothman", "suffix": "" }, { "first": "Kevin", "middle": [], "last": "Knight", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" } ], "year": 2017, "venue": "Proc. the 55th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Xiaoman Pan, Boliang Zhang, Jonathan May, Joel Nothman, Kevin Knight, and Heng Ji. 2017. Cross-lingual name tagging and linking for 282 languages. In Proc. the 55th Annual Meeting of the Association for Computational Linguistics.", "links": null }, "BIBREF33": { "ref_id": "b33", "title": "Deep contextualized word representations", "authors": [ { "first": "Matthew", "middle": [ "E" ], "last": "Peters", "suffix": "" }, { "first": "Mark", "middle": [], "last": "Neumann", "suffix": "" }, { "first": "Mohit", "middle": [], "last": "Iyyer", "suffix": "" }, { "first": "Matt", "middle": [], "last": "Gardner", "suffix": "" }, { "first": "Christopher", "middle": [], "last": "Clark", "suffix": "" }, { "first": "Kenton", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Luke", "middle": [], "last": "Zettlemoyer", "suffix": "" } ], "year": 2018, "venue": "Proceedings of NAACL-HLT", "volume": "", "issue": "", "pages": "2227--2237", "other_ids": {}, "num": null, "urls": [], "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. In Proceedings of NAACL-HLT, pages 2227-2237.", "links": null }, "BIBREF34": { "ref_id": "b34", "title": "Object retrieval with large vocabularies and fast spatial matching", "authors": [ { "first": "James", "middle": [], "last": "Philbin", "suffix": "" }, { "first": "Ondrej", "middle": [], "last": "Chum", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Isard", "suffix": "" }, { "first": "Josef", "middle": [], "last": "Sivic", "suffix": "" }, { "first": "Andrew", "middle": [], "last": "Zisserman", "suffix": "" } ], "year": 2007, "venue": "2007 IEEE conference on computer vision and pattern recognition", "volume": "", "issue": "", "pages": "1--8", "other_ids": {}, "num": null, "urls": [], "raw_text": "James Philbin, Ondrej Chum, Michael Isard, Josef Sivic, and Andrew Zisserman. 2007. Object retrieval with large vocabularies and fast spatial matching. In 2007 IEEE conference on computer vision and pattern recognition, pages 1-8.
IEEE.", "links": null }, "BIBREF35": { "ref_id": "b35", "title": "Flickr30k entities: Collecting region-to-phrase correspondences for richer imageto-sentence models", "authors": [ { "first": "A", "middle": [], "last": "Bryan", "suffix": "" }, { "first": "Liwei", "middle": [], "last": "Plummer", "suffix": "" }, { "first": "Chris", "middle": [ "M" ], "last": "Wang", "suffix": "" }, { "first": "Juan", "middle": [ "C" ], "last": "Cervantes", "suffix": "" }, { "first": "Julia", "middle": [], "last": "Caicedo", "suffix": "" }, { "first": "Svetlana", "middle": [], "last": "Hockenmaier", "suffix": "" }, { "first": "", "middle": [], "last": "Lazebnik", "suffix": "" } ], "year": 2015, "venue": "Proceedings of the IEEE international conference on computer vision", "volume": "", "issue": "", "pages": "2641--2649", "other_ids": {}, "num": null, "urls": [], "raw_text": "Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. 2015. Flickr30k entities: Collecting region-to-phrase correspondences for richer image- to-sentence models. In Proceedings of the IEEE international conference on computer vision, pages 2641-2649.", "links": null }, "BIBREF36": { "ref_id": "b36", "title": "Closing the ai accountability gap: Defining an end-to-end framework for internal algorithmic auditing", "authors": [ { "first": "Andrew", "middle": [], "last": "Inioluwa Deborah Raji", "suffix": "" }, { "first": "Rebecca", "middle": [ "N" ], "last": "Smart", "suffix": "" }, { "first": "Margaret", "middle": [], "last": "White", "suffix": "" }, { "first": "Timnit", "middle": [], "last": "Mitchell", "suffix": "" }, { "first": "Ben", "middle": [], "last": "Gebru", "suffix": "" }, { "first": "Jamila", "middle": [], "last": "Hutchinson", "suffix": "" }, { "first": "Daniel", "middle": [], "last": "Smith-Loud", "suffix": "" }, { "first": "Parker", "middle": [], "last": "Theron", "suffix": "" }, { "first": "", "middle": [], "last": "Barnes", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency", "volume": "", "issue": "", "pages": "33--44", "other_ids": {}, "num": null, "urls": [], "raw_text": "Inioluwa Deborah Raji, Andrew Smart, Rebecca N. White, Margaret Mitchell, Timnit Gebru, Ben Hutchinson, Jamila Smith-Loud, Daniel Theron, and Parker Barnes. 2020. Closing the ai accountability gap: Defining an end-to-end framework for internal algorithmic auditing. In Proceedings of the 2020 Conference on Fairness, Accountability, and Trans- parency, pages 33-44.", "links": null }, "BIBREF37": { "ref_id": "b37", "title": "Youtubeboundingboxes: A large high-precision humanannotated data set for object detection in video", "authors": [ { "first": "Esteban", "middle": [], "last": "Real", "suffix": "" }, { "first": "Jonathon", "middle": [], "last": "Shlens", "suffix": "" }, { "first": "Stefano", "middle": [], "last": "Mazzocchi", "suffix": "" }, { "first": "Xin", "middle": [], "last": "Pan", "suffix": "" }, { "first": "Vincent", "middle": [], "last": "Vanhoucke", "suffix": "" } ], "year": 2017, "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", "volume": "", "issue": "", "pages": "5296--5305", "other_ids": {}, "num": null, "urls": [], "raw_text": "Esteban Real, Jonathon Shlens, Stefano Mazzocchi, Xin Pan, and Vincent Vanhoucke. 2017. Youtube- boundingboxes: A large high-precision human- annotated data set for object detection in video. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5296-5305.", "links": null }, "BIBREF38": { "ref_id": "b38", "title": "Faster r-cnn: Towards real-time object detection with region proposal networks", "authors": [ { "first": "Shaoqing", "middle": [], "last": "Ren", "suffix": "" }, { "first": "Kaiming", "middle": [], "last": "He", "suffix": "" }, { "first": "Ross", "middle": [], "last": "Girshick", "suffix": "" }, { "first": "Jian", "middle": [], "last": "Sun", "suffix": "" } ], "year": 2015, "venue": "Advances in neural information processing systems", "volume": "", "issue": "", "pages": "91--99", "other_ids": {}, "num": null, "urls": [], "raw_text": "Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 2015. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99.", "links": null }, "BIBREF39": { "ref_id": "b39", "title": "Gender bias in coreference resolution", "authors": [ { "first": "Rachel", "middle": [], "last": "Rudinger", "suffix": "" }, { "first": "Jason", "middle": [], "last": "Naradowsky", "suffix": "" }, { "first": "Brian", "middle": [], "last": "Leonard", "suffix": "" }, { "first": "Benjamin", "middle": [], "last": "Van Durme", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "2", "issue": "", "pages": "8--14", "other_ids": {}, "num": null, "urls": [], "raw_text": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14.", "links": null }, "BIBREF40": { "ref_id": "b40", "title": "Introduction to the conll-2003 shared task: Language-independent named entity recognition", "authors": [ { "first": "Erik", "middle": [ "F" ], "last": "Sang", "suffix": "" }, { "first": "Fien", "middle": [], "last": "De Meulder", "suffix": "" } ], "year": 2003, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Erik F Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. arXiv preprint cs/0306050.", "links": null }, "BIBREF41": { "ref_id": "b41", "title": "Facenet: A unified embedding for face recognition and clustering", "authors": [ { "first": "Florian", "middle": [], "last": "Schroff", "suffix": "" }, { "first": "Dmitry", "middle": [], "last": "Kalenichenko", "suffix": "" }, { "first": "James", "middle": [], "last": "Philbin", "suffix": "" } ], "year": 2015, "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", "volume": "", "issue": "", "pages": "815--823", "other_ids": {}, "num": null, "urls": [], "raw_text": "Florian Schroff, Dmitry Kalenichenko, and James Philbin. 2015. Facenet: A unified embedding for face recognition and clustering.
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 815-823.", "links": null }, "BIBREF42": { "ref_id": "b42", "title": "Genre separation network with adversarial training for cross-genre relation extraction", "authors": [ { "first": "Ge", "middle": [], "last": "Shi", "suffix": "" }, { "first": "Chong", "middle": [], "last": "Feng", "suffix": "" }, { "first": "Lifu", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Boliang", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" }, { "first": "Lejian", "middle": [], "last": "Liao", "suffix": "" }, { "first": "Heyan", "middle": [], "last": "Huang", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "1018--1023", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ge Shi, Chong Feng, Lifu Huang, Boliang Zhang, Heng Ji, Lejian Liao, and Heyan Huang. 2018. Genre separation network with adversarial training for cross-genre relation extraction. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1018-1023.", "links": null }, "BIBREF43": { "ref_id": "b43", "title": "From light to rich ere: annotation of entities, relations, and events", "authors": [ { "first": "Zhiyi", "middle": [], "last": "Song", "suffix": "" }, { "first": "Ann", "middle": [], "last": "Bies", "suffix": "" }, { "first": "Stephanie", "middle": [], "last": "Strassel", "suffix": "" }, { "first": "Tom", "middle": [], "last": "Riese", "suffix": "" }, { "first": "Justin", "middle": [], "last": "Mott", "suffix": "" }, { "first": "Joe", "middle": [], "last": "Ellis", "suffix": "" }, { "first": "Jonathan", "middle": [], "last": "Wright", "suffix": "" }, { "first": "Seth", "middle": [], "last": "Kulick", "suffix": "" }, { "first": "Neville", "middle": [], "last": "Ryant", "suffix": "" }, { "first": "Xiaoyi", "middle": [], "last": "Ma", "suffix": "" } ], "year": 2015, "venue": "Proceedings of the 3rd Workshop on EVENTS: Definition, Detection, Coreference, and Representation", "volume": "", "issue": "", "pages": "89--98", "other_ids": {}, "num": null, "urls": [], "raw_text": "Zhiyi Song, Ann Bies, Stephanie Strassel, Tom Riese, Justin Mott, Joe Ellis, Jonathan Wright, Seth Kulick, Neville Ryant, and Xiaoyi Ma. 2015. From light to rich ere: annotation of entities, relations, and events. In Proceedings of the 3rd Workshop on EVENTS: Definition, Detection, Coreference, and Representation, pages 89-98.", "links": null }, "BIBREF44": { "ref_id": "b44", "title": "Yago: A large ontology from wikipedia and wordnet", "authors": [ { "first": "Fabian", "middle": [ "M" ], "last": "Suchanek", "suffix": "" }, { "first": "Gjergji", "middle": [], "last": "Kasneci", "suffix": "" }, { "first": "Gerhard", "middle": [], "last": "Weikum", "suffix": "" } ], "year": 2008, "venue": "Journal of Web Semantics", "volume": "6", "issue": "3", "pages": "203--217", "other_ids": {}, "num": null, "urls": [], "raw_text": "Fabian M Suchanek, Gjergji Kasneci, and Gerhard Weikum. 2008. Yago: A large ontology from wikipedia and wordnet.
Journal of Web Semantics, 6(3):203-217.", "links": null }, "BIBREF45": { "ref_id": "b45", "title": "Privacy, algorithms, and artificial intelligence", "authors": [ { "first": "Catherine", "middle": [], "last": "Tucker", "suffix": "" } ], "year": 2019, "venue": "The Economics of Artificial Intelligence: An Agenda", "volume": "", "issue": "", "pages": "423--437", "other_ids": {}, "num": null, "urls": [], "raw_text": "Catherine Tucker. 2019. Privacy, algorithms, and artificial intelligence. In The Economics of Artificial Intelligence: An Agenda, pages 423-437. National Bureau of Economic Research Center.", "links": null }, "BIBREF46": { "ref_id": "b46", "title": "Ace 2005 multilingual training corpus. Linguistic Data Consortium", "authors": [ { "first": "Christopher", "middle": [], "last": "Walker", "suffix": "" }, { "first": "Stephanie", "middle": [], "last": "Strassel", "suffix": "" }, { "first": "Julie", "middle": [], "last": "Medero", "suffix": "" }, { "first": "Kazuaki", "middle": [], "last": "Maeda", "suffix": "" } ], "year": 2006, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Christopher Walker, Stephanie Strassel, Julie Medero, and Kazuaki Maeda. 2006. Ace 2005 multilingual training corpus. Linguistic Data Consortium, Philadelphia, 57.", "links": null }, "BIBREF47": { "ref_id": "b47", "title": "Paperrobot: Incremental draft generation of scientific ideas", "authors": [ { "first": "Qingyun", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Lifu", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Zhiying", "middle": [], "last": "Jiang", "suffix": "" }, { "first": "Kevin", "middle": [], "last": "Knight", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" }, { "first": "Mohit", "middle": [], "last": "Bansal", "suffix": "" }, { "first": "Yi", "middle": [], "last": "Luan", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1905.07870" ] }, "num": null, "urls": [], "raw_text": "Qingyun Wang, Lifu Huang, Zhiying Jiang, Kevin Knight, Heng Ji, Mohit Bansal, and Yi Luan. 2019. Paperrobot: Incremental draft generation of scientific ideas. arXiv preprint arXiv:1905.07870.", "links": null }, "BIBREF48": { "ref_id": "b48", "title": "Covid-19 literature knowledge graph construction and drug repurposing report generation",
"authors": [ { "first": "Qingyun", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Manling", "middle": [], "last": "Li", "suffix": "" }, { "first": "Xuan", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Nikolaus", "middle": [], "last": "Parulian", "suffix": "" }, { "first": "Guangxing", "middle": [], "last": "Han", "suffix": "" }, { "first": "Jiawei", "middle": [], "last": "Ma", "suffix": "" }, { "first": "Jingxuan", "middle": [], "last": "Tu", "suffix": "" }, { "first": "Ying", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Haoran", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Weili", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Aabhas", "middle": [], "last": "Chauhan", "suffix": "" }, { "first": "Yingjun", "middle": [], "last": "Guan", "suffix": "" }, { "first": "Bangzheng", "middle": [], "last": "Li", "suffix": "" }, { "first": "Ruisong", "middle": [], "last": "Li", "suffix": "" }, { "first": "Xiangchen", "middle": [], "last": "Song", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" }, { "first": "Jiawei", "middle": [], "last": "Han", "suffix": "" }, { "first": "Shih-Fu", "middle": [], "last": "Chang", "suffix": "" }, { "first": "James", "middle": [], "last": "Pustejovsky", "suffix": "" }, { "first": "David", "middle": [], "last": "Liem", "suffix": "" }, { "first": "Ahmed", "middle": [], "last": "Elsayed", "suffix": "" }, { "first": "Martha", "middle": [], "last": "Palmer", "suffix": "" }, { "first": "Jasmine", "middle": [], "last": "Rah", "suffix": "" }, { "first": "Clare", "middle": [], "last": "Voss", "suffix": "" }, { "first": "Cynthia", "middle": [], "last": "Schneider", "suffix": "" }, { "first": "Boyan", "middle": [], "last": "Onyshkevych", "suffix": "" } ], "year": 2020, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2007.00576" ] }, "num": null, "urls": [], "raw_text": "Qingyun Wang, Manling Li, Xuan Wang, Nikolaus Parulian, Guangxing Han, Jiawei Ma, Jingxuan Tu, Ying Lin, Haoran Zhang, Weili Liu, Aabhas Chauhan, Yingjun Guan, Bangzheng Li, Ruisong Li, Xiangchen Song, Heng Ji, Jiawei Han, Shih-Fu Chang, James Pustejovsky, David Liem, Ahmed Elsayed, Martha Palmer, Jasmine Rah, Clare Voss, Cynthia Schneider, and Boyan Onyshkevych. 2020. Covid-19 literature knowledge graph construction and drug repurposing report generation. In arXiv:2007.00576.", "links": null }, "BIBREF49": { "ref_id": "b49", "title": "An intelligent multimedia information system for multimodal content extraction and querying", "authors": [ { "first": "Adnan", "middle": [], "last": "Yazici", "suffix": "" }, { "first": "Murat", "middle": [], "last": "Koyuncu", "suffix": "" }, { "first": "Turgay", "middle": [], "last": "Yilmaz", "suffix": "" }, { "first": "Saeid", "middle": [], "last": "Sattari", "suffix": "" }, { "first": "Mustafa", "middle": [], "last": "Sert", "suffix": "" }, { "first": "Elvan", "middle": [], "last": "Gulen", "suffix": "" } ], "year": 2018, "venue": "Multimedia Tools and Applications", "volume": "77", "issue": "", "pages": "2225--2260", "other_ids": {}, "num": null, "urls": [], "raw_text": "Adnan Yazici, Murat Koyuncu, Turgay Yilmaz, Saeid Sattari, Mustafa Sert, and Elvan Gulen. 2018. An intelligent multimedia information system for multimodal content extraction and querying.
Multimedia Tools and Applications, 77(2):2225-2260.", "links": null }, "BIBREF50": { "ref_id": "b50", "title": "Elisa-edl: A cross-lingual entity extraction, linking and localization system", "authors": [ { "first": "Boliang", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Ying", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Xiaoman", "middle": [], "last": "Pan", "suffix": "" }, { "first": "Di", "middle": [], "last": "Lu", "suffix": "" }, { "first": "Jonathan", "middle": [], "last": "May", "suffix": "" }, { "first": "Kevin", "middle": [], "last": "Knight", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations", "volume": "", "issue": "", "pages": "41--45", "other_ids": {}, "num": null, "urls": [], "raw_text": "Boliang Zhang, Ying Lin, Xiaoman Pan, Di Lu, Jonathan May, Kevin Knight, and Heng Ji. 2018a. Elisa-edl: A cross-lingual entity extraction, linking and localization system. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pages 41-45.", "links": null }, "BIBREF51": { "ref_id": "b51", "title": "Joint face detection and alignment using multitask cascaded convolutional networks", "authors": [ { "first": "Kaipeng", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Zhanpeng", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Zhifeng", "middle": [], "last": "Li", "suffix": "" }, { "first": "Yu", "middle": [], "last": "Qiao", "suffix": "" } ], "year": 2016, "venue": "IEEE Signal Processing Letters", "volume": "23", "issue": "10", "pages": "1499--1503", "other_ids": {}, "num": null, "urls": [], "raw_text": "Kaipeng Zhang, Zhanpeng Zhang, Zhifeng Li, and Yu Qiao. 2016. Joint face detection and alignment using multitask cascaded convolutional networks.
IEEE Signal Processing Letters, 23(10):1499-1503.", "links": null }, "BIBREF52": { "ref_id": "b52", "title": "Gaia - a multi-media multi-lingual knowledge extraction and hypothesis generation system", "authors": [ { "first": "Tongtao", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Ananya", "middle": [], "last": "Subburathinam", "suffix": "" }, { "first": "Ge", "middle": [], "last": "Shi", "suffix": "" }, { "first": "Lifu", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Di", "middle": [], "last": "Lu", "suffix": "" }, { "first": "Xiaoman", "middle": [], "last": "Pan", "suffix": "" }, { "first": "Manling", "middle": [], "last": "Li", "suffix": "" }, { "first": "Boliang", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Qingyun", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Spencer", "middle": [], "last": "Whitehead", "suffix": "" }, { "first": "Heng", "middle": [], "last": "Ji", "suffix": "" }, { "first": "Alireza", "middle": [], "last": "Zareian", "suffix": "" }, { "first": "Hassan", "middle": [], "last": "Akbari", "suffix": "" }, { "first": "Brian", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Ruiqi", "middle": [], "last": "Zhong", "suffix": "" }, { "first": "Steven", "middle": [], "last": "Shao", "suffix": "" }, { "first": "Emily", "middle": [], "last": "Allaway", "suffix": "" }, { "first": "Shih-Fu", "middle": [], "last": "Chang", "suffix": "" }, { "first": "Kathleen", "middle": [], "last": "McKeown", "suffix": "" }, { "first": "Dongyu", "middle": [], "last": "Li", "suffix": "" }, { "first": "Xin", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Kexuan", "middle": [], "last": "Sun", "suffix": "" }, { "first": "Xujun", "middle": [], "last": "Peng", "suffix": "" }, { "first": "Ryan", "middle": [], "last": "Gabbard", "suffix": "" }, { "first": "Marjorie", "middle": [], "last": "Freedman", "suffix": "" }, { "first": "Mayank", "middle": [], "last": "Kejriwal", "suffix": "" }, { "first": "Ram", "middle": [], "last": "Nevatia", "suffix": "" }, { "first": "Pedro", "middle": [], "last": "Szekely", "suffix": "" }, { "first": "T", "middle": [ "K", "Satish" ], "last": "Kumar", "suffix": "" }, { "first": "Ali", "middle": [], "last": "Sadeghian", "suffix": "" }, { "first": "Giacomo", "middle": [], "last": "Bergami", "suffix": "" }, { "first": "Sourav", "middle": [], "last": "Dutta", "suffix": "" }, { "first": "Miguel", "middle": [], "last": "Rodriguez", "suffix": "" }, { "first": "Daisy", "middle": [ "Zhe" ], "last": "Wang", "suffix": "" } ], "year": 2018, "venue": "Proceedings of TAC KBP 2018, the 25th International Conference on Computational Linguistics: Technical Papers", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Tongtao Zhang, Ananya Subburathinam, Ge Shi, Lifu Huang, Di Lu, Xiaoman Pan, Manling Li, Boliang Zhang, Qingyun Wang, Spencer Whitehead, Heng Ji, Alireza Zareian, Hassan Akbari, Brian Chen, Ruiqi Zhong, Steven Shao, Emily Allaway, Shih-Fu Chang, Kathleen McKeown, Dongyu Li, Xin Huang, Kexuan Sun, Xujun Peng, Ryan Gabbard, Marjorie Freedman, Mayank Kejriwal, Ram Nevatia, Pedro Szekely, T.K. Satish Kumar, Ali Sadeghian, Giacomo Bergami, Sourav Dutta, Miguel Rodriguez, and Daisy Zhe Wang. 2018b. Gaia - a multi-media multi-lingual knowledge extraction and hypothesis generation system.
In Proceedings of TAC KBP 2018, the 25th International Conference on Computational Linguistics: Technical Papers.", "links": null }, "BIBREF53": { "ref_id": "b53", "title": "Learning deep features for discriminative localization", "authors": [ { "first": "Bolei", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Aditya", "middle": [], "last": "Khosla", "suffix": "" }, { "first": "Agata", "middle": [], "last": "Lapedriza", "suffix": "" }, { "first": "Aude", "middle": [], "last": "Oliva", "suffix": "" }, { "first": "Antonio", "middle": [], "last": "Torralba", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", "volume": "", "issue": "", "pages": "2921--2929", "other_ids": {}, "num": null, "urls": [], "raw_text": "Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. 2016. Learning deep features for discriminative localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2921-2929.", "links": null } }, "ref_entries": { "FIGREF0": { "num": null, "uris": null, "text": "An example of cross-media knowledge fusion and a look inside the visual knowledge extraction.", "type_str": "figure" }, "FIGREF1": { "num": null, "uris": null, "text": "The architecture of GAIA multimedia knowledge extraction.", "type_str": "figure" }, "FIGREF2": { "num": null, "uris": null, "text": "Examples of visual entity linking, based on face recognition, landmark recognition and flag recognition.", "type_str": "figure" }, "FIGREF3": { "num": null, "uris": null, "text": "The two green bounding boxes are coreferential since they are both linked to \"Kirstjen Nielsen\", and the two red bounding boxes are coreferential based on face features. The yellow bounding boxes are unlinkable and not coreferential with any other bounding boxes.", "type_str": "figure" }, "TABREF2": { "html": null, "content": "", "num": null, "text": "", "type_str": "table" }, "TABREF4": { "html": null, "content": "
", "num": null, "text": "GAIA achieves top performance on Task 1 at the recent NIST TAC SM-KBP2019 evaluation.", "type_str": "table" } } } }