|
{ |
|
"paper_id": "I17-1018", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:38:04.122765Z" |
|
}, |
|
"title": "Character-based Joint Segmentation and POS Tagging for Chinese using Bidirectional RNN-CRF", |
|
"authors": [ |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Shao", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "yan.shao@lingfil.uu.se" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Hardmeier", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "christian.hardmeier@lingfil.uu.se" |
|
}, |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Helsinki", |
|
"location": {} |
|
}, |
|
"email": "jorg.tiedemann@helsinki.fi" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "joakim.nivre@lingfil.uu.se" |
|
} |
|
],

"year": "",

"venue": null,

"identifiers": {},

"abstract": "We present a character-based model for joint segmentation and POS tagging for Chinese. The bidirectional RNN-CRF architecture for general sequence tagging is adapted and applied with novel vector representations of Chinese characters that capture rich contextual information and sub-character level features. The proposed model is extensively evaluated and compared with a state-of-the-art tagger on CTB5, CTB9 and UD Chinese, respectively. The experimental results indicate that our model is accurate and robust across datasets of different sizes, genres and annotation schemes. We obtain state-of-the-art performance on CTB5, achieving a 94.38 F1-score for joint segmentation and POS tagging.",
|
"pdf_parse": { |
|
"paper_id": "I17-1018", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{

"text": "We present a character-based model for joint segmentation and POS tagging for Chinese. The bidirectional RNN-CRF architecture for general sequence tagging is adapted and applied with novel vector representations of Chinese characters that capture rich contextual information and sub-character level features. The proposed model is extensively evaluated and compared with a state-of-the-art tagger on CTB5, CTB9 and UD Chinese, respectively. The experimental results indicate that our model is accurate and robust across datasets of different sizes, genres and annotation schemes. We obtain state-of-the-art performance on CTB5, achieving a 94.38 F1-score for joint segmentation and POS tagging.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{

"text": "Word segmentation and part-of-speech (POS) tagging are core steps for higher-level natural language processing (NLP) tasks. Given raw text, segmentation is applied first and POS tagging is then performed on top of the segmented words. As words in Chinese are by convention not delimited by spaces, segmentation is non-trivial, and its accuracy has a significant impact on POS tagging. Moreover, POS tags provide useful information for word segmentation. Thus, modelling word segmentation and POS tagging jointly can outperform pipeline models (Ng and Low, 2004; Zhang and Clark, 2008).",
|
"cite_spans": [ |
|
{ |
|
"start": 555, |
|
"end": 573, |
|
"text": "(Ng and Low, 2004;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 574, |
|
"end": 596, |
|
"text": "Zhang and Clark, 2008)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "POS tagging is a typical sequence tagging problem over segmented words, while segmentation can also be modelled as a character-level tagging problem via predicting labels that identify the word boundaries. Ng and Low (2004) propose a joint model which predicts the combinatory labels of segmentation boundaries and POS tags at the character level. Joint segmentation and POS tagging thereby becomes a standard character-based sequence tagging problem, and general machine learning algorithms for structured prediction can be applied.",
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 227, |
|
"text": "Ng and Low (2004)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "The bidirectional recurrent neural network (RNN) using conditional random fields (CRF) (Lafferty et al., 2001) as the output interface for sentence-level optimisation (BiRNN-CRF) achieves state-of-the-art accuracies on various sequence tagging tasks (Huang et al., 2015; Ma and Hovy, 2016) and outperforms traditional linear statistical models. RNNs with gated recurrent cells, such as long short-term memory (LSTM) (Hochreiter and Schmidhuber, 1997) and gated recurrent units (GRU), are capable of capturing long-range dependencies and retrieving rich global information. The sequential CRF on top of the recurrent layers ensures that the optimal sequence of tags over the entire sentence is obtained.",
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 110, |
|
"text": "(Lafferty et al., 2001)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 270, |
|
"text": "(Huang et al., 2015;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 289, |
|
"text": "Ma and Hovy, 2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 420, |
|
"end": 454, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "In this paper, we model joint segmentation and POS tagging as a fully character-based sequence tagging problem via predicting combinatory labels. The BiRNN-CRF architecture is adapted and applied. The Chinese characters are fed into the neural network as vector representations. In addition to utilising pre-trained character embeddings, we propose a concatenated n-gram representation of the characters. Furthermore, sub-character level information, namely radicals and orthographical features extracted by convolutional neural networks (CNNs), is also incorporated and tested. Three datasets of different sizes and genres and with different annotation schemes are employed for evaluation. Our model is thoroughly evaluated and compared with the joint segmentation and POS tagging model in ZPar (Zhang and Clark, 2010), which is a state-of-the-art joint tagger using structured perceptron and beam decoding. According to the experimental results, our proposed model outperforms ZPar on all the datasets in terms of accuracy.",
|
"cite_spans": [ |
|
{ |
|
"start": 546, |
|
"end": 552, |
|
"text": "(CNNs)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 826, |
|
"text": "(Zhang and Clark, 2010)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "The main contributions of this work include:\n1. We apply the BiRNN-CRF model for general sequence tagging to joint segmentation and POS tagging for Chinese and achieve state-of-the-art accuracy. The experimental results show that our tagger is robust and accurate across datasets of different sizes, genres and annotation schemes.\n2. We propose a novel approach for vector representations of characters that leads to substantial improvements over the baseline model.\n3. Additional improvements are obtained via exploring the feasibility of utilising sub-character level information.\n4. We provide an open-source implementation of our method along with pre-trained character embeddings. 1",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "Our baseline model is an adaptation of BiRNN-CRF. As illustrated in Figure 1, the Chinese characters are represented as vectors and fed into the bidirectional recurrent layers. The character representations are described in detail in the following sections. For the recurrent layer, we employ GRU as the basic recurrent unit, as it has similar functionality but fewer parameters compared to LSTM (Chung et al., 2014). Dropout (Srivastava et al., 2014) is applied to the outputs of the bidirectional recurrent layers. The outputs are concatenated and passed to the first-order chain CRF layer. The optimal sequence of combinatory labels is predicted at the end. A post-processing step retrieves both the segmentation and the POS tags from the combinatory tags.",
|
"cite_spans": [ |
|
{ |
|
"start": 402, |
|
"end": 422, |
|
"text": "(Chung et al., 2014)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 458, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 76, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Network Architecture", |
|
"sec_num": "2.1" |
|
}, |
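
{

"text": "To make the architecture concrete, the following is a minimal Python sketch of the baseline BiRNN-CRF in the TensorFlow 1.x style of our implementation; the vocabulary size, dimensions and tensor names are illustrative assumptions, not the released code.\n\nimport tensorflow as tf\n\n# inputs: [batch, max_len] character ids; labels: [batch, max_len] combinatory tag ids\nchar_ids = tf.placeholder(tf.int32, [None, None])\nlabels = tf.placeholder(tf.int32, [None, None])\nseq_lens = tf.placeholder(tf.int32, [None])\n\nembeddings = tf.get_variable('char_emb', [10000, 64])  # illustrative vocab size and dimension\ninputs = tf.nn.embedding_lookup(embeddings, char_ids)\n\n# bidirectional GRU layers over the character representations\ncell_fw = tf.contrib.rnn.GRUCell(200)\ncell_bw = tf.contrib.rnn.GRUCell(200)\n(out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=seq_lens, dtype=tf.float32)\n\n# dropout on the recurrent outputs (the dashed arrows in Figure 1), then concatenation\nout_fw = tf.nn.dropout(out_fw, keep_prob=0.5)\nout_bw = tf.nn.dropout(out_bw, keep_prob=0.5)\noutputs = tf.concat([out_fw, out_bw], axis=-1)\n\nnum_tags = 100  # number of combinatory labels after pruning (illustrative)\nscores = tf.layers.dense(outputs, num_tags)  # per-character conditional scores\n\n# first-order chain CRF for sentence-level optimisation\nlog_lik, transitions = tf.contrib.crf.crf_log_likelihood(scores, labels, seq_lens)\nloss = tf.reduce_mean(-log_lik)\ntrain_op = tf.train.AdagradOptimizer(0.1).minimize(loss)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Neural Network Architecture",

"sec_num": "2.1"

},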
|
{

"text": "Following the work of Kruengkrai et al. (2009a), the employed tags indicating the word boundaries are B, I, E and S, representing a character at the beginning, inside or end of a word, or as a single-character word. The CRF layer models conditional scores over all possible combinatory labels given the input characters. Incorporating the transition scores between successive labels, the optimal sequence can be obtained efficiently via the Viterbi algorithm, both for training and decoding. [Figure 1: The BiRNN-CRF model for joint Chinese segmentation and POS tagging. The dashed arrows indicate that dropout layers are applied to the outputs of the recurrent layers.]",
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 47, |
|
"text": "Kruengkrai et al. (2009a)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 389, |
|
"end": 397, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tagging Scheme", |
|
"sec_num": "2.2" |
|
}, |
|
{

"text": "1 https://github.com/yanshao9798/tagger [Figure 1 content: the input characters \u590f \u5929 (summer) \u592a (too) \u70ed (hot) are mapped to character representations, fed through forward and backward GRU layers and the CRF layer, and output as \u590f\u5929 NT, \u592a AD, \u70ed VA via the combinatory tags B-NT E-NT S-AD S-VA.]",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tagging Scheme", |
|
"sec_num": "2.2" |
|
}, |
|
{

"text": "The time complexity of the Viterbi algorithm is linear in the sentence length n, namely O(k^2 n), where k is a constant equal to the total number of combinatory labels. The efficiency can be improved if we reduce k. For some POS tags, combining them with the full set of boundary tags is redundant. For instance, only the functional word \u7684 can be tagged as DEG in Chinese Treebank (Xue et al., 2005). Since it is a single-character word, the combinatory tags B-DEG, I-DEG and E-DEG never occur in the experimental data and should therefore be pruned to reduce the search space. Similarly, if the maximum length of the words under a given POS tag is two in the training data, we prune the corresponding I-label.",
|
"cite_spans": [ |
|
{ |
|
"start": 386, |
|
"end": 404, |
|
"text": "(Xue et al., 2005)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tagging Scheme", |
|
"sec_num": "2.2" |
|
}, |
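
{

"text": "As a minimal sketch of this pruning (the data structures are assumptions, not the released code), the set of admissible combinatory labels can be collected directly from the training data, so that labels such as B-DEG or I-DEG that never occur are excluded from the CRF search space:\n\nfrom itertools import chain\n\ndef build_pruned_tagset(train_sents):\n    # train_sents: list of sentences, each a list of (word, pos) pairs\n    tags = set()\n    for word, pos in chain.from_iterable(train_sents):\n        if len(word) == 1:\n            tags.add('S-' + pos)\n        else:\n            tags.add('B-' + pos)\n            tags.add('E-' + pos)\n            if len(word) > 2:  # I- only occurs for words longer than two characters\n                tags.add('I-' + pos)\n    return sorted(tags)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Tagging Scheme",

"sec_num": "2.2"

},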
|
{ |
|
"text": "We propose three different approaches to effectively represent Chinese characters as vectors for the neural network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Character Representations", |
|
"sec_num": "2.3" |
|
}, |
|
{

"text": "The prevalent character-based neural models assume that larger spans of text, such as words and n-grams, can be represented by the sequences of characters they consist of. For example, the vector representation V_{m,n} of a span c_{m,n} is obtained by passing the vector representations v_i of the characters c_i to a function f as:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
|
{

"text": "[Figure 2 content: the pivot character \u592a in the context \u590f \u5929 \u592a \u70ed (summer) (too) (hot) is represented by incrementally concatenating V_{i,i}, V_{i\u22121,i} and V_{i\u22121,i+1} into the 3-gram character representation V^3.]",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{

"start": 0,

"end": 8,

"text": "EQUATION",

"ref_id": "EQREF",

"raw_str": "V_{m,n} = f(v_m, v_{m+1}, ..., v_n)",
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
|
{

"text": "where f is usually an RNN (Ling et al., 2015) or a CNN (dos Santos and Zadrozny, 2014). In this paper, instead of relying entirely on the BiRNN to extract contextual features from context-free character representations, we encode rich local information in the character vectors by employing the incrementally concatenated n-gram representation demonstrated in Figure 2 . In the example, the vector representation of the pivot character \u592a in the given context is the concatenation of the context-free vector representation",
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 45, |
|
"text": "(Ling et al., 2015)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 366, |
|
"end": 374, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
|
{

"text": "V_{i,i} of \u592a itself along with V_{i\u22121,i} of the bigram \u5929\u592a as well as V_{i\u22121,i+1} of the trigram \u5929\u592a\u70ed.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
|
{

"text": "Instead of constructing the vector representation V_{m,n} of an n-gram c_{m,n} from the character representations as in Equation 1,",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
|
{

"text": "the vectors V_{m,n} in different orders, such as V_{i,i}, V_{i\u22121,i}, and V_{i\u22121,i+1}",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
|
{

"text": ", are randomly initialised separately. We use a single special vector to represent all unknown n-grams of each order. The n-grams of different orders are then concatenated incrementally to form the vector representation of a Chinese character in the given context, which is passed on to the recurrent layers. As shown in Figure 2 , the neighbouring characters on both sides of the pivot character are taken into account.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 338, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Concatenated N-gram", |
|
"sec_num": "2.3.1" |
|
}, |
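
{

"text": "The following is a minimal sketch of the incremental concatenation for the 3-gram setting; the lookup tables emb and unk are assumptions standing in for the separately initialised n-gram embeddings:\n\nimport numpy as np\n\ndef concat_ngram_vector(chars, i, emb, unk):\n    # emb[n]: dict mapping an n-gram string to its vector; unk[n]: the single shared\n    # vector for unknown n-grams of order n\n    padded = ['<PAD>'] + list(chars) + ['<PAD>']\n    j = i + 1\n    uni = padded[j]                                   # V_{i,i}\n    bi = padded[j - 1] + padded[j]                    # V_{i-1,i}\n    tri = padded[j - 1] + padded[j] + padded[j + 1]   # V_{i-1,i+1}\n    parts = [emb[1].get(uni, unk[1]), emb[2].get(bi, unk[2]), emb[3].get(tri, unk[3])]\n    return np.concatenate(parts)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Concatenated N-gram",

"sec_num": "2.3.1"

},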
|
{

"text": "Chinese characters are logograms. As opposed to alphabetical languages, rich information is encoded in their graphical components. For instance, the Chinese characters that share the component \u9485 (gold) are all somewhat related to metals, such as \u94f6 (silver), \u94c1 (iron), \u9488 (needle) and so on. The shared component \u9485 is known as the radical, which functions as a semantic indicator. Hence, we investigate the effectiveness of using information below the character level for our task.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Radicals and Orthographical Features", |
|
"sec_num": "2.3.2" |
|
}, |
|
{

"text": "Radicals are first represented as randomly initialised vectors and concatenated as part of the character representations. Radicals are traditionally used as indices in Chinese dictionaries. In our approach, they are retrieved via the Unicode representations of Chinese characters, as the characters that share the same radical are grouped together. They are organised in accordance with the categorisation in the Kangxi Dictionary (\u5eb7\u7199\u5b57\u5178), in which all Chinese characters are grouped under 214 different radicals. We only employ the radicals of the common characters in the Unicode range U+4E00 to U+9FFF. For characters outside this range and for non-Chinese characters, we use a single special vector as their radical representation.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Radicals and Orthographical Features", |
|
"sec_num": "2.3.2" |
|
}, |
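
{

"text": "A minimal sketch of the radical lookup, assuming a precomputed table mapping each character to its Kangxi radical index (for instance derived from the Unihan kRSUnicode field; the table itself is an assumption of this sketch, not a released resource):\n\ndef radical_id(char, radical_table):\n    # radical_table: dict mapping a character to its Kangxi radical index (1-214)\n    cp = ord(char)\n    if 0x4E00 <= cp <= 0x9FFF and char in radical_table:\n        return radical_table[char]\n    return 0  # shared special index for out-of-range and non-Chinese characters",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Radicals and Orthographical Features",

"sec_num": "2.3.2"

},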
|
{

"text": "Additionally, instead of presuming that only radicals encode sub-character level information, we use convolutional neural networks (CNNs) to extract graphical features from scratch by regarding the Chinese characters as pictures and feeding their pixels as the input. As illustrated in Figure 3 , there are two convolutional layers, both followed by a max-pooling layer. The output of the second max-pooling layer is reshaped and passed to a regular fully-connected layer. Dropout is applied to the output of the fully-connected layer. The output is then concatenated as part of the character representation. The CNNs are trained jointly with the main network.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 291, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Radicals and Orthographical Features", |
|
"sec_num": "2.3.2" |
|
}, |
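
{

"text": "As a sketch of this sub-network in the same TensorFlow 1.x style (the image size, filter counts, kernel sizes and output dimension are illustrative assumptions, not the adopted hyper-parameters):\n\nimport tensorflow as tf\n\n# pixels: [batch, height, width, 1] grayscale renderings of the input characters\npixels = tf.placeholder(tf.float32, [None, 32, 32, 1])\nconv1 = tf.layers.conv2d(pixels, filters=32, kernel_size=5, activation=tf.nn.relu)\npool1 = tf.layers.max_pooling2d(conv1, pool_size=2, strides=2)\nconv2 = tf.layers.conv2d(pool1, filters=32, kernel_size=5, activation=tf.nn.relu)\npool2 = tf.layers.max_pooling2d(conv2, pool_size=2, strides=2)\nflat = tf.contrib.layers.flatten(pool2)  # reshape for the fully-connected layer\ngraphic = tf.layers.dense(flat, 100, activation=tf.nn.relu)\ngraphic = tf.layers.dropout(graphic, rate=0.5)\n# 'graphic' is concatenated with the other parts of the character representation",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Radicals and Orthographical Features",

"sec_num": "2.3.2"

},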
|
{ |
|
"text": "The context-free vector representations of single characters introduced in section 2.3.1 can be replaced by pre-trained character embeddings retrieved from large corpora. We employ GloVe (Pennington et al., 2014) to train our character embeddings on Wikipedia 2 and the freely available Sogou News Corpora (SogouCS). 3 We use randomly initialised vectors as the representations of the characters that are not in the embedding vocabulary. Pre-trained embeddings for higher-order n-grams are not employed in this paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 212, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 318, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-trained Character Embeddings", |
|
"sec_num": "2.3.3" |
|
}, |
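
{

"text": "A minimal sketch of how such an embedding matrix can be assembled, with random vectors for characters missing from the pre-trained file (the GloVe text format of one token followed by its values per line is assumed):\n\nimport numpy as np\n\ndef build_embedding_matrix(vocab, glove_path, dim=64):\n    # vocab: dict mapping a character to its row index\n    rng = np.random.RandomState(1)\n    matrix = rng.uniform(-0.1, 0.1, (len(vocab), dim)).astype('float32')\n    with open(glove_path, encoding='utf-8') as f:\n        for line in f:\n            fields = line.rstrip().split(' ')\n            if fields[0] in vocab and len(fields) == dim + 1:\n                matrix[vocab[fields[0]]] = [float(x) for x in fields[1:]]\n    return matrix",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pre-trained Character Embeddings",

"sec_num": "2.3.3"

},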
|
{

"text": "At the final decoding phase, we use ensemble decoding, a simple averaging technique, to mitigate deviations caused by the random weight initialisation of the neural network. For the chain CRF decoder, the final sequence of combinatory tags y is obtained via the conditional scores S(y_i|x_i) and the transition scores T(y_i, y_j) given the input sequence x. Instead of computing the optimal sequence with respect to the scores returned by a single model, both the conditional scores and the transition scores are averaged over four models with identical parameter settings that are trained independently:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Decoding", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{

"start": 0,

"end": 8,

"text": "EQUATION",

"ref_id": "EQREF",

"raw_str": "y^* = argmax_{y \u2208 L(x)} p(y|x; {S}, {T})",
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Ensemble Decoding", |
|
"sec_num": "2.4" |
|
}, |
|
{

"text": "Ensemble decoding is only applied, at the final testing phase, to the best performing model according to the feature experiments.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Decoding", |
|
"sec_num": "2.4" |
|
}, |
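
{

"text": "A minimal sketch of the averaging and the subsequent Viterbi decoding; the model interface (conditional_scores, transitions) is an assumption for illustration, not the released API:\n\nimport numpy as np\n\ndef ensemble_decode(models, sentence):\n    # average the conditional scores S and the transition scores T over the models\n    S = np.mean([m.conditional_scores(sentence) for m in models], axis=0)\n    T = np.mean([m.transitions for m in models], axis=0)\n    return viterbi(S, T)\n\ndef viterbi(S, T):\n    # S: [n, k] conditional scores; T: [k, k] transition scores\n    n, k = S.shape\n    best = np.zeros((n, k))\n    back = np.zeros((n, k), dtype=int)\n    best[0] = S[0]\n    for t in range(1, n):\n        cand = best[t - 1][:, None] + T + S[t][None, :]\n        back[t] = cand.argmax(axis=0)\n        best[t] = cand.max(axis=0)\n    path = [int(best[-1].argmax())]\n    for t in range(n - 1, 0, -1):\n        path.append(int(back[t][path[-1]]))\n    return path[::-1]  # the optimal sequence of combinatory tag indices",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Ensemble Decoding",

"sec_num": "2.4"

},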
|
{

"text": "Our neural networks are implemented using the TensorFlow 1.2.0 library (Abadi et al., 2016). We group sentences of similar lengths into the same buckets, and the sentences in each bucket are padded to the same length accordingly. We construct a separate sub-computational graph for each bucket. Thanks to this bucketing (see the sketch at the end of this section), the training and tagging speed of our neural network on GPU devices is drastically improved. The training time is proportional to both the size of the training set and the number of POS tags. Table 1 shows the adopted hyper-parameters. We use one set of parameters for all the experiments on the different datasets. The weights of the neural networks, including the randomly initialised embeddings, are initialised using the scheme introduced in Glorot and Bengio (2010). The network is trained with the error back-propagation algorithm. All the embeddings are fine-tuned during training by back-propagating gradients. Adagrad (Duchi et al., 2011) with mini-batches is employed for optimisation, with the initial learning rate \u03b7_0 = 0.1, which is updated with a decay rate \u03c1 = 0.05",
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 91, |
|
"text": "(Abadi et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 780, |
|
"end": 804, |
|
"text": "Glorot and Bengio (2010)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 962, |
|
"end": 982, |
|
"text": "(Duchi et al., 2011)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 531, |
|
"end": 538, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "3" |
|
}, |
|
{

"text": "as \u03b7_t = \u03b7_0 / (\u03c1(t\u22121) + 1)",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ", where t is the index of the current epoch.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "3" |
|
}, |
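
{

"text": "For concreteness, the decay schedule can be written as the following small function (the constants are those given above):\n\ndef learning_rate(t, eta0=0.1, rho=0.05):\n    # eta_t = eta_0 / (rho * (t - 1) + 1), with t the 1-indexed epoch\n    return eta0 / (rho * (t - 1) + 1.0)\n\n# epochs 1, 11 and 21 give learning rates 0.1, 0.0667 and 0.05 respectively",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implementation",

"sec_num": "3"

},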
|
{

"text": "The model is optimised with respect to the performance on the development sets. The F1-scores of both segmentation (F1_Seg) and joint POS tagging (F1_Seg&Tag) are combined as the product F1_Seg * F1_Seg&Tag to measure the performance of the model after each epoch during training. In our experiments, the models are trained for 30 epochs. To ensure that the weights are well optimised, we only adopt the best epoch after the model has been trained for at least 5 epochs.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "3" |
|
}, |
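
{

"text": "Returning to the bucketing mentioned above, a minimal sketch of grouping and padding (the bucket granularity and padding symbol are illustrative assumptions):\n\ndef bucket_sentences(sents, step=10):\n    # group sentences whose lengths fall into the same window, then pad within each bucket\n    buckets = {}\n    for s in sents:\n        key = ((len(s) - 1) // step + 1) * step\n        buckets.setdefault(key, []).append(s)\n    for max_len, group in sorted(buckets.items()):\n        yield max_len, [s + ['<PAD>'] * (max_len - len(s)) for s in group]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implementation",

"sec_num": "3"

},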
|
{

"text": "We employ three different datasets for our experiments, namely Chinese Treebank (Xue et al., 2005) versions 5.0 (CTB5) and 9.0 (CTB9), along with the Chinese section of Universal Dependencies (UD Chinese) (Nivre et al., 2016), version 1.4.",
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 98, |
|
"text": "(Xue et al., 2005)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 195, |
|
"end": 215, |
|
"text": "(Nivre et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{

"text": "CTB5 is the most frequently employed dataset for joint segmentation and POS tagging in previous research. It is composed of newswire data. We follow the conventional split of the dataset as in Jiang et al. (2008); Kruengkrai et al. (2009a); Zhang and Clark (2010). CTB9 consists of source texts in various genres, of which CTB5 is a subset. We split CTB9 by referring to the partition of CTB7 in Wang et al. (2011). We extend the training, development and test sets of CTB5 by adding 80% of the new data in CTB9 to training and 10% each to development and test. The double-checked files are all placed in the test set. The detailed splitting information can be found in Table 10 in the Appendix. UD Chinese has both universal and language-specific POS tags. They are not predicted jointly in this paper. For the sake of convenience, we refer to the universal tags as UD1 and to the language-specific ones as UD2 in the following sections. To make the model benefit from the pre-trained character embeddings, we convert the texts in UD Chinese from traditional Chinese into simplified Chinese. Table 2 shows brief statistics of the employed datasets in numbers of words.",
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 201, |
|
"text": "Jiang et al. (2008)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 229, |
|
"text": "Kruengkrai et al. (2009a)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 254, |
|
"text": "Zhang and Clark (2010)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 403, |
|
"text": "Wang et al. (2011)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1074, |
|
"end": 1081, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{

"text": "The out-of-vocabulary (OOV) words are counted regardless of the POS tags. We can see that UD Chinese is much smaller and has a notably higher OOV rate than the two CTB datasets.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{

"text": "Both segmentation (Seg) and joint segmentation and POS tagging (Seg&Tag) are evaluated in our experiments. 4 We employ word-level recall (R), precision (P) and F1-score (F) as the evaluation metrics. A series of feature experiments is carried out on the development sets to evaluate the effectiveness of the proposed approaches to vector representations of the characters. Finally, the best performing model according to the feature experiments is applied to the test sets, both as a single model and as an ensemble, and compared with ZPar. Table 3 shows the evaluation results of using concatenated n-grams up to different orders as the character representations. By introducing 2-grams, we obtain substantial improvements over solely using the conventional character embeddings, which indicates that not all local information can be effectively captured by the BiRNN from context-free character representations. Utilising the concatenated n-grams ensures that the same character has different but closely related representations in different contexts, which is an effective way to encode contextual features. From the table, we see that notable further improvements can be achieved via employing 3-grams. 4-grams still help, but only on CTB9, while adding 5-grams achieves almost no improvement on any of the datasets. The results imply that concatenating higher-order n-grams can be detrimental, especially on smaller datasets, due to the fact that higher-order n-grams are more sparse in the training data, so their vector representations cannot be trained well enough. Besides, adopting higher-order n-grams also substantially increases the number of weights, and therefore both training and decoding become less efficient. Under these circumstances, we consider the 3-gram model optimal for our task, and it is employed in the following experiments for all the datasets.",
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 108, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 541, |
|
"end": 548, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.2" |
|
}, |
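
{

"text": "For clarity, the word-level metrics can be computed by converting the predicted B/I/E/S(-POS) tag sequences back into word spans and matching them against the gold spans; this is a sketch of the standard procedure, not the evaluation script cited in footnote 4:\n\ndef tags_to_spans(tags):\n    # e.g. ['B-NT', 'E-NT', 'S-AD', 'S-VA'] -> [(0, 1, 'NT'), (2, 2, 'AD'), (3, 3, 'VA')]\n    spans, start = [], 0\n    for i, tag in enumerate(tags):\n        bound, pos = tag.split('-', 1)\n        if bound in ('B', 'S'):\n            start = i\n        if bound in ('E', 'S'):\n            spans.append((start, i, pos))\n    return spans\n\ndef f1(gold_spans, pred_spans):\n    g, p = set(gold_spans), set(pred_spans)\n    correct = len(g & p)\n    prec = correct / len(p) if p else 0.0\n    rec = correct / len(g) if g else 0.0\n    return 2 * prec * rec / (prec + rec) if prec + rec else 0.0\n\n# Seg F1 drops the POS field from the spans; Seg&Tag F1 keeps it",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Results",

"sec_num": "4.2"

},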
|
{

"text": "The concatenated n-grams yield a larger representation than the basic character vectors. We therefore conduct one additional experiment using a basic 1-gram character model with a larger character vector size of 300. The evaluation scores are similar to those of the basic character model with a size of 64, which shows that the improvements obtained by the n-gram model cannot be matched simply by enlarging the size of the vector representation.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Experiments", |
|
"sec_num": "4.2.1" |
|
}, |
|
{

"text": "The evaluation scores of the sub-character level features are reported in Table 4 . The relevant features are added on top of the 3-gram model. Employing radicals and graphical features achieves similar improvements for segmentation, while utilising radicals obtains better results for joint POS tagging on CTB5. However, radicals are not a very effective feature on CTB9, UD1 and UD2, whereas a notable enhancement is observed when employing graphical features on UD1. Using CNNs to extract graphical features is computationally much more expensive than simply adopting radicals via a lookup table. From Table 5 , we can see that employing pre-trained embeddings as initial vector representations for the characters achieves improvements in general, whereas the improvements are comparatively smaller if the concatenated n-gram representations and the radicals are added. Additionally, the improvements obtained on UD Chinese are more significant than on the CTBs, which indicates that the pre-trained character embeddings are more beneficial to smaller datasets.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 81, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 598, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Experiments", |
|
"sec_num": "4.2.1" |
|
}, |
|
{

"text": "In general, the feature experiments indicate that the proposed Chinese character representations are all sensitive to dataset size. Using higher-order n-grams requires more data for training. On the other hand, the pre-trained embeddings are more vital if the dataset is small. In addition, the different representations are sensitive to the tagging schemes, as the evaluation results on UD1 and UD2 are quite diverse. Taking both robustness and efficiency into consideration, we select 3-grams along with radicals and pre-trained character embeddings as the best setting for the final evaluation. Table 6 shows the final scores on the test sets. The complete evaluation results, in precision, recall and F1-scores, are contained in Table 11 and Table 12 in the Appendix. Our system is compared with ZPar. We retrained a ZPar model on CTB5 that reproduces the evaluation scores reported in Zhang and Clark (2010). We also modified the source code so that it is applicable to CTB9 and UD Chinese. In addition, we perform the mid-p McNemar's test (Fagerland et al., 2013) to examine statistical significance.",
|
"cite_spans": [ |
|
{ |
|
"start": 877, |
|
"end": 899, |
|
"text": "Zhang and Clark (2010)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1033, |
|
"end": 1057, |
|
"text": "(Fagerland et al., 2013)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 589, |
|
"end": 596, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 723, |
|
"end": 745, |
|
"text": "Table 11 and Table 12", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Experiments", |
|
"sec_num": "4.2.1" |
|
}, |
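
{

"text": "As a sketch of this significance test, following the mid-p formulation of Fagerland et al. (2013); the pairing of the two systems' per-item correctness is assumed to be given:\n\nfrom scipy.stats import binom\n\ndef mcnemar_midp(b, c):\n    # b, c: counts of items that exactly one of the two systems tags correctly\n    n, k = b + c, min(b, c)\n    # two-sided mid-p: full weight on counts more extreme than k, half weight on k itself\n    p = 2 * binom.cdf(k - 1, n, 0.5) + binom.pmf(k, n, 0.5)\n    return min(p, 1.0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature Experiments",

"sec_num": "4.2.1"

},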
|
{

"text": "As shown in Table 6 , the single model is worse than the ensemble model but still outperforms ZPar on all the tested datasets. ZPar incorporates discrete local features at both the character and the word level and employs a structured perceptron for global optimisation, whereas we encode rich local information in the character representations and employ a BiRNN to effectively extract global features and capture long-term dependencies. The chain CRF layer is used for sentence-level optimisation, which functions similarly to the structured perceptron. As opposed to taggers built with traditional machine learning algorithms, our model avoids heavy feature engineering and benefits from large plain texts via the pre-trained character embeddings. It is also very flexible to add sub-character level features as parts of the character representations. The model performs very well despite being fully character-based. Moreover, it has clear advantages when applied to smaller datasets like UD Chinese, while the advantage is much smaller on CTB5. Both our model and ZPar segment OOV words in UD Chinese with higher accuracies than in the CTBs, despite the fact that UD Chinese is notably smaller and its overall OOV rate is higher. Compared to CTB, the words in UD Chinese are more fine-grained and the average word length is shorter, which makes it easier for the tagger to correctly segment the OOV words, as longer words are more difficult to segment correctly. For joint POS tagging of OOV words, the two systems both perform significantly better on CTB5, as it is only composed of news text.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Final Results", |
|
"sec_num": "4.2.2" |
|
}, |
|
{

"text": "In general, our model is more robust to OOV words than ZPar, except that ZPar yields a better result for segmentation by a small margin on CTB9. ZPar also obtains higher accuracy for joint POS tagging than our single model on CTB9. The differences between ZPar and our model for both segmentation and POS tagging are more substantial on UD Chinese, which indicates that our model is relatively more advantageous for handling OOV words when the training sets are small, whereas ZPar is able to perform equally well when a substantial amount of training data is available, as the two achieve similar results on the CTB sets.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Results", |
|
"sec_num": "4.2.2" |
|
}, |
|
{

"text": "The single model is further improved by ensemble-averaging four independently trained models. The improvements are not drastic, but they are observed systematically across all the datasets. In general, ensemble decoding is beneficial to handling OOV words as well, except for a small drop for segmentation on CTB5. Table 7 shows the evaluation results on the subsets of CTB9 in different genres. Our model surpasses ZPar on all the genres in both segmentation and joint POS tagging. The differences are subtle on the genres in which the texts are normalised, such as News and Broadcast News. This largely explains why our model is only marginally better than ZPar on CTB5, whereas the experimental results reveal that our model is substantially better at processing non-standard text, as it yields significantly higher scores on Conversations, Short Messages and Weblogs. The evaluation results of both our model and ZPar vary substantially across the different genres, as some genres are fundamentally more challenging to process. Our models are compared with the previous best-performing systems on CTB5 in Table 8 . Our models are not optimised particularly with respect to CTB5 but still yield competitive results, especially for joint POS tagging. We are the first to report evaluation scores on CTB9 and UD Chinese.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1079, |
|
"end": 1086, |
|
"text": "Table 8", |
|
"ref_id": "TABREF13" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Final Results", |
|
"sec_num": "4.2.2" |
|
}, |
|
{

"text": "Our joint segmentation and POS tagger is very efficient with GPU devices and can practically be used for processing very large files. [Table 8 (F1-scores on CTB5, Seg / Seg&Tag): Kruengkrai et al. (2009b) 97.98 / 94.00; Zhang and Clark (2010) 97.78 / 93.67; Sun (2011) 98.17 / 94.02; Wang et al. (2011) 98.11 / 94.18; Shen et al. (2014) 98...] The memory demand of decoding is drastically lower than that of training, so a large batch size can be employed. The tagger takes constant time to build the sub-computational graphs and load the weights.",
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 139, |
|
"text": "Kruengkrai et al. (2009b) 97.98", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 146, |
|
"end": 174, |
|
"text": "Zhang and Clark (2010) 97.78", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 191, |
|
"text": "Sun (2011)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 222, |
|
"text": "Wang et al. (2011)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 253, |
|
"text": "Shen et al. (2014)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tagging Speed", |
|
"sec_num": "4.3" |
|
}, |
|
{

"text": "With a bucket size of 10 and a batch size of 500, Table 9 shows the tagging speed of the tagger using a single Tesla K80 GPU card and the pre-trained model on CTB5. The tagging speed of ZPar is also presented for comparison. GPU devices are not supported by ZPar, and its tagging speed is therefore measured using an Intel Core i7 CPU.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tagging Speed", |
|
"sec_num": "4.3" |
|
}, |
|
{

"text": "The fundamental BiRNN-CRF architecture is task-independent and has been applied to many sequence tagging problems in Chinese. Peng and Dredze (2016) adopt the model for Chinese segmentation and named entity recognition in the context of multi-task and multi-domain learning. Dong et al. (2016) employ a character-level BiLSTM-CRF model that utilises radical-level information for Chinese named entity recognition. Ma and Sun (2016) use a similar architecture but feed the Chinese characters pairwise as edge embeddings instead. Their model is applied respectively to chunking, segmentation and POS tagging. Zheng et al. (2013) model joint Chinese segmentation and POS tagging via predicting the combinatory segmentation and POS tags. They employ an adaptation of the feed-forward neural network introduced in Collobert et al. (2011) that only extracts local features in a context window. A perceptron-style training algorithm is employed for sentence-level optimisation, which is the same as the training algorithm of the BiRNN-CRF model. Their proposed model is not evaluated on CTB5 and is therefore difficult to compare with our system. Kong et al. (2015) apply segmental recurrent neural networks to joint segmentation and POS tagging, but the evaluation results are substantially below the state of the art on CTB5. Bojanowski et al. (2016) retrieve word embeddings via representing words as bags of character n-grams for morphologically rich languages. A similar character n-gram model is proposed by Wieting et al. (2016) . Sun et al. (2014) attempt to encode radical information into the conventional character embeddings. The radical-enhanced embeddings are employed and evaluated for Chinese segmentation. The results show that the radical-enhanced embeddings outperform both the skip-gram and continuous bag-of-words models (Mikolov et al., 2013) in word2vec.",
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 148, |
|
"text": "Peng and Dredze (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 293, |
|
"text": "Dong et al. (2016)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 430, |
|
"text": "Ma and Sun (2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 625, |
|
"text": "Zheng et al. (2013)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 809, |
|
"end": 832, |
|
"text": "Collobert et al. (2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1141, |
|
"end": 1159, |
|
"text": "Kong et al. (2015)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1321, |
|
"end": 1345, |
|
"text": "Bojanowski et al. (2016)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1508, |
|
"end": 1529, |
|
"text": "Wieting et al. (2016)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1532, |
|
"end": 1549, |
|
"text": "Sun et al. (2014)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1819, |
|
"end": 1841, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{

"text": "We adapt and apply the BiRNN-CRF model for sequence tagging in NLP to joint Chinese segmentation and POS tagging via predicting combinatory tags of word boundaries and POS tags. Concatenated n-grams as well as sub-character features are employed along with the conventional pre-trained character embeddings as the vector representations of Chinese characters. The feature experiments indicate that the concatenated n-grams contribute substantially. However, both radicals and graphical features as sub-character level information are less effective. How to incorporate sub-character level information more effectively will be explored further in the future.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{

"text": "The proposed model is extensively evaluated on CTB5, CTB9 and UD Chinese. Despite the fact that the different character representation approaches are sensitive to data size and tagging schemes, we use one set of hyper-parameters and universal feature settings so that the model is robust across datasets. The experimental results on the test sets show that our model outperforms ZPar, which is built on a structured perceptron, on all the datasets. We obtain state-of-the-art performance on CTB5.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{

"text": "The results on UD Chinese and CTB9 also reveal that our model has great advantages in processing non-standard text, such as weblogs, forum text and short messages. Moreover, the implemented tagger is very efficient with GPU devices and can therefore be applied to tagging very large files. Table 12 : Evaluation of joint segmentation and POS tagging in precision, recall and F1-scores",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 298, |
|
"text": "Table 12", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{

"text": "2 https://dumps.wikimedia.org/ 3 http://www.sogou.com/labs/resource/cs.php",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{

"text": "The evaluation script is downloaded from: http://people.sutd.edu.sg/~yue_zhang/doc/doc/joint_files/evaluate.py",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We acknowledge the computational resources provided by CSC in Helsinki and Sigma2 in Oslo through NeIC-NLPL (www.nlpl.eu). This work is supported by the Chinese Scholarship Council (CSC) (No. 201407930015).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{

"text": "[Table 10 header: Dataset / CTB chapter IDs; only the Train row label was recovered]",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendix", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "TensorFlow: A system for largescale machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Mart\u00edn", |
|
"middle": [], |
|
"last": "Abadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Barham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianmin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Devin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Ghemawat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Irving", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Isard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 12th USENIX Symposium on Operating Systems Design and Implementation (OSDI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mart\u00edn Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, et al. 2016. TensorFlow: A system for large- scale machine learning. In Proceedings of the 12th USENIX Symposium on Operating Systems Design and Implementation (OSDI). Savannah, Georgia, USA.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.04606" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2016. Enriching word vec- tors with subword information. arXiv preprint arXiv:1607.04606 .", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "On the properties of neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.1259" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart Van Merri\u00ebnboer, Dzmitry Bah- danau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder ap- proaches. arXiv preprint arXiv:1409.1259 .", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Empirical evaluation of gated recurrent neural networks on sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Junyoung", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.3555" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence model- ing. arXiv preprint arXiv:1412.3555 .", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Natural language processing (almost) from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2493--2537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research 12(August):2493-2537.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Characterbased LSTM-CRF with radical-level features for Chinese named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Chuanhai", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masanori", |
|
"middle": [], |
|
"last": "Hattori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Di", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Conference on Computer Processing of Oriental Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "239--250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuanhai Dong, Jiajun Zhang, Chengqing Zong, Masanori Hattori, and Hui Di. 2016. Character- based LSTM-CRF with radical-level features for Chinese named entity recognition. In International Conference on Computer Processing of Oriental Languages. Springer, pages 239-250.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Learning character-level representations for part-of-speech tagging", |
|
"authors": [ |
|
{

"first": "C\u00edcero",

"middle": [

"Nogueira"

],

"last": "dos Santos",

"suffix": ""

},

{

"first": "Bianca",

"middle": [],

"last": "Zadrozny",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of The 31st International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1818--1826", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C\u00edcero Nogueira dos Santos and Bianca Zadrozny. 2014. Learning character-level representations for part-of-speech tagging. In Proceedings of The 31st International Conference on Machine Learn- ing. pages 1818-1826.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Adaptive subgradient methods for online learning and stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Duchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elad", |
|
"middle": [], |
|
"last": "Hazan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2121--2159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research 12(Jul):2121-2159.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The McNemar test for binary matched-pairs data: mid-p and asymptotic are better than exact conditional", |
|
"authors": [ |
|
{

"first": "Morten",

"middle": [

"W"

],

"last": "Fagerland",

"suffix": ""

},

{

"first": "Stian",

"middle": [],

"last": "Lydersen",

"suffix": ""

},

{

"first": "Petter",

"middle": [],

"last": "Laake",
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "BMC medical research methodology", |
|
"volume": "13", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Morten W Fagerland, Stian Lydersen, and Petter Laake. 2013. The McNemar test for binary matched-pairs data: mid-p and asymptotic are better than exact conditional. BMC medical research methodology 13(1):91.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Understanding the difficulty of training deep feedforward neural networks. In Aistats", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Glorot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "249--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neural networks. In Aistats. pages 249-256.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Bidirectional LSTM-CRF models for sequence tagging", |
|
"authors": [ |
|
{ |
|
"first": "Zhiheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.01991" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidi- rectional LSTM-CRF models for sequence tagging. arXiv preprint arXiv:1508.01991 .", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A cascaded linear model for joint Chinese word segmentation and part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "Wenbin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yajuan", |
|
"middle": [], |
|
"last": "L\u00fc", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenbin Jiang, Liang Huang, Qun Liu, and Yajuan L\u00fc. 2008. A cascaded linear model for joint Chinese word segmentation and part-of-speech tagging. In In Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Segmental recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Lingpeng", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.06018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lingpeng Kong, Chris Dyer, and Noah A Smith. 2015. Segmental recurrent neural networks. arXiv preprint arXiv:1511.06018 .", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "An error-driven word-character hybrid model for joint Chinese word segmentation and POS tagging", |
|
"authors": [ |
|
{ |
|
"first": "Canasai", |
|
"middle": [], |
|
"last": "Kruengkrai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiyotaka", |
|
"middle": [], |
|
"last": "Uchimoto", |
|
"suffix": "" |
|
}, |
|
{

"first": "Jun'ichi",

"middle": [],

"last": "Kazama",

"suffix": ""

},

{

"first": "Yiou",

"middle": [],

"last": "Wang",

"suffix": ""

},

{

"first": "Kentaro",

"middle": [],

"last": "Torisawa",

"suffix": ""

},

{

"first": "Hitoshi",

"middle": [],

"last": "Isahara",

"suffix": ""

}
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "513--521", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Canasai Kruengkrai, Kiyotaka Uchimoto, Jun'ichi Kazama, Yiou Wang, Kentaro Torisawa, and Hitoshi Isahara. 2009a. An error-driven word-character hy- brid model for joint Chinese word segmentation and POS tagging. In Proceedings of the Joint Confer- ence of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Lan- guage Processing of the AFNLP: Volume 1-Volume 1. Association for Computational Linguistics, Sin- gapore, pages 513-521.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Joint Chinese word segmentation and POS tagging using an error-driven wordcharacter hybrid model", |
|
"authors": [ |
|
{ |
|
"first": "Canasai", |
|
"middle": [], |
|
"last": "Kruengkrai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiyotaka", |
|
"middle": [], |
|
"last": "Uchimoto", |
|
"suffix": "" |
|
}, |
|
{

"first": "Jun'ichi",

"middle": [],

"last": "Kazama",

"suffix": ""

},

{

"first": "Yiou",

"middle": [],

"last": "Wang",

"suffix": ""

},

{

"first": "Kentaro",

"middle": [],

"last": "Torisawa",

"suffix": ""

},

{

"first": "Hitoshi",

"middle": [],

"last": "Isahara",

"suffix": ""

}
|
], |
|
"year": 2009, |
|
"venue": "IEICE transactions on information and systems", |
|
"volume": "92", |
|
"issue": "12", |
|
"pages": "2298--2305", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Canasai Kruengkrai, Kiyotaka Uchimoto, Jun'ichi Kazama, WANG Yiou, Kentaro Torisawa, and Hi- toshi Isahara. 2009b. Joint Chinese word segmen- tation and POS tagging using an error-driven word- character hybrid model. IEICE transactions on in- formation and systems 92(12):2298-2305.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the eighteenth international conference on machine learning", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, Fernando Pereira, et al. 2001. Conditional random fields: Probabilis- tic models for segmenting and labeling sequence data. In Proceedings of the eighteenth international conference on machine learning, ICML. volume 1, pages 282-289.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Finding function in form: Compositional character models for open vocabulary word representation", |
|
"authors": [ |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Alan" |
|
], |
|
"last": "Black", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Trancoso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramon", |
|
"middle": [], |
|
"last": "Fermandez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvio", |
|
"middle": [], |
|
"last": "Amir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Marujo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiago", |
|
"middle": [], |
|
"last": "Luis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1520--1530", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang Ling, Chris Dyer, W. Alan Black, Isabel Tran- coso, Ramon Fermandez, Silvio Amir, Luis Marujo, and Tiago Luis. 2015. Finding function in form: Compositional character models for open vocabu- lary word representation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing. Association for Computational Linguistics, pages 1520-1530.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A new recurrent neural CRF for learning non-linear edge features", |
|
"authors": [ |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.04233" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuming Ma and Xu Sun. 2016. A new recurrent neu- ral CRF for learning non-linear edge features. arXiv preprint arXiv:1611.04233 .", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "End-to-end sequence labeling via bi-directional LSTM-CNNs-CRF", |
|
"authors": [ |
|
{ |
|
"first": "Xuezhe", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuezhe Ma and Eduard Hovy. 2016. End-to-end sequence labeling via bi-directional LSTM-CNNs- CRF. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics. Berlin, Germany, page 10641074.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1301.3781" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781 .", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Chinese part-ofspeech tagging: One-at-a-time or all-at-once? wordbased or character-based?", |
|
"authors": [ |
|
{

"first": "Hwee",

"middle": [

"Tou"

],

"last": "Ng",

"suffix": ""

},

{

"first": "Jin",

"middle": [

"Kiat"

],

"last": "Low",

"suffix": ""

}
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "277--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hwee Tou Ng and Jin Kiat Low. 2004. Chinese part-of- speech tagging: One-at-a-time or all-at-once? word- based or character-based? In Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing. Barcelona, Spain, pages 277- 284.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Universal dependencies v1: A multilingual treebank collection", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Catherine", |
|
"middle": [], |
|
"last": "De Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Ginter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Hajic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
},

{

"first": "Natalia",

"middle": [],

"last": "Silveira",

"suffix": ""

},

{

"first": "Reut",

"middle": [],

"last": "Tsarfaty",

"suffix": ""

},

{

"first": "Daniel",

"middle": [],

"last": "Zeman",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th International Conference on Language Resources and Evaluation (LREC 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1659--1666", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre, Marie-Catherine de Marneffe, Filip Gin- ter, Yoav Goldberg, Jan Hajic, Christopher D. Man- ning, Ryan McDonald, Slav Petrov, Sampo Pyysalo, Natalia Silveira, Reut Tsarfaty, and Daniel Zeman. 2016. Universal dependencies v1: A multilingual treebank collection. In Proceedings of the 10th In- ternational Conference on Language Resources and Evaluation (LREC 2016). pages 1659-1666.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Multi-task multi-domain representation learning for sequence tagging", |
|
"authors": [ |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1608.02689" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nanyun Peng and Mark Dredze. 2016. Multi-task multi-domain representation learning for sequence tagging. arXiv preprint arXiv:1608.02689 .", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. GloVe: Global vectors for word representation. In Empirical Methods in Nat- ural Language Processing (EMNLP). Doha, Qatar, pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Chinese morphological analysis with character-level POS tagging", |
|
"authors": [ |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongxiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "253--258", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mo Shen, Hongxiao Liu, Daisuke Kawahara, and Sadao Kurohashi. 2014. Chinese morphological analysis with character-level POS tagging. In Pro- ceedings of the 52nd Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers). Association for Computational Linguistics, Baltimore, Maryland, pages 253-258.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Dropout: a simple way to prevent neural networks from overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey E Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: a simple way to prevent neural networks from overfitting. Journal of Machine Learning Re- search 15(1):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A stacked sub-word model for joint Chinese word segmentation and part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1385--1394", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weiwei Sun. 2011. A stacked sub-word model for joint Chinese word segmentation and part-of-speech tag- ging. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies-Volume 1. Associ- ation for Computational Linguistics, Portland, Ore- gon, USA, pages 1385-1394.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Radical-enhanced Chinese character embedding", |
|
"authors": [ |
|
{ |
|
"first": "Yaming", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenzhou", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaolong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Neural Information Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "279--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaming Sun, Lei Lin, Nan Yang, Zhenzhou Ji, and Xiaolong Wang. 2014. Radical-enhanced Chinese character embedding. In International Conference on Neural Information Processing. Springer, pages 279-286.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Improving Chinese word segmentation and POS tagging with semi-supervised methods using large auto-analyzed data", |
|
"authors": [ |
|
{ |
|
"first": "Yiou", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{

"first": "Jun'ichi",

"middle": [],

"last": "Kazama",

"suffix": ""

},

{

"first": "Yoshimasa",

"middle": [],

"last": "Tsuruoka",

"suffix": ""

},

{

"first": "Wenliang",

"middle": [],

"last": "Chen",

"suffix": ""

},

{

"first": "Yujie",

"middle": [],

"last": "Zhang",

"suffix": ""

},

{

"first": "Kentaro",

"middle": [],

"last": "Torisawa",

"suffix": ""

}
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of 5th International Joint Conference on Natural Language Processing. Asian Federation of Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "309--317", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiou Wang, Jun'ichi Kazama, Yoshimasa Tsuruoka, Wenliang Chen, Yujie Zhang, and Kentaro Torisawa. 2011. Improving Chinese word segmentation and POS tagging with semi-supervised methods using large auto-analyzed data. In Proceedings of 5th In- ternational Joint Conference on Natural Language Processing. Asian Federation of Natural Language Processing, pages 309-317.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "CHARAGRAM: Embedding words and sentences via character n-grams", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Wieting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Livescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.02789" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Wieting, Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2016. CHARAGRAM: Embedding words and sentences via character n-grams. arXiv preprint arXiv:1607.02789 .", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "The Penn Chinese treebank: Phrase structure annotation of a large corpus", |
|
"authors": [ |
|
{ |
|
"first": "Naiwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fu-Dong", |
|
"middle": [], |
|
"last": "Chiou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Natural language engineering", |
|
"volume": "11", |
|
"issue": "02", |
|
"pages": "207--238", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naiwen Xue, Fei Xia, Fu-Dong Chiou, and Marta Palmer. 2005. The Penn Chinese treebank: Phrase structure annotation of a large corpus. Natural lan- guage engineering 11(02):207-238.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Transition-based neural word segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Meishan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guohong", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "421--431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meishan Zhang, Yue Zhang, and Guohong Fu. 2016. Transition-based neural word segmentation. In Pro- ceedings of the 54th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, pages 421-431.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Joint word segmentation and POS tagging using a single perceptron", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "888--896", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Zhang and Stephen Clark. 2008. Joint word seg- mentation and POS tagging using a single percep- tron. In Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics. Columbus, Ohio, pages 888-896.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "A fast decoder for joint word segmentation and POS-tagging using a single discriminative model", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "843--852", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Zhang and Stephen Clark. 2010. A fast decoder for joint word segmentation and POS-tagging using a single discriminative model. In Proceedings of the 2010 Conference on Empirical Methods in Natu- ral Language Processing. Association for Computa- tional Linguistics, Massachusetts, USA, pages 843- 852.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Deep learning for Chinese word segmentation and POS tagging", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoqing", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanyang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7013--7014", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoqing Zheng, Hanyang Chen, and Tianyu Xu. 2013. Deep learning for Chinese word segmentation and POS tagging. In Proceedings of the 2013 Confer- ence on Empirical Methods in Natural Language Processing. Seattle, USA, pages 647-657. 0044-0143, 0170-0270, 0400-0899, 1001-1017, 1019, 1021-1035, 1037- 1043, 1045-1059, 1062-1071, 1073- 1117, 1120-1131, 1133-1140, 1143- 1147, 1149-1151, 2000-2915, 4051- 4099, 4112-4180, 4198-4368, 5000- 5446, 6000-6560, 7000-7013 Dev 0301-0326, 2916-3030, 4100-4106, 4181-4189, 4369-4390, 5447-5492, 6561-6630, 7013-7014", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Vector representations of the Chinese characters as incrementally concatenated n-gram vectors in a given context." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Convolutional Neural Networks for orthographical feature extraction. Only the first convolutional layer and its following max-pooling layer are presented." |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Hyper-parameters.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Statistics of the employed datasets in numbers of words.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"text": "table, especially when GPU is not avail-", |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td>CTB5</td><td/><td>CTB9</td><td/><td>UD1</td><td/><td>UD2</td></tr><tr><td/><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td></tr><tr><td colspan=\"2\">size = 300 95.22</td><td>91.71</td><td>95.53</td><td>90.89</td><td>91.84</td><td>85.43</td><td>92.40</td><td>85.63</td></tr><tr><td>1-gram</td><td>95.14</td><td>91.52</td><td>95.25</td><td>90.43</td><td>91.74</td><td>85.07</td><td>91.83</td><td>84.93</td></tr><tr><td>2-gram</td><td>97.08</td><td>93.72</td><td>96.30</td><td>91.66</td><td>94.50</td><td>88.36</td><td>94.42</td><td>88.14</td></tr><tr><td>3-gram</td><td>97.14</td><td>94.01</td><td>96.47</td><td>91.75</td><td>94.36</td><td>88.27</td><td>94.43</td><td>88.32</td></tr><tr><td>4-gram</td><td>97.13</td><td>94.02</td><td>96.48</td><td>91.89</td><td>94.25</td><td>88.37</td><td>94.16</td><td>88.24</td></tr><tr><td>5-gram</td><td>96.94</td><td>93.84</td><td>96.50</td><td>91.88</td><td>94.40</td><td>88.47</td><td>94.25</td><td>88.03</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"text": "Evaluation of concatenated n-gram representations on the development sets in F1-scores", |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td>CTB5</td><td/><td>CTB9</td><td/><td>UD1</td><td/><td>UD2</td></tr><tr><td/><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td></tr><tr><td>3-gram</td><td>97.14</td><td>94.01</td><td>96.47</td><td>91.75</td><td>94.36</td><td>88.27</td><td>94.43</td><td>88.32</td></tr><tr><td colspan=\"2\">+radicals 97.26</td><td>94.42</td><td>96.42</td><td>91.74</td><td>94.37</td><td>88.21</td><td>94.39</td><td>88.36</td></tr><tr><td colspan=\"2\">+graphical 97.25</td><td>94.08</td><td>96.50</td><td>91.78</td><td>94.50</td><td>88.59</td><td>94.23</td><td>87.95</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"text": "Evaluation of sub-character level features on the development sets in F1-scores.", |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td>CTB5</td><td/><td>CTB9</td><td/><td>UD1</td><td/><td>UD2</td></tr><tr><td/><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td></tr><tr><td>1-gram</td><td>95.14</td><td>91.52</td><td>95.25</td><td>90.43</td><td>91.74</td><td>85.07</td><td>91.83</td><td>84.93</td></tr><tr><td>+GloVe</td><td>95.82</td><td>92.45</td><td>95.44</td><td>90.57</td><td>92.77</td><td>86.48</td><td>93.01</td><td>86.48</td></tr><tr><td colspan=\"2\">3-gram, radicals 97.26</td><td>94.42</td><td>96.42</td><td>91.74</td><td>94.37</td><td>88.21</td><td>94.39</td><td>88.36</td></tr><tr><td>+GloVe</td><td>97.42</td><td>94.58</td><td>96.56</td><td>91.96</td><td>95.12</td><td>89.69</td><td>95.02</td><td>89.20</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"text": "Evaluation of the pre-trained character embeddings on the development sets in F1-scores.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF9": { |
|
"html": null, |
|
"text": "Evaluations of the best model on the final test sets in F1-scores as well as the recalls of out-ofvocabulary words. Significance tests for Single are in comparison to ZPar, while tests for Ensemble are in comparison to Single ( ** p < 0.01, * p < 0.05)", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF10": { |
|
"html": null, |
|
"text": "displays the evaluation of the ensemble model and ZPar on the decomposed test sets", |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Ensemble</td><td/><td>ZPar</td></tr><tr><td>Seg</td><td>Seg&Tag</td><td colspan=\"2\">Seg Seg&Tag</td></tr><tr><td>BN 97.89*</td><td>94.48**</td><td>97.68</td><td>94.22</td></tr><tr><td colspan=\"2\">CS 96.67** 91.78**</td><td>95.61</td><td>90.15</td></tr><tr><td colspan=\"2\">FM 96.54** 91.92**</td><td>96.30</td><td>91.51</td></tr><tr><td colspan=\"2\">MG 94.54** 89.23**</td><td>94.22</td><td>88.60</td></tr><tr><td>NS 97.56</td><td>93.92**</td><td>97.49</td><td>93.70</td></tr><tr><td colspan=\"2\">SM 96.43** 91.78**</td><td>96.13</td><td>90.32</td></tr><tr><td colspan=\"2\">SP 97.29** 93.93**</td><td>96.69</td><td>93.35</td></tr><tr><td colspan=\"2\">WB 94.27** 88.44**</td><td>93.38</td><td>86.88</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF11": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>: Evaluation on Broadcast News (BN),</td></tr><tr><td>Conversations (CS), Forum (FM), Magazine</td></tr><tr><td>(MG), News (NS), Short Messages (SM), Speech</td></tr><tr><td>(SP) and Weblogs (WB) in CTB9. ( ** p < 0.01,</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF13": { |
|
"html": null, |
|
"text": "Result comparisions on CTB5 in F1scores.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF15": { |
|
"html": null, |
|
"text": "Tagging speed in numbers of sentences and characters per second", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF16": { |
|
"html": null, |
|
"text": "The split of Chinese Treebank 9.0", |
|
"num": null, |
|
"content": "<table><tr><td/><td>P</td><td>R</td><td>F</td></tr><tr><td>CTB5</td><td colspan=\"3\">Single Ensemble 97.57 98.47 98.02 97.49 98.30 97.89</td></tr><tr><td>CTB9</td><td colspan=\"3\">Single Ensemble 96.61 96.74 96.67 96.38 96.55 96.47</td></tr><tr><td>UD1</td><td colspan=\"3\">Single Ensemble 95.07 95.27 95.17 94.71 94.99 94.85</td></tr><tr><td>UD2</td><td colspan=\"3\">Single Ensemble 95.00 95.22 95.11 94.98 94.93 94.93</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF17": { |
|
"html": null, |
|
"text": "Evaluation of segmentations in precision, recall and F1-scores", |
|
"num": null, |
|
"content": "<table><tr><td/><td>P</td><td>R</td><td>F</td></tr><tr><td>CTB5</td><td colspan=\"3\">Single Ensemble 93.95 94.81 94.38 93.68 94.47 94.07</td></tr><tr><td>CTB9</td><td colspan=\"3\">Single Ensemble 92.28 92.40 92.34 91.81 91.97 91.89</td></tr><tr><td>UD1</td><td colspan=\"3\">Single Ensemble 89.67 89.86 89.77 89.28 89.54 89.41</td></tr><tr><td>UD2</td><td colspan=\"3\">Single Ensemble 89.33 89.54 89.43 88.95 89.04 89.00</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |