#!/usr/bin/env python
# -*- coding: utf8 -*-
# Filename: src/ner/ner.py
# Author: xul - 294739212@qq.com
# Create: 2017-04-01 14:20:37
# Description: NER


import pycrfsuite
import re
import nltk
from itertools import chain
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelBinarizer


def load_ner_data(ner_train_file):
    '''
    Load NER training data.

    Parameters
    ----------
    ner_train_file : str
        path to the training file; each line has fields separated by
        '\t:\t' and the text to tag is the 4th field

    Returns
    -------
    ner_train_data : list(list(tuple))
        one list per input line; each tuple is (token, POS tag, label)
        where the label is 'NE' for entity tokens and '0' otherwise
    '''
    def connect_entity_words(train_data):
        '''
        Keep multi-word entities intact: for every span marked as
        ``====words@type++++`` or ``____words----``, join the words with
        underscores and replace the whole span with '-e-' + joined words.
        '''
        for pattern in (r'====(.+?)@.+?\+\+\+\+', r'____(.+?)----'):
            matches = re.findall(pattern, train_data)
            matches = [m.replace(' ', '_') for m in matches]
            # strip backslashes so the text is safe as a re.sub
            # replacement string (backslashes would be escape-parsed)
            matches = [m.replace('\\', '') for m in matches]
            for replacement in matches:
                # substitute one occurrence at a time, left to right,
                # so the i-th match gets the i-th cleaned replacement
                train_data = re.sub(pattern, '-e-' + replacement,
                                    train_data, 1)
        return train_data

    def del_irrelevant_char(line):
        '''
        Drop irrelevant content: punctuation characters, pure-digit
        tokens, and the artificial ==== / ++++ markers around entities.
        '''
        punctuation = set('!"#$%&\'()*+,./:;<=>?@[\\]^`{|}~')
        # portable equivalent of the Python-2-only
        # str.translate(None, punctuation) delete form
        cleaned = ''.join(c for c in line if c not in punctuation)
        word_list_res = []
        for word in cleaned.split():
            if word.isdigit():
                continue  # pure numbers carry no NER signal here
            if word.startswith('===='):
                word_list_res.append(word[4:])
            elif word.endswith('++++'):
                word_list_res.append(word[:-4])
            else:
                word_list_res.append(word)
        return word_list_res

    def filter_nonprintable_char(src_word):
        '''
        Keep only ASCII characters (code points 0-127); the word vectors
        are trained on English text, so anything else is removed.
        '''
        return ''.join(c for c in src_word if ord(c) < 128)

    def get_token_postag_label(token):
        '''
        Return (token, POS tag, label) for one token, or None for a bare
        '-e-' marker with no word attached.
        '''
        if token == '-e-':
            return None
        if token.startswith('-e-'):
            word = token[len('-e-'):]
            return (word, nltk.pos_tag(word.split())[0][1], 'NE')
        return (token, nltk.pos_tag(token.split())[0][1], '0')

    ner_train_data = []
    # 'with' closes the file; the original left the handle open
    with open(ner_train_file) as train_file:
        for line in train_file:
            ner_train_data_piece = []
            fields = line.split('\t:\t')
            pure_word_list = del_irrelevant_char(
                connect_entity_words(fields[3]))
            for word in pure_word_list:
                printable_word = filter_nonprintable_char(word)
                if printable_word:
                    token_postag_label = get_token_postag_label(printable_word)
                    if token_postag_label is not None:
                        ner_train_data_piece.append(token_postag_label)
            ner_train_data.append(ner_train_data_piece)
    return ner_train_data


def word2features(sent, i):
    '''
    Build the CRF feature list for the token at position *i* of *sent*.

    Parameters
    ----------
    sent : list(tuple)
        sentence as (token, postag, label) tuples
    i : int
        index of the token to featurize

    Returns
    -------
    list(str)
        feature strings in pycrfsuite's "name=value" form
    '''
    token, tag = sent[i][0], sent[i][1]
    feats = [
        'bias',
        'word.lower=' + token.lower(),
        'word[-3:]=' + token[-3:],
        'word[-2:]=' + token[-2:],
        'word.isupper=%s' % token.isupper(),
        'word.istitle=%s' % token.istitle(),
        'word.isdigit=%s' % token.isdigit(),
        'postag=' + tag,
        'postag[:2]=' + tag[:2],
    ]

    # previous-token context, or a BOS marker at the sentence start
    if i == 0:
        feats.append('BOS')
    else:
        prev_tok, prev_tag = sent[i - 1][0], sent[i - 1][1]
        feats += [
            '-1:word.lower=' + prev_tok.lower(),
            '-1:word.istitle=%s' % prev_tok.istitle(),
            '-1:word.isupper=%s' % prev_tok.isupper(),
            '-1:postag=' + prev_tag,
            '-1:postag[:2]=' + prev_tag[:2],
        ]

    # next-token context, or an EOS marker at the sentence end
    if i == len(sent) - 1:
        feats.append('EOS')
    else:
        next_tok, next_tag = sent[i + 1][0], sent[i + 1][1]
        feats += [
            '+1:word.lower=' + next_tok.lower(),
            '+1:word.istitle=%s' % next_tok.istitle(),
            '+1:word.isupper=%s' % next_tok.isupper(),
            '+1:postag=' + next_tag,
            '+1:postag[:2]=' + next_tag[:2],
        ]

    return feats


def sent2features(sent):
    '''Featurize every token position of *sent* for the CRF.'''
    features = []
    for idx in range(len(sent)):
        features.append(word2features(sent, idx))
    return features


def sent2labels(sent):
    '''Extract the label column from a (token, postag, label) sentence.'''
    return [triple[2] for triple in sent]


def sent2tokens(sent):
    '''Extract the token column from a (token, postag, label) sentence.'''
    return [triple[0] for triple in sent]


def ner_train(ner_train_file, ner_model_file):
    '''
    use train file to get ner model

    Parameters
    ----------
    ner_train_file : str
        the file to train ner
    ner_model_file : str
        the file to store ner model
    '''
    sentences = load_ner_data(ner_train_file)

    trainer = pycrfsuite.Trainer(verbose=False)

    # feed each sentence's feature sequence and label sequence to the trainer
    for sentence in sentences:
        trainer.append(sent2features(sentence), sent2labels(sentence))

    trainer.set_params({
        'c1': 1.0,   # coefficient for L1 penalty
        'c2': 1e-3,  # coefficient for L2 penalty
        'max_iterations': 50,  # stop earlier

        # include transitions that are possible, but not observed
        'feature.possible_transitions': True
    })

    trainer.train(ner_model_file)


def bio_classification_report(y_true, y_pred):
    """
    Classification report for a list of BIO-encoded sequences.
    It computes token-level metrics and discards the "outside" labels.

    This module tags non-entity tokens with '0' (zero, see
    get_token_postag_label), while standard BIO data spells the outside
    tag 'O'; the original code only removed 'O', so the outside class
    leaked into the report. Both spellings are excluded now.

    Note that it requires scikit-learn 0.15+ (or a version from github master)
    to calculate averages properly!

    Parameters
    ----------
    y_true : list(list(str))
        gold label sequences, one list per sentence
    y_pred : list(list(str))
        predicted label sequences, aligned with y_true

    Returns
    -------
    str
        sklearn text report with precision/recall/f1 per entity label
    """
    lb = LabelBinarizer()
    # flatten the per-sentence sequences into one token-level label list
    y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
    y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))

    # drop both spellings of the outside tag ('O' and this module's '0')
    tagset = set(lb.classes_) - {'O', '0'}
    # sort by entity type first, then by the B-/I- prefix
    tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
    class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}

    return classification_report(
        y_true_combined,
        y_pred_combined,
        labels=[class_indices[cls] for cls in tagset],
        target_names=tagset,
    )


def ner_evaluate(ner_evaluate_file, ner_model_file):
    '''
    Evaluate a trained NER model and print a per-label report.

    Parameters
    ----------
    ner_evaluate_file : str
        the file with labelled evaluation data
    ner_model_file : str
        the file storing the trained ner model
    '''
    ner_evaluate_data = load_ner_data(ner_evaluate_file)

    X_test = [sent2features(s) for s in ner_evaluate_data]
    y_test = [sent2labels(s) for s in ner_evaluate_data]

    tagger = pycrfsuite.Tagger()
    tagger.open(ner_model_file)
    try:
        y_pred = [tagger.tag(xseq) for xseq in X_test]
    finally:
        tagger.close()  # release the model file (was never closed)

    # print(x) with a single argument behaves identically under Python 2
    # (which this module otherwise targets) and Python 3
    print(bio_classification_report(y_test, y_pred))
