from sklearn_crfsuite import CRF
from NER.check import check
from tqdm import tqdm
import pickle

# English tag set: BIO scheme over the four CoNLL-2003 entity types.
sorted_labels_eng = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"]

# Chinese tag set: BMES scheme (Begin/Middle/End/Single) over eight entity
# types, preceded by the outside tag 'O'. Built programmatically so the
# type list is the single source of truth.
_CHN_ENTITY_TYPES = ['NAME', 'CONT', 'EDU', 'TITLE', 'ORG', 'RACE', 'PRO', 'LOC']
sorted_labels_chn = ['O'] + [
    prefix + '-' + etype
    for etype in _CHN_ENTITY_TYPES
    for prefix in ('B', 'M', 'E', 'S')
]


def init(language='English', mode='train'):
    """Load a tagged corpus from ./NER/<language>/<mode>.txt.

    Each non-blank line is expected to be "word tag" (single-space
    separated); blank lines mark sentence boundaries.

    Returns:
        list[list[tuple[str, str]]]: one inner list of (word, tag)
        pairs per sentence.
    """
    sentences = []
    sentence = []
    # 'with' guarantees the handle is closed (the original leaked it), and
    # iterating the file directly removes the arbitrary 10M-line cap.
    with open('./NER/' + language + '/' + mode + '.txt', 'r', encoding='utf-8') as f:
        for line in f:
            # Strip only the newline: the original s[:-1] chopped the last
            # character of a final line that lacked a trailing '\n'.
            line = line.rstrip('\n')
            if line:
                word, tag = line.split(' ')
                sentence.append((word, tag))
            elif sentence:
                sentences.append(sentence)
                sentence = []
    # Flush the last sentence if the file does not end with a blank line.
    if sentence:
        sentences.append(sentence)
    return sentences


def word2features(sent, i, language):
    """Build the CRF feature dict for token i of a sentence.

    Features cover a 5-token window (i-2 .. i+2): the surface form,
    digit-ness, and bigram concatenations for every language, plus
    casing and prefix/suffix features when language == 'English'
    (they are meaningless for Chinese characters).

    NOTE(review): the feature-name strings are inconsistent on purpose-
    preservation grounds ('-1word[:3]' vs '-1:word.lower()', and the
    '+1_+2+word' key looks like a typo of '+1_+2_word') — a trained model
    is keyed on these exact strings, so do not "fix" them without
    retraining.

    Args:
        sent: list of (word, tag) tuples for one sentence.
        i: index of the token to featurize.
        language: 'English' enables the casing/affix features.

    Returns:
        dict mapping feature-name strings to str/bool values.
    """
    word = sent[i][0]
    # Language-independent features of the current token.
    features = {
        'word': word,
        'word.isdigit()': word.isdigit()
    }
    if language == 'English':
        features.update({
            'word.lower()': word.lower(),
            'word[:3]': word[:3],
            'word[:2]': word[:2],
            'word[-3:]': word[-3:],
            'word[-2:]': word[-2:],
            'word.isupper()': word.isupper(),
            'word.istitle()': word.istitle(),
        })
    # Previous token (i-1) and its bigram with the current token.
    if i > 0:
        word1 = sent[i - 1][0]
        features.update({
            '-1_word': word1,
            '-1_0_word': word1 + '_' + word,
            '-1:word.isdigit()': word1.isdigit()
        })
        if language == 'English':
            features.update({
                '-1:word.lower()': word1.lower(),
                '-1word[:3]': word1[:3],
                '-1word[:2]': word1[:2],
                '-1word[-3:]': word1[-3:],
                '-1word[-2:]': word1[-2:],
                '-1word.isupper()': word1.isupper(),
                '-1:word.istitle()': word1.istitle()
            })
    else:
        # Marker: token is at the beginning of the sentence.
        features['BOS'] = True
    # Token two positions back (i-2) and the (i-2, i-1) bigram.
    if i > 1:
        word1 = sent[i - 2][0]
        features.update({
            '-2_word': word1,
            '-2_-1_word': word1 + '_' + sent[i - 1][0],
            '-2:word.isdigit()': word1.isdigit()
        })
        if language == 'English':
            features.update({
                '-2:word.lower()': word1.lower(),
                '-2word[:3]': word1[:3],
                '-2word[:2]': word1[:2],
                '-2word[-3:]': word1[-3:],
                '-2word[-2:]': word1[-2:],
                '-2word.isupper()': word1.isupper(),
                '-2:word.istitle()': word1.istitle(),
            })
    else:
        # Marker: fewer than two tokens precede this one.
        features['BOS2'] = True
    # Next token (i+1) and its bigram with the current token.
    if i < len(sent) - 1:
        word1 = sent[i + 1][0]
        features.update({
            '+1_word': word1,
            '0_+1_word': word + '_' + word1,
            '+1:word.isdigit()': word1.isdigit()
        })
        if language == 'English':
            features.update({
                '+1:word.lower()': word1.lower(),
                '+1word[:3]': word1[:3],
                '+1word[:2]': word1[:2],
                '+1word[-3:]': word1[-3:],
                '+1word[-2:]': word1[-2:],
                '+1word.isupper()': word1.isupper(),
                '+1:word.istitle()': word1.istitle(),
            })
    else:
        # Marker: token is at the end of the sentence.
        features['EOS'] = True
    # Token two positions ahead (i+2) and the (i+1, i+2) bigram.
    if i < len(sent) - 2:
        word1 = sent[i + 2][0]
        features.update({
            '+2_word': word1,
            '+1_+2+word': sent[i + 1][0] + '_' + word1,
            '+2:word.isdigit()': word1.isdigit()
        })
        if language == 'English':
            features.update({
                '+2:word.lower()': word1.lower(),
                '+2word[:3]': word1[:3],
                '+2word[:2]': word1[:2],
                '+2word[-3:]': word1[-3:],
                '+2word[-2:]': word1[-2:],
                '+2word.isupper()': word1.isupper(),
                '+2:word.istitle()': word1.istitle(),
            })
    else:
        # Marker: fewer than two tokens follow this one.
        features['EOS2'] = True
    # Skip-bigram around the current token (only if both neighbors exist).
    if 0 < i < len(sent) - 1:
        features.update({
            '-1_+1_word': sent[i - 1][0] + '_' + sent[i + 1][0],
        })
    else:
        features['NOT_MIDDLE'] = True
    return features


def extract_features(sentences, language):
    """Return the per-token feature-dict sequences for every sentence."""
    return [
        [word2features(sent, idx, language) for idx in range(len(sent))]
        for sent in sentences
    ]


def extract_labels(sentences):
    """Return only the tag sequence of each sentence, words discarded."""
    return [[tag for _word, tag in sent] for sent in sentences]


language = 'English'

# ---- train ----
train_data = init(language, 'train')
X_train = extract_features(train_data, language)
y_train = extract_labels(train_data)
# 'ap' = averaged perceptron trainer in CRFsuite.
crf = CRF(algorithm='ap', max_iterations=300, all_possible_transitions=True, verbose=True)
crf.fit(X_train, y_train)
# 'with' closes the handle; the original left both pickle files open.
with open(language + '.param', 'wb') as savefile:
    pickle.dump(crf, savefile)

# ---- evaluate ----
# NOTE(review): mode='train' evaluates on the training split itself —
# confirm this is intended rather than a held-out 'test' split.
mode = 'train'
with open(language + '.param', 'rb') as savefile:
    crf = pickle.load(savefile)
test_data = init(language, mode)
X_test = extract_features(test_data, language)
y_test = extract_labels(test_data)  # unused by check(); kept for parity with original
y_pred = crf.predict(X_test)

# Write predictions in the same "word tag" / blank-line format as the input.
mypath = './NER/example_data/example_my_result.txt'
with open(mypath, 'w', encoding='utf-8') as f:
    for sentence_data, my_tag_list in tqdm(zip(test_data, y_pred)):
        words = [pair[0] for pair in sentence_data]
        for w, tag in zip(words, my_tag_list):
            f.write(w + ' ' + tag + '\n')
        f.write('\n')
check(language, 'NER/' + language + '/' + mode + '.txt', mypath)
