#!/usr/bin/env python
# encoding: utf-8
'''
@author: Justin Ruan
@license:
@contact: ruanjun@whut.edu.cn
@time: 2019-11-30
@desc:
'''


import numpy as np
import matplotlib.pyplot as plt
import os
import util
import datetime

from tqdm import tqdm
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torchtext.data import Dataset, Field, Example, BucketIterator
from sklearn.model_selection import train_test_split

from net.bilstm_crf import BiLSTM_CRF

'''
Three kinds of text files are involved:
1. Raw text (txtoriginal.txt): a single line of text.
2. Manually annotated text (*.txt): multiple lines, each with four columns —
   phrase, phrase start position, end position, and the phrase label (BI).
3. LSTM training file (train.txt): each line has two columns — one character
   and one label (a BIO stream).
Workflow:
Step 1: From the raw text, produce the annotated txt either by manual input,
        or by dictionary matching followed by manual correction;
Step 2: Convert the annotated txt into the LSTM training file train.txt via
        transfer_data.py;
Step 3: Train the LSTM model;
Step 4: Use the trained LSTM model to predict a BIO stream for new input,
        then convert it back into the annotated-text format.

'''
# Absolute path of the project root; all model/data paths below are built from it.
PROJECT_ROOT = util.get_project_root()

class BI_LSTMNER:
    """BiLSTM-CRF based named-entity recognizer for Chinese clinical text.

    Wraps the full pipeline: dataset construction from a char/label BIO
    file, pretrained-embedding loading, model creation/checkpointing,
    training and evaluation through legacy torchtext iterators, prediction,
    and conversion of predicted BIO streams back into the four-column
    annotation format.
    """

    def __init__(self, model_name, EMBEDDING_DIM, EMBEDDING_filename):
        """
        :param model_name: name used for the checkpoint directory and files
        :param EMBEDDING_DIM: dimensionality of the pretrained word vectors
        :param EMBEDDING_filename: vector file name under models/
        """
        self.vocab_path = PROJECT_ROOT + '//models//vocab.txt'
        # NOTE: the original assigned a hard-coded token_vec_300.bin first and
        # immediately overwrote it; only the parameterized path is kept.
        self.embedding_file = PROJECT_ROOT + '//models//{}'.format(EMBEDDING_filename)

        self.model_name = model_name
        self.model_root = PROJECT_ROOT + '/models/{}'.format(self.model_name)

        # char -> index mapping; built lazily by build_data()/load_worddict()
        self.word_dict = None

        # Chinese category name -> English tag prefix
        self.tag_dict = {
            "治疗": "TREATMENT",
            "身体部位": "BODY",
            "症状和体征": "SIGNS",
            "疾病和诊断": "DISEASE",
            "检查和检验": "CHECK"
        }

        # tag_to_ix: BIO label -> class index; START/STOP are CRF sentinels
        self.class_dict = {
            'O': 0,
            'TREATMENT-I': 1,
            'TREATMENT-B': 2,
            'BODY-B': 3,
            'BODY-I': 4,
            'SIGNS-I': 5,
            'SIGNS-B': 6,
            'CHECK-B': 7,
            'CHECK-I': 8,
            'DISEASE-I': 9,
            'DISEASE-B': 10,
            'START': 11,
            'STOP': 12
        }
        self.EMBEDDING_DIM = EMBEDDING_DIM
        self.NUM_CLASSES = len(self.class_dict)
        self.HIDDEN_DIM = 256

        self.use_GPU = True
        self.device = torch.device("cuda:0" if self.use_GPU else "cpu")

    def build_data(self):
        """Build the training sentences and the char->index vocabulary.

        Reads the two-column (char \\t label) training file and splits it
        into sentences on end-of-sentence punctuation.

        :return: (datas, word_dict) — datas is a list of [chars, labels]
                 sentence pairs; word_dict maps char -> index.
        """
        datas = []
        sample_x = []
        sample_y = []
        vocabs = {'UNK'}
        # 'with' guarantees the handle is closed (the original leaked it)
        with open(self.train_path, encoding="UTF-8") as f:
            for line in f:
                line = line.rstrip().split('\t')
                if not line:
                    continue
                char = line[0]
                if not char:
                    continue
                cate = line[-1]
                sample_x.append(char)
                sample_y.append(cate)
                vocabs.add(char)
                if char in ['。', '?', '!', '！', '？']:
                    datas.append([sample_x, sample_y])
                    sample_x = []
                    sample_y = []
        # flush a trailing sentence that does not end with punctuation
        # (the original silently dropped it)
        if sample_x:
            datas.append([sample_x, sample_y])

        if os.path.exists(self.vocab_path):
            word_dict = self.load_worddict()
        else:
            # materialize once so the saved file and the dict share one order
            vocab_list = list(vocabs)
            word_dict = {wd: index for index, wd in enumerate(vocab_list)}
            self.write_file(vocab_list, self.vocab_path)
        return datas, word_dict

    def write_file(self, wordlist, filepath):
        """Persist the vocabulary, one token per line.

        :param wordlist: list of vocabulary tokens
        :param filepath: destination path
        """
        with open(filepath, 'w+', encoding="UTF-8") as f:
            f.write('\n'.join(wordlist))

    def load_pretrained_embedding(self):
        """Load pretrained word vectors from a text-format embedding file.

        Lines with fewer than EMBEDDING_DIM values (e.g. a word2vec header
        line) are skipped.

        :return: dict mapping word -> numpy float32 vector
        """
        embeddings_dict = {}
        with open(self.embedding_file, 'r', encoding="UTF-8") as f:
            for line in f:
                values = line.strip().split(' ')
                if len(values) < self.EMBEDDING_DIM:
                    continue
                word = values[0]
                coefs = np.asarray(values[1:], dtype='float32')
                embeddings_dict[word] = coefs
        print('Found %s word vectors.' % len(embeddings_dict))
        return embeddings_dict

    def build_embedding_matrix(self):
        """Build the (VOCAB_SIZE + 1, EMBEDDING_DIM) embedding matrix.

        Rows for words missing from the pretrained file stay all-zero.

        :return: numpy float32 matrix indexed by word_dict indices
        """
        embedding_dict = self.load_pretrained_embedding()
        embedding_matrix = np.zeros((self.VOCAB_SIZE + 1, self.EMBEDDING_DIM), dtype=np.float32)
        for word, i in self.word_dict.items():
            embedding_vector = embedding_dict.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
        return embedding_matrix

    def create_initial_model(self):
        """Instantiate a fresh BiLSTM_CRF network from current hyperparameters."""
        model = BiLSTM_CRF(word_size=self.VOCAB_SIZE,
                           label_size=self.NUM_CLASSES,
                           word_ebd_dim=self.EMBEDDING_DIM,
                           lstm_layers=2,
                           dropout=0.5,
                           lstm_hsz=self.HIDDEN_DIM,
                           batch_size=self.batch_size,
                           embedding_matrix=self.embedding_matrix,
                           use_cuda=self.use_GPU)
        return model

    def load_model(self, model_file):
        """Load a model checkpoint, or create a fresh model if none exists.

        :param model_file: checkpoint path; None selects the latest
                           checkpoint under model_root (creating the
                           directory if needed)
        :return: network model
        """
        if model_file is None:
            checkpoint_dir = self.model_root
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)

            model_file = util.latest_checkpoint(checkpoint_dir)

        if model_file is not None:
            print("loading >>> ", model_file, " ...")
            load_object = torch.load(model_file)
            if isinstance(load_object, dict):
                # checkpoint is a state_dict: rebuild the net, then load the
                # already-deserialized weights (original torch.load'ed twice)
                model = self.create_initial_model()
                model.load_state_dict(load_object)
            else:
                # checkpoint is a pickled full model
                model = load_object
        else:
            model = self.create_initial_model()
        return model

    def x_tokenize(self, x):
        """Map chars to vocabulary indices (raises KeyError on OOV chars;
        the predict path uses build_input, which falls back to 'UNK')."""
        return [self.word_dict[char] for char in x]

    def y_tokenize(self, y):
        """Map BIO label strings to class indices."""
        return [self.class_dict[label] for label in y]

    def create_dataset(self, datas, batch_size):
        """Build torchtext train/test BucketIterators from sentence pairs.

        :param datas: list of [chars, labels] sentence pairs
        :param batch_size: batch size for both iterators
        :return: (train_iter, test_iter, train_len, test_len)
        """
        TEXT = Field(sequential=True, use_vocab=False, batch_first=True, fix_length=None,
                     eos_token=None, init_token=None, include_lengths=True, pad_token=0)
        LABEL = Field(sequential=True, use_vocab=False, batch_first=True, fix_length=None,
                     eos_token=None, init_token=None, include_lengths=False, pad_token=0, is_target=True)
        fields = [
            ("label", LABEL), ("text", TEXT)]

        examples = []
        for text, label in datas:
            x_token = self.x_tokenize(text)
            y_token = self.y_tokenize(label)
            new_example = Example.fromlist([y_token, x_token], fields)
            examples.append(new_example)

        # fixed random_state keeps the train/test split reproducible
        examples_train, examples_test = train_test_split(examples, test_size=0.1, random_state=12)

        data_train = Dataset(examples_train, fields)
        data_test = Dataset(examples_test, fields)

        train_iter, test_iter = BucketIterator.splits((data_train, data_test),
                                                     batch_sizes=(batch_size, batch_size),
                                                     device=self.device,
                                                     sort_key=lambda x: len(x.text),  # bucket by length
                                                     sort_within_batch=True,
                                                     repeat=False)

        return train_iter, test_iter, len(examples_train), len(examples_test)

    def train_model(self, train_file, batch_size, epochs=10):
        """Train the model, evaluating and checkpointing after each epoch.

        :param train_file: training file name under data/
        :param batch_size: mini-batch size
        :param epochs: number of training epochs
        """
        self.batch_size = batch_size
        self.train_path = PROJECT_ROOT + '//data//' + train_file

        datas, self.word_dict = self.build_data()
        self.VOCAB_SIZE = len(self.word_dict)

        self.embedding_matrix = self.build_embedding_matrix()

        train_iter, test_iter, train_len, test_len = self.create_dataset(datas, batch_size)
        print("length of train set =", train_len, ", length of test set =", test_len)

        model = self.load_model(model_file=None)
        print(model)

        optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)
        # mode='min': multiply the learning rate by `factor` when the loss plateaus
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                               factor=0.1)
        if self.use_GPU:
            model.to(self.device)

        for epoch in range(epochs):
            model.train()
            # --- training phase ---
            total_loss = 0
            with tqdm(train_iter, desc="training in Epoch {}/{}".format(epoch + 1, epochs)) as tq:
                for step, batch in enumerate(tq):
                    word = batch.text[0]
                    label = batch.label
                    seq_lengths = batch.text[1]

                    optimizer.zero_grad()
                    loss, _ = model(word, label, seq_lengths, use_decode=False)
                    loss.backward()
                    optimizer.step()

                    running_loss = loss.item()
                    total_loss += running_loss

                    if step % 3 == 0:
                        tq.set_postfix(Loss=running_loss)

            scheduler.step(total_loss)

            running_loss = 0.0
            running_corrects = 0
            epoch_count = 0
            model.eval()
            # --- evaluation phase ---
            for step, batch in enumerate(test_iter):
                word = batch.text[0]
                label = batch.label
                seq_lengths = batch.text[1]
                loss, preds = model(word, label, seq_lengths, use_decode=True)

                running_loss += loss.item() * word.size(0)
                running_corrects += torch.sum(preds == label)
                epoch_count += label.size(0) * label.size(1)  # NOTE: padding positions are counted too

            epoch_loss = running_loss / test_len
            epoch_acc = running_corrects.double() / epoch_count
            print('Epoch {}/{}, on test set loss = {:.4f}, accu = {:.4f} \n'.format(epoch + 1, epochs, epoch_loss, epoch_acc))

            # checkpoint after every epoch, encoding epoch/loss/accuracy in the name
            torch.save(model.state_dict(), self.model_root + "/{}_{}_cp-{:04d}-{:.4f}-{:.4f}.pth".format(
                self.model_name, self.HIDDEN_DIM, epoch+1, epoch_loss, epoch_acc),
                       )

    ############################################################################################################
    #                      evaluate
    ############################################################################################################
    def evaluate_model(self, train_file, batch_size):
        """Evaluate the latest checkpoint on the held-out split of train_file.

        :param train_file: training file name under data/
        :param batch_size: mini-batch size
        """
        self.batch_size = batch_size
        self.train_path = PROJECT_ROOT + '//data//' + train_file

        datas, self.word_dict = self.build_data()
        self.VOCAB_SIZE = len(self.word_dict)

        # weights come from the checkpoint, no embedding matrix needed
        self.embedding_matrix = None

        train_iter, test_iter, train_len, test_len = self.create_dataset(datas, batch_size)
        print("length of train set =", train_len, ", length of test set =", test_len)

        model = self.load_model(model_file=None)
        # disable gradients to save a large amount of memory
        for param in model.parameters():
            param.requires_grad = False
        print(model)

        if self.use_GPU:
            model.to(self.device)

        running_loss = 0.0
        running_corrects = 0
        epoch_count = 0
        model.eval()
        # --- evaluation ---
        with tqdm(test_iter, desc="evaluating") as tq:
            for batch in tq:
                word = batch.text[0]
                label = batch.label
                seq_lengths = batch.text[1]
                loss, preds = model(word, label, seq_lengths, use_decode=True)

                running_loss += loss.item() * word.size(0)
                running_corrects += torch.sum(preds == label)
                epoch_count += label.size(0) * label.size(1)  # NOTE: padding positions are counted too

                # original postfix key had a typo ("accuraycy")
                tq.set_postfix(accuracy=float(running_corrects.item()) / epoch_count)

        epoch_loss = running_loss / test_len
        epoch_acc = running_corrects.double() / epoch_count
        print('on test set loss = {:.4f}, accu = {:.4f} \n'.format(epoch_loss, epoch_acc))

        return

    ############################################################################################################
    #                      predict
    ############################################################################################################

    def load_worddict(self):
        """Load the char->index vocabulary from vocab.txt.

        :return: dict mapping char -> index
        """
        with open(self.vocab_path, encoding="UTF-8") as f:
            # strip only the newline: space chars are valid vocabulary entries
            vocabs = [line.strip('\n') for line in f]
        word_dict = {wd: index for index, wd in enumerate(vocabs)}
        return word_dict

    def build_input(self, text):
        """Convert input text into vocabulary indices, mapping OOV to 'UNK'.

        :param text: input string
        :return: list of vocabulary indices
        """
        x = []
        for char in text:
            if char not in self.word_dict:
                char = 'UNK'
            x.append(self.word_dict.get(char))
        return x

    def prepare_predict_dataset(self, text_list, batch_size):
        """Build a torchtext iterator over raw input texts for prediction.

        :param text_list: list of input strings
        :param batch_size: prediction batch size
        :return: BucketIterator yielding (padded indices, lengths) batches
        """
        if self.word_dict is None:
            self.word_dict = self.load_worddict()
            self.VOCAB_SIZE = len(self.word_dict)

        TEXT = Field(sequential=True, use_vocab=False, batch_first=True, fix_length=None,
                     eos_token=None, init_token=None, include_lengths=True, pad_token=0)
        fields = [("text", TEXT)]

        examples = []
        for text in text_list:
            x_token = self.build_input(text)
            new_example = Example.fromlist([x_token], fields)
            examples.append(new_example)

        data_test = Dataset(examples, fields)
        test_iter = BucketIterator(data_test, batch_size=batch_size,
                                   device=self.device,
                                   train=True,
                                   shuffle=False,
                                   # no sorting, so predictions keep the input order
                                   sort_key=None,
                                   sort_within_batch=False,
                                   repeat=False)

        return test_iter

    def predict(self, text_list, batch_size=10):
        """Predict entity annotations for a list of raw texts.

        :param text_list: list of input strings
        :param batch_size: prediction batch size
        :return: per-text lists of (entity, start, end, tag) tuples
        """
        self.batch_size = batch_size

        test_iter = self.prepare_predict_dataset(text_list, self.batch_size)
        print("count of samples = ", len(test_iter))

        self.embedding_matrix = None
        model = self.load_model(model_file=None)
        # disable gradients to save a large amount of memory
        for param in model.parameters():
            param.requires_grad = False
        print(model)

        if self.use_GPU:
            model.to(self.device)

        model.eval()
        predicted_tags = []
        for step, batch in enumerate(test_iter):
            word = batch.text[0]
            seq_lengths = batch.text[1]
            preds = model.predict(word, seq_lengths)
            predicted_tags.extend(preds.cpu().numpy())

        # reverse lookup: class index -> BIO label string
        self.label_dict = {j: i for i, j in self.class_dict.items()}
        results = []
        for text, result in zip(text_list, predicted_tags):
            chars = [i for i in text]
            tags = [self.label_dict[i] for i in result]
            res = list(zip(chars, tags))
            line_tag = self.extract_code(res)
            results.append(line_tag)

        return results

    def extract_code(self, char_tag_list):
        """Convert a per-char BIO stream into four-column annotation tuples.

        :param char_tag_list: list of (char, tag) pairs
        :return: list of (entity_text, start_index, end_index, tag) tuples
        """
        # work on a copy extended with an 'O' sentinel so the final entity is
        # flushed and the caller's list is not mutated (original appended
        # the sentinel to the input in place)
        stream = char_tag_list + [(' ', 'O')]
        result = []
        start = -1
        new_word = ""
        cur_tag = ""
        for index, (char, tag) in enumerate(stream):
            if '-B' in tag[-2:]:  # start of an entity
                if len(new_word) > 0:  # consecutive B tags: flush the previous entity
                    end = index - 1
                    result.append((new_word, start, end, cur_tag))

                start = index  # index where the entity begins
                new_word = char
                cur_tag = tag[:-2]
            elif '-I' in tag[-2:] and tag[:-2] == cur_tag:  # continuation with matching type
                new_word += char
            else:  # 'O', or an -I whose type does not match: flush any open entity
                if len(new_word) > 0:
                    end = index - 1
                    result.append((new_word, start, end, cur_tag))
                new_word = ""
                cur_tag = ""

        return result

    def generate_tag_sample(self, original_filename, start_id, count, batch_size):
        """Predict BIO streams for each line of an input file and save each
        line as a raw-text file plus a four-column annotation file.

        :param original_filename: input data file under data/original_txt
        :param start_id: id assigned to the first output line
        :param count: maximum number of input lines to process
        :param batch_size: prediction batch size
        """
        original_path = PROJECT_ROOT + "//data//original_txt//{}".format(original_filename)
        original_text = []
        k = 0
        with open(original_path, "r", encoding='utf-8') as f:
            for line in f:
                line = line.replace(' ', '').replace('\n', '').replace('\t', '')
                if len(line) > 0:
                    original_text.append(line)
                k += 1
                # NOTE(review): this reads count+1 lines before breaking
                # (k > count); kept as-is to preserve existing behavior
                if k > count:
                    break

        result = self.predict(original_text, batch_size)

        # tag_dict maps Chinese name -> English tag, but extract_code yields
        # the English tag, so invert the mapping to recover the Chinese label
        # (the original indexed tag_dict directly, which raised KeyError)
        inv_tag_dict = {en: cn for cn, en in self.tag_dict.items()}

        # save each line to its own raw-text file plus an annotation file
        sample_id = start_id
        for original, tag_set in zip(original_text, result):
            original_txt = PROJECT_ROOT + "//data//original_txt//病理诊断-{}.txtoriginal.txt".format(sample_id)
            txt = PROJECT_ROOT + "//data//original_txt//病理诊断-{}.txt".format(sample_id)

            with open(original_txt, "w", encoding='utf-8') as f:
                f.write(original)

            with open(txt, "w", encoding='utf-8') as f:
                # local counter renamed: the original reused the `count`
                # parameter name here
                written = 0
                len_terms = len(tag_set)
                for item in tag_set:
                    print("generate >>> ", sample_id, " text: ", item)
                    f.write("{}\t{}\t{}\t{}".format(item[0], item[1], item[2], inv_tag_dict[item[3]]))
                    written += 1
                    if written < len_terms:  # no trailing newline after the last row
                        f.write("\n")

            sample_id += 1

