import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from transformers import BertModel, AdamW, BertTokenizer
from TorchCRF import CRF

# Load the whitespace-tokenised NER train/test datasets.
# NOTE(review): `error_bad_lines=False` was deprecated in pandas 1.3 and removed
# in 2.0; `on_bad_lines='skip'` is the supported equivalent (skip malformed rows).
train_set = pd.read_csv(r"D:\study\毕设\data\ner_train_dataset.csv", on_bad_lines='skip')
test_set = pd.read_csv(r"D:\study\毕设\data\ner_test_dataset.csv", on_bad_lines='skip')

#实体级别评价模型，计算每个标签的精确率，召回率，F1分数
# Entity-level evaluation: per-entity precision, recall and F1 score.
class Metrics(object):
    """Entity-level evaluation over BIO-style tag sequences.

    Computes per-entity precision, recall and F1, plus a support-weighted
    average.  An entity is counted when a ``B-`` tag is followed by an
    ``I-`` tag of the same type (single-token ``B-`` entities are not
    counted by the per-entity counters — kept as-is from the original design).
    """

    def __init__(self, std_tags, predict_tags):
        # Flatten per-sentence tag lists into flat per-token lists,
        # e.g. [[t1, t2], [t3, t4]...] --> [t1, t2, t3, t4...]
        self.std_tags = flatten_lists(std_tags)          # gold tags
        self.predict_tags = flatten_lists(predict_tags)  # predicted tags

        # Counters
        self.std_entity_counter = self.count_entity_dict(self.std_tags)          # per-entity counts in the gold tags
        self.predict_entity_counter = self.count_entity_dict(self.predict_tags)  # per-entity counts in the predictions
        print("标准各实体个数", self.std_entity_counter)
        print("预测各实体个数", self.predict_entity_counter)

        self.std_entity_number = self.count_entity(self.std_tags)    # total number of gold entities (B- tags)
        print("标准实体数", self.std_entity_number)

        self.corrent_entity_number = self.count_correct_entity()     # correctly predicted entities per type
        print("正确的实体", self.corrent_entity_number)

        self.entity_set = set(self.std_entity_counter)               # the set of entity type names
        print("实体集合", self.entity_set)

        # Precision per entity type
        self.precision_scores = self.cal_precision()
        print("各个实体的准确率", self.precision_scores)

        # Recall per entity type
        self.recall_scores = self.cal_recall()
        print("各个实体的召回率", self.recall_scores)

        # F1 per entity type
        self.f1_scores = self.cal_f1()
        print("各个实体的f1值", self.f1_scores)

        # Support-weighted averages
        self.wighted_average = self._cal_wighted_average()
        print("各项指标的加权均值", self.wighted_average)

    # Precision per type: correctly predicted entities / all predicted entities of that type.
    def cal_precision(self):
        precision_scores = {}
        for entity in self.entity_set:
            # max(1e-10, ...) guards against division by zero when nothing was predicted
            precision_scores[entity] = self.corrent_entity_number.get(entity, 0) / max(1e-10, self.predict_entity_counter[entity])
        return precision_scores

    # Recall per type: correctly predicted entities / all gold entities of that type.
    def cal_recall(self):
        recall_scores = {}
        for entity in self.entity_set:
            recall_scores[entity] = self.corrent_entity_number.get(entity, 0) / max(1e-10, self.std_entity_counter[entity])
        return recall_scores

    # F1 per type: harmonic mean of precision and recall.
    def cal_f1(self):
        f1_scores = {}
        for entity in self.entity_set:
            p, r = self.precision_scores[entity], self.recall_scores[entity]
            f1_scores[entity] = 2 * p * r / (p + r + 1e-10)  # epsilon avoids 0/0
        return f1_scores

    # Print the results as a table.
    def report_scores(self):
        # header row
        header_format = '{:>9s}  {:>9} {:>9} {:>9} {:>9}'
        header = ['precision', 'recall', 'f1-score', 'support']
        print(header_format.format('', *header))
        row_format = '{:>9s}  {:>9.4f} {:>9.4f} {:>9.4f} {:>9}'
        # one row of p, r, f1 per entity type
        for entity in self.entity_set:
            print(row_format.format(
                entity,
                self.precision_scores[entity],
                self.recall_scores[entity],
                self.f1_scores[entity],
                self.std_entity_counter[entity]   # support column
            ))
        # weighted-average summary row
        avg_metrics = self._cal_wighted_average()
        print(row_format.format(
            'avg/total',
            avg_metrics['precision'],
            avg_metrics['recall'],
            avg_metrics['f1_score'],
            self.std_entity_number
        ))

    # Support-weighted means of precision / recall / F1.
    def _cal_wighted_average(self):
        weighted_average = {}
        total = self.std_entity_number  # total number of gold entities

        weighted_average['precision'] = 0.
        weighted_average['recall'] = 0.
        weighted_average['f1_score'] = 0.
        for entity in self.entity_set:
            size = self.std_entity_counter[entity]  # per-type support in the gold tags
            weighted_average['precision'] += self.precision_scores[entity] * size
            weighted_average['recall'] += self.recall_scores[entity] * size
            weighted_average['f1_score'] += self.f1_scores[entity] * size

        for metric in weighted_average.keys():
            weighted_average[metric] /= total

        return weighted_average

    # Count entities of each type in a flat tag list.
    def count_entity_dict(self, tag_list):
        enti_dict = {"BEAN": 0, "DISEASE": 0, "INSECT": 0, "PATHONGEN": 0}
        flag = 0  # 0 = waiting for an entity start, 1 = B- seen
        for tag in tag_list:
            if 'B-' in tag and flag == 0:  # entity start
                flag = 1
            if 'I-' in tag and flag == 1:  # I- right after B- --> one complete entity
                flag = 0  # back to the initial state so extra I- tags are not re-counted
                tag = tag[2:]
                enti_dict[tag] += 1
            if tag == 'O':  # O breaks any pending entity
                flag = 0
        return enti_dict

    # Count correctly predicted entities of each type.
    def count_correct_entity(self):
        correct_enti_dict = {"BEAN": 0, "DISEASE": 0, "INSECT": 0, "PATHONGEN": 0}
        flag = 0  # 0 = waiting for a matching entity start, 1 = matching B- seen
        for std_tag, predict_tag in zip(self.std_tags, self.predict_tags):
            if 'B-' in std_tag and std_tag == predict_tag and flag == 0:  # matching B- starts an entity
                flag = 1
            if 'I-' in std_tag and std_tag == predict_tag and flag == 1:
                # BUG FIX: reset the flag after counting, mirroring count_entity_dict.
                # Without this, entities with several I- tags (B-X I-X I-X ...) were
                # counted once per matching I-, inflating precision/recall above 1.
                flag = 0
                std_tag = std_tag[2:]
                correct_enti_dict[std_tag] += 1
            if std_tag != predict_tag:  # any mismatch invalidates the pending entity
                flag = 0
        return correct_enti_dict

    # Count the total number of entities (B- tags) in a flat tag list.
    def count_entity(self, tag_list):
        entity_count = 0
        for tag in tag_list:
            if 'B-' in tag:
                entity_count += 1
        return entity_count

# 将列表的列表拼成一个列表
# Merge a list of lists into one flat list.
def flatten_lists(lists):
    """Concatenate the elements of *lists* into a single flat list.

    Elements that are lists are spliced in; any other element is
    appended unchanged.
    """
    merged = []
    for element in lists:
        if type(element) is list:
            merged.extend(element)
        else:
            merged.append(element)
    return merged

class Model(nn.Module):
    """BERT (frozen) + BiLSTM + CRF sequence tagger.

    Args:
        tag_num: size of the tag vocabulary (CRF state count / FC output dim).
        max_length: padding length for tokenisation.
    """

    def __init__(self, tag_num, max_length):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert-base-chinese')
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        config = self.bert.config
        # BUG FIX: max_length was accepted but never stored; forward() silently
        # depended on a same-named global, which breaks if the class is imported.
        self.max_length = max_length
        # BiLSTM over BERT embeddings; hidden_size//2 per direction keeps the
        # concatenated output at config.hidden_size.
        self.lstm = nn.LSTM(bidirectional=True, num_layers=2, input_size=config.hidden_size, hidden_size=config.hidden_size//2, batch_first=True)
        self.crf = CRF(tag_num)
        self.fc = nn.Linear(config.hidden_size, tag_num)

    def forward(self, x, y):
        """Return (crf_score, decoded_tags) for one tokenised sentence *x* and its label ids *y*."""
        tokenizer_result = self.tokenizer.encode_plus(x, return_token_type_ids=True, return_attention_mask=True,
                                                      return_tensors='pt',
                                                      padding='max_length', max_length=self.max_length)
        # return_tensors='pt' already yields tensors — no torch.tensor() re-wrapping needed
        input_ids = tokenizer_result['input_ids']
        attention_mask = tokenizer_result['attention_mask']
        token_type_ids = tokenizer_result['token_type_ids']
        # BERT is used as a frozen feature extractor; no gradients flow into it.
        with torch.no_grad():
            bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]
        lstm_output, _ = self.lstm(bert_output)
        fc_output = self.fc(lstm_output)
        # NOTE(review): TorchCRF's forward returns a log-likelihood, not a loss;
        # the caller currently compensates with abs() — confirm the intended sign.
        loss = self.crf(fc_output, y)
        tag = self.crf.decode(fc_output)
        return loss, tag

if __name__ == '__main__':
    # Hyper-parameters
    epoch = 5
    max_length = 148
    batch_size = 32
    # Preprocessing: questions and labels are whitespace-separated token/tag strings
    train_x = train_set['question'].values
    train_y = train_set['label'].values
    test_x = test_set['question'].values
    test_y = test_set['label'].values
    train_features = [sentence.split() for sentence in train_x]
    train_y1 = [labels.split() for labels in train_y]
    test_features = [sentence.split() for sentence in test_x]
    test_y1 = [labels.split() for labels in test_y]
    tag_to_ix = {"B-BEAN": 0, "I-BEAN": 1, "B-DISEASE": 2,"I-DISEASE": 3,"B-INSECT": 4,"I-INSECT": 5,"B-PATHONGEN": 6,"I-PATHONGEN": 7,"O":8}
    ix_to_tag = {0:"B-BEAN",1:"I-BEAN",2:"B-DISEASE",3:"I-DISEASE",4:"B-INSECT",5:"I-INSECT",6:"B-PATHONGEN",7:"I-PATHONGEN",8:"O"}
    # Map tag strings to ids, pad with 'O' up to max_length.
    # BUG FIX: sequences longer than max_length are now also truncated, so the
    # targets always align with the max_length-padded tokenizer output.
    # (Also renamed the loop variable — it used to shadow train_features/test_features.)
    train_targets = []
    test_targets = []
    for label in train_y1:
        ids = [tag_to_ix[tag] for tag in label]
        if len(ids) < max_length:
            ids += [tag_to_ix['O']] * (max_length - len(ids))
        train_targets.append(ids[:max_length])
    for label in test_y1:
        ids = [tag_to_ix[tag] for tag in label]
        if len(ids) < max_length:
            ids += [tag_to_ix['O']] * (max_length - len(ids))
        test_targets.append(ids[:max_length])
    # Split the training data into batches (the trailing partial batch is dropped).
    batch_count = int(len(train_features) / batch_size)
    batch_train_inputs, batch_train_targets = [], []
    for i in range(batch_count):
        batch_train_inputs.append(train_features[i * batch_size: (i + 1) * batch_size])
        batch_train_targets.append(train_targets[i * batch_size: (i + 1) * batch_size])
    # Training
    model = Model(len(tag_to_ix), max_length)
    optimizer = AdamW(model.parameters(), lr=5e-4)
    model.train()
    for _ in range(epoch):
        los = 0
        for i in range(batch_count):
            for j in range(batch_size):
                optimizer.zero_grad()  # 1. clear gradients
                loss, result = model(batch_train_inputs[i][j], torch.tensor(batch_train_targets[i][j]).unsqueeze(dim=0))  # 2. forward pass + loss
                # NOTE(review): TorchCRF returns a log-likelihood; abs() is used
                # here to obtain a positive loss — confirm the intended sign.
                loss = abs(loss)
                loss.backward()  # 3. backpropagate
                optimizer.step()  # 4. update parameters
                # BUG FIX: accumulate the Python scalar via item(); adding the
                # tensor itself kept the autograd graph alive across iterations.
                los += loss.item()
            # Report the average loss every five batches
            if i % 5 == 0:
                print("Batch:%d,Loss %.4f" % ((i), los / 5))
                los = 0  # BUG FIX: the accumulator was reset to 5 instead of 0

    # Evaluation
    model.eval()
    hit = 0  # number of correct predictions
    total = len(test_features)  # number of test examples
    predict_targets = []
    with torch.no_grad():
        for i in range(total):
            predict_target = []
            _, predict = model(test_features[i], torch.tensor(test_targets[i]).unsqueeze(dim=0))
            for j in predict:
                predict_target.append(ix_to_tag[j[0]])
            predict_targets.append(predict_target)
    # Convert gold label ids back to tag strings for the entity-level metrics
    for i in range(len(test_targets)):
        for j in range(len(test_targets[i])):
            test_targets[i][j] = ix_to_tag[test_targets[i][j]]
    metrics = Metrics(test_targets, predict_targets)
    metrics.report_scores()
    torch.save(model, r'..\..\model\ner_2.pt')
