import json
import random
import logging
import spacy
from spacy.training import Example
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score

def convert_dataturks_to_spacy(dataturks_JSON_FilePath):
    """Convert a DataTurks NER export (one JSON object per line) into spaCy
    training tuples.

    Parameters
    ----------
    dataturks_JSON_FilePath : str
        Path to the DataTurks JSON-lines export.

    Returns
    -------
    list[tuple[str, dict]] | None
        ``(text, {"entities": [(start, end, label), ...]})`` tuples whose
        entity spans are sorted by start offset and non-overlapping, or
        ``None`` if the file could not be processed (the error is logged).
    """
    try:
        training_data = []
        with open(dataturks_JSON_FilePath, 'r', encoding='utf-8') as f:
            for line in f:
                # Skip blank rows so a stray empty line doesn't abort the run.
                if not line.strip():
                    continue
                data = json.loads(line)
                text = data['content']
                entities = []
                for annotation in data['annotation']:
                    # DataTurks stores a single point per annotation.
                    point = annotation['points'][0]
                    labels = annotation['label']
                    # Normalize: the label may be a single string or a list.
                    if not isinstance(labels, list):
                        labels = [labels]

                    for label in labels:
                        # DataTurks indices are inclusive [start, end];
                        # spaCy expects an exclusive end, hence the +1.
                        entities.append((point['start'], point['end'] + 1, label))

                # Sort by start offset so overlap filtering is a single pass.
                entities.sort(key=lambda span: span[0])

                # Drop spans overlapping an already-kept span: spaCy rejects
                # overlapping entities. Touching spans are fine (end exclusive).
                non_overlapping_entities = []
                last_end = -1
                for start, end, label in entities:
                    if start >= last_end:
                        non_overlapping_entities.append((start, end, label))
                        last_end = end

                training_data.append((text, {"entities": non_overlapping_entities}))

        return training_data
    except Exception as e:
        # Best-effort by design: log the failure and signal it with None.
        logging.exception("Unable to process " + dataturks_JSON_FilePath + "\n" + "error = " + str(e))
        return None


# Revised train_spacy: fine-tunes a pretrained model instead of training from scratch
def train_spacy():
    """Fine-tune the pretrained ``en_core_web_sm`` NER on resume data.

    Loads the pretrained pipeline, registers any new entity labels found in
    the training data, trains only the NER component for 10 iterations,
    saves the pipeline to disk, and prints token-level evaluation metrics
    on the held-out test set.

    Raises
    ------
    ValueError
        If the training data file could not be converted.
    """
    # Load the pretrained English pipeline to be fine-tuned.
    nlp = spacy.load("en_core_web_sm")

    TRAIN_DATA = convert_dataturks_to_spacy("E:/code/python-workspace/ResumeParser/train/traindata.json")
    if TRAIN_DATA is None:
        # convert_dataturks_to_spacy logs and returns None on failure;
        # fail loudly here instead of crashing later in the loop.
        raise ValueError("training data could not be loaded")

    # Get the NER component, creating it if the pipeline lacks one.
    # NOTE: spaCy 3.x add_pipe takes the component *name*; the old
    # create_pipe(...) / add_pipe(component) calls raise an error there.
    if 'ner' not in nlp.pipe_names:
        ner = nlp.add_pipe('ner', last=True)
    else:
        ner = nlp.get_pipe('ner')

    # Register every entity label present in the training data.
    for _, annotations in TRAIN_DATA:
        for _, _, label in annotations.get('entities'):
            ner.add_label(label)

    # Disable every other component so only NER weights are updated.
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
    with nlp.disable_pipes(*other_pipes):
        # resume_training() keeps the pretrained weights; begin_training()
        # would re-initialize the model and discard them.
        optimizer = nlp.resume_training()
        for itn in range(10):  # 10 passes over the data
            print("Starting iteration " + str(itn))
            random.shuffle(TRAIN_DATA)
            losses = {}
            for text, annotations in TRAIN_DATA:
                example = Example.from_dict(nlp.make_doc(text), annotations)
                nlp.update([example], drop=0.2, sgd=optimizer, losses=losses)

            print(losses)  # loss after each pass

    # Persist the fine-tuned pipeline.
    model_dir = "E:/code/python-workspace/ResumeParser/saved_model"
    nlp.to_disk(model_dir)
    print(f"Model saved to {model_dir}")

    # Evaluate on the held-out test set.
    examples = convert_dataturks_to_spacy("E:/code/python-workspace/ResumeParser/train/testdata.json")
    _evaluate_ner(nlp, examples)


def _evaluate_ner(nlp, examples):
    """Print predicted entities and per-label token metrics for *examples*.

    For every label the model predicts, tokens are scored as a binary
    classification (label vs ``"Not <label>"``) against the gold character
    spans, and precision/recall/F-score/accuracy are averaged over all
    test documents (not just the last one).
    """
    # label -> [precision_sum, recall_sum, f_sum, accuracy_sum, doc_count]
    totals = {}
    for text, annot in examples:
        doc_to_test = nlp(text)

        # Show what the model found, grouped by entity label.
        found = {}
        for ent in doc_to_test.ents:
            found.setdefault(ent.label_, set()).add(ent.text)
        for label, ent_texts in found.items():
            print(label + ":")
            for ent_text in ent_texts:
                print(ent_text)

        # Token-level gold labels derived from the annotated char spans.
        gold = ["O"] * len(doc_to_test)
        for start, end, label in annot.get("entities", []):
            for i, token in enumerate(doc_to_test):
                if token.idx >= start and token.idx + len(token) <= end:
                    gold[i] = label

        for label in found:
            y_true = [g if g == label else "Not " + label for g in gold]
            y_pred = [label if token.ent_type_ == label else "Not " + label
                      for token in doc_to_test]
            # zero_division=0 silences warnings when a label never occurs.
            p, r, f, _ = precision_recall_fscore_support(
                y_true, y_pred, average="weighted", zero_division=0)
            a = accuracy_score(y_true, y_pred)

            sums = totals.setdefault(label, [0.0, 0.0, 0.0, 0.0, 0])
            sums[0] += p
            sums[1] += r
            sums[2] += f
            sums[3] += a
            sums[4] += 1

    for label, (p, r, f, a, n) in totals.items():
        print("\nFor Entity " + label + "\n")
        print("Accuracy : " + str((a / n) * 100) + "%")
        print("Precision : " + str(p / n))
        print("Recall : " + str(r / n))
        print("F-score : " + str(f / n))

train_spacy()
