#!/usr/bin/env python  
#-*- coding:utf-8 _*-  
""" 
@author:hello_life 
@license: Apache Licence 
@file: data_loader.py 
@time: 2022/04/30
@software: PyCharm 
description:该文件用于数据存储与加载
"""
import os

import json
import pickle
import numpy as np
from tqdm import tqdm
import torchtext
from torchtext.vocab import build_vocab_from_iterator
from torchtext.data.functional import to_map_style_dataset

from parameters.lstm_parameters import Config

def data_save(config):
    """
    Read config.data_path (one JSON object per line) and store the parsed
    samples as a compressed .npz archive at config.data_save_path.

    Each line is converted by data_process into
        text:  ["原","神","出","新","活","动","了"]
        label: ["B-game","I-game","O","O","O","O","O"]

    :param config: object with data_path and data_save_path attributes
    :return: None (writes config.data_save_path as a side effect)
    """
    word_list = []
    label_list = []

    with open(config.data_path, encoding="utf-8") as f:
        for line in tqdm(f.readlines()):
            data = json.loads(line.strip())
            content, label = data_process(data)
            word_list.append(content)
            label_list.append(label)

    # save after the input file is closed; nothing below needs the handle
    np.savez_compressed(config.data_save_path, content=word_list, label=label_list)
    # plain string: the original used an f-string with no placeholders
    print("------------------data_process done---------------")

def data_process(data):
    """
    Convert one annotated sample of the form
    {"text": "彭小军认为，...", "label": {"address": {"台湾": [[15, 16]]}, ...}}
    into a character list plus a BIOS tag list, e.g.
        text:  ["原","神","出","新","活","动","了"]
        label: ["B-game","I-game","O","O","O","O","O"]

    :param data: dict with "text" and optionally "label" (dev sets may omit it)
    :return: (characters, tags) of equal length
    """
    chars = list(data["text"])
    tags = ["O"] * len(chars)
    # validation/test data may carry no "label" key
    entity_map = data.get("label", None)

    if entity_map:  # e.g. {"address": {"台湾": [[15, 16]]}, "name": {"彭小军": [[0, 2]]}}
        for entity_class, mentions in entity_map.items():
            for mention_text, spans in mentions.items():
                for span in spans:
                    start, end = span[0], span[1]
                    # spans are inclusive; sanity-check against the raw text
                    assert "".join(chars[start:end + 1]) == mention_text
                    if start == end:
                        tags[start] = "S-" + entity_class
                    else:
                        tags[start] = "B-" + entity_class
                        tags[start + 1:end + 1] = ["I-" + entity_class] * (end - start)
    return chars, tags

###################### Vocabulary-building pipeline #####################
# 1. Tokenizer: text is split character-by-character; data_process has
#    already done this, so no extra tokenizer is needed.
# 2. Wrap each processed sentence in an iterable for the vocab builder.
# 3. Build the vocabulary from that iterable.

def yeild_tokens(data_iter):
    """
    Pass samples through unchanged as the token iterable expected by
    build_vocab_from_iterator; the text is already split into characters,
    so no tokenizer is applied here.

    :param data_iter: iterable of token lists
    :return: generator yielding each sample as-is
    """
    yield from data_iter

def data_iter(config):
    """
    Yield each stored character sequence from the .npz archive written by
    data_save.

    :param config: object with a data_save_path attribute
    :return: generator over the stored "content" entries
    """
    archive = np.load(config.data_save_path, allow_pickle=True)
    yield from archive["content"]


def build_vocab(config):
    """
    Build the vocabulary from the stored training text and pickle its
    string-to-index mapping to config.vocab_path, unless that file
    already exists.

    :param config: object with data_save_path and vocab_path attributes
    :return: None (writes config.vocab_path as a side effect)
    """
    # vocab object over the character stream; specials come first
    vocab = build_vocab_from_iterator(
        yeild_tokens(data_iter(config)),
        min_freq=0,
        specials=["<pad>", "<unk>"],
    )

    if os.path.exists(config.vocab_path):
        print(f"词表已存在,路径为{config.vocab_path}")
    else:
        # persist only the str -> index dict, not the torchtext object
        with open(config.vocab_path, "wb") as f:
            pickle.dump(vocab.get_stoi(), f)
            print(f"创建词表成功，路径为{config.vocab_path}")


# Slice the rows we need out of the pretrained word2vec weights
def vocab_word2vec(config):
    """
    Build an embedding matrix for the training vocabulary from a pretrained
    word2vec text file and save it compressed.

    Rows default to uniform random values; rows whose token appears in the
    pretrained file are overwritten with the pretrained 300-d vector.

    :param config: object with vocab (str -> int dict),
                   sougou_pretrian_embedding and pretrain_save_dir attributes
    :return: None (writes config.pretrain_save_dir as a side effect)
    """
    word_2_id = config.vocab
    embedding = np.random.rand(len(word_2_id), 300)
    with open(config.sougou_pretrian_embedding, "rb") as f:
        for line in tqdm(f.readlines()):
            result = line.strip().split()
            # BUG FIX: the file is read in binary mode, so decode the token
            # before the lookup; the original compared bytes against str
            # keys and therefore never matched anything
            word = result[0].decode("utf-8", errors="ignore")
            if word in word_2_id:
                idx = word_2_id[word]
                emb = [float(x) for x in result[1:]]
                # BUG FIX: write into the embedding matrix; the original did
                # `word_2_id[idx] = ...`, corrupting the vocab dict and
                # saving a purely random matrix
                embedding[idx] = np.asarray(emb, dtype=float)
    np.savez_compressed(config.pretrain_save_dir, embedding=embedding)

#############################################################
## Convert text to id sequences
def content2id(content, config):
    """
    Map a character list to vocabulary ids, padded/truncated to
    config.seq_len. Unknown characters map to the "<unk>" id.

    BUG FIX: the original extended the caller's list in place with "<pad>"
    tokens; this version leaves the input list unmodified.

    :param content: list of characters
    :param config: object with seq_len and vocab (str -> int dict)
    :return: list[int] of length config.seq_len
    """
    if config.seq_len > len(content):
        # pad a copy; never mutate the caller's list
        padded = content + ["<pad>"] * (config.seq_len - len(content))
    else:
        padded = content[:config.seq_len]

    unk_id = config.vocab["<unk>"]
    return [config.vocab.get(word, unk_id) for word in padded]

def label2id(labels, config):
    """
    Map tag strings to ids via config.label_id, padded with 0 and
    truncated to config.seq_len.

    :param labels: list of tag strings
    :param config: object with seq_len and label_id (str -> int dict)
    :return: list[int] of length config.seq_len
    """
    ids = [config.label_id[tag] for tag in labels]

    shortfall = config.seq_len - len(labels)
    if shortfall > 0:
        # pad with the id 0 (the "O" tag by convention)
        ids = ids + [0] * shortfall
    else:
        ids = ids[:config.seq_len]
    return ids

def mask_make(content, config):
    """
    Build the attention mask for one sequence: 1 over real tokens, 0 over
    padding, with total length config.seq_len.

    :param content: list of tokens (only its length is used)
    :param config: object with a seq_len attribute
    :return: list[int] of length config.seq_len
    """
    real_len = min(len(content), config.seq_len)
    return [1] * real_len + [0] * (config.seq_len - real_len)

def id2label(seq, config):
    """
    Convert predicted label ids back to tag strings.

    :param seq: list of ids, e.g. [0, 0, 2, 3]
    :param config: object with an id2label (id -> tag) mapping
    :return: list of tags, e.g. ["O", "O", "MISC", "book"]
    """
    mapping = config.id2label
    # avoid the original's loop variable named `id` (shadows the builtin)
    return [mapping[label_id] for label_id in seq]


def collate_fn(batch):
    """
    Post-process a mini-batch produced by a DataLoader.

    Not implemented yet; currently a placeholder that returns None for
    every batch.

    :param batch: list of samples emitted by the dataset
    :return: None (placeholder)
    """
    ##todo
    pass


#############################################################
def get_entities(seq,config):
    """
    Extract entity chunks from a sequence of label ids.

    :param seq: list of label ids, or a list of such lists (a batch of
        sentences); nested lists are flattened with the "O" id inserted
        between sentences so a chunk cannot span a sentence boundary
    :param config: object with label_id (str -> int) and id2label mappings
    :return: list of (chunk_type, chunk_start, chunk_end) tuples with
        inclusive offsets, e.g. [('PER', 0, 1), ('LOC', 3, 3)]
    """

    # flatten a batch of sequences, separating them with the "O" id
    if any(isinstance(s, list) for s in seq):
        seq = [item for sublist in seq for item in sublist + [config.label_id['O']]]

    # map ids back to tag strings such as "B-PER"
    seq=id2label(seq,config)

    prev_tag="O"
    prev_type=""
    begin_offset=0
    chunks=[]

    # trailing "O" sentinel guarantees the final open chunk is closed
    for i,chunk in enumerate(seq+["O"]):
        tag=chunk[0]                    # BIOS prefix: "B"/"I"/"O"/"S"
        type_=chunk.split("-")[-1]      # entity class, e.g. "PER"

        if end_of_chunk(prev_tag,tag,prev_type,type_):
            # previous chunk ended just before position i
            chunks.append((prev_type,begin_offset,i-1))

        if start_of_chunk(prev_tag,tag,prev_type,type_):
            begin_offset=i
        prev_type=type_
        prev_tag=tag
    return chunks

def end_of_chunk(prev_tag, tag, prev_type, type_):
    """Check whether a chunk ended between the previous and current word.

    Args:
        prev_tag: previous chunk tag prefix ("B"/"I"/"O"/"S").
        tag: current chunk tag prefix.
        prev_type: previous entity type, e.g. "PER".
        type_: current entity type.

    Returns:
        chunk_end: True if the previous chunk closes at this position.
    """
    # parameter renamed from `type` to `type_` for consistency with
    # start_of_chunk (and this docstring), and to stop shadowing the
    # builtin; the in-file caller passes arguments positionally
    chunk_end = False

    # a single-token chunk always ends itself
    if prev_tag == "S":
        chunk_end = True
    # predicted label sequences may contain these transitions
    if prev_tag == "B" and tag in ("B", "O", "S"):
        chunk_end = True
    if prev_tag == "I" and tag in ("B", "S", "O"):
        chunk_end = True
    # an entity-type change inside a chunk also terminates it
    if prev_tag != "O" and prev_tag != "." and prev_type != type_:
        chunk_end = True

    return chunk_end



def start_of_chunk(prev_tag, tag, prev_type, type_):
    """Check whether a chunk started between the previous and current word.

    Args:
        prev_tag: previous chunk tag prefix ("B"/"I"/"O"/"S").
        tag: current chunk tag prefix.
        prev_type: previous entity type.
        type_: current entity type.

    Returns:
        chunk_start: True if a new chunk begins at this position.
    """
    # B and S always open a chunk
    if tag in ("B", "S"):
        return True

    # an I right after S or O can only come from a (noisy) prediction;
    # treat it as opening a new chunk
    if tag == "I" and prev_tag in ("S", "O"):
        return True

    # a type change on a non-O tag also starts a chunk
    if tag != "O" and tag != "." and prev_type != type_:
        return True

    return False


#############################################################
def get_f1(y_pred, y_label, config):
    """
    Compute entity-level F1 between predictions and gold labels.

    :param y_pred:  predicted label sequences, e.g. [['B-PER','I-PER','O','B-GAME']]
    :param y_label: gold label sequences of the same shape
    :param config: object with mode, label, label_id and id2label attributes
    :return: overall f1 when config.mode == "dev"; otherwise a tuple
             (overall_f1, {label: per-class f1})
    """
    pred_entity = set(get_entities(y_pred, config))
    label_entity = set(get_entities(y_label, config))
    true_num = len(pred_entity & label_entity)
    pred_num = len(pred_entity)
    label_num = len(label_entity)

    p = true_num / pred_num if pred_num > 0 else 0
    r = true_num / label_num if label_num > 0 else 0
    f1_score_total = 2 * p * r / (p + r) if p + r > 0 else 0

    if config.mode == "dev":
        return f1_score_total

    f1_score_class = {}
    for label in config.label:
        # BUG FIX: the original loop variable `p` clobbered the precision
        # computed above; use distinct names and set comprehensions
        true_entities_label = {ent for ent in label_entity if ent[0] == label}
        pred_entities_label = {ent for ent in pred_entity if ent[0] == label}
        true_cls = len(true_entities_label & pred_entities_label)
        pred_cls = len(pred_entities_label)
        label_cls = len(true_entities_label)

        p_label = true_cls / pred_cls if pred_cls > 0 else 0
        r_label = true_cls / label_cls if label_cls > 0 else 0
        f1_label = 2 * p_label * r_label / (p_label + r_label) if p_label + r_label > 0 else 0
        f1_score_class[label] = f1_label
    return f1_score_total, f1_score_class

#############################################################


