#!/usr/bin/env python  
#-*- coding:utf-8 _*-  
""" 
@author:hello_life 
@license: Apache Licence 
@file: data_utils.py 
@time: 2022/05/05
@software: PyCharm 
description:
"""
import json
import torch

import numpy as np
from tqdm import tqdm
from transformers import BertTokenizer


################### data conversion ###################
def data_save(config):
    """
    Read the raw JSONL file and store the processed dataset as a compressed
    ``.npz`` archive with two aligned object arrays::

        content: ["原", "神", "出", "新", "活", "动", "了"]
        label:   ["B-game", "I-game", "O", "O", "O", "O", "O"]

    :param config: object providing ``pretrained_model_path``,
        ``raw_data_path`` and ``processed_data_path``
    :return: None (writes ``config.processed_data_path``)
    """
    # NOTE(review): the tokenizer is loaded here but data_process never uses
    # it; kept so the side effect (loading/caching the vocab) is preserved —
    # confirm it can be dropped.
    tokenizer = BertTokenizer.from_pretrained(config.pretrained_model_path)

    word_list = []
    label_list = []
    with open(config.raw_data_path, encoding="utf-8") as f:
        # Stream the file line by line instead of materializing readlines().
        for line in tqdm(f):
            data = json.loads(line.strip())
            content, label = data_process(data, tokenizer)
            word_list.append(content)
            label_list.append(label)

    np.savez_compressed(config.processed_data_path, content=word_list, label=label_list)
    # No placeholders in the message, so no f-string is needed.
    print("------------------data_process done---------------")

def data_save_json(config):
    """
    Read the raw JSONL file and store the processed dataset as JSON Lines,
    one record per line::

        {"contents": "原神出新活动了", "labels": ["B-game", "I-game", "O", ...]}

    :param config: object providing ``raw_data_path`` and
        ``processed_data_path_json``
    :return: None (writes ``config.processed_data_path_json``)
    """
    with open(config.raw_data_path, encoding="utf-8") as fin:
        lines = fin.readlines()

    with open(config.processed_data_path_json, "w", encoding="utf-8") as fout:
        for line in tqdm(lines):
            data = json.loads(line.strip())
            # BUG FIX: data_process requires a second positional argument
            # (which it ignores); the original one-argument call raised
            # a TypeError on every record.
            content, label = data_process(data, None)
            record = {"contents": content, "labels": label}
            # ensure_ascii=False keeps the Chinese text readable; the
            # trailing newline makes the output valid JSON Lines (the
            # original concatenated all objects with no separator).
            json.dump(record, fout, ensure_ascii=False)
            fout.write("\n")

    print("------------------data_process done---------------")

def data_process(data, tokenizer=None):
    """
    Convert one raw CLUENER-style record into (text, BIOS tag sequence).

    Input example::

        {"text": "彭小军认为，国内银行现在走的是台湾的发卡模式，...",
         "label": {"address": {"台湾": [[15, 16]]}, "name": {"彭小军": [[0, 2]]}}}

    Output is the original text plus one tag per character, e.g.
    ``["B-name", "I-name", "I-name", "O", ...]`` with ``S-`` marking
    single-character entities.

    :param data: dict with a "text" key and an optional "label" key
    :param tokenizer: unused; now optional (default None) so callers such as
        data_save_json that do not have a tokenizer can omit it, while
        existing two-argument calls keep working
    :return: tuple (text, labels) where labels is a list of BIOS tags
    """
    content = data["text"]
    # Validation/test records may carry no "label" key at all.
    label_entity = data.get("label", None)
    labels = ["O"] * len(content)

    if label_entity:  # e.g. {"address": {"台湾": [[15, 16]]}, "name": {"彭小军": [[0, 2]]}}
        for entity_class, entity in label_entity.items():  # e.g. {"台湾": [[15, 16]]}
            for entity_name, sub_num in entity.items():  # e.g. "台湾": [[15, 16]]
                for index in sub_num:
                    # Spans are inclusive [start, end] character offsets.
                    start_id, end_id = index[0], index[1]
                    # Sanity check: the annotated offsets must point at the
                    # entity surface form.
                    assert "".join(content[start_id:end_id + 1]) == entity_name
                    if start_id == end_id:
                        labels[start_id] = "S-" + entity_class  # single-char entity
                    else:
                        labels[start_id] = "B-" + entity_class
                        labels[start_id + 1:end_id + 1] = ["I-" + entity_class] * (end_id - start_id)
    return content, labels



###################data_loader
def load_data(data_dir):
    """
    Load a preprocessed ``.npz`` archive from disk.

    :param data_dir: path to the ``.npz`` file produced by data_save
    :return: an ``NpzFile`` mapping array names (content/label) to arrays
    """
    # allow_pickle is required because content/label are object arrays.
    return np.load(data_dir, allow_pickle=True)


######################### label_to_id & id_to_label ##########
def labels_to_ids(labels, config):
    """
    Map tag strings to ids, add -100 sentinels for [CLS]/[SEP], and
    pad/truncate to ``config.seq_len``.

    Positions holding -100 ([CLS], [SEP] and padding) are excluded from the
    loss computation.

    :param labels: list of tag strings, e.g. ["O", "B-game", ...]
    :param config: object providing ``label2id`` and ``seq_len``
    :return: list of ids of length exactly ``config.seq_len``
    """
    # Comprehension replaces the manual append loop (the enumerate index
    # was unused).
    label_ids = [config.label2id[label] for label in labels]
    # Wrap with -100 sentinels for the [CLS]/[SEP] tokens the tokenizer
    # adds (same layout as align_labels produces).
    new_labels = [-100] + label_ids + [-100]

    # Pad or truncate to the fixed sequence length.
    if config.seq_len > len(new_labels):  # padding
        new_labels.extend([-100] * (config.seq_len - len(new_labels)))
    else:  # truncation: keep seq_len-1 ids, re-add the [SEP] sentinel
        new_labels = new_labels[:config.seq_len - 1]
        new_labels.append(-100)

    return new_labels

def id_to_label(seq, config):
    """
    Map predicted label ids back to tag strings, dropping the -100
    sentinel positions ([CLS]/[SEP]/padding).

    :param seq: list of ids, e.g. [0, 0, 2, 3]
    :param config: object providing ``id2label``
    :return: list of tags, e.g. ["O", "O", "MISC", "book"]
    """
    # The original loop variable `id` shadowed the builtin and the
    # enumerate index was unused; a comprehension avoids both.
    return [config.id2label[tag_id] for tag_id in seq if tag_id != -100]

#### Pad with -100 at the [CLS]/[SEP] positions so they are excluded from the loss
def align_labels(label_ids):
    """Wrap a label-id sequence with -100 sentinels for [CLS] and [SEP]."""
    return [-100, *label_ids, -100]


def crf_decode(model, outputs, mask, lens, config):
    """
    Run CRF decoding on the emission scores after fixing up the mask.

    For each sequence shorter than ``config.seq_len - 2``, the mask slot at
    ``len + 1`` is zeroed — presumably the position whose label is the -100
    sentinel, which the CRF cannot consume. The first and last columns
    ([CLS]/[SEP]) are then stripped before decoding.

    :param model: model exposing ``model.crf.decode``
    :param outputs: emission scores, shape (batch, seq_len, num_tags)
    :param mask: mask of shape (batch, seq_len); modified in place
    :param lens: per-sample real sequence lengths
    :param config: object providing ``seq_len``
    :return: decoded tag-id paths from the CRF
    """
    for idx, seq_len_ in enumerate(lens):
        if seq_len_ < config.seq_len - 2:
            # Zero the slot so the CRF never sees a -100-labelled position.
            mask[idx][seq_len_ + 1] = torch.Tensor([0])
    # Drop the [CLS]/[SEP] columns and convert to a boolean mask.
    trimmed_mask = mask[:, 1:-1].gt(0)
    return model.crf.decode(outputs[:, 1:-1, :], trimmed_mask)
