#! -*- coding: utf-8 -*-
"""
@Author: AI
@Create Time: 20240709
@Info: 辅助标注
"""
import torch
from transformers import AutoTokenizer

from metric import get_bool_ids_greater_than, get_span
from schema import NER_SCHEMA, BASE_SCHEMA
import pandas as pd

THRESHOLD = 0.9 # NER filter threshold: keeps low-probability entities out of the merged output
MODEL_PATH =  '../model/saved_model/model_20240821_002909/'
# NOTE(review): torch.load unpickles arbitrary objects -- only load trusted
# checkpoints. Loading the full module object (not a state_dict) also ties
# this file to the exact class layout used at save time.
MODEL = torch.load(MODEL_PATH + 'pytorch_model.bin')
MODEL.eval()


def padding(batch, device):
    """Right-pad a batch of encoded examples to a common length.

    Args:
        batch: list of dicts, each holding 'input_ids', 'token_type_ids',
            'attention_mask' (equal-length lists of ints) and 'seq_len'
            (that length).
        device: torch device (string or device object) to move tensors to.

    Returns:
        dict of LongTensors keyed by 'input_ids', 'token_type_ids',
        'attention_mask', each shaped (batch, max_seq_len).
    """
    max_length = max(item['seq_len'] for item in batch)
    keys = ('input_ids', 'token_type_ids', 'attention_mask')
    output = {key: [] for key in keys}

    for item in batch:
        # 0 is valid padding for all three features: pad token id,
        # segment 0, and attention mask off.
        # (Renamed from `padding`, which shadowed this function's own name.)
        pad = [0] * (max_length - item['seq_len'])
        for key in keys:
            output[key].append(item[key] + pad)

    return {key: torch.LongTensor(value).to(device) for key, value in output.items()}


def merge_entity(entity_list):
    """Merge overlapping entity spans, keeping the most probable one.

    Each entity is ``[start, end, entity_type, entity_text, prob]``.
    Entities are sorted by start position; spans that overlap the current
    candidate are collapsed to the higher-probability one, and a candidate
    is only emitted once it is non-overlapping AND its probability exceeds
    the module-level THRESHOLD.

    Bug fix: the original appended the final candidate unconditionally, so
    a sub-THRESHOLD trailing entity always leaked into the result; the
    threshold is now applied consistently to the last candidate as well.

    Args:
        entity_list: list of 5-element entity records (may be empty).

    Returns:
        list of merged, threshold-filtered entity records.
    """
    if not entity_list:
        return entity_list
    new_entities = []
    entity_list = sorted(entity_list, key=lambda x: x[0])
    last_entity = entity_list[0]
    for entity in entity_list[1:]:
        # Non-overlapping and confident enough: commit and move on.
        if (entity[0] >= last_entity[1]) and (last_entity[4] > THRESHOLD):
            new_entities.append(last_entity)
            last_entity = entity
            continue
        # Overlap (or low-confidence candidate): keep the more probable span.
        last_entity = entity if entity[-1] > last_entity[-1] else last_entity

    if last_entity[4] > THRESHOLD:
        new_entities.append(last_entity)
    return new_entities


class Inference(object):
    """UIE-style span-extraction inference helper.

    Wraps the globally loaded MODEL/tokenizer and converts (text, prompt)
    pairs into ``[start, end, entity_type, entity_text(, prob)]`` records.
    """

    def __init__(self):
        self.model = MODEL
        self.tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
        self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        self.max_length = 512  # hard cap including the 3 special tokens

        self.model.to(self.device)
        self.model.eval()

    @classmethod
    def get_pos(cls, tokens, prompt_tokens):
        """Map each text-token index to its character span in the raw text.

        Returns ``{token_index: {'start': i, 'end': j}}`` for every token
        after the ``[CLS] prompt [SEP]`` prefix. WordPiece continuation
        markers ('##') are stripped so span lengths follow the surface form.
        NOTE(review): assumes the tokenizer never drops or merges characters
        (holds for the Chinese inputs used here) -- confirm before reusing
        on whitespace-rich or OOV-heavy text.
        """
        offset = {}
        start = len(prompt_tokens) + 2  # skip [CLS] + prompt + [SEP]
        index = 0
        for i, token in enumerate(tokens[start:]):
            if token.startswith('##'):
                token = token.replace('##', '', 1)
            offset[i + start] = {'start': index, 'end': index + len(token)}
            index += len(token)
        return offset

    @classmethod
    def get_schema(cls, operate_type):
        """Return the prompt list for ``operate_type``; BASE_SCHEMA as fallback."""
        for item in NER_SCHEMA:
            if item['operate_type'] == operate_type:
                return item['schema']
        return BASE_SCHEMA

    def create_inputs_by_one(self, text, prompt):
        """Encode one (prompt, text) pair for the model.

        Returns:
            (inputs, offset) where ``inputs`` holds python-list features plus
            'seq_len' and ``offset`` maps token indices to character spans,
            or None when prompt + text would exceed ``max_length``.
        """
        prompt_tokens = self.tokenizer.tokenize(prompt)
        text_tokens = self.tokenizer.tokenize(text)

        if len(prompt_tokens) + len(text_tokens) > self.max_length - 3:
            return None

        # Bug fix: the trailing separator was the plain string 'SEP', which
        # encodes as an ordinary word piece (or [UNK]), not the special
        # token '[SEP]'.
        tokens = ['[CLS]'] + prompt_tokens + ['[SEP]'] + text_tokens + ['[SEP]']
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        token_type_ids = [0] * (len(prompt_tokens) + 2) + [1] * (len(text_tokens) + 1)
        mask_ids = [1] * len(input_ids)
        inputs = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': mask_ids,
            'seq_len': len(tokens)
        }
        offset = self.get_pos(tokens, prompt_tokens)
        return inputs, offset

    def predict_batch(self, text, first_business):
        """Predict entities for every prompt in the schema of ``first_business``.

        Returns:
            list of merged ``[start, end, entity_type, entity_text, prob]``
            records (possibly empty).
        """
        # Normalize characters the tokenizer/offset mapping cannot track.
        text = text.replace(
            '\n', '。').replace(u'\xa0', '，').replace('\t', '，')
        schema = self.get_schema(first_business)
        batch, offsets, prompts = [], [], []
        for prompt in schema:
            encoded = self.create_inputs_by_one(text, prompt)
            if encoded is None:
                # Bug fix: over-length pairs used to be appended as None and
                # crashed on unpacking; skip them instead.
                continue
            inputs, offset = encoded
            batch.append(inputs)
            offsets.append(offset)
            prompts.append(prompt)
        if not batch:
            return []
        batch = padding(batch, self.device)
        with torch.no_grad():  # consistency with predict(): no grad tracking
            output_sp, output_ep = self.model(**batch)
        output_sp = output_sp.cpu().tolist()
        output_ep = output_ep.cpu().tolist()
        start_ids_list = get_bool_ids_greater_than(output_sp, limit=0.3, return_prob=True)
        end_ids_list = get_bool_ids_greater_than(output_ep, limit=0.3, return_prob=True)

        result = []
        for prompt, offset, start_ids, end_ids in zip(prompts, offsets, start_ids_list, end_ids_list):
            span_set = get_span(start_ids, end_ids, with_prob=True)
            for start, end in span_set:
                start_index, start_prob = start
                end_index, end_prob = end
                # Bug fix: the prompt-region guard compared token indices to
                # len(prompt) (a character count, not a token count). The
                # offset map itself is authoritative: only text tokens have
                # entries, so membership is the correct filter.
                if start_index not in offset or end_index not in offset:
                    continue
                start_char = offset[start_index]['start']
                end_char = offset[end_index]['end']
                entity = text[start_char:end_char]
                result.append([start_char, end_char, prompt, entity, start_prob * end_prob])
        return merge_entity(result)

    def predict(self, text, prompt):
        """Predict entities for a single prompt.

        Returns:
            list of ``[start, end, entity_type, entity_text]`` records, or
            None when prompt + text exceed ``max_length`` (kept for caller
            compatibility with the original bare ``return``).
        """
        # Deduplicated: reuse create_inputs_by_one instead of repeating the
        # tokenization/encoding logic inline.
        encoded = self.create_inputs_by_one(text, prompt)
        if encoded is None:
            return None
        features, offset = encoded
        inputs = {
            'input_ids': torch.LongTensor([features['input_ids']]).to(self.device),
            'token_type_ids': torch.LongTensor([features['token_type_ids']]).to(self.device),
            'attention_mask': torch.LongTensor([features['attention_mask']]).to(self.device)
        }
        with torch.no_grad():
            output_sp, output_ep = self.model(**inputs)
        output_sp = output_sp.cpu().tolist()
        output_ep = output_ep.cpu().tolist()
        # Bug fix: probabilities were requested (return_prob=True) but
        # get_span was called without with_prob=True, so the (index, prob)
        # tuples broke span pairing downstream. The probabilities are unused
        # here, so request plain indices.
        start_ids_list = get_bool_ids_greater_than(output_sp)
        end_ids_list = get_bool_ids_greater_than(output_ep)

        result = []
        for start_index, end_index in get_span(start_ids_list[0], end_ids_list[0]):
            # Same offset-membership guard as predict_batch (the original
            # compared token indices against len(prompt) characters).
            if start_index not in offset or end_index not in offset:
                continue
            start_char = offset[start_index]['start']
            end_char = offset[end_index]['end']
            result.append([start_char, end_char, prompt, text[start_char:end_char]])
        return result


if __name__ == '__main__':
    # Auto-annotation driver: run the model over the corpus and write
    # '{entity[LABEL]}'-style annotations into a new 'label' column.
    test_predict = Inference()
    df = pd.read_excel('../../data/AIPS语料-20240709.xlsx')
    df = df[df.iloc[:, 7] == 0]  # keep rows already marked for training
    df.rename(columns={df.columns[5]: 'text'}, inplace=True)
    df.insert(df.shape[1], 'label', '')
    df = df.iloc[:, [5, 8]]
    # Bug fix: reset_index() returns a new frame; the original discarded it
    # (a no-op). Reassign and drop the old, now non-contiguous index.
    df = df.reset_index(drop=True)

    # Sentinel sentences: when a row's text matches, the label applied to
    # all SUBSEQUENT rows switches (the current row keeps the old label,
    # matching the original if-chain's ordering).
    label_switch = {
        '请帮我查找实际开始时间是2024-06-15的制造任务时间明细表。': 'TSK实际结束时间',
        '请帮我查找实际结束时间是2024-06-15的制造任务时间明细表。': 'TSK延期时间',
        '请帮我查找TSK延期时间是-3小时的制造任务时间明细表。': 'SO交付时间',
        '请帮我查找SO交付时间是2024-06-15的制造任务时间明细表。': 'MO重排前预计交付时间',
        '请帮我查找MO重排前预计交付时间是2024-06-15的制造任务时间明细表。': 'MO重排后预计交付时间',
        '请帮我查找MO重排后预计交付时间是2024-06-15的制造任务时间明细表。': 'MO重排差异',
        '查查重排差异是-2小时的任务清单': 'MO延期时间',
    }
    label = 'TSK实际开始时间'
    for i in range(df.shape[0]):
        text = df.iloc[i, 0]
        predictions = test_predict.predict_batch(text=text, first_business='查询')
        # Bug fix: the original indexed [0] unguarded and crashed whenever
        # no entity was extracted; leave the label column empty instead.
        if not predictions:
            label = label_switch.get(text, label)
            continue
        result = predictions[0]
        result[2] = label
        label = label_switch.get(text, label)
        # If the span stops just before a '0' (e.g. a truncated date digit),
        # extend it by one character.
        if (result[1] < len(text)) and (text[result[1]] == '0'):
            result[1] += 1
        text = text.replace(result[3], '{' + text[result[0]: result[1]] + '[' + result[2] + ']}')
        df.iloc[i, 1] = text
    print(df)
    df.to_excel('tmp.xlsx')

