#! -*- coding: utf-8 -*-
"""
@Create Time: 20240625
@Info: 数据预处理
"""
import json
import torch
import random
import schema
from torch.utils.data import Dataset

import arguments as args



class ClassifyDataSet(Dataset):
    """Dataset for text classification.

    Reads one JSON object per line from ``file_path``, derives each sample's
    operate intent via ``schema.get_operate_type`` and encodes the text with
    the given BERT-style tokenizer ([CLS] ... [SEP]).
    """

    def __init__(self, file_path, tokenizer, classify_type, is_train, max_length=512):
        self.file_path = file_path
        self.tokenizer = tokenizer
        self.is_train = is_train
        self.classify_type = classify_type
        self.classes = args.OPERATE_TYPE
        # Bidirectional mapping between class names and integer ids.
        self.type2id = {_type: i for i, _type in enumerate(self.classes)}
        self.id2type = {i: _type for i, _type in enumerate(self.classes)}
        self.data = []
        self.max_length = max_length

        self.read_json()

    def read_json(self):
        """Load the JSON-lines file into ``self.data`` and shuffle it."""
        with open(self.file_path, 'r', encoding='utf-8') as f:
            # Iterate the file lazily instead of materializing readlines().
            for raw in f:
                line = json.loads(raw.strip())
                cats = line['cats']
                line['operate_intent'] = schema.get_operate_type(cats)
                operate_type = line['operate_intent']
                # Drop samples whose derived label is outside the known classes.
                if self.classify_type == '操作' and operate_type not in self.classes:
                    continue
                self.data.append(line)
        random.shuffle(self.data)

    def create_example(self, text, label=None):
        """Tokenize ``text`` into model inputs.

        Returns a dict with ``input_ids`` / ``token_type_ids`` /
        ``attention_mask`` / ``seq_len``, plus ``label`` (int id from
        ``self.type2id``) when ``label`` is given.
        """
        text_tokens = self.tokenizer.tokenize(str(text))

        # Reserve exactly two positions for the [CLS]/[SEP] special tokens.
        # (Fixed: truncation previously cut to max_length - 3, one token
        # more than the check above required.)
        if len(text_tokens) > self.max_length - 2:
            print(text)
            text_tokens = text_tokens[:self.max_length - 2]

        # Fixed: the closing special token must be '[SEP]', not the plain
        # string 'SEP', which the tokenizer would map to [UNK].
        tokens = ['[CLS]'] + text_tokens + ['[SEP]']
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        # Single-segment input: all token_type_ids are 0.
        token_type_ids = [0] * len(input_ids)
        mask_ids = [1] * len(input_ids)
        output = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': mask_ids,
            'seq_len': len(input_ids)
        }
        # Explicit None check so a falsy-but-present label is still encoded.
        if label is not None:
            output['label'] = self.type2id[label.strip()]
        return output

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        line = self.data[idx]
        text = line['text']
        if self.is_train:
            # Training samples carry the label derived in read_json().
            label = str(line['operate_intent'])
            return self.create_example(text, label)
        return self.create_example(text)


def collate(batch):
    """DataLoader collate function.

    Pads every sequence field to the longest ``seq_len`` in the batch
    (zero-padding on the right) and stacks each field into a LongTensor.
    The ``label`` field is included only when the items carry labels.
    """
    target_len = max(item['seq_len'] for item in batch)

    padded = {
        key: [item[key] + [0] * (target_len - item['seq_len']) for item in batch]
        for key in ('input_ids', 'token_type_ids', 'attention_mask')
    }
    if 'label' in batch[0]:
        padded['label'] = [item['label'] for item in batch]

    return {key: torch.LongTensor(values) for key, values in padded.items()}
