import re
import os
import pickle
from collections import defaultdict
import torch
from torch.utils.data import DataLoader

from nezha.util import nezha_torch_tool
from nezha.util.nezha_torch_tool import Collator_test, KGDataset, KGDataset_test


def save_data(args, tokenizer):
    """Build BERT-style model inputs from the raw CSV files and cache them as pickles.

    Reads ``args.train_path`` / ``args.test_path`` (header row skipped), cleans each
    record, feeds it through ``nezha_torch_tool.build_bert_inputs(_test)``, and saves
    the accumulated inputs to ``<data_dir>/hierarchy_{train,test}.pkl``. A cache file
    that already exists is left untouched.

    :param args: namespace with ``data_dir``, ``train_path``, ``test_path`` and
        ``high_merge_labels`` attributes.
    :param tokenizer: tokenizer passed straight through to the input builders.
    """

    def _fill_missing(value):
        # Pandas-exported CSVs spell missing cells as the literal string 'nan';
        # replace those with '无' ("none") so the tokenizer sees real text.
        return '无' if str(value) == 'nan' else value

    # Raw string fixes the invalid '\*' escape of the original pattern; compiled
    # once instead of on every line. Collapses masked runs like '****' to '*'.
    star_run = re.compile(r'\*{2,}')

    train_cache_pkl_path = os.path.join(args.data_dir, 'hierarchy_train.pkl')
    if not os.path.exists(train_cache_pkl_path):
        train_inputs = defaultdict(list)
        with open(args.train_path, 'r', encoding='utf-8') as f:
            for line_id, line in enumerate(f):
                if line_id == 0:  # skip the CSV header row
                    continue
                # NOTE(review): a comma inside `content` would break this split —
                # assumes the export never embeds commas; confirm upstream format.
                _record_id, name, content, label = line.strip().split(',')
                content = _fill_missing(star_run.sub('*', content))
                name = _fill_missing(name)
                nezha_torch_tool.build_bert_inputs(train_inputs, int(label), name, tokenizer, content,
                                                   high_merge_labels=args.high_merge_labels)
        nezha_torch_tool.save_pickle(train_inputs, train_cache_pkl_path)

    test_cache_pkl_path = os.path.join(args.data_dir, 'hierarchy_test.pkl')
    if not os.path.exists(test_cache_pkl_path):
        test_inputs = defaultdict(list)
        with open(args.test_path, 'r', encoding='utf-8') as f:
            for line_id, line in enumerate(f):
                if line_id == 0:  # skip the CSV header row
                    continue
                _record_id, name, content = line.strip().split(',')
                content = _fill_missing(star_run.sub('*', content))
                name = _fill_missing(name)
                nezha_torch_tool.build_bert_inputs_test(test_inputs, name, tokenizer, content)
        nezha_torch_tool.save_pickle(test_inputs, test_cache_pkl_path)


class Collator:
    """Collate training examples into fixed-width tensor batches.

    Each example is a 5-tuple ``(input_ids, token_type_ids, attention_mask,
    label, high_label)``. Sequences shorter than the batch width are
    zero-padded on the right; longer ones are truncated with a trailing
    ``[SEP]`` token restored at the cut.
    """

    def __init__(self, max_seq_len: int, tokenizer):
        self.max_seq_len = max_seq_len
        self.tokenizer = tokenizer

    def pad_and_truncate(self, input_ids_list, token_type_ids_list, attention_mask_list, labels_list, max_seq_len):
        """Return ``(input_ids, token_type_ids, attention_mask, labels)`` long tensors
        of shape ``(batch, max_seq_len)`` (labels: ``(batch,)``)."""
        batch_size = len(input_ids_list)
        input_ids = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
        token_type_ids = torch.zeros_like(input_ids)
        attention_mask = torch.zeros_like(input_ids)

        sep_id = self.tokenizer.sep_token_id
        rows = zip(input_ids_list, token_type_ids_list, attention_mask_list)
        for row, (ids, types, mask) in enumerate(rows):
            n = len(ids)
            if n <= max_seq_len:
                # Shorter (or equal): copy in and leave the zero padding in place.
                input_ids[row, :n] = torch.tensor(ids, dtype=torch.long)
                token_type_ids[row, :n] = torch.tensor(types, dtype=torch.long)
                attention_mask[row, :n] = torch.tensor(mask, dtype=torch.long)
            else:
                # Longer: truncate, keeping a final [SEP] so the sequence stays well-formed.
                input_ids[row] = torch.tensor(list(ids[:max_seq_len - 1]) + [sep_id], dtype=torch.long)
                token_type_ids[row] = torch.tensor(types[:max_seq_len], dtype=torch.long)
                attention_mask[row] = torch.tensor(mask[:max_seq_len], dtype=torch.long)

        labels = torch.tensor(labels_list, dtype=torch.long)
        return input_ids, token_type_ids, attention_mask, labels

    def __call__(self, examples: list) -> dict:
        """Collate a list of example tuples into a batch dict of tensors."""
        ids, types, masks, labels, high_labels = zip(*examples)
        high_labels_tensor = torch.tensor(high_labels, dtype=torch.long)

        # Dynamic batch width: longest sequence in the batch, capped at max_seq_len.
        batch_width = min(max(len(seq) for seq in ids), self.max_seq_len)

        input_ids, token_type_ids, attention_mask, labels_tensor = \
            self.pad_and_truncate(ids, types, masks, labels, batch_width)

        return {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'labels': labels_tensor,
            'high_labels': high_labels_tensor,
        }


def load_data(args, tokenizer):
    """Load the cached train/test pickles and wrap them in DataLoaders.

    Expects ``save_data`` to have produced ``hierarchy_train.pkl`` and
    ``hierarchy_test.pkl`` under ``args.data_dir``. The test loader uses a
    batch size 8x larger (inference only) and no shuffling.

    :return: ``(train_dataloader, test_dataloader)``
    """

    def _read_cache(filename):
        # Deserialize one cached inputs dict from the data directory.
        with open(os.path.join(args.data_dir, filename), 'rb') as fh:
            return pickle.load(fh)

    train_data = _read_cache('hierarchy_train.pkl')
    test_data = _read_cache('hierarchy_test.pkl')

    train_dataloader = DataLoader(
        dataset=KGDataset(train_data, tokenizer),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
        collate_fn=Collator(args.max_seq_len, tokenizer),
    )
    test_dataloader = DataLoader(
        dataset=KGDataset_test(test_data, tokenizer),
        batch_size=args.batch_size * 8,
        shuffle=False,
        num_workers=0,
        collate_fn=Collator_test(args.max_seq_len, tokenizer),
    )

    return train_dataloader, test_dataloader


def load_cv_data(args, train_index, dev_index, tokenizer):
    """Load one cross-validation fold from the cached training pickle.

    Splits the cached ``hierarchy_train.pkl`` data by the given index lists.
    ``train_index`` may be ``None`` for the "score on all data" mode: then the
    train loader is an empty list and every row selected by ``dev_index`` is
    returned as the dev loader.

    :return: ``(train_dataloader, dev_dataloader)``
    """
    train_data = defaultdict(list)
    dev_data = defaultdict(list)

    cache_pkl_path = os.path.join(args.data_dir, 'hierarchy_train.pkl')

    with open(cache_pkl_path, 'rb') as f:
        all_data = pickle.load(f)

    for key, values in all_data.items():
        # Bug fix: the original iterated train_index unconditionally, raising
        # TypeError when train_index is None — the exact mode the branch below
        # is meant to support. Only build the train split when it exists.
        if train_index is not None:
            train_data[key] = [values[idx] for idx in train_index]
        dev_data[key] = [values[idx] for idx in dev_index]

    collate_fn = Collator(args.max_seq_len, tokenizer)
    dev_dataset = KGDataset(dev_data, tokenizer)

    if train_index is not None:
        train_dataloader = DataLoader(dataset=KGDataset(train_data, tokenizer),
                                      batch_size=args.batch_size, shuffle=True,
                                      num_workers=0, collate_fn=collate_fn)
    else:
        # No training split: return an empty iterable so callers can still loop.
        train_dataloader = []
    dev_dataloader = DataLoader(dataset=dev_dataset, batch_size=args.batch_size, shuffle=False,
                                num_workers=0, collate_fn=collate_fn)
    return train_dataloader, dev_dataloader
