import os
import pickle
from collections import defaultdict

from torch.utils.data import DataLoader

from nezha.util import nezha_torch_tool
from nezha.util.nezha_torch_tool import Collator, Collator_test, KGDataset, KGDataset_test


def save_data(args, tokenizer):
    """Build and cache tokenized BERT inputs for the train and test CSVs.

    Each cache (``train.pkl`` / ``test.pkl`` under ``args.data_dir``) is only
    built when the file does not already exist.  Train rows are expected as
    ``id,name,content,label``; test rows as ``id,name,content``.

    Args:
        args: namespace providing ``data_dir``, ``train_path`` and ``test_path``.
        tokenizer: tokenizer forwarded to the ``nezha_torch_tool`` builders.
    """
    train_cache_pkl_path = os.path.join(args.data_dir, 'train.pkl')
    if not os.path.exists(train_cache_pkl_path):
        train_inputs = defaultdict(list)
        with open(args.train_path, 'r', encoding='utf-8') as f:
            for line_id, line in enumerate(f):
                if line_id == 0:  # skip the CSV header row
                    continue
                # NOTE(review): naive comma split — assumes no field contains
                # a comma; a row with an embedded comma raises ValueError.
                # `_row_id` avoids shadowing the builtin `id`.
                _row_id, name, content, label = line.strip().split(',')
                name = _fill_missing(name)
                content = _fill_missing(content)
                nezha_torch_tool.build_bert_inputs(train_inputs, int(label), name, tokenizer, content)
        nezha_torch_tool.save_pickle(train_inputs, train_cache_pkl_path)

    test_cache_pkl_path = os.path.join(args.data_dir, 'test.pkl')
    if not os.path.exists(test_cache_pkl_path):
        test_inputs = defaultdict(list)
        with open(args.test_path, 'r', encoding='utf-8') as f:
            for line_id, line in enumerate(f):
                if line_id == 0:  # skip the CSV header row
                    continue
                _row_id, name, content = line.strip().split(',')
                name = _fill_missing(name)
                content = _fill_missing(content)
                nezha_torch_tool.build_bert_inputs_test(test_inputs, name, tokenizer, content)
        nezha_torch_tool.save_pickle(test_inputs, test_cache_pkl_path)


def _fill_missing(value):
    """Replace the literal string 'nan' (a missing field) with the placeholder '无'."""
    return '无' if value == 'nan' else value


def load_data(args, tokenizer):
    """Load the cached train/test inputs and wrap them in DataLoaders.

    Args:
        args: namespace providing ``data_dir``, ``max_seq_len`` and
            ``batch_size``.
        tokenizer: tokenizer handed to the collators and datasets.

    Returns:
        Tuple ``(train_dataloader, test_dataloader)``.  The train loader
        shuffles; the test loader runs at 8x batch size without shuffling.
    """
    def _read_cache(filename):
        # Caches are produced by save_data; trusted local files.
        path = os.path.join(args.data_dir, filename)
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    train_records = _read_cache('train.pkl')
    test_records = _read_cache('test.pkl')

    train_collator = Collator(args.max_seq_len, tokenizer)
    test_collator = Collator_test(args.max_seq_len, tokenizer)

    train_loader = DataLoader(
        dataset=KGDataset(train_records, tokenizer),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
        collate_fn=train_collator,
    )
    test_loader = DataLoader(
        dataset=KGDataset_test(test_records, tokenizer),
        batch_size=args.batch_size * 8,
        shuffle=False,
        num_workers=0,
        collate_fn=test_collator,
    )

    return train_loader, test_loader


def load_cv_data(args, train_index, dev_index, tokenizer):
    """Build train/dev DataLoaders for one cross-validation fold.

    Reads the cached training inputs (``train.pkl`` in ``args.data_dir``)
    and selects, per field, the entries at ``train_index`` / ``dev_index``.

    Args:
        args: namespace providing ``data_dir``, ``max_seq_len`` and
            ``batch_size``.
        train_index: indices selecting the fold's training samples.
        dev_index: indices selecting the fold's validation samples.
        tokenizer: tokenizer handed to the collator and datasets.

    Returns:
        Tuple ``(train_dataloader, dev_dataloader)``.
    """
    cache_path = os.path.join(args.data_dir, 'train.pkl')
    with open(cache_path, 'rb') as fh:
        full_data = pickle.load(fh)

    fold_train = defaultdict(list)
    fold_dev = defaultdict(list)
    for field, samples in full_data.items():
        fold_train[field] = [samples[i] for i in train_index]
        fold_dev[field] = [samples[i] for i in dev_index]

    shared_collator = Collator(args.max_seq_len, tokenizer)

    train_loader = DataLoader(
        dataset=KGDataset(fold_train, tokenizer),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
        collate_fn=shared_collator,
    )
    dev_loader = DataLoader(
        dataset=KGDataset(fold_dev, tokenizer),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0,
        collate_fn=shared_collator,
    )
    return train_loader, dev_loader
