from torch.utils.data import DataLoader, Dataset
from config import Config
import os
from sklearn.model_selection import StratifiedGroupKFold, train_test_split
from transformers import AutoTokenizer
import torch
import pandas as pd
from tqdm import tqdm


class DataGenerator(Dataset):
    """Map-style dataset over a dataframe's ``input_one`` text column.

    Every row is tokenized once up front and cached as
    ``[input_ids (LongTensor), label (LongTensor of shape [1])]``, where the
    label is the class index of the row's ``score`` value.
    """

    def __init__(self, df, config):
        """
        Args:
            df: DataFrame with at least an ``input_one`` (str) column and a
                ``score`` column whose values are in {0, 0.25, 0.5, 0.75, 1}.
            config: dict-like with ``pretrain_model_path`` and ``max_length``.
        """
        # Fix: the original assigned self.config twice (before and after the
        # tokenizer was built); a single assignment is sufficient.
        self.config = config
        self.inputs = df['input_one'].values.astype(str)
        self.label = df['score'].values
        # Competition scores come in 0.25 steps; map class index <-> score.
        self.index_to_label = {0: 0, 1: 0.25, 2: 0.5, 3: 0.75, 4: 1}
        self.label_to_index = {score: idx for idx, score in self.index_to_label.items()}
        self.tokenizer = AutoTokenizer.from_pretrained(config['pretrain_model_path'])
        self.data = []
        self.load()

    def load(self):
        """Tokenize every input and cache (input_ids, label) pairs in self.data."""
        for i in range(len(self.inputs)):
            input_ids = self.tokenizer.encode(
                self.inputs[i],
                max_length=self.config['max_length'],
                padding='max_length',
                truncation=True,
            )
            label_index = self.label_to_index[self.label[i]]
            self.data.append([torch.LongTensor(input_ids),
                              torch.LongTensor([label_index])])

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index):
        return self.data[index]


def load_data(data_path, config, shuffle=True):
    """Read a CSV file and wrap it in a batched DataLoader.

    Args:
        data_path: path to a CSV with ``input_one`` and ``score`` columns.
        config: config dict; needs ``batch_size`` plus DataGenerator's keys.
        shuffle: whether the loader reshuffles each epoch.

    Returns:
        A DataLoader over a DataGenerator built from the CSV.
    """
    frame = pd.read_csv(data_path)
    dataset = DataGenerator(frame, config)
    return DataLoader(dataset, batch_size=config['batch_size'], shuffle=shuffle)


if __name__ == '__main__':
    # Data preprocessing: each `flag` value builds a different train/valid
    # CSV layout under new_data/ from the USPPPM competition files.
    flag = '12th'
    if flag == 'cls':
        # Single-input classification: anchor [SEP] target [SEP] lowercased title.
        df = pd.read_csv('usp_data/train.csv')
        df_title = pd.read_csv('usp_data/titles.csv')
        # NOTE(review): a left merge can leave NaN titles for context codes
        # missing from titles.csv, which would make str.lower below raise —
        # confirm titles.csv covers every context code.
        df = df.merge(df_title, how='left', left_on='context', right_on='code')
        df = df[['id', 'anchor', 'target', 'context', 'score', 'title']]
        df['input_one'] = df['anchor'] + '[SEP]' + df['target'] + '[SEP]' + df['title'].apply(str.lower)
        train_df, valid_df = train_test_split(df, test_size=0.1, random_state=42)
        # Fix: race-free, idempotent directory creation (was isdir + mkdir).
        os.makedirs('new_data', exist_ok=True)
        print(len(train_df), len(valid_df))
        train_df.to_csv('new_data/cls_train.csv', index=False)
        valid_df.to_csv('new_data/cls_valid.csv', index=False)

    elif flag == 'short':
        # Two-input variant: anchor and target each paired with the title.
        df = pd.read_csv('usp_data/train.csv')
        df_title = pd.read_csv('usp_data/titles.csv')
        df = df.merge(df_title, how='left', left_on='context', right_on='code')
        df = df[['id', 'anchor', 'target', 'context', 'score', 'title']]
        df['input_one'] = df['anchor'] + '[SEP]' + df['title'].apply(str.lower)
        df['input_two'] = df['target'] + '[SEP]' + df['title'].apply(str.lower)
        train_df, valid_df = train_test_split(df, test_size=0.1, random_state=42)
        os.makedirs('new_data', exist_ok=True)
        print(len(train_df), len(valid_df))
        train_df.to_csv('new_data/short_train.csv', index=False)
        valid_df.to_csv('new_data/short_valid.csv', index=False)

    elif flag == '12th':
        # 12th-place-style input: append every sibling target that shares the
        # same (anchor, context) group as extra context after the base text.
        # Reference: https://zhuanlan.zhihu.com/p/532407317
        df = pd.read_csv('usp_data/train.csv')
        df_title = pd.read_csv('usp_data/titles.csv')
        df = df.merge(df_title, how='left', left_on='context', right_on='code')
        df = df[['id', 'anchor', 'target', 'context', 'score', 'title']]

        df['text'] = df['anchor'] + '[SEP]' + df['target'] + '[SEP]' + df['title'].apply(str.lower)
        # Collect the de-duplicated set of targets per (anchor, context) group
        # and join them into a single comma-separated string.
        target_info = df.groupby(['anchor', 'context'])['target'].agg(list).reset_index()
        target_info['target'] = target_info['target'].apply(lambda x: list(set(x)))
        target_info['target_info'] = target_info['target'].apply(lambda x: ', '.join(x))
        # (A discarded .describe() exploration call was removed here.)
        del target_info['target']
        df = df.merge(target_info, on=['anchor', 'context'], how='left')
        df['input_one'] = df['text'] + '[SEP]' + df['target_info']

        train_df, valid_df = train_test_split(df, test_size=0.1, random_state=42)
        os.makedirs('new_data', exist_ok=True)
        print(len(train_df), len(valid_df))
        train_df.to_csv('new_data/cls_12th_train.csv', index=False)
        valid_df.to_csv('new_data/cls_12th_valid.csv', index=False)

    elif flag == 'long':
        # Group rows with the same (anchor, score, context) and concatenate
        # their targets into one ';'-joined string per group.
        df = pd.read_csv('usp_data/train.csv')
        df_title = pd.read_csv('usp_data/titles.csv')
        df = df.merge(df_title, how='left', left_on='context', right_on='code')
        df = df[['id', 'anchor', 'target', 'context', 'score', 'title']]
        tokenizer = AutoTokenizer.from_pretrained(Config['pretrain_model_path'])
        df['input_one'] = df['anchor'] + tokenizer.sep_token + df['title'].apply(str.lower)
        anchor_list = df['anchor'].tolist()
        input_two = []
        # NOTE(review): this is O(n^2) over the dataframe, and the join below
        # starts from row i's own target while skipping part_target[0] — when
        # row i is not the group's first row, its target appears twice and the
        # first group member is dropped. This branch is inactive (flag='12th');
        # confirm intent before enabling it.
        for i in tqdm(range(len(anchor_list))):
            cur_anchor = anchor_list[i]
            cur_score = df.loc[i, 'score']
            cur_context = df.loc[i, 'context']
            cur_target = df.loc[i, 'target']
            part_df = df[df['anchor'] == cur_anchor]
            part_df = part_df[part_df['score'] == cur_score]
            part_df = part_df[part_df['context'] == cur_context]
            part_target = part_df['target'].tolist()
            for j in range(1, len(part_target)):
                cur_target = cur_target + ';' + part_target[j]
            input_two.append(cur_target)
        df['input_two'] = input_two
        df.drop_duplicates(subset=['anchor', 'score'], inplace=True)
        train_df, test_df = train_test_split(df, test_size=0.01, random_state=42)
        train_df, valid_df = train_test_split(train_df, test_size=0.1, random_state=42)
        os.makedirs('new_data', exist_ok=True)
        print(len(train_df), len(valid_df), len(test_df))
        df.to_csv('new_data/merge.csv', index=False)
        train_df.to_csv('new_data/train.csv', index=False)
        valid_df.to_csv('new_data/valid.csv', index=False)
        test_df.to_csv('new_data/test.csv', index=False)

    # Alternative split: fold the data into N groups keyed by anchor, training
    # on N-1 folds and validating on the remaining one. Kept as a note:
    # kf = StratifiedGroupKFold(n_splits=2, shuffle=True, random_state=42)
    # df['fold'] = -1
    # for f, (t_, v_) in enumerate(kf.split(X=df, y=df['anchor'], groups=df['anchor'])):
    #     df.loc[v_, 'fold'] = f
    #
    # if not os.path.isdir('backup'):
    #     os.mkdir('backup/')
    # df.to_excel('backup/test.xlsx', index=False)
