import sys
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
import os
from src.EnvironmentVariables import DATA_PATH

# NOTE(review): this append runs *after* the `from src...` import on the line
# above already succeeded, so it cannot help that import — presumably kept for
# interactive/notebook use; confirm before removing.
sys.path.append('../..')
SEED = 308

# Seed both torch and numpy so shuffles/initialization are reproducible.
torch.manual_seed(SEED)
np.random.seed(SEED)

data_name = "data202303041.xlsx"
origin_data_path = os.path.join(DATA_PATH, data_name)  # useful keys include title, emotion
# Keep only rows with a valid emotion label (-1 / 0 / 1) and the three columns of interest.
origin_data = pd.read_excel(origin_data_path)
origin_data = origin_data[origin_data.emotion.isin([-1, 0, 1])].loc[:, ['article_id', 'title', 'emotion']]

# Simple local validation: split the data 8/1/1 to check model performance.
# NOTE(review): build_self_dataset() below actually uses an 8.5/1/0.5 split — confirm which is intended.
train_data_path = os.path.join(DATA_PATH, 'train.csv')
test_data_path = os.path.join(DATA_PATH, 'test.csv')
validation_data_path = os.path.join(DATA_PATH, 'validation.csv')


def get_data(_type='train') -> pd.DataFrame:
    """Load one of the locally pre-split datasets from CSV.

    :param _type: 'train', 'test' or 'validation'; any other value falls
        back to the validation set (matching the original behavior).
    :return: the requested split as a DataFrame.
    """
    split_paths = {
        'train': train_data_path,
        'test': test_data_path,
    }
    return pd.read_csv(split_paths.get(_type, validation_data_path))


class MyDataset(Dataset):
    """Torch dataset of article titles with binarized sentiment labels.

    Because the raw labels are imbalanced, neutral (0) and positive (1)
    emotions are merged into a single positive class (1); only negative
    (-1) maps to class 0.
    """

    def __init__(self, tokenizer, max_len, _dataframe: pd.DataFrame, mode='train'):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.data = _dataframe
        self.texts = self.data['title'].tolist()
        if 'emotion' in self.data.keys():
            # Merge neutral/positive into class 1; negative becomes class 0.
            self.labels = [0 if value < 0 else 1 for value in self.data['emotion'].tolist()]
        else:
            # No ground-truth column (e.g. inference data): placeholder zeros.
            self.labels = [0] * len(self.texts)

    def __getitem__(self, index):
        raw_text = str(self.texts[index])
        encoded = self.tokenizer.encode_plus(
            raw_text,
            padding='max_length',
            truncation=True,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=True,
            return_attention_mask=True,
            return_tensors='pt',
        )
        target = torch.from_numpy(np.array(self.labels[index]))
        # Tokenizer returns (1, max_len) tensors; flatten to (max_len,) per sample.
        return {
            'texts': raw_text,
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'label': target.long(),
        }

    def __len__(self):
        return len(self.texts)


def create_dataloader(dataset, batch_size, mode='train'):
    """Wrap *dataset* in a DataLoader.

    :param dataset: any torch-compatible dataset (Dataset or sequence).
    :param batch_size: number of samples per batch.
    :param mode: samples are shuffled only when this is 'train'.
    :return: a torch DataLoader over *dataset*.
    """
    # `mode == 'train'` is already a bool; the previous
    # `True if ... else False` conditional was redundant.
    return DataLoader(dataset, batch_size=batch_size, shuffle=(mode == 'train'))



def show(_df: pd.DataFrame):
    """Draw a bar chart of the emotion label distribution of *_df*.

    Bars: -1 (negative), 0 (neutral), 1 (positive), and 2 = neutral +
    positive combined (the merged positive class). The caller is
    responsible for calling plt.show().
    """
    counts = Counter(_df.emotion)
    summary = {
        -1: counts[-1],
        0: counts[0],
        1: counts[1],
        2: counts[0] + counts[1],
    }
    plt.bar(x=summary.keys(), height=summary.values())


def build_self_dataset():
    """Split the raw labelled data into train/test/validation CSV files.

    Performs a seeded shuffle and an 85% / 10% / 5% split, plots each
    split's label distribution via show(), and writes the three CSVs to
    the module-level paths.

    :return: None (side effect: writes train.csv, test.csv, validation.csv).
    """
    train_fraction = 0.85
    test_fraction = 0.1
    # Bug fix: the original did `df = origin_data` followed by
    # `df.drop(..., inplace=True)`, silently mutating the shared
    # module-level DataFrame. Drop the first column (the article id)
    # on a copy instead; the written output is unchanged.
    df = origin_data.drop(columns=[origin_data.keys()[0]])
    train_end = int(len(df) * train_fraction)
    test_end = train_end + int(len(df) * test_fraction)
    # Deterministic shuffle so the split is reproducible across runs.
    df = df.sample(frac=1, random_state=SEED)
    train_data = df[:train_end]
    test_data = df[train_end:test_end]
    validation_data = df[test_end:]
    # Visualize the label distribution of each split (caller invokes plt.show()).
    show(train_data)
    show(test_data)
    show(validation_data)
    train_data.to_csv(train_data_path)
    test_data.to_csv(test_data_path)
    validation_data.to_csv(validation_data_path)


# Script entry point: regenerate the train/test/validation CSV splits and
# display the label-distribution bar charts queued up by show().
if __name__ == '__main__':
    build_self_dataset()
    plt.show()
