# Chinese Military News
import os
import random

from tqdm import tqdm
import numpy as np
import pandas as pd
import pickle as pkl
import torch
from torch.nn import functional
from torch.utils.data import Dataset
from torchvision import datasets

from utils import build_vocab, read_from_txt, convert2tensor
from utils import tokenizer

class CMNDataset(Dataset):
    """Chinese Military News text-classification dataset (map-style).

    Reads (text, raw_label) pairs from the split's txt file, remaps labels
    through ID2LABEL, converts every text to a padded tensor of token ids
    with the (possibly rebuilt) vocabulary, and moves all tensors to
    ``config.device`` eagerly in the constructor.
    """

    # Maps raw label ids found in the txt files onto the compact training
    # label space.  Raw ids 2 and 3 carry the string sentinel
    # 'should be deleted'; presumably read_from_txt drops rows that map to
    # this sentinel -- TODO confirm against utils.read_from_txt.
    ID2LABEL = {
        0: 0,
        1: 1,
        2: 'should be deleted',
        3: 'should be deleted',
        4: 2,
        5: 3,
        6: 4,
        7: 5,
    }

    def __init__(self, config, split, rebuild_vocab=False, transform=None, target_transform=None):
        """Load one dataset split.

        Args:
            config: namespace providing train_path/val_path/test_path,
                vocab_path, device, pad_size.  ``config.n_vocab`` is WRITTEN
                here as a side effect (the model builder reads it).
            split: one of 'train', 'val', 'test'.
            rebuild_vocab: force rebuilding the vocabulary from the training
                texts even if a pickled vocab exists (train split only).
            transform: optional callable applied to each text tensor.
            target_transform: optional callable applied to each label.

        Raises:
            ValueError: if ``split`` is not one of the three known splits.
            FileNotFoundError: if a non-train split needs a vocab pickle
                that does not exist yet.
        """
        self.config = config
        self.split = split

        if split == 'train':
            self.texts, self.text_labels = read_from_txt(self.config.train_path, self.ID2LABEL)
            print('finish read')
            self.vocab_dic = self._load_or_build_vocab(rebuild_vocab)
        elif split == 'val':
            self.texts, self.text_labels = read_from_txt(self.config.val_path, self.ID2LABEL)
            self.vocab_dic = self._load_vocab()
        elif split == 'test':
            self.texts, self.text_labels = read_from_txt(self.config.test_path, self.ID2LABEL)
            self.vocab_dic = self._load_vocab()
        else:
            # Previously an unknown split left self.texts undefined and the
            # crash surfaced later as a confusing AttributeError.
            raise ValueError(f"unknown split {split!r}; expected 'train', 'val' or 'test'")

        self.texts, self.lengths = convert2tensor(
            self.texts,
            tokenizer,
            self.vocab_dic,
            self.config.device,
            pad_size=self.config.pad_size,
        )
        self.text_labels = torch.LongTensor(self.text_labels).to(self.config.device)
        # Side effect relied on elsewhere: record the vocabulary size.
        self.config.n_vocab = len(self.vocab_dic)

        self.transform = transform
        self.target_transform = target_transform

    def _load_or_build_vocab(self, rebuild_vocab):
        """Return the vocab dict, rebuilding (and re-pickling) it from the
        training texts when forced or when no pickle exists yet.

        The original had an unreachable ``else: raise`` here -- the two
        conditions were logically exhaustive -- so it is a plain if/else now.
        """
        if os.path.exists(self.config.vocab_path) and not rebuild_vocab:
            with open(self.config.vocab_path, 'rb') as f:
                return pkl.load(f)
        vocab_dic = build_vocab(self.texts, tokenizer)
        with open(self.config.vocab_path, 'wb') as f:
            pkl.dump(vocab_dic, f)
        return vocab_dic

    def _load_vocab(self):
        """Load the pickled vocab for the val/test splits, failing loudly.

        The original used a bare ``except:`` that printed and continued with
        ``self.vocab_dic`` unset, which only deferred the failure to an
        AttributeError a few lines later.
        """
        try:
            with open(self.config.vocab_path, 'rb') as f:
                return pkl.load(f)
        except FileNotFoundError as err:
            raise FileNotFoundError(
                f'vocab file not found at {self.config.vocab_path}; '
                'construct the train split first to build it'
            ) from err

    def __len__(self):
        # One label per sample.
        return len(self.text_labels)

    def __getitem__(self, idx):
        """Return ``((text_ids, length), label)`` for sample ``idx``.

        The original body computed a full mixup of two random samples (random
        partner, random weights, one-hot labels) and then returned the
        un-mixed pair anyway -- the mixup return was commented out.  That
        dead computation, and its per-item global-RNG draws, is removed.
        ``transform``/``target_transform`` are now applied to the values that
        are actually returned; previously they were applied only to the
        discarded mixed tensors and therefore had no effect.
        """
        text = self.texts[idx]
        label = self.text_labels[idx]
        length = self.lengths[idx]
        if self.transform:
            text = self.transform(text)
        if self.target_transform:
            label = self.target_transform(label)
        return (text, length), label