import os
import random

import torch
from torch.utils.data import Dataset
from utils.utils import read_from_txt

class MetaDataset(Dataset):
    """Episode/task sampler for MAML-style meta-learning over labeled texts.

    Pre-builds ``task_num_total`` tasks. Each task is a 2-way episode pairing
    one "positive" class with class 0: a support set of ``k_shot`` examples
    per class and a query set of ``k_query`` examples per class. Labels are
    binarized — the selected positive class maps to 1, class 0 stays 0.

    Splits: 'train' and 'val' draw the positive class from {1, 4, 5, 6};
    'test2' / 'test3' use the held-out classes 2 / 3 respectively.

    NOTE(review): this subclasses ``torch.utils.data.Dataset`` but defines no
    ``__getitem__``, so it cannot be wrapped in a ``DataLoader`` as-is; use
    ``gettask`` / ``getbatch`` directly.
    """

    # Classes 2 and 3 are held out for testing; train/val tasks pick the
    # positive class from these.
    _TRAIN_CLASSES = (1, 4, 5, 6)

    def __init__(self, config4dataset, config4maml, task_num_total, split):
        """Load the split's texts/labels and pre-sample all tasks.

        Args:
            config4dataset: dataset config exposing ``train_path`` /
                ``val_path`` / ``test_path`` and ``device``.
            config4maml: MAML config exposing ``task_per_batch``, ``n_way``,
                ``k_shot``, ``k_query``.
            task_num_total: number of tasks to pre-sample.
            split: one of 'train', 'val', 'test2', 'test3'.

        Raises:
            ValueError: for an unrecognized ``split`` (the original failed
                later with an opaque ``AttributeError``).
        """
        self.config4dataset = config4dataset
        self.split = split
        if split == 'train':
            path = self.config4dataset.train_path
        elif split == 'val':
            path = self.config4dataset.val_path
        elif split in ('test2', 'test3'):
            path = self.config4dataset.test_path
        else:
            raise ValueError(f'unknown split: {split!r}')
        self.texts, self.text_labels = read_from_txt(path)
        self.text_labels = torch.LongTensor(self.text_labels).to(self.config4dataset.device)

        self.task_num_total = task_num_total  # number of tasks, not of examples
        self.task_per_batch = config4maml.task_per_batch
        self.n_way = config4maml.n_way  # kept for interface parity; tasks are always 2-way
        self.k_shot = config4maml.k_shot  # support examples per class
        self.k_query = config4maml.k_query  # query examples per class

        self.create_task(self.task_num_total, self.split)

    def choice_label(self, split):
        """Return the ``[positive_class, 0]`` class pair for one task.

        Bug fix: the original had no branch for ``split == 'val'`` and
        silently returned ``None``, crashing ``create_task``; 'val' now
        samples from the training classes like 'train' does.
        """
        if split in ('train', 'val'):
            # Pick one of {1, 4, 5, 6} — classes 2 and 3 are held out.
            return [random.choice(self._TRAIN_CLASSES), 0]
        if split == 'test2':
            return [2, 0]
        if split == 'test3':
            return [3, 0]
        raise ValueError(f'unknown split: {split!r}')

    def _sample_k(self, cls, k):
        """Rejection-sample ``k`` ``[text, binary_label]`` pairs of class ``cls``.

        Sampling is with replacement (the same example may appear twice),
        matching the original behavior.

        NOTE(review): loops forever if ``cls`` never occurs in this split's
        labels — assumes every selectable class is represented; confirm
        against the data files.
        """
        n = len(self.text_labels)
        label = 1 if cls else 0  # binarize: positive class -> 1, class 0 -> 0
        picked = []
        while len(picked) < k:
            i = random.randrange(n)
            if self.text_labels[i] == cls:
                picked.append([self.texts[i], label])
        return picked

    def create_task(self, task_num_total, split):
        """(Re)build ``task_num_total`` support/query task pairs in place.

        Populates ``self.support_task`` and ``self.query_task`` (parallel
        lists, one entry per task).
        """
        self.support_task = []
        self.query_task = []
        for _ in range(task_num_total):
            # 1. pick the class pair for this task
            selected_cls = self.choice_label(split)
            support = []
            query = []
            # 2. draw k_shot support + k_query query examples per class
            for cls in selected_cls:
                support.extend(self._sample_k(cls, self.k_shot))
                query.extend(self._sample_k(cls, self.k_query))
            # Shuffle so class order does not leak into example position.
            random.shuffle(support)
            random.shuffle(query)
            self.support_task.append(support)
            self.query_task.append(query)

    def __len__(self):
        """Number of pre-sampled tasks."""
        return self.task_num_total

    def gettask(self, index):
        """Return one task as ``(support_x, support_y, query_x, query_y)``.

        ``*_x`` are lists of raw texts; ``*_y`` are ``LongTensor`` labels on
        ``config4dataset.device``.
        """
        support_x = [text for text, _ in self.support_task[index]]
        support_y = [label for _, label in self.support_task[index]]
        query_x = [text for text, _ in self.query_task[index]]
        query_y = [label for _, label in self.query_task[index]]
        support_y = torch.LongTensor(support_y).to(self.config4dataset.device)
        query_y = torch.LongTensor(query_y).to(self.config4dataset.device)
        return support_x, support_y, query_x, query_y

    def getbatch(self, index):
        """Return a meta-batch of ``task_per_batch`` tasks, each a 4-tuple.

        Batch index 0 triggers a full resample of all tasks, giving fresh
        episodes each epoch.

        Raises:
            IndexError: if ``index`` is past the last full batch (replaces
                the original blocking ``print`` + ``input()`` debug trap).
        """
        batch_num = self.task_num_total // self.task_per_batch
        if index >= batch_num:
            raise IndexError(
                f'batch index {index} out of range ({batch_num} batches available)')
        if index == 0:
            self.create_task(self.task_num_total, self.split)
        # batch 0 covers tasks [0, task_per_batch), batch 1 the next slice, ...
        support_x = []
        support_y = []
        query_x = []
        query_y = []
        for i in range(self.task_per_batch):
            sx, sy, qx, qy = self.gettask(index * self.task_per_batch + i)
            support_x.append(sx)
            support_y.append(sy)
            query_x.append(qx)
            query_y.append(qy)
        return support_x, support_y, query_x, query_y




        

if __name__ == '__main__':
    # Smoke test: build configs dynamically, construct a MetaDataset, and
    # inspect individual tasks interactively.
    dataset = 'data/' + 'total7'
    print('Training on dataset:', dataset)
    model_name = 'ERNIE'
    from importlib import import_module

    x = import_module('models.' + model_name)
    config1 = x.Config(dataset)
    print('finish 1')
    y = import_module('MAML.' + 'meta')
    config2 = y.Config()
    print('finish 2')
    a = MetaDataset(config1, config2, 100, 'train')
    # Bug fix: the class has no ``getitem`` method — the task accessor is
    # ``gettask``. An empty input now exits the loop (the original
    # ``while True`` made everything below unreachable).
    while True:
        k = input()
        if not k:
            break
        print(a.gettask(int(k)))

    # NOTE(review): MetaDataset defines no __getitem__, so this DataLoader
    # will fail at iteration time; kept from the original for reference.
    from torch.utils.data import DataLoader
    import numpy as np
    dataloader = DataLoader(
        a,
        batch_size=config2.task_per_batch,
        shuffle=True,
        # Bug fix: worker_init_fn must be a callable taking the worker id;
        # the original passed the *result* of np.random.seed(1), i.e. None.
        worker_init_fn=lambda worker_id: np.random.seed(1),
    )
    for sx, sy, qx, qy in dataloader:
        print(sx.size)
        input()