# -*- coding: utf-8 -*-
"""
Mini-ImageNet data loading utilities: CSV split reader, image Dataset,
and learn2learn few-shot task builders.

Created on Sat Apr  9 09:38:56 2022 (last edited Mon Apr 18 16:05:21 2022)

@author: S1mple
"""
from PIL import Image
import torch
from torch.utils.data import Dataset
#import random
#import os
import csv
import torchvision
from torchvision import transforms
import learn2learn as l2l
from learn2learn.data.transforms import NWays, KShots, LoadData, RemapLabels
#from torch.utils.data import DataLoader


def _load_csv_split(csv_path: str, images_dir: str):
    """Read one split CSV (``filename,label`` rows after a header) into
    parallel lists of image paths and dense integer class indices.

    Labels are mapped to 0-based indices in order of first appearance.
    NOTE(review): the original code bumped the index whenever the label
    differed from the *previous* row, which assigns duplicate indices if a
    CSV is not grouped by label; a first-appearance dict is identical for
    the grouped mini-ImageNet CSVs and robust otherwise.

    :param csv_path: path to the split CSV file.
    :param images_dir: directory prepended to each filename.
    :return: (image_paths, image_labels) lists of equal length.
    """
    paths = []
    labels = []
    label_to_idx = {}
    # Context manager so the CSV handle is always closed (the original
    # leaked it via a bare open()).
    with open(csv_path, 'r', encoding='utf-8', newline='') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for filename, label in reader:
            paths.append(images_dir + "/" + filename)
            if label not in label_to_idx:
                label_to_idx[label] = len(label_to_idx)
            labels.append(label_to_idx[label])
    return paths, labels


def read_split_data(root: str):
    """Load the mini-ImageNet train/test/val splits below ``root``.

    Expects ``root/mini-imagenet-sxc/{train,test,val}.csv`` and the images
    under ``root/mini-imagenet-sxc/images/``.

    :param root: dataset root directory (parent of ``mini-imagenet-sxc``).
    :return: 6-tuple ``(train_paths, train_labels, test_paths, test_labels,
             val_paths, val_labels)``.
    """
    root = root + "/mini-imagenet-sxc"
    images_dir = root + "/images"

    train_images_path, train_images_labels = _load_csv_split(root + "/train.csv", images_dir)
    test_images_path, test_images_labels = _load_csv_split(root + "/test.csv", images_dir)
    val_images_path, val_images_labels = _load_csv_split(root + "/val.csv", images_dir)

    print("{} images for training.".format(len(train_images_path)))
    print("{} images for test.".format(len(test_images_path)))
    print("{} images for val.".format(len(val_images_path)))

    return train_images_path, train_images_labels, test_images_path, test_images_labels, val_images_path, val_images_labels


class MyDataset(Dataset):
    """Map-style dataset over explicit image paths and integer class labels.

    :param images_path: list of image file paths.
    :param images_class: list of integer labels, parallel to ``images_path``.
    :param transform: optional callable applied to the PIL image.
    """

    def __init__(self, images_path: list, images_class: list, transform=None):
        self.images_path = images_path
        self.images_class = images_class
        self.transform = transform

    def __len__(self):
        return len(self.images_path)

    def __getitem__(self, item):
        # Context manager releases the file handle promptly (the original
        # relied on GC to close it).  convert('RGB') both normalizes
        # grayscale/palette images to 3 channels and copies the pixel data
        # out before the underlying file is closed; for images that are
        # already RGB it returns an identical copy.
        with Image.open(self.images_path[item]) as raw:
            img = raw.convert('RGB')
        label = self.images_class[item]
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    @staticmethod
    def collate_fn(batch):
        # Stacks a list of (image_tensor, label) pairs into batched tensors.
        # Reference: the official default_collate,
        # https://github.com/pytorch/pytorch/blob/67b7e751e6b5931a9f45274653f4f653a4e6cdf6/torch/utils/data/_utils/collate.py
        images, labels = tuple(zip(*batch))
        images = torch.stack(images, dim=0)
        labels = torch.as_tensor(labels)
        return images, labels

class GetDataTasks():
    """Build learn2learn few-shot ``TaskDataset`` objects from image data.

    :param root: dataset root directory passed to :func:`read_split_data`.
    """

    def __init__(self, root='./'):
        self.root = root

    @staticmethod
    def _build_tasks(dataset, ways, samples_per_class, num_tasks):
        """Wrap a torch ``Dataset`` in a learn2learn ``TaskDataset``.

        Each sampled task contains ``ways`` classes with
        ``samples_per_class`` images per class, labels remapped to
        ``0..ways-1``.  (This replaces four copy-pasted pipeline blocks.)
        """
        meta = l2l.data.MetaDataset(dataset)
        task_transforms = [
            NWays(meta, ways),
            KShots(meta, samples_per_class),
            LoadData(meta),
            RemapLabels(meta),
        ]
        return l2l.data.TaskDataset(meta,
                                    task_transforms=task_transforms,
                                    num_tasks=num_tasks)

    def get(self, shot=5, train_way=5, train_query=15,
            test_shot=1, test_way=5, test_query=15,
            train_num_tasks=-1,
            valid_num_tasks=-1,
            test_num_tasks=-1,
            ):
        """Return (train_tasks, valid_tasks, test_tasks) for the splits
        found under ``self.root``.

        ``*_num_tasks=-1`` means an unbounded task stream (learn2learn
        samples tasks on the fly).
        """
        root = self.root
        (train_images_path, train_images_label,
         test_images_path, test_images_label,
         val_images_path, val_images_label) = read_split_data(root)

        # Per-split transforms: all three currently resize to 224x224 and
        # convert to tensor; kept separate so they can diverge later.
        data_transform = {
            "train": transforms.Compose([transforms.Resize((224, 224)),
                                         # transforms.RandomHorizontalFlip(),
                                         transforms.ToTensor()]),
            "test": transforms.Compose([transforms.Resize((224, 224)),
                                        transforms.ToTensor()]),
            "val": transforms.Compose([transforms.Resize((224, 224)),
                                       transforms.ToTensor()]),
        }

        train_dataset = MyDataset(images_path=train_images_path,
                                  images_class=train_images_label,
                                  transform=data_transform["train"])
        test_dataset = MyDataset(images_path=test_images_path,
                                 images_class=test_images_label,
                                 transform=data_transform["test"])
        valid_dataset = MyDataset(images_path=val_images_path,
                                  images_class=val_images_label,
                                  transform=data_transform["val"])

        # Each task draws (query + shot) samples per class so the caller can
        # split them into support and query sets.
        train_tasks = self._build_tasks(train_dataset, train_way,
                                        train_query + shot, train_num_tasks)
        valid_tasks = self._build_tasks(valid_dataset, test_way,
                                        test_query + test_shot, valid_num_tasks)
        test_tasks = self._build_tasks(test_dataset, test_way,
                                       test_query + test_shot, test_num_tasks)

        return train_tasks, valid_tasks, test_tasks

    def get2(self, args):
        """Build validation tasks from an ``ImageFolder`` directory.

        :param args: namespace providing ``data_root``, ``test_shot``,
            ``test_way``, ``test_query``, ``max_epoch``, ``encoder``,
            ``val_res`` and ``res_times``.
        :return: ``(valid_tasks, image_size)``.
        """
        data_addr = str(args.data_root)
        test_shot = args.test_shot
        test_way = args.test_way
        test_query = args.test_query
        valid_num_tasks = args.max_epoch

        # Input resolution depends on the EfficientNet variant; any other
        # encoder falls back to the configured "common" resolution.
        image_resize = {
            "common": args.val_res,
            "efficientnetb0": 224,
            "efficientnetb1": 240,
            "efficientnetb2": 260,
            "efficientnetb3": 300,
            "efficientnetb4": 380,
        }
        image_size = image_resize.get(args.encoder, image_resize["common"])

        # Resize to an oversized square, then center-crop to the target size
        # (res_times > 1 gives a margin for the crop).
        res_times = args.res_times
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((int(image_size * res_times), int(image_size * res_times))),
            transforms.CenterCrop((image_size, image_size)),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

        valid_dataset = torchvision.datasets.ImageFolder(
            root=data_addr,
            transform=transform,
        )
        valid_tasks = self._build_tasks(valid_dataset, test_way,
                                        test_query + test_shot, valid_num_tasks)

        return valid_tasks, image_size
    
    