
from torchvision import datasets,transforms
from utils.dataset_utils import separate_data,read_record 
from utils.sampling import *
from utils.TinyImagenet import TinyImageNet
from utils.FMNIST import *
from sklearn.model_selection import train_test_split
from utils.ShakeSpare import *
from utils.widar import *
import os
import json
def _global_subset(args, dataset_train, required=False):
    """Return the index set of the shared/global data subset.

    Samples a ``args.glo_dataset_frac`` fraction of ``dataset_train`` when
    ``args.glo_dataset`` is enabled; otherwise returns an empty list, or
    terminates the program when a global subset is mandatory for the dataset
    (``required=True`` — matches the original behavior for TinyImagenet/widar).
    """
    if args.glo_dataset:
        return cifar_global(dataset=dataset_train, frac=args.glo_dataset_frac)
    if required:
        exit('Error: global dataset')
    return []


def _partition_users(args, dataset_train, dict_global, file):
    """Split ``dataset_train`` across ``args.num_users`` clients.

    When ``args.generate_data`` is false, a previously saved partition is
    reloaded from ``file`` instead of being regenerated.
    """
    if not args.generate_data:
        return read_record(file)
    if args.iid:
        return cifar_iid(dataset_train, args.num_users, dict_global)
    if args.noniid_case < 4:
        return cifar_noniid(dataset_train, args.num_users, args.noniid_case)
    # Dirichlet-based label-skew partition (noniid_case >= 4).
    # NOTE(review): the record filename only gets a "_beta" suffix when
    # noniid_case > 4, so case 4 records are not distinguished by beta —
    # confirm this is intentional.
    return separate_data(dataset_train, args.num_users, args.num_classes,
                         dict_global, args.data_beta)


def get_dataset(args):
    """Load train/test datasets and build the per-client data partition.

    Args:
        args: parsed experiment options. Reads ``dataset``, ``num_users``,
            ``iid``, ``noniid_case``, ``data_beta``, ``generate_data``,
            ``glo_dataset``, ``glo_dataset_frac``, ``num_classes``, ``seed``;
            may overwrite ``num_users``, ``num_channels``, ``num_classes``
            for datasets with fixed properties (femnist, TinyImagenet,
            ShakeSpare, widar).

    Returns:
        tuple: ``(dataset_train, dataset_test, dict_users, dict_global)``
        where ``dict_users`` maps client id -> sample indices and
        ``dict_global`` is the (possibly empty) shared global index set.

    Exits the process on an unrecognized dataset name, or when a required
    global subset is disabled (TinyImagenet / widar).
    """
    # Name of the saved-partition record, encoding the partition settings.
    file = os.path.join("data", args.dataset + "_" + str(args.num_users))
    if args.iid:
        file += "_iid"
    else:
        file += "_noniidCase" + str(args.noniid_case)
    if args.noniid_case > 4:
        file += "_beta" + str(args.data_beta)
    file += ".json"

    if args.dataset == 'cifar10':
        trans_cifar10_train = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(mean=[0.491, 0.482, 0.447],
                                  std=[0.247, 0.243, 0.262])])
        trans_cifar10_val = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(mean=[0.491, 0.482, 0.447],
                                  std=[0.247, 0.243, 0.262])])
        dataset_train = datasets.CIFAR10('./data/cifar10', train=True,
                                         download=True, transform=trans_cifar10_train)
        dataset_test = datasets.CIFAR10('./data/cifar10', train=False,
                                        download=True, transform=trans_cifar10_val)
        dict_global = _global_subset(args, dataset_train)
        dict_users = _partition_users(args, dataset_train, dict_global, file)

    elif args.dataset == 'cifar100':
        trans_cifar100_train = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(mean=[0.507, 0.487, 0.441],
                                  std=[0.267, 0.256, 0.276])])
        trans_cifar100_val = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(mean=[0.507, 0.487, 0.441],
                                  std=[0.267, 0.256, 0.276])])
        dataset_train = datasets.CIFAR100('./data/cifar100', train=True,
                                          download=True, transform=trans_cifar100_train)
        dataset_test = datasets.CIFAR100('./data/cifar100', train=False,
                                         download=True, transform=trans_cifar100_val)
        dict_global = _global_subset(args, dataset_train)
        dict_users = _partition_users(args, dataset_train, dict_global, file)

    elif args.dataset == 'fashion-mnist':
        trans = transforms.Compose([transforms.ToTensor()])
        dataset_train = datasets.FashionMNIST('./data/fashion-mnist/', train=True,
                                              download=True, transform=trans)
        dataset_test = datasets.FashionMNIST('./data/fashion-mnist/', train=False,
                                             download=True, transform=trans)
        dict_global = _global_subset(args, dataset_train)
        # Fashion-MNIST uses its own samplers and ignores noniid_case < 4 split.
        if args.generate_data:
            if args.iid:
                dict_users = fashion_mnist_iid(dataset_train, args.num_users, dict_global)
            else:
                dict_users = fashion_mnist_noniid(dataset_train, args.num_users,
                                                  case=args.noniid_case)
        else:
            dict_users = read_record(file)

    elif args.dataset == 'femnist':
        # FEMNIST ships with its own per-writer client partition.
        dataset_train = FEMNIST(True, args=args)
        dataset_test = FEMNIST(False)
        dict_users, dict_global = dataset_train.get_client_dic()
        args.num_users = len(dict_users)
        args.num_channels = 1
        args.num_classes = 62

    elif args.dataset == 'TinyImagenet':
        trans_imagenet_train = transforms.Compose(
            [transforms.RandomCrop(64),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.4802, 0.4481, 0.3975],
                                  std=[0.2770, 0.2691, 0.2821])])
        trans_imagenet_val = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(mean=[0.4802, 0.4481, 0.3975],
                                  std=[0.2770, 0.2691, 0.2821])])
        data_dir = './data/tiny-imagenet-200/'
        dataset_train = TinyImageNet(data_dir, train=True, transform=trans_imagenet_train)
        dataset_test = TinyImageNet(data_dir, train=False, transform=trans_imagenet_val)
        args.num_channels = 3
        args.num_classes = 200
        # A global subset is mandatory for this dataset (exits otherwise).
        dict_global = _global_subset(args, dataset_train, required=True)
        dict_users = _partition_users(args, dataset_train, dict_global, file)

    elif args.dataset == 'ShakeSpare':
        # Shakespeare ships with its own per-role client partition; it has
        # no global subset, so return an empty one (previously this branch
        # left dict_global unassigned and the final return raised NameError).
        dataset_train = ShakeSpeare(True)
        dataset_test = ShakeSpeare(False)
        dict_users = dataset_train.get_client_dic()
        args.num_users = len(dict_users)
        dict_global = []

    elif args.dataset == 'widar':
        data = torch.load('./data/widar/widar.pkl')
        data_train, data_test = train_test_split(data, test_size=0.2,
                                                 random_state=args.seed)
        dataset_train = WidarDataset(data_train)
        dataset_test = WidarDataset(data_test)
        # A global subset is mandatory for this dataset (exits otherwise).
        dict_global = _global_subset(args, dataset_train, required=True)
        dict_users = _partition_users(args, dataset_train, dict_global, file)
        args.num_channels = 22
        args.num_classes = 22

    else:
        exit('Error: unrecognized dataset')

    return dataset_train, dataset_test, dict_users, dict_global