

import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
from PIL import Image
from glob import glob
import pandas as pd
import numpy as np
import os
import copy


class DatasetSplit(Dataset):
    """View of an underlying dataset restricted to a client's subset of indices.

    Used in federated training: each client receives a `DatasetSplit` wrapping
    the shared dataset plus that client's index set from `dataset_iid`.
    """

    def __init__(self, dataset, idxs):
        self.dataset = dataset
        # Materialize as a list so positional indexing works even if a set was passed.
        self.idxs = list(idxs)

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, item):
        # Translate the local position into the underlying dataset's index.
        sample = self.dataset[self.idxs[item]]
        image, label = sample
        return image, label


def calculate_accuracy(fx, y):
    """Return batch accuracy (in percent, as a tensor) of logits `fx` against labels `y`.

    `fx` is (batch, num_classes); the argmax over dim 1 is compared with `y`.
    """
    predicted = fx.max(1, keepdim=True)[1]
    n_correct = predicted.eq(y.view_as(predicted)).sum().float()
    return 100.00 * n_correct / predicted.shape[0]


# Federated averaging: FedAvg
def FedAvg(w):
    """Federated averaging (FedAvg): element-wise mean of a list of state dicts.

    `w` is a non-empty list of model state dicts sharing identical keys/shapes.
    The inputs are not modified; a new averaged state dict is returned.
    """
    n_clients = len(w)
    # Deep copy so accumulating in place never touches the callers' weights.
    averaged = copy.deepcopy(w[0])
    for key in averaged:
        for client_state in w[1:]:
            averaged[key] += client_state[key]
        averaged[key] = torch.div(averaged[key], n_clients)
    return averaged

# dataset_iid() will create a dictionary to collect the indices of the data samples randomly for each client
# IID HAM10000 datasets will be created based on this
def dataset_iid(dataset, num_users):
    """Randomly partition `dataset` indices into `num_users` equal IID shards.

    Returns a dict mapping user id -> set of sample indices. Each shard holds
    len(dataset) // num_users indices, drawn without replacement; any remainder
    samples are left unassigned.
    """
    shard_size = len(dataset) // num_users
    unassigned = list(range(len(dataset)))
    dict_users = {}
    for uid in range(num_users):
        chosen = set(np.random.choice(unassigned, shard_size, replace=False))
        dict_users[uid] = chosen
        # Remove this user's picks so shards never overlap.
        unassigned = list(set(unassigned) - chosen)
    return dict_users
                          
#==============================================================
# Custom dataset prepration in Pytorch format
class SkinData(Dataset):
    """PyTorch dataset over a dataframe with 'path' (image file) and 'target' (int label) columns."""

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        row_path = self.df['path'][index]
        # Resize up front so downstream crops always see 224x224 input.
        image = Image.open(row_path).resize((224, 224))
        label = torch.tensor(int(self.df['target'][index]))
        if self.transform:
            image = self.transform(image)
        return image, label
    

    
    
def dataloadingHAM10000(test_size):
    """Load the HAM10000 skin-lesion dataset and return (train, test) SkinData datasets.

    Args:
        test_size: fraction (or absolute count) of samples to hold out for
            testing; forwarded unchanged to sklearn's train_test_split.

    Returns:
        (dataset_train, dataset_test): SkinData instances. The train split uses
        augmentation transforms; the test split uses deterministic transforms.
    """
    df = pd.read_csv('data/HAM10000_metadata.csv')
    print(df.head())

    # Human-readable names for the seven HAM10000 diagnosis codes in 'dx'.
    lesion_type = {
        'nv': 'Melanocytic nevi',
        'mel': 'Melanoma',
        'bkl': 'Benign keratosis-like lesions ',
        'bcc': 'Basal cell carcinoma',
        'akiec': 'Actinic keratoses',
        'vasc': 'Vascular lesions',
        'df': 'Dermatofibroma'
    }

    # Merge both image folders of the HAM10000 dataset (part1 and part2)
    # into a single image_id -> file-path lookup.
    imageid_path = {os.path.splitext(os.path.basename(x))[0]: x
                    for x in glob(os.path.join("data", '*', '*.jpg'))}

    # NOTE(review): image_ids missing on disk map to NaN paths here and will only
    # fail later in SkinData.__getitem__ — consider df.dropna(subset=['path']).
    df['path'] = df['image_id'].map(imageid_path.get)
    df['cell_type'] = df['dx'].map(lesion_type.get)
    df['target'] = pd.Categorical(df['cell_type']).codes
    print(df['cell_type'].value_counts())
    print(df['target'].value_counts())

    # Train-test split.
    train, test = train_test_split(df, test_size=test_size)

    # drop=True: discard the shuffled original index instead of keeping it as a
    # spurious 'index' column; SkinData addresses rows via the fresh RangeIndex.
    train = train.reset_index(drop=True)
    test = test.reset_index(drop=True)

    # Data preprocessing: transformations (standard ImageNet mean/std).
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    train_transforms = transforms.Compose([transforms.RandomHorizontalFlip(),
                            transforms.RandomVerticalFlip(),
                            transforms.Pad(3),
                            transforms.RandomRotation(10),
                            transforms.CenterCrop(224),
                            transforms.ToTensor(),
                            transforms.Normalize(mean = mean, std = std)
                            ])

    test_transforms = transforms.Compose([
                            transforms.Pad(3),
                            transforms.CenterCrop(224),
                            transforms.ToTensor(),
                            transforms.Normalize(mean = mean, std = std)
                            ])

    # Train uses augmentation; test is deterministic.
    dataset_train = SkinData(train, transform = train_transforms)
    dataset_test = SkinData(test, transform = test_transforms)
    print(f'Number of training examples: {len(dataset_train)}')
    print(f'Number of testing examples: {len(dataset_test)}')
    # Debug peek at the first sample (label here is a 0-d torch tensor).
    xxx, yyy = dataset_train[0]
    print(f'dataset_train[0] xxx.shape = {xxx.shape}')
    print(f'dataset_train[0] yyy.shape = {yyy.shape}')
    print(f'dataset_train[0] yyy = {yyy}')
    return dataset_train, dataset_test


def dataloadingcifar100():
    """Load CIFAR-100 train/validation splits from ImageFolder directories.

    Expects class-per-subfolder image trees under /data/cifar-100-python/train1
    and /data/cifar-100-python/val.

    Returns:
        (dataset_train, dataset_test): torchvision ImageFolder datasets; train
        uses random crop/flip augmentation, test is deterministic.
    """
    # Data preprocessing: transformations (CIFAR-100 channel statistics).
    image_size = 32
    normalize = transforms.Normalize(mean=[0.491, 0.482, 0.447], std=[0.247, 0.243, 0.262])
    train_transform = transforms.Compose([
        transforms.RandomCrop(image_size, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
        ])
    test_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
            ])

    train_directory = os.path.join('/data/cifar-100-python', 'train1')
    valid_directory = os.path.join('/data/cifar-100-python', 'val')
    dataset_train = datasets.ImageFolder(root=train_directory, transform=train_transform)
    dataset_test = datasets.ImageFolder(root=valid_directory, transform=test_transform)
    print(f'Number of training examples: {len(dataset_train)}')
    print(f'Number of testing examples: {len(dataset_test)}')
    # Debug peek at the first sample.
    xxx, yyy = dataset_train[0]
    print(f'dataset_train[0] xxx.shape = {xxx.shape}')
    # Bug fix: ImageFolder labels are plain ints (no .shape attribute), so the
    # previous `yyy.shape` raised AttributeError; wrap in a tensor for the print.
    print(f'dataset_train[0] yyy.shape = {torch.as_tensor(yyy).shape}')
    print(f'dataset_train[0] yyy = {yyy}')
    return dataset_train, dataset_test