import gzip
import os
import tarfile
import zipfile
from collections import Counter
from typing import Optional, Callable

import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.utils import data
from torch.utils.model_zoo import tqdm
from torchvision.datasets import MNIST
from torchvision.datasets.utils import download_file_from_google_drive, check_integrity
from torchvision.transforms import ToTensor, Lambda, Compose

from datasets.base.data_load import DataManager


# import matplotlib.pyplot as plt


def gen_bar_updater() -> Callable[[int, int, int], None]:
    """Build a ``reporthook`` callback for ``urllib.request.urlretrieve``.

    Returns:
        A callable ``(count, block_size, total_size)`` that advances a tqdm
        progress bar as download chunks arrive.
    """
    progress_bar = tqdm(total=None)

    def update_to(count, block_size, total_size):
        # The total is unknown until the server reports it; set it lazily.
        if total_size and progress_bar.total is None:
            progress_bar.total = total_size
        downloaded = count * block_size
        progress_bar.update(downloaded - progress_bar.n)

    return update_to


def download_url(url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None) -> None:
    """Download a file from *url* into *root*, optionally verifying an MD5.

    Args:
        url: URL of the file to download.
        root: Directory to place the downloaded file in (created if missing).
        filename: Name to save the file under; defaults to the URL basename.
        md5: Expected MD5 checksum; a matching local file skips the download.

    Raises:
        RuntimeError: If the downloaded file fails the integrity check.
    """
    # Import the submodules explicitly: `import urllib` alone does NOT load
    # `urllib.request`/`urllib.error`, so attribute access could fail.
    import urllib.request
    import urllib.error

    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    # check if file is already present locally
    if check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:   # download the file
        try:
            print('Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(
                url, fpath,
                reporthook=gen_bar_updater()
            )
        except (urllib.error.URLError, IOError) as e:
            # Some mirrors fail over TLS; retry exactly once over plain HTTP.
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(
                    url, fpath,
                    reporthook=gen_bar_updater()
                )
            else:
                raise e
        # check integrity of downloaded file
        if not check_integrity(fpath, md5):
            raise RuntimeError("File not found or corrupted.")

def _is_tarxz(filename: str) -> bool:
    return filename.endswith(".tar.xz")


def _is_tar(filename: str) -> bool:
    return filename.endswith(".tar")


def _is_targz(filename: str) -> bool:
    return filename.endswith(".tar.gz")


def _is_tgz(filename: str) -> bool:
    return filename.endswith(".tgz")


def _is_gzip(filename: str) -> bool:
    return filename.endswith(".gz") and not filename.endswith(".tar.gz")


def _is_zip(filename: str) -> bool:
    return filename.endswith(".zip")


def extract_archive(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> None:
    """Extract a ``.tar``/``.tar.gz``/``.tgz``/``.tar.xz``/``.gz``/``.zip`` archive.

    Args:
        from_path: Path to the archive file.
        to_path: Destination directory; defaults to the archive's directory.
        remove_finished: Delete the archive after a successful extraction.

    Raises:
        ValueError: If the archive extension is not a supported format.
    """
    if to_path is None:
        to_path = os.path.dirname(from_path)

    # NOTE(review): extractall trusts member paths (path-traversal risk);
    # only use on trusted archives.
    if _is_tar(from_path):
        with tarfile.open(from_path, 'r') as tar:
            tar.extractall(path=to_path)
    elif _is_targz(from_path) or _is_tgz(from_path):
        with tarfile.open(from_path, 'r:gz') as tar:
            tar.extractall(path=to_path)
    elif _is_tarxz(from_path):
        with tarfile.open(from_path, 'r:xz') as tar:
            tar.extractall(path=to_path)
    elif _is_gzip(from_path):
        # Single gzipped file: decompress next to it, dropping the ".gz".
        # (`gzip` was previously unimported, so this branch raised NameError;
        # the module is now imported at the top of the file.)
        to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
        with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
            out_f.write(zip_f.read())
    elif _is_zip(from_path):
        with zipfile.ZipFile(from_path, 'r') as z:
            z.extractall(to_path)
    else:
        raise ValueError("Extraction of {} not supported".format(from_path))

    if remove_finished:
        os.remove(from_path)


def download_and_extract_archive(
    url: str,
    download_root: str,
    extract_root: Optional[str] = None,
    filename: Optional[str] = None,
    md5: Optional[str] = None,
    remove_finished: bool = False,
) -> None:
    """Download the archive at *url* and unpack it.

    Args:
        url: URL of the archive to fetch.
        download_root: Directory the archive is downloaded into.
        extract_root: Directory to extract into; defaults to download_root.
        filename: Local archive name; defaults to the URL basename.
        md5: Optional expected MD5 of the archive.
        remove_finished: Delete the archive once extraction succeeds.
    """
    download_root = os.path.expanduser(download_root)
    extract_root = download_root if extract_root is None else extract_root
    filename = filename or os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print("Extracting {} to {}".format(archive, extract_root))
    extract_archive(archive, extract_root, remove_finished)


class FEMNIST(MNIST):
    """
    This dataset is derived from the Leaf repository
    (https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
    dataset, grouping examples by writer. Details about Leaf were published in
    "LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097.
    """
    # (archive URL, expected MD5) pairs consumed by download().
    resources = [
        ('https://raw.githubusercontent.com/tao-shen/FEMNIST_pytorch/master/femnist.tar.gz',
         '59c65cec646fc57fe92d27d83afdf0ed')]

    def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None,
                 download=False):
        """
        Args:
            root: Dataset root directory; processed tensors are read from
                ``self.processed_folder``.
            dataidxs: Optional index array restricting this instance to a
                subset of samples (e.g. one federated client's share).
            train: Load the training split when True, otherwise the test split.
            transform: Optional transform applied to each PIL image.
            target_transform: Optional transform applied to each label.
            download: Request a download when data is missing (currently
                disabled by the ``checked`` flag below).
        """
        # Deliberately skip MNIST.__init__ (it would fetch/verify the standard
        # MNIST files); initialise MNIST's parent class directly instead.
        super(MNIST, self).__init__(root, transform=transform,
                                    target_transform=target_transform)
        self.train = train
        self.dataidxs = dataidxs
        # The existence check is unreliable in the cloud environment, so this
        # flag hard-disables both the download and the existence check.
        checked = True
        # print("does the data already exist:", self._check_exists())
        if download and not checked:
            self.download()

        if not self._check_exists() and not checked:
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        if self.train:
            data_file = self.training_file
        else:
            data_file = self.test_file

        # users_index appears to hold per-writer sample counts (it is cumulated
        # into start offsets by FeministDataManager) — TODO confirm.
        self.data, self.targets, self.users_index = torch.load(os.path.join(self.processed_folder, data_file))

        if self.dataidxs is not None:
            # Keep only this client's samples.
            self.data = self.data[self.dataidxs]
            self.targets = self.targets[self.dataidxs]


    def __getitem__(self, index):
        """Return the (image, target) pair at *index* with transforms applied."""
        img, target = self.data[index], int(self.targets[index])
        # Mode 'F' is PIL's 32-bit floating-point pixel mode; assumes
        # self.data stores float-valued images — TODO confirm dtype.
        img = Image.fromarray(img.numpy(), mode='F')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target



    def download(self):
        """Download the FEMNIST data if it doesn't exist in processed_folder already."""
        import shutil

        if self._check_exists():
            return

        # makedirs may raise if the folders already exist; that is fine.
        try:
            os.makedirs(self.raw_folder)
            os.makedirs(self.processed_folder)
        except Exception as _:
            pass

        # download files
        for url, md5 in self.resources:
            filename = url.rpartition('/')[2]
            download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)

        # process and save as torch files
        print('Processing...')
        shutil.move(os.path.join(self.raw_folder, self.training_file), self.processed_folder)
        shutil.move(os.path.join(self.raw_folder, self.test_file), self.processed_folder)

    def __len__(self):
        """Number of samples in the loaded (possibly client-restricted) split."""
        return len(self.data)

# 1. Load the data into memory and convert it to numpy format. Each dataset must override the
#    load function. The train/test split here corresponds to training clients vs. test clients;
#    if there are no test clients, an empty test set may be returned. Any finer split of a single
#    client's data should happen on the client itself.
# 2. The non-IID partitioning scheme is generic; only the data-loading step differs per dataset.
#    Partitioning operates on the returned self.train_data, self.train_targets, self.test_data
#    and self.test_targets.
# 3. Following the non-IID configuration, split the data into multiple shards and return them as
#    numpy arrays — returning Dataset objects would hinder further splitting and processing.
# This class only splits data across clients; it does not decide train-vs-test per shard, because
# several setups are possible:
#   a. separate training nodes and test nodes;
#   b. each client holds both training and test data (and may split it further itself).
class FeministDataManager(DataManager):
    """Loads FEMNIST into memory and partitions it across federated clients
    using LEAF's natural per-writer (non-IID, unbalanced) split."""

    def __init__(self,dir='data/'):
        # dir: dataset root directory, forwarded to the DataManager base class.
        super(FeministDataManager,self).__init__(dir)

    def load_data(self):
        """Load both splits into memory as numpy arrays.

        Returns:
            tuple: (X_train, y_train, X_test, y_test). Images gain a channel
            axis via unsqueeze. Side effect: per-writer sample counts are
            stored on self.u_train / self.u_test for allocate_data_noniid.
        """
        transform = transforms.Compose([transforms.ToTensor()])

        mnist_train_ds = FEMNIST(self.dir, train=True, transform=transform, download=True)
        mnist_test_ds = FEMNIST(self.dir, train=False, transform=transform, download=True)

        X_train, y_train, u_train = mnist_train_ds.data, mnist_train_ds.targets, mnist_train_ds.users_index
        X_test, y_test, u_test = mnist_test_ds.data, mnist_test_ds.targets, mnist_test_ds.users_index

        # Insert a channel dimension: (N, H, W) -> (N, 1, H, W).
        X_train = torch.unsqueeze(X_train.data,1).numpy()
        y_train = y_train.data.numpy()
        self.u_train = np.array(u_train)
        X_test = torch.unsqueeze(X_test.data,1).numpy()
        y_test = y_test.data.numpy()
        self.u_test = np.array(u_test)

        return (X_train, y_train, X_test, y_test)

    def allocate_data_noniid(self,client_number=20):
        """Real-world non-IID partition following the LEAF benchmark; the
        resulting per-client data distribution is unbalanced.

        Args:
            client_number: Number of client shards to produce.

        Returns:
            [dataset_list]: One (data, targets) numpy pair per client.
        """
        num_user = self.u_train.shape[0]
        # user[i] is the cumulative sample count of the first i writers, i.e.
        # the start offset of writer i's samples in the training arrays.
        user = np.zeros(num_user+1,dtype=np.int32)
        for i in range(1,num_user+1):
            user[i] = user[i-1] + self.u_train[i-1]
        # Randomly shuffle the writer ids.
        no = np.random.permutation(num_user)
        # Split all writers into client_number groups.
        batch_idxs = np.array_split(no, client_number)
        # Collect, per client, the sample indices of all writers in its group.
        net_dataidx_map = {i:np.zeros(0,dtype=np.int32) for i in range(client_number)}
        for i in range(client_number):
            for j in batch_idxs[i]:
                net_dataidx_map[i]=np.append(net_dataidx_map[i], np.arange(user[j], user[j+1]))

        # Convert the dict of index arrays into a list of (data, targets) pairs.
        # NOTE(review): self.train_data / self.train_targets are presumably set
        # by the DataManager base class from load_data's result — confirm.
        dataset_list = []
        for name in net_dataidx_map:
            dataset_list.append((self.train_data[net_dataidx_map[name]],self.train_targets[net_dataidx_map[name]]))

        self.show_distribution(dataset_list)
        return dataset_list

    def allocate_data_test(self):
        # A single test client is sufficient.
        return [(self.test_data,self.test_targets)]
    
# Load a dataset as a module-level singleton: this statement runs at import
# time and places the global `dataManager` instance in memory.
# NOTE(review): the relative path '../data' depends on the process working
# directory — confirm it resolves correctly for all entry points.
dataManager = FeministDataManager(dir='../data')

