from math import log10
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from dataproc.adjust import AdjustSet, get_datasets
from models.backbone import INPUT_SIZE, I_Net
from models.cdcgan import G_Net


def AugDistr(distr: list[int], ratio=0.1, neg_head=True):
    """Build a per-class augmentation count list for a long-tailed distribution.

    Each class ``i`` is assigned ``round(distr[0] * ratio * log10(i + 1))``
    new samples (class 0 therefore gets 0, and tail classes get progressively
    more), after which :func:`CutHead` rewrites the head portion so head
    classes are topped up (or down, possibly negative) to a common target.

    Args:
        distr: per-class sample counts, assumed head-first
               (largest class at index 0) — TODO confirm with callers.
        ratio: scale factor relative to the largest class size.
        neg_head: keep CutHead's (possibly negative) head values when True;
                  zero out the head portion when False.

    Returns:
        list[int]: augmentation count per class, same length as ``distr``.
    """
    # log10(i + 1) grows with the class index, so tail classes receive
    # proportionally more augmentation than head classes.
    res_distr = [round(distr[0] * ratio * log10(i + 1))
                 for i in range(len(distr))]
    # CutHead mutates res_distr in place and returns it together with the
    # head/tail cut index.
    cutted_distr, tail_id = CutHead(distr, res_distr)
    if not neg_head:
        # Discard the (possibly negative) head adjustments, keep the tail.
        res_distr = [0] * tail_id
        res_distr.extend(cutted_distr[tail_id:])
    return res_distr


def CutHead(distr: list[int], aug_list: list[int], ratio=3):
    """Find the most balanced head/tail cut of ``distr`` and rewrite the head
    of ``aug_list`` so every head class is topped up to a common target size.

    The cut index ``tail_id`` minimizes ``|sum(distr[:i]) - sum(distr[i:])|``.
    The head target is ``distr[tail_id] + aug_list[tail_id] * ratio``; every
    class before the cut gets ``target - distr[i]`` (which may be negative
    for very large head classes).

    Args:
        distr: per-class sample counts (must be non-empty — an empty list
               raises IndexError, as in the original implementation).
        aug_list: per-class augmentation counts; mutated in place.
        ratio: multiplier on the boundary class's augmentation count when
               computing the head target.

    Returns:
        tuple[list[int], int]: the (mutated) ``aug_list`` and the cut index.
    """
    total = sum(distr)
    diff = total
    tail_id = 0
    head = 0
    # Running prefix sum: O(n) overall instead of recomputing
    # sum(distr[:i]) and sum(distr[i:]) at every i (O(n^2)).
    for i in range(1, len(distr)):
        head += distr[i - 1]
        new_diff = abs(2 * head - total)  # |head - (total - head)|
        if new_diff < diff:
            diff, tail_id = new_diff, i
    target_num = distr[tail_id] + aug_list[tail_id] * ratio
    for i in range(tail_id):
        aug_list[i] = target_num - distr[i]
    return aug_list, tail_id


class BaseFeatureSet(Dataset):
    """Minimal in-memory Dataset: two parallel lists of features and labels."""

    def __init__(self) -> None:
        # Parallel lists; index i of each belongs to the same sample.
        self.data = []
        self.label = []

    def append(self, feature, label):
        """Add a single (feature, label) pair."""
        self.extend([feature], [label])

    def extend(self, data: list, label: list):
        """Bulk-add samples from two parallel lists."""
        self.data.extend(data)
        self.label.extend(label)

    def __getitem__(self, index):
        return self.data[index], self.label[index]

    def __len__(self):
        return len(self.data)


def copy_feature_set_from(data: list, label: list):
    """Build a BaseFeatureSet holding shallow copies of ``data``/``label``.

    The containers are copied (elements are shared) so the returned set is
    independent of the caller's lists: the previous implementation aliased
    them, so appending to the returned set silently grew the source lists.

    Args:
        data: feature list to copy.
        label: parallel label list to copy.

    Returns:
        BaseFeatureSet: a new set with copied data/label lists.
    """
    res = BaseFeatureSet()
    res.data = list(data)
    res.label = list(label)
    return res


class FeatureSet(BaseFeatureSet):
    """A BaseFeatureSet annotated with class names and distribution metadata."""

    def __init__(self, cls_dict: list[str], org_distr: list[int], distr: list[int], feature_num: int) -> None:
        super().__init__()
        # Dimensionality of each stored feature vector.
        self.feature_num = feature_num
        # Class-name lookup plus original and post-augmentation class counts.
        self.cls_dict = cls_dict
        self.org_distr = org_distr
        self.distr = distr


class ImgFeatureSetFactory(BaseFeatureSet):
    """Extracts backbone features for an image dataset and augments them.

    Features are computed once with a frozen backbone (I_Net) and cached as
    .npy files next to the images.  :meth:`Augment` then balances the class
    distribution with the requested methods: "GAN" (conditional-GAN images
    pushed through the backbone), "TRANS" (classic image transforms) or
    "SMOTE" (not implemented yet).
    """

    def __init__(self, src: AdjustSet, backbone: str, model_pth: str, device, method: list[str], gan_path: str = None, train=True) -> None:
        """Extract (or load cached) features for every image in ``src``.

        Args:
            src: image dataset providing (img, label) pairs plus its class
                 distribution (``distr``) and class-name lookup (``cls_dict``).
            backbone: backbone network name passed to I_Net.
            model_pth: checkpoint path for the backbone weights.
            device: torch device the backbone (and GAN) run on.
            method: augmentation method names, subset of
                    {"GAN", "SMOTE", "TRANS"}.
            gan_path: checkpoint path for the conditional GAN generator;
                      only needed when "GAN" is requested.
            train: when False, Augment() returns the raw features unmodified.
        """
        super().__init__()
        self.org_distr = src.distr
        self.num_cls = len(self.org_distr)
        self.cls_dict = src.cls_dict
        self.method = method
        self.train = train
        # Cache file prefix: one pair of .npy files per backbone.
        self.npy_path = f"{src.img_path}/B-{backbone}"
        self.device = device
        self.gan_path = gan_path
        self.net = I_Net(net_name=backbone,
                         model_pth=model_pth, num_cls=self.num_cls)
        self.feature_num = self.net.net.feature_num

        self.loader = DataLoader(dataset=src, batch_size=1)
        self.net.net.to(device)
        self.net.net.eval()
        try:
            self.load(self.npy_path)
            print('Use pre-loaded .npy file')
        except (OSError, ValueError):
            # Cache miss (missing or unreadable .npy files) -> extract from
            # images.  Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and real bugs.
            self._load_origin()

        # Dispatch table: method name -> augmentation routine.
        self.aug_dict = {
            "GAN": self._GAN_aug,
            "SMOTE": self._SMOTE_aug,
            "TRANS": self._TRANS_aug
        }

    def save(self, path: str):
        """Cache features/labels as ``<path>-x.npy`` / ``<path>-y.npy``."""
        np_data = np.array(self.data)
        np_label = np.array(self.label)
        np.save(f"{path}-x.npy", np_data)
        np.save(f"{path}-y.npy", np_label)

    def load(self, path: str):
        """Load cached features/labels; features are re-wrapped as tensors.

        Raises whatever np.load raises (e.g. FileNotFoundError) when the
        cache files are absent — __init__ treats that as a cache miss.
        """
        np_data = np.load(f"{path}-x.npy")
        np_label = np.load(f"{path}-y.npy")
        self.data = np_data.tolist()
        self.label = np_label.tolist()
        for i in range(len(self.data)):
            self.data[i] = torch.tensor(self.data[i])

    def _load_origin(self):
        """Run every source image through the backbone and cache the result."""
        for i, [img, label] in enumerate(self.loader):
            print(
                f"Loading origin data [{i+1}/{len(self.loader)}]", end='\r')
            out = self.net.feature(img.to(self.device))
            self.append(out[0].detach().cpu().numpy(), label[0].item())
        print('\nLoad origin data succeed')
        self.save(self.npy_path)
        # Convert the numpy features back to tensors for downstream use.
        for i in range(len(self.data)):
            self.data[i] = torch.from_numpy(self.data[i])

    def Augment(self):
        """Return a feature Dataset; augmented only in training mode."""
        if not self.train:
            return copy_feature_set_from(self.data, self.label)
        return self._augment_impl()

    def _augment_impl(self):
        """Apply each requested augmentation and merge everything into one FeatureSet."""
        res_sets: list[BaseFeatureSet] = []
        res_distr = list(self.org_distr)
        for k in self.method:
            if k not in self.aug_dict:
                # ValueError is still an Exception subclass, so existing
                # broad handlers keep working.
                raise ValueError("Wrong method name")
            rset, aug_distr = self.aug_dict[k]()
            res_sets.append(rset)
            res_distr = [res_distr[i] + aug_distr[i]
                         for i in range(self.num_cls)]
        # SMOTE is expected to fold the original samples in itself; for the
        # other methods the raw features ride along with the synthetic ones.
        if 'SMOTE' not in self.method:
            res_sets.append(copy_feature_set_from(self.data, self.label))

        aug_set = FeatureSet(cls_dict=self.cls_dict, org_distr=self.org_distr,
                             distr=res_distr, feature_num=self.feature_num)
        for fset in res_sets:
            aug_set.extend(fset.data, fset.label)
        return aug_set

    def _GAN_aug(self):
        """Generate per-class fake images with the cGAN and featurize them.

        Returns:
            (BaseFeatureSet, list[int]): synthetic features and the per-class
            counts actually generated.
        """
        gen_net = G_Net(self.gan_path, self.num_cls)
        gan_set = BaseFeatureSet()
        obj_distr = AugDistr(self.org_distr, neg_head=False)

        # transforms.Resize([H,W]) only resizes the last two dims into [H,W].
        trans = transforms.Resize([INPUT_SIZE, INPUT_SIZE])
        gen_net.netG.to(self.device)
        gen_net.netG.eval()
        for cls_id, nums in enumerate(obj_distr):
            if nums <= 0:
                continue
            for i in range(nums):
                fake_imgs = gen_net.generate(cls_id, self.device, 1)
                # fake imgs' shape: [B,C,H,W], resize the last two dims
                fake_imgs = trans(fake_imgs)
                fake_features = self.net.feature(fake_imgs)
                gan_set.append(fake_features[0].detach().cpu(), cls_id)
                print(
                    f"Generating datas: [{sum(obj_distr[:cls_id])+i+1}/{sum(obj_distr)}]", end='\r')
        print("\nGenerate datas succeed")
        return gan_set, obj_distr

    def _TRANS_aug(self):
        """Featurize randomly transformed copies of the source images.

        Returns:
            (BaseFeatureSet, list[int]): transformed features and the
            per-class counts actually produced (``bucket``, which may fall
            short of the target if the loader has too few tail samples).
        """
        obj_distr = AugDistr(self.org_distr, neg_head=False)
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(30),
            transforms.ColorJitter(
                brightness=0.5,
                contrast=0.5,
                saturation=0.5,
                hue=0.5),
        ])
        trans_set = BaseFeatureSet()
        bucket = [0] * len(obj_distr)
        for img, label in self.loader:
            cls_id = label[0].item()
            if obj_distr[cls_id] < 0 or bucket[cls_id] == obj_distr[cls_id]:
                continue
            img = trans(img)
            out = self.net.feature(img.to(self.device))
            trans_set.append(out[0].detach().cpu(), cls_id)
            bucket[cls_id] += 1
            print(
                f"Transforming datas: [{sum(bucket)}/{sum(obj_distr)}]", end='\r')
        print("\nTransform datas succeed")
        return trans_set, bucket

    def _SMOTE_aug(self):
        """SMOTE-style feature interpolation — not implemented yet.

        The previous stub did ``obj_distr, _ = AugDistr(...)`` (AugDistr
        returns a single list, so the unpack raised) and then fell through
        to return None, crashing the tuple unpack in _augment_impl.
        Fail loudly and explicitly instead.
        """
        raise NotImplementedError("SMOTE augmentation is not implemented")


def get_loaders(name: str, distr: list, batch_size: int, backbone: str, model_pth: str, methods: list, gan_path: str, device):
    """Build (train, validation) DataLoaders of augmented backbone features.

    The train factory runs the requested augmentations; the validation
    factory (train=False) passes its features through unmodified.
    """
    org_tr_set, org_vl_set = get_datasets(name, distr, INPUT_SIZE)

    train_factory = ImgFeatureSetFactory(
        src=org_tr_set, backbone=backbone, model_pth=model_pth,
        device=device, method=methods, gan_path=gan_path, train=True)
    valid_factory = ImgFeatureSetFactory(
        src=org_vl_set, backbone=backbone, model_pth=model_pth,
        device=device, method=methods, gan_path=gan_path, train=False)

    # Keep the original side-effect order: validation set is materialized
    # before the (augmenting) training set.
    vl_loader = DataLoader(valid_factory.Augment(), batch_size, shuffle=False)
    tr_loader = DataLoader(train_factory.Augment(), batch_size, shuffle=True)

    return tr_loader, vl_loader
