from torch import nn
import torchvision.models as models
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch
import os
import pickle
import math
import time
import numpy as np
import joblib

from util.cutout import Cutout

class resnet18_ef(nn.Module):
    """Frozen ResNet-18 feature extractor.

    Wraps a pretrained ResNet-18 with its final fully-connected layer
    removed, so ``forward`` returns the penultimate features of shape
    (N, 512, 1, 1) for a (N, 3, 224, 224) input.
    """

    def __init__(self):
        super(resnet18_ef, self).__init__()
        # NOTE(review): `pretrained=True` is deprecated in newer torchvision
        # in favour of `weights=...`; kept for compatibility with the
        # torchvision version this file targets.
        resnet18 = models.resnet18(pretrained=True)
        # Drop the last child (the `fc` classification head), keep the rest.
        self.nn_model = nn.Sequential(*list(resnet18.children())[0: -1])
        # eval() freezes batch-norm running statistics (and dropout, if any)
        # so the extracted features are deterministic and independent of how
        # the batches are composed.
        self.nn_model.eval()

    def forward(self, X):
        # Pure inference: skip autograd bookkeeping so pickled features do
        # not drag a computation graph along (and memory stays bounded).
        with torch.no_grad():
            return self.nn_model(X)


def extrac_cifar10_features():
    """Run CIFAR-10 through a frozen ResNet-18 and pickle the features.

    Writes one pickled (features, labels) record per batch to the
    module-level ``file_{train,test}_{features,label}`` paths under
    ``features_path``.  Use ``batch_join_`` afterwards to merge the
    per-batch records into single numpy arrays.

    NOTE(review): the function name keeps the original (misspelled) public
    spelling so existing callers continue to work.
    """
    # ImageNet-style preprocessing: the backbone was pretrained on 224x224
    # ImageNet images, so CIFAR-10's 32x32 images are resized and normalized
    # with the ImageNet mean/std rather than CIFAR-10's own statistics.
    image_size = (224, 224)
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    training_data = datasets.CIFAR10(
        root="data",
        train=True,
        download=True,
        transform=transform
    )

    test_data = datasets.CIFAR10(
        root="data",
        train=False,
        download=True,
        transform=transform
    )

    batch_size = 64
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_data, batch_size=batch_size)

    # Peek at one batch of each split to confirm the pipeline shapes.
    for x, y in train_dataloader:
        print(f"train: x, y shape:{x.shape}, {y.shape}")
        break

    for x, y in test_dataloader:
        print(f"test: x, y shape:{x.shape}, {y.shape}")
        break

    rnef = resnet18_ef()
    # eval(): freeze batch-norm running stats so features are deterministic.
    rnef.eval()
    # Mapping the whole dataset through the model in one go runs out of
    # memory, hence the batch-by-batch extraction below.

    if not os.path.exists(features_path):
        os.makedirs(features_path)

    _extract_split(rnef, test_dataloader, file_test_features, file_test_label,
                   "test", batch_size)
    # The train split uses data augmentation (random flips), so it is slower.
    _extract_split(rnef, train_dataloader, file_train_features, file_train_label,
                   "train", batch_size)


def _extract_split(model, dataloader, features_file, label_file, split_name, batch_size):
    """Extract features for one split, appending one pickle record per batch.

    ``features_file`` / ``label_file`` each end up holding one pickled object
    per batch (read them back with repeated ``pickle.load`` until EOFError).
    """
    n_batches = math.floor(len(dataloader.dataset) / batch_size)
    with open(features_file, "wb") as f_features, open(label_file, "wb") as f_label:
        # no_grad(): inference only — avoids building an autograd graph that
        # would otherwise be dragged into every pickled tensor.
        with torch.no_grad():
            for t, (x, y) in enumerate(dataloader):
                # squeeze the trailing 1x1 spatial dims: (N, 512, 1, 1) -> (N, 512)
                tf = torch.squeeze(model(x))
                pickle.dump(tf, f_features)
                pickle.dump(y, f_label)
                print(f"\rextract {split_name} data features process: [{t:>3d}/{n_batches:>3d}]", end="")
    print(f"{split_name} data extract over!")


def batch_join(*file_urls):
    """Merge the per-batch pickles in each file into one Python list.

    Deprecated in practice: the files this produces are too large (>6 GB);
    kept until the cause is found and optimized (see ``batch_join_``).
    """
    for url in file_urls:
        merged = []
        with open(url, "rb") as handle:
            # Each file holds one pickled object per batch; read until EOF.
            while True:
                try:
                    merged += pickle.load(handle)
                except EOFError:
                    break

        directory, basename = os.path.split(url)
        # pickle raised MemoryError on objects this large, so joblib is used.
        with open(os.path.join(directory, f"bj_{basename}"), "wb") as out:
            joblib.dump(merged, out)
        print(f"{basename}: {len(merged)}items")
    print("Done!")


def batch_join_(*file_urls):
    """Merge the per-batch pickles in each file into one numpy array.

    Improvements over ``batch_join``:
    1. tensors are converted to numpy (``detach`` drops any autograd graph),
    2. batches are stacked into a single contiguous array, and
    3. stacking happens once at the end (the original re-copied the whole
       accumulated array every batch — O(n^2) in the number of batches).

    Files whose name contains ``features`` are stacked row-wise (2-D),
    files whose name contains ``label`` are concatenated (1-D).  The merged
    array is written next to the input as ``bj_<name>``.
    """
    for file_url in file_urls:
        f_dir, f_name = os.path.split(file_url)

        chunks = []
        with open(file_url, "rb") as f_r:
            # One pickled tensor per batch; read until EOF.
            while True:
                try:
                    tmp_data = pickle.load(f_r)
                except EOFError:
                    break
                # detach() drops any autograd graph before numpy conversion.
                chunks.append(tmp_data.detach().numpy())

        if not chunks:
            # Original code crashed on an empty file (None.shape); skip it.
            print(f"{f_name}: empty file, skipped")
            continue

        if "features" in f_name:
            data = np.vstack(chunks)   # stack batches along the row axis
        elif "label" in f_name:
            data = np.hstack(chunks)   # labels are 1-D; concatenate
        else:
            # Mirror the original behavior for unrecognized names: keep
            # only the first batch.
            data = chunks[0]

        with open(os.path.join(f_dir, f"bj_{f_name}"), "wb") as f_w:
            pickle.dump(data, f_w)
        print(f"{f_name}: shape{data.shape}")
    print("Done!")
        


# Output directory for the extracted ResNet-18 features.
features_path = "data/cifar10_resnet_features/"

# One pickle file per (split, kind); each file holds one pickled object per
# batch (read back with repeated pickle.load until EOFError, or merged into
# single arrays by batch_join_).
file_test_features = os.path.join(features_path, "test_features.pkl")
file_test_label = os.path.join(features_path, "test_label.pkl")
file_train_features = os.path.join(features_path, "train_features.pkl")
file_train_label = os.path.join(features_path, "train_label.pkl")


if __name__ == "__main__":
    # Quick smoke test of the feature extractor (kept for reference):
    # x = torch.randn(1, 3, 224, 224)
    # rnef = resnet18_ef()
    # print(rnef.nn_model)
    # print(rnef(x).size())
    # output>> [512, 1, 1]

    # 1) run CIFAR-10 through ResNet-18, pickling features batch-by-batch;
    # 2) merge the per-batch pickles into single numpy arrays (bj_*.pkl).
    extrac_cifar10_features()
    batch_join_(file_train_features, file_train_label, file_test_features, file_test_label)