import torch

from utils.dataloader import ViTDataSet
from train import get_classes, get_transform
from torch.utils.data import DataLoader
from vit_pytorch.vit import ViTBody
from einops.layers.torch import Rearrange
from numpy import load
import numpy as np
import torch.nn as nn
from torchvision import transforms

from PIL import Image
from matplotlib import pyplot as plt
from einops import repeat


def read_data_list(train_annotation_path):
    """Read an annotation file and return its lines (no trailing newlines).

    :param train_annotation_path: path to a text file, one sample per line
    :return: list of line strings
    """
    with open(train_annotation_path) as annotation_file:
        return annotation_file.read().splitlines()


# Resolved once at import time; the class list file is shared with train.py.
class_names, num_classes = get_classes("model_data/classes.txt")


def load_pretrain_npz_weights():
    """Inspect a JAX-style ViT-B/16 ``.npz`` checkpoint.

    Prints the shapes of a handful of known arrays (attention key kernel,
    cls token, patch-embedding kernel, head kernel, positional embedding)
    so their layout can be compared against the PyTorch model.
    """
    checkpoint = load("pretrain_weights/imagenet21k+imagenet2012_ViT-B_16-224.npz")
    inspected_keys = (
        "Transformer/encoderblock_1/MultiHeadDotProductAttention_1/key/kernel",
        "cls",
        "embedding/kernel",
        "head/kernel",
        "Transformer/posembed_input/pos_embedding",
    )
    for array_key in inspected_keys:
        print(checkpoint[array_key].shape)

def load_pretrain_weights():
    """Explore a pretrained ViT ``.pth`` checkpoint and sketch how the separate
    query/key/value weights would be stitched into a single fused ``to_qkv``
    matrix for ViTBody.

    Side effects: prints the q/k/v weight shapes and one MLP weight shape.
    """
    model_path = "pretrain_weights/imagenet21k+imagenet2012_ViT-B_16-224.pth"
    pre_load = torch.load(model_path)
    # The checkpoint wraps the actual parameter dict; the original code looped
    # over all items and kept only the last value, so do that directly.
    # NOTE(review): assumes the last top-level entry is the state dict — confirm
    # against the checkpoint layout.
    stat_dict = pre_load[next(reversed(pre_load))]
    key_ = stat_dict["transformer.encoder_layers.0.attn.key.weight"]
    query_ = stat_dict["transformer.encoder_layers.0.attn.query.weight"]
    value_ = stat_dict["transformer.encoder_layers.0.attn.value.weight"]
    print(key_.shape)
    print(query_.shape)
    print(value_.shape)
    # Flatten each 3-D weight to 2-D, then stack q/k/v row-wise — the layout a
    # fused to_qkv linear would use.  (Assumes the weights are 3-D — TODO confirm.)
    flatten = Rearrange('b h w -> b (h w)')
    qkv = torch.cat((flatten(query_), flatten(key_), flatten(value_)), dim=0)
    # print(qkv.shape)
    mlp_weight_ = stat_dict["transformer.encoder_layers.0.mlp.fc2.weight"]
    print(mlp_weight_.shape)


def load_pretrain_model_test():
    """Build a ViTBody and inspect one weight of its state dict.

    Used to compare the model's parameter names/shapes against a pretrained
    checkpoint before writing the weight-mapping code (recipe kept below).
    """
    model = ViTBody(
        image_size=224,
        patch_size=16,
        num_classes=1000,
        mlp_dim=3072,
        dim=768,
        heads=12,
        dropout=0.1,
        emb_dropout=0.1
    )
    model_dict = model.state_dict()
    weight_ = model_dict["transformer.layers.0.1.fn.net.3.weight"]
    print(weight_.shape)
    # for key in model_dict.keys():
    #     print(key + "\r")
    # Standard partial-load recipe: copy only the checkpoint entries whose
    # name and shape match this model, then load the merged dict.
    # pretrained_dict = torch.load(model_path)
    # load_key, no_load_key, temp_dict = [], [], {}
    # for k, v in pretrained_dict.items():
    #     if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
    #         temp_dict[k] = v
    #         load_key.append(k)
    #     else:
    #         no_load_key.append(k)
    # model_dict.update(temp_dict)
    # model.load_state_dict(model_dict)


# 获取排序后的索引
def argsort_test():
    """Demo of MAE-style patch masking: pick mask/keep indices via argsort.

    Generates random per-patch scores, sorts them, and splits the index
    order into a 75% masked / 25% kept partition.  Prints shapes only.
    """
    scores = torch.rand(16, 1000)
    print(scores)
    order = scores.argsort(dim=-1)
    print(order)
    # Mask ratio 0.75 over 1000 patches: first chunk masked, rest kept.
    cutoff = int(0.75 * 1000)
    masked_indices = order[:, :cutoff]
    unmasked_indices = order[:, cutoff:]
    print(masked_indices.shape)
    print(unmasked_indices.shape)


def patch_embedding_test():
    """Walk a single image through a ViT patch-embedding pipeline.

    Prints the intermediate shapes: transformed image, patch embeddings,
    positional embedding, and the decoder positional-embedding module.
    """
    image = Image.open("model_data/train/down.jpg")
    transform = get_transform("train")
    patchify = nn.Sequential(
        # b c (h*32) (w*32) -> b (h*w) (32*32*c): cut into 32x32 patches
        # and flatten each patch into one vector.
        Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=32, p2=32),
        # 32*32*3 = 3072 patch pixels -> embedding dim 1024
        nn.Linear(3072, 1024),
    )
    batch = transform(image).unsqueeze(0)
    print(batch.shape)

    patches = patchify(batch)
    print(patches.shape)
    # 64 patches + 1 cls position, embedding dim 1024.
    pos_embedding = nn.Parameter(torch.randn(1, 64 + 1, 1024))
    print(pos_embedding.shape)
    num_patches, encoder_dim = pos_embedding.shape[-2:]
    decoder_pos_emb = nn.Embedding(num_patches, encoder_dim)
    print(decoder_pos_emb)


def image_mask_test():
    """Apply random-erasing augmentation to an image and display the result.

    Bug fix: ``transforms.RandomErasing`` operates on tensors, not PIL
    images, so it must come *after* ``ToTensor`` in the pipeline (the
    original order raised a TypeError on the PIL input).
    """
    img = Image.open("model_data/train/down.jpg").convert("RGB")
    erasing = transforms.Compose(
        [
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=(1, 1, 1), inplace=False),
        ]
    )
    erasing_ret = erasing(img)
    invert = transform_invert(erasing_ret, erasing)
    show_image(img, invert)


def show_image(origin_image, convert_imag):
    """Display two images side by side for half a second, then close.

    :param origin_image: left panel (e.g. the original PIL image)
    :param convert_imag: right panel (e.g. the transformed image)
    """
    _, (left_axis, right_axis) = plt.subplots(1, 2)
    left_axis.imshow(origin_image)
    right_axis.imshow(convert_imag)
    plt.show()
    plt.pause(0.5)
    plt.close()


def transform_invert(img_, transform_train):
    """Invert a torchvision transform pipeline and rebuild a PIL image.

    Undoes Normalize (if present), converts C*H*W -> H*W*C, rescales the
    assumed 0..1 float range to 0..255, and wraps the result as a PIL image.

    :param img_: C*H*W tensor produced by ``transform_train``
    :param transform_train: the torchvision.transforms.Compose that produced it
    :return: PIL image
    :raises Exception: if the channel count is neither 1 nor 3
    """
    # Bug fix: work on a copy so the in-place mul_/add_ below cannot
    # silently modify the caller's tensor.
    img_ = img_.clone()

    # If the pipeline normalized, undo it: x * std + mean (per channel).
    if 'Normalize' in str(transform_train):
        norm_transform = list(filter(lambda x: isinstance(x, transforms.Normalize), transform_train.transforms))
        mean = torch.tensor(norm_transform[0].mean, dtype=img_.dtype, device=img_.device)
        std = torch.tensor(norm_transform[0].std, dtype=img_.dtype, device=img_.device)
        img_.mul_(std[:, None, None]).add_(mean[:, None, None])

    # C*H*W --> H*W*C (equivalent to transpose(0, 2).transpose(0, 1)).
    img_ = img_.permute(1, 2, 0)
    # Map 0..1 floats to 0..255 pixel values.
    img_ = np.array(img_) * 255

    if img_.shape[2] == 3:
        # RGB image
        img_ = Image.fromarray(img_.astype('uint8')).convert('RGB')
    elif img_.shape[2] == 1:
        # grayscale image
        img_ = Image.fromarray(img_.astype('uint8').squeeze())
    else:
        raise Exception("Invalid img shape, expected 1 or 3 in axis 2, but got {}!".format(img_.shape[2]))

    return img_


def repeat_test():
    """Broadcast a learnable mask token vector to a (batch, patches, dim) tensor."""
    mask_token = nn.Parameter(torch.randn(1024))
    # einops repeat: tile the 1024-d token across batch=16 and n=50 positions.
    tiled_tokens = repeat(mask_token, 'd -> b n d', b=16, n=50)
    print(tiled_tokens.shape)


if __name__ == '__main__':
    # Ad-hoc checks: run the checkpoint-inspection helpers.  Other scratch
    # experiments in this file (read_data_list + ViTDataSet/DataLoader walk,
    # argsort_test, patch_embedding_test, image_mask_test, repeat_test) can
    # be invoked here as needed.
    load_pretrain_weights()
    load_pretrain_model_test()
