from torch.utils import data
from torchvision import transforms, datasets  # datasets: common ready-made vision datasets
from PIL import Image
from torch_Beit_model import BEiTImageEncoder
import torch
import json


class MyDataset(data.Dataset):
    """Image dataset backed by parallel lists of file paths and class labels.

    Args:
        images_path: list of paths to image files on disk.
        images_class: list of integer class labels, parallel to ``images_path``.
        transform: optional callable applied to each loaded PIL image.  When
            ``None``, a default ImageNet-style preprocessing pipeline is used
            (resize to 224x224, to-tensor, normalize with ImageNet mean/std).
    """

    def __init__(self, images_path: list, images_class: list, transform = None):
        self.images_path = images_path
        self.images_class = images_class
        if transform is None:
            # Default preprocessing pipeline.  NOTE: the original code also
            # applied CenterCrop((224, 224)) right after the exact
            # Resize((224, 224)); cropping to the same size is a no-op, so it
            # has been removed.
            self.transform = transforms.Compose([
                transforms.Resize((224, 224)),  # scale image to target size
                transforms.ToTensor(),  # PIL image -> float tensor in [0, 1]
                transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])  # ImageNet statistics
            ])
        else:
            self.transform = transform

    def __len__(self):
        # One sample per image path.
        return len(self.images_path)

    def __getitem__(self, item):
        """Load, validate, and transform the sample at index ``item``.

        Returns:
            Tuple ``(img, label)`` — the transformed image and its integer class.

        Raises:
            ValueError: if the image is not in RGB mode (e.g. grayscale 'L'),
                since the 3-channel normalization would be invalid for it.
        """
        img = Image.open(self.images_path[item])
        # 'RGB' is a color image; 'L' would be grayscale.  Fail loudly rather
        # than silently converting, so bad data is noticed.
        if img.mode != 'RGB':
            raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item]))
        label = self.images_class[item]
        # __init__ guarantees self.transform is always set (the None default
        # was replaced by the standard pipeline), so apply it unconditionally.
        img = self.transform(img)
        return img, label

    @staticmethod
    def collate_fn(batch):
        """Collate a list of ``(image, label)`` pairs into batched tensors."""
        # Unzip the list of tuples into two parallel tuples.
        images, labels = tuple(zip(*batch))
        images = torch.stack(images, dim = 0)  # (B, C, H, W)
        labels = torch.as_tensor(labels)
        return images, labels


# # test
# data_path = ['./xidada.jpg', './lm.jfif']
# # 0: handshake, 1: lemon
# i_class = [0, 1]
# dataset = MyDataset(data_path, i_class)
#
# test_loader = data.DataLoader(
#     dataset,
#     batch_size = 16,
#     shuffle = False,
#     # num_workers=2
# )
#
# model = BEiTImageEncoder()
# with open('./param_name_map.json', 'r') as f:
#     map_dict = json.load(f)
#
# # for name in model.state_dict():
# #     print(name)
#
# # param_name=[name for name, _ in model.named_parameters()]
# # print(param_name)
# pretrained_params = torch.load('./beit/beit_base_patch16_224_pt22k.pth', map_location = torch.device('cpu'))
# for _, param in pretrained_params.items():
#     for name, par in param.items():
#         for k, v in map_dict.items():
#             if v == name:
#                 model.state_dict()[k].copy_(par)
#                 break
# # 深拷贝deepcopy(model.state_dict())
#
# # device = "cuda:0" if torch.cuda.is_available() else "cpu"
# # model.to(device)
#
# # weight_file='./beit/beit_base_patch16_224_pt22k.pth'
# # model.load_state_dict(torch.load(weight_file,map_location=torch.device('cpu')),strict=True)
# model.eval()
# correct = 0
# total = 0
# with torch.no_grad():
#     for image, labels in test_loader:
#         outputs = model(image)
#         _, predicted = torch.max(outputs.data, 1)
#         correct += (predicted == labels).sum().item()
#         total += predicted.shape[0]
#
# print('{}%'.format(correct / total * 100))
