import torchvision
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
import numpy as np
import torch.nn.functional as F
from PIL import Image
import os

# Build the DenseNet-201 backbone, randomly initialised; the real weights are
# restored later from the fine-tuned checkpoint.  Its `classifier` head is kept
# intact, so it outputs 1000 ImageNet logits, which Net.fc1 below consumes.
#
# NOTE(review): the original code did `densenet201.fc = nn.Identity()`, but
# torchvision's DenseNet head is named `classifier`, not `fc` — that line only
# attached an unused, parameter-free submodule and changed nothing.  It has
# been removed; nn.Identity has no parameters, so saved checkpoints still load.
densenet201 = models.densenet201(pretrained=False)


# Helper used inside the torchvision transform pipeline below.
def rgb2bgr(image):
    """Return a copy of a PIL image with its channel order reversed (RGB -> BGR)."""
    pixels = np.asarray(image)
    flipped = pixels[..., ::-1]  # reverse the last (channel) axis
    return Image.fromarray(np.uint8(flipped))


class Attention(nn.Module):
    """Attention gate that rescales its input without changing its shape.

    A 1x1 convolution (in_channels -> in_channels) scores every position,
    ReLU keeps the non-negative scores, and a softmax taken jointly over
    all channel and spatial positions of each sample turns them into
    weights that multiply the input element-wise.
    """

    def __init__(self, in_channels):
        super(Attention, self).__init__()
        # Same channel count in and out, so the input shape is preserved.
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=1, bias=False)

    def forward(self, x):
        scores = self.conv(x).relu()
        batch = scores.size(0)
        # Normalise across every element of each sample, then restore the shape.
        weights = torch.softmax(scores.reshape(batch, -1), dim=1)
        return x * weights.reshape(scores.shape)


# Full classifier: input attention, a DenseNet-201 backbone, and a stack of
# fully-connected layers narrowing 1000 -> 256 -> 64 -> 10 -> 2 classes.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Attention over the raw 3-channel image highlights salient regions.
        self.attention1 = Attention(3)
        # Module-level DenseNet-201 backbone; it emits 1000 logits.
        self.densenet201 = densenet201
        # Progressive narrowing of the backbone output down to 2 classes.
        self.fc1 = nn.Linear(1000, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)
        self.fc4 = nn.Linear(10, 2)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.attention1(x)
        x = self.densenet201(x)
        # Each FC layer is followed by ReLU, including the last one.
        # NOTE(review): ReLU on the final logits before softmax zeroes any
        # negative score; kept as-is because the checkpoint was trained this way.
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            x = F.relu(layer(x))
        x = self.softmax(x)
        # Already (N, 2); the reshape is kept for parity with the original.
        return x.view(-1, 2)


# Instantiate the network and restore the fine-tuned weights for CPU inference.
model = Net()
save_name = "./result/epoch2_model_acc87.pth"  # checkpoint: epoch 2, ~87% val accuracy
model.load_state_dict(torch.load(save_name, map_location='cpu'))
model = model.cpu()
# Inference mode: freezes batch-norm statistics and disables dropout.
model.eval()
# Image to classify.
# input_image_path = 'data/1/63abc269f1014b60b7ce4390c3bef920.jpg'
input_image_path = 'data/1/img_2.png'
# Pre-processing pipeline for inference.
# NOTE(review): the original pipeline also applied
# transforms.ColorJitter(brightness=1, contrast=1, saturation=1, hue=0.5)
# here.  ColorJitter is a *random* augmentation, so keeping it at test time
# made every prediction non-deterministic; it has been removed.  Re-add it
# only if stochastic evaluation is intentional.
transform2 = transforms.Compose([
    transforms.Resize((64, 64)),
    # Flip channel order RGB -> BGR (presumably to match the training
    # pipeline — TODO confirm against the training code).
    torchvision.transforms.Lambda(rgb2bgr),
    # PIL image / ndarray (H, W, C) with values in [0, 255]
    # -> float tensor (C, H, W) with values in [0, 1].
    transforms.ToTensor(),
    # ImageNet statistics, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225),
    # listed in reverse order because the channels were just flipped to BGR.
    transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229])
])

# Load the image and run a single forward pass.
# .convert('RGB') forces 3 channels: PNGs may be RGBA, palette, or grayscale,
# which would break rgb2bgr's channel flip and the 3-channel Normalize.
image = Image.open(input_image_path).convert('RGB')
input_tensor = transform2(image)
input_batch = input_tensor.unsqueeze(0)  # add batch dimension: (C, H, W) -> (1, C, H, W)

with torch.no_grad():  # inference only — no gradients needed
    output = model(input_batch)          # (1, 2) class probabilities
    _, predicted = torch.max(output, 1)  # index of the highest-probability class
    print(f'Predicted class: {predicted.item()}')
    print(predicted)
    print(type(predicted))
    print(type(predicted.item()))
