from PIL import Image
from torchvision.transforms import transforms

from model_ import *

test_img = "./test_img/2.png"

img_ = Image.open(test_img)
print(img_)
# Convert to RGB to drop any alpha channel (e.g. PNG transparency),
# since the model expects 3-channel input.
img_ = img_.convert('RGB')
print(img_)
# CIFAR-10 class-name -> index mapping, then inverted to index -> name
# so the predicted argmax index can be mapped back to a label string.
labels = {'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4,
          'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}
labels = {v: k for k, v in labels.items()}
# Resize to the 32x32 CIFAR-10 input size and convert to a tensor
# (ToTensor also scales pixel values to [0, 1]).
img_transform = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor()])
img_tensor = img_transform(img_)
print(img_tensor)
print(img_tensor.shape)
# Round-trip back to a PIL image purely for visual inspection.
transform_image = transforms.ToPILImage()
tensor_img = transform_image(img_tensor)
tensor_img.show()
# Add a batch dimension: (3, 32, 32) -> (1, 3, 32, 32).
# unsqueeze(0) expresses the intent directly and works for any input
# size, unlike the previous hard-coded reshape(1, 3, 32, 32).
img_tensor = img_tensor.unsqueeze(0)
print(img_tensor.shape)
"""
torch.load("./model_4.pth") 这种方式读取模型是有风险的
You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle 
module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during 
unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a 
future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could 
be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are 
explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting 
`weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on 
GitHub for any issues related to this experimental feature.
torch.load("./model_4.pth")的结果：
tensor([[-5.2096, -3.0347, -0.5790,  2.8292,  1.5730,  7.1038,  3.2802,  3.2314,
         -8.2161, -1.7597]]) 5 dog
"""
# 在gpu上训练的模型，想用在cpu上推理，要加上map_location=torch.device('cpu')
torch.serialization.add_safe_globals([MyNet])
my_net: MyNet = torch.load("model_4.pth")
# 设置模型为评估模式
my_net.eval()
# 设置模型的梯度为0
with torch.no_grad():
    my_net_out = my_net(img_tensor)
    argmax = int(torch.argmax(my_net_out, dim=1))
    print(my_net_out, argmax, labels[argmax])
