import torchvision.models as models
from torchvision import transforms
import cv2
from torch.autograd import Variable
import os, torch
import torch.nn as nn
import shutil


class Res18Feature(nn.Module):
    """ResNet-18 classifier with a sigmoid-gated attention scalar.

    The backbone is ResNet-18 up to (and including) global average pooling,
    producing a 512-d feature vector per sample. A small head (``alpha``)
    maps that vector to a single sigmoid weight, which scales the class
    logits produced by the replacement ``fc`` layer.

    Returns from ``forward``: ``(attention_weights, weighted_logits)``.
    """

    def __init__(self, pretrained, num_classes=7):
        super(Res18Feature, self).__init__()
        backbone = models.resnet18(pretrained)

        # Everything except the original fc head; ends at avgpool -> 512x1.
        self.features = nn.Sequential(*list(backbone.children())[:-1])

        # Input dimension of the original fc layer (512 for ResNet-18).
        feat_dim = list(backbone.children())[-1].in_features

        # New classification head (512 -> num_classes) and the
        # per-sample attention gate (512 -> 1, squashed to (0, 1)).
        self.fc = nn.Linear(feat_dim, num_classes)
        self.alpha = nn.Sequential(nn.Linear(feat_dim, 1), nn.Sigmoid())

    def forward(self, x):
        # Pool to (batch, 512, 1, 1), then flatten to (batch, 512).
        feat = self.features(x)
        feat = feat.view(feat.size(0), -1)

        attention_weights = self.alpha(feat)
        out = attention_weights * self.fc(feat)
        return attention_weights, out


# Path to the saved model weights.
# model_save_path = "src/models/new_epoch35_acc0.8221.pth"  # change to your own saved model file
model_save_path = "/home/zbzbzzz/weights/Student_epoch10_acc0.7716.pth"
# ------------------------ load model & data --------------------------- #

# Standard ImageNet preprocessing; cv2 gives an ndarray, hence ToPILImage first.
preprocess_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

res18 = Res18Feature(pretrained=False)
checkpoint = torch.load(model_save_path)
res18.load_state_dict(checkpoint['model_state_dict'])
res18.cuda(0)
res18.eval()
#  0:neutral, 1:happiness, 2:surprise, 3:sadness, 4:anger, 5:disgust, 6:fear, 7:contempt, 8:unknown, 9:NF
# Total: 12303
# Maps the model's output index to the emotion folder name.
type_map = {
    4: "angry",  # 1588
    5: "disgust",  # 158
    6: "fear",  # 2475
    1: "happy",  # 393
    0: "neutral",  # 4732 38.46%
    3: "sad",  # 2700
    2: "surprise"  # 257
}
base_path = "/home/zbzbzzz/datasets/SD_student_face"
emotion = "/home/zbzbzzz/datasets/SD_student_face_emotion"
# Start from a clean output tree on every run.
if os.path.exists(emotion):
    shutil.rmtree(emotion)
os.mkdir(emotion)

# Classify each face image and copy it into the folder of its predicted emotion.
with torch.no_grad():  # inference only: skip autograd bookkeeping
    for img in os.listdir(base_path):
        src = os.path.join(base_path, img)
        image = cv2.imread(src)
        if image is None:
            # Unreadable or non-image file; preprocess_transform would crash on None.
            continue
        image_tensor = preprocess_transform(image)
        tensor = torch.unsqueeze(image_tensor, dim=0).float().cuda(0)
        _, outputs = res18(tensor)
        _, predicts = torch.max(outputs, 1)
        category = os.path.join(emotion, type_map[int(predicts)])
        # exist_ok avoids the check-then-create race of os.path.exists + os.mkdir.
        os.makedirs(category, exist_ok=True)
        shutil.copyfile(src, os.path.join(category, img))

# for key in type_map:
#     temp_path = os.path.join(base_path, key)
#     for path in os.listdir(temp_path):
#         all_sum += 1
#         image = cv2.imread(os.path.join(temp_path, path))
#         # image = cv2.imread(os.path.join(temp_path, path), cv2.IMREAD_GRAYSCALE)
#         # image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
#         image_tensor = preprocess_transform(image)
#         tensor = Variable(torch.unsqueeze(image_tensor, dim=0).float(), requires_grad=False)
#         tensor = tensor.cuda(0)
#         _, outputs = res18(tensor)
#         _, predicts = torch.max(outputs, 1)
#         print("======================")
#         if predicts == type_map[key]:
#             print("正确")
#             correct_sum += 1
#         else:
#             print("错误")
#         print(predicts, type_map[key])
# acc = correct_sum / all_sum * 100
# print("正确率：", acc)
