# 实际使用场景，输入单个视频，输出对应的格式
from spatial_cnn import Spatial_CNN
import torch.tensor as tensor
from extract_frames import get_frames
import os
import cv2
from network import *
import torch
import torchvision.transforms as transforms
from PIL import Image
import torchvision
import numpy as np

def input_video_convert(video_path, samples_per_second=4):
    """Sample frames from a video file and write them out as images.

    Frames are written to a ``sample_img/`` directory created next to the
    input video.

    Args:
        video_path: Path to the input video file.
        samples_per_second: Number of frames to extract per second of video.
            Defaults to 4, matching the original hard-coded rate.

    Returns:
        str: The directory (with trailing slash) the frame images were
        written to.
    """
    # Keep the original string construction (trailing slash included) so
    # downstream consumers such as get_frames see an identical path.
    img_dir = os.path.dirname(video_path) + "/sample_img/"
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(img_dir, exist_ok=True)
    get_frames(video_path, samples_per_second, img_dir)
    return img_dir


def load_label_list():
    """Load the UCF-101 class names from the class index file.

    Each line of ``UCF_list/classInd.txt`` looks like ``"1 ApplyEyeMakeup"``;
    only the class name (the last space-separated token) is kept.

    Returns:
        list[str]: Class names in file order (index 0 is class number 1).
    """
    with open("UCF_list/classInd.txt", 'r') as f:
        # strip() removes the newline robustly; the original ``[:-1]`` slice
        # would chop the last character of the class name if the final line
        # had no trailing newline.
        label_list = [line.strip().split(" ")[-1] for line in f]
    return label_list

# Test: run the single-video pipeline end to end on a local file.
img_dir = input_video_convert("D:/data/UCF_101_user_data/1.mp4")

# Preprocessing for the spatial (RGB) stream: resize to the network's
# 224x224 input and normalize with the standard ImageNet mean/std used by
# pretrained ResNet weights. Augmentations are disabled for inference.
frame_transform = transforms.Compose([
                transforms.Resize([224,224]),
                # transforms.RandomCrop(224),
                # transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
                ])

# Preprocessing for the motion (optical-flow) stream: resize + tensor
# conversion only; no ImageNet normalization is applied here.
motion_transform = transforms.Compose([
            transforms.Resize([224,224]),
            transforms.ToTensor(),
])

# Motion model: test on a single video (currently disabled — see the
# commented block below).
# model.build_model()
# Load the spatial model weights; map_location forces CPU so no GPU is
# required for inference.
checkpoint = torch.load("D:/ResSpatial.tar",map_location=torch.device('cpu'))
spatial_model = resnet101(pretrained= True, channel=3)
spatial_model.load_state_dict(checkpoint['state_dict'])
# print(model.parameters)
# eval() switches dropout/batch-norm layers into inference mode.
spatial_model.eval()

# # In the original code, channel was set to 20.
# motion_model = resnet101(pretrained= True, channel=20)
# checkpoint2 = torch.load("D:/motion.tar",map_location=torch.device('cpu'))
# motion_model.load_state_dict(checkpoint2['state_dict'])
# motion_model.eval()
# Load the class index list.
# with torch.no_grad():
#     label_list = load_label_list()
#     output = np.zeros((1,101))
#     input_data = torch.zeros((20, 224, 224))
#     for i in range(1,21):
#         flow_image = Image.open("data/frame" + str(i).zfill(6) + ".jpg")
#         input_data[i - 1,:,:] = motion_transform(flow_image)
#     input_data = input_data.unsqueeze(0)
#     output = motion_model(input_data).data.cpu().numpy()
#     print(label_list[np.argmax(output)])

# Spatial model: classify a single video by averaging per-frame scores.
with torch.no_grad():
    label_list = load_label_list()
    # Accumulated class scores over all sampled frames (101 UCF classes).
    output = np.zeros((1,101))
    # NOTE(review): range(1, 20) processes frames 1..19 — confirm whether
    # frame 20 was intentionally skipped.
    for i in range(1,20):
        image_data = Image.open(img_dir + "/1_"+str(i) + ".jpg")
        image_data = frame_transform(image_data)
        # torchvision.utils.save_image(image_data,"test.jpg",nrow=1)
        # Add the batch dimension expected by the network.
        image_data = image_data.unsqueeze(0)
        # Run the forward pass ONCE per frame and reuse the result; the
        # original code invoked the model a second time just for the debug
        # print, doubling inference cost.
        frame_scores = spatial_model(image_data).data.cpu()
        output += frame_scores.numpy()
        # Per-frame predicted class index (debug output).
        print(frame_scores.argmax(dim=1))
    # Final prediction: the class with the highest summed score.
    print(label_list[np.argmax(output)])
