'''
@Description: Two-stream (spatial RGB + motion optical-flow) action recognition inference demo.
@Author: Chen Chenxi
@Date: 2019-11-01 11:04:59
@LastEditTime: 2019-11-07 12:13:56
'''
from spatial_cnn import Spatial_CNN
import torch.tensor as tensor
from extract_frames import get_frames
import get_opticalflow
import os
import cv2
from network import *
import torch
import torchvision.transforms as transforms
from PIL import Image
import torchvision
import numpy as np


def input_video_convert(video_path):
    """Sample frames from *video_path* and return the directory they land in.

    Frames are extracted at SAMPLE_NUM_PER_SECOND frames per second into a
    "sample_img/" folder next to the video.  The returned path deliberately
    keeps the trailing slash and exact suffix "sample_img/" because the
    caller slices it off by character count.
    """
    SAMPLE_NUM_PER_SECOND = 4  # renamed from SAMPLE_NUM_PRE_SECOND (typo)
    img_dir = os.path.dirname(video_path) + "/sample_img/"
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    get_frames(video_path, SAMPLE_NUM_PER_SECOND, img_dir)
    return img_dir

def load_label_list():
    """Read the UCF-101 class names from "UCF_list/classInd.txt".

    Each line looks like "1 ApplyEyeMakeup"; only the name (last
    space-separated token) is kept, in file order.  Using .strip() instead
    of chopping the last character ([:-1]) fixes the case where the final
    line has no trailing newline (the old code dropped the name's last
    letter) and also handles CRLF line endings.
    """
    with open("UCF_list/classInd.txt", 'r') as f:
        label_list = [line.split(" ")[-1].strip() for line in f]
    return label_list


# Sample frames from the input video, then extract optical flow from them.
img_dir = input_video_convert("C:/Users/65152/Desktop/rec_test/test.mp4")
# Strip the trailing "sample_img/" (exactly 11 characters) so the flow
# directory sits next to the frame directory.  NOTE(review): fragile —
# this only works while input_video_convert returns that exact suffix.
flow_path = img_dir[:-11]+"flow"
# print(img_dir)
# print(flow_path)
if not os.path.exists(flow_path):
    os.makedirs(flow_path)
# extract_flow presumably writes u/v flow images under flow_path
# (the fusion code below reads flow_path + "/u" and "/v") — confirm.
flow_dir = get_opticalflow.extract_flow(img_dir,flow_path)


# Preprocessing pipelines for the two input streams.
# RGB frames: resize to 224x224, convert to tensor, ImageNet normalization
# (matches the statistics the pretrained spatial ResNet expects).
frame_transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Optical-flow images: resize and tensor conversion only (no normalization).
motion_transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
])


# Load the trained spatial (RGB) stream: ResNet-101 with 3 input channels.
# map_location forces the checkpoint onto CPU so inference works without a GPU.
checkpoint = torch.load("record/spatial/model_best.pth.tar",map_location=torch.device('cpu'))
spatial_model = resnet101(pretrained= True, channel=3)
spatial_model.load_state_dict(checkpoint['state_dict'])
spatial_model.eval()  # inference mode: freezes dropout / batch-norm behavior


# Load the trained motion (optical-flow) stream: ResNet-101 with 20 input
# channels — the fusion code below stacks 10 (u, v) flow pairs per clip.
checkpoint2 = torch.load("record/motion/model_best.pth.tar",map_location=torch.device('cpu'))
motion_model = resnet101(pretrained= True, channel=20)
motion_model.load_state_dict(checkpoint2['state_dict'])
motion_model.eval()  # inference mode


# Two-stream fusion: sum the class scores of the spatial (RGB) and motion
# (optical-flow) streams and print the winning UCF-101 label.
with torch.no_grad():
    label_list = load_label_list()
    # Stacked flow input: 10 (u, v) pairs interleaved into 20 channels.
    input_data = torch.zeros((20, 224, 224))
    # Accumulated spatial-stream scores over all sampled frames (101 classes).
    spatial_output = np.zeros((1, 101))
    u = flow_path + "/u"
    v = flow_path + "/v"

    # Spatial stream: sum class scores over the sampled RGB frames.
    # NOTE(review): range(1, 20) reads frames test_1 .. test_19 only — if 20
    # frames were sampled, the last one is skipped; confirm intended count.
    for i in range(1, 20):
        # Context manager closes each frame file (the old code leaked them).
        with Image.open(img_dir + "/test_" + str(i) + ".jpg") as frame:
            image_data = frame_transform(frame)
        image_data = image_data.unsqueeze(0)  # add batch dimension
        spatial_output += spatial_model(image_data).data.cpu().numpy()

    # Motion stream: load 10 horizontal/vertical flow pairs into channels
    # (2j, 2j+1); filenames follow the "frame000001.jpg" zero-padded scheme.
    for j in range(10):
        frame_idx = 'frame' + str(j + 1).zfill(6)
        h_image = u + '/' + frame_idx + '.jpg'
        v_image = v + '/' + frame_idx + '.jpg'
        with Image.open(h_image) as imgH, Image.open(v_image) as imgV:
            input_data[2 * j, :, :] = motion_transform(imgH)
            input_data[2 * j + 1, :, :] = motion_transform(imgV)
    input_data = input_data.unsqueeze(0)  # add batch dimension
    motion_output = motion_model(input_data).data.cpu().numpy()

    # Late fusion: element-wise sum of both streams' scores (replaces the
    # old manual 101-iteration loop; values are identical).
    final_output = spatial_output + motion_output

    # np.argmax on the (1, 101) array returns the flattened index, which
    # equals the class index since there is a single row.
    print(label_list[np.argmax(final_output)])
    