import os

import cv2
from PIL import Image
import numpy as np
import torch
import torch.nn as nn
from torchvision import models, transforms
from torch.utils.data import Dataset, DataLoader

# Source frame directories and the matching output directories (paired by index).
data_dirs = [
    '../data/UCF--101/split1/train/',
]
out_dirs = [
    './res/fft/',
]

def get_ins(elem):
    """Return the integer index embedded as the third '-'-separated field of *elem*."""
    parts = elem.split('-')
    return int(parts[2])

# Image transform: resize the shorter side to 224 and convert to a float Tensor.
# NOTE(review): this transform is never used in the visible pipeline —
# VideoDataset resizes/converts with cv2 instead. Confirm whether it is
# dead code or used by an unseen caller before removing.
transform = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
])

# Initialize the feature extractor: ImageNet-pretrained ResNet-152 with the
# final fully-connected layer removed, so the forward pass ends at the global
# average pool and yields a (B, 2048, 1, 1) feature map per batch.
# NOTE(review): `pretrained=True` is deprecated in newer torchvision releases
# (replaced by `weights=`) — confirm the installed version still accepts it.
model = models.resnet152(pretrained=True)
model = nn.Sequential(*list(model.children())[:-1])
model.cuda()  # requires a CUDA device
model.eval()  # inference mode: freezes batch-norm statistics / disables dropout

class VideoDataset(Dataset):
    """Dataset over every frame image in a directory.

    Each item is a (3, 224, 224) uint8 CHW tensor in RGB order.
    Frames are yielded in sorted filename order so that the downstream
    FFT — which treats the batch dimension as time — sees a deterministic
    temporal sequence (os.listdir order is filesystem-dependent).
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        # Sort for a stable frame order; lexical sort is temporal for
        # zero-padded frame names. NOTE(review): if frame indices are not
        # zero-padded, sort numerically with a key like get_ins instead.
        self.imgs = sorted(os.listdir(data_dir))

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        img_file = self.imgs[index]
        img = cv2.imread(os.path.join(self.data_dir, img_file))
        img = cv2.resize(img, (224, 224))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR
        # HWC -> CHW. NOTE(review): pixels remain in [0, 255] with no
        # ImageNet mean/std normalization — confirm this is intentional
        # for the pretrained ResNet features.
        img = np.transpose(img, (2, 0, 1))
        return torch.from_numpy(img)

# For every video: extract per-frame ResNet features, take the FFT of each
# feature channel over time, and print an estimate of the dominant period
# (sequence length / index where 70% of spectral energy is accumulated).
with torch.no_grad():
    for data_dir, out_dir in zip(data_dirs, out_dirs):
        actions = os.listdir(data_dir)
        # Restrict this run to a single action class.
        actions = ['WalkingWithDog']
        for action in actions:
            action_dir = os.path.join(data_dir, action)
            for video in os.listdir(action_dir):
                video_dir = os.path.join(action_dir, video)
                dataloader = DataLoader(VideoDataset(video_dir),
                                        batch_size=1024, num_workers=4)

                batches = []
                for imgs in dataloader:
                    imgs = imgs.float().cuda()
                    feats = model(imgs).cpu()  # (B, 2048, 1, 1)
                    batches.append(feats.squeeze(3).squeeze(2).numpy())
                if not batches:
                    # Empty/unreadable video directory: nothing to analyze.
                    continue
                # Stack every batch into a single (num_frames, 2048) array.
                # (The original np.array(...) + squeeze(0) only worked for a
                # single batch; concatenate handles any batch count.)
                arrays = np.concatenate(batches, axis=0)

                # FFT magnitude of each feature channel over the time axis;
                # rows of fft_res are per-channel spectra of length num_frames.
                fft_res = np.abs(np.fft.fft(arrays, axis=0)).T

                rate = 0.7  # fraction of spectral energy to accumulate
                num = 1     # largest accumulation index seen so far (>= 1)
                elem = None
                for elem in fft_res:
                    half = elem[:(len(elem) + 1) // 2]  # non-negative freqs
                    if np.std(half) < 0.1:
                        # Near-flat spectrum: stop scanning channels, as the
                        # original did.
                        break
                    thres = half.sum() * rate
                    count = 0.0
                    for idx in range(len(half)):
                        count += half[idx]
                        if count >= thres:
                            num = max(num, idx)
                            break
                # Guard against the degenerate no-channel case instead of the
                # original bare `except: pass`, which hid every error.
                if elem is not None:
                    print(video, len(elem) / num)