import os
import torch
import numpy as np
import pandas as pd
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from sklearn.metrics import precision_score, recall_score, f1_score
from mobileNetv2 import get_model_mobileNetv2
from mobilenet import get_model_mobileNet
from shuffleNet import get_model_shuffleNet
from shuffleNetv2 import get_model_shuffleNet2
from squeeze_net import SqueezeNet
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from torchvision import transforms, utils
import numpy as np
import torch
import torch.nn as nn
import cv2
import squeeze_net

# Root folder of the clips to evaluate; every file in it shares one label.
PATH = '/data/DG-20_res/DG-20-x5/pull'
# PATH = '/data/fg2021/DG-13-x5/pray'
BATCH_SIZE = 16
NUM_WORKERS = 8
NUM_EPOCHS = 50

# Gesture name -> class index.  Order matches the `classes` tuple used when
# reporting per-class accuracy further down.
class_dict = {name: idx for idx, name in enumerate((
    'throw', 'sway', 'slide', 'push', 'pull', 'pray',
    'non', 'no', 'hello', 'go', 'come',
    'clap', 'circle'))}

def default_loader(path):
    """Load one sample from *path* via ``np.load`` and return the array."""
    array = np.load(path)
    return array


class MyDataset(Dataset):
    """Dataset over a flat folder of ``.npy`` clips that all share one label.

    Every file directly inside ``path`` becomes one sample.  The original
    script hard-coded the label to ``class_dict['pull']``; the ``label``
    parameter keeps that default but lets callers choose another class.
    """

    def __init__(self, path, transform=None, target_transform=None,
                 loader=None, label=None):
        """
        Args:
            path: directory whose files are the samples.
            transform: optional callable applied to each loaded sample.
            target_transform: optional callable applied to each label.
            loader: callable(path) -> sample.  Defaults to ``default_loader``
                (``np.load``); resolved lazily at construction time.
            label: class index assigned to every file.  Defaults to
                ``class_dict['pull']``, matching the original behavior.
        """
        super(MyDataset, self).__init__()
        if label is None:
            label = class_dict['pull']

        # Walk the folder.  Sort the listing so sample order is deterministic
        # across runs/filesystems (os.listdir order is arbitrary).
        imgs = [(os.path.join(path, name), label)
                for name in sorted(os.listdir(path))]

        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader if loader is not None else default_loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = self.loader(fn)

        if self.transform is not None:
            img = self.transform(img)
        # Bug fix: target_transform was stored by __init__ but never applied.
        if self.target_transform is not None:
            label = self.target_transform(label)
        return img, label

    def __len__(self):
        return len(self.imgs)


# Identity pipeline: the .npy clips are fed to the model as-is
# (no ToTensor / normalization is applied here).
train_transforms = transforms.Compose([])

# Build the dataset and split it 80/20 into train/test subsets.
all_dataset = MyDataset(PATH, transform=train_transforms)
train_data_size = int(len(all_dataset) * 0.8)
# Use the remainder for the test split: int(len*0.8) + int(len*0.2) can sum
# to one less than len(all_dataset) (e.g. len=11 -> 8+2), which makes
# random_split raise ValueError because the lengths don't cover the dataset.
test_data_size = len(all_dataset) - train_data_size
print('train dataset len: {}'.format(train_data_size))
print('test dataset len: {}'.format(test_data_size))
train_data, test_data = torch.utils.data.random_split(all_dataset, [train_data_size, test_data_size])

train_dataloader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
test_dataloader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)

# This is the size of the whole dataset, not the train split; the old
# message repeated 'train dataset len' and was misleading.
print('total dataset len: {}'.format(len(all_dataset)))


# --- Model construction & checkpoint restore --------------------------------
# Alternative backbones kept for reference:
# model = squeeze_net.SqueezeNet(version=1.1, sample_size=256, sample_duration=32, num_classes=13).cuda()
model = get_model_shuffleNet(groups=3, num_classes=13, width_mult=1).cuda()
# model = get_model_shuffleNet2(num_classes=13, sample_size=256, width_mult=1.)
# Wrapped in DataParallel BEFORE loading — presumably the checkpoint was
# saved from a DataParallel model so its keys carry the 'module.' prefix;
# confirm against the training script.
model = torch.nn.DataParallel(model)
# NOTE(review): torch.load without map_location restores tensors onto the
# devices they were saved from; add map_location if loading on another setup.
model.load_state_dict(torch.load('./shuffleNetmodel.kpl'))

# --- Evaluation: per-class and overall accuracy -----------------------------
model.eval()
with torch.no_grad():
    # Index -> gesture name; mirrors class_dict defined above.
    classes = (
        'throw', 'sway', 'slide', 'push', 'pull', 'pray',
        'non', 'no', 'hello', 'go', 'come',
        'clap', 'circle')
    classes_correct = [0 for _ in range(13)]
    classes_total = [0 for _ in range(13)]
    res_predict = []
    res_label = []
    # NOTE(review): this scores train_dataloader; test_dataloader is built
    # above but unused — confirm which split should be evaluated.
    for videos, labels in train_dataloader:
        video = videos.type(torch.FloatTensor).cuda()
        # Labels are class indices used for equality against argmax output
        # and for list indexing — keep them integral (was FloatTensor).
        label = labels.type(torch.LongTensor).cuda()
        outputs = model(video)
        _, predicted = torch.max(outputs, 1)
        for label_idx in range(len(labels)):
            label_single = int(label[label_idx].item())
            classes_correct[label_single] += int(
                predicted[label_idx].item() == label[label_idx].item())
            classes_total[label_single] += 1
            res_predict.append(predicted[label_idx].item())
            res_label.append(label_single)

    correct_data_temp = []
    for i in range(13):
        if classes_total[i] == 0:
            continue  # class absent from this run; avoid division by zero
        acc = 100 * classes_correct[i] / classes_total[i]
        correct_data_temp.append(acc)
        # Include the class name so per-class lines are attributable
        # (the old format string printed only a leading space).
        print('%s Accuracy %.4f %%' % (classes[i], acc))

    if sum(classes_total) > 0:
        total_acc = 100 * sum(classes_correct) / sum(classes_total)
        correct_data_temp.append(total_acc)
        print('total Accuracy %.4f %%' % total_acc)
        # precision/recall/f1 were imported (sklearn) and res_predict /
        # res_label collected, but never reported — print macro averages.
        print('macro precision %.4f recall %.4f f1 %.4f' % (
            precision_score(res_label, res_predict, average='macro'),
            recall_score(res_label, res_predict, average='macro'),
            f1_score(res_label, res_predict, average='macro')))

# data = np.load("/data/DG-20_res/DG-20-x5/c/48.npy-2.npy")
# print(data.shape)
# video = torch.tensor([data]).type(torch.FloatTensor)
#
# out = model(video)
# _, predicted = torch.max(out, 1)
# print(out)
# print(predicted)
