import os
import torch
import numpy as np
import pandas as pd
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from sklearn.metrics import precision_score, recall_score, f1_score
from mobileNetv2 import get_model_mobileNetv2
from mobilenet import get_model_mobileNet
from shuffleNet import get_model_shuffleNet
from shuffleNetv2 import get_model_shuffleNet2
from squeeze_net import SqueezeNet

# Root folders of the pre-extracted gesture clips (.npy files, one
# sub-folder per class) for train / test / validation splits.
PATH = '/data/fg2021/DG-13-x5-rgb'
PATH_TEST = '/data/fg2021/DG-13-x5-rgb-test'
PATH_VAL = '/data/fg2021/DG-13-x5-rgb-val'

# Training hyper-parameters / runtime configuration.
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 16
NUM_WORKERS = 8  # DataLoader worker processes
NUM_EPOCHS = 30

# Gesture-name -> integer label mapping (13 classes); folder names under
# the data roots must be keys of this dict.
class_dict = {'throw': 0, 'sway': 1, 'slide': 2, 'push': 3, 'pull': 4, 'pray': 5,
              'non': 6, 'no': 7, 'hello': 8, 'go': 9, 'come': 10,
              'clap': 11, 'circle': 12}


def default_loader(path):
    """Load one sample from disk: the NumPy array serialized at *path*."""
    sample = np.load(path)
    return sample


class MyDataset(Dataset):
    """Dataset of pre-extracted clips stored as one file per sample.

    Expects ``path`` to contain one sub-folder per gesture class (folder
    names must be keys of the module-level ``class_dict``); every file in
    a class folder is one sample, loaded lazily by ``loader``.
    """

    def __init__(self, path, transform=None, target_transform=None, loader=default_loader):
        super(MyDataset, self).__init__()
        imgs = []

        # Walk the class folders.  Sort both levels so the sample order
        # is deterministic across runs/filesystems (os.listdir order is
        # arbitrary); use os.path.join instead of manual '/' gluing.
        for folder in sorted(os.listdir(path)):
            folder_path = os.path.join(path, folder)
            for file in sorted(os.listdir(folder_path)):
                imgs.append((os.path.join(folder_path, file), class_dict[folder]))

        self.imgs = imgs  # list of (file_path, int_label) pairs
        self.transform = transform  # applied to the loaded sample
        self.target_transform = target_transform  # kept for API compatibility (currently unused)
        self.loader = loader  # callable: path -> sample

    def __getitem__(self, index):
        """Return the ``(sample, label)`` pair at ``index``."""
        fn, label = self.imgs[index]
        img = self.loader(fn)

        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        """Number of samples discovered at construction time."""
        return len(self.imgs)


# Identity pipeline: samples are already numeric arrays loaded from .npy,
# so no transform is applied yet; kept as a Compose so augmentations
# (e.g. ToTensor, normalization) can be slotted in later.
train_transforms = transforms.Compose([])

# Build datasets and loaders.  The separate PATH_VAL folder is unused
# here; validation data is carved out of the held-out test split below.
train_dataset = MyDataset(PATH, transform=train_transforms)
test_dataset = MyDataset(PATH_TEST, transform=train_transforms)

train_data_size = len(train_dataset)
test_data_size = len(test_dataset)

print('train dataset len: {}'.format(train_data_size))
print('test dataset len: {}'.format(test_data_size))

train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)

# Split the held-out set 50/50 into validation and test.  Flooring both
# halves (the original `int(n * 0.5)` twice) drops one sample whenever the
# set has an odd size, which makes random_split raise because the lengths
# no longer sum to len(test_dataset) — give the remainder to the test half.
val_data_size = test_data_size // 2
test_data_size = test_data_size - val_data_size
print(val_data_size, test_data_size)
val_data, test_data = torch.utils.data.random_split(test_dataset, [val_data_size, test_data_size])
val_dataloader = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
test_dataloader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)

# Instantiate every candidate backbone for the 13-class task.
squeeze = SqueezeNet(version=1.1, sample_size=256, sample_duration=32, num_classes=13)
shuffle = get_model_shuffleNet(groups=3, num_classes=13, width_mult=1)
shufflev2 = get_model_shuffleNet2(num_classes=13, sample_size=256, width_mult=1.)
mobile = get_model_mobileNet(num_classes=13, sample_size=256, width_mult=1.)
mobilev2 = get_model_mobileNetv2(num_classes=13, sample_size=256, width_mult=1.)

# Architectures paired positionally with their output-file name prefixes.
models = [squeeze, shuffle, shufflev2, mobile, mobilev2]
MODULE = ['squeezeNet', 'shuffleNet', 'shuffleNetv2', 'mobileNet', 'mobileNetv2']
def _evaluate(model, dataloader, tag):
    """Evaluate ``model`` on ``dataloader`` and return one metrics row.

    The row is: 13 per-class accuracies (percent), overall accuracy
    (percent), then macro precision / recall / F1.  ``tag`` is the prefix
    printed before the overall-accuracy line.
    """
    model.eval()
    classes_correct = [0] * 13
    classes_total = [0] * 13
    res_predict = []
    res_label = []
    with torch.no_grad():
        for videos, labels in dataloader:
            video = videos.float().to(DEVICE)
            # Labels go straight to int64 (the original round-tripped
            # through float before .long()).
            label = labels.to(DEVICE).long()
            outputs = model(video)
            _, predicted = torch.max(outputs, 1)
            for j in range(len(labels)):
                truth = int(label[j].item())
                classes_correct[truth] += int(predicted[j].item() == truth)
                classes_total[truth] += 1
                res_predict.append(predicted[j].item())
                res_label.append(truth)

    row = []
    for i in range(13):
        # Guard against classes that never appear in this split — the
        # original divided by zero here.
        row.append(100 * classes_correct[i] / classes_total[i]
                   if classes_total[i] else 0.0)
    total_acc = 100 * sum(classes_correct) / max(sum(classes_total), 1)
    row.append(total_acc)
    print('%s Accuracy %.4f %%' % (tag, total_acc))
    row.append(precision_score(res_label, res_predict, average='macro'))
    row.append(recall_score(res_label, res_predict, average='macro'))
    row.append(f1_score(res_label, res_predict, average='macro'))
    return row


# Column names for the saved metrics spreadsheets (per-class accuracies,
# then aggregate metrics) — shared by the validation and test outputs.
METRIC_COLUMNS = ['throw', 'sway', 'slide', 'push', 'pull', 'pray',
                  'non', 'no', 'hello', 'go', 'come',
                  'clap', 'circle', 'total', 'precision_score',
                  'recall_score', 'f1_score']

for index, model in enumerate(models):
    # Move to the selected device; wrap in DataParallel only when CUDA is
    # actually available (the original called .cuda() unconditionally and
    # would crash on a CPU-only host despite the DEVICE fallback).
    model = model.to(DEVICE)
    if torch.cuda.is_available():
        model = nn.DataParallel(model, device_ids=None)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.003)

    total_step = len(train_dataloader)
    loss_data = []      # per-epoch lists of sampled training losses
    correct_data = []   # per-epoch validation metric rows
    for epoch in range(NUM_EPOCHS):
        loss_data_temp = []
        model.train()
        for i, (videos, labels) in enumerate(train_dataloader):
            video = videos.float().to(DEVICE)
            label = labels.to(DEVICE).long()
            outputs = model(video)
            loss = criterion(outputs, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Sample the loss every 50 steps for the loss curve.
            if (i + 1) % 50 == 0:
                loss_data_temp.append(loss.item())
                print('Epoch: [{}/{}],Step: [{}/{}],Loss: {:.4f}'.format(epoch + 1, NUM_EPOCHS, i + 1, total_step,
                                                                         loss.item()))

        # Per-epoch validation (previously duplicated inline code).
        correct_data.append(_evaluate(model, val_dataloader, 'total'))
        loss_data.append(loss_data_temp)

    # Persist per-epoch validation metrics and the sampled loss curve.
    val_frame = pd.DataFrame(correct_data)
    val_frame.columns = METRIC_COLUMNS
    val_frame.to_excel(MODULE[index] + "correct.xlsx")
    pd.DataFrame(loss_data).to_excel(MODULE[index] + "loss.xlsx")

    # Strip the DataParallel wrapper before saving so the checkpoint loads
    # into a bare model (DataParallel prefixes every key with 'module.').
    state = (model.module.state_dict() if isinstance(model, nn.DataParallel)
             else model.state_dict())
    torch.save(state, MODULE[index] + 'model.kpl')

    # Final held-out evaluation.  The original appended this row to
    # correct_data AFTER the spreadsheet was written, silently discarding
    # it — save it to its own file instead.
    test_row = _evaluate(model, test_dataloader, 'test data ')
    pd.DataFrame([test_row], columns=METRIC_COLUMNS).to_excel(MODULE[index] + "test.xlsx")