from torch.optim.lr_scheduler import ReduceLROnPlateau

from cnn import CNN
from res_net import ResNet18
from shuffleNetv2 import get_model_shuffleNet2
from vgg_net import resnet34, resnet50
from alex_net import AlexNet
from squeeze_net import SqueezeNet
from shuffleNet import get_model_shuffleNet
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision import transforms, utils
import numpy as np
import torch
import torch.nn as nn

# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def default_loader(path):
    """Load one sample from disk as a NumPy array.

    ``path`` is expected to point at a ``.npy`` file (the earlier
    PIL-based grayscale loader was replaced by numpy serialization).
    """
    sample = np.load(path)
    return sample


# Placeholder augmentation pipeline: currently applies no transforms at all
# (ToTensor is left commented out — samples arrive as numpy arrays from
# default_loader; presumably conversion happens later — TODO confirm).
train_transforms = transforms.Compose([
    # transforms.ToTensor(),
])


class MyDataset(Dataset):
    """Dataset backed by a plain-text index file.

    Each non-empty line of ``txt`` holds a sample path and an integer
    label separated by whitespace: ``<path> <label>``.

    Args:
        txt: path to the index file.
        transform: optional callable applied to each loaded sample.
        target_transform: optional callable for labels
            (NOTE(review): stored but never applied in ``__getitem__``).
        loader: callable mapping a path to a sample (default: ``np.load``
            via ``default_loader``).
    """

    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
        super(MyDataset, self).__init__()
        imgs = []
        # Context manager so the index file is always closed — the
        # original opened it and never closed the handle.
        with open(txt, 'r') as fh:
            for line in fh:
                words = line.strip().split()
                if not words:
                    continue  # tolerate blank/trailing lines in the index
                imgs.append((words[0], int(words[1])))

        self.imgs = imgs  # list of (sample_path, int_label) pairs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """Return ``(sample, label)`` for the given index."""
        fn, label = self.imgs[index]
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)


BATCH_SIZE = 24
NUM_WORKERS = 8

# Index files list one "<path> <label>" pair per line.
# NOTE(review): the *training* set gets no transform while the *test* set
# gets train_transforms — looks swapped; confirm intent.
train_data = MyDataset(
    txt='/home/xiaoguojian/theoreticalAcademic/rgbd_seq_train.txt',
    transform=None,
)
test_data = MyDataset(
    txt='/home/xiaoguojian/theoreticalAcademic/rgbd_seq_test.txt',
    transform=train_transforms,
)

train_dataloader = DataLoader(
    train_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=NUM_WORKERS,
)
test_dataloader = DataLoader(
    test_data,
    batch_size=BATCH_SIZE,
    shuffle=False,
    num_workers=NUM_WORKERS,
)

print('train dataset len: {}'.format(len(train_dataloader.dataset)))
print('test dataset len: {}'.format(len(test_dataloader.dataset)))

#  start make net

# Alternative backbones kept for reference:
# model = SqueezeNet(version=1.1, sample_size=256, sample_duration=32, num_classes=20)
# model = get_model_shuffleNet2(num_classes=20, sample_size=256, width_mult=1.)
# model = get_model_mobileNet(num_classes=20, sample_size=256, width_mult=1.)
# model = get_model_mobileNetv2(num_classes=20, sample_size=256, width_mult=1.)
model = get_model_shuffleNet(groups=3, num_classes=20, width_mult=1)

# Move to the selected device instead of unconditionally calling .cuda(),
# so the CPU fallback computed into `device` above actually works.
model = model.to(device)
if torch.cuda.is_available():
    # device_ids=None lets DataParallel use all visible GPUs.
    model = nn.DataParallel(model, device_ids=None)

criterion = nn.CrossEntropyLoss()

# Plain SGD; the LR decays by gamma per scheduler step.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.98)
total_step = len(train_dataloader)  # batches per epoch, for progress logging
num_epochs = 20

for epoch in range(num_epochs):
    # ---------- training ----------
    model.train()
    for i, (videos, labels) in enumerate(train_dataloader):
        video = videos.type(torch.FloatTensor).to(device)
        label = labels.to(device).long()

        outputs = model(video)
        loss = criterion(outputs, label)

        optimizer.zero_grad()
        loss.backward()
        # BUG FIX: the original never called optimizer.step(), so the
        # weights were never updated; it called scheduler.step() per
        # batch instead (LR decay belongs once per epoch, below).
        optimizer.step()

        if (i + 1) % 20 == 0:
            print('Epoch: [{}/{}],Step: [{}/{}],Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1, total_step,
                                                                     loss.item()))

    # Decay the learning rate once per epoch.
    scheduler.step()

    # ---------- evaluation ----------
    model.eval()
    classes = (
        'c', 'comeon', 's', 'forbid', 'good', 'non', 'dislike', 'helpless', 'come', 'no', 'please', 'pull', 'push',
        'me', 'circle', 'pat', 'wave', 'pray', 'grasp2', 'grasp1')

    classes_correct = [0] * 20
    classes_total = [0] * 20
    with torch.no_grad():  # single no_grad scope (was nested twice)
        for videos, labels in test_dataloader:
            video = videos.type(torch.FloatTensor).to(device)
            label = labels.to(device)
            outputs = model(video)
            _, predicted = torch.max(outputs, 1)
            for j in range(len(labels)):
                cls = int(label[j].item())
                # .item()/int() keeps counters as plain Python ints
                # (the original accumulated 0-d bool tensors).
                classes_correct[cls] += int((predicted[j] == label[j]).item())
                classes_total[cls] += 1

    print('total Accuracy %.4f %%' % (
            100 * sum(classes_correct) / sum(classes_total)))

    for i in range(20):
        if classes_total[i] == 0:
            # Guard against ZeroDivisionError when a class is absent
            # from the test split.
            print('Accuracy of %10s : no samples' % classes[i])
        else:
            print('Accuracy of %10s : %.4f %%' % (
                classes[i], 100 * classes_correct[i] / classes_total[i]))

# Persist the learned weights. NOTE(review): the model is wrapped in
# nn.DataParallel above, so the saved keys carry a 'module.' prefix — load
# through a matching wrapper or strip the prefix. '.kpl' is a nonstandard
# extension; '.pt'/'.pth' is conventional for PyTorch checkpoints.
torch.save(model.state_dict(), 'Model1.kpl')
