import os
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from DataSet import MyDataSet, ToTensor, Resize
from net import ResNet18
import torchvision.models as models


if __name__ == "__main__":
    # ----- Hyper-parameters and paths -----
    num_epoch = 100
    batch_size = 64
    num_class = 19
    show_dir = '/home/xys/CloundShiProjects/traffic_light/trafficlight_classify/model/new_last/data-show.txt'
    model_save_dir = '/home/xys/CloundShiProjects/traffic_light/trafficlight_classify/model/new_last/'
    root_dir = '/home/xys/CloundShiProjects/traffic_light/trafficlight_classify/data'

    best_acc = 0.0
    best_epoch = 0

    # Use the GPU when available; all tensors/model are moved via `device`
    # instead of hard-coded .cuda() so the script also runs on CPU-only hosts.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Training data: light geometric/photometric augmentation, then
    # ImageNet mean/std normalization (matches the pretrained-style ResNet18).
    train_dataset = MyDataSet(root_dir=root_dir + '/train',
         transform=transforms.Compose([
            transforms.RandomRotation((-3.0, 3.0)),
            transforms.ColorJitter(contrast=0.1),
            transforms.Resize([110, 110]),
            transforms.RandomCrop((96, 96)),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ]))
    trainset_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)

    # Validation data: deterministic resize only (no augmentation, no shuffle).
    test_dataset = MyDataSet(root_dir=root_dir + '/test',
         transform=transforms.Compose([
            transforms.Resize((96, 96)),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ]))
    testset_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)

    model = ResNet18(num_class=num_class)
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.0003, momentum=0.9)
    # Halve the learning rate every 10 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    max_epoch = num_epoch
    for epoch in range(max_epoch):
        # ----- Training phase -----
        # BUGFIX: explicitly switch back to train mode every epoch; the
        # original called model.eval() for validation and never restored
        # train mode, so BatchNorm/Dropout were frozen from epoch 2 onward.
        model.train()
        loss_sigma = 0.0  # running loss over the last 10 iterations
        correct = 0.0     # correct predictions this epoch
        total = 0.0       # samples seen this epoch

        for i, data in enumerate(trainset_dataloader):
            # MyDataSet yields (image tensor, class label, source path).
            inputs, labels, image_path = data
            inputs = inputs.to(device).float()
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Track running accuracy on the training stream.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            loss_sigma += loss.item()

            # Log every 10 iterations (average loss over that window).
            if i % 10 == 9:
                loss_avg = loss_sigma / 10
                loss_sigma = 0.0
                print("Training: Epoch[{:0>3}/{:0>3}] iteration[{:0>3}/{:0>3}] Loss:{:.4f} Acc:{:.2f}".format(
                    epoch + 1, max_epoch, i + 1, len(trainset_dataloader), loss_avg, correct / total))
                with open(show_dir, 'a+') as f:
                    f.write('\n Training: Epoch[{:0>3}/{:0>3}] iteration[{:0>3}/{:0>3}] Loss:{:.4f} Acc:{:.2f}'.format(
                        epoch + 1, max_epoch, i + 1, len(trainset_dataloader), loss_avg, correct / total))

        # BUGFIX: step the LR scheduler AFTER the epoch's optimizer steps
        # (PyTorch >= 1.1 ordering); stepping it first skipped the initial
        # learning-rate value entirely.
        scheduler.step()

        # ----- Validation phase -----
        model.eval()
        val_loss_sigma = 0.0
        val_correct = 0.0
        val_total = 0.0
        # no_grad() avoids building the autograd graph for validation
        # forward passes (the original's post-hoc detach_() still paid
        # the graph-construction cost).
        with torch.no_grad():
            for j, val_data in enumerate(testset_dataloader):
                val_inputs, val_labels, image_path = val_data
                val_inputs = val_inputs.to(device)
                val_labels = val_labels.to(device)
                val_outputs = model(val_inputs)
                val_total += val_labels.size(0)
                val_loss_sigma += criterion(val_outputs, val_labels).item()
                _, val_predicted = torch.max(val_outputs.data, 1)
                val_correct += (val_predicted == val_labels).sum().item()

        val_loss_avg = val_loss_sigma / float(len(testset_dataloader))
        val_acc = float(val_correct) / val_total
        print("Val: Epoch[{:0>3}/{:0>3}] Loss: {:.4f} Acc: {:.3f}".format(epoch + 1, max_epoch,
            val_loss_avg, val_acc))

        # Keep a separate checkpoint of the best-so-far validation accuracy.
        if val_acc > best_acc:
            best_acc = val_acc
            best_epoch = epoch + 1
            torch.save(model.state_dict(), model_save_dir + '/model_best.pth')
        with open(show_dir, 'a+') as f:
            f.write('\n Val: Epoch[{:0>3}/{:0>3}] Loss: {:.4f} Acc: {:.3f}'.format(epoch + 1, max_epoch,
                val_loss_avg, val_acc))
        # Also checkpoint every epoch, numbered.
        torch.save(model.state_dict(), model_save_dir + '/model_{:0>3}.pth'.format(epoch + 1))
    print(best_epoch, best_acc)

