import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import sys
sys.path.append("/data/chengjiangang/workspace/VGG19/models")
from class_model import VGG19
from resnet import resnet18
import time
import os
from math import cos, pi
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
from torch.utils.tensorboard import SummaryWriter
# %matplotlib inline
# **************************************************************
# Run configuration / training hyper-parameters.
max_epoch = 300  # total number of training epochs
lr_min=0.0001  # floor of the cosine learning-rate schedule
lr_max=0.01  # peak learning rate reached right after warmup
batch_size = 64  # samples per mini-batch

# load_train: warm-start from ImageNet-pretrained ResNet-18 weights.
load_train = True #False
# resume_train: continue training from a previously saved checkpoint.
resume_train = False #True
# **************************************************************
# Reference note (no-op string): class_to_idx mapping of the dataset.
'''{'green': 0, 'left_green': 1, 'left_red': 2, 'left_yollow': 3, 'off': 4, 'other': 5, 'red': 6, 'right_green': 7, 'right_red': 8, 'right_yellow': 9, 'straight_green': 10, 'straight_red': 11, 'straight_yellow': 12, 'yellow': 13}
39
{'green': 0, 'left_green': 1, 'left_red': 2, 'left_yollow': 3, 'off': 4, 'other': 5, 'red': 6, 'right_green': 7, 'right_red': 8, 'right_yellow': 9, 'straight_green': 10, 'straight_red': 11, 'straight_yellow': 12, 'yellow': 13}'''

def adjust_learning_rate(optimizer, current_epoch, max_epoch, lr_min=0., lr_max=0.01, warmup=True):
    """Set the LR of every param group: linear warmup, then cosine annealing.

    For the first ``warmup_epoch`` epochs the LR ramps linearly from 0 up to
    ``lr_max``; afterwards it follows a half-cosine decay from ``lr_max`` down
    to ``lr_min``, reaching the floor at ``max_epoch``.

    Args:
        optimizer: torch optimizer; all its param groups are updated in place.
        current_epoch: 0-based index of the epoch about to run.
        max_epoch: total number of training epochs (schedule horizon).
        lr_min: final learning rate at the end of the schedule.
        lr_max: peak learning rate reached right after warmup.
        warmup: enable the 5-epoch linear warmup phase.
    """
    warmup_epoch = 5 if warmup else 0
    if current_epoch < warmup_epoch:
        # Linear ramp: 0 -> lr_max over the warmup epochs.
        lr = lr_max * current_epoch / warmup_epoch
    else:
        # Cosine anneal: lr_max -> lr_min over the remaining epochs.
        # BUG FIX: the original clobbered the lr_min argument with half the
        # *current* LR, so the schedule never annealed toward the requested
        # floor and the target drifted downward every epoch.
        lr = lr_min + 0.5 * (lr_max - lr_min) * (
            1.0
            + cos(pi * (current_epoch - warmup_epoch) / (max_epoch - warmup_epoch))
        )
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    # Log once per call, not once per param group as the original did.
    print("lr:", lr)


# Create a timestamped output directory for checkpoints and TensorBoard logs.
current_time = time.localtime()
run_stamp = time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
savefolder = os.path.join("output", run_stamp)
os.makedirs(savefolder, exist_ok=True)

# Image pre-processing pipelines (torchvision): resize every crop to the
# 224x224 input the network expects, convert to a tensor, and normalize
# with the dataset's per-channel RGB mean/std statistics.
train_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.4278617, 0.4715784, 0.47092885],
                         [0.2286385, 0.22028573, 0.2346289]),
])

# Validation uses the identical deterministic pipeline (no augmentation).
val_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.4278617, 0.4715784, 0.47092885],
                         [0.2286385, 0.22028573, 0.2346289]),
])

# Build ImageFolder datasets and their loaders for the traffic-light data.
train_root = '/media/data/dataset/traffic_light/d2city/myannotations/light_class/data/train'
val_root = '/media/data/dataset/traffic_light/d2city/myannotations/light_class/data/val'

trainset = torchvision.datasets.ImageFolder(root=train_root, transform=train_transform)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=batch_size, shuffle=True, num_workers=4)

valset = torchvision.datasets.ImageFolder(root=val_root, transform=val_transform)
valloader = torch.utils.data.DataLoader(
    valset, batch_size=batch_size, shuffle=False, num_workers=4)

# Report loader lengths (in batches) and the class -> index mappings.
print(len(trainloader))
print(trainset.class_to_idx)
print(len(valloader))
print(valset.class_to_idx)

# Select CPU or GPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Initialize the network (pretrained weights are loaded manually below).
#model = VGG19(num_classes=14, init_weights=False)
model = resnet18(False, "../checkpoint", num_classes=14)

if load_train:
    # Warm-start from ImageNet-pretrained ResNet-18 weights, keeping only
    # the tensors whose names exist in our model (the 14-class fc head
    # differs from the 1000-class original, so it stays randomly init'd).
    model_dict = model.state_dict()
    state_dict = torch.load("/data/chengjiangang/workspace/VGG19/checkpoint/resnet18-5c106cde.pth")
    new_state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)

start_epoch = 0
resume_ckpt = None
if resume_train:
    # BUG FIX: resume from a checkpoint in the format this script saves
    # ({"start_epoch", "model", "optimizer"}). The original loaded the raw
    # ImageNet weight file here and indexed "start_epoch"/"optimizer",
    # keys that file does not contain (would raise KeyError).
    # TODO(review): point this path at the run you actually want to resume.
    resume_ckpt = torch.load("/data/chengjiangang/workspace/VGG19/checkpoint/resnet18-5c106cde.pth")
    model.load_state_dict(resume_ckpt["model"])
    start_epoch = resume_ckpt["start_epoch"]

model.to(device)

# Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
if resume_ckpt is not None:
    # BUG FIX: restore optimizer state AFTER the optimizer exists. The
    # original assigned the raw state dict to `optimizer` and immediately
    # overwrote it with a fresh SGD, losing the momentum buffers.
    optimizer.load_state_dict(resume_ckpt["optimizer"])

tblogger = SummaryWriter(savefolder)


# Per-epoch history of train accuracy, train loss and validation accuracy.
Accuracy = []
Loss = []
Val_Accuracy = []
BEST_VAL_ACC = 0.

# Training loop.
since = time.time()
print("start_epoch: ", start_epoch)
for epoch in range(start_epoch, max_epoch):
    # Epoch-wide accumulators. BUG FIX: the original only folded the
    # 20-iteration running window into the epoch totals when the window
    # completed, silently dropping the remainder batches while `total`
    # still counted their samples -- epoch stats were underestimated.
    train_loss = 0.
    train_correct = 0.
    total = 0.
    # Rolling 20-iteration window used only for progress logging.
    run_correct, run_loss, run_seen = 0., 0., 0
    model.train()
    adjust_learning_rate(optimizer, epoch, max_epoch, lr_min=lr_min, lr_max=lr_max, warmup=True)
    for i, data in enumerate(trainloader, 0):
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)
        # Forward / backward / update.
        optimizer.zero_grad()
        outs = model(images)
        loss = criterion(outs, labels)
        loss.backward()
        optimizer.step()
        # Bookkeeping.
        n = labels.size(0)
        _, prediction = torch.max(outs, 1)
        correct = (prediction == labels).sum().item()
        total += n
        # BUG FIX: `loss` is a batch mean; weight it by the batch size so
        # train_loss / total is a true per-sample mean (the original
        # divided a sum of batch means by the sample count).
        train_loss += loss.item() * n
        train_correct += correct
        run_seen += n
        run_loss += loss.item()
        run_correct += correct
        if i % 20 == 19:
            # BUG FIX: normalize by the samples actually seen in this
            # window (run_seen), not labels.size(0) * 20, which is wrong
            # whenever the window contains the short final batch.
            print('|epoch: {}|, |iter: {}|, |lr: {:.4f}| train accuracy: {:.4f}% |loss: {:.4f}|'.format(
                epoch + 1, i + 1, optimizer.param_groups[0]['lr'],
                100 * run_correct / run_seen, run_loss / 20))
            run_correct, run_loss, run_seen = 0., 0., 0
    Loss.append(train_loss / total)
    tblogger.add_scalar("train_loss", train_loss / total, epoch + 1)
    Accuracy.append(100 * train_correct / total)
    tblogger.add_scalar("train_accuracy", 100 * train_correct / total, epoch + 1)
    tblogger.add_scalar("lr", optimizer.param_groups[0]['lr'], epoch + 1)

    # Validation.
    acc = 0.
    model.eval()
    print('waitting for Val...')
    with torch.no_grad():
        val_correct = 0.
        val_total = 0
        for data in valloader:
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            out = model(images)
            _, prediction = torch.max(out, 1)
            val_total += labels.size(0)
            val_correct += (prediction == labels).sum().item()
        # Compute once after the loop (the original recomputed per batch);
        # guard against an empty validation set.
        if val_total:
            acc = 100. * val_correct / val_total
    print('epoch {}  The ValSet accuracy is {:.4f}% \n'.format(epoch, acc))
    Val_Accuracy.append(acc)

    # Checkpoint the best model so far: weights, optimizer state and the
    # epoch index to resume from (format consumed by the resume branch).
    if acc > BEST_VAL_ACC:
        print('Find Better Model and Saving it...')
        ckpt_state = {
            "start_epoch": epoch + 1,
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        torch.save(ckpt_state, os.path.join(savefolder, "resnet18.pth"))
        BEST_VAL_ACC = acc
        print('Saved!')

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Now the best val Acc is {:.4f}%'.format(BEST_VAL_ACC))