# encoding: utf-8
"""
@author:  shaoqijun
@date:  2020/7/30
"""
from __future__ import print_function, division

import copy
import logging
import os
import shutil
import time

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.nn import DataParallel
from torchvision import datasets, models, transforms

from config import cfg
from datasets.data_loader import Data_Loader
from model.cnn import Net
from utils.log_helper import *
from utils.show_img import sw_add_image, imshow
from utils.toonnx import to_onnx
logger = logging.getLogger('global')


def train_model(model, data_loader, criterion, optimizer, scheduler, summary_writer,
                           num_epochs=25):
    """Run the train/val loop and return the model with the best val weights.

    Args:
        model: network to optimize (already moved to the target device).
        data_loader: dict with 'train' and 'val' DataLoader instances.
        criterion: loss function, e.g. nn.CrossEntropyLoss.
        optimizer: optimizer built over ``model.parameters()``.
        scheduler: LR scheduler, stepped once per training epoch.
        summary_writer: tensorboardX SummaryWriter, or None to disable logging.
        num_epochs: number of epochs to run.

    Returns:
        ``model`` with the weights of the best-val-accuracy epoch loaded.

    Side effects: saves a snapshot every epoch under cfg.TRAIN.SNAPSHOT_DIR and
    the overall best weights to ``best_model_by_<epoch>.pth`` in the CWD.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_epoch = 0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_acc = 0

            for batch, (inputs, labels) in enumerate(data_loader[phase]):
                if cfg.CUDA:
                    inputs, labels = inputs.cuda(), labels.cuda()

                # Track gradients only while training; validation runs
                # gradient-free, saving memory and compute.
                with torch.set_grad_enabled(phase == 'train'):
                    out = model(inputs)
                    loss = criterion(out, labels)

                running_loss += loss.item()
                pred = torch.max(out, 1)[1]
                # .item() yields a Python int so the epoch accuracy below is
                # computed with true (float) division, not tensor division.
                batch_corrects = (pred == labels).sum().item()
                running_acc += batch_corrects

                if summary_writer:
                    global_step = epoch * len(data_loader[phase]) + batch
                    summary_writer.add_scalar('{} loss'.format(phase), loss.item(), global_step)
                    # Divide by the actual batch length so the last (possibly
                    # smaller) batch reports a correct accuracy.
                    summary_writer.add_scalar('{} corrects'.format(phase), batch_corrects / len(inputs), global_step)

                logger.info('epoch: {}/{} batch {}/{}  {} Loss: {:.3f}, Acc: {:.3f}'
                            .format(epoch + 1, cfg.TRAIN.EPOCHS, batch, len(data_loader[phase]),phase,
                                loss.item(), batch_corrects / len(inputs)))

                if phase == 'train':
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

            if phase == 'train':
                scheduler.step()  # update learning rate once per epoch

            epoch_loss = running_loss / len(data_loader[phase])
            epoch_acc = running_acc / len(data_loader[phase].dataset)
            print('epoch = {} {} Loss: {:.6f}, Acc: {:.3f}'.format(epoch, phase, epoch_loss, epoch_acc))

            logger.info('epoch = {} {} Loss: {:.6f}, Acc: {:.3f}'.format(epoch, phase, epoch_loss, epoch_acc))

            # Save a snapshot after every epoch's validation phase.
            if phase == 'val':
                if not os.path.exists(cfg.TRAIN.SNAPSHOT_DIR):
                    os.makedirs(cfg.TRAIN.SNAPSHOT_DIR)
                torch.save(model.state_dict(), os.path.join(cfg.TRAIN.SNAPSHOT_DIR, 'params_' + str(epoch + 1) + '.pth'))
                #to_onnx(model, 3, 28, 28, 'params.onnx')
            # Keep a deep copy of the best-performing weights so far.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_epoch = epoch
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Load the best weights and persist them before returning.
    model.load_state_dict(best_model_wts)
    torch.save(model.state_dict(), "best_model_by_{}.pth".format(best_epoch))
    #to_onnx(model, 3, 160, 160, 'params.onnx')
    return model


# Visualize model predictions on validation images
def visualize_model(model,dataloaders,class_names, num_images=6):
    """Plot up to ``num_images`` validation samples with predicted labels.

    Args:
        model: trained classifier; its train/eval mode is restored on exit.
        dataloaders: dict containing a 'val' DataLoader.
        class_names: sequence mapping class index -> display name.
        num_images: number of images to display (arranged in a 2-column grid).
    """
    was_training = model.training
    model.eval()
    images_so_far = 0
    plt.figure()  # new figure; the handle itself is not needed

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])

                # Stop once the grid is full, restoring the original mode.
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)


def main():
    """Entry point: set up logging, data, model and optimizer, then train."""
    # Start from a clean log directory so stale tensorboard events don't mix in.
    if os.path.exists(cfg.TRAIN.LOG_DIR):
        shutil.rmtree(cfg.TRAIN.LOG_DIR)
    os.makedirs(cfg.TRAIN.LOG_DIR)
    init_log('global', logging.INFO)
    if cfg.TRAIN.LOG_DIR:
        # Pass the level constant logging.INFO (the original passed the
        # logging.info *function*, which is not a valid log level).
        add_file_handler('global',os.path.join(cfg.TRAIN.LOG_DIR, "logs.txt"),logging.INFO)

    imgs, data_loader, class_to_idx, classer = Data_Loader()
    logger.info("data prepare done")

    # ResNet-18 backbone with a classification head sized to this dataset.
    model = models.resnet18(pretrained=False)
    model.load_state_dict(torch.load(cfg.TRAIN.RESNET18_ROOT))
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, len(class_to_idx))

    # LeNet alternative:
    # model = Net()

    if cfg.CUDA:
        model.cuda()
        # Multi-GPU training:
        # if torch.cuda.device_count() > 1:
        #     model= DataParallel(model)
    logger.info("model prepare done")

    summary_writer = SummaryWriter(cfg.TRAIN.LOG_DIR) if cfg.TRAIN.LOG_DIR else None

    if summary_writer:
        # Visualize a batch of training images.
        images, out = sw_add_image(data_loader['train'])
        summary_writer.add_image('mnist images',out)

        # Visualize the model graph; inputs must be on the same device as the
        # model, and only when CUDA is actually enabled.
        if cfg.CUDA:
            images = images.cuda()
        summary_writer.add_graph(model, images)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-3)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 20], 0.1)

    # Train and keep the best-validation-accuracy weights.
    model = train_model(model, data_loader, criterion, optimizer, scheduler, summary_writer,
                           num_epochs=cfg.TRAIN.EPOCHS)

    #visualize_model(model,data_loader, classer)
    if summary_writer:
        summary_writer.close()

# Script entry point: run training when executed directly (not on import).
if __name__ == '__main__':
    main()