# -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 18:06:15 2020

@author: 1
"""

import numpy as np
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import argparse
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from dataloader import SeaDataset
from utils import get_network
import settings as settings


def train(epoch):
    """Run one training epoch over ``sea_train_dataloader``.

    Relies on module-level globals configured in ``__main__``: ``net``,
    ``optimizer``, ``loss_function``, ``device``, ``args``, ``writer``,
    ``netname`` and ``sea_train_dataloader``.

    Args:
        epoch (int): 1-based epoch index (used for logging only).
    """
    net.train()  # training mode (affects dropout / batch-norm)
    # Open the per-run text log once per epoch instead of once per batch,
    # and let the context manager guarantee the handle is closed.
    log_path = os.path.join(settings.CHECKPOINT_PATH, netname, "train.txt")
    with open(log_path, "a+") as ftrain:
        for batch_index, (images, labels) in enumerate(sea_train_dataloader):
            # Variable() is a deprecated no-op since PyTorch 0.4; just move
            # the tensors to the selected device.
            images = images.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()                       # clear accumulated gradients
            outputs = net(images)                       # forward pass (logits)
            cur_loss = loss_function(outputs, labels)   # CrossEntropyLoss: mean over batch
            cur_loss.backward()                         # back-propagate
            optimizer.step()                            # parameter update

            # Predicted class = arg-max over logits; batch accuracy as a
            # float tensor (avoid .data — it bypasses autograd tracking).
            _, preds = torch.max(outputs, 1)
            correct_rate = preds.eq(labels).to(torch.float).mean()

            # Console progress line.
            print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}\tAcc: {:0.6f}'.format(
                cur_loss.item(),
                optimizer.param_groups[0]['lr'],  # current learning rate from the optimizer
                correct_rate,                     # accuracy of this batch only
                epoch=epoch,
                trained_samples=batch_index * args.b + len(images),
                total_samples=len(sea_train_dataloader.dataset)
            ))

            # Plain-text training log.
            ftrain.write('epoch_%d,iter_%d,Loss_%.03f,Acc_%.03f'
                         % (epoch,
                            batch_index + 1,
                            cur_loss.item(),  # mean loss (CrossEntropyLoss default reduction)
                            correct_rate,     # batch accuracy
                            ))
            ftrain.write('\n')

            # TensorBoard: global iteration index across all epochs.
            n_iter = (epoch - 1) * len(sea_train_dataloader) + batch_index + 1
            writer.add_scalar('Train/loss', cur_loss.item(), n_iter)
            writer.add_scalar('Train/Accuracy', correct_rate, n_iter)
    
    
def valid(epoch):
    """Evaluate on ``sea_valid_dataloader``; log and return overall accuracy.

    The caller wraps this in ``torch.no_grad()``. Uses module-level globals
    ``net``, ``loss_function``, ``device``, ``writer``, ``netname`` and
    ``sea_valid_dataloader``.

    Args:
        epoch (int): 1-based epoch index (used for logging only).

    Returns:
        torch.Tensor: scalar validation accuracy in [0, 1].
    """
    net.eval()  # evaluation mode (disables dropout, freezes batch-norm stats)
    correct = 0.0
    loss = 0.0
    for batch_index, (images, labels) in enumerate(sea_valid_dataloader):
        # Variable() is a deprecated no-op since PyTorch 0.4.
        images = images.to(device)
        labels = labels.to(device)

        outputs = net(images)
        # Accumulate a Python float so no computation graph is retained
        # even if a caller forgets torch.no_grad().
        loss += loss_function(outputs, labels).item()
        _, preds = torch.max(outputs, 1)
        correct += preds.eq(labels).sum()

    # Hoist the derived metrics instead of recomputing them four times.
    avg_loss = loss / len(sea_valid_dataloader)            # mean of per-batch mean losses
    accuracy = correct / len(sea_valid_dataloader.dataset)  # whole-set accuracy

    # Console summary.
    print('Valid: {epoch} Loss: {:0.4f}\tAcc: {:0.6f}'.format(
        avg_loss,
        accuracy,
        epoch=epoch
    ))

    # Plain-text validation log (context manager guarantees close).
    with open(os.path.join(settings.CHECKPOINT_PATH, netname, "valid.txt"), "a+") as fvalid:
        fvalid.write('epoch_%d,Loss_%.03f,Acc_%.03f'
                     % (epoch,
                        avg_loss,
                        accuracy,
                        ))
        fvalid.write('\n')

    # TensorBoard, one point per epoch.
    writer.add_scalar('Valid/loss', avg_loss, epoch)
    writer.add_scalar('Valid/Accuracy', accuracy, epoch)

    return accuracy


if __name__ == '__main__':

    # ---- hyper-parameters from the command line ----
    # NOTE: argparse's type=bool treats ANY non-empty string (even "False")
    # as True, so booleans must be parsed explicitly.
    def str2bool(v):
        return str(v).lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('-gpu', type=str2bool, default=True, help='use gpu or not')
    parser.add_argument('-w', type=int, default=1, help='number of workers for dataloader')
    parser.add_argument('-b', type=int, default=settings.BATCH_SIZE, help='batch size for dataloader')
    parser.add_argument('-s', type=str2bool, default=True, help='whether shuffle the dataset')
    parser.add_argument('-warm', type=int, default=0, help='warm up training phase')  # accepted but not used in this file
    parser.add_argument('-lr', type=float, default=1e-4, help='initial learning rate')
    parser.add_argument('-gpunum', type=int, default=0, help='use the kth gpu')
    parser.add_argument('-net', type=str, default='resnet50', help='network type')
    args = parser.parse_args()

    # Hardware selection: fall back to CPU when CUDA is unavailable.
    if args.gpu and torch.cuda.is_available():
        device = 'cuda:' + str(args.gpunum)
    else:
        device = 'cpu'

    # Build the network and move it to the chosen device (utils.py).
    net, netname = get_network(args, device)

    # ---- data loading and preprocessing ----
    # Per-channel mean/std used for input normalization.
    normMean = [0.4948052, 0.48568845, 0.44682974]
    normStd = [0.24580306, 0.24236229, 0.2603115]
    normTransform = transforms.Normalize(normMean, normStd)

    trainTransform = transforms.Compose([
        transforms.Resize(size=(128, 128)),  # operates on PIL.Image
        transforms.ToTensor(),
        normTransform
    ])

    validTransform = transforms.Compose([
        transforms.Resize(size=(128, 128)),  # operates on PIL.Image
        transforms.ToTensor(),
        normTransform
    ])

    sea_train_data = SeaDataset(transform=trainTransform, istrain=True)  # dataloader.py
    sea_valid_data = SeaDataset(transform=validTransform, istrain=False)
    # Honor the -w (num_workers) and -s (shuffle) options, which were
    # previously accepted on the command line but silently ignored.
    sea_train_dataloader = DataLoader(dataset=sea_train_data, batch_size=args.b,
                                      shuffle=args.s, num_workers=args.w)
    sea_valid_dataloader = DataLoader(dataset=sea_valid_data, batch_size=args.b,
                                      shuffle=args.s, num_workers=args.w)

    # Loss function and optimizer.
    loss_function = nn.CrossEntropyLoss()  # TODO: a focal loss is planned as a replacement
    optimizer = optim.Adam(net.parameters(), lr=args.lr)

    print('Start Training...')

    # TensorBoard output directory (layout from settings.py). Using
    # makedirs(..., exist_ok=True) fixes the original check, which tested
    # LOG_DIR but then created LOG_DIR/netname.
    os.makedirs(os.path.join(settings.LOG_DIR, netname), exist_ok=True)
    writer = SummaryWriter(log_dir=os.path.join(
             settings.LOG_DIR, netname, settings.TIME_NOW))

    # Checkpoint directory plus a filename template filled per save.
    checkpoint_dir = os.path.join(settings.CHECKPOINT_PATH, netname, settings.TIME_NOW)
    os.makedirs(checkpoint_dir, exist_ok=True)
    checkpoint_path = os.path.join(checkpoint_dir, '{net}-{epoch}-{type}.pth')

    best_acc = 0.0
    for epoch in range(1, settings.EPOCH + 1):
        train(epoch)  # one full pass over the training set
        with torch.no_grad():
            acc = valid(epoch)  # one full pass over the validation set

            # Save the best-so-far checkpoint; a 'best' epoch deliberately
            # skips the regular save below (original behavior preserved).
            # NOTE(review): optimizer state is not checkpointed — confirm
            # resuming training is not required.
            if best_acc < acc:
                torch.save(net.state_dict(), checkpoint_path.format(net=args.net, epoch=epoch, type='best'))
                best_acc = acc
                continue

            # Periodic checkpoint; always save on the final epoch too.
            if (not epoch % settings.SAVE_EPOCH) or (epoch == settings.EPOCH):
                torch.save(net.state_dict(), checkpoint_path.format(net=args.net, epoch=epoch, type='regular'))

    writer.close()  # flush and close the TensorBoard event file