import torch
import torch.nn as nn
from time import time
from numpy import inf
from torch import optim
from xlwt import Workbook
from os import mkdir, path
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from models.lprnet import OldLPRNet, MidLPRNet, NewLPRNet
from models.lprload import LPRDataLoader, collate_fn, valuation, sparse_tuple, adjust_lr


def train(train_dir='', val_dir='', pre_model='', net_=0, epoch=100, lr=(), batch_size=128, device=''):
    """Train an LPRNet license-plate recognition model with CTC loss.

    Args:
        train_dir: comma-separated training image directories.
        val_dir: comma-separated validation image directories.
        pre_model: path to a previously saved full model to resume from;
            empty string trains from scratch with fresh weight init.
        net_: which architecture to use — 0=OldLPRNet, 1=MidLPRNet, 2=NewLPRNet.
        epoch: number of training epochs.
        lr: learning-rate schedule parameters forwarded to ``adjust_lr``
            (semantics defined in models.lprload — TODO confirm).
        batch_size: mini-batch size for both training and validation.
        device: explicit torch device string (e.g. ``'cuda:1'``); empty
            string auto-selects ``cuda:0`` if available, else CPU.

    Side effects: creates a fresh ``logs/{net_}_{i}/`` run directory and
    writes TensorBoard logs, an .xls training record, and model checkpoints
    (best-accuracy snapshots plus a final ``last_{epoch}.pth``).
    """
    # Resolve the compute device and create a unique run directory.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') if not device else torch.device(device)
    if not path.exists('logs'):
        mkdir('logs')
    save_path = ''
    for i in range(1, 9999):
        save_path = f'logs/{net_}_{i}/'
        if not path.exists(save_path):
            mkdir(save_path)
            break
    # Per-epoch metrics accumulated here are dumped to Excel at the end.
    # (Header strings are part of the saved artifact — kept as-is.)
    sheet = [['轮数', '正确率', '全部正确', '长度错误', '字符错误', '随机Loss', '轮Loss', '总Loss', 'lr']]
    writer = SummaryWriter(save_path)
    writer.add_text(' ', '\n'.join([str(i) for i in [train_dir, val_dir, net_, epoch, lr, batch_size, device]]), 0)
    # Load (or build and initialize) the network.
    if pre_model:
        # NOTE(review): torch.load of a full model unpickles arbitrary code —
        # only load checkpoints you trust.
        net = torch.load(pre_model, map_location=device)
    else:
        def weights_init(m):
            """Kaiming init for conv weights, Xavier for BN weights, 0.01 biases."""
            for key in m.state_dict():
                if key.split('.')[-1] == 'weight':
                    if 'conv' in key:
                        nn.init.kaiming_normal_(m.state_dict()[key], mode='fan_out')
                    if 'bn' in key:
                        # Fixed: was nn.init.xavier_uniform(1), which passed an
                        # int instead of the tensor and raised TypeError.
                        nn.init.xavier_uniform_(m.state_dict()[key])
                elif key.split('.')[-1] == 'bias':
                    m.state_dict()[key][...] = 0.01

        net = [OldLPRNet(), MidLPRNet(), NewLPRNet()][net_]
        net.backbone.apply(weights_init)
        net.container.apply(weights_init)
        net.to(device)
    # Build datasets/loaders; net_ == 2 selects the "new" input pipeline.
    val_dataset = LPRDataLoader(path.expanduser(val_dir).split(','), new=net_ == 2)
    train_dataset = LPRDataLoader(path.expanduser(train_dir).split(','), new=net_ == 2)
    train_data_loader = DataLoader(train_dataset, batch_size, shuffle=True, num_workers=3, collate_fn=collate_fn)
    print(net, f'\n网络:{net_} 设备:{device} 轮数:{epoch} batch_size:{batch_size} l_r:{lr}')
    # Optimizer. The initial lr here is a placeholder: adjust_lr overwrites it
    # every step from the `lr` schedule tuple.
    optimizer = optim.RMSprop(net.parameters(), lr=0.0003, alpha=0.9, eps=1e-08, momentum=0.9)
    # CTC loss; class index 65 is the blank token (must match the charset size).
    ctc_loss = nn.CTCLoss(blank=65, reduction='mean').to(device)
    # Training loop.
    t0, acc, total_loss, loss, lr_ = time(), [], 0, 0, 0
    for i in range(1, epoch + 1):
        t1, loss_, = time(), 0
        for img, lab, lengths in train_data_loader:
            # .to(device) honors the user-selected device (the old
            # Variable(...).cuda() path always went to the default GPU).
            img = img.to(device)
            lab = lab.to(device)
            # input_lengths: fixed output width 18 per sample; target_lengths
            # come from the variable-length labels.
            input_lengths, target_lengths = sparse_tuple(18, lengths)
            lr_ = adjust_lr(optimizer, total_epoch=epoch, now_epoch=i, lr=lr)
            # CTCLoss expects (T, N, C) log-probabilities.
            pre = net(img).permute(2, 0, 1).log_softmax(2)
            optimizer.zero_grad()
            loss = ctc_loss(pre, lab, input_lengths, target_lengths)
            # Skip non-finite losses (inf/-inf/NaN) instead of poisoning the
            # gradients; the old `== inf` check missed -inf and NaN.
            if not torch.isfinite(loss):
                continue
            loss.backward()
            loss = loss.item()
            loss_ += loss
            optimizer.step()
        total_loss += loss_
        print(f'\n轮数:{i}/{epoch} Loss:{loss} 轮Loss:{loss_} 总Loss:{total_loss} 学习率:{lr_} 耗时:{time() - t1}')
        # Validate and log every epoch; keep a checkpoint whenever accuracy
        # ties or beats the best seen so far.
        acc_, true, false_1, false_2 = valuation(net, val_dataset, batch_size, device)
        acc.append(acc_)
        sheet.append([float(_) for _ in [i, acc_, true, false_1, false_2, loss, loss_, total_loss, lr_]])
        for tag, data in [('验证正确率', acc_), ('全部正确', true), ('长度错误', false_1), ('字符错误', false_2),
                          ('随机Loss', loss), ('单轮Loss', loss_), ('累计Loss', total_loss), ('学习率', lr_)]:
            writer.add_scalar(tag=tag, scalar_value=data, global_step=i)
        if acc_ == max(acc):
            torch.save(net, save_path + f'best_{len(acc)}.pth')
    # Persist the Excel log, TensorBoard writer, and final weights.
    book = Workbook(encoding='utf-8', style_compression=0)
    excel = book.add_sheet('数据', cell_overwrite_ok=True)
    for j in range(len(sheet)):
        for k in range(len(sheet[0])):
            excel.write(j, k, sheet[j][k])
    book.save(save_path + '训练记录.xls')
    writer.close()
    torch.save(net, save_path + f'last_{epoch}.pth')
    print(f'\n训练完成!总耗时{time() - t0}!\n权重及日志保存在{save_path}\n最佳测试{acc.index(max(acc)) + 1}:{max(acc)}')


if __name__ == "__main__":
    # Dataset locations for the CCPD license-plate images.
    train_dir_ = "../ccpd4/lpr/train"
    val_dir_ = "../ccpd4/lpr/val"
    train(
        train_dir_,
        val_dir_,
        pre_model='',
        net_=2,
        epoch=500,
        lr=(0.0005, 0.00002),
        batch_size=16,
        device='',
    )
