"""
@Time    : 2019/10/14 9:30
@Author  : CcH
"""
import os
import torch
from torch.autograd import Variable
from torch import nn
from tqdm import tqdm
from torch import optim
from tensorboardX import SummaryWriter
from DataLoader.Picture_Process import file_2_allpicture
import argparse
from random import shuffle
import math
from model.model import LipNet,LipSeqLoss
# from model.resnet3d import resnet10,LipSeqLoss
# from model.merge_model import MergeLipNet
# from model.Dense3D import LipNet
import numpy as np


# TensorBoard writer; training scalars are written under ./logs.
writer = SummaryWriter(log_dir='logs')
# device = torch.device('cuda:0')

def parse_arguments():
    """Build the training CLI and return the parsed hyperparameter namespace."""
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('-epochs', type=int, default=200, help='number of epochs for train')
    parser.add_argument('-batch_size', type=int, default=20)
    parser.add_argument('-lr', type=float, default=0.001, help='initial learning rate')
    return parser.parse_args()


# Global step counter for TensorBoard scalars; incremented once per training batch.
train_batch_num = 0


def remove_len0(path):
    """Return the names of sub-directories of *path* that contain more than one file.

    Despite the name, directories with exactly one file are also filtered
    out — only ids with at least two frame images are kept.
    """
    return [entry for entry in os.listdir(path)
            if len(os.listdir(path + "//" + entry)) > 1]


def train(model, path, trian_list, train_labels, optimizer, batch_size, criterion):
    """Run one training epoch over *trian_list* and return the mean batch loss.

    Args:
        model: network mapping a batch of frame sequences to log-probabilities.
        path: directory containing one sub-directory of frames per sample id.
        trian_list: sample ids to train on (shuffled in place each call;
            parameter name kept as-is for caller compatibility).
        train_labels: mapping of sample id -> integer class label.
        optimizer: optimizer updating ``model``'s parameters.
        batch_size: number of samples per batch.
        criterion: loss taking (output, target), e.g. ``nn.NLLLoss``.

    Returns:
        Average loss over all batches of the epoch.
    """
    model.train()
    total_loss = 0
    shuffle(trian_list)  # new sample order every epoch
    batch_num = math.ceil(len(trian_list) / batch_size)
    for idx in tqdm(range(batch_num), desc='  - (Training)  '):
        dir_batch = trian_list[idx * batch_size: idx * batch_size + batch_size]
        batch_out = []
        trg = []
        for id in dir_batch:
            # file_2_allpicture returns (frame tensor, frame count); the
            # frame count was only needed by the commented-out LipSeqLoss
            # variant, so it is discarded here.
            src_id, _ = file_2_allpicture(path + "//" + id)
            batch_out.append(src_id)
            trg.append([train_labels[id]])
        # Variable() has been a deprecated no-op since PyTorch 0.4 — use
        # plain tensors and move them to the GPU directly.
        src = torch.cat(batch_out, dim=0).cuda()
        trg = torch.LongTensor(trg).cuda()
        optimizer.zero_grad()
        output = model(src)
        loss = criterion(output, trg.squeeze(1))
        global train_batch_num
        train_batch_num += 1
        writer.add_scalar('Train', loss.item(), train_batch_num)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()  # .item() replaces the deprecated .data.item()
    return total_loss / batch_num


def test(model, path, test_list, train_labels, criterion):
    """Evaluate *model* on *test_list* and return (mean loss, accuracy).

    Args:
        model: network mapping a frame sequence to log-probabilities.
        path: directory containing one sub-directory of frames per sample id.
        test_list: sample ids to evaluate.
        train_labels: mapping of sample id -> integer class label.
        criterion: loss taking (output, target), e.g. ``nn.NLLLoss``.

    Returns:
        Tuple of (average loss per sample, fraction classified correctly).
    """
    model.eval()
    eval_loss = 0.0
    zq_num = 0  # count of correct predictions
    for id in tqdm(test_list, desc='  - (Test)  '):
        src, pct_len = file_2_allpicture(path + "//" + id)
        src = src.cuda()
        target = torch.LongTensor([[train_labels[id]]]).cuda()
        with torch.no_grad():
            output = model(src)
            loss = criterion(output, target.squeeze(1))
            eval_loss += loss.item()  # .item() replaces deprecated .data.item()
            # Predicted class = argmax over the class dimension.
            pred = output.cpu().max(1)[1].item()
            if pred == train_labels[id]:
                zq_num += 1
    # BUGFIX: accuracy was divided by the hard-coded constant 313 (a stale
    # validation-set size); divide by the actual number of evaluated samples.
    return eval_loss / len(test_list), zq_num / len(test_list)


def epoch_2_lr(e, value, w_decay):
    """Return the (learning_rate, weight_decay) pair for epoch *e*.

    Implements a stepped decay schedule starting from the initial rate
    *value*; from epoch 150 onward the weight decay is lowered as well.

    Args:
        e: 1-based epoch number.
        value: initial learning rate.
        w_decay: initial weight decay.

    Returns:
        Tuple of (learning rate rounded to 6 decimals, weight decay).
    """
    lr = value
    w_dy = w_decay
    if e < 50:
        pass  # keep the initial rate
    elif e < 80:
        lr = lr / 2
    elif e < 100:
        lr = lr / 4
    elif e < 110:
        lr = lr / 8
    elif e < 130:
        lr = lr / 10
    elif e < 150:
        # BUGFIX: the original had a second, identical `130 <= e < 150`
        # branch (lr/20, w_dy=0.004) that was unreachable; it is removed
        # and the first branch (lr/16) kept.
        lr = lr / 16
    else:
        # BUGFIX: the original tested `e > 150`, so e == 150 fell through
        # and returned the undecayed initial rate; `else` closes that gap.
        lr = lr / 50
        w_dy = 0.004
    return round(lr, 6), w_dy


def main(path):
    """Train and validate the lip-reading model on the data under *path*.

    Loads the precomputed label mappings, keeps only samples with more than
    one frame, holds out one random sample per label for validation, then
    trains with a stepped learning-rate schedule, checkpointing whenever the
    train loss, validation loss or validation accuracy improves.
    """
    args = parse_arguments()
    criterion = nn.NLLLoss()
    # criterion = LipSeqLoss().cuda()

    # train_labels: sample id -> class index; word2idx: word -> class index.
    chinese_info = torch.load("./DataLoader/train_labels.pt")
    train_labels = chinese_info["train_labels"]
    word2idx = chinese_info["word2idx"]

    # Group usable sample ids (dirs with more than one frame) by class label.
    youxiao_id = set(remove_len0(path))  # set: O(1) membership test
    label2id = {}
    for key, value in train_labels.items():
        if key in youxiao_id:
            label2id.setdefault(value, []).append(key)

    # Hold out one randomly chosen sample per label for validation.
    test_id = []
    train_id = []
    for ids in label2id.values():
        # BUGFIX: np.random.randint(0, len(ids) - 1) could never pick the
        # last sample (the high bound is exclusive) and raised ValueError
        # for labels with a single sample; use the full index range.
        pick = int(np.random.randint(0, len(ids)))
        test_id.append(ids.pop(pick))
        train_id = train_id + ids
    shuffle(test_id)
    shuffle(train_id)

    labels_len = len(word2idx)

    model = LipNet(labels_len).cuda()
    # model.load_state_dict(torch.load("./weight/Lip_Recognition_acc.pkl"))
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.005)

    min_loss = 21        # best validation loss so far (initial threshold)
    acc_min = 0.0        # best validation accuracy so far
    min_train_loss = 21  # best training loss so far (initial threshold)
    for e in range(1, args.epochs + 1):
        # Stepped learning-rate schedule. BUGFIX: the original re-created
        # the Adam optimizer on every epoch >= 50, discarding its moment
        # estimates each time; updating param_groups in place keeps the
        # optimizer state while applying the same rates at the same epochs.
        if e >= 150:
            lr = args.lr / 100
        elif e >= 120:
            lr = args.lr / 50
        elif e >= 90:
            lr = args.lr / 10
        elif e >= 70:
            lr = args.lr / 5
        elif e >= 50:
            lr = args.lr / 2
        else:
            lr = args.lr
        for group in optimizer.param_groups:
            group['lr'] = lr

        epoch_loss = train(model, path, train_id, train_labels, optimizer, args.batch_size, criterion)
        print("第" + str(e) + "epoch train_loss :", epoch_loss)
        eval_loss, acc = test(model, path, test_id, train_labels, criterion)
        print("第" + str(e) + "epoch eval_loss :", eval_loss)
        print("第" + str(e) + "epoch eval_acc :", acc)

        # Checkpoint on every new best metric.
        if eval_loss < min_loss:
            torch.save(model.state_dict(), 'Lip_Recognition_val_loss' + '.pkl')
            print("model val loss save!")
            min_loss = eval_loss
        if acc > acc_min:
            torch.save(model.state_dict(), 'Lip_Recognition_val_acc' + '.pkl')
            print("model val acc save!")
            acc_min = acc
        if epoch_loss < min_train_loss:
            torch.save(model.state_dict(), 'Lip_Recognition' + '.pkl')
            print("model train loss save!")
            min_train_loss = epoch_loss
    writer.close()


if __name__ == '__main__':
    # Directory of the preprocessed training data (one folder of mouth-crop
    # frames per sample id).
    path = r"D:\XW_Bank\LipRecognition\train\lip_100_50_train"
    main(path)