#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   classifier_lstm.py
@Contact :   xxzhang16@fudan.edu.cn

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2021/6/9 14:54   zxx      1.0         None
'''

import jieba
from torchtext.legacy.data import Field
import torch
from torch import optim
import torch.nn as nn
import numpy as np

import get_data_loader_torchtext
import models

import os
import time
import random
import argparse


DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def seed_torch(seed=13):
    """Seed every RNG (Python, NumPy, PyTorch) for reproducible experiments.

    Args:
        seed (int): value applied to all random-number generators.
    """
    random.seed(seed)
    # Disable hash randomization so experiments are reproducible.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # BUG FIX: the original tested `DEVICE == 'cuda'`, comparing a
    # torch.device object against a string — always False here, so the
    # CUDA seeding and cudnn determinism settings were never applied.
    # Check CUDA availability directly instead.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        # torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True


def metric(pred, label):
    """Compute classification accuracy from raw class scores.

    Args:
        pred: (batch, num_classes) tensor of class scores/logits.
        label: (batch,) tensor of integer class indices.

    Returns:
        float: fraction of samples whose argmax matches the label.
    """
    predicted_cls = torch.argmax(pred, dim=1)
    correct = torch.eq(predicted_cls, label)
    return torch.mean(correct, dtype=torch.float32).item()

def save_ckpt(model: nn.Module, optimizer: optim.Optimizer, lr, epoch, opt_msg, best_acc):
    """Persist the current best model to ./check_point/best_model.pth.

    Args:
        model: network whose state_dict is checkpointed.
        optimizer: optimizer whose state_dict is checkpointed.
        lr: learning rate recorded alongside the weights.
        epoch: epoch index recorded alongside the weights.
        opt_msg: stringified CLI options for provenance.
        best_acc: best validation accuracy seen so far.
    """
    ckpt_dir = './check_point'
    if not os.path.exists(ckpt_dir):
        os.mkdir(ckpt_dir)
    try:
        checkpoint = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr': lr,
            'epoch': epoch,
            'opt': opt_msg,
            'best acc': best_acc,
        }
        torch.save(checkpoint, ckpt_dir + '/best_model.pth')
        print('save finished')
    except RuntimeError as e:
        print(f'Error: {e}')


def train(
        model,
        dataloader,
        criterion: nn.CrossEntropyLoss,
        optimier: optim.Optimizer,
        epoch,
        f):
    """Run one training epoch and return the mean loss and accuracy.

    Args:
        model: classifier called as model(title, content).
        dataloader: torchtext iterator whose batches expose .label,
            .title and .content (the latter two as (ids, lengths) pairs).
        criterion: cross-entropy loss.
        optimier: optimizer stepping the model parameters.
        epoch: 1-based epoch number, used only in the log line.
        f: flooding level b; the optimized objective is |loss - b| + b.

    Returns:
        (mean loss, mean accuracy) over the non-skipped batches.
    """
    running_loss = 0.0
    running_acc = 0.0
    log_every = 5
    skipped = 0
    model.train()
    for step, batch in enumerate(dataloader):
        optimier.zero_grad()
        target = batch.label.to(DEVICE)
        # Skip degenerate batches of a single sample.
        if target.shape[0] == 1:
            skipped += 1
            continue
        title_ids, _ = batch.title
        content_ids, _ = batch.content
        title_ids = title_ids.to(DEVICE)
        content_ids = content_ids.to(DEVICE)

        logits = model(title_ids, content_ids)
        loss = criterion(logits, target)
        # "Flooding" regularization: backprop |loss - f| + f instead of loss.
        flood = (loss - f).abs() + f
        flood.backward()
        optimier.step()

        running_loss += loss.item()
        running_acc += metric(logits, target)

        if (step + 1) % log_every == 0:
            print(
                f'Train log: epoch: {epoch} [{step}/{len(dataloader)}({step / len(dataloader):.0%})]\t'
                f'loss: {running_loss / (step + 1)}\t'
                f'acc: {running_acc / (step + 1)}\n'
            )

    batches_used = len(dataloader) - skipped
    return running_loss / batches_used, running_acc / batches_used


def val(model, dataloader, criterion: nn.CrossEntropyLoss, epoch):
    """Evaluate the model for one epoch without gradient tracking.

    Args:
        model: classifier called as model(title, content).
        dataloader: torchtext iterator whose batches expose .label,
            .title and .content (the latter two as (ids, lengths) pairs).
        criterion: cross-entropy loss.
        epoch: 1-based epoch number, used only in the log line.

    Returns:
        (mean loss, mean accuracy) over the non-skipped batches.
    """
    running_loss = 0.0
    running_acc = 0.0
    log_every = 5
    skipped = 0
    model.eval()
    with torch.no_grad():
        for step, batch in enumerate(dataloader):
            target = batch.label.to(DEVICE)
            # Skip degenerate batches of a single sample.
            if target.shape[0] == 1:
                skipped += 1
                continue
            title_ids, _ = batch.title
            content_ids, _ = batch.content
            title_ids = title_ids.to(DEVICE)
            content_ids = content_ids.to(DEVICE)

            logits = model(title_ids, content_ids)
            loss = criterion(logits, target)

            running_loss += loss.item()
            running_acc += metric(logits, target)

            if (step + 1) % log_every == 0:
                print(
                    f'Val log: epoch: {epoch} [{step}/{len(dataloader)}({step / len(dataloader):.0%})]\t'
                    f'loss: {running_loss / (step + 1)}\t'
                    f'acc: {running_acc / (step + 1)}\n'
                )

    batches_used = len(dataloader) - skipped
    return running_loss / batches_used, running_acc / batches_used


def main(opt):
    """Train and evaluate the classifier described by the CLI options.

    Builds torchtext fields and iterators, trains for opt.epoch epochs,
    checkpoints the model with the best validation accuracy, and writes
    per-run and cumulative log files under ./log/.

    Args:
        opt: argparse.Namespace holding all hyper-parameters (see __main__).
    """
    EPOCHS = opt.epoch
    LR = opt.lr
    SEED = opt.seed_num
    seed_torch(SEED)
    WEIGHT_DECAY = opt.weight_decay

    TIME = time.strftime('%H-%M-%S')
    # Word-level mode segments with jieba; char-level expects pre-split text.
    if opt.vector_mode == 'word':
        def tokenize(x): return jieba.lcut(x)
    elif opt.vector_mode == 'char':
        def tokenize(x): return x.split()
    sentence_field = Field(
        sequential=True,
        tokenize=tokenize,
        lower=False,
        batch_first=True,
        include_lengths=True,
        init_token='BOS',
        eos_token='EOS')
    label_field = Field(sequential=False, use_vocab=False)
    train_iterator, test_iterator, vectors = get_data_loader_torchtext.get_iterator(
        opt, root_path='data', sentence_field=sentence_field, label_field=label_field, vectors_path='F:\\安装包\\temp')

    VOCAB_SIZE = len(sentence_field.vocab)

    model = models.Classifier(VOCAB_SIZE, opt)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(
        model.parameters(),
        lr=LR,
        weight_decay=WEIGHT_DECAY)
    model.to(DEVICE)
    criterion.to(DEVICE)
    # Initialize the embedding table from the pre-trained vectors and freeze
    # it. BUG FIX: the original wrote `requies_grad` (typo), which silently
    # created a new attribute and left the embedding trainable.
    model.embedding.weight.data.copy_(vectors)
    model.embedding.weight.requires_grad = False
    best_val_acc = -999
    best_epoch = -1
    log_dir = f'./log/{model.name}/'

    # BUG FIX: os.mkdir fails when the parent './log' does not exist yet
    # (total_log.txt below also assumes it); makedirs creates the full path.
    os.makedirs(log_dir, exist_ok=True)

    log_lst = []

    for e in range(EPOCHS):
        train_loss, train_acc = train(
            model, train_iterator, criterion, optimizer, e + 1, f=opt.f)
        val_loss, val_acc = val(model, test_iterator, criterion, e + 1)
        log = f'epoch: {e + 1}\ttrain loss: {train_loss}\ttrain acc: {train_acc}\t' \
              f'val loss: {val_loss}\tval acc: {val_acc}\n'
        print(log)
        log_lst.append(log)
        # Keep only the checkpoint with the best validation accuracy so far.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_epoch = e + 1
            save_ckpt(model, optimizer, LR, e, opt.__str__(), best_val_acc)

    # Per-run log: options, best result, then every epoch line.
    with open(log_dir + f'{LR}_{WEIGHT_DECAY}_{TIME}.txt', 'w') as log_file:
        log_file.write(opt.__str__() + '\n')
        log_file.write(f'best acc: {best_val_acc}, best epoch: {best_epoch}\n')
        log_file.writelines(log_lst)

    # Cumulative log across runs, appended to.
    total_log_fn = './log/total_log.txt'
    best_log = f'{model.name}——best acc: {best_val_acc}, best epoch: {best_epoch}\n\n'
    print(best_log)
    with open(total_log_fn, 'a') as log_file:
        log_file.write(opt.__str__() + '\n')
        log_file.write(best_log)


if __name__ == '__main__':
    # Command-line interface: every hyper-parameter of the experiment.
    # NOTE(review): boolean-like flags (-with_conv, -with_att, -with_dict)
    # are declared type=int and passed as 0/1 on the command line; defaults
    # are kept as the original bool literals for backward compatibility.
    parse = argparse.ArgumentParser()
    parse.add_argument(
        '-cuda',
        action='store_true',
        default=False,
        help='if True: use cuda. if False: use cpu')
    parse.add_argument('-lr', type=float, default=13e-4, help='learning rate')
    parse.add_argument('-f', type=float, default=0, help='flood of loss')
    parse.add_argument(
        '-weight_decay',
        type=float,
        default=1e-3,
        help='weight decay')
    parse.add_argument('-seed_num', type=int, default=13, help='random seed')
    parse.add_argument(
        '-batch_size',
        type=int,
        default=32,
        help='batch size number')
    parse.add_argument(
        '-with_conv',
        type=int,
        default=False,
        help='whether to use conv layer')
    parse.add_argument(
        '-with_att',
        type=int,
        default=True,
        help='whether to use attention')
    parse.add_argument(
        '-kernel_nums',
        type=int,
        default=256,
        help='num of kernels in conv layer')
    parse.add_argument(
        '-hidden_size',
        type=int,
        default=128,
        help='size of lstm hidden layer')

    parse.add_argument(
        '-vector_mode',
        type=str,
        default='word',
        help='mode of word vector, char or word')
    parse.add_argument(
        '-embedding_dim',
        type=int,
        default=100,
        help='dim of embedding, must match vector cache')
    parse.add_argument(
        '-with_dict',
        type=int,
        default=True,
        help='whether to use dict')

    # BUG FIX: help text was a copy-paste of -hidden_size's
    # ('size of lstm hidden layer'); this flag is the epoch count.
    parse.add_argument(
        '-epoch',
        type=int,
        default=7,
        help='number of training epochs')
    parse.add_argument('-min_freq', type=int, default=5,
                       help='build vocab\'s min word freq')
    opt = parse.parse_args()
    main(opt)
