# -*- coding: utf-8 -*-

import os
import sys
import argparse
import torch
import numpy as np

from load_data import load_vocabs, reverse_k_v
from load_data import TrainIteratorWrapper, TestIteratorWrapper
from utils import load_vocabs_from_wv
from train_eval_test import train, eval, test, infer
from models.model.lstm_softmax import LSTM_Softmax
from models.model.bert_fc import Bert_FC
from models.model.textcnn import TextCNN
from models.model.lstm_attn import LSTM_Attn
from models.model.lstm_textcnn import LSTM_TextCNN
from models.model.lstm_cnn import LSTM_CNN
from models.model.linear_softmax import Linear_Softmax
from models.embedding.char_embedding import CharEmbedding
from models.embedding.char_word_embedding import CharWordEmbedding
from models.embedding.word_embedding import WordEmbedding
from models.embedding.bert_embeding import BertEmbedding
from models.embedding.joint_char_word_embedding import JointCharWordEmbedding


if __name__ == "__main__":
    # Usage: python3 main.py <embed> <model> <action> [--flags...]
    # The first three positionals are consumed manually; argparse only sees flags.
    if len(sys.argv) < 4:
        print("Model for entity type inference. Run: \n"
              "  python3 main.py embed model train/eval/test/infer\n")
        sys.exit(0)

    def _str2bool(value):
        """Parse a boolean CLI value.

        argparse's ``type=bool`` is a trap: ``bool("False")`` is True because
        any non-empty string is truthy, so ``--restore False`` could never
        disable the flag. Accept the common false spellings explicitly.
        """
        return str(value).strip().lower() not in ("false", "0", "no", "n", "")

    parser = argparse.ArgumentParser()
    parser.add_argument('--train-file', type=str, default='./data/train.txt', help='path of train dataset')
    parser.add_argument('--eval-file', type=str, default='./data/eval.txt', help='path of eval dataset')
    parser.add_argument('--test-file', type=str, default='./data/test.txt', help='path of test dataset')
    parser.add_argument('--char-vocabs', type=str, default='./pretrained/word_char/vocab.txt')
    parser.add_argument('--word-vocabs', action='store', type=str, default=None, help="word vocab file.")
    parser.add_argument('--type-vocabs', action='store', type=str, default="./data/type_vocabs.txt")
    parser.add_argument('--epochs', action='store', type=int, default=20)
    parser.add_argument('--batch_size', action='store', type=int, default=64)
    parser.add_argument('--lr', action='store', type=float, default=1e-3)
    parser.add_argument('--accumulation-steps', type=int, default=1, help="gredient accumulation.")
    parser.add_argument('--max-len', type=int, default=32, help="max sequence length for train to test.")
    # type=_str2bool (not type=bool) so "--restore False" actually disables restore.
    parser.add_argument('--restore', type=_str2bool, default=True)
    parser.add_argument('--checkpoint', action="store", type=str, default="./checkpoint/models.epochs", help="select a checkpoint to restore.")
    parser.add_argument('--save-path', action="store", type=str, default="./checkpoint/models.epoch{}")
    parser.add_argument('--use-cuda', action='store', type=_str2bool, default=True, help="whether use cuda.")
    parser.add_argument('--word-vec-path', action='store', type=str, default='./pretrained/word_char/sgns.target.word-character.char1-2.dynwin5.thr10.neg5.dim300.iter5')
    parser.add_argument('--pretrained-model-path', action='store', type=str, default=None, help='specify pretrained word_vec or bert.')
    parser.add_argument('--out-validate', action='store', type=str, default=None, help='post_test mode, output the validate.')
    parser.add_argument('--entity', action='store', type=str, default=None, help="entity sentence for infer.")
    # Skip program name + the three positionals handled above.
    args = parser.parse_args(sys.argv[4:])

    embed = sys.argv[1]   # embedding kind: char / word / char_word / bert / joint_char_word
    model = sys.argv[2]   # model kind: lstm / bert / linear / lstm_textcnn / lstm_cnn / textcnn / lstm_attn
    action = sys.argv[3]  # train / eval / test / infer
    # BERT tokenization uses different special-token spellings than our vocabs.
    is_for_bert = "bert" in (embed, model)
    UNK, PAD = ("<UNK>", "<PAD>") if not is_for_bert else ("[UNK]", "[PAD]")
    use_pe = False  # True if "lstm" not in model else False
    class_list = ["其他", "医学专科", "检查科目", "疾病", "病毒", "症状", "细菌", "药物"]

    # Resolve dataset / vocab paths from the CLI.
    train_file = args.train_file
    eval_file = args.eval_file
    test_file = args.test_file
    char_vocabs_file = args.char_vocabs
    word_vocabs_file = args.word_vocabs
    type_vocabs_file = args.type_vocabs

    # token->index maps plus the reverse index->token views used when decoding
    # predictions back to readable chars/tags.
    char_vocabs = load_vocabs(char_vocabs_file)
    type_vocabs = load_vocabs(type_vocabs_file)
    idx2char = reverse_k_v(char_vocabs)
    idx2tag = reverse_k_v(type_vocabs)

    unk_idx = char_vocabs.get(UNK)
    pad_idx = char_vocabs.get(PAD)

    # Cache the parsed pretrained word vectors next to the original text file so
    # later runs load the .npy matrix / vocab instead of re-parsing the text dump.
    word_vec_dir = os.path.abspath(os.path.dirname(args.word_vec_path))
    cache_wv_path = os.path.join(word_vec_dir, "wv.npy")
    cache_vocab_path = os.path.join(word_vec_dir, "vocab.txt")
    if args.word_vocabs:
        word_vocabs = load_vocabs(args.word_vocabs)
    else:
        word_vocabs = load_vocabs_from_wv(args.word_vec_path, cache_vocab_path)

    epochs = args.epochs
    batch_size = args.batch_size  # 64
    # BUG FIX: max_len was hard-coded to 32, silently ignoring the --max-len flag.
    max_len = args.max_len
    emb_dim = 300   # 300
    hidden_size = 512  # 256
    num_layers = 2
    keep_prob = 0.5
    use_cuda = args.use_cuda
    # Fall back to CPU when CUDA is requested but unavailable, instead of
    # crashing on the first .to(device) call.
    device = torch.device("cuda:0" if use_cuda and torch.cuda.is_available() else "cpu")

    # Fix all RNG seeds so repeated runs are comparable.
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # make cuDNN results repeatable across runs

    train_iter = TrainIteratorWrapper(train_file, batch_size, max_len, char_vocabs, word_vocabs, type_vocabs, unk_idx, pad_idx, is_for_bert, device)
    eval_iter = TrainIteratorWrapper(eval_file, batch_size, max_len, char_vocabs, word_vocabs, type_vocabs, unk_idx, pad_idx, is_for_bert, device)
    test_iter = TestIteratorWrapper(test_file, batch_size, max_len, char_vocabs, word_vocabs, unk_idx, pad_idx, is_for_bert, device)

    # Build the embedding layer named by the first positional argument.
    if embed == "char":
        embed = CharEmbedding(len(char_vocabs), emb_dim, max_len, pad_idx, device)
    elif embed == "word":
        if args.word_vec_path is None:
            raise Exception("must supply pretrained word vector path.")
        # BUG FIX: previously passed args.pretrained_model_path (default None)
        # although the guard above checks args.word_vec_path — now consistent
        # with the char_word branch below.
        embed = WordEmbedding(args.word_vec_path, emb_dim, pad_idx, None, cache_wv_path, cache_vocab_path)
    elif embed == "char_word":
        if args.word_vec_path is None:
            raise Exception("must supply pretrained word vector path.")
        embed = CharWordEmbedding(args.word_vec_path, emb_dim, pad_idx, use_pe, max_len, None, cache_wv_path, cache_vocab_path, device)
    elif embed == "bert":
        if args.pretrained_model_path is None:
            raise Exception("must supply pretrained bert model path.")
        d_ff = 256  # 512
        dropout = 0.5
        # 768 is the hidden size of the pretrained BERT base model.
        embed = BertEmbedding(args.pretrained_model_path, 768, d_ff, emb_dim, dropout, pad_idx, use_token=True)
    elif embed == "joint_char_word":
        embed = JointCharWordEmbedding(args.pretrained_model_path, args.word_vec_path, 300, 300, max_len, keep_prob, pad_idx, use_cuda, cache_wv_path, cache_vocab_path)
    else:
        # Stray quote removed from the original message ("model.'").
        raise Exception("Unsupported embedding model.")

    # Build the classifier named by the second positional argument.
    # BUG FIX: the original used `model in "lstm"` etc., which is a reversed
    # substring test (e.g. "ls" or "" would match "lstm"); compare exactly,
    # and raise on unknown names instead of silently falling through with
    # `model` still being a string.
    if model == "lstm":
        model = LSTM_Softmax(embed, len(type_vocabs), emb_dim, max_len, hidden_size, pad_idx=pad_idx, use_cuda=use_cuda)
    elif model == "bert":
        if not args.pretrained_model_path:
            raise Exception("pretrained bert path must be specified .")
        hidden_size = 768
        keep_prob = 0.5
        model = Bert_FC(args.pretrained_model_path, hidden_size, len(type_vocabs), keep_prob, use_cuda)
    elif model == "linear":
        model = Linear_Softmax(embed, emb_dim, len(type_vocabs), keep_prob, use_cuda)
    elif model == "lstm_textcnn":
        kernel_sizes = [2, 3, 4]
        kernel_nums = 32
        model = LSTM_TextCNN(embed, emb_dim, num_layers, hidden_size, len(type_vocabs), kernel_sizes, kernel_nums, max_len, keep_prob, pad_idx, use_cuda=use_cuda)
    elif model == "lstm_cnn":
        model = LSTM_CNN(embed, emb_dim, num_layers, hidden_size, len(type_vocabs), max_len, keep_prob, pad_idx, use_cuda)
    elif model == "textcnn":
        emb_dim = 300
        kernel_sizes = [1, 2, 3]  # [2, 3, 4, 5]
        kernel_nums = 32  # 128
        keep_prob = 0.5
        model = TextCNN(embed, len(type_vocabs), emb_dim, kernel_sizes, kernel_nums, max_len, pad_idx, keep_prob, use_cuda)
    elif model == "lstm_attn":
        emb_dim = 300  # 300
        num_layers = 2
        hidden_size = 256  # 256
        keep_prob = 0.5
        num_heads = 8
        attn_keep_prob = 0.1
        d_ff = 512
        # Hyper-parameter search notes (kept for reference):
        # ## hidden_size: 512
        # d_ff: 256, num_heads: 1 ,  Eval Loss:  0.89,  Eval Acc: 86.40%
        # d_ff: 512, num_heads: 1 ,  Eval Loss:   1.1,  Eval Acc: 88.10%
        # d_ff: 1024, num_heads: 1,  Eval Loss:   1.9,  Eval Acc: 86.60%
        # d_ff: 512, num_heads: 2,   Eval Loss:   1.4,  Eval Acc: 86.40%
        # d_ff: 512, num_heads: 4,   Eval Loss:   1.0,  Eval Acc: 86.30%
        # d_ff: 512, num_heads: 8,   Eval Loss:   1.4,  Eval Acc: 87.80%
        # ## hidden_size: 256
        # d_ff: 256, num_heads: 1,   Eval Loss:  0.98,  Eval Acc: 87.20%
        # d_ff: 512, num_heads: 1,   Eval Loss:   1.0,  Eval Acc: 87.10%
        # d_ff: 1024, num_heads: 1,  Eval Loss:   1.1,  Eval Acc: 87.00%
        # d_ff: 512,  num_heads: 8,  Eval Loss:   1.1,  Eval Acc: 87.30%
        model = LSTM_Attn(len(char_vocabs), len(type_vocabs), emb_dim, max_len, hidden_size, num_layers, keep_prob, num_heads, attn_keep_prob, d_ff, pad_idx, use_cuda)
    else:
        raise Exception("Unsupported model: {}".format(model))

    # Optionally restore weights before running the requested action.
    if args.restore and os.path.exists(args.checkpoint):
        # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
        model.load_state_dict(torch.load(args.checkpoint, map_location=device))
        print("restore model from %s ." % args.checkpoint)

    # Dispatch on the third positional argument.
    # BUG FIX: `action in "train"` is a reversed substring test ("t" or ""
    # would match); compare exactly.
    if action == "train":
        train(model, train_iter, eval_iter, args.lr, epochs, idx2char, idx2tag, save_path=args.save_path)
    elif action == "eval":
        eval(model, eval_iter, idx2char, idx2tag)
    elif action == "test":
        if args.out_validate is None:
            raise Exception("out-validate must be specified in test mode.")
        test(model, test_iter, idx2char, idx2tag, args.out_validate, test_file)
    elif action == "infer":
        # Message fixed: this branch is infer mode, not test mode.
        if args.entity is None:
            raise Exception("entity must be specified in infer mode.")
        infer(model, args.entity, char_vocabs, idx2char, idx2tag, is_for_bert, max_len, unk_idx, pad_idx, device)
    else:
        raise Exception("Unsupported action: {}".format(action))

""" Run:
python3 main.py lstm train ./data/train.txt ./data/eval.txt ./data/test.txt ./data/char_vocabs.txt ./data/type_vocabs.txt

python3 main.py bert train ./data/train.txt ./data/eval.txt ./data/test.txt ./pretrained/bert/vocab.txt ./data/type_vocabs.txt  \
--pretrained-model-path ./pretrained/bert \
--epochs 50 \
--batch_size 64 \
--lr 5e-5

"""
