import argparse
import json
import os
import torch.nn as nn
from torch.optim import AdamW

from torch.utils.data import DataLoader
from transformers import AutoModelForSeq2SeqLM, AutoConfig, BertTokenizer

from constants import DOMAIN_SLOT_TAG, VALUE_SLOT_TAG, STATUS
from src.dataset import TripleExtraDataset
from src.trainer import Trainer
from src.utils import load_model_and_parallel


def define_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default='data', type=str)
    parser.add_argument('--model_name_or_path', default='./resources/bart-base-chinese')
    parser.add_argument('--config_name', default='./resources/bart-base-chinese')
    parser.add_argument('--tokenizer_name', default='./resources/bart-base-chinese')
    parser.add_argument('--ontology', default='./data/ontology.json')
    parser.add_argument('--examples', default='./data/example_dict.json', type=str)
    parser.add_argument('--output_dir', default="./output/", type=str)

    parser.add_argument('--t_batch_size', default=16, type=int)
    parser.add_argument('--e_batch_size', default=16, type=int)
    parser.add_argument('--gpu_ids', default='3', type=str)

    parser.add_argument('--max_source_length', default=500, type=int)
    parser.add_argument('--max_target_length', default=100, type=int)
    parser.add_argument('--dropout', default=0.1, type=float)
    parser.add_argument('--epochs', default=10, type=int)
    parser.add_argument('--lr', default=3e-5, type=float)
    parser.add_argument('--other_lr', default=1e-4, type=float)
    parser.add_argument('--max_grad_norm', default=1.0, type=float)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    parser.add_argument('--use_scheduler', default=False, type=bool)

    parser.add_argument('--num_beams', default=3, type=int)
    parser.add_argument('--eval_beams', default=3, type=int)
    parser.add_argument('--prediction_loss_only', default=False, type=bool)
    parser.add_argument('--predict_with_generate', default=True, type=bool)

    return parser.parse_args()


def add_ontology_special_tokens(f_name):
    """Build tokenizer special tokens and a label map from an ontology JSON file.

    Every ontology entry becomes a ``"key:value"`` token. Entries under the
    STATUS key are treated as status values; everything else is a slot.
    The label map assigns a contiguous integer id to every ``"slot-status"``
    combination, in enumeration order.

    Args:
        f_name: path to the ontology JSON file (``{key: [values...]}``).

    Returns:
        (special_tokens, labels_map): the flat list of ``"key:value"`` tokens
        and the ``{"slot-status": int}`` mapping.
    """
    # Close the file deterministically — the original json.load(open(...))
    # relied on garbage collection to release the handle.
    with open(f_name, 'r', encoding='utf-8') as f:
        ontology = json.load(f)

    special_tokens = []
    slots_name, values_name = [], []
    labels_map = {}

    for key, values in ontology.items():
        # Status values and slot names are collected separately, but every
        # token also goes into the flat special-tokens list in file order.
        target = values_name if key == STATUS else slots_name
        for value in values:
            token = key + ':' + value
            target.append(token)
            special_tokens.append(token)

    # Cross every slot with every status value; len(labels_map) at insertion
    # time yields a unique contiguous id per pair.
    for slot in slots_name:
        for status in values_name:
            labels_map[slot + '-' + status] = len(labels_map)
    return special_tokens, labels_map


def main():
    """Run the full training pipeline: load model/tokenizer, build datasets,
    and hand everything to the Trainer."""
    args = define_args()
    config = AutoConfig.from_pretrained(
        # Bug fix: the parser defines ``model_name_or_path``; the original
        # fallback read ``args.model_path_or_name`` and would raise
        # AttributeError whenever --config_name / --tokenizer_name was empty.
        args.config_name if args.config_name else args.model_name_or_path,
        decoder_start_token_id=0,
        early_stopping=False,
        no_repeat_ngram_size=0,
        dropout=args.dropout
    )
    tokenizer = BertTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
    )

    # Register ontology tokens plus the two structural tags so they are never
    # split by the tokenizer.
    ontology_special_tokens, labels_map = add_ontology_special_tokens(args.ontology)
    all_special_tokens = ontology_special_tokens + [DOMAIN_SLOT_TAG] + [VALUE_SLOT_TAG]
    tokenizer.add_special_tokens({'additional_special_tokens': all_special_tokens})

    model = AutoModelForSeq2SeqLM.from_pretrained(
        args.model_name_or_path,
        config=config
    )
    # Resize embeddings BEFORE parallelizing/moving the model: calling
    # resize_token_embeddings on a DataParallel-wrapped model fails, and
    # resizing after device placement can leave the new rows misplaced.
    # (Assumes load_model_and_parallel wraps/moves the model — verify.)
    model.resize_token_embeddings(len(tokenizer))

    model, device = load_model_and_parallel(args, model)

    train_path = os.path.join(args.data_dir, 'train.json')
    dev_path = os.path.join(args.data_dir, 'dev.json')
    train_dataset = TripleExtraDataset(args, train_path, tokenizer)
    dev_dataset = TripleExtraDataset(args, dev_path, tokenizer)
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.t_batch_size)
    dev_loader = DataLoader(dev_dataset, shuffle=False, batch_size=args.e_batch_size)

    loss_function = nn.CrossEntropyLoss()
    optimizer = AdamW(model.parameters(), lr=args.lr)
    # Positional args per Trainer's signature; scheduler slot is None because
    # scheduling (if any) is governed by args.use_scheduler inside Trainer.
    trainer = Trainer(args, model, tokenizer, labels_map, train_loader, dev_loader, loss_function, optimizer, None,
                      device)
    trainer.train()


# Script entry point: run the full training pipeline.
if __name__ == '__main__':
    main()
