# -*- coding:utf-8 -*-
# editor: xab
# date: 2022/8/25

import argparse
import logging
from model import BertNer, BertNerLstm, BertNerPrompt
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from datasets2 import *
import torch
from tqdm import tqdm
import os
from eval import evaluate
from openprompt import PromptDataLoader


# Report CUDA availability at startup so a misconfigured environment is obvious.
print(torch.cuda.is_available())

# Fall back to CPU when no GPU is present so later `.to(device)` calls don't
# crash on CPU-only machines (the original hard-coded 'cuda:0' unconditionally).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def train(model, args):
    """Train `model` on the NER train split, evaluating after every epoch.

    Args:
        model: a BertNer-style module exposing
            ``calc_loss((token_ids, segment_ids), labels) -> (loss, logits)``.
        args: parsed CLI namespace; uses save_path, rpath, seed, ents2ids,
            intent2ents, id2ent, dataroot, train_file, test_file, lr,
            batch_size and epoches.

    Side effects:
        Creates ``saved/<args.save_path>/``, writes evaluation records to
        ``args.rpath`` inside it, and saves a ``.pt`` checkpoint whenever any
        of the six evaluation metrics improves on its best-so-far value.
    """
    savepath = os.path.join('saved', args.save_path)
    # exist_ok=True replaces the original try/except-FileExistsError dance.
    os.makedirs(savepath, exist_ok=True)

    rpath = os.path.join(savepath, args.rpath)

    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)

    dict_file = pickle.load(open(args.ents2ids, 'rb'))
    t_2_ents = pickle.load(open(args.intent2ents, 'rb'))
    id2label = pickle.load(open(args.id2ent, 'rb'))
    my_dataset = mydataset(data_file=os.path.join(args.dataroot, args.train_file), ent_type='ent',
                           dict_file=dict_file, intent_2_ent=t_2_ents)
    test_dataset = mydataset(data_file=os.path.join(args.dataroot, args.test_file), ent_type='ent',
                             dict_file=dict_file, intent_2_ent=t_2_ents)

    # Honor --batch_size when given; fall back to the historical default of 32
    # (the original ignored the CLI flag and hard-coded 32).
    batch_size = args.batch_size or 32
    dataloader = DataLoader(my_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    optimizer = Adam(lr=args.lr, params=model.parameters())

    # Best-so-far values for the six metrics returned by evaluate(), in order:
    # word_precision, word_recall, word_f1, precision, recall, f1.
    best = [0.0] * 6
    # int() guards against --epoches arriving as a float (range rejects floats).
    for epoch in range(int(args.epoches)):
        model.train()
        dataloader_tqdm = tqdm(dataloader)
        for tokenized_ids, segment_ids, slot_values, _ in dataloader_tqdm:
            try:
                tokenized_ids = tokenized_ids.long().to(device)
                segment_ids = segment_ids.long().to(device)
                slot_values = slot_values.long().to(device)
                inputs = (tokenized_ids, segment_ids)

                optimizer.zero_grad()

                crfloss, ner_output = model.calc_loss(inputs, slot_values)
                crfloss.backward()
                optimizer.step()

                predicted_ner = torch.argmax(ner_output, dim=-1)

                # Token-level accuracy over the whole (batch, seq_len) grid;
                # equivalent to the original sum(sum(..)) / numel computation.
                acc = (predicted_ner == slot_values).float().mean().item()
                loss = crfloss.item()

                dataloader_tqdm.set_description("acc : {:3f} , loss : {:3f}".format(acc, loss))
            except RuntimeError as e:
                # Best-effort: skip a bad batch (e.g. CUDA OOM) but log it
                # instead of silently swallowing the error as before.
                logging.warning("skipping batch due to RuntimeError: %s", e)

        metrics = evaluate(model, test_dataloader, record_path=rpath, id2label=id2label)
        word_precision, word_recall, word_f1, precision, recall, f1 = metrics

        # Checkpoint whenever ANY metric beats its best-so-far value.
        if any(m > b for m, b in zip(metrics, best)):
            torch.save(model.state_dict(), os.path.join(savepath,
                                                    'epoch_{:2}_{:3f}_{:3f}_{:3f}_{:3f}_{:3f}_{:3f}.pt'.format(epoch,
                                                                                                               word_precision,
                                                                                                               word_recall,
                                                                                                               word_f1,
                                                                                                               precision,
                                                                                                               recall,
                                                                                                               f1)))
            best = [max(m, b) for m, b in zip(metrics, best)]


def test(model, args):
    """Evaluate `model` on the test split and return the six evaluation metrics.

    Fix: the original signature misspelled the parameter as ``mdoel``, so the
    body's references to ``model`` silently resolved to the module-level
    global instead of the argument. The sole caller passes the model
    positionally, so correcting the name is backward-compatible.

    Args:
        model: a trained BertNer-style module.
        args: parsed CLI namespace; uses ents2ids, intent2ents, id2ent,
            dataroot, test_file and rpath.

    Returns:
        (word_precision, word_recall, word_f1, precision, recall, f1)
    """
    dict_file = pickle.load(open(args.ents2ids, 'rb'))
    t_2_ents = pickle.load(open(args.intent2ents, 'rb'))
    id2label = pickle.load(open(args.id2ent, 'rb'))

    test_dataset = mydataset(data_file=os.path.join(args.dataroot, args.test_file),
                             ent_type='ent', dict_file=dict_file, intent_2_ent=t_2_ents)

    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    word_precision, word_recall, word_f1, precision, recall, f1 = evaluate(model, test_dataloader, args.rpath,
                                                                       use_mask_logit=False, id2label=id2label)

    return word_precision, word_recall, word_f1, precision, recall, f1


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--params_file", type=str, help="JSON configuration file")
    parser.add_argument("--ents2ids", type=str, default='data/ents2ids.pkl')
    parser.add_argument("--intent2ents", type=str, default='data/intent2ents.pkl')
    parser.add_argument("--id2ent", type=str, default='data/id2ent.pkl')
    parser.add_argument("--lr", type=float, default=1e-5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_class", type=int, default=78)
    # Fix: was type=float, which made `range(args.epoches)` crash with a
    # TypeError whenever --epoches was supplied on the command line.
    parser.add_argument("--epoches", type=int, default=10)
    parser.add_argument("--batch_size", type=int)
    parser.add_argument("--model_name_or_path", type=str, help="model_name_or_path", default='gpt2')
    parser.add_argument("--eval_only", action="store_true",
                        help="Perform evaluation only")
    parser.add_argument("--predict_only", action="store_true",
                        help="Perform evaluation only")
    parser.add_argument("--task", type=str, choices=('train', 'eval', 'test'))
    parser.add_argument("--checkpoint", type=str, default='saved/bert_base_crf_train_data_1027/epoch_56_0.538722_0.563281_0.550723_0.630557_0.588387_0.608738.pt', help="Saved checkpoint directory")
    parser.add_argument("--dataroot", type=str, default="data",
                        help="Path to dataset.")
    parser.add_argument("--train_file", type=str, default="train_500.json")
    parser.add_argument("--val_file", type=str, default="")
    parser.add_argument("--test_file", type=str, default="test_500.json")
    parser.add_argument("--output_file", type=str, default="", help="Predictions will be written to this file.")
    parser.add_argument("--save_path", type=str, default="bert_base_crf_train_data_1122")
    parser.add_argument("--rpath", type=str, default='case.txt')
    parser.add_argument("--debug", type=int, default=0,
                        help="If set, will only use a small number (==debug) of data for training and test.")
    parser.add_argument("--exp_name", type=str, default="",
                        help="Name of the experiment, checkpoints will be stored in saved/{exp_name}")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
                        help="Device (cuda or cpu)")
    args = parser.parse_args()

    dict_file = pickle.load(open(args.ents2ids, 'rb'))
    print(dict_file)

    # NOTE(review): these hard-coded overrides defeat the --task and
    # --checkpoint CLI flags (the checkpoint at the default path is never
    # loaded). Kept for parity with the original behavior — remove them once
    # confirmed they are debugging leftovers.
    args.task = 'train'
    args.checkpoint = None

    # num_class * 2 presumably accounts for B-/I- tags per entity type — TODO confirm.
    model = BertNer(num_class=args.num_class * 2).to(device)

    if args.checkpoint:
        model.load_state_dict(torch.load(args.checkpoint, map_location='cpu'))

    if args.task == 'train':
        train(model, args)
    elif args.task == 'test':
        # Surface the metrics instead of discarding the return value.
        metrics = test(model, args)
        print("test metrics:", metrics)