import random
import sys

sys.path += ['./']
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import AutoModelForSeq2SeqLM, AutoConfig, AutoTokenizer, MBartTokenizer, XLMRobertaTokenizer, \
    AutoModelForSequenceClassification, MBartForConditionalGeneration
from accelerate import Accelerator
from utils.evaluation import eval_f1, eval_all
from utils.evaluation import f1_score
from utils.io import write_file
import torch.nn.functional as F
import nltk
import json
from tqdm import tqdm
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import torch.nn as nn
import copy
import torch
import os
import logging
import warnings
import codecs

from utils.io import read_pkl, read_file
from utils.mine_utils import init_seed, random_index

warnings.filterwarnings("ignore")
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)

# mBART-large-cc25 language-code token ids, keyed by 2-letter language prefix
# (e.g. 'en' -> 250004, 'zh' -> 250025). The full mBART codes ('en_XX', ...)
# are truncated to their first two characters, which is unambiguous here.
_MBART_LANG_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX',
                     'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN',
                     'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT',
                     'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO',
                     'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']

# Token ids are consecutive starting at 250001, in the order listed above.
mbart_lang_to_id = {code[:2]: 250001 + offset
                    for offset, code in enumerate(_MBART_LANG_CODES)}


def get_best_match(query, candidates):
    """Return ``[f1, line]`` for the candidate with the highest unigram-F1
    overlap against *query* (both sides compared lower-cased).

    Raises ValueError when *candidates* is empty (as ``max`` does).
    """
    lowered_query = query.lower()
    scored = ([f1_score(lowered_query, [candidate.lower()]), candidate]
              for candidate in candidates)
    return max(scored, key=lambda pair: pair[0])


def norm_text(text):
    """Identity normalisation: return *text* unchanged.

    Tokenised lower-casing was deliberately disabled; the hook is kept so
    callers do not have to change if it is ever re-enabled.
    """
    return text


def load_data(context_file, pool_pkl, knowledge_file):
    """Load WoW-style dialogue data.

    Args:
        context_file: tab-separated lines "topic\\tturn1\\t...\\tresponse".
        pool_pkl: pickle holding, per example, a list of knowledge row ids
            (first id is the gold knowledge — see the pool usage below).
        knowledge_file: lines "topic\\tsentence", indexed by the pool ids.

    Returns:
        ``(contexts, knowledgess, responses)`` — per example: a list of
        lower-cased, speaker-tagged turns prefixed by "<topic> ... </s>";
        a list of "<topic> t </s> sentence" knowledge strings (gold first);
        and the lower-cased response string.
    """
    print(f"load context from {context_file}, load pool from {pool_pkl}, load knowledge from {knowledge_file}.")
    con_contexts = read_file(context_file)
    knowledges = read_file(knowledge_file)
    pool_ids = read_pkl(pool_pkl)
    tag = ["<wizard> ", "<apprentice> "]
    contexts = []
    responses = []
    knowledgess = []
    for idx, conc in enumerate(con_contexts):
        con = conc.split("\t")
        topic = con[0]
        response = con[-1]
        context = con[1:-1]
        # Choose the starting speaker from turn-count parity so the final
        # context turn is always tagged <apprentice> (the wizard replies).
        flag = 0 if len(context) % 2 == 0 else 1
        for i in range(1, len(context) + 1):
            con[i] = (tag[flag] + con[i] + " </s>").lower()
            flag = (flag + 1) % 2
        con[0] = f"<topic> {topic} </s>"

        contexts.append(con[:-1])  # drop the raw response from the context
        responses.append(response.lower())

        # Build the knowledge candidates in pool order (gold id comes first).
        pool_id = pool_ids[idx]
        knowledge = [("<topic> " + knowledges[pid].split("\t")[0] + " </s> " + knowledges[pid].split("\t")[1]).lower() for pid in pool_id]
        knowledgess.append(knowledge)
    print(f"total examples {len(contexts)}")
    return contexts, knowledgess, responses

def load_ckgc_data(context_file, response_file, pool_file, knowledge_file):
    """Load CKGC-style data where context/response/pool/knowledge live in
    four separate, line-aligned files.

    Args:
        context_file: lines "topic </s> turn1 </s> turn2 ...".
        response_file: one response per line.
        pool_file: lines like "[3, 17, 42]" listing knowledge row ids.
        knowledge_file: knowledge sentences, indexed by the pool ids.

    Returns:
        ``(contexts, knowledgess, responses)`` shaped like ``load_data``'s
        output: speaker-tagged lower-cased turns, knowledge candidate lists,
        and lower-cased responses.
    """
    print(f"load context from {context_file}, load response from {response_file}, load pool from {pool_file}, "
          f"load knowledge from {knowledge_file}")
    context = read_file(context_file)
    response = read_file(response_file)
    pool = read_file(pool_file)
    knowledge = read_file(knowledge_file)

    # Each pool line looks like "[1, 2, 3]"; strip the brackets and parse ids.
    pool_ids = [[int(i) for i in p[1:-1].split(",")] for p in pool]

    contexts, knowledgess, responses = [], [], []
    tag = ["<wizard> ", "<apprentice> "]
    for idx, con in enumerate(context):
        con = con.split(" </s> ")
        topic = con[0]
        oncon = con[1:]
        # BUG FIX: parity must come from this conversation's turn count
        # (len(oncon)), not the number of dataset lines (len(context)).
        # This mirrors load_data, which keys the starting speaker off the
        # turn count so the final turn is always the apprentice's.
        flag = 0 if len(oncon) % 2 == 0 else 1
        for i in range(1, len(oncon) + 1):
            con[i] = (tag[flag] + con[i] + " </s>").lower()
            flag = (flag + 1) % 2
        con[0] = f"<topic> {topic} </s>"

        contexts.append(con)
        responses.append(response[idx].lower())

        pool_id = pool_ids[idx]
        # NOTE(review): pid == 0 yields "<topic> k </s> k" (NOT lower-cased)
        # while every other pid yields a lower-cased "<topic> k" — the
        # asymmetry looks suspicious; confirm against the knowledge format.
        knowledges = [f"<topic> {knowledge[pid]}".lower() if pid != 0 else f"<topic> {knowledge[pid]} </s> {knowledge[pid]}" for pid in pool_id]
        knowledgess.append(knowledges)
    print(f"total examples {len(contexts)}")
    return contexts, knowledgess, responses


class GenerateData(Dataset):
    """Dataset packing knowledge candidates + dialogue history into one
    encoder input, paired with a language-token-prefixed response for
    seq2seq training.

    ``context``, ``knowledge`` and ``response`` are the parallel lists
    produced by ``load_data`` / ``load_ckgc_data``. Token id 1 is used as
    padding throughout (see ``collate_fn``).
    """

    def __init__(self, context, knowledge, response, tokenizer, context_len=256, response_len=128, lang_code=250004,
                 con_num=3, neg_num=7, pad_none=True, shuffle=True):
        # NOTE(review): super(Dataset, self) skips Dataset's own __init__;
        # probably meant super(GenerateData, self) — harmless as written.
        super(Dataset, self).__init__()
        self.context = context          # per-example list of speaker-tagged turns
        self.knowledge = knowledge      # per-example candidate list, gold knowledge first
        self.response = response        # per-example lower-cased response string
        self.tokenizer = tokenizer
        self.context_len = context_len  # max history tokens (topic kept, oldest turns dropped)
        self.response_len = response_len
        self.lang_code = lang_code      # mBART language token id prepended to the response
        self.con_num = con_num          # candidates concatenated into the encoder input
        self.neg_num = neg_num          # negatives kept per example
        self.pad_none = pad_none        # pad negatives with '<none>' up to neg_num
        self.shuffle = shuffle          # move gold knowledge to a random early slot
        print(f"GenerateData, context_len {context_len}, response_len {response_len}, lang_code {lang_code}, con_num {con_num}, "
              f"neg_num {neg_num}, pad_none {pad_none}, shuffle {shuffle}")

    def __getitem__(self, index):
        """Return ``(encoder_input_ids, decoder_input_ids)`` as 1-D tensors."""
        context = self.context[index]
        response = self.response[index]
        knowledge = self.knowledge[index]

        topic = context[0]
        topic_len = len(self.tokenizer.encode(topic, add_special_tokens=False))
        his = self.tokenizer.encode(topic + ' ' + ' '.join(context[1:]))[:-2]  # default: xx </s> lang_token, skip </s> and lang_token
        # Over-long histories keep the topic prefix and truncate from the
        # front, so the most recent turns are retained.
        his = his[:topic_len] + his[-(self.context_len - topic_len):] if len(his) > self.context_len else his

        neg = knowledge[1:]
        if self.pad_none:
            random.shuffle(neg)
            if self.shuffle:
                # Swap the gold knowledge into a random slot among the first
                # candidates. NOTE(review): this mutates self.knowledge[index]
                # in place, so the gold position drifts across epochs —
                # confirm that is intended.
                i = random_index(k=self.con_num, pool_size=len(knowledge))
                knowledge[i], knowledge[0] = knowledge[0], knowledge[i]
            neg = neg + ['<none>'] * (self.neg_num - len(neg))

        neg = neg[:self.neg_num]
        knowledge = [knowledge[0]] + neg
        response = [self.lang_code] + self.tokenizer.encode(response, truncation=True, max_length=self.response_len)[:-1]  # skip lang_token and put lang_token forward

        # Concatenate the first con_num candidates (each prefixed with
        # "<knowledge>") ahead of the dialogue history.
        con_knowledge = []
        for k in knowledge[:self.con_num]:
            con_knowledge += self.tokenizer.encode("<knowledge> " + k)[:-1]  # skip lang_token
        batch_context = con_knowledge[:self.con_num * self.response_len] + his
        return torch.tensor(batch_context), torch.tensor(response)

    def __len__(self):
        """Number of examples in the dataset."""
        return len(self.context)

    @staticmethod
    def collate_fn(data):
        """Pad a list of (context, response) tensor pairs into batch tensors.

        Padding value 1 is used as the pad id; the ``*_mask`` tensors are 1
        where the token is real and 0 where it is padding.
        """
        batch_context, response = zip(*data)
        context = pad_sequence(batch_context, batch_first=True, padding_value=1)
        response = pad_sequence(response, batch_first=True, padding_value=1)
        return {
            'context': context,
            'context_mask': context.ne(1).long().detach(),
            'response': response,
            'response_mask': response.ne(1).long().detach()
        }

# seen 25
# unseen 20
def main(language):
    """Fine-tune mBART for knowledge-grounded response generation.

    Trains on ``./wow/train/{language}.txt`` with teacher forcing and a
    cross-entropy loss, saving one checkpoint per epoch to
    ``ckpt/mbart-generate-{language}/{epoch}.pt``. Device placement and
    distributed training are delegated to ``accelerate``.
    """
    init_seed(123456)
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 10
    batch_size = 4
    lr = 5e-5

    model_name = r"/data/tianhongtao-slurm/data/pretrain_models/mbart-large-cc25"
    ckpt_path = "ckpt"
    ckpt_name = f"mbart-generate-{language}"
    tokenizer = MBartTokenizer.from_pretrained(model_name)
    # Replace the lang-code map with the 2-letter-keyed version defined above.
    tokenizer.lang_code_to_id = mbart_lang_to_id

    print(f"epochs {epochs}, batch_size {batch_size}, learning_rate {lr}, mode train")
    print(f"model name '{model_name}', ckpt path '{ckpt_path}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    data_path = "./wow/train/"
    context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl", data_path + "0.txt")

    dataset = GenerateData(context, knowledge, response, tokenizer, context_len=256, response_len=128, lang_code=mbart_lang_to_id[language],
                           con_num=3, neg_num=7, pad_none=True, shuffle=True)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=True, num_workers=8)

    config = AutoConfig.from_pretrained(model_name)
    model = MBartForConditionalGeneration.from_pretrained(model_name, config=config)
    # NOTE(review): special tokens are added AFTER the dataset/loader were
    # built; this works only because the dataset shares this same tokenizer
    # object — confirm the ordering is intended.
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))

    optimizer = AdamW(model.parameters(), lr=lr)

    model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
    # Linear warmup over exactly one epoch, then linear decay.
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=len(data_loader), num_training_steps=epochs * len(data_loader))
    scheduler = accelerator.prepare(scheduler)

    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(data_loader, total=len(data_loader))
        losses = []
        for batch in tk0:
            # accelerate already placed the batch on the right device.
            predict = model(input_ids=batch["context"], attention_mask=batch["context_mask"], decoder_input_ids=batch["response"],
                           decoder_attention_mask=batch["response_mask"], return_dict=True)["logits"]

            # Shift by one: predict token t+1 from the prefix up to t.
            b_s, s_l, v_s = predict.size()
            predict = predict[:, :-1, :].reshape(-1, v_s)
            gt = batch["response"][:, 1:].reshape(-1)

            loss = F.cross_entropy(predict, gt, ignore_index=tokenizer.pad_token_id)

            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            losses.append(loss.item())
            tk0.set_postfix(loss=sum(losses) / len(losses))

        # Save an (unwrapped) checkpoint once per epoch, main process only.
        os.makedirs(f'{ckpt_path}/{ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model).state_dict(), f'{ckpt_path}/{ckpt_name}/{epoch}.pt')


def lower(text):
    """Recursively normalise *text*: strip, lower-case, and rejoin NLTK word
    tokens with single spaces. Non-string inputs are treated as iterables and
    normalised element-wise into a list."""
    if not isinstance(text, str):
        return [lower(element) for element in text]
    tokens = nltk.word_tokenize(text.strip().lower())
    return ' '.join(tokens).strip()


def test(mode, language):
    """Evaluate every available mbart-generate checkpoint with beam search.

    For each epoch checkpoint found under ``{ckpt_path}/{ckpt_name}``,
    generates responses for the chosen split, prints F1/eval_all metrics,
    and writes the predictions next to the checkpoint.
    """
    # mode: test_ckgc, test_seen, test_unseen, dev_seen, dev_unseen
    # language: en, es, fr, zh
    epochs = 10
    batch_size = 32
    con_num = 3

    model_name = r"../pretrain_models/mbart-large-cc25"
    ckpt_path = r"ckpt-pre"
    ckpt_name = f"mbart-generate-{language}"
    tokenizer = MBartTokenizer.from_pretrained(model_name)
    tokenizer.lang_code_to_id = mbart_lang_to_id

    print(f"epochs {epochs}, batch_size {batch_size}, mode {mode}")
    print(f"model name '{model_name}', ckpt path '{ckpt_path}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    # icato: "test" / "dev"; imode: "ckgc" / "seen" / "unseen".
    icato, imode = mode.split("_")
    if imode == "ckgc":
        data_path = f"./ckgc/{language}/"
        context, knowledge, response = load_ckgc_data(f"{data_path}/context.txt", f"{data_path}/response.txt",
                                                      f"{data_path}/pool.txt", f"{data_path}/knowledge.txt")
    elif imode in ["seen", "unseen"] and icato == "test":
        data_path = f"./wow/{imode}/"
        context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl", data_path + "0.txt")
    elif imode in ["seen", "unseen"] and icato == "dev":
        data_path = f"./wow/{icato}_{imode}/"
        context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl", data_path + "0.txt")
    else:
        raise AssertionError

    # shuffle=False keeps the gold knowledge in slot 0 for evaluation.
    dataset = GenerateData(context, knowledge, response, tokenizer, context_len=256, response_len=128, lang_code=mbart_lang_to_id[language],
                           con_num=con_num, neg_num=128, pad_none=True, shuffle=False)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=False)

    config = AutoConfig.from_pretrained(model_name)
    model = MBartForConditionalGeneration.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))
    model = model.cuda()
    for epoch in range(epochs):
        # Skip epochs whose checkpoint was never written.
        if not os.path.exists(f'{ckpt_path}/{ckpt_name}/{epoch}.pt'):
            continue
        print(f'Test {ckpt_path}/{ckpt_name}/{epoch}.pt')
        model.load_state_dict(torch.load(f'{ckpt_path}/{ckpt_name}/{epoch}.pt'))
        tk0 = tqdm(data_loader, total=len(data_loader))
        f1_report = []
        outputs_predict = []
        outputs_true = []
        model.eval()
        with torch.no_grad():
            for batch in tk0:
                batch = {k: v.cuda() for k, v in batch.items()}
                predict = model.generate(input_ids=batch["context"], attention_mask=batch["context_mask"],
                                         decoder_start_token_id=mbart_lang_to_id[language],
                                         num_beams=3, max_length=128)

                predict_sent = tokenizer.batch_decode(predict, skip_special_tokens=True)
                label_sent = tokenizer.batch_decode(batch["response"], skip_special_tokens=True)

                outputs_predict.extend(predict_sent)
                outputs_true.extend(label_sent)

                # Chinese is scored at character level (space out characters).
                if language == "zh":
                    f1 = [f1_score(' '.join(pred), [' '.join(label)]) for pred, label in zip(predict_sent, label_sent)]
                else:
                    f1 = [f1_score(pred, [label]) for pred, label in zip(predict_sent, label_sent)]
                f1_report.extend(f1)
                tk0.set_postfix(f1score=round(sum(f1_report) / len(f1_report), 4))
        if language == "zh":
            outputs_true = [' '.join(ot) for ot in outputs_true]
            outputs_predict = [' '.join(op) for op in outputs_predict]
        print(eval_all(outputs_predict, outputs_true))
        write_file(outputs_predict, f'{ckpt_path}/{ckpt_name}/{epoch}.txt')


def inference(mode, language):
    """Rank knowledge candidates with a fine-tuned XLM-R cross-encoder.

    Loads the epoch-5 ``xlmr-rank-{language}`` checkpoint, scores every
    candidate in each example's pool, prints hit@1 accuracy (gold knowledge
    is at pool position 0) and dumps the full rankings to JSON.
    """
    batch_size = 1
    con_num = 3
    model_name = r"../pretrain_models/xlm-roberta-base/"
    ckpt_path = "ckpt"
    ckpt_name = f"xlmr-rank-{language}"
    tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)
    print(f"Test {mode.split('_')[1]}")
    print(f"model name '{model_name}', ckpt path '{ckpt_path}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    data_path = f"./wow/{mode.split('_')[1]}/"
    context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl", data_path + "0.txt")
    dataset = GenerateData(context, knowledge, response, tokenizer, context_len=256, response_len=128,
                           lang_code=mbart_lang_to_id[language],
                           con_num=con_num, neg_num=128, pad_none=False)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=False)

    config = AutoConfig.from_pretrained(model_name)
    config.num_labels = 1  # single relevance score per candidate
    model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))
    model = model.cuda()
    epoch = 5
    print(f'Test {ckpt_path}/{ckpt_name}/{epoch}.pt')
    model.load_state_dict(torch.load(f'{ckpt_path}/{ckpt_name}/{epoch}.pt'))
    tk0 = tqdm(data_loader, total=len(data_loader))
    acc = []
    model.eval()
    saved = []
    with torch.no_grad():
        for batch in tk0:
            batch = {k: v.cuda() for k, v in batch.items()}
            # NOTE(review): collate_fn emits keys 'context'/'response'/...,
            # which do not match XLM-R's forward signature (input_ids,
            # attention_mask) — confirm a remapping happens elsewhere.
            output = model(**batch)
            # Higher logit = more relevant; negate so argsort is descending.
            logits = -output.logits.view(1, -1)
            rank = logits.argsort(-1).cpu().tolist()
            saved.extend(rank)
            # Hit@1: correct when the top-ranked candidate is the gold (id 0).
            acc.extend([int(rr[0] == 0) for rr in rank])
            tk0.set_postfix(acc=sum(acc) / len(acc))
    print(sum(acc) / len(acc))
    # Close the output file deterministically (original leaked the handle).
    with open(f'{ckpt_path}/{ckpt_name}/train-rank-{epoch}.json', 'w') as fout:
        json.dump(saved, fout)


if __name__ == '__main__':
    # Train and evaluate the pure generation model.
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # assumes a 24 GB GPU
    # main("zh")
    init_seed(123456)
    # test("test_seen", "zh")
    # test("test_unseen", "zh")
    test("test_ckgc", "zh")
    # inference()
