import random
import sys

sys.path += ['./']
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import AutoModelForSequenceClassification, BertTokenizer, AutoConfig, XLMRobertaTokenizer
from accelerate import Accelerator
from utils.evaluation import eval_f1, eval_all
from utils.evaluation import f1_score
from utils.io import write_file
import torch.nn.functional as F
import nltk
import json
from tqdm import tqdm
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import torch.nn as nn
import copy
import torch
import os
import logging
import warnings
import codecs

from utils.io import read_pkl, read_file
from utils.mine_utils import init_seed

warnings.filterwarnings("ignore")
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)

# wiki = json.load(open('dataset/wiki_small.json'))


def get_best_match(query, candidates):
    """Return the [f1, line] pair from *candidates* with the highest F1 overlap against *query*."""
    scored = ([f1_score(query.lower(), [cand.lower()]), cand] for cand in candidates)
    return max(scored, key=lambda pair: pair[0])


def norm_text(text):
    """Identity passthrough; NLTK-based normalization is intentionally disabled."""
    # Disabled variant kept for reference:
    # return ' '.join(nltk.word_tokenize(text)).strip().lower()
    return text


def load_data(context_file, pool_pkl, knowledge_file):
    """Load WoW-style training examples.

    Args:
        context_file: TSV lines "topic \\t turn_1 ... turn_k \\t response".
        pool_pkl: pickle holding, per example, a list of knowledge indices
            (index 0 is treated downstream as the gold knowledge).
        knowledge_file: TSV lines "topic \\t sentence", indexed by pool ids.

    Returns:
        (contexts, knowledgess, responses): contexts[i] is
        ["<topic> t </s>", "<wizard>/<apprentice> turn </s>", ...],
        knowledgess[i] the candidate knowledge strings, responses[i]
        the lower-cased gold response.
    """
    print(f"load context from {context_file}, load pool from {pool_pkl}, load knowledge from {knowledge_file}.")
    con_contexts = read_file(context_file)
    knowledges = read_file(knowledge_file)
    pool_ids = read_pkl(pool_pkl)
    tag = ["<wizard> ", "<apprentice> "]
    contexts = []
    responses = []
    knowledgess = []
    for id, conc in enumerate(con_contexts):
        con = conc.split("\t")
        topic = con[0]
        response = con[-1]
        context = con[1:-1]
        # Alternate speaker tags; parity of this dialogue's turn count
        # decides whether the first turn is tagged <wizard> or <apprentice>.
        flag = 0 if len(context) % 2 == 0 else 1
        for i in range(1, len(context) + 1):
            con[i] = (tag[flag] + con[i] + " </s>").lower()
            flag = (flag + 1) % 2
        con[0] = f"<topic> {topic} </s>"

        contexts.append(con[:-1])
        responses.append(response.lower())

        # Split each knowledge line once (the original split twice per id)
        # and drop the dead `knowledge = []` pre-assignment.
        knowledge = []
        for pid in pool_ids[id]:
            k_topic, k_text = knowledges[pid].split("\t")[:2]
            knowledge.append(f"<topic> {k_topic} </s> {k_text}".lower())
        knowledgess.append(knowledge)
    print(f"total examples {len(contexts)}")
    return contexts, knowledgess, responses

def load_ckgc_data(context_file, response_file, pool_file, knowledge_file):
    """Load CKGC evaluation examples (context/response/pool/knowledge in four files).

    Returns (contexts, knowledgess, responses) shaped like load_data's output.
    """
    print(f"load context from {context_file}, load response from {response_file}, load pool from {pool_file}, "
          f"load knowledge from {knowledge_file}")
    context = read_file(context_file)
    response = read_file(response_file)
    pool = read_file(pool_file)
    knowledge = read_file(knowledge_file)

    # Each pool line looks like "[3, 17, 42]"; strip brackets and parse ints.
    pool_ids = [[int(i) for i in p[1:-1].split(",")] for p in pool]

    contexts, knowledgess, responses = [], [], []
    tag = ["<wizard> ", "<apprentice> "]
    for id, con in enumerate(context):
        con = con.split(" </s> ")
        topic = con[0]
        oncon = con[1:]
        # Fix: compute speaker-tag parity from this dialogue's own turn
        # count (len(oncon)). The original used len(context) -- the number
        # of examples in the whole file -- so every dialogue got the same,
        # generally wrong, starting speaker (cf. load_data, which is correct).
        flag = 0 if len(oncon) % 2 == 0 else 1
        for i in range(1, len(oncon) + 1):
            con[i] = (tag[flag] + con[i] + " </s>").lower()
            flag = (flag + 1) % 2
        con[0] = f"<topic> {topic} </s>"

        contexts.append(con)
        responses.append(response[id].lower())

        pool_id = pool_ids[id]
        # NOTE(review): pid == 0 duplicates the knowledge line and skips
        # lower-casing -- presumably entry 0 is a special "no knowledge"
        # line; confirm against the data format before changing.
        knowledges = [f"<topic> {knowledge[pid]}".lower() if pid != 0 else f"<topic> {knowledge[pid]} </s> {knowledge[pid]}" for pid in pool_id]
        knowledgess.append(knowledges)
    print(f"total examples {len(contexts)}")
    return contexts, knowledgess, responses


class RankData(Dataset):
    """Dataset pairing a dialogue context with one positive and `neg_num`
    negative knowledge candidates; each item yields (neg_num + 1) token
    sequences, with the positive candidate always first."""

    def __init__(self, context, knowledge, response, tokenizer, context_len=256, response_len=128, neg_num=4,
                 pad_none=True):
        super().__init__()
        self.context = context          # list of per-example tagged turn lists
        self.knowledge = knowledge      # list of candidate lists; index 0 is the positive
        self.response = response        # gold responses (unused by __getitem__, kept for evaluation)
        self.tokenizer = tokenizer
        self.context_len = context_len  # max context tokens (topic prefix preserved)
        self.response_len = response_len  # max tokens per knowledge candidate
        self.neg_num = neg_num          # negatives per example
        self.pad_none = pad_none        # pad short candidate lists with '<none>' and shuffle negatives
        print(f"RankData, context_len {context_len}, response_len {response_len}, neg_num {neg_num}, pad_none {pad_none}")

    def __getitem__(self, index):
        """Return a list of (neg_num + 1) tensors: context + each candidate."""
        context = self.context[index]
        response = self.response[index]
        knowledge = self.knowledge[index]

        topic = context[0]
        topic_len = len(self.tokenizer.encode(topic, add_special_tokens=False))
        his = self.tokenizer.encode(topic + ' ' + ' '.join(context[1:]))[:-1]  # default: <cls> xx </s>, we can skip </s>
        # When too long, keep the topic prefix and the most recent tokens.
        his = his[:topic_len + 1] + his[-(self.context_len - topic_len - 1):] if len(his) > self.context_len else his

        neg = knowledge[1:]
        if self.pad_none:
            random.shuffle(neg)
            neg = neg + ['<none>'] * (self.neg_num - len(neg))
        neg = neg[:self.neg_num]
        knowledge = [knowledge[0]] + neg  # positive stays at index 0 (label 0 downstream)
        batch_context = []
        for k in knowledge:
            context = torch.tensor(his + self.tokenizer.encode(  # for knowledge, we can put <knowledge> forward <s>
                ' <knowledge> ' + norm_text(k), truncation=True, max_length=self.response_len))
            batch_context.append(context)
        return batch_context

    def __len__(self):
        return len(self.context)

    @staticmethod
    def collate_fn(data):
        """Flatten per-example candidate lists and pad into one rectangular batch.

        Fix: the original did `sum(zip(*data), [])`, which raises TypeError
        (zip yields tuples, and `[] + tuple` is invalid); the transpose would
        also interleave examples so the later `logits.view(-1, K)` regrouping
        mixed candidates from different examples. Flattening in example order
        keeps each example's [positive, negatives...] block contiguous.
        """
        batch_context = [seq for sample in data for seq in sample]
        # 1 is the XLM-R <pad> token id; the mask marks non-pad positions.
        context = pad_sequence(batch_context, batch_first=True, padding_value=1)
        return {
            'input_ids': context,
            'attention_mask': context.ne(1),
        }

# seen 25
# unseen 20
def main(language):
    """Train the XLM-R knowledge ranker for `language` on the WoW train split.

    Each example scores 1 positive + `neg_num` negative candidates; the model
    is trained with cross-entropy over the candidate logits (gold at index 0).
    Saves one checkpoint per epoch under ckpt/infoxlm-base-rank-{language}/.
    """
    init_seed(123456)
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 10
    batch_size = 8
    lr = 2e-5
    neg_num = 3  # candidates per example = 1 positive + neg_num negatives

    model_name = r"../pretrain_models/infoxlm-base/"  # 2
    ckpt_name = f"infoxlm-base-rank-{language}"
    tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)

    print(f"epochs {epochs}, batch_size {batch_size}, learning_rate {lr}, mode train")
    print(f"model name '{model_name}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    data_path = "./wow/train/"
    context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl", data_path + "0.txt")

    dataset = RankData(context, knowledge, response, tokenizer, context_len=256, response_len=128, neg_num=neg_num)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=True, num_workers=8)

    # Single-logit head: the model emits one score per (context, candidate) pair.
    config = AutoConfig.from_pretrained(model_name)
    config.num_labels = 1
    model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))

    optimizer = AdamW(model.parameters(), lr=lr)

    model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
    # Warm up for one full epoch worth of steps.
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=len(data_loader), num_training_steps=epochs * len(data_loader))
    scheduler = accelerator.prepare(scheduler)

    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(data_loader, total=len(data_loader))
        losses = []
        for batch in tk0:
            output = model(**batch)
            # Regroup the flat per-candidate scores into rows of one example each.
            logits = output.logits.view(-1, neg_num + 1)
            # The gold candidate is always index 0. Build the labels on the
            # logits' device instead of hard-coding .cuda(), so the script
            # also works on CPU / whatever device accelerate picked.
            labels = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)
            loss = F.cross_entropy(logits, labels)
            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            losses.append(loss.item())
            tk0.set_postfix(loss=sum(losses) / len(losses))

        os.makedirs(f'ckpt/{ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model).state_dict(), f'ckpt/{ckpt_name}/{epoch}.pt')


def lower(text):
    """Recursively lower-case and NLTK-tokenize text.

    A string is stripped, lower-cased, word-tokenized and re-joined on
    single spaces; any non-string input is treated as an iterable and
    processed element-wise, returning a list.
    """
    if not isinstance(text, str):
        return [lower(element) for element in text]
    tokens = nltk.word_tokenize(text.strip().lower())
    return ' '.join(tokens).strip()


def test(mode, language):
    """Evaluate every saved ranker checkpoint by top-1 knowledge-selection accuracy.

    mode selects the evaluation split ("test_ckgc", "test_seen", "test_unseen",
    "dev_seen", "dev_unseen"); language is one of en/es/fr/zh. For each epoch
    checkpoint found under ckpt/, prints the mean accuracy of placing the gold
    candidate (index 0) first among 129 candidates.
    """
    # mode: test_ckgc, test_seen, test_unseen, dev_seen, dev_unseen
    # language: en, es, fr, zh
    epochs = 10
    batch_size = 4  # 4--20G  2--10G
    model_name = r"../pretrain_models/infoxlm-base/"
    ckpt_path = "ckpt"
    ckpt_name = f"infoxlm-base-rank-{language}"
    tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)

    print(f"epochs {epochs}, batch_size {batch_size}, mode {mode}")
    print(f"model name '{model_name}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    # mode is "<category>_<split>", e.g. "test_seen" -> icato="test", imode="seen".
    icato, imode = mode.split("_")
    if imode == "ckgc":
        data_path = f"./ckgc/{language}/"
        context, knowledge, response = load_ckgc_data(f"{data_path}/context.txt", f"{data_path}/response.txt",
                                                      f"{data_path}/pool.txt", f"{data_path}/knowledge.txt")
    elif imode in ["seen", "unseen"] and icato == "test":
        data_path = f"./wow/{imode}/"
        context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl", data_path + "0.txt")
    elif imode in ["seen", "unseen"] and icato == "dev":
        data_path = f"./wow/{icato}_{imode}/"
        context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl", data_path + "0.txt")
    else:
        raise AssertionError

    # neg_num=128 -> 1 positive + 128 candidates = 129 scores per example.
    dataset = RankData(context, knowledge, response, tokenizer, context_len=256, response_len=128, neg_num=128)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=False)

    # Single-logit head: one relevance score per (context, candidate) pair.
    config = AutoConfig.from_pretrained(model_name)
    config.num_labels = 1
    model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))
    model = model.cuda()
    for epoch in range(epochs):
        # Skip epochs whose checkpoint was never written.
        if not os.path.exists(f'{ckpt_path}/{ckpt_name}/{epoch}.pt'):
            continue
        print(f'Test {ckpt_path}/{ckpt_name}/{epoch}.pt')
        model.load_state_dict(torch.load(f'{ckpt_path}/{ckpt_name}/{epoch}.pt'))
        tk0 = tqdm(data_loader, total=len(data_loader))
        output_text_collect = []
        acc = []
        model.eval()
        with torch.no_grad():
            for batch in tk0:
                batch = {k: v.cuda() for k, v in batch.items()}
                output = model(**batch)
                # Regroup flat scores into rows of 129 candidates per example;
                # the gold candidate is at index 0.
                logits = output.logits.view(-1, 129)
                acc.append((logits.argmax(-1) == 0).float().mean().item())
                tk0.set_postfix(acc=sum(acc) / len(acc))
        print(sum(acc) / len(acc))

        # true_text_collect = dataset.response
        # print(eval_all(lower(output_text_collect), lower(true_text_collect)))
        # write_file(output_text_collect, f'ckpt/{ckpt_name}/{path.split("/")[2]}/{epoch}.txt')


def inference(mode, language):
    """Rank every knowledge candidate with one fixed checkpoint and dump the rankings.

    mode selects the split ("test_ckgc", "test_seen", "test_unseen", "dev_seen",
    "dev_unseen"); language is one of en/es/fr/zh. Uses the epoch-5 checkpoint,
    scores the FULL candidate pool per example (batch_size 1, no '<none>'
    padding), prints top-1 accuracy and writes the per-example candidate
    rankings to ckpt/<name>/train-rank-5.json.
    """
    # mode: test_ckgc, test_seen, test_unseen, dev_seen, dev_unseen
    # language: en, es, fr, zh
    epoch = 5
    batch_size = 1
    model_name = r"../pretrain_models/infoxlm-base/"
    ckpt_path = "ckpt"
    ckpt_name = f"infoxlm-base-rank-{language}"
    tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)

    print(f"epoch {epoch}, batch_size {batch_size}, mode {mode}")
    print(f"model name '{model_name}', ckpt path '{ckpt_path}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    # mode is "<category>_<split>", e.g. "dev_unseen" -> icato="dev", imode="unseen".
    icato, imode = mode.split("_")
    if imode == "ckgc":
        data_path = f"./ckgc/{language}/"
        context, knowledge, response = load_ckgc_data(f"{data_path}/context.txt", f"{data_path}/response.txt",
                                                      f"{data_path}/pool.txt", f"{data_path}/knowledge.txt")
    elif imode in ["seen", "unseen"] and icato == "test":
        data_path = f"./wow/{imode}/"
        context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl",
                                                 data_path + "0.txt")
    elif imode in ["seen", "unseen"] and icato == "dev":
        data_path = f"./wow/{icato}_{imode}/"
        context, knowledge, response = load_data(f"{data_path}/{language}.txt", data_path + "pool.pkl",
                                                 data_path + "0.txt")
    else:
        raise AssertionError

    # pad_none=False + large neg_num: keep the whole pool, no '<none>' padding;
    # batch_size must stay 1 because pool sizes vary per example.
    dataset = RankData(context, knowledge, response, tokenizer,
                       context_len=256, response_len=128, neg_num=512, pad_none=False)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=False)

    # Single-logit head: one relevance score per (context, candidate) pair.
    config = AutoConfig.from_pretrained(model_name)
    config.num_labels = 1
    model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))
    model = model.cuda()
    print(f'Test {ckpt_path}/{ckpt_name}/{epoch}.pt')
    model.load_state_dict(torch.load(f'{ckpt_path}/{ckpt_name}/{epoch}.pt'))
    tk0 = tqdm(data_loader, total=len(data_loader))
    output_text_collect = []
    acc = []
    model.eval()
    saved = []
    with torch.no_grad():
        for batch in tk0:
            batch = {k: v.cuda() for k, v in batch.items()}
            output = model(**batch)
            # One example per batch: a single row of candidate scores.
            logits = output.logits.view(1, -1)
            # Negate so argsort yields descending score order (best first).
            logits = -logits
            rank = logits.argsort(-1).cpu().tolist()
            # print(rank)
            saved.extend(rank)
            # Top-1 accuracy: the gold candidate sits at pool index 0.
            acc.extend([int(rr[0] == 0) for rr in rank])
            tk0.set_postfix(acc=sum(acc) / len(acc))
    print(sum(acc) / len(acc))
    json.dump(saved, open(f'{ckpt_path}/{ckpt_name}/train-rank-{epoch}.json', 'w'))

    # true_text_collect = dataset.response
    # print(eval_all(lower(output_text_collect), lower(true_text_collect)))
    # write_file(output_text_collect, f'ckpt/{ckpt_name}/{epoch}.txt')


if __name__ == '__main__':
    # Entry point: enable exactly one of main (train) / test (evaluate all
    # checkpoints) / inference (rank full pools with one checkpoint).
    # main("en")
    test("test_ckgc", "zh")
    # inference("test_ckgc", "zh")
