import os
import argparse
import json
import logging
import os
import re
import time
from datetime import date

import torch
from Bart_SPARQL.sparql_engine import get_sparql_answer
from tqdm import tqdm
from transformers import BartConfig, BartForConditionalGeneration, BartTokenizerFast

from Bart_SPARQL.data import DataLoader
from utils.load_kb import DataForSPARQL
from utils.misc import seed_everything

# Root logger prints to stderr; a per-run FileHandler with the same
# format is attached later in main().
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s"
)
logFormatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
rootLogger = logging.getLogger()
import warnings

warnings.simplefilter("ignore")  # hide warnings caused by invalid SPARQL queries


def whether_equal(answer, pred):
    """
    Check whether ``answer`` and ``pred`` are equal as attribute values.

    Both sides are normalized (trailing ``.0`` floats truncated, e.g.
    '100.0 meters' -> '100 meters'), then compared as dates/years when
    both parse as such (a full 'YYYY-MM-DD' date matches a bare year if
    the years agree); otherwise plain string equality is used.
    """

    def truncate_float(x):
        # Normalize '100.0 meters' -> '100 meters'; leave non-numeric
        # strings (or non-strings such as a None prediction) untouched.
        try:
            v, *unit = x.split()
            v = float(v)
            # abs() so negative values (e.g. '-100.5') are not wrongly
            # truncated toward zero; only near-integers are converted.
            if abs(v - int(v)) < 1e-5:
                v = int(v)
            if len(unit) == 0:
                x = str(v)
            else:
                x = "{} {}".format(str(v), " ".join(unit))
        except (AttributeError, ValueError, TypeError):
            pass
        return x

    def equal_as_date(x, y):
        # Compare as date ('YYYY-MM-DD') or bare year; any parse failure
        # means the pair is not date-comparable.
        try:
            x_split = x.split("-")
            y_split = y.split("-")
            if len(x_split) == 3:
                x = date(int(x_split[0]), int(x_split[1]), int(x_split[2]))
            else:
                x = int(x)
            if len(y_split) == 3:
                y = date(int(y_split[0]), int(y_split[1]), int(y_split[2]))
            else:
                y = int(y)
            if isinstance(x, date) and isinstance(y, date):
                return x == y
            # Mixed date/year: compare by year only.
            x = x.year if isinstance(x, date) else x
            y = y.year if isinstance(y, date) else y
            return x == y
        except (AttributeError, ValueError, TypeError):
            return False

    answer = truncate_float(answer)
    pred = truncate_float(pred)
    if equal_as_date(answer, pred):
        return True
    else:
        return answer == pred


def decode_for_sparql(ids, tokenizer: BartTokenizerFast):
    """Turn generated token ids back into a SPARQL string.

    Ids <= 2 (BART special tokens: pad/bos/eos) are dropped, and the
    BPE space marker 'Ġ' is rewritten as a real space.
    """
    kept = (
        tokenizer.convert_ids_to_tokens(token_id)
        for token_id in ids
        if token_id > 2
    )
    return "".join(kept).replace("Ġ", " ")


def vis(args, kb, model, data, device, tokenizer):
    """Interactive demo: read a question from stdin, print the generated SPARQL.

    Loops forever (Ctrl-C to exit). ``args``, ``kb`` and ``data`` are
    kept for signature compatibility with the other entry points but
    are unused here.
    """
    while True:
        # e.g. 'Who is the father of Tony?'
        text = input("Input your question:")
        with torch.no_grad():
            # Calling the tokenizer with padding="max_length" replaces
            # the deprecated batch_encode_plus(pad_to_max_length=True).
            encoded = tokenizer(
                [text],
                max_length=512,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )
            source_ids = encoded["input_ids"].to(device)
            generated = model.generate(
                input_ids=source_ids,
                max_length=500,
            )
            outputs = [
                decode_for_sparql(ids, tokenizer) for ids in generated.tolist()
            ]
            print(outputs[0])


def validate(args, model, data, device, tokenizer, ids):
    """Generate SPARQL for the validation set, execute it, and report accuracy.

    Args:
        args: parsed CLI namespace; ``args.ckpt`` is used as the output
            directory for the prediction dump.
        model: seq2seq model exposing ``generate``.
        data: iterable of batches ``(source_ids, source_mask, choices,
            target_ids, answer)``; must also expose ``vocab``.
        device: torch device the batch tensors are moved to.
        tokenizer: tokenizer used to decode generated ids.
        ids: question ids aligned with ``data``'s iteration order.

    Returns:
        float: fraction of predictions matching the gold answers
        (0.0 when the validation set is empty).
    """
    model.eval()
    count, correct = 0, 0
    with torch.no_grad():
        ques_ids = []
        all_outputs = []
        all_answers = []
        for batch in tqdm(data, total=len(data), desc="Validating"):
            source_ids, source_mask, choices, target_ids, answer = [
                x.to(device) for x in batch
            ]
            _outputs = model.generate(
                input_ids=source_ids,
                max_length=500,
            )
            ques_ids.extend(source_ids.cpu().tolist())
            all_outputs.extend(_outputs.cpu().tolist())
            all_answers.extend(answer.cpu().tolist())

        pred_sparql = [decode_for_sparql(x, tokenizer) for x in all_outputs]
        questions = [decode_for_sparql(x, tokenizer) for x in ques_ids]

        given_answer = [data.vocab["answer_idx_to_token"][a] for a in all_answers]

        assert len(pred_sparql) == len(given_answer) == len(questions)
        res = []
        for q, a, s in tqdm(zip(questions, given_answer, pred_sparql)):
            pred_answer = get_sparql_answer(s)
            res.append(
                {
                    "question": q,
                    "gold_ans": a,
                    "pred_sparql": s,
                    "pred_ans": pred_answer,
                    "is_correct": pred_answer == a,
                }
            )
            # whether_equal is laxer than == (float/date normalization).
            if whether_equal(a, pred_answer):
                correct += 1
            count += 1

        # Attach the original question ids to the result records.
        assert len(res) == len(ids)
        for i, r in enumerate(res):
            r["id"] = ids[i]

    out_path = os.path.join(args.ckpt, "pred_sparql.json")
    # `with` closes and flushes the handle; the original passed a bare
    # open() to json.dump and leaked it.
    with open(out_path, "w") as f:
        json.dump(res, f, ensure_ascii=False, indent=4)
    logging.info("Saving prediction results to: %s", out_path)
    # Guard against an empty validation set (ZeroDivisionError).
    acc = correct / count if count else 0.0
    logging.info("\nValid Accuracy: %.4f\n" % acc)
    return acc


def run(args):
    """Load the validation set, restore the BART checkpoint, and validate.

    Args:
        args: parsed CLI namespace; uses ``input_dir``, ``batch_size``
            and ``ckpt``.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"

    logging.info("Create train_loader and val_loader.........")
    # `with` closes the handle (the bare open() previously leaked it).
    with open(os.path.join(args.input_dir, "val-ids.json")) as f:
        ids = json.load(f)
    vocab_json = os.path.join(args.input_dir, "vocab.json")
    val_pt = os.path.join(args.input_dir, "val.pt")
    val_loader = DataLoader(vocab_json, val_pt, args.batch_size)

    logging.info("Create model.........")
    tokenizer = BartTokenizerFast.from_pretrained(args.ckpt)
    model = BartForConditionalGeneration.from_pretrained(args.ckpt)
    model = model.to(device)

    # vis(args, kb, model, val_loader, device, tokenizer)  # interactive demo

    validate(args, model, val_loader, device, tokenizer, ids)


def main():
    """Parse CLI arguments, set up file logging, seed RNGs, and run prediction."""
    parser = argparse.ArgumentParser()
    # input and output
    parser.add_argument("--input_dir", required=True)
    parser.add_argument(
        "--save_dir", required=True, help="path to save checkpoints and logs"
    )
    parser.add_argument("--ckpt", required=True)

    # training parameters
    parser.add_argument("--batch_size", default=64, type=int)
    parser.add_argument("--seed", type=int, default=666, help="random seed")

    # model hyperparameters
    parser.add_argument("--dim_hidden", default=1024, type=int)
    parser.add_argument("--alpha", default=1e-4, type=float)
    args = parser.parse_args()

    os.makedirs(args.save_dir, exist_ok=True)
    time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
    fileHandler = logging.FileHandler(
        os.path.join(args.save_dir, "{}.predict.log".format(time_))
    )
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    # args display
    for k, v in vars(args).items():
        logging.info(k + ":" + str(v))

    # Use the user-supplied seed; the original hard-coded 666 and
    # silently ignored the parsed --seed argument.
    seed_everything(args.seed)

    run(args)


# Script entry point.
if __name__ == "__main__":
    main()
