import os

from chaonan_src.doc_retrieval_experiment import DocRetrievalExperimentSpiral, DocRetrievalExperiment, \
    DocRetrievalExperimentTwoStep
from chaonan_src._doc_retrieval.item_rules_spiral import ItemRuleBuilderSpiral, ItemRuleBuilderNoPageview
from chaonan_src._doc_retrieval.item_rules_test import ItemRuleBuilderTest
from chaonan_src.doc_retrieval_experiment_client import DocRetrievalClient
from utils import common
import utils
import config
from utils.tokenize_fever import tokenized_claim
from utils import c_scorer
from typing import Dict
from sentence_retrieval import bert_nnmodel
from simi_sampler_nli_v0 import simi_sampler
import nli.mesim_wn_simi_v1_2
from nli import concat_nli,gnn_nli
import copy
import json
import numpy as np
import nn_doc_retrieval.disabuigation_training as disamb
from nn_doc_retrieval import nn_doc_model




# Root directory under which each pipeline run creates its own
# timestamped result folder (see pipeline()).
PIPELINE_DIR = config.RESULT_PATH / "pipeline_r"

# Default checkpoint path for every learned pipeline component, keyed by
# component name.  NOTE(review): annotated Dict[str, str], but the values
# are path objects built with the `/` operator on config.PRO_ROOT —
# the annotation looks inaccurate; confirm the type of PRO_ROOT.
default_model_path_dict: Dict[str, str] = {
    'nn_doc_selector': config.PRO_ROOT / 'saved_models/nn_doc_selector/i(9000)_epoch(1)_(tra_score:0.9212421242124212|pr:0.4299679967996279|rec:0.8818631863186318|f1:0.5780819247968391)',
    'nn_sent_selector':config.PRO_ROOT / 'saved_models/nn_sent_selector/i(400000)_epoch(1)_(hit_rate:0.9170917091709171|raw_acc:1.0|pr:0.26402515251521413|rec:0.8756375637563757|f1:0.40571712668668775)',
    'concat_nli': config.PRO_ROOT / 'saved_models/03-18-21:23:58_concat_nli/i(15000)_epoch(1)_dev(0.7096209620962096)_loss(0.6673312743838662)_seed(12)',
    'xlnet_gnn_nli': config.PRO_ROOT / 'new_saved_models/03-25-09:20:13_xlnet_gnn_nli/i(66000)_epoch(1)_dev(0.7294229422942294)_loss(0.866934278957299)_seed(12)'
}


# Classifier output index -> FEVER label string.
# (Not referenced in this file's visible code; presumably consumed by the
# NLI components — verify before removing.)
id2label = {
    0: "SUPPORTS",
    1: "REFUTES",
    2: "NOT ENOUGH INFO",
}

class HAONAN_DOCRETRI_OBJECT:
    """Mutable holder for a lazily-initialized document-retrieval backend.

    ``instance`` stays None until init_haonan_docretri_object() fills it in.
    """

    def __init__(self):
        # Backend (experiment object or client class) set on first use.
        self.instance = None


def init_haonan_docretri_object(object, method='word_freq'):
    """Attach a document-retrieval backend to *object* (HAONAN_DOCRETRI_OBJECT).

    'server' delegates retrieval to the remote client; any other method
    selects a rule builder and wraps it in a two-step retrieval experiment.
    An unknown method raises KeyError.
    """
    if method == 'server':
        # NOTE(review): the class itself is assigned here (no call), unlike
        # the instantiated experiment below — confirm this is intentional.
        object.instance = DocRetrievalClient
        return

    rule_builder_cls = {
        'word_freq': ItemRuleBuilderTest,
        'pageview': ItemRuleBuilderSpiral,
        'nopageview': ItemRuleBuilderNoPageview,
    }[method]
    # Only build the (expensive) experiment once; reuse an existing backend.
    if object.instance is None:
        object.instance = DocRetrievalExperimentTwoStep(rule_builder_cls())


def pipeline(in_file, eval_file=None,
             model_path_dict=default_model_path_dict, steps=None):
    """Run the full fact-verification pipeline on a raw claim file.

    :param in_file: path object for the raw input jsonl file of claims.
    :param eval_file: optional jsonl file with gold labels; when given,
        intermediate retrieval/selection stages are scored against it.
    :param model_path_dict: mapping from component name to checkpoint path
        (expected keys: see ``default_model_path_dict``).
    :param steps: per-stage control dict; each entry carries a 'do' flag and,
        when 'do' is False, an 'out_file' pointing at a precomputed result.
    :return: None.  All artifacts are written under ``PIPELINE_DIR``.
    """
    # The function dereferences `steps` unconditionally; fail fast with a
    # clear message instead of a TypeError on the first subscript.
    if steps is None:
        raise ValueError("`steps` must describe every pipeline stage; got None")

    # Stage hyper-parameters.
    nn_doc_top_k = 10
    nn_doc_retri_threshold = 0.00001
    nn_sent_top_k = 5
    build_submission = True
    doc_retrieval_method = 'word_freq'

    haonan_docretri_object = HAONAN_DOCRETRI_OBJECT()

    PIPELINE_DIR.mkdir(parents=True, exist_ok=True)

    # Reuse the directory of a previous run when tokenization is skipped,
    # otherwise start a fresh timestamped result directory.
    if steps['s1.tokenizing']['do']:
        time_stamp = utils.get_current_time_str()
        current_pipeline_dir = PIPELINE_DIR / f"{time_stamp}_r"
    else:
        current_pipeline_dir = steps['s1.tokenizing']['out_file'].parent

    print("Current Result Root:", current_pipeline_dir)
    current_pipeline_dir.mkdir(parents=True, exist_ok=True)

    eval_list = common.load_jsonl(eval_file) if eval_file is not None else None

    in_file_stem = in_file.stem
    tokenized_file = current_pipeline_dir / f"t_{in_file_stem}.jsonl"

    # Snapshot this script into the result directory for reproducibility.
    script_name = os.path.basename(__file__)
    with open(os.path.join(str(current_pipeline_dir), script_name), 'w') as out_f, \
            open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()

    print("*" * 50, "Step 1.Tokenizing.")
    if steps['s1.tokenizing']['do']:
        tokenized_claim(in_file, tokenized_file)  # Auto Saved
        print("Tokenized file saved to:", tokenized_file)
    else:
        tokenized_file = steps['s1.tokenizing']['out_file']
        print("Use preprocessed file:", tokenized_file)

    print("*" * 50, "Step 2.1 Keyword Document Retrieval")
    if steps['s2.1doc_retri']['do']:
        doc_retrieval_result_list = first_doc_retrieval(
            haonan_docretri_object, tokenized_file, method=doc_retrieval_method)
        doc_retrieval_file_1 = current_pipeline_dir / f"doc_retr_1_{in_file_stem}.jsonl"
        common.save_jsonl(doc_retrieval_result_list, doc_retrieval_file_1)
        print("First Document Retrieval file saved to:", doc_retrieval_file_1)
    else:
        doc_retrieval_file_1 = steps['s2.1doc_retri']['out_file']
        doc_retrieval_result_list = common.load_jsonl(doc_retrieval_file_1)
        print("Use preprocessed file:", doc_retrieval_file_1)

    # Drop legacy rule matches and re-rank the keyword retrieval output.
    disamb.item_remove_old_rule(doc_retrieval_result_list)
    disamb.item_resorting(doc_retrieval_result_list)

    if eval_list is not None:
        print("Evaluating 1st Doc Retrieval")
        eval_mode = {'check_doc_id_correct': True, 'standard': False}
        print(c_scorer.fever_score(doc_retrieval_result_list, eval_list, mode=eval_mode, verbose=False))
        for max_ev in (5, 10, 15):
            print(f"Max_doc_num_{max_ev}:",
                  c_scorer.fever_doc_only(doc_retrieval_result_list, eval_list, max_evidence=max_ev))

    print("*" * 50, "Step 2.2 NSMN Document Retrieval")
    if steps['s2.2doc_nn_retri']['do']:
        nn_doc_list = nn_doc_model.pipeline_function(doc_retrieval_file_1, model_path_dict['nn_doc_selector'])
        nn_doc_file = current_pipeline_dir / f"nn_doc_list_1_{in_file_stem}.jsonl"
        common.save_jsonl(nn_doc_list, nn_doc_file)
        # Round-trip through disk so in-memory types match the cached path.
        nn_doc_list = common.load_jsonl(nn_doc_file)
    else:
        nn_doc_file = steps['s2.2doc_nn_retri']['out_file']
        nn_doc_list = common.load_jsonl(nn_doc_file)

    # Merge neural doc scores into the keyword retrieval result (in place).
    disamb.enforce_disabuigation_into_retrieval_result_v2(
        nn_doc_list, doc_retrieval_result_list, prob_sh=nn_doc_retri_threshold)

    if eval_list is not None:
        print("Evaluating 1st 2.s Neural Doc Retrieval")
        eval_mode = {'check_doc_id_correct': True, 'standard': False}
        print(c_scorer.fever_score(doc_retrieval_result_list, eval_list, mode=eval_mode, verbose=False))
        for max_ev in (5, 10, 15):
            print(f"Max_doc_num_{max_ev}:",
                  c_scorer.fever_doc_only(doc_retrieval_result_list, eval_list, max_evidence=max_ev))

    nn_doc_retrieval_file_1 = current_pipeline_dir / f"nn_doc_retr_1_{in_file_stem}.jsonl"
    common.save_jsonl(doc_retrieval_result_list, nn_doc_retrieval_file_1)

    print("*" * 50, "Step 3 Sentence Selection")
    if steps['s3.sen_select']['do']:
        dev_sent_list_1 = bert_nnmodel.pipeline_first_sent_selection(
            tokenized_file, nn_doc_retrieval_file_1,
            model_path_dict['nn_sent_selector'], top_k=nn_doc_top_k)
        dev_sent_file_1 = current_pipeline_dir / f"dev_sent_score_1_{in_file_stem}_docnum({nn_doc_top_k}).jsonl"
        common.save_jsonl(dev_sent_list_1, dev_sent_file_1)
        print("First Sentence Selection file saved to:", dev_sent_file_1)
    else:
        dev_sent_file_1 = steps['s3.sen_select']['out_file']
        dev_sent_list_1 = common.load_jsonl(dev_sent_file_1)
        print("Use preprocessed file:", dev_sent_file_1)

    # Convert raw sentence scores once and reuse for both scoring and saving.
    # BUG FIX: the original recomputed this and then evaluated an undefined
    # name `sent_select_results_list_1` (NameError) whenever eval_list was set.
    sent_select_result_list_1 = bert_nnmodel.score_converter_v0(
        tokenized_file, dev_sent_list_1, sent_retri_top_k=nn_sent_top_k)

    if eval_list is not None:
        print("Evaluating 1st Sentence Selection")
        # Defined locally instead of relying on eval_mode leaking from the
        # doc-retrieval branch above (same value as before).
        eval_mode = {'check_doc_id_correct': True, 'standard': False}
        print(c_scorer.fever_score(sent_select_result_list_1, eval_list, mode=eval_mode, verbose=False))

    sent_select_result_file_1 = current_pipeline_dir / f"dev_sent_select_1_{in_file_stem}_sentnum({nn_sent_top_k}).jsonl"
    common.save_jsonl(sent_select_result_list_1, sent_select_result_file_1)

    print("*" * 50, "Step 4.0 Build the Graph")
    if steps['s4.0openie']['do']:
        mode = steps['s4.0openie']['mode']
        openie_result_list = concat_nli.open_ie(sent_select_result_file_1, mode)
        openie_result_file = current_pipeline_dir / f"nli_openie_{mode}.jsonl"
        common.save_jsonl(openie_result_list, openie_result_file)
    else:
        openie_result_file = steps['s4.0openie']['out_file']

    # BUG FIX: removed `assert 1 == 2`, a leftover debug stop that made every
    # stage from here on unreachable.

    print("*" * 50, "Step 4.1 NLI")
    if steps['s4.1nli']['do']:
        openie_result_list = common.load_jsonl(openie_result_file)
        nli_results = gnn_nli.pipeline_gnn_nli_run(tokenized_file,
                                                   openie_result_list,
                                                   model_path_dict['xlnet_gnn_nli'],
                                                   with_logits=True,
                                                   with_probs=True)
        nli_results_file = current_pipeline_dir / f"nli_r_{in_file_stem}.jsonl"
        common.save_jsonl(nli_results, nli_results_file)
    else:
        # BUG FIX: the original read steps['s6.nli'], a key no caller defines;
        # this stage is configured under 's4.1nli'.
        nli_results_file = steps['s4.1nli']['out_file']
        nli_results = common.load_jsonl(nli_results_file)

    print("Post Processing enhancement")
    # FEVER scoring penalizes evidence attached to NOT ENOUGH INFO items.
    delete_unused_evidence(nli_results)

    if build_submission:
        output_file = current_pipeline_dir / "predictions.jsonl"
        build_submission_file(nli_results, output_file)


def first_doc_retrieval(retri_object, in_file, method='word_freq'):
    """First-stage (keyword/rule-based) document retrieval.

    :param retri_object: HAONAN_DOCRETRI_OBJECT holding (or lazily receiving)
        the retrieval backend.
    :param in_file: tokenized claim jsonl file.
    :param method: backend key ('word_freq', 'pageview', 'nopageview', 'server').
    :return: the loaded claim list, mutated in place with retrieved documents.
    """
    # BUG FIX: log message said "reteieval".
    print("*" * 50, "doc1:initialize the retrieval object:")
    init_haonan_docretri_object(retri_object, method=method)
    print("*" * 50, "doc1:load the tokenized json file:")
    d_list = common.load_jsonl(in_file)
    print("*" * 50, "doc1:retrieve the doc with priority:")
    # Guard the debug print: the original crashed on an empty input file.
    if d_list:
        print(d_list[0].keys())
    retri_object.instance.sample_answer_with_priority(d_list)
    return d_list


def build_submission_file(d_list, filename):
    """Write shared-task predictions to *filename* as one JSON object per line.

    Only the submission fields ('id', 'predicted_label',
    'predicted_evidence') are kept; any other keys on the items are dropped.
    """
    submission_keys = ('id', 'predicted_label', 'predicted_evidence')
    with open(filename, encoding='utf-8', mode='w') as out_f:
        for item in d_list:
            row = {key: item[key] for key in submission_keys}
            out_f.write(json.dumps(row) + "\n")


def delete_unused_evidence(d_list):
    """Strip predicted evidence from NOT-ENOUGH-INFO items, in place.

    The FEVER scorer treats evidence on NEI predictions as spurious, so it
    is cleared before the submission file is built.
    """
    nei_label = "NOT ENOUGH INFO"
    for entry in d_list:
        if entry['predicted_label'] == nei_label:
            entry['predicted_evidence'] = []



if __name__ == '__main__':
    # Result directory of the run whose cached intermediate files are reused
    # by the stages below with 'do': False.  Previously used runs:
    #   train:            "2021_03_08_14:35:16_r"
    #     (out_file stems then end in ..._train instead of ..._shared_task_dev)
    #   shared_task_test: "2021_03_19_09:37:03_r"
    date = "2021_03_08_17:40:14_r"  # shared_task_dev

    # Per-stage control: 'do' False means "skip the stage and load out_file
    # produced by an earlier run".  Indentation normalized — the original
    # mixed a literal tab and an over-indented 's4.1nli' entry.
    p_steps = {
        's1.tokenizing': {
            'do': False,
            'out_file': config.RESULT_PATH / "pipeline_r" / date / "t_shared_task_dev.jsonl"
        },
        's2.1doc_retri': {
            'do': False,
            'out_file': config.RESULT_PATH / "pipeline_r" / date / "doc_retr_1_shared_task_dev.jsonl"
        },
        's2.2doc_nn_retri': {
            'do': False,
            'out_file': config.RESULT_PATH / 'pipeline_r' / date / 'nn_doc_list_1_shared_task_dev.jsonl'
        },
        's3.sen_select': {
            'do': False,
            'out_file': config.RESULT_PATH / 'pipeline_r' / date / 'dev_sent_score_1_shared_task_dev_docnum(10).jsonl',
        },
        's4.0openie': {
            'do': True,
            'mode': "shared_task_dev",
            'out_file': config.RESULT_PATH / 'pipeline_r' / date / 'nli_openie_shared_task_dev.jsonl',
        },
        's4.1nli': {
            'do': True,
            'out_file': "hidden"
        },
    }

    pipeline(config.DATA_ROOT / "fever/shared_task_dev.jsonl",
             eval_file=None,
             model_path_dict=default_model_path_dict,
             steps=p_steps)

