import argparse
import json
import os, json


# Tasks that are always evaluated 0-shot (see parse_args: everything else gets 8-shot).
zero_shot_datasets = [ "truthfulqa_gen", ]
# Tasks whose evaluation uses free-form generation ("greedy_until" requests);
# this list drives both the eval loop in main() and the request dump in save_dataset().
greedy_until_tasks = [
    "coqa", # understand a text passage and answer a series of interconnected questions that appear in a conversation
    "drop", # a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a question, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or sorting)
    "triviaqa", # This dataset is more challenging, as the answers for a question may not be directly obtained by span prediction and the context is very long
    # "truthfulqa_gen", # a benchmark to measure whether a language model is truthful in generating answers to questions
    # MATH is a new dataset of 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations.
    "math_algebra", 
    "math_counting_and_prob",
    "math_geometry",
    "math_intermediate_algebra",
    "math_num_theory",
    "math_prealgebra",
    "math_precalc",
    "gsm8k", # a dataset of high quality linguistically diverse grade school math word problems, that take between 2 and 8 steps of elementary calculations (+ − ×÷) to solve.
    # Unscramble is a small battery of 5 “character manipulation” tasks. Each task involves giving the model a word distorted by some combination of scrambling, addition, or deletion of characters, and asking it to recover the original word.
    "anagrams1",
    "anagrams2",
    "cycle_letters",
    "random_insertion",
    "reversed_words",
    # Multilingual Grade School Math Benchmark (MGSM)
    # The same 250 problems from [GSM8K](https://arxiv.org/abs/2110.14168) are each translated via human annotators in 10 languages. The 10 languages are:
    # - Spanish
    # - French
    # - German
    # - Russian
    # - Chinese
    # - Japanese
    # - Thai
    # - Swahili
    # - Bengali
    # - Telugu
    "mgsm_en",
    "mgsm_es",
    "mgsm_fr",
    "mgsm_de",
    "mgsm_ru",
    "mgsm_zh",
    "mgsm_ja",
    "mgsm_th",
    "mgsm_sw",
    "mgsm_bn",
    "mgsm_te",
    ]


def parse_args(dataset_id, model_id):
    """Build the lm-eval argument namespace for one programmatic run.

    `dataset_id` becomes the default --tasks value and `model_id` is wired
    into --model_args for the `llm_api` model wrapper. Datasets listed in
    `zero_shot_datasets` run 0-shot, everything else 8-shot. Real CLI flags
    (e.g. --limit) may still override the defaults.
    """
    from lm_eval import tasks, utils
    num_fewshot = 0 if dataset_id in zero_shot_datasets else 8
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="llm_api")
    parser.add_argument("--model_args", default=f"engine={model_id}")
    parser.add_argument("--tasks", default=dataset_id, choices=utils.MultiChoice(tasks.ALL_TASKS))
    parser.add_argument("--provide_description", action="store_true")
    parser.add_argument("--num_fewshot", type=int, default=num_fewshot)
    parser.add_argument("--batch_size", type=str, default=None)
    parser.add_argument("--max_batch_size", type=int, default=None,
                        help="Maximal batch size to try with --batch_size auto")
    parser.add_argument("--device", type=str, default=None)
    parser.add_argument("--output_path", default=None)
    parser.add_argument("--limit", type=float, default=None,
                        help="Limit the number of examples per task. "
                             "If <1, limit is a percentage of the total number of examples.")
    parser.add_argument("--data_sampling", type=float, default=None)
    parser.add_argument("--no_cache", action="store_true")
    parser.add_argument("--decontamination_ngrams_path", default=None)
    parser.add_argument("--description_dict_path", default=None)
    parser.add_argument("--check_integrity", action="store_true")
    parser.add_argument("--write_out", action="store_true", default=False)
    parser.add_argument("--output_base_path", type=str, default=None)

    # parse_known_args, not parse_args: the process can be launched with flags
    # this parser does not define (e.g. --save_dataset, consumed in __main__);
    # parse_args() would abort with "unrecognized arguments" on them.
    args, _unknown = parser.parse_known_args()
    return args


def main_1(dataset_id, model_id):
    """Evaluate a single task/model pair with lm-eval and record the results.

    Appends the raw result dict to lm_eval_results.jsonl and a one-line
    summary (model, task, sorted metrics) to lm_eval_results.txt.
    `model_id` may be a tuple/list of model names (joined with '_').
    """
    import logging
    from lm_eval import tasks, evaluator, utils
    # Silence per-request logging from the openai client used by the harness.
    logging.getLogger("openai").setLevel(logging.WARNING)
    # Environment knobs that may be needed before running (kept as a note):
    '''
    export HF_ENDPOINT="https://hf-mirror.com"
    export HF_DATASETS_CACHE="datasets_cache"
    '''
    import lm_eval
    import llm_api
    # Register the local HTTP-backed model class under the name that
    # parse_args() uses as the default --model value.
    lm_eval.models.MODEL_REGISTRY['llm_api'] = llm_api.LLM_API

    if isinstance(model_id, (tuple,list)):
        # An ensemble of models is identified by joining the ids with '_'.
        model_id = '_'.join(model_id)
    args = parse_args(dataset_id, model_id)

    assert not args.provide_description  # not implemented

    if args.limit:
        print(
            "WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    # Expand the comma-separated --tasks value against the harness registry
    # (supports glob-style patterns).
    if args.tasks is None:
        task_names = tasks.ALL_TASKS
    else:
        task_names = utils.pattern_match(args.tasks.split(","), tasks.ALL_TASKS)

    print(f"Selected Tasks: {task_names}")

    description_dict = {}
    if args.description_dict_path:
        with open(args.description_dict_path, "r") as f:
            description_dict = json.load(f)

    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        max_batch_size=args.max_batch_size,
        device=args.device,
        no_cache=args.no_cache,
        limit=args.limit,
        description_dict=description_dict,
        decontamination_ngrams_path=args.decontamination_ngrams_path,
        check_integrity=args.check_integrity,
        write_out=args.write_out,
        output_base_path=args.output_base_path,
    )

    dumped = json.dumps(results, indent=2)
    print(dumped)
    # Raw results: one JSON document per run, blank-line separated.
    with open('lm_eval_results.jsonl', 'a') as fp:
        json.dump(results, fp)
        fp.write('\n\n')
    # Summary line: only one task is run per call, so take the first (and
    # only) key of results['results'].
    with open('lm_eval_results.txt', 'a') as fp:
        dataset_id = list(results['results'].keys())[0]
        metrics = results['results'][dataset_id]
        mkeys = sorted(metrics.keys())
        metrics = ' '.join(f'{k}: {metrics[k]:.4f}' for k in mkeys)
        # Recover the engine name from "engine=<model_id>" in model_args.
        model_id = results['config']['model_args'].split('=')[-1]
        fp.write(f'{model_id} {dataset_id} {metrics}\n')

    if args.output_path:
        os.makedirs(os.path.dirname(args.output_path), exist_ok=True)
        with open(args.output_path, "w") as f:
            f.write(dumped)

    batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))
    print(
        f"{args.model} ({args.model_args}), limit: {args.limit}, provide_description: {args.provide_description}, "
        f"num_fewshot: {args.num_fewshot}, batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
    )
    print(evaluator.make_table(results))


def _write_response_json(response, resp):
    """Send *resp* serialized as JSON as a complete HTTP 200 response.

    Always returns True so handlers can `return _write_response_json(...)`.
    """
    # Status line and headers first — order matters on the HTTP wire.
    response.write_first_line_(200)
    response.write_content_type_header_("application/json", "UTF-8")
    response.write_header_("Connection", "close")
    response.write_end_header_()
    # Body: the JSON document followed by a blank line, then flush the socket.
    body = json.dumps(resp) + '\n\n'
    response.write_(body)
    response.request_.socket_file_.flush()
    return True

# Path of the dump file for the current task; set by save_dataset() before
# each evaluation run starts.
current_file = None

def _cat_greedy_until(prompt, stop, max_gen_toks):
    """Append one greedy_until request to the file named by `current_file`.

    Writes prompt, stop sequences and token budget as three JSON lines
    followed by a blank separator line. Returns '' so the caller can use it
    as a dummy completion text.
    """
    # Explicit UTF-8: ensure_ascii=False emits raw non-ASCII characters,
    # which would raise UnicodeEncodeError under a non-UTF-8 locale default.
    with open(current_file, 'a', encoding='utf-8') as fp:
        fp.write(json.dumps(prompt, ensure_ascii=False) + '\n')
        fp.write(json.dumps(stop, ensure_ascii=False) + '\n')
        fp.write(json.dumps(max_gen_toks) + '\n\n')
    return ''

def _do_post(request, response, route_args):
    """HTTP handler for POST /engines/{model_id}/completions.

    Records the incoming completion request via _cat_greedy_until() and
    replies with JSON {'text': ...}; on any failure replies with
    {'__ex__': <traceback text>} instead of letting the error escape.
    """
    try:
        auth = request.headers_['authorization']
        # assert auth==f'Bearer {API_KEY}', auth
        model_id = route_args['model_id'].strip()
        prompt = request.params_['prompt']
        stop = request.params_['stop']
        top_k = request.params_['top_k']
        max_gen_toks = request.params_['max_gen_toks']
        # Only greedy decoding is supported (NOTE: assert is stripped under -O).
        assert top_k==1
        resp = _cat_greedy_until(prompt, stop, max_gen_toks)
        return _write_response_json(response, {'text':resp})
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; report the traceback to the client instead.
        import traceback
        ex_info = traceback.format_exc()
        print(ex_info)
        return _write_response_json(response, {'__ex__':ex_info})

def start_server(port=8007):
    """Start the blocking HTTP server that records completion requests.

    Routes POST /engines/{model_id}/completions to _do_post; start_() never
    returns, so callers run this in a background thread.
    """
    from serv.lib.http_ import Http_
    server = Http_(ip_='0.0.0.0', port_=port, web_path_='web', max_threads_=100)
    server.add_route_('/engines/{model_id}/completions', _do_post, 'POST')
    print(f'Cool server is started on port: {port}')
    server.start_()

def save_dataset():
    """Dump the greedy_until requests of every task to dataset_8_shot/<task>.txt.

    Starts the recording HTTP server in a background thread, then evaluates
    each task against the 'save' pseudo-model so every request the harness
    makes is appended to the per-task file by _cat_greedy_until().
    """
    import _thread
    _thread.start_new_thread(start_server, ())
    global current_file
    # Create the output directory up front: _cat_greedy_until() opens files
    # in append mode and would fail if the directory were missing.
    os.makedirs('dataset_8_shot', exist_ok=True)
    for dataset_id in greedy_until_tasks:
        current_file = f'dataset_8_shot/{dataset_id}.txt'
        print(current_file)
        main_1(dataset_id, 'save')



# TODO
def main():
    """Evaluate each configured model combination on a fixed subset of tasks.

    Failures for one task/model pair are printed and skipped so the rest of
    the sweep still runs.
    """
    models = [
        # ['glm4_9b'],
        # ['llama3_8B','phi3_mini'],
        ['llama3_8B','phi3_mini','glm4_9b'],
        # ['rerank'],
    ]
    # for dataset_id in greedy_until_tasks:
    task_subset = ['math_algebra','mgsm_en','mgsm_fr']
    for model_id in models:
        for dataset_id in task_subset:
            print(f'Eval: dataset_id={dataset_id} model_id={model_id}')
            try:
                main_1(dataset_id, model_id)
            except Exception as ex:
                print(f'Exception: {ex}')
                import traceback
                traceback.print_exc()





if __name__ == "__main__":
    # Clear lm-eval's on-disk cache so every run re-queries the model.
    # shutil.rmtree is portable and shell-injection-free, unlike shelling
    # out to `rm -rf` via os.system.
    import shutil
    shutil.rmtree('lm_cache', ignore_errors=True)
    import sys
    args = sys.argv[1:]
    if '--save_dataset' in args:
        save_dataset()
    else:
        main()
