import LLM_prompt
from elasticsearch import Elasticsearch
from FlagEmbedding import FlagModel
import faiss
import re
from rag import es_retrieve
from tqdm import tqdm
import json
from openai import OpenAI

import argparse
import os
import logging
import process_data

# Log to inference.log, truncating it on each run (filemode='w').
logging.basicConfig(filename='inference.log', level=logging.INFO, filemode='w',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Command-line interface. `args` is read as a module-level global by
# predict_doses below, so this block must run before that function is called.
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str)  # , choices=['Qwen2.5-7B-Instruct', "glm-4-9b-chat"])
parser.add_argument("--data_dir", type=str)
parser.add_argument("--data_file", type=str)
# NOTE(review): predict_doses also branches on args.rag == "static", which is
# unreachable with these choices — confirm whether "static" should be added here.
parser.add_argument("--rag", type=str, choices=["None", "es"], default="None")
parser.add_argument("--target_type", type=str, choices=["modern", "traditional"])
parser.add_argument("--task", type=str, choices=["predict", "convert"])

args = parser.parse_args()


def ChatGPT_inference(system_prompt, user_prompt):
    """Send a system/user prompt pair to a GPT-4 endpoint.

    Args:
        system_prompt: system-role instruction text.
        user_prompt: user-role query text.

    Returns:
        The raw ChatCompletion object from the client (note: unlike
        QWen_inference, this does NOT extract .choices[0].message.content).
    """
    key = os.environ.get("ChatGPT_API_KEY")
    # SECURITY FIX: the original printed the API key value to stdout,
    # leaking the credential into logs. Only report whether it is set.
    if key:
        print("ChatGPT_API_KEY is set.")
    else:
        print("ChatGPT_API_KEY is not set.")
    # OpenAI-compatible proxy endpoint.
    url = "https://api.b3n.fun/v1"
    client = OpenAI(api_key=key, base_url=url)

    messages = [
        {
            "role": "system", "content": system_prompt,
        },
        {
            "role": "user", "content": user_prompt,
        },
    ]
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
    )

    print(completion)
    return completion


def QWen_inference(system_prompt, user_prompt):
    """Call Qwen-Max through DashScope's OpenAI-compatible endpoint and
    return the assistant reply text."""
    # If the environment variable is not configured, replace the api_key line
    # with a literal Model Studio key: api_key="sk-xxx"
    dashscope_client = OpenAI(
        api_key=os.getenv("QWen_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    conversation = [
        {'role': 'system', 'content': system_prompt},
        {'role': 'user', 'content': user_prompt},
    ]
    # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    completion = dashscope_client.chat.completions.create(
        model="qwen-max",
        messages=conversation,
    )
    print(completion)
    result = completion.choices[0].message.content
    print(result)
    return result


def update_eval_score(previous, new):
    """Fold the per-sample metric dict *new* into the running totals *previous*.

    Always bumps previous["sample_num"] by one, even when *new* is None
    (used by callers to count failed samples). Mutates and returns *previous*.
    """
    previous["sample_num"] += 1
    if new is None:
        return previous
    for metric, value in new.items():
        if metric in previous:
            previous[metric] += value
        else:
            previous[metric] = value
    return previous


def correct_response_prompt(response, example=None):
    """Build a prompt that asks a local LLM to repair *response* into
    standard-compliant JSON, optionally citing *example* as a reference."""
    return f"{response}\n上面文本不符合json规范，请将格式修正成标准的json格式，可以参考{example} "


def json_normalization_llm(parse_json_exception, example):
    """Repair malformed-JSON LLM responses by asking another LLM to fix them.

    Args:
        parse_json_exception: either a path to a JSON file of failure records,
            or a list of (response, gold_answer, user_record) triples.
        example: a sample record whose "assistant" field is shown to the LLM
            as a reference for the expected JSON shape.

    Returns:
        A list of (json_response, gold_answer, user_record) triples.

    An alternative would be structured outputs, e.g.:
    https://openai.com/index/introducing-structured-outputs-in-the-api/
    POST /v1/chat/completions
{
  "model": "gpt-4o-2024-08-06",
  "messages": [
    {
      "role": "system",
      "content": "You are a helpful math tutor."
    },
    {
      "role": "user",
      "content": "solve 8x + 31 = 2"
    }
  ],
  "response_format": {
    "type": "json_schema",
    "json_schema": {
      "name": "dosage_response",
      "strict": true,
      "schema": {
        "type": "object",
        "properties": {
                "药物剂量": {
                  "type": "object"
                },
              "required": ["药物剂量"],
        },
        "additionalProperties": false
      }
    }
  }
}
    """
    if isinstance(parse_json_exception, str):
        # Close the file handle deterministically (the original leaked it).
        with open(parse_json_exception) as f:
            exceptions = json.load(f)
    else:
        assert isinstance(parse_json_exception, list)
        exceptions = parse_json_exception
    llm_json_responses = []
    system_prompt = "你是一位懂得中医的计算机专家"
    for record in exceptions:
        response, gold_answer, user_record = record
        json_prompt = correct_response_prompt(response, json.dumps(example["assistant"], ensure_ascii=False))
        json_response = QWen_inference(system_prompt=system_prompt, user_prompt=json_prompt)
        # BUG FIX: the original appended 2-tuples, but predict_doses unpacks
        # three values per item — include user_record as well.
        llm_json_responses.append((json_response, gold_answer, user_record))
    return llm_json_responses


def predict_doses(records, task, llm, target_type, es=None, llm_normalize=False):
    """Run the dosage prediction/conversion loop over *records* and evaluate.

    Args:
        records: list of sample dicts; records[-1] is reused as a few-shot example.
        task: "predict" or "convert" (anything else raises).
        llm: wrapper exposing build_message_from_template / respond / parse_result.
        target_type: "modern" or "traditional"; used in the output file names.
        es: Elasticsearch client, used when the module-level args.rag == "es".
        llm_normalize: if True, retry responses that failed JSON parsing by
            asking another LLM to fix the formatting.

    Returns:
        (results, score): results is currently always empty (rows are streamed
        to "<target_type>_<task>_results.json" instead); score maps each metric
        name to its average over counted samples.

    Side effects: writes two output files in the working directory and logs
    per-sample parsing failures plus final scores.
    """
    results = []
    score = {"sample_num": 0}
    json_parse_exceptions = []
    exception_parse_num = 0
    parse_exception_writer = open(target_type + "_" + str(task) + "_json_parse_exceptions.jsonl", "w")
    result_writer = open(target_type + "_" + str(task) + "_results.json", "w")
    try:
        for record in tqdm(records):
            if args.rag == "None":
                # Zero-shot: no in-context example.
                example = None
            elif args.rag == "static":
                # NOTE(review): unreachable with the current --rag choices
                # ["None", "es"]; kept for when "static" is re-enabled.
                if task == "convert":
                    # Hand-written Ma Huang Tang example for dose conversion.
                    example = {}
                    example["主治"] = """外感风寒表实证。恶寒发热，头身疼痛，无汗而喘，舌苔薄白，脉浮紧。"""
                    example["med_dose"] = {"麻黄": "3两", "桂枝": "2两", "杏仁": "70个", "甘草": "1两"}
                    example["assistant"] = {"麻黄": "9g", "桂枝": "6g", "杏仁": "9g", "甘草": "3g"}
                else:
                    example = records[-1]
            else:
                # "es": retrieve the most similar indexed example by indication.
                example = es_retrieve(es, {"主治": record["user"]["主治"]})[0][1]
            if task == "predict":
                user_prompt = LLM_prompt.build_predict_prompt(target_type, record["user"], example=example)
            elif task == "convert":
                user_prompt = LLM_prompt.build_convert_prompt(target_type, record["user"]["主治"],
                                                              record["user"]["药物剂量"],
                                                              example=example)
            else:
                raise Exception("Wrong task")
            messages = llm.build_message_from_template(user_prompt)
            gold_answer = record["assistant"] if task == "predict" else record["gold"]
            response = llm.respond(messages)
            try:
                predict_result, possible_json = llm.parse_result(response, logger)
                if possible_json == predict_result:
                    # parse_result signals failure by returning the same value
                    # twice — record it for the optional LLM-repair pass.
                    logger.info("parsing exception %s" % response)
                    json_parse_exceptions.append((response, gold_answer, record["user"]))
                    parse_exception_writer.write(
                        json.dumps((response, gold_answer, record["user"]), ensure_ascii=False) + "\n")
                    exception_parse_num += 1
                result_writer.write(
                    json.dumps({"predict": predict_result, "assistant": gold_answer, "user": record["user"]},
                               ensure_ascii=False) + "\n")
                update_eval_score(score, LLM_prompt.my_metric(predict_result, gold_answer))
            except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
                logger.info("parsing exception %s" % response)
                json_parse_exceptions.append((response, gold_answer, record["user"]))
                parse_exception_writer.write(
                    json.dumps((response, gold_answer, record["user"]), ensure_ascii=False) + "\n")
                exception_parse_num += 1
        if llm_normalize:
            llm_json_responses = json_normalization_llm(json_parse_exceptions, records[-1])
            llm_corrected_num = 0
            for json_response, gold_answer, record_user in llm_json_responses:
                try:
                    # BUG FIX: parse_result returns a (result, possible_json)
                    # pair; the original bound the whole tuple to predict_result.
                    predict_result, possible_json = llm.parse_result(json_response, logger)
                    if possible_json == predict_result:
                        # Still not valid JSON after the repair pass.
                        update_eval_score(score, None)
                        continue
                    result_writer.write(
                        json.dumps({"predict": predict_result, "assistant": gold_answer, "user": record_user},
                                   ensure_ascii=False) + "\n")
                    update_eval_score(score, LLM_prompt.my_metric(predict_result, gold_answer))
                    llm_corrected_num += 1  # BUG FIX: was "+= 0", so the count never moved
                except Exception:
                    update_eval_score(score, None)
            logger.info("llm corrected number: %d" % llm_corrected_num)
    finally:
        # BUG FIX: the original never closed either output file.
        parse_exception_writer.close()
        result_writer.close()
    sample_num = score["sample_num"]
    for k in score:
        if k == "sample_num":
            logger.info("valid sample number: %d" % (score[k]))
            continue
        if sample_num:  # guard against an empty run
            score[k] /= sample_num
        logger.info("%s evaluation score: %.4f" % (k, score[k]))
    logger.info("exception parsing number: %d" % exception_parse_num)
    return results, score


if __name__ == '__main__':
    # NOTE(review): the "/home/liwei23" prefix is machine-specific — consider
    # making it configurable via --model_dir alone.
    llm = LLM_prompt.MyLLM(os.path.join("/home/liwei23", args.model_dir))
    llm.model.eval()  # inference mode for the underlying model
    fpath = os.path.join(args.data_dir, args.data_file)
    # Load task-specific records; each loader defines the record schema
    # consumed by predict_doses.
    if args.task == "predict":
        dosage_records = process_data.load_predict_data(fpath)
    elif args.task == "convert":
        dosage_records = process_data.load_convert_data(fpath)
    else:
        raise Exception("Wrong task")
    results, score = predict_doses(dosage_records, args.task, llm, target_type=args.target_type)
    # results: List[{predict_result, gold_answer)]
