import json
import os
import random
import re
import copy
import LLM_prompt
from enum import Enum
import cn2an
import demjson
import argparse
import finetune_bge

parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, choices=["extract", "predict", "rate"])
parser.add_argument("--model_dir", type=str, choices=['Qwen2.5-7B-Instruct', "glm-4-9b-chat"],
                    default='Qwen2.5-7B-Instruct')
parser.add_argument("--data_dir", type=str)
parser.add_argument("--data_file", type=str)
parser.add_argument("--data_type", type=str)
parser.add_argument("--renew_data", action="store_true")
# BUGFIX: build_predict_finetune_dataset reads args.modify_assistant, but this
# flag was never declared, so the "predict" task crashed with AttributeError.
parser.add_argument("--modify_assistant", action="store_true")
parser.add_argument("--rag", type=str, choices=["None", "es"], default="None")

args = parser.parse_args()

# Matches a dose number (Arabic digits or Chinese numerals) that is NOT at the
# start of the token; the lookbehind excludes characters such as 倍/味/夏/合/角
# so herb names like 半夏, 五味子, 百合 are not mistaken for doses.
num_pat = re.compile(
    r'(?!^)(([0-9\.]+)|([一二三四五六七八九十百半]+))(?<![倍味夏合角])')  # here remains a big problem for number


class DoseType(Enum):
    """Era of the dose units detected in a prescription (see classify())."""

    modern = 1  # metric doses: g / 克
    tradition = 2  # pre-metric units: 两, 钱, 斤, 升, 斗, ...
    vacant = 3  # no recognizable dose found


# Herb frequency vocabulary ("name count" per line); judge_herb uses the
# counts to decide whether a rule-based parse looks trustworthy.
herb_vocab = {}
with open("data/herb_vocab.txt") as vocab_file:  # was left unclosed before
    for vocab_line in vocab_file:
        parts = vocab_line.strip().split(" ")
        if len(parts) == 2:
            herb_vocab[parts[0]] = int(parts[1])

# Oddities collected during parsing; dumped to JSON in build_predict_data().
irregular_dose = set()
irregular_num = set()


def split_finetune_data(data, val_num=1000, test_num=1000):
    """Shuffle `data` in place and split it into (train, val, test).

    The train split receives everything not claimed by val/test.
    """
    random.shuffle(data)
    first_cut = len(data) - val_num - test_num
    second_cut = first_cut + val_num
    return data[:first_cut], data[first_cut:second_cut], data[second_cut:]


def classify(record):
    """Classify a prescription record by the era of its dose units.

    Returns DoseType.modern on the first gram-based dose found,
    DoseType.tradition on the first pre-metric dose, DoseType.vacant
    when no dose matches either pattern.
    """
    tradition_dose_pat = re.compile(
        r'([0-9\.]+)|([一二三四五六七八九十百]+(两|钱|斤|升|斗|枚|个|分|只|枝|片|条))|(半(两|钱|斤|升|斗|枚|个|分|只|枝|片|条))')  # here remains a big problem for number
    modern_dose_pat = re.compile(u"([0-9\\.]+|[一二三四五六七八九十百]+)(g|克)")
    for _, dose in parse_prescription(record[u"组成"]):
        if dose is None:
            continue
        if modern_dose_pat.search(dose):
            return DoseType.modern
        if tradition_dose_pat.search(dose):
            return DoseType.tradition
    return DoseType.vacant


def remove_extra_space(medstr: str):
    """Strip stray whitespace between a herb name and its dose number.

    E.g. "人参 3克" -> "人参3克". Matched spans containing "三七"
    (notoginseng, whose name is made of numeral characters) are kept
    verbatim so the herb name is not glued to a neighbouring token.

    :param medstr: raw prescription fragment
    :return: the fragment with number-adjacent spaces removed
    """
    extra_space_pat = re.compile(
        r"(\s+[0-9\.一二三四五六七八九十各]+(?<![倍味夏合角]))|([0-9\.一二三四五六七八九十]+(?<![倍味夏合角])\s+)")
    pieces = []
    last_end = 0
    # Single pass: copy untouched text, strip whitespace inside each match.
    # (The old version tracked an unused `old_start` and special-cased the
    # empty-match list; this accumulation handles both uniformly.)
    for match in extra_space_pat.finditer(medstr):
        pieces.append(medstr[last_end:match.start()])
        segment = match.group()
        pieces.append(segment if "三七" in segment else segment.strip())
        last_end = match.end()
    pieces.append(medstr[last_end:])
    return "".join(pieces)


def modify_respectively_ge(med_list: list):
    """Distribute a shared "各N" dose over the preceding dose-less herbs.

    Input like 熟地/砂仁 with no dose followed by a "...各" token carrying a
    dose means every queued herb receives that dose.

    :param med_list: list of (herb, dose) pairs, dose may be None
    :return: list of (herb, dose) pairs with shared doses expanded
    """
    # TODO: handle fused tokens like 熟地砂仁各
    expanded = []
    pending = []
    for name, dose in med_list:
        if dose is None:
            pending.append(name)
            continue
        if name.endswith("各"):
            # A bare "各" means the dose applies only to the queued herbs;
            # otherwise the prefix before "各" is itself one of them.
            if len(name) > 1:
                pending.append(name[:-1])
            expanded.extend((herb, dose) for herb in pending)
            pending = []
        else:
            expanded.append((name, dose))
    # Herbs that never met a dose are kept with dose None.
    expanded.extend((herb, None) for herb in pending)
    return expanded


def merge_none_med(med_list):
    """Fuse a dose-less herb with an immediately following nameless dose.

    After bracket removal, e.g. "甘草（炙）1.5克" may split into
    ("甘草", None) followed by ("", "1.5克"); merge them into
    ("甘草", "1.5克"). Other pairs pass through unchanged.
    """
    merged = []
    idx = 0
    total = len(med_list)
    while idx < total:
        name, dose = med_list[idx]
        nxt = med_list[idx + 1] if idx + 1 < total else None
        fuse = (dose is None and name is not None
                and nxt is not None and len(nxt[0]) == 0 and nxt[1] is not None)
        if fuse:
            merged.append((name, nxt[1]))
            idx += 2
        else:
            merged.append(med_list[idx])
            idx += 1
    return merged


def parse_prescription(constituent: str):
    """Split a raw prescription string into (herb, dose) pairs.

    Pipeline: drop bracketed asides and vague quantities, normalize
    separators to spaces, keep only the text before the first 。, tokenize,
    then normalize each token and post-process shared "各" doses.
    """
    noise_pat = re.compile(
        r'(（.+?）)|(\(.+?\))|(第[0-9一二三四五六七八九十]+日)|(各?((等分)|(少许)|(减半)|(不拘多少)))')  # remove brackets, either in English or in Chinese
    cleaned = remove_extra_space(constituent.strip())
    cleaned = re.sub(noise_pat, ' ', cleaned)
    cleaned = re.sub(re.compile(u'、|，|,|:|：|；'), ' ', cleaned)
    cleaned = cleaned.split(u'。')[0]
    tokens = [tok for tok in re.split(re.compile(r"\s+"), cleaned) if len(tok) > 0]
    pairs = merge_none_med([normalize(tok) for tok in tokens])
    if u"各" in cleaned:
        pairs = modify_respectively_ge(pairs)
    return pairs


def normalize(medicine: str):
    """Split one token like "人参3克" into (name, dose) — dose may be None.

    Unparseable doses are recorded in the module-level `irregular_dose` set
    and returned verbatim.
    """
    # 三七 (notoginseng) is spelled with numeral characters; split right
    # after it so the name itself is never treated as a dose.
    sanqi_match = re.search(re.compile(u"(三七)"), medicine)
    if sanqi_match:
        cut = sanqi_match.end()
        return medicine[:cut], medicine[cut:]
    # num_pat already excludes names such as 半夏, 百合, 三角, 五倍子, 五味子.
    number_match = re.search(num_pat, medicine)
    if not number_match:
        return medicine, None
    cut = number_match.start()
    name, dose = medicine[:cut], medicine[cut:]  # dose keeps its unit suffix
    parsed = parse_dose(dose)
    if parsed is None:
        irregular_dose.add(dose)
        return name, dose
    if len(parsed) == 1:
        return name, dose
    return name, "".join(parsed)


def _normalize_unit(unit: str):
    """Map the ASCII gram abbreviation to its Chinese form."""
    return u"克" if unit == "g" else unit


def parse_dose(dose: str):
    """Parse a dose string into a (number, unit) pair of strings.

    Chinese numerals are converted to Arabic via cn2an (failures are
    recorded in the module-level `irregular_num` set and left as-is).
    Returns None when no number is found.

    BUGFIX: the 'g' -> '克' unit normalization was previously applied only
    on the Chinese-numeral path; it now also applies to Arabic numerals so
    "3g" and "三g" produce the same unit.
    """
    cn_match = re.search(re.compile(u"[一二三四五六七八九十百]+"), dose)
    if cn_match:
        raw = dose[cn_match.start():cn_match.end()]
        try:
            num = cn2an.cn2an(raw)
        except ValueError:
            num = raw
            irregular_num.add(num)
        return str(num), _normalize_unit(dose[cn_match.end():])
    an_match = re.search(re.compile(r"[0-9\.]+"), dose)
    if an_match:
        num = dose[an_match.start():an_match.end()]
        return num, _normalize_unit(dose[an_match.end():])
    return None


def justify_llm_result(constituent: str, med_dose: dict):
    """Placeholder: intended to cross-check an LLM-extracted dose map
    against the raw prescription text. Not implemented yet — a no-op."""
    pass


def judge_herb(med_dose, threshold=2):
    """Return True when every parsed herb is in the vocabulary with a
    frequency strictly above `threshold` — i.e. the parse looks reliable."""
    return all(
        med in herb_vocab and herb_vocab[med] > threshold
        for med, _ in med_dose
    )


def build_extract_finetune_data_for_llm(data):
    """Build chat-format finetuning samples for the dose-extraction task.

    Keeps only records whose rule-based parse is trustworthy (all herbs
    well known, at most two unresolved doses) and pairs the raw
    composition prompt with the parsed dose map as the target.
    """
    samples = []
    for record in data:
        constituent = record.get(u"组成")
        if constituent is None:
            continue
        med_dose = parse_prescription(constituent)
        if not judge_herb(med_dose, threshold=5):
            continue
        # Count unresolved entries; a None herb name invalidates the parse.
        unresolved = 0
        for med, dose in med_dose:
            if med is None:
                unresolved = max(3, len(med_dose))
                break
            if dose is None:
                unresolved += 1
        if unresolved > 2:
            continue
        assistant_payload = {u"药物剂量": {med: dose for med, dose in med_dose}}
        user_prompt = LLM_prompt.build_extract_prompt(constituent)
        samples.append(LLM_prompt.MyLLM.build_message_from_template(
            user_prompt,
            assistant_prompt=json.dumps(assistant_payload, ensure_ascii=False)))
    return samples


def process_data(data: list, llm=None, renew_data=False):
    if not renew_data:
        if llm is None and os.path.exists("./data/modern_records.json"):
            modern_list = json.load(open("./data/modern_records.json"))
            traditional_list = json.load(open("./data/traditional_records.json"))
            vacant_list = json.load(open("./data/vacant_records.json"))
            return modern_list, traditional_list, vacant_list
        elif llm is not None and os.path.exists("./data/llm_modern_records.json"):
            modern_list = json.load(open("./data/llm_modern_records.json"))
            traditional_list = json.load(open("./data/llm_traditional_records.json"))
            vacant_list = json.load(open("./data/llm_vacant_records.json"))
            return modern_list, traditional_list, vacant_list
    modern_list = []
    tradition_list = []
    vacant_list = []
    for record in data:
        if u"组成" not in record or record[u"组成"] is None:
            # print(record.keys())
            continue
        assert u"组成" in record, record.keys()
        med_dose = parse_prescription(record[u"组成"])
        med_justificability = judge_herb(med_dose)
        if llm and not med_justificability:
            llm_result = llm_parse_prescription(record[u"组成"], llm)
            if llm_result:
                meds = [md[0] for md in llm_result]
                doses = [md[1] for md in llm_result]
            else:
                continue
        else:
            meds = [md[0] for md in med_dose]
            doses = [md[1] for md in med_dose]
        dose_type = classify(record)
        match dose_type:
            case DoseType.modern:
                modern_list.append({**record, "药物": meds, "剂量": doses})
            case DoseType.tradition:
                tradition_list.append({**record, "药物": meds, "剂量": doses})
            case DoseType.vacant:
                vacant_list.append({**record, "药物": meds})
    if llm:
        print("modern records num", len(modern_list))
        json.dump(modern_list, open("./data/llm_modern_records.json", "w"), ensure_ascii=False, indent=3)
        print("tradition records num", len(tradition_list))
        json.dump(tradition_list, open("./data/llm_traditional_records.json", "w"), ensure_ascii=False, indent=3)
        print("vacant records num", len(vacant_list))
        json.dump(vacant_list, open("./data/llm_vacant_records.json", "w"), ensure_ascii=False, indent=3)
    else:
        print("modern records num", len(modern_list))
        json.dump(modern_list, open("./data/modern_records.json", "w"), ensure_ascii=False, indent=3)
        print("tradition records num", len(tradition_list))
        json.dump(tradition_list, open("./data/traditional_records.json", "w"), ensure_ascii=False, indent=3)
        print("vacant records num", len(vacant_list))
        json.dump(vacant_list, open("./data/vacant_records.json", "w"), ensure_ascii=False, indent=3)
    return modern_list, tradition_list, vacant_list


def llm_parse_prescription(constituent, llm):
    """Ask the LLM to extract herb/dose pairs from a composition string.

    Returns the decoded JSON payload found inside a ```json ...``` fence in
    the model response, or None when no fence is found or decoding fails.
    """
    user_prompt = LLM_prompt.build_extract_prompt(constituent)
    messages = llm.build_message_from_template(user_prompt)
    response = llm.respond(messages)
    json_pat = re.compile(r'```json\s?\{[^{}]+\}```')
    json_match = re.search(json_pat, response)
    print(response)
    if json_match:
        try:
            # BUGFIX: the pattern has no capture groups, so group(1) raised
            # IndexError on every match; take the whole match and strip the
            # 7-char "```json" opener and 3-char "```" closer.
            return demjson.decode(json_match.group(0)[7:-3])
        except demjson.JSONDecodeError:
            return None
    return None


def build_predict_data():
    """Run LLM-assisted extraction over the full corpus and dump results,
    including the sets of irregular doses/numerals met along the way."""
    with open("data/data_sorted.json") as fin:  # was left unclosed before
        data = json.load(fin)
    llm = LLM_prompt.MyLLM(os.path.join("/home/liwei23", args.model_dir))
    llm.model.eval()
    process_data(data, llm)
    with open("irregular_dose_set.json", "w") as fout:
        json.dump(list(irregular_dose), fout, ensure_ascii=False, indent=4)
    with open("irregular_num_set.json", "w") as fout:
        json.dump(list(irregular_num), fout, ensure_ascii=False, indent=4)


def load_predict_data(data_path):
    """Load extracted records and reshape them into user/assistant pairs.

    Each record needs "药物" (herbs) and "剂量" (doses); "主治"
    (indication) is copied into the user side when present.

    :param data_path: path to a JSON list of extracted records
    :return: list of {"user": {...}, "assistant": {"药物剂量": {...}}}
    """
    with open(data_path) as fin:  # was left unclosed before
        data = json.load(fin)
    result = []
    for record in data:
        user_input = {u"组成": record[u"药物"]}
        if u"主治" in record:
            user_input[u"主治"] = record[u"主治"]
        dose_map = {med: dose for med, dose in zip(record[u"药物"], record[u"剂量"])}
        result.append({"user": user_input, "assistant": {u"药物剂量": dose_map}})
    return result


def load_convert_data(fpath):
    """Load JSONL prediction output into pseudo/gold/user records.

    :param fpath: path to a JSONL file; each line holds "predict",
        "assistant" and "user" objects
    :return: list of {"pseudo": ..., "gold": ..., "user": ...}
    """
    result = []
    with open(fpath) as reader:  # iterating a raw open() leaked the handle
        for line in reader:
            sample = json.loads(line)
            result.append({
                "pseudo": sample["predict"]["药物剂量"],  # model-extracted doses
                "gold": sample["assistant"]["药物剂量"],  # reference doses (modern or traditional)
                "user": sample["user"],  # composition / indication inputs
            })
    return result


def filter_traditional_record(record):
    """Return True when the record should be skipped: more than one herb
    in the assistant dose map has an empty/missing dose."""
    doses = record["assistant"]["药物剂量"].values()
    missing = sum(1 for dose in doses if not dose)
    return missing > 1


def build_rate_finetune_dataset(renew_data):
    """Build pairwise-comparison samples for the dose-rating task.

    Era is not distinguished here: modern and traditional records are
    pooled, turned into contrastive (anchor, positive, negative) triples,
    and the better dose is randomly placed first or second so the target
    label is balanced.
    """
    with open("data/data_sorted.json") as fin:
        data = json.load(fin)
    llm = LLM_prompt.MyLLM("/home/liwei23/tcm_dosage/extraction_output/checkpoint-8000")
    llm.model.eval()
    modern_records, traditional_records, _ = process_data(data, llm, renew_data)
    triples = finetune_bge.build_contrastive_data_for_dose(modern_records + traditional_records)
    samples = []
    for cure_text, anchor, pos, neg in triples:
        # Coin flip decides whether the positive dose is shown first.
        previous_better = 1 if random.random() < 0.5 else 0
        shown_first, shown_second = (pos, neg) if previous_better == 1 else (neg, pos)
        user_prompt = LLM_prompt.build_rating_prompt(
            mode="contrastive", cure_text=cure_text, original_dose=anchor,
            rate_dose=shown_first, rate_dose_other=shown_second)
        assistant_prompt = "%d 号方剂剂量更好" % (previous_better + 1)
        samples.append(LLM_prompt.MyLLM.build_message_from_template(
            user_prompt,
            assistant_prompt=json.dumps(assistant_prompt, ensure_ascii=False)))
    return samples


def build_predict_finetune_dataset(data_path, data_type):
    """Build chat-format finetuning samples for the dose-prediction task.

    Records with more than one missing dose are dropped
    (filter_traditional_record).

    :param data_path: JSON file of extracted records (see load_predict_data)
    :param data_type: "traditional" or "modern"; forwarded to the prompt builder
    :return: list of chat message lists
    """
    data = load_predict_data(data_path)
    result = []
    # BUGFIX: argparse never declares --modify_assistant, so reading
    # args.modify_assistant crashed with AttributeError; default to False
    # when the flag is absent.
    modify = getattr(args, "modify_assistant", False)
    for record in data:
        if filter_traditional_record(record):
            continue
        user_prompt = LLM_prompt.build_predict_prompt(input_type=data_type, user_input=record["user"])
        assistant_prompt = LLM_prompt.modify_assistant(record["assistant"]) if modify else record["assistant"]
        messages = LLM_prompt.MyLLM.build_message_from_template(
            user_prompt,
            assistant_prompt=json.dumps(assistant_prompt, ensure_ascii=False))
        result.append(messages)
    return result


def write_message_jsonl(fnames, data_parts, dir):
    """Dump each split to `dir/<fname>` as JSON-lines of {"messages": ...}."""
    for fname, samples in zip(fnames, data_parts):
        target = os.path.join(dir, fname)
        with open(target, "w") as writer:
            writer.writelines(
                json.dumps({"messages": sample}, ensure_ascii=False) + "\n"
                for sample in samples
            )


if __name__ == "__main__":
    # Entry point: build the finetuning dataset for the selected --task and
    # write train/val/test JSONL splits into --data_dir.
    with open("data/data_sorted.json") as fin:
        data = json.load(fin)
    if args.task == "extract":
        finetune_data = build_extract_finetune_data_for_llm(data)
    elif args.task == "predict":
        # Prediction data is era-specific: traditional vs. modern doses.
        if args.data_type == "traditional":
            finetune_data = build_predict_finetune_dataset("./data/llm_traditional_records.json", args.data_type)
        elif args.data_type == "modern":
            finetune_data = build_predict_finetune_dataset("./data/llm_modern_records.json", args.data_type)
        else:
            # BUGFIX: previously fell through with finetune_data unbound and
            # crashed later with NameError instead of a clear message.
            raise Exception("Unexpected data_type: %s" % args.data_type)
    elif args.task == "rate":
        finetune_data = build_rate_finetune_dataset(args.renew_data)
    else:
        raise Exception("Unexpected task")
    train_data, val_data, test_data = split_finetune_data(finetune_data)
    write_message_jsonl(["train.jsonl", "val.jsonl", "test.jsonl"], [train_data, val_data, test_data], args.data_dir)
