import json
import datasets
import feature_util
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from dotenv import load_dotenv
from logger import get_logger
import re
# Pull API keys / endpoint configuration from a local .env file.
load_dotenv()

# Low temperature keeps the yes/no judgements close to deterministic.
llm = ChatOpenAI(temperature=0.1, model="qwen2.5-72b-instruct")

# NOTE(review): `muser` is never used below — `dataset` (further down) loads
# the exact same path. Looks like a leftover; confirm before removing.
muser = datasets.load_from_disk("MUSER-main/muser-dataset")

logger = get_logger()

# Per-event-type decision trees ("logic components") used by dfs().
with open("MUSER-main/component.json") as fin:
    logic_dict = json.load(fin)


# Pre-extracted atomic facts per sample, read from a JSON-lines file and
# keyed by the sample's "idx" field.
atom_dict = {}
with open("merge_res_dict.json") as fin:
    for line in fin:
        line = line.strip()
        obj = json.loads(line)
        atom_dict[obj["idx"]] = obj["atom"]


dataset = datasets.load_from_disk("MUSER-main/muser-dataset")
# dataset = dataset.select(range(0, 160))  # (debug) subsample


def make_prompt(event_type, fact, know_atom, question):
    """Build the chat prompt that asks the LLM to judge a yes/no question.

    The human message embeds the case-type label, the raw fact description,
    and the known atomic facts, and instructs the model to end with a
    【@是/否@】 marker (parsed later by extract_from_resp).

    Returns the PromptValue produced by the template, ready for llm.invoke.
    """
    human_msg = """\
下面给你一段放在三引号中的关于“{event_type}”的`法律文书事实描述`：
```
{fact}
```
根据上述描述，已知的信息有：
{know_atom}

请你根据上述信息，判断下面的问题是否成立：
`{question}`
你的回答应该按照以下步骤进行：
1.不要着急回答问题，先根据事实描述和已知信息进行分析;
2.根据分析得出你的结论;
3.最后再给出你的答案，以“【@是/否@】”的格式，格式化你的答案进行输出。
"""

    messages = [
        ("system", "你是一个精通法律知识的助手，可以帮助人们分析法律文书，并回答相关问题。"),
        ("human", human_msg),
    ]
    fill = {
        "event_type": event_type,
        "fact": fact,
        "know_atom": know_atom,
        "question": question,
    }
    return ChatPromptTemplate(messages).invoke(fill)

def extract_from_resp(text):
    """Pull the model's final answer out of its response text.

    Returns the content of the first 【@...@】 marker; falls back to "否"
    (treated as a negative judgement) when no marker is present.
    """
    found = re.search(r"【@([^@]+)@】", text)
    return found.group(1) if found else "否"

def dojudge(question, info_dict):
    """Ask the LLM whether `question` holds for the current case.

    info_dict carries "fact" (raw description), "atom" (dict of known
    atomic facts), and "event" (case type). Atoms whose value contains
    "不存在" are dropped before prompting. Returns True unless the
    extracted answer contains "否".
    """
    known = [
        f"{k}：{v}"
        for k, v in info_dict["atom"].items()
        if "不存在" not in v
    ]
    prompt = make_prompt(
        info_dict["event"],
        info_dict["fact"],
        ";\n".join(known),
        question,
    )
    answer = extract_from_resp(llm.invoke(prompt).content)
    return "否" not in answer

def dfs(tree, u, feature, info_dict):
    """Walk the decision tree from node `u`, marking features along the way.

    At each non-leaf node the LLM judges the node's question (dojudge);
    a "yes" sets the node's feature bit and descends the "是" branch,
    otherwise the "否" branch is taken. Stops at leaf nodes.
    """
    node = tree[u]
    if node["叶子"]:
        return

    answer = dojudge(u, info_dict)
    if answer:
        feature[feature_util.get_f_index(u)] = 1
    dfs(tree, node["是" if answer else "否"], feature, info_dict)
    

# Output sink for feature rows (JSON lines), append mode so reruns resume.
# NOTE(review): opened globally and closed at the bottom of the script; a
# `with` block around the map() call would be safer — confirm before changing.
fout = open("feature-72B.json", 'a+')

def process(data):
    """Build the feature vector for one dataset record and persist it.

    For every event type of the record that has decision trees in
    logic_dict, runs dfs() over each tree (mutating `feature` in place),
    then appends {"idx", "index", "feature"} as one JSON line to `fout`.
    Flushes immediately so progress survives interruption.
    """
    feature = [0] * feature_util.get_f_size()
    idx = data["idx"]

    info_dict = {"fact": data["fact"]}
    for event in data["event_type"]:
        if event not in logic_dict:
            continue
        info_dict["atom"] = atom_dict[idx][event]
        info_dict["event"] = event
        for tree in logic_dict[event]:
            dfs(tree, tree["根"], feature, info_dict)

    record = {
        "idx": idx,
        "index": data["index"],
        "feature": feature,
    }
    fout.write(json.dumps(record, ensure_ascii=False) + "\n")
    fout.flush()

# Drive process() over every record; output is written via fout side effects,
# so map()'s return value is discarded.
dataset.map(process)
fout.close()
