from treesearch import *
import xml.etree.ElementTree as ET
import os
import json

def XmlReGen(select_dataset=3):
    """Regenerate a dataset's node-tree XML with rewritten, validated urls.

    Reads ``nodetrees/<dataset>/nodetree.xml``, rewrites every node's ``url``
    attribute (dropping any "topics" path component and forcing a ``.txt``
    extension), tags each node with ``vd`` = whether that txt file exists
    under ``data/<dataset>``, and writes the result to
    ``nodetrees/<dataset>/nodetree_output.xml``.

    Args:
        select_dataset: Index into the dataset list below. Defaults to 3
            ("umac"), matching the previously hard-coded selection.
    """

    DataSet = [
        "director",
        "emsplus",
        "rcp",
        "umac"
    ]

    NodeTreeXmlFile = f"nodetrees/{DataSet[select_dataset]}/nodetree.xml"
    TxtDataBasePath = f"data/{DataSet[select_dataset]}"

    # Check whether the rewritten url points at an existing file; returned as
    # the string "True"/"False" because it is stored as an XML attribute.
    def validate_file(url):
        file_path = os.path.join(TxtDataBasePath, url)
        return str(os.path.exists(file_path))

    # Parse the original XML file and return its root element.
    def read_xml(file_path):
        tree = ET.parse(file_path)
        return tree.getroot()

    # Build a new <nodes> tree mirroring the original structure.
    def generate_new_xml(root):
        new_root = ET.Element('nodes')

        def traverse_and_create(node, new_parent):
            node_name = node.attrib.get('name', '')
            node_id = node.attrib.get('id', '')
            node_url = node.attrib.get('url', '')

            # Rewrite the url: drop the "topics" component (if any) and swap
            # the extension for .txt.
            # NOTE(review): splitting on '\\' assumes the source urls use
            # Windows separators; on POSIX os.path.normpath keeps '/' so the
            # split would be a no-op — confirm against the input XML.
            path = os.path.normpath(node_url)
            paths = path.split('\\')
            try:
                paths.remove("topics")
            except ValueError:
                pass  # url had no "topics" component; keep it as-is
            new_node_url = f"{os.path.splitext('/'.join(paths))[0]}.txt"

            # Create the mirrored node with the rewritten url and validity flag.
            new_node = ET.SubElement(new_parent, 'node', 
                name=node_name, 
                id=node_id, 
                url=new_node_url,
                vd=validate_file(new_node_url)
            )

            for child in node:
                traverse_and_create(child, new_node)

        traverse_and_create(root, new_root)
        return new_root

    # Serialize the new tree to disk, UTF-8 with an XML declaration.
    def write_xml(new_root, output_file):
        tree = ET.ElementTree(new_root)
        tree.write(output_file, encoding='utf-8', xml_declaration=True)

    # Main flow: read, transform, write.
    input_file = NodeTreeXmlFile
    output_file = f"nodetrees/{DataSet[select_dataset]}/nodetree_output.xml"

    root = read_xml(input_file)
    new_root = generate_new_xml(root)
    write_xml(new_root, output_file)
    print(f"New XML file has been created and saved to {output_file}")


def TestExtor():
    """Smoke-test RelaSubjectExtor: search for one target file and print every hit."""
    aim_file_path = "/Users/savenneer/工作内容/aiops/data/director/nodes/操作与维护.txt"
    related_dirs = ScanInDirs([
        "/Users/savenneer/工作内容/aiops/data/director/操作指南",
        "/Users/savenneer/工作内容/aiops/data/director/nodes"
    ])
    extor = RelaSubjectExtor(related_dirs)
    for hit in extor.SearchInTree(aim_file_path):
        print(hit)


def PartQuery():
    file_path = "question.jsonl"
    all = ""
    with open(file_path,"r",encoding="utf-8") as file:
        all = file.readlines()
    
    # 拆分
    outDict = dict()
    for line in all:
        ques = json.loads(line)
        print(ques)
        key = ques["document"]
        if key not in outDict:
            outDict[key] = []
        outDict[key].append(str(line))
    
    # 输出到文件
    for key,lineList in outDict.items():
        with open(f"questions/{key}.jsonl","w",encoding="utf-8") as file:
            for line in lineList:
                file.write(line)

def SubTopicSummary():
    """Merge per-file subtopic jsonl lines into subtopic_all.jsonl, ordered by id.

    Reads every file under subtopicjsonl/, indexes lines by their integer
    "id" field, then writes ids 1..103 in order, reporting any missing id.
    """
    BasePath = "subtopicjsonl"
    data = os.listdir(BasePath)
    print(data)

    # Index every line by its integer id.  (The original reused the loop
    # variable `file` as the file handle and shadowed the builtin `id`.)
    lines_by_id = dict()
    for name in data:
        file_path = os.path.join(BasePath, name)
        with open(file_path, "r", encoding="utf-8") as fp:
            for line in fp:
                ques = json.loads(line)
                lines_by_id[int(ques["id"])] = line

    # Write out the fixed id range 1..103 for this dataset, flagging gaps.
    with open("subtopic_all.jsonl", "w", encoding="utf-8") as fp:
        for qid in range(1, 103 + 1):
            if qid not in lines_by_id:
                print(f"缺少id:{qid}")
            else:
                fp.write(lines_by_id[qid])


def ShowSubmitJsonl():
    """Render the submit jsonl via vistools (lazy import keeps it optional)."""
    # Original also imported get_submit_jsonl but never used it; dropped.
    from vistools.ans import show_submit
    show_submit()

def BgeResult_to_InputResults():
    """Convert a bge retrieval result file into the input_results.json format.

    Each query's parallel "file_path"/"score" lists become a list of node
    dicts ({"text", "metadata", "score"}); the converted list is written to
    input_results.json.
    """
    file_path = "05基于demo的bge检索/topk50/director/bge.json"
    with open(file_path, "r", encoding="utf-8") as file:
        data = json.load(file)

    result_list = []
    for one in data:
        # json.load already yields lists; the original's list() casts were
        # redundant.  zip keeps file paths paired with their scores.
        file_path_list = one["file_path"]
        score_list = one["score"]

        nodes_list = [
            {
                "text": "",
                "metadata": {
                    "file_path": fp,
                    "file_name": os.path.basename(fp)
                },
                "score": score
            }
            for fp, score in zip(file_path_list, score_list)
        ]

        result_list.append({
            "id": one["id"],
            "query": one["query"],
            "count": len(file_path_list),
            "nodes": nodes_list
        })

    with open("input_results.json", "w", encoding="utf-8") as fp:
        json.dump(result_list, fp, ensure_ascii=False, indent=4)


def RagKeywrod_to_InputResults():
    """Convert output_rag_keyword.json into the input_results.json format.

    For each query, keeps one node per distinct file_path with the highest
    score seen for it (first-seen order preserved); "count" is the number of
    distinct files.
    """
    ragkeyw_file_path = "output_rag_keyword.json"
    output_path = "input_results.json"

    with open(ragkeyw_file_path, "r", encoding="utf-8") as file:
        data = json.loads(file.read())

    results_list = []
    for one in data:
        # Deduplicate by file_path, keeping the best (max) score per file.
        # Dict insertion order preserves first-seen order of the paths.
        best_score = dict()
        for res in one["selected_file_path"]:
            fp = res["file_path"]
            score = res["score"]
            if fp not in best_score or best_score[fp] < score:
                best_score[fp] = score

        nodes = [
            {
                "text": "",
                "metadata": {
                    "file_path": fp,
                    "file_name": os.path.basename(fp)
                },
                "score": float(score)
            }
            for fp, score in best_score.items()
        ]

        outOne = {
            "id": one["id"],
            "query": one["query"],
            "count": len(nodes),
            "nodes": nodes
        }
        print("len = ", len(nodes))
        results_list.append(outOne)

    with open(output_path, "w", encoding="utf-8") as file:
        json.dump(results_list, file, ensure_ascii=False, indent=4)


def CompareRagSelectedFiles():
    """Compare the files selected by the RAG model against the ground-truth (gt) files.

    Prints a per-question match report, then a summary of how many questions
    had at least one gt file hit.
    """
    from eval.rag_match import GtLoader,RagItem,AnswerLoader

    gt = GtLoader("rag_gt/rcp")
    ans = AnswerLoader("input_results.json")

    question_all_count = len(gt.IDs())
    summary = f"问题总数量:{question_all_count}\n"
    match_all_cout = 0

    for qid in ans.IDs():
        ans_item = ans.get_ragitem(qid)
        gt_item = gt.get_ragitem(qid)

        gts = len(gt_item.file_path_list)
        if gts == 0:
            # Questions with no gt are removed from the effective total but
            # still flow through the report below.
            question_all_count -= 1

        hits = sum(1 for fp in gt_item.file_path_list if fp in ans_item.file_path_list)
        print(f"ID:{qid} GT:{gts} Match:{hits}")

        if hits > 0:
            match_all_cout += 1
        else:
            # Dump both sides so misses can be inspected by eye.
            print("ans => ", ans_item)
            print("gt => ", gt_item)

        print("================\n")

    summary += f"具有有效gt的问题总数:{question_all_count}\n"
    summary += f"被命中的问题的数量:{match_all_cout}\n"
    print(summary)


def DoubleIndex_to_InputResult():
    """Convert double-level index retrieval results into the InputResults format.

    Each query's "file_path" list becomes a list of node dicts with a
    placeholder score of -1; the result is written to input_results.json.
    """
    # doubleindex_input_file = "04二级检索emsplus/doube_index_emplus.json"
    doubleindex_input_file = "04二级检索emsplus/llm_selected/01-select-温度0.75.json"

    with open(doubleindex_input_file,"r",encoding="utf-8") as fp:
        data = json.loads(fp.read())

    result_list = []
    for entry in data:
        paths = entry["file_path"]
        nodes = [
            {
                "text": "",
                "metadata":{
                    "file_path": p,
                    "file_name": os.path.basename(p)
                },
                "score": -1
            }
            for p in paths
        ]
        result_list.append({
            "id": entry["id"],
            "query": entry["query"],
            "count": len(paths),
            "nodes": nodes
        })

    outfile = "input_results.json"
    with open(outfile,"w",encoding="utf-8") as fp:
        json.dump(result_list,fp,ensure_ascii=False,indent=4)


if __name__ == "__main__":
    # One-off utilities; uncomment the step you need.
    # XmlReGen()
    # TestExtor()
    # PartQuery()
    # SubTopicSummary()
    # ShowSubmitJsonl()

    # Active pipeline: build input_results.json from the keyword-RAG output,
    # then score it against the ground truth.
    RagKeywrod_to_InputResults()
    # DoubleIndex_to_InputResult()
    # BgeResult_to_InputResults()
    CompareRagSelectedFiles()
