import json

from tqdm import tqdm
from utils.connect_iwiki import WikiTreeNode, Wiki
from llama_index.core.node_parser import HTMLNodeParser, MarkdownNodeParser
from llama_index.core import Document
from llama_index.core import VectorStoreIndex

from llama_index.llms.openai_like import OpenAILike
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from config.config import *

# NOTE(review): a commented-out OpenAILikeEmbedding configuration containing a
# hardcoded API key was removed here. Credentials must never be committed to
# source, even commented out — rotate that key and load any replacement from
# config, like ModelConfig below.
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)

# Shared chat LLM for all llama_index components (Settings is process-global).
llm = OpenAILike(
    model=ModelConfig.NAME,
    api_base=ModelConfig.API_BASE,
    api_key=ModelConfig.TOKEN,
    is_chat_model=True,
    is_function_calling_model=True,
)
Settings.llm = llm

class DocTreeIndexer:
    """Build a vector index over wiki documents and expose retrieval.

    Documents are parsed as markdown by default: markdown parsing cannot
    handle embedded images, but it chunks the documents more cleanly than
    HTML parsing.
    """

    def __init__(self, doc_format="markdown"):
        """Load (or collect and cache) wiki documents and build the index.

        Args:
            doc_format: "markdown" or "html"; selects both the node parser
                and the body style requested from each wiki tree node.
        """
        wiki = Wiki.from_file("rio.env")
        self.wiki_tree_node: WikiTreeNode = WikiTreeNode.from_file("docs/tree.json", wiki=wiki)

        parser = MarkdownNodeParser() if doc_format == "markdown" else HTMLNodeParser()

        # Prefer the cached document texts; fall back to a (slow) full
        # traversal of the wiki tree and re-create the cache.
        documents_file = "docs/documents.json"
        documents = self._load_cached_documents(documents_file)
        if documents is None:
            documents = self._collect_documents(doc_format)
            self._save_documents(documents, documents_file)

        nodes = parser.get_nodes_from_documents(documents)
        print("Nodes created from documents.")
        self.index = VectorStoreIndex(nodes)

        # Canonical attribute; the original misspelling is kept as an alias
        # so existing callers that reach for `retriver` keep working.
        self.retriever = self.index.as_retriever()
        self.retriver = self.retriever
        self.query_engine = self.index.as_query_engine()

    @staticmethod
    def _load_cached_documents(documents_file):
        """Return Documents from the JSON cache, or None if it is missing/invalid."""
        try:
            with open(documents_file, 'r', encoding='utf-8') as f:
                doc_texts = json.load(f)
            if not (isinstance(doc_texts, list)
                    and all(isinstance(text, str) for text in doc_texts)):
                raise ValueError("Invalid format in documents.json")
            documents = [Document(text=text) for text in doc_texts]
            print(f"Loaded {len(documents)} documents from {documents_file}")
            return documents
        except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
            print(f"Could not load documents from {documents_file}: {e}")
            return None

    def _collect_documents(self, doc_format):
        """Traverse the whole wiki tree and build one Document per node."""
        print("Falling back to traversing index tree...")
        total_nodes = self.wiki_tree_node.count_total_nodes()
        print(f"Total nodes: {total_nodes}")
        style = "markdown" if doc_format == "markdown" else "html"
        documents = []
        with tqdm(total=total_nodes, desc="Collecting Documents", unit="nodes") as pbar:
            for next_node in self.wiki_tree_node.traverse_iteratively():
                documents.append(Document(text=next_node.get_doc_body(style=style)))
                pbar.update(1)
        print("Traversing index tree finished.")
        return documents

    @staticmethod
    def _save_documents(documents, documents_file):
        """Persist the document texts to *documents_file* as a JSON string list."""
        with open(documents_file, 'w', encoding='utf-8') as f:
            json.dump([doc.text for doc in documents], f, ensure_ascii=False, indent=4)
            print(f"Saved {len(documents)} documents to {documents_file}")

    def retrieve(self, query):
        """Retrieve chunks matching *query*.

        Returns:
            list[dict]: one ``{"text": ..., "score": ...}`` per retrieved node.
        """
        return [
            {"text": node.text, "score": node.score}
            for node in self.retriever.retrieve(query)
        ]

if __name__ == "__main__":
    parser = HTMLNodeParser()
    wiki = Wiki.from_file("rio.env")
    node = WikiTreeNode.from_file("docs/tree.json", wiki=wiki)
    doc_str = node.children[0].children[0].get_doc_body(style="html")
    print(doc_str)
    documents = [Document(text=doc_str)]
    # print(documents)
    nodes = parser.get_nodes_from_documents(documents)
    print(nodes)
    # print(nodes[3].text)
    # print(nodes[3].metadata)
    print("nodes length: ", len(nodes))
    index = VectorStoreIndex(nodes)
    query_engine = index.as_query_engine()
    
    retriever = index.as_retriever() 
    node_scores = retriever.retrieve("what is the content of this document?")
    print(node_scores)
    print("node_scores length: ", len(node_scores))
    