import os
import sys
from typing import Dict, Any

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import OLLAMA_BASE_URL, LLM_MODEL, ARCHITECT_PROMPT
from utils.embeddings import create_embeddings_model
from utils.callgraph import CallGraphBuilder
from agents.base_agent import BaseAgent
from pydantic import BaseModel, Field  # BaseModel and Field define the structured-output data model
from langchain.output_parsers import PydanticOutputParser  # parses LLM output into the pydantic model
from langchain_core.messages import AIMessage

# Schema for one generated documentation section
class Section(BaseModel):
    """Structured output schema: one documentation section (title + body)."""
    # NOTE: the Field descriptions below are runtime strings consumed by the
    # PydanticOutputParser's format instructions, so they are left in Chinese.
    title: str = Field(description="该章节的标题")  # section title
    description: str = Field(description="该章节的内容")  # section body text

class ArchitectAgent(BaseAgent):
    """Architect agent: produces an architectural overview for a single file.

    Combines three inputs for the LLM prompt: the file's own content, a
    vector-retrieved slice of project context, and the call-graph entries of
    the functions defined in the file.
    """

    def __init__(self, vector_store, project_path, call_graph):
        """
        Args:
            vector_store: vector store exposing ``as_retriever`` for context lookup.
            project_path: root directory of the project under analysis.
            call_graph: dict with 'function_info', 'caller_graph' and
                'call_graph' keys (as built by CallGraphBuilder — assumed; TODO confirm).
        """
        super().__init__("架构师")
        self.project_path = project_path
        self.call_graph = call_graph
        self.vector_store = vector_store
        # Structured-output parser for Section. Currently not applied inside
        # analyze_file (the raw streamed text is returned); kept for callers
        # that want to post-process the overview.
        self.parser = PydanticOutputParser(pydantic_object=Section)

        # Load the prompt template text from disk.
        with open(ARCHITECT_PROMPT, 'r', encoding='utf-8') as f:
            architect_template = f.read()

        # Build the prompt template. NOTE(review): the parser's
        # format_instructions are deliberately NOT injected as a partial
        # variable — the template is used as-is.
        self.prompt = PromptTemplate(
            template=architect_template,
            input_variables=["call_graph", "project_context", "file_path", "file_content"],
        )

        # LLM chain. analyze_file streams via run_with_streaming instead, but
        # the attribute is kept for external callers.
        self.chain = LLMChain(llm=self.llm, prompt=self.prompt)

        # Retriever returning the 5 most similar documents.
        self.retriever = vector_store.as_retriever(
            search_kwargs={"k": 5}
        )

    def _get_relevant_context(self, file_path):
        """Return the concatenated page contents of documents relevant to *file_path*."""
        query = f"文件 {file_path} 的功能和作用"
        docs = self.retriever.invoke(query)
        return "\n".join(doc.page_content for doc in docs)

    def analyze_file(self, file_path):
        """Analyze one file and return ``{'file_path', 'overview', 'inputs'}``.

        On a read failure the SAME dict shape is returned with the error
        message in 'overview' and empty 'inputs', so callers can index the
        result uniformly. (Bug fix: this used to return a bare string on
        error but a dict on success.)
        """
        full_path = os.path.join(self.project_path, file_path)
        try:
            # errors='ignore' makes the read best-effort for odd encodings.
            with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
                file_content = f.read()
        except Exception as e:
            return {
                'file_path': file_path,
                'overview': f"Error reading file {file_path}: {str(e)}",
                'inputs': {},
            }

        # Project context relevant to this file, via vector retrieval.
        project_context = self._get_relevant_context(file_path)

        # Call-graph entries for every function defined in this file.
        file_functions = [
            {
                'name': info['name'],
                'callers': self.call_graph['caller_graph'].get(func_name, []),
                'callees': self.call_graph['call_graph'].get(func_name, []),
            }
            for func_name, info in self.call_graph['function_info'].items()
            if info['file'] == file_path
        ]

        # Assemble the prompt variables.
        inputs = {
            'call_graph': str(file_functions),
            'project_context': project_context,
            'file_path': file_path,
            'file_content': file_content
        }
        print("🤖 架构师得到了输入:", inputs)

        # Run the prompt with streaming output (provided by BaseAgent).
        result = self.run_with_streaming(
            self.prompt,
            inputs,
            step="分析文件结构",
            file_path=file_path
        )

        return {
            'file_path': file_path,
            'overview': result,
            'inputs': inputs
        }
