from typing import Dict, Any, List, TypedDict
from langgraph.graph import StateGraph, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from config import Config
from config.thresholds import ModelThresholds
from rag.chroma_manager import chroma_manager
from monitoring import trace_function, trace_workflow
import logging

# Langfuse's OpenAI drop-in integration is intentionally disabled because this
# module invokes the model through LangChain (callback handlers are used instead):
# try:
#     from langfuse.openai import openai
# except ImportError:
#     pass

# Langfuse 可用性检查 - 延迟导入
LANGFUSE_AVAILABLE = None

def _check_langfuse_available():
    """检查Langfuse是否可用"""
    global LANGFUSE_AVAILABLE
    if LANGFUSE_AVAILABLE is None:
        try:
            import langfuse
            LANGFUSE_AVAILABLE = True
        except ImportError:
            LANGFUSE_AVAILABLE = False
    return LANGFUSE_AVAILABLE

# Module-level logger, namespaced to this module per logging best practice.
logger = logging.getLogger(__name__)

# State definition - kept consistent with the original (pre-refactor) logic
class CommitWorkflowState(TypedDict):
    """Shared state threaded through the commit-message LangGraph workflow."""
    # Raw per-file summary dicts supplied by the caller.
    summaries: List[Dict[str, Any]]
    # Summaries sorted by their "order" key (missing key defaults to 0).
    sorted_summaries: List[Dict[str, Any]]
    # The "summary" text extracted from each sorted entry.
    summary_contents: List[str]
    # Change categories inferred by keyword matching on the summary texts.
    change_types: List[str]
    # File paths collected during analysis - never populated in the visible
    # code (always an empty list); kept for compatibility with the old shape.
    file_paths: List[str]
    # Aggregated fields (file_path/change_type/change_content/diff_content)
    # fed into the commit prompt template.
    summary_info: Dict[str, str]
    # Final LLM-generated commit message text.
    commit_message: str
    # Structured result returned by generate_structured_summary.
    structured_output: Dict[str, Any]

class CommitMessageWorkflow:
    """Commit message generation workflow.

    Refactored version of the original CommitMessageModel: the same
    processing steps, expressed as a linear LangGraph pipeline. Each graph
    node corresponds to one method of the original implementation, and the
    original behavior is preserved step-for-step.
    """
    
    def __init__(self, model_name: str = None):
        """Initialize the LLM client and compile the workflow graph.

        Args:
            model_name: Optional model-name override; falls back to
                Config.LLM_MODEL_NAME when None.
        """
        # Keep the same initialization logic as the original CommitMessageModel.
        self.model_name = model_name or Config.LLM_MODEL_NAME
        self.llm = ChatOpenAI(
            model=self.model_name,
            openai_api_key=Config.OPENAI_API_KEY,
            openai_api_base=Config.OPENAI_BASE_URL,
            temperature=ModelThresholds.MIN_TEMPERATURE  # keep the original temperature setting
        )
        self._build_workflow()
    
    def _build_workflow(self):
        """Build the workflow graph - nodes mirror the original processing steps."""
        workflow = StateGraph(CommitWorkflowState)
        
        # Add nodes - each corresponds to a method of the original implementation.
        workflow.add_node("sort_summaries", self._sort_summaries)
        workflow.add_node("extract_contents", self._extract_contents)
        workflow.add_node("analyze_changes", self._analyze_changes)
        workflow.add_node("build_summary_info", self._build_summary_info)
        workflow.add_node("generate_commit_message", self._generate_commit_message)
        workflow.add_node("create_structured_output", self._create_structured_output)
        
        # Entry point of the pipeline.
        workflow.set_entry_point("sort_summaries")
        
        # Edges - keep the original, strictly linear processing order.
        workflow.add_edge("sort_summaries", "extract_contents")
        workflow.add_edge("extract_contents", "analyze_changes")
        workflow.add_edge("analyze_changes", "build_summary_info")
        workflow.add_edge("build_summary_info", "generate_commit_message")
        workflow.add_edge("generate_commit_message", "create_structured_output")
        workflow.add_edge("create_structured_output", END)
        
        self.graph = workflow.compile()
    
    def _sort_summaries(self, state: CommitWorkflowState) -> CommitWorkflowState:
        """Sort summaries by their "order" key - mirrors the original sorting logic."""
        summaries = state["summaries"]
        
        # Keep the original sorting logic; a missing "order" key sorts as 0.
        sorted_summaries = sorted(summaries, key=lambda x: x.get("order", 0))
        logger.info(f"按order排序后的summaries: {sorted_summaries}")
        
        return {
            **state,
            "sorted_summaries": sorted_summaries
        }
    
    def _extract_contents(self, state: CommitWorkflowState) -> CommitWorkflowState:
        """Extract the summary text from each entry - mirrors the original extraction logic."""
        sorted_summaries = state["sorted_summaries"]
        
        # Keep the original extraction logic; missing "summary" keys become "".
        summary_contents = [item.get("summary", "") for item in sorted_summaries]
        logger.info(f"提取的summary内容: {summary_contents}")
        
        return {
            **state,
            "summary_contents": summary_contents
        }
    
    def _analyze_changes(self, state: CommitWorkflowState) -> CommitWorkflowState:
        """Classify change types by keyword matching - mirrors the original analysis logic."""
        summary_contents = state["summary_contents"]
        
        # Keep the original change-type analysis; sets deduplicate categories.
        change_types = set()
        file_paths = set()
        
        for summary in summary_contents:
            # Keep the original keyword rules (non-exclusive: a summary may
            # match several categories). Keywords: security/validation,
            # performance/cache, refactor/code, bug/fix, feature/addition.
            if "安全" in summary or "验证" in summary:
                change_types.add("安全修复")
            if "性能" in summary or "缓存" in summary:
                change_types.add("性能优化")
            if "重构" in summary or "代码" in summary:
                change_types.add("代码重构")
            if "bug" in summary.lower() or "修复" in summary:
                change_types.add("bug修复")
            if "功能" in summary or "新增" in summary:
                change_types.add("功能增强")
        
        # Keep the original default category when nothing matched.
        if not change_types:
            change_types.add("代码改进")
        
        logger.info(f"识别到的变更类型: {change_types}")
        
        # NOTE: file_paths is never added to in the visible code; it always
        # ends up as an empty list (kept for state-shape compatibility).
        return {
            **state,
            "change_types": list(change_types),
            "file_paths": list(file_paths)
        }
    
    def _build_summary_info(self, state: CommitWorkflowState) -> CommitWorkflowState:
        """Aggregate summaries into prompt fields - mirrors the original _build_summary_info method."""
        summaries = state["summaries"]
        summary_contents = state["summary_contents"]
        change_types = state["change_types"]
        
        # Keep the original aggregation logic.
        if not summaries:
            logger.warning("没有收到summary数据")
            summary_info = {
                "file_path": "无变更",
                "change_type": "无变更",
                "change_content": "无变更",
                "diff_content": ""
            }
        else:
            # Format the individual commit messages as a bulleted list.
            commit_messages_list = []
            for i, content in enumerate(summary_contents):
                # If the content is already backtick-wrapped commit-message
                # format, strip the backticks before re-wrapping.
                if content.strip().startswith('`') and content.strip().endswith('`'):
                    # Remove the surrounding backticks.
                    clean_content = content.strip()[1:-1]
                    commit_messages_list.append(f"- `{clean_content}`")
                else:
                    # Not in the standard format - wrap it as-is.
                    commit_messages_list.append(f"- `{content}`")
            
            # Join all commit messages into a single string.
            change_content = "\n".join(commit_messages_list)
            
            logger.info(f"格式化后的commit messages: {change_content}")
            
            # Build the aggregate description fields.
            file_path = f"多个文件({len(summaries)}个改进)"
            change_type = "代码改进"  # no longer used by the new prompt, kept for compatibility
            diff_content = f"汇总了{len(summaries)}个commit messages"
            
            summary_info = {
                "file_path": file_path,
                "change_type": change_type,
                "change_content": change_content,
                "diff_content": diff_content
            }
        
        logger.info(f"构建的汇总信息: {summary_info}")
        
        return {
            **state,
            "summary_info": summary_info
        }
    
    def _generate_commit_message(self, state: CommitWorkflowState) -> CommitWorkflowState:
        """Call the LLM to produce the commit message - mirrors the original generation logic."""
        summary_info = state["summary_info"]
        
        # Build the prompt from the COMMIT_PROMPT template in Config.
        commit_prompt_template = ChatPromptTemplate.from_messages([
            ("system", "你是一个专业的Git commit message生成助手。"),
            ("human", Config.get_commit_prompt(
                file_path=summary_info["file_path"],
                change_type=summary_info["change_type"],
                change_content=summary_info["change_content"],
                diff_content=summary_info["diff_content"]
            ))
        ])
        
        # Keep the original chained invocation: prompt -> LLM -> string parser.
        chain = commit_prompt_template | self.llm | StrOutputParser()
        
        logger.info("调用LLM生成commit message...")
        logger.info(f"使用的prompt参数: file_path={summary_info['file_path']}, change_type={summary_info['change_type']}")
        
        # Fetch the already-initialized Langfuse callback handler, if enabled.
        # Imported locally to avoid a hard dependency at module import time.
        from monitoring.langfuse_manager import langfuse_manager
        langfuse_handler = langfuse_manager.handler if langfuse_manager.is_enabled() else None
        
        # Invoke the chain, attaching the Langfuse callback when available.
        if langfuse_handler:
            result = chain.invoke({
                "file_path": summary_info["file_path"],
                "change_type": summary_info["change_type"],
                "change_content": summary_info["change_content"],
                "diff_content": summary_info["diff_content"]
            }, config={"callbacks": [langfuse_handler]})
        else:
            result = chain.invoke({
                "file_path": summary_info["file_path"],
                "change_type": summary_info["change_type"],
                "change_content": summary_info["change_content"],
                "diff_content": summary_info["diff_content"]
            })
        
        logger.info(f"LLM生成结果: {result}")
        
        return {
            **state,
            "commit_message": result
        }
    
    def _create_structured_output(self, state: CommitWorkflowState) -> CommitWorkflowState:
        """Assemble the structured result - mirrors the original generate_structured_summary output."""
        summaries = state["summaries"]
        commit_message = state["commit_message"]
        change_types = state["change_types"]
        
        # Keep the original structured-output shape.
        from datetime import datetime
        
        structured_output = {
            "result": commit_message,
            "model_used": self.model_name,
            "timestamp": datetime.now().isoformat(),
            "commit_count": len(summaries),
            "files_affected": len(summaries),  # original logic: one file per summary assumed
            "change_types": change_types
        }
        
        logger.info(f"最终生成的结构化结果: {structured_output}")
        
        return {
            **state,
            "structured_output": structured_output
        }
    
    def generate_summary_commit_message(self, summaries: List[Dict[str, Any]]) -> str:
        """Generate one aggregated commit message from multiple summaries.

        Kept as the original public interface; returns either the generated
        commit message or a formatted error string.
        """
        try:
            logger.info(f"开始处理 {len(summaries)} 个summary")
            
            result = self.run(summaries)
            
            if "error" in result:
                return self._handle_error(Exception(result["error"]), "生成汇总commit message")
            
            return result["commit_message"]
            
        except Exception as e:
            logger.error(f"生成汇总commit message时发生错误: {str(e)}")
            return self._handle_error(e, "生成汇总commit message")
    
    def generate_structured_summary(self, summaries: List[Dict[str, Any]], user_id: str = None, session_id: str = None) -> Dict[str, Any]:
        """Generate the structured aggregated result.

        Kept as the original public interface; returns the structured_output
        dict on success, or an {"error": ..., "result": ...} dict on failure.
        """
        logger.info(f"开始生成结构化汇总，输入summaries: {summaries}")
        
        try:
            result = self.run(summaries, user_id=user_id, session_id=session_id)
            
            if "error" in result:
                return {
                    "error": result["error"],
                    "result": "工作流执行失败"
                }
            
            return result["structured_output"]
            
        except Exception as e:
            logger.error(f"生成结构化汇总失败: {str(e)}")
            return {
                "error": f"生成结构化汇总失败: {str(e)}",
                "result": "工作流执行失败"
            }
    
    def _extract_change_types(self, summaries: List[Dict[str, Any]]) -> List[str]:
        """Classify each summary into exactly one change type (first match wins).

        Kept from the original implementation. Unlike _analyze_changes, this
        is mutually exclusive (elif chain) and may contain duplicates.
        """
        change_types = []
        for summary in summaries:
            content = summary.get("summary", "").lower()
            if "安全" in content or "验证" in content:
                change_types.append("安全修复")
            elif "性能" in content or "缓存" in content:
                change_types.append("性能优化")
            elif "重构" in content or "代码" in content:
                change_types.append("代码重构")
            elif "bug" in content or "修复" in content:
                change_types.append("bug修复")
            elif "功能" in content or "新增" in content:
                change_types.append("功能增强")
            else:
                change_types.append("代码改进")
        return change_types
    
    def _handle_error(self, error: Exception, operation: str = "操作") -> str:
        """Format a uniform error message - kept from the original implementation."""
        return f"{operation}时发生错误: {str(error)}"
    
    @trace_function(name="commit_workflow_run")
    def run(self, summaries: List[Dict[str, Any]], user_id: str = None, session_id: str = None) -> Dict[str, Any]:
        """Run the workflow end to end.

        Args:
            summaries: Per-file summary dicts to aggregate.
            user_id: Optional identifier forwarded to workflow tracing.
            session_id: Optional identifier forwarded to workflow tracing.

        Returns:
            {"commit_message", "structured_output", "workflow_steps"} on
            success, or {"error": ...} on failure (errors are also traced).
        """
        try:
            initial_state = {
                "summaries": summaries,
                "sorted_summaries": [],
                "summary_contents": [],
                "change_types": [],
                "file_paths": [],
                "summary_info": {},
                "commit_message": "",
                "structured_output": {}
            }
            
            result = self.graph.invoke(initial_state)
            
            # Record the per-node inputs/outputs for workflow tracing.
            steps = [
                {"name": "sort_summaries", "input": {"summaries_count": len(summaries)}, "output": {"sorted_count": len(result.get("sorted_summaries", []))}},
                {"name": "extract_contents", "input": {"sorted_count": len(result.get("sorted_summaries", []))}, "output": {"contents_count": len(result.get("summary_contents", []))}},
                {"name": "analyze_changes", "input": {"contents_count": len(result.get("summary_contents", []))}, "output": {"change_types": result.get("change_types", [])}},
                {"name": "build_summary_info", "input": {"change_types": result.get("change_types", [])}, "output": {"summary_info": result.get("summary_info", {})}},
                {"name": "generate_commit_message", "input": {"summary_info": result.get("summary_info", {})}, "output": {"commit_message": result.get("commit_message", "")}},
                {"name": "create_structured_output", "input": {"commit_message": result.get("commit_message", "")}, "output": {"structured_output": result.get("structured_output", {})}}
            ]
            
            final_result = {
                "commit_message": result["commit_message"],
                "structured_output": result["structured_output"],
                # NOTE(review): this counts state keys minus one, not executed
                # nodes - kept as-is from the original; confirm intent.
                "workflow_steps": len(result) - 1
            }
            
            # Emit the workflow trace.
            trace_workflow("commit_message", steps, final_result, {
                "summaries_count": len(summaries),
                "workflow_type": "commit_message_generation"
            }, user_id=user_id, session_id=session_id)
            
            return final_result
            
        except Exception as e:
            logger.error(f"工作流执行失败: {str(e)}")
            error_result = {"error": f"工作流执行失败: {str(e)}"}
            
            # Trace the failure as well.
            trace_workflow("commit_message", [], error_result, {
                "summaries_count": len(summaries),
                "workflow_type": "commit_message_generation",
                "error": str(e)
            }, user_id=user_id, session_id=session_id)
            
            return error_result

# RAG workflow - refactors the existing RAG system into a LangGraph pipeline
class RAGWorkflowState(TypedDict):
    """Shared state threaded through the RAG question-answering workflow."""
    # The user's question.
    question: str
    # Name of the document collection to search.
    collection_name: str
    # Number of documents to keep after reranking.
    top_k: int
    # Score threshold passed to the reranker (see _rerank_documents).
    rerank_threshold: float
    # Documents returned by the initial similarity search.
    retrieved_docs: List[Any]
    # Documents after optional reranking (or simple truncation to top_k).
    reranked_docs: List[Any]
    # Concatenated document text fed to the LLM.
    context: str
    # Generated answer text.
    answer: str
    # Source metadata extracted from the reranked documents.
    sources: List[Dict[str, str]]
    # Final structured result returned to callers.
    result: Dict[str, Any]

class RAGWorkflow:
    """RAG question-answering workflow.

    Refactored version of the original RAG system expressed as a linear
    LangGraph pipeline; each node mirrors one step of the original logic.

    Fix: `run` previously referenced an undefined name `kwargs` when
    forwarding user/session identifiers to trace_workflow, raising NameError
    on both the success and error paths. `run` and `ask_question` now accept
    optional `user_id` / `session_id` keyword arguments (defaulting to None),
    mirroring CommitMessageWorkflow's interface; existing callers are
    unaffected.
    """
    
    def __init__(self, rag_system):
        """Store the RAG system facade, create the LLM client, compile the graph.

        Args:
            rag_system: Object exposing document_processor, use_reranker,
                reranker and rag_model, used by the workflow nodes.
        """
        self.rag_system = rag_system
        self.llm = ChatOpenAI(
            model=Config.LLM_MODEL_NAME,
            openai_api_key=Config.OPENAI_API_KEY,
            openai_api_base=Config.OPENAI_BASE_URL,
            # NOTE(review): hard-coded 0.1 while CommitMessageWorkflow uses
            # ModelThresholds.MIN_TEMPERATURE - confirm this is intentional.
            temperature=0.1
        )
        self._build_workflow()
    
    def _build_workflow(self):
        """Build the RAG workflow graph - one node per original processing step."""
        workflow = StateGraph(RAGWorkflowState)
        
        # Add nodes - each corresponds to a step of the original RAG system.
        workflow.add_node("retrieve_documents", self._retrieve_documents)
        workflow.add_node("rerank_documents", self._rerank_documents)
        workflow.add_node("build_context", self._build_context)
        workflow.add_node("generate_answer", self._generate_answer)
        workflow.add_node("extract_sources", self._extract_sources)
        workflow.add_node("create_result", self._create_result)
        
        # Entry point of the pipeline.
        workflow.set_entry_point("retrieve_documents")
        
        # Edges - strictly linear processing order.
        workflow.add_edge("retrieve_documents", "rerank_documents")
        workflow.add_edge("rerank_documents", "build_context")
        workflow.add_edge("build_context", "generate_answer")
        workflow.add_edge("generate_answer", "extract_sources")
        workflow.add_edge("extract_sources", "create_result")
        workflow.add_edge("create_result", END)
        
        self.graph = workflow.compile()
    
    def _retrieve_documents(self, state: RAGWorkflowState) -> RAGWorkflowState:
        """Run the initial similarity search - mirrors the original retrieval logic."""
        question = state["question"]
        collection_name = state["collection_name"]
        top_k = state["top_k"]
        
        # Over-retrieve by the configured multiplier so the reranker has
        # candidates to choose from (original logic preserved).
        from config.thresholds import RAGThresholds
        initial_top_k = top_k * RAGThresholds.INITIAL_RETRIEVAL_MULTIPLIER if top_k else RAGThresholds.DEFAULT_TOP_K * RAGThresholds.INITIAL_RETRIEVAL_MULTIPLIER
        
        relevant_docs = self.rag_system.document_processor.search_similar(
            question, collection_name, initial_top_k
        )
        
        return {
            **state,
            "retrieved_docs": relevant_docs
        }
    
    def _rerank_documents(self, state: RAGWorkflowState) -> RAGWorkflowState:
        """Rerank (or truncate) the retrieved documents - mirrors the original logic."""
        question = state["question"]
        retrieved_docs = state["retrieved_docs"]
        top_k = state["top_k"]
        rerank_threshold = state["rerank_threshold"]
        
        # Nothing retrieved - nothing to rerank.
        if not retrieved_docs:
            return {
                **state,
                "reranked_docs": []
            }
        
        # Use the reranker when available; otherwise fall back to a plain
        # top_k truncation (default 5), as in the original implementation.
        if self.rag_system.use_reranker and self.rag_system.reranker:
            rerank_result = self.rag_system.reranker.rerank_with_metadata(
                query=question,
                documents=retrieved_docs,
                top_k=top_k or 5,
                threshold=rerank_threshold
            )
            reranked_docs = rerank_result["reranked_documents"]
        else:
            reranked_docs = retrieved_docs[:top_k] if top_k else retrieved_docs[:5]
        
        return {
            **state,
            "reranked_docs": reranked_docs
        }
    
    def _build_context(self, state: RAGWorkflowState) -> RAGWorkflowState:
        """Concatenate the reranked documents into a prompt context string."""
        reranked_docs = state["reranked_docs"]
        
        if not reranked_docs:
            # Sentinel string checked by _generate_answer.
            context = "未找到相关文档"
        else:
            # Keep the original numbered "document + source" layout.
            context_parts = []
            for i, doc in enumerate(reranked_docs, 1):
                source = doc.metadata.get("source", "未知来源")
                content = doc.page_content.strip()
                context_parts.append(f"文档 {i} (来源: {source}):\n{content}\n")
            context = "\n".join(context_parts)
        
        return {
            **state,
            "context": context
        }
    
    def _generate_answer(self, state: RAGWorkflowState) -> RAGWorkflowState:
        """Generate the answer - mirrors the original answer-generation logic."""
        question = state["question"]
        context = state["context"]
        
        if context == "未找到相关文档":
            # No relevant documents found: fall back to a general-knowledge
            # answer from the LLM rather than refusing outright.
            try:
                prompt = f"""
基于以下问题，请给出一个有用且相关的回答。即使没有具体的文档参考，也请基于你的知识给出合理的回答：

问题：{question}

请提供一个有帮助的回答：
"""
                
                # Fetch the already-initialized Langfuse callback handler,
                # if enabled (imported locally to avoid a hard dependency).
                from monitoring.langfuse_manager import langfuse_manager
                langfuse_handler = langfuse_manager.handler if langfuse_manager.is_enabled() else None
                
                # Invoke the LLM, attaching the Langfuse callback when available.
                if langfuse_handler:
                    answer = self.llm.invoke(prompt, config={"callbacks": [langfuse_handler]}).content
                else:
                    answer = self.llm.invoke(prompt).content
            except Exception as e:
                # Best-effort fallback: emit a canned apology instead of failing.
                logger.warning(f"生成一般性回答失败: {str(e)}")
                answer = f"抱歉，我在知识库中没有找到与您问题相关的具体信息。不过我可以基于一般知识回答：{question} 是一个常见的问题，通常涉及相关的概念和实践。如果您需要更具体的答案，建议上传相关的文档到知识库中。"
        else:
            # Documents found: delegate to the original RAG model.
            answer = self.rag_system.rag_model.generate_response(question, context)
        
        return {
            **state,
            "answer": answer
        }
    
    def _extract_sources(self, state: RAGWorkflowState) -> RAGWorkflowState:
        """Collect source metadata from the reranked documents."""
        reranked_docs = state["reranked_docs"]
        
        # Keep the original source-extraction shape, including the 200-char
        # content preview.
        sources = []
        for doc in reranked_docs:
            source_info = {
                "source": doc.metadata.get("source", "未知来源"),
                "file_name": doc.metadata.get("file_name", "未知文件"),
                "file_type": doc.metadata.get("file_type", "未知类型"),
                "content_preview": doc.page_content[:200] + "..." if len(doc.page_content) > 200 else doc.page_content
            }
            sources.append(source_info)
        
        return {
            **state,
            "sources": sources
        }
    
    def _create_result(self, state: RAGWorkflowState) -> RAGWorkflowState:
        """Assemble the final structured result - mirrors the original shape."""
        question = state["question"]
        answer = state["answer"]
        sources = state["sources"]
        context = state["context"]
        retrieved_docs = state["retrieved_docs"]
        reranked_docs = state["reranked_docs"]
        
        result = {
            "status": "success",
            "question": question,
            "answer": answer,
            "sources": sources,
            "context_length": len(context),
            "documents_retrieved": len(retrieved_docs),
            "documents_used": len(reranked_docs)
        }
        
        return {
            **state,
            "result": result
        }
    
    def ask_question(self, question: str, collection_name: str = "default", top_k: int = None, rerank_threshold: float = None, user_id: str = None, session_id: str = None) -> Dict[str, Any]:
        """Answer a question via RAG - original public interface.

        Args:
            question: The user's question.
            collection_name: Document collection to search.
            top_k: Number of documents to keep after reranking (None = default).
            rerank_threshold: Reranker score threshold (None = default).
            user_id: Optional identifier forwarded to workflow tracing.
            session_id: Optional identifier forwarded to workflow tracing.

        Returns:
            The structured result dict; on failure a dict with status "error".
        """
        try:
            from config.thresholds import RAGThresholds
            default_threshold = RAGThresholds.DEFAULT_RERANK_THRESHOLD
            
            result = self.run(question, collection_name, top_k, rerank_threshold or default_threshold, user_id=user_id, session_id=session_id)
            
            if "error" in result:
                return {
                    "status": "error",
                    "question": question,
                    "answer": result["error"],
                    "sources": [],
                    "context_length": 0,
                    "documents_retrieved": 0,
                    "documents_used": 0
                }
            
            return result["result"]
            
        except Exception as e:
            logger.error(f"回答问题失败: {str(e)}")
            return {
                "status": "error",
                "question": question,
                "answer": f"回答问题失败: {str(e)}",
                "sources": [],
                "context_length": 0,
                "documents_retrieved": 0,
                "documents_used": 0
            }
    
    @trace_function(name="rag_workflow_run")
    def run(self, question: str, collection_name: str = "default", top_k: int = None, rerank_threshold: float = None, user_id: str = None, session_id: str = None) -> Dict[str, Any]:
        """Run the RAG workflow end to end.

        Args:
            question: The user's question.
            collection_name: Document collection to search.
            top_k: Number of documents to keep after reranking (None = default).
            rerank_threshold: Reranker score threshold (None = default).
            user_id: Optional identifier forwarded to workflow tracing.
            session_id: Optional identifier forwarded to workflow tracing.

        Returns:
            {"result", "workflow_steps"} on success, or a dict containing
            "error" plus a status-"error" result on failure.
        """
        try:
            from config.thresholds import RAGThresholds
            
            initial_state = {
                "question": question,
                "collection_name": collection_name,
                "top_k": top_k or RAGThresholds.DEFAULT_TOP_K,
                "rerank_threshold": rerank_threshold or RAGThresholds.DEFAULT_RERANK_THRESHOLD,
                "retrieved_docs": [],
                "reranked_docs": [],
                "context": "",
                "answer": "",
                "sources": [],
                "result": {}
            }
            
            result = self.graph.invoke(initial_state)
            
            # Record the per-node inputs/outputs for workflow tracing.
            steps = [
                {"name": "retrieve_documents", "input": {"question": question, "collection": collection_name}, "output": {"retrieved_count": len(result.get("retrieved_docs", []))}},
                {"name": "rerank_documents", "input": {"retrieved_count": len(result.get("retrieved_docs", []))}, "output": {"reranked_count": len(result.get("reranked_docs", []))}},
                {"name": "build_context", "input": {"reranked_count": len(result.get("reranked_docs", []))}, "output": {"context_length": len(result.get("context", ""))}},
                {"name": "generate_answer", "input": {"context_length": len(result.get("context", ""))}, "output": {"answer_length": len(result.get("answer", ""))}},
                {"name": "extract_sources", "input": {"answer_length": len(result.get("answer", ""))}, "output": {"sources_count": len(result.get("sources", []))}},
                {"name": "create_result", "input": {"sources_count": len(result.get("sources", []))}, "output": {"result": result.get("result", {})}}
            ]
            
            final_result = {
                "result": result["result"],
                "workflow_steps": len(result) - 1
            }
            
            # Emit the workflow trace. FIX: previously read from an undefined
            # `kwargs` name, which raised NameError; now uses the explicit
            # user_id/session_id parameters.
            trace_workflow("rag_qa", steps, final_result, {
                "question": question,
                "collection_name": collection_name,
                "top_k": top_k,
                "rerank_threshold": rerank_threshold,
                "workflow_type": "rag_question_answering"
            }, user_id=user_id, session_id=session_id)
            
            return final_result
            
        except Exception as e:
            logger.error(f"RAG工作流执行失败: {str(e)}")
            error_result = {
                "error": f"RAG工作流执行失败: {str(e)}",
                "result": {
                    "status": "error",
                    "question": question,
                    "answer": f"RAG工作流执行失败: {str(e)}",
                    "sources": [],
                    "context_length": 0,
                    "documents_retrieved": 0,
                    "documents_used": 0
                }
            }
            
            # Trace the failure as well (same NameError fix as above).
            trace_workflow("rag_qa", [], error_result, {
                "question": question,
                "collection_name": collection_name,
                "workflow_type": "rag_question_answering",
                "error": str(e)
            }, user_id=user_id, session_id=session_id)
            
            return error_result