import os
import json
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta

from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

from .data_manager import DataManager
from .elasticsearch_service import ElasticsearchService

logger = logging.getLogger(__name__)

class SmartCustomerService:
    """Smart customer-service assistant for the Didi driver app (LangChain 0.3.x compatible).

    Retrieval strategy: prefer Elasticsearch full-text search, fall back to the
    FAISS vector store managed by ``DataManager``. A Qwen chat model (via
    DashScope's OpenAI-compatible endpoint) performs question classification
    and answer generation. Every handled query is appended to a JSON log file
    for later analytics/export.
    """

    # Single source of truth for the query-log location (used by logging,
    # analytics and export).
    _LOG_FILE = "Didi/ai/data/query_logs.json"

    def __init__(self):
        # Embedding model backing the vector store.
        self.embeddings = DashScopeEmbeddings(
            dashscope_api_key=os.getenv("DASHSCOPE_API_KEY"),
            model="text-embedding-v4"
        )
        # Chat model reached through DashScope's OpenAI-compatible endpoint.
        self.llm = ChatOpenAI(
            model="qwen-max",
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        self.data_manager = DataManager()
        self.es_service = ElasticsearchService()

        # Answer-generation prompt (LangChain 0.3.x `from_template` style).
        self.customer_service_prompt = ChatPromptTemplate.from_template(
            """
            您好，我是滴滴出行车主端智能客服助手。请严格依据以下上下文内容 {context}，为车主解答问题：{question}。

            回答要求：
            1. 只引用上下文中的信息，不添加外部知识或主观推测；
            2. 针对车主问题，清晰、准确地引用文档中的具体条款、流程或数据；
            3. 回答风格需正式、专业且亲切，体现客服的服务意识；
            4. 如上下文未明确涉及车主问题的答案，请回复："很抱歉，根据现有文档暂未提及相关信息，建议您联系客服进一步确认。"；
            5. 结尾可加"如有其他疑问，欢迎随时咨询客服！"。

            请用智能客服的口吻作答。
            """
        )

        # Question-classification prompt: the model must answer with exactly
        # one of the listed category names.
        self.classification_prompt = ChatPromptTemplate.from_template(
            """
            请将以下车主问题分类到最合适的类别：
            问题：{question}

            可选类别：
            - 注册流程：关于车主注册、认证、审核等问题
            - 订单类型：关于实时单、预约单、顺风车等订单类型问题
            - 车辆要求：关于车辆条件、车型要求、车辆认证等问题
            - 账号管理：关于登录、密码、手机号更换等账号问题
            - 接单技巧：关于如何接到更多订单、提高收入等问题
            - 派单规则：关于派单算法、订单分配规则等问题
            - 政策法规：关于网约车政策、法规要求等问题
            - 其他：不属于以上类别的问题

            请只返回类别名称，不要其他内容。
            """
        )

    def _es_available(self) -> bool:
        """Return True when the Elasticsearch client exists and answers a ping.

        Centralizes the `es and es.ping()` check that was previously duplicated
        across methods; swallows ping errors so availability probes never raise.
        """
        try:
            return bool(self.es_service.es and self.es_service.es.ping())
        except Exception:
            return False

    @staticmethod
    def _content_preview(content: str, limit: int = 200) -> str:
        """Return *content* truncated to *limit* chars with an ellipsis suffix."""
        return content[:limit] + "..." if len(content) > limit else content

    @staticmethod
    def _doc_to_context_line(doc: Any) -> str:
        """Render one search hit (ES or vector-store format) as a context line.

        ES hits are flat dicts carrying a 'score' key and optional 'highlight'
        fragments; vector hits nest title/category under 'metadata'. Anything
        else is stringified as-is.
        """
        if not isinstance(doc, dict):
            return str(doc)
        if 'score' in doc:
            # ES hit: prefer highlighted fragments over the raw content.
            content = doc.get('content', '')
            highlight = doc.get('highlight', {})
            if highlight and 'content' in highlight:
                content = ' ... '.join(highlight['content'])
            title = doc.get('title', '未知来源')
            category = doc.get('category', '未知类别')
        else:
            # Vector-store hit: descriptive fields live under 'metadata'.
            content = doc.get('content', '')
            metadata = doc.get('metadata', {})
            title = metadata.get('title', '未知来源')
            category = metadata.get('category', '未知类别')
        return f"[{category}] {title}: {content}"

    def classify_question(self, question: str) -> str:
        """Classify a driver question into one of the predefined categories.

        Returns the bare category name; falls back to "其他" (other) when the
        LLM call fails.
        """
        try:
            # LangChain 0.3.x LCEL pipeline: prompt | model.
            chain = self.classification_prompt | self.llm
            result = chain.invoke({"question": question})
            return result.content.strip()
        except Exception as e:
            logger.error(f"问题分类失败: {e}")
            return "其他"

    def search_relevant_documents(self, question: str, category: Optional[str] = None, k: int = 5) -> List[Dict[str, Any]]:
        """Search for relevant documents: prefer ES, fall back to vector search.

        Args:
            question: free-text driver question.
            category: optional category filter forwarded to ES.
            k: maximum number of hits to return.

        Returns a (possibly empty) list of hit dicts in either ES or
        vector-store format.
        """
        try:
            # 1. Try Elasticsearch first.
            if self._es_available():
                es_results = self.es_service.search_documents(question, category, size=k)
                if es_results:
                    logger.info(f"ES搜索找到 {len(es_results)} 个相关文档")
                    return es_results

            # 2. ES unavailable (or returned nothing) — use vector search.
            logger.info("ES不可用，使用向量搜索")
            return self.data_manager.search_similar_documents(question, k=k)

        except Exception as e:
            logger.error(f"搜索相关文档失败: {e}")
            # 3. Last resort: retry vector search on its own.
            try:
                return self.data_manager.search_similar_documents(question, k=k)
            except Exception as e2:
                logger.error(f"向量搜索也失败: {e2}")
                return []

    def generate_answer(self, question: str, context_docs: List[Dict[str, Any]]) -> str:
        """Generate an answer grounded in *context_docs*.

        Returns a canned apology when no documents were found or when the LLM
        call fails.
        """
        try:
            if not context_docs:
                return "很抱歉，暂时没有找到相关信息，建议您联系客服进一步咨询。"

            # Build the prompt context, one formatted line per hit.
            context = "\n\n".join(self._doc_to_context_line(doc) for doc in context_docs)

            # LangChain 0.3.x LCEL pipeline: prompt | model.
            chain = self.customer_service_prompt | self.llm
            result = chain.invoke({
                "context": context,
                "question": question
            })
            return result.content

        except Exception as e:
            logger.error(f"生成答案失败: {e}")
            return "很抱歉，系统暂时无法回答您的问题，请稍后重试或联系客服。"

    def process_customer_query(self, question: str, user_id: Optional[str] = None) -> Dict[str, Any]:
        """End-to-end pipeline: classify, retrieve, answer, log, and respond.

        Returns a response dict; on failure the dict carries an 'error' key and
        a generic apology instead of an answer.
        """
        try:
            start_time = datetime.now()

            # 1. Classify the question to narrow the search.
            category = self.classify_question(question)

            # 2. Retrieve supporting documents.
            relevant_docs = self.search_relevant_documents(question, category)

            # 3. Generate the grounded answer.
            answer = self.generate_answer(question, relevant_docs)

            # 4. Measure wall-clock processing time.
            processing_time = (datetime.now() - start_time).total_seconds()

            # 5. Assemble the response payload.
            response = {
                "question": question,
                "answer": answer,
                "category": category,
                "relevant_docs_count": len(relevant_docs),
                "processing_time": processing_time,
                "timestamp": datetime.now().isoformat(),
                "user_id": user_id,
                # NOTE(review): reflects current ES availability, not
                # necessarily the backend that actually served this query
                # (ES may have been up but returned no hits).
                "search_method": "elasticsearch" if self._es_available() else "vector_search",
                "relevant_docs": [
                    {
                        "title": doc.get('title', doc.get('metadata', {}).get('title', '未知')),
                        "category": doc.get('category', doc.get('metadata', {}).get('category', '未知')),
                        "url": doc.get('url', doc.get('metadata', {}).get('url', '')),
                        "score": doc.get('score', 0.0),
                        "content_preview": self._content_preview(doc.get('content', '')),
                        "highlight": doc.get('highlight', {})
                    }
                    for doc in relevant_docs[:3]  # surface only the top 3 hits
                ]
            }

            # 6. Persist the query for analytics.
            self._log_query(response)

            return response

        except Exception as e:
            logger.error(f"处理客户查询失败: {e}")
            return {
                "question": question,
                "answer": "很抱歉，系统暂时无法处理您的问题，请稍后重试。",
                "category": "其他",
                "error": str(e),
                "timestamp": datetime.now().isoformat()
            }

    def sync_data_to_elasticsearch(self, documents: List[Dict[str, Any]]) -> bool:
        """Index *documents* into Elasticsearch.

        Returns True on success; False when ES is unavailable or indexing fails.
        """
        try:
            if not self._es_available():
                logger.warning("Elasticsearch不可用，跳过同步")
                return False

            # Normalize each document to the ES mapping, filling defaults.
            es_documents = []
            for doc in documents:
                es_documents.append({
                    "title": doc.get("title", ""),
                    "content": doc.get("content", ""),
                    "category": doc.get("category", ""),
                    "url": doc.get("url", ""),
                    "tags": doc.get("tags", []),
                    "priority": doc.get("priority", 1),
                    "crawl_time": doc.get("crawl_time", datetime.now().isoformat()),
                    "processed_time": doc.get("processed_time", datetime.now().isoformat()),
                    "word_count": doc.get("word_count", 0),
                    "source": doc.get("source", ""),
                    "chunk_id": doc.get("chunk_id", 0)
                })

            success = self.es_service.index_documents(es_documents)

            if success:
                logger.info(f"成功同步 {len(es_documents)} 个文档到Elasticsearch")
            else:
                logger.error("同步到Elasticsearch失败")

            return success

        except Exception as e:
            logger.error(f"同步数据到Elasticsearch失败: {e}")
            return False

    def get_knowledge_statistics(self) -> Dict[str, Any]:
        """Return combined vector-store and Elasticsearch knowledge-base stats."""
        vector_stats = self.data_manager.get_document_statistics()
        es_stats = self.es_service.get_statistics()

        return {
            "vector_database": vector_stats,
            "elasticsearch": es_stats,
            "es_available": self._es_available()
        }

    def search_by_category(self, category: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Browse the knowledge base by category (ES preferred, vector fallback)."""
        if self._es_available():
            return self.es_service.search_by_category(category, limit)
        return self.data_manager.search_by_category(category, limit)

    def get_recent_knowledge(self, days: int = 7) -> List[Dict[str, Any]]:
        """Return documents updated within the last *days* days (vector store)."""
        return self.data_manager.get_recent_documents(days)

    def update_knowledge_base(self, new_data: List[Dict[str, Any]]) -> bool:
        """Update both backends; True only when vector store AND ES succeed."""
        vector_success = self.data_manager.update_knowledge_base(new_data)
        es_success = self.sync_data_to_elasticsearch(new_data)
        return vector_success and es_success

    def get_es_health_status(self) -> Dict[str, Any]:
        """Probe ES connectivity and cluster health; never raises."""
        try:
            if not self.es_service.es:
                return {"status": "disconnected", "message": "ES客户端未初始化"}

            if not self.es_service.es.ping():
                return {"status": "disconnected", "message": "ES连接失败"}

            health = self.es_service.es.cluster.health()

            return {
                "status": "connected",
                "cluster_health": health.get('status', 'unknown'),
                "number_of_nodes": health.get('number_of_nodes', 0),
                "active_shards": health.get('active_shards', 0),
                "message": "ES连接正常"
            }

        except Exception as e:
            return {"status": "error", "message": f"ES健康检查失败: {str(e)}"}

    def _log_query(self, query_data: Dict[str, Any]) -> None:
        """Append one query record to the JSON log, keeping the last 1000 entries.

        Best-effort: a corrupt or unreadable log file is discarded rather than
        blocking new records (the old code aborted and lost the record).
        """
        try:
            os.makedirs(os.path.dirname(self._LOG_FILE), exist_ok=True)

            logs: List[Dict[str, Any]] = []
            if os.path.exists(self._LOG_FILE):
                try:
                    with open(self._LOG_FILE, 'r', encoding='utf-8') as f:
                        logs = json.load(f)
                except (json.JSONDecodeError, OSError):
                    # Corrupt/unreadable history: start a fresh log.
                    logs = []

            logs.append(query_data)

            # Retain only the most recent 1000 entries.
            logs = logs[-1000:]

            with open(self._LOG_FILE, 'w', encoding='utf-8') as f:
                json.dump(logs, f, ensure_ascii=False, indent=2)

        except Exception as e:
            logger.error(f"记录查询日志失败: {e}")

    def get_query_analytics(self, days: int = 30) -> Dict[str, Any]:
        """Aggregate query-log statistics over the last *days* days.

        Returns category/search-method distributions and the average
        processing time; an {'error': ...} dict on failure.
        """
        try:
            if not os.path.exists(self._LOG_FILE):
                return {"error": "日志文件不存在"}

            with open(self._LOG_FILE, 'r', encoding='utf-8') as f:
                logs = json.load(f)

            # Keep only entries newer than the cutoff; skip unparsable timestamps.
            cutoff_date = datetime.now() - timedelta(days=days)
            recent_logs = []
            for log in logs:
                timestamp = log.get('timestamp', '')
                if not timestamp:
                    continue
                try:
                    log_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
                except Exception:
                    continue
                if log_date > cutoff_date:
                    recent_logs.append(log)

            total_queries = len(recent_logs)
            category_stats: Dict[str, int] = {}
            search_method_stats: Dict[str, int] = {}
            avg_processing_time = 0.0

            for log in recent_logs:
                category = log.get('category', '其他')
                category_stats[category] = category_stats.get(category, 0) + 1

                search_method = log.get('search_method', 'unknown')
                search_method_stats[search_method] = search_method_stats.get(search_method, 0) + 1

                avg_processing_time += log.get('processing_time', 0)

            if total_queries > 0:
                avg_processing_time /= total_queries

            return {
                "total_queries": total_queries,
                "category_distribution": category_stats,
                "search_method_distribution": search_method_stats,
                "avg_processing_time": round(avg_processing_time, 3),
                "period_days": days,
                "analysis_time": datetime.now().isoformat()
            }

        except Exception as e:
            logger.error(f"获取查询分析失败: {e}")
            return {"error": str(e)}

    def export_query_logs(self, output_path: Optional[str] = None) -> str:
        """Copy the query log to *output_path* (timestamped default).

        Returns the output path on success, or "" when the log is missing or
        the export fails.
        """
        if not output_path:
            output_path = f"Didi/ai/data/query_logs_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"

        try:
            if os.path.exists(self._LOG_FILE):
                with open(self._LOG_FILE, 'r', encoding='utf-8') as f:
                    logs = json.load(f)

                with open(output_path, 'w', encoding='utf-8') as f:
                    json.dump(logs, f, ensure_ascii=False, indent=2)

                logger.info(f"查询日志导出成功: {output_path}")
                return output_path
            else:
                logger.error("查询日志文件不存在")
                return ""

        except Exception as e:
            logger.error(f"导出查询日志失败: {e}")
            return ""

 