import os
import json
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import pandas as pd

from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

logger = logging.getLogger(__name__)

class DataManager:
    """Data management service for the knowledge base.

    Owns two on-disk artifacts under ``data_dir``:
      * ``raw_data.json`` -- the crawled source documents, and
      * ``vector_db``     -- a FAISS index built from their embedded chunks.

    Compatible with sentence-transformers 5.0.0.
    """

    def __init__(self):
        # Embeddings are produced remotely by DashScope; the key is read
        # from the environment so no secret lives in the source.
        self.embeddings = DashScopeEmbeddings(
            dashscope_api_key=os.getenv("DASHSCOPE_API_KEY"),
            model="text-embedding-v4"
        )
        # Chunking parameters: 1000-char windows with 200-char overlap.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
        )
        self.data_dir = "Didi/ai/data"
        self.vector_db_path = f"{self.data_dir}/vector_db"
        self.raw_data_path = f"{self.data_dir}/raw_data.json"

        # Make sure the data directory exists before any read/write.
        os.makedirs(self.data_dir, exist_ok=True)

    def load_vector_database(self) -> "Optional[FAISS]":
        """Load the persisted FAISS index.

        Returns:
            The FAISS vector store, or ``None`` when the index directory is
            missing or loading fails (errors are logged, never raised).
        """
        try:
            if not os.path.exists(self.vector_db_path):
                logger.warning("向量数据库不存在")
                return None
            # allow_dangerous_deserialization is required by current
            # langchain_community releases because the docstore is pickled.
            # The index is built locally by this class, so it is trusted.
            vectorstore = FAISS.load_local(
                self.vector_db_path,
                self.embeddings,
                allow_dangerous_deserialization=True,
            )
            logger.info("向量数据库加载成功")
            return vectorstore
        except Exception as e:
            logger.error(f"加载向量数据库失败: {e}")
            return None

    def search_similar_documents(self, query: str, k: int = 5) -> List[Dict[str, Any]]:
        """Return up to ``k`` chunks most similar to ``query``.

        Each result carries the chunk text, its metadata, and the raw FAISS
        L2 distance in ``similarity_score`` (smaller means more similar).
        Returns an empty list when the index is unavailable or search fails.
        """
        vectorstore = self.load_vector_database()
        if not vectorstore:
            return []

        try:
            # similarity_search_with_score exposes the real distance instead
            # of the previous hard-coded 0.0 placeholder.
            docs_with_scores = vectorstore.similarity_search_with_score(query, k=k)
            return [
                {
                    "content": doc.page_content,
                    "metadata": doc.metadata,
                    "similarity_score": float(score),
                }
                for doc, score in docs_with_scores
            ]
        except Exception as e:
            logger.error(f"搜索文档失败: {e}")
            return []

    def get_document_statistics(self) -> Dict[str, Any]:
        """Summarize the raw corpus and the vector index.

        Returns:
            A dict with document/chunk/word counts, per-category counts,
            the last-update timestamp and total on-disk size; on failure a
            dict with a single ``error`` key.
        """
        try:
            if not os.path.exists(self.raw_data_path):
                return {"error": "数据文件不存在"}

            with open(self.raw_data_path, 'r', encoding='utf-8') as f:
                raw_data = json.load(f)

            categories: Dict[str, int] = {}
            total_words = 0
            for item in raw_data:
                category = item.get('category', '未知')
                categories[category] = categories.get(category, 0) + 1
                total_words += item.get('word_count', 0)

            # Chunk count comes from the FAISS docstore mapping
            # (0 when the index has not been built yet).
            vectorstore = self.load_vector_database()
            vector_chunks = len(vectorstore.index_to_docstore_id) if vectorstore else 0

            return {
                "total_documents": len(raw_data),
                "total_chunks": vector_chunks,
                "total_words": total_words,
                "categories": categories,
                "last_update": self._get_last_update_time(),
                "data_size_mb": self._get_data_size()
            }

        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            return {"error": str(e)}

    def update_knowledge_base(self, new_data: List[Dict[str, Any]]) -> bool:
        """Append ``new_data`` to the corpus and rebuild the vector index.

        Duplicates (same non-empty URL) are removed before persisting.

        Returns:
            True only when both the JSON corpus was saved AND the vector
            database rebuild succeeded.
        """
        try:
            existing_data: List[Dict[str, Any]] = []
            if os.path.exists(self.raw_data_path):
                with open(self.raw_data_path, 'r', encoding='utf-8') as f:
                    existing_data = json.load(f)

            # Merge then de-duplicate by URL.
            unique_data = self._remove_duplicates(existing_data + new_data)

            with open(self.raw_data_path, 'w', encoding='utf-8') as f:
                json.dump(unique_data, f, ensure_ascii=False, indent=2)

            # Previously a failed rebuild was ignored and the method still
            # reported success; propagate the failure to the caller instead.
            if not self._rebuild_vector_database(unique_data):
                logger.warning("原始数据已保存，但向量数据库重建失败")
                return False

            logger.info(f"知识库更新成功，新增 {len(new_data)} 个文档")
            return True

        except Exception as e:
            logger.error(f"更新知识库失败: {e}")
            return False

    def _remove_duplicates(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Drop entries whose non-empty URL was already seen.

        Entries without a URL cannot be identified, so they are always kept.
        (Previously every url-less entry after the first was silently
        dropped because they all shared the '' key.)
        """
        seen_urls = set()
        unique_data: List[Dict[str, Any]] = []

        for item in data:
            url = item.get('url', '')
            if not url:
                unique_data.append(item)
            elif url not in seen_urls:
                seen_urls.add(url)
                unique_data.append(item)

        return unique_data

    def _rebuild_vector_database(self, data: List[Dict[str, Any]]) -> bool:
        """Re-embed every document chunk and overwrite the FAISS index.

        Compatible with sentence-transformers 5.0.0. Returns True on
        success; False on an empty corpus or any failure (logged).
        """
        try:
            texts: List[str] = []
            metadatas: List[Dict[str, Any]] = []
            for item in data:
                # Split each document into overlapping chunks; use .get with
                # defaults so one malformed record cannot abort the rebuild.
                for i, chunk in enumerate(self.text_splitter.split_text(item.get('content', ''))):
                    texts.append(chunk)
                    metadatas.append({
                        "title": item.get('title', ''),
                        "category": item.get('category', '未知'),
                        "url": item.get('url', ''),
                        "chunk_id": i,
                        "source": item.get('source', ''),
                        "crawl_time": item.get('crawl_time', ''),
                    })

            # FAISS.from_texts raises on an empty corpus; bail out early.
            if not texts:
                logger.warning("没有可索引的文档，跳过向量数据库重建")
                return False

            vectorstore = FAISS.from_texts(
                texts=texts,
                embedding=self.embeddings,
                metadatas=metadatas
            )

            vectorstore.save_local(self.vector_db_path)

            logger.info(f"向量数据库重建成功，共 {len(texts)} 个文档块")
            return True

        except Exception as e:
            logger.error(f"重建向量数据库失败: {e}")
            return False

    def _get_last_update_time(self) -> str:
        """Return the raw-data file's mtime as ISO-8601, or "未知"."""
        try:
            if os.path.exists(self.raw_data_path):
                stat = os.stat(self.raw_data_path)
                return datetime.fromtimestamp(stat.st_mtime).isoformat()
            return "未知"
        except Exception:
            return "未知"

    def _get_data_size(self) -> float:
        """Return the combined size of the corpus and index in MB (2 dp)."""
        try:
            total_size = 0
            if os.path.exists(self.raw_data_path):
                total_size += os.path.getsize(self.raw_data_path)
            # The FAISS index is a directory; sum every file inside it.
            if os.path.exists(self.vector_db_path):
                for root, dirs, files in os.walk(self.vector_db_path):
                    for file in files:
                        total_size += os.path.getsize(os.path.join(root, file))
            return round(total_size / (1024 * 1024), 2)
        except Exception:
            return 0.0

    def export_data_summary(self, output_path: Optional[str] = None) -> str:
        """Write a JSON summary of the corpus (stats + per-category analysis).

        Args:
            output_path: Destination file; defaults to a timestamped file
                under ``data_dir``.

        Returns:
            The path written, or "" when the corpus is missing or on error.
        """
        if not output_path:
            output_path = f"{self.data_dir}/data_summary_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"

        try:
            stats = self.get_document_statistics()

            if not os.path.exists(self.raw_data_path):
                logger.error("原始数据文件不存在")
                return ""

            with open(self.raw_data_path, 'r', encoding='utf-8') as f:
                raw_data = json.load(f)

            # Aggregate count / word totals / titles per category.
            category_analysis: Dict[str, Dict[str, Any]] = {}
            for item in raw_data:
                category = item.get('category', '未知')
                if category not in category_analysis:
                    category_analysis[category] = {
                        "count": 0,
                        "total_words": 0,
                        "avg_words": 0,
                        "titles": []
                    }

                category_analysis[category]["count"] += 1
                category_analysis[category]["total_words"] += item.get('word_count', 0)
                category_analysis[category]["titles"].append(item.get('title', ''))

            # Derive the average word count per category.
            for category in category_analysis:
                count = category_analysis[category]["count"]
                total_words = category_analysis[category]["total_words"]
                category_analysis[category]["avg_words"] = round(total_words / count, 2) if count > 0 else 0

            summary = {
                "statistics": stats,
                "category_analysis": category_analysis,
                "export_time": datetime.now().isoformat(),
                "total_documents": len(raw_data)
            }

            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(summary, f, ensure_ascii=False, indent=2)

            logger.info(f"数据摘要导出成功: {output_path}")
            return output_path

        except Exception as e:
            logger.error(f"导出数据摘要失败: {e}")
            return ""

    def search_by_category(self, category: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Return up to ``limit`` raw documents whose category matches
        ``category`` (case-insensitive). Empty list when the corpus is
        missing or on error.
        """
        try:
            if not os.path.exists(self.raw_data_path):
                return []

            with open(self.raw_data_path, 'r', encoding='utf-8') as f:
                raw_data = json.load(f)

            return [
                item for item in raw_data
                if item.get('category', '').lower() == category.lower()
            ][:limit]

        except Exception as e:
            logger.error(f"按类别搜索失败: {e}")
            return []

    def get_recent_documents(self, days: int = 7) -> List[Dict[str, Any]]:
        """Return raw documents whose ``crawl_time`` is within ``days`` days.

        Documents with a missing or unparseable timestamp are skipped.
        """
        try:
            if not os.path.exists(self.raw_data_path):
                return []

            with open(self.raw_data_path, 'r', encoding='utf-8') as f:
                raw_data = json.load(f)

            cutoff_date = datetime.now() - timedelta(days=days)
            recent_docs: List[Dict[str, Any]] = []

            for item in raw_data:
                crawl_time = item.get('crawl_time', '')
                if not crawl_time:
                    continue
                try:
                    doc_date = datetime.fromisoformat(crawl_time.replace('Z', '+00:00'))
                    # An aware timestamp cannot be compared to the naive
                    # cutoff (TypeError used to silently exclude every
                    # aware doc); convert to naive local time first.
                    if doc_date.tzinfo is not None:
                        doc_date = doc_date.astimezone().replace(tzinfo=None)
                    if doc_date > cutoff_date:
                        recent_docs.append(item)
                except Exception:
                    continue

            return recent_docs

        except Exception as e:
            logger.error(f"获取最近文档失败: {e}")
            return []

 