import os
from pathlib import Path
from typing import List, Dict, Any, Optional
import dashscope
from dashscope.audio.asr import Recognition
from dashscope import TextEmbedding, MultiModalConversation
from pypdf import PdfReader
import chromadb
from chromadb.utils import embedding_functions
from chromadb.utils.embedding_functions import EmbeddingFunction
from typing import List
import logging
from config import DASHSCOPE_API_KEY, EMBEDDING_MODEL, VECTOR_DB_DIR, MULTIMODAL_MODEL
import io
from PIL import Image
import base64

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# 设置API密钥
dashscope.api_key = DASHSCOPE_API_KEY

# 自定义DashScope嵌入函数
class DashScopeEmbeddingFunction(EmbeddingFunction):
    """DashScope (Tongyi Qianwen) embedding function producing text vectors.

    Implements the chromadb ``EmbeddingFunction`` protocol: callable that maps
    a list of texts to a list of float vectors. On API failure a zero vector
    is appended as a placeholder so output length always matches input length.
    """

    def __init__(self, api_key: str, model_name: str = "text-embedding-v4",
                 embedding_dim: int = 1024):
        """Initialize the embedding function.

        Args:
            api_key: DashScope API key.
            model_name: Name of the embedding model to call.
            embedding_dim: Dimension of the zero-vector placeholder used when
                the API call fails. Must match the model's output dimension
                so stored vectors stay consistent. Defaults to 1024.
        """
        self.api_key = api_key
        self.model_name = model_name
        self.embedding_dim = embedding_dim
        # DashScope reads the key from module-level state.
        dashscope.api_key = api_key

    def __call__(self, texts: List[str]) -> List[List[float]]:
        """Generate embedding vectors for the given texts.

        Args:
            texts: List of input strings.

        Returns:
            One embedding vector per input text; failed items are replaced
            by a zero vector of ``self.embedding_dim`` so indices line up.
        """
        if not texts:
            return []

        embeddings = []
        for text in texts:
            try:
                # One API call per text; the response carries a list of
                # embeddings, of which we take the first (single input).
                response = TextEmbedding.call(
                    model=self.model_name,
                    input=text
                )

                if response.status_code == 200 and response.output and 'embeddings' in response.output:
                    embedding = response.output['embeddings'][0]['embedding']
                    embeddings.append(embedding)
                else:
                    logger.error(f"获取嵌入向量失败: {response.code} - {response.message}")
                    # Placeholder keeps output aligned with input order.
                    embeddings.append([0.0] * self.embedding_dim)
            except Exception as e:
                logger.error(f"生成嵌入向量时出错: {str(e)}")
                embeddings.append([0.0] * self.embedding_dim)

        return embeddings

class PDFProcessor:
    """PDF ingestion pipeline: extract text/images, embed, and store in ChromaDB.

    Maintains two persistent collections — ``experiment_guides`` and
    ``experiment_reports`` — and offers semantic search, exact keyword
    search, per-file deletion, and batch directory ingestion.
    """

    def __init__(self, vector_db_path: str = None):
        """Initialize the processor and open/create both collections.

        Args:
            vector_db_path: Path for the persistent vector DB. Defaults to
                ``VECTOR_DB_DIR`` from the project configuration.
        """
        self.vector_db_path = vector_db_path or str(VECTOR_DB_DIR)

        # Persistent client so data survives process restarts.
        self.client = chromadb.PersistentClient(path=self.vector_db_path)

        # DashScope embedding model configured from project settings.
        self.embedding_function = self._get_dashscope_embeddings()

        self._ensure_collections()

    def _get_dashscope_embeddings(self):
        """Build the DashScope embedding function from config values."""
        return DashScopeEmbeddingFunction(
            api_key=DASHSCOPE_API_KEY,
            model_name=EMBEDDING_MODEL
        )

    def _ensure_collections(self):
        """Open the guide/report collections, creating them if absent.

        Uses ``get_or_create_collection`` instead of a bare ``except`` around
        ``get_collection`` — atomic, and it no longer swallows unrelated errors.
        """
        self.guide_collection = self.client.get_or_create_collection(
            name="experiment_guides",
            embedding_function=self.embedding_function
        )
        self.report_collection = self.client.get_or_create_collection(
            name="experiment_reports",
            embedding_function=self.embedding_function
        )

    def extract_images_from_page(self, page) -> List[Image.Image]:
        """Extract embedded images from a pypdf page object.

        Args:
            page: A pypdf page (dict-style access to /Resources).

        Returns:
            List of PIL ``Image`` objects; empty if the page has none or
            extraction fails (failures are logged, never raised).
        """
        images = []
        try:
            if '/XObject' in page['/Resources']:
                xObject = page['/Resources']['/XObject'].get_object()

                for obj in xObject:
                    if xObject[obj]['/Subtype'] == '/Image':
                        try:
                            size = (xObject[obj]['/Width'], xObject[obj]['/Height'])
                            data = xObject[obj].get_data()

                            if xObject[obj]['/ColorSpace'] == '/DeviceRGB':
                                # Raw RGB samples: rebuild directly.
                                img = Image.frombytes('RGB', size, data)
                            else:
                                # Other color spaces (e.g. embedded JPEG):
                                # let PIL sniff the byte stream.
                                img = Image.open(io.BytesIO(data))

                            images.append(img)
                            logger.info(f"成功提取图片: {size}")
                        except Exception as e:
                            # Skip unparseable images; keep the rest.
                            logger.warning(f"提取图片时出错: {str(e)}")
                            continue
        except Exception as e:
            # Page without /Resources//XObject, or malformed structure.
            logger.debug(f"页面没有图片或提取失败: {str(e)}")

        return images

    def describe_image_with_ai(self, image: Image.Image) -> str:
        """Describe an image's content via the multimodal model.

        Args:
            image: PIL ``Image`` to describe.

        Returns:
            The model's textual description, or a bracketed placeholder
            string on failure (never raises).
        """
        try:
            # Encode as base64 PNG data URI for the multimodal API.
            buffered = io.BytesIO()
            image.save(buffered, format="PNG")
            img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
            img_data = f"data:image/png;base64,{img_base64}"

            messages = [
                {
                    "role": "user",
                    "content": [
                        {"text": "请详细描述这张图片的内容，包括其中的文字、图表、拓扑结构、网络配置等信息。如果是网络实验相关的图片，请特别注意设备名称、IP地址、端口号等关键信息。"},
                        {"image": img_data}
                    ]
                }
            ]

            response = MultiModalConversation.call(
                model=MULTIMODAL_MODEL,
                messages=messages
            )

            if response.status_code == 200:
                choices = response.output.get('choices', [])
                if choices:
                    message = choices[0].get('message', {})
                    content = message.get('content', [])

                    # Content items may be dicts ({"text": ...}) or plain
                    # strings depending on the model; accept both.
                    text_parts = []
                    for item in content:
                        if isinstance(item, dict) and 'text' in item:
                            text_parts.append(item['text'])
                        elif isinstance(item, str):
                            text_parts.append(item)

                    description = ' '.join(text_parts)
                    logger.info(f"AI图片描述成功，长度: {len(description)}")
                    return description

            logger.warning("AI图片描述失败，使用默认描述")
            return "[图片：无法识别内容]"
        except Exception as e:
            logger.error(f"AI描述图片时出错: {str(e)}")
            return "[图片：识别出错]"

    def extract_text_from_pdf(self, pdf_path: str, include_images: bool = True) -> List[Dict[str, str]]:
        """Extract text (and optionally image descriptions) page by page.

        Args:
            pdf_path: Path to the PDF file.
            include_images: When True, extract images and append an AI
                description of each to the page text.

        Returns:
            One dict per non-empty page with keys ``id``, ``text``, ``page``,
            ``source``, ``has_images``. Empty list on failure.
        """
        try:
            reader = PdfReader(pdf_path)
            chunks = []

            # File stem uniquely prefixes chunk ids so pages from different
            # PDFs never collide in the collection.
            filename = Path(pdf_path).stem

            for i, page in enumerate(reader.pages):
                text = page.extract_text()
                combined_text = text if text.strip() else ""
                images = []

                if include_images:
                    images = self.extract_images_from_page(page)
                    if images:
                        logger.info(f"第 {i+1} 页包含 {len(images)} 张图片，开始AI识别...")
                        combined_text += f"\n\n[该页面包含 {len(images)} 张图片]\n"

                        for img_idx, img in enumerate(images):
                            description = self.describe_image_with_ai(img)
                            combined_text += f"\n图片 {img_idx + 1} 的内容描述：\n{description}\n"

                if combined_text.strip():  # skip pages with no content at all
                    chunks.append({
                        # Bug fix: was a hard-coded literal prefix, which made
                        # ids collide across files; use the file stem instead.
                        "id": f"{filename}_page_{i+1}",
                        "text": combined_text,
                        "page": i+1,
                        "source": pdf_path,
                        "has_images": len(images) > 0
                    })

            logger.info(f"从 {pdf_path} 提取了 {len(chunks)} 页内容（图片识别: {'开启' if include_images else '关闭'}）")
            return chunks
        except Exception as e:
            logger.error(f"提取PDF内容时出错: {str(e)}")
            return []

    def process_pdf(self, pdf_path: str, is_guide: bool = True, include_images: bool = True) -> bool:
        """Process one PDF and store its chunks in the vector database.

        Args:
            pdf_path: Path to the PDF file.
            is_guide: True → guides collection; False → reports collection.
            include_images: Whether to extract and describe embedded images.

        Returns:
            True on success, False if extraction produced nothing or an
            error occurred.
        """
        try:
            chunks = self.extract_text_from_pdf(pdf_path, include_images)
            if not chunks:
                return False

            collection = self.guide_collection if is_guide else self.report_collection

            ids = [chunk["id"] for chunk in chunks]
            texts = [chunk["text"] for chunk in chunks]
            metadatas = [{
                "page": chunk["page"],
                "source": chunk["source"],
                "filename": Path(chunk["source"]).stem,
                "has_images": chunk.get("has_images", False)
            } for chunk in chunks]

            collection.add(
                ids=ids,
                documents=texts,
                metadatas=metadatas
            )

            logger.info(f"成功将 {pdf_path} 添加到向量数据库")
            return True
        except Exception as e:
            logger.error(f"处理PDF时出错: {str(e)}")
            return False

    def search_similar(self, query: str, collection_name: str = "both", limit: int = 5) -> List[Dict[str, Any]]:
        """Semantic (embedding) search over one or both collections.

        Args:
            query: Query text.
            collection_name: "guides", "reports", or "both".
            limit: Maximum number of results returned overall.

        Returns:
            Result dicts with ``text``, ``metadata``, ``similarity``
            (1 - distance), and ``type``; sorted by similarity descending.
            Empty list on error.
        """
        results = []

        try:
            if collection_name in ["guides", "both"]:
                guide_results = self.guide_collection.query(
                    query_texts=[query],
                    n_results=limit
                )

                for doc, metadata, distance in zip(
                    guide_results["documents"][0],
                    guide_results["metadatas"][0],
                    guide_results["distances"][0]
                ):
                    results.append({
                        "text": doc,
                        "metadata": metadata,
                        "similarity": 1 - distance,  # convert distance to similarity
                        "type": "guide"
                    })

            if collection_name in ["reports", "both"]:
                report_results = self.report_collection.query(
                    query_texts=[query],
                    n_results=limit
                )

                for doc, metadata, distance in zip(
                    report_results["documents"][0],
                    report_results["metadatas"][0],
                    report_results["distances"][0]
                ):
                    results.append({
                        "text": doc,
                        "metadata": metadata,
                        "similarity": 1 - distance,  # convert distance to similarity
                        "type": "report"
                    })

            # Merge the two collections' hits and keep only the best `limit`.
            results.sort(key=lambda x: x["similarity"], reverse=True)
            return results[:limit]
        except Exception as e:
            logger.error(f"搜索向量数据库时出错: {str(e)}")
            return []

    def search_exact_text(self, query: str, collection_name: str = "both", limit: int = 10) -> List[Dict[str, Any]]:
        """Case-insensitive substring search over stored documents.

        Scans every document in the selected collection(s) — a full scan,
        acceptable for small corpora but O(total text) per query.

        Args:
            query: Text to look for.
            collection_name: "guides", "reports", or "both".
            limit: Maximum number of matches to return.

        Returns:
            Matches with the full ``text``, a ±150-char ``context`` excerpt
            around the first occurrence, ``metadata``, ``type``, and
            ``match_position``. Empty list on error.
        """
        results = []
        query_lower = query.lower()

        try:
            collections_to_search = []
            if collection_name in ["guides", "both"]:
                collections_to_search.append((self.guide_collection, "guide"))
            if collection_name in ["reports", "both"]:
                collections_to_search.append((self.report_collection, "report"))

            for collection, col_type in collections_to_search:
                all_docs = collection.get()

                for doc_id, doc, metadata in zip(
                    all_docs["ids"],
                    all_docs["documents"],
                    all_docs["metadatas"]
                ):
                    if query_lower in doc.lower():
                        start_idx = doc.lower().find(query_lower)

                        # Excerpt with up to 150 chars of context each side.
                        context_start = max(0, start_idx - 150)
                        context_end = min(len(doc), start_idx + len(query) + 150)
                        context = doc[context_start:context_end]

                        # Ellipses mark truncation at either end.
                        if context_start > 0:
                            context = "..." + context
                        if context_end < len(doc):
                            context = context + "..."

                        results.append({
                            "text": doc,
                            "context": context,
                            "metadata": metadata,
                            "type": col_type,
                            "match_position": start_idx
                        })

                        if len(results) >= limit:
                            return results

            logger.info(f"精确搜索 '{query}' 找到 {len(results)} 个结果")
            return results
        except Exception as e:
            logger.error(f"精确文本搜索时出错: {str(e)}")
            return []

    def delete_pdf(self, filename: str, is_guide: bool = True) -> bool:
        """Delete all stored chunks belonging to one PDF file.

        Args:
            filename: PDF file name (with or without ``.pdf`` suffix).
            is_guide: True → guides collection; False → reports collection.

        Returns:
            True if at least one record was deleted, False otherwise.
        """
        try:
            # Match against the stem stored in metadata["filename"].
            if filename.endswith('.pdf'):
                filename = filename[:-4]

            collection = self.guide_collection if is_guide else self.report_collection

            # Full scan of ids/metadatas to collect this file's chunk ids.
            all_docs = collection.get()

            ids_to_delete = []
            for doc_id, metadata in zip(all_docs["ids"], all_docs["metadatas"]):
                if metadata.get("filename") == filename:
                    ids_to_delete.append(doc_id)

            if ids_to_delete:
                collection.delete(ids=ids_to_delete)
                # Bug fix: log the actual file name instead of a placeholder.
                logger.info(f"从向量数据库中删除了 {len(ids_to_delete)} 条记录: {filename}")
                return True
            else:
                logger.warning(f"未找到文件记录: {filename}")
                return False
        except Exception as e:
            logger.error(f"删除PDF记录时出错: {str(e)}")
            return False

    def batch_process_directory(self, directory: str, is_guide: bool = True) -> Dict[str, bool]:
        """Process every ``*.pdf`` file in a directory.

        Args:
            directory: Directory path to scan (non-recursive).
            is_guide: True → guides collection; False → reports collection.

        Returns:
            Mapping of file name → success flag; empty dict if the
            directory does not exist.
        """
        results = {}
        dir_path = Path(directory)

        if not dir_path.exists() or not dir_path.is_dir():
            logger.error(f"目录不存在: {directory}")
            return results

        for pdf_file in dir_path.glob("*.pdf"):
            success = self.process_pdf(str(pdf_file), is_guide)
            results[pdf_file.name] = success

        return results
