import argparse
import gc
import json
import os
import sys
import time
from typing import Dict, List, Optional, Tuple

import torch
from PIL import Image
from pdf2image import convert_from_path
from transformers.utils.import_utils import is_flash_attn_2_available

from colpali.colpali_engine.models import ColQwen2, ColQwen2Processor

class ColPaliRetriever:
    """Retrieve PDF pages relevant to a text query via ColQwen2 embeddings.

    Each PDF page is rasterized, embedded with the model, and pooled to a
    single fixed-size vector. Pooled vectors are cached on disk as JSON so
    subsequent runs can skip the expensive model forward pass entirely.
    """

    def __init__(self, model_path: str, cache_dir: str = "./cache", device: str = "cuda",
                 quantization: Optional[str] = None, assert_pooling: bool = False):
        """Initialize the retriever and its on-disk embedding cache.

        Args:
            model_path: Path to the pretrained ColQwen2 model.
            cache_dir: Directory for the JSON embedding cache.
            device: "cuda" or "cpu"; silently falls back to CPU when CUDA
                is unavailable.
            quantization: None, "int8", or "bf16" (see _load_model).
            assert_pooling: CPU-only mode that merely verifies cached
                embeddings exist and never loads the model.

        Raises:
            ValueError: If assert_pooling is set but no cache file exists.
        """
        self.device = device
        self.use_cuda = self.device.lower() == "cuda" and torch.cuda.is_available()
        self.quantization = quantization
        self.assert_pooling = assert_pooling  # CPU mode: only check the cache

        # Cache layout: {file_hash: {'path', 'timestamp', 'embeddings'}}
        self.pdf_cache = {}
        self.cache_dir = cache_dir
        os.makedirs(cache_dir, exist_ok=True)
        self.cache_path = os.path.join(cache_dir, "embeddings_cache.json")

        # Load any existing cache; cache-only mode cannot run without one.
        if os.path.exists(self.cache_path):
            self._load_cache()
        elif self.assert_pooling:
            raise ValueError("缓存文件不存在，无法在assert_pooling模式下继续")

        # Load the model only when embeddings may need to be computed.
        if not self.assert_pooling or self.use_cuda:
            self._load_model(model_path)
        else:
            print("CPU模式下启用assert_pooling，跳过模型加载")

    def _load_model(self, model_path: str):
        """Load the ColQwen2 model and processor (called only when needed).

        Quantization handling: "int8" loads 8-bit weights via bitsandbytes
        on either device; "bf16" selects bfloat16 on CUDA; the default is
        float16 on CUDA and float32 on CPU.
        """
        if self.use_cuda:
            print("使用CUDA设备")
            attn_impl = "flash_attention_2" if is_flash_attn_2_available() else None
            if self.quantization == "int8":
                # BUGFIX: the previous code passed torch_dtype=torch.int8,
                # which is not a loadable weight dtype for from_pretrained.
                # Use bitsandbytes 8-bit loading, matching the CPU int8 path.
                self.model = ColQwen2.from_pretrained(
                    model_path,
                    device_map="cuda:0",
                    load_in_8bit=True,
                    attn_implementation=attn_impl,
                ).eval()
            else:
                torch_dtype = torch.bfloat16 if self.quantization == "bf16" else torch.float16
                self.model = ColQwen2.from_pretrained(
                    model_path,
                    torch_dtype=torch_dtype,
                    device_map="cuda:0",
                    attn_implementation=attn_impl,
                ).eval()
        else:
            print("使用CPU设备")
            torch_dtype = torch.float32
            if self.quantization == "int8":
                self.model = ColQwen2.from_pretrained(
                    model_path,
                    torch_dtype=torch_dtype,
                    low_cpu_mem_usage=True,
                    load_in_8bit=True,
                ).eval()
            else:
                self.model = ColQwen2.from_pretrained(
                    model_path,
                    torch_dtype=torch_dtype,
                    low_cpu_mem_usage=True,
                ).eval()

            self.model = self.model.to("cpu")

        self.processor = ColQwen2Processor.from_pretrained(model_path)

    def _load_cache(self):
        """Load the embedding cache from disk; on failure, start empty."""
        try:
            with open(self.cache_path, 'r') as f:
                self.pdf_cache = json.load(f)
            print(f"已加载缓存: {len(self.pdf_cache)} 个PDF文件")
        except Exception as e:
            # A corrupt or unreadable cache is not fatal — rebuild lazily.
            print(f"加载缓存失败: {e}")
            self.pdf_cache = {}

    def _save_cache(self):
        """Persist the embedding cache to disk as JSON."""
        with open(self.cache_path, 'w') as f:
            json.dump(self.pdf_cache, f)
        print(f"已保存缓存到 {self.cache_path}")

    def process_pdfs(self, pdf_paths: List[str], dpi: int = 100) -> Dict[str, list]:
        """Compute (or load from cache) pooled embeddings for each PDF page.

        Args:
            pdf_paths: PDF files to process.
            dpi: Rasterization resolution passed to pdf2image.

        Returns:
            Dict with two parallel lists:
                'embeddings': one pooled tensor per page.
                'page_to_pdf_map': matching (pdf_path, page_index) tuples.

        Raises:
            ValueError: In assert_pooling mode when any PDF lacks a cache
                entry.
        """
        all_embeddings = []
        page_to_pdf_map = []
        target_device = "cuda" if self.use_cuda else "cpu"

        for pdf_path in pdf_paths:
            pdf_hash = self._get_file_hash(pdf_path)

            if pdf_hash in self.pdf_cache:
                print(f"从缓存加载: {pdf_path}")
                # Cached vectors are plain lists; rebuild tensors on the
                # active device.
                pdf_embeddings = [torch.tensor(e).to(target_device)
                                  for e in self.pdf_cache[pdf_hash]['embeddings']]
                all_embeddings.extend(pdf_embeddings)
                page_to_pdf_map.extend([(pdf_path, i) for i in range(len(pdf_embeddings))])
                continue

            if self.assert_pooling:
                # Cache-only mode: a missing entry is a hard error.
                raise ValueError(f"PDF {pdf_path} 的嵌入值不在缓存中，无法在assert_pooling模式下继续")

            print(f"处理新PDF: {pdf_path}")
            try:
                images = list(convert_from_path(pdf_path, dpi=dpi))
            except Exception as e:
                # Best-effort: skip unreadable PDFs and keep going.
                print(f"处理PDF失败: {pdf_path}, 错误: {e}")
                continue

            if not images:
                continue

            # Small batches bound peak memory; CPU runs one page at a time.
            batch_size = 5 if self.use_cuda else 1
            pdf_embeddings = []

            for i in range(0, len(images), batch_size):
                batch_images = images[i:i + batch_size]
                processed_batch = self.processor.process_images(batch_images).to(self.model.device)

                with torch.no_grad(), torch.inference_mode():
                    outputs = self.model(**processed_batch)
                    batch_embeddings = self._pool_embeddings(outputs)
                    pdf_embeddings.extend(batch_embeddings)

                # Drop per-batch tensors promptly to limit memory growth.
                del processed_batch, outputs, batch_embeddings
                gc.collect()
                if self.use_cuda:
                    torch.cuda.empty_cache()

            # Persist so the next run can skip the model forward pass.
            self.pdf_cache[pdf_hash] = {
                'path': pdf_path,
                'timestamp': time.time(),
                'embeddings': [e.cpu().tolist() for e in pdf_embeddings]
            }
            self._save_cache()

            all_embeddings.extend(pdf_embeddings)
            page_to_pdf_map.extend([(pdf_path, i) for i in range(len(pdf_embeddings))])

        if self.assert_pooling:
            # Final sweep: every requested PDF must have had a cache entry.
            missing_pdfs = [os.path.basename(p) for p in pdf_paths
                            if self._get_file_hash(p) not in self.pdf_cache]
            if missing_pdfs:
                raise ValueError(f"以下PDF的嵌入值不在缓存中: {', '.join(missing_pdfs)}")

        return {
            'embeddings': all_embeddings,
            'page_to_pdf_map': page_to_pdf_map
        }

    def _pool_embeddings(self, hidden_states: torch.Tensor, strategy: str = "mean") -> List[torch.Tensor]:
        """Pool per-token embeddings into one fixed-length vector per item.

        Args:
            hidden_states: Tensor of shape [batch_size, num_patches, dim].
            strategy: "mean" or "max" pooling over the patch axis.

        Returns:
            List of batch_size tensors, each of shape [dim].

        Raises:
            ValueError: On a non-3D input or an unknown strategy.
        """
        if len(hidden_states.shape) != 3:
            raise ValueError(
                f"期望嵌入张量形状为 [batch_size, num_patches, embedding_dim]，但得到 {hidden_states.shape}")

        if strategy == "mean":
            return [hidden_states[i].mean(dim=0) for i in range(hidden_states.shape[0])]
        elif strategy == "max":
            return [hidden_states[i].max(dim=0)[0] for i in range(hidden_states.shape[0])]
        else:
            raise ValueError(f"不支持的池化策略: {strategy}")

    def _get_file_hash(self, file_path: str) -> str:
        """Return a cheap cache key: basename plus modification time.

        NOTE: not a content hash — renaming or touching the file
        invalidates its cache entry.
        """
        try:
            mtime = os.path.getmtime(file_path)
            return f"{os.path.basename(file_path)}_{mtime}"
        except OSError:
            # Unreadable/missing file: fall back to a never-cached key.
            return f"{os.path.basename(file_path)}_{time.time()}"

    def retrieve(self, query: str, pdf_data: Dict, k: int = 3) -> List[Tuple[float, int, str]]:
        """Return the top-k pages most similar to the query.

        Args:
            query: Natural-language query text.
            pdf_data: Output of process_pdfs.
            k: Number of results to return.

        Returns:
            List of (cosine_similarity, page_index, pdf_path), best first.

        Raises:
            RuntimeError: If called in assert_pooling mode (no model loaded).
        """
        if self.assert_pooling:
            raise RuntimeError("在assert_pooling模式下无法执行检索操作")

        # Embed the query with the same pooling as the pages.
        with torch.no_grad(), torch.inference_mode():
            batch_queries = self.processor.process_queries([query]).to(self.model.device)
            query_output = self.model(**batch_queries)
            query_embedding = self._pool_embeddings(query_output)[0]

        # Cosine similarity against every cached page embedding. Cast to the
        # query's dtype: cache entries deserialize as float32 while an
        # fp16/bf16 model produces half-precision query embeddings.
        similarities = []
        for i, page_embedding in enumerate(pdf_data['embeddings']):
            page_embedding = page_embedding.to(device=query_embedding.device,
                                               dtype=query_embedding.dtype)
            sim = torch.cosine_similarity(
                query_embedding.unsqueeze(0),
                page_embedding.unsqueeze(0)
            ).item()
            similarities.append((sim, i))

        # Keep the k highest-scoring pages.
        similarities.sort(key=lambda x: x[0], reverse=True)
        top_k = similarities[:k]

        # Resolve page indices back to (score, page_number, pdf_path).
        results = []
        for score, page_idx in top_k:
            pdf_path, page_num = pdf_data['page_to_pdf_map'][page_idx]
            results.append((score, page_num, pdf_path))

        return results


if __name__ == "__main__":
    # CLI entry point: embed every PDF in a directory, then (unless in
    # assert_pooling mode) run a few sample retrieval queries.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default="./models/colqwen2-v1.0-merged", help="模型路径")
    parser.add_argument("--pdf_dir", default="./test_data", help="PDF文件目录")
    parser.add_argument("--device", choices=["cuda", "cpu"], default="cuda", help="使用CUDA或CPU")
    # BUGFIX: None was listed in choices, but argparse can never match it
    # from a CLI string; omitting the flag already yields the None default.
    parser.add_argument("--quantization", choices=["int8", "bf16"], default=None,
                        help="量化方法，仅在CPU上支持int8")
    parser.add_argument("--dpi", type=int, default=200, help="PDF转图像的DPI")
    parser.add_argument("--assert_pooling", action="store_true",
                        help="在CPU模式下仅检查缓存中是否存在嵌入值，不加载模型")
    args = parser.parse_args()

    # Collect every PDF in the target directory (case-insensitive suffix).
    pdf_paths = [os.path.join(args.pdf_dir, f) for f in os.listdir(args.pdf_dir)
                if f.lower().endswith('.pdf')]

    if not pdf_paths:
        print(f"在目录 {args.pdf_dir} 中未找到PDF文件")
        sys.exit(1)

    try:
        # assert_pooling is only honored on CPU, matching the class contract.
        retriever = ColPaliRetriever(
            args.model_path,
            device=args.device,
            quantization=args.quantization,
            assert_pooling=args.assert_pooling and args.device == "cpu"
        )

        # Embed (or cache-load) every page up front.
        pdf_data = retriever.process_pdfs(pdf_paths, dpi=args.dpi)

        if not args.assert_pooling:
            # Demo queries exercising the retrieval path.
            queries = [
                "What is ColPali?",
                "How does ColPali work?",
                "Do you know VLM?",
                "Whats the relation between ColPali and RAG?",
                "How does RAG work?"
            ]

            for query in queries:
                print(f"\n查询: {query}")
                results = retriever.retrieve(query, pdf_data, k=10)

                # Print ranked results; page numbers are shown 1-based.
                for rank, (score, page_num, pdf_path) in enumerate(results, 1):
                    print(f"排名 {rank}: 相似度={score:.4f}, PDF: {os.path.basename(pdf_path)}, 页面: {page_num + 1}")
        else:
            print("成功检查：所有PDF的嵌入值都存在于缓存中")

    except ValueError as e:
        print(f"错误: {e}")
        sys.exit(1)
    except RuntimeError as e:
        print(f"运行时错误: {e}")
        sys.exit(1)