import torch
import os
import json
import time
import gc
import threading
from PIL import Image
from transformers.utils.import_utils import is_flash_attn_2_available
from colpali_engine.models import ColQwen2, ColQwen2Processor
from pdf2image import convert_from_path
from typing import List, Dict, Tuple, Optional
import argparse
from datasets import load_dataset
from math import ceil


class ColPaliRetriever:
    def __init__(self, model_path: str, cache_dir: str = "./cache", device: str = "cuda", 
                 quantization: str = None, assert_pooling: bool = False, num_threads: int = 4):
        """Initialize the ColPali retriever and its on-disk embedding cache.

        Args:
            model_path: checkpoint path/name for ColQwen2.
            cache_dir: directory holding the JSON embedding cache.
            device: "cuda" or "cpu"; CUDA is used only when actually available.
            quantization: optional mode ("int8" / "bf16"), applied in _load_model.
            assert_pooling: cache-only mode; missing cache entries are errors.
            num_threads: default worker-thread count for batch processing.

        Raises:
            ValueError: in assert_pooling mode when no cache file exists.
        """
        self.device = device
        self.use_cuda = self.device.lower() == "cuda" and torch.cuda.is_available()
        self.quantization = quantization
        self.assert_pooling = assert_pooling
        self.num_threads = num_threads          # default worker count
        self.cache_lock = threading.Lock()      # guards pdf_cache reads/writes

        # Embedding cache: id / file-hash -> serialized embedding record.
        self.pdf_cache = {}
        self.cache_dir = cache_dir
        os.makedirs(cache_dir, exist_ok=True)
        self.cache_path = os.path.join(cache_dir, "embeddings_cache.json")

        # Pick up a previously persisted cache, if any.
        if os.path.exists(self.cache_path):
            self._load_cache()
        elif self.assert_pooling:
            raise ValueError("缓存文件不存在，无法在assert_pooling模式下继续")

        # The model is only needed when we may compute new embeddings
        # (non-assert mode) or when a GPU is available anyway.
        if self.assert_pooling and not self.use_cuda:
            print("CPU模式下启用assert_pooling，跳过模型加载")
        else:
            self._load_model(model_path)


    # Thread task: process one slice of a parquet dataset.
    def _process_parquet_subset(self, ds, indices, id_field, image_field):
        """Worker task: embed the samples at `indices`, skipping cached ones.

        New embeddings are written into the shared `self.pdf_cache` under
        `self.cache_lock`, and the cache file is persisted after every
        sub-batch. Failures are logged and the batch is skipped.
        """
        batch_size = 5 if self.use_cuda else 1
        processed_count = 0  # number of samples actually embedded

        for i in range(0, len(indices), batch_size):
            batch_indices = indices[i:i + batch_size]
            batch = ds.select(batch_indices)

            # Snapshot which ids of this batch are already cached.
            ids = [str(item[id_field]) for item in batch]
            with self.cache_lock:
                cached_ids = {eid for eid in ids if eid in self.pdf_cache}

            # Skip the whole batch when everything is cached already.
            if len(cached_ids) == len(ids):
                print(f"批次 {i+1}-{i+len(ids)} 已全部缓存，跳过")
                continue

            # Positions (within this batch) that still need an embedding.
            need_process = [j for j, eid in enumerate(ids) if eid not in cached_ids]
            if not need_process:
                continue

            images = [batch[j][image_field] for j in need_process]
            process_ids = [ids[j] for j in need_process]

            # Pre-bind so the `del` in `finally` never hits an unbound name.
            # Previously, an exception raised before these were assigned
            # (e.g. in process_images) made the `finally` raise NameError,
            # which escaped the except handler and killed the worker thread.
            processed_batch = outputs = batch_embeddings = None
            try:
                processed_batch = self.processor.process_images(images).to(self.model.device)

                with torch.no_grad(), torch.inference_mode():
                    outputs = self.model(**processed_batch)
                    batch_embeddings = self._pool_embeddings(outputs)

                # Serialize cache writes; re-check ids to avoid racing writers.
                with self.cache_lock:
                    for eid, emb in zip(process_ids, batch_embeddings):
                        if eid not in self.pdf_cache:
                            self.pdf_cache[eid] = {
                                'embedding': emb.cpu().tolist(),
                                'timestamp': time.time(),
                                'meta': {'index': eid}
                            }
                    self._save_cache()  # persist once per sub-batch

                processed_count += len(process_ids)
                print(f"已处理 {i+1}-{i+len(ids)} 批次中的 {len(process_ids)} 个样本")

            except Exception as e:
                print(f"线程处理批次失败: {e}，跳过该批次")
                continue
            finally:
                # Free GPU/CPU memory whether or not the batch succeeded.
                del processed_batch, outputs, batch_embeddings
                gc.collect()
                if self.use_cuda:
                    torch.cuda.empty_cache()

        print(f"线程完成：处理了 {processed_count} 个新样本")

    def process_parquet_dataset(self, parquet_dir: str, split: str = "validation", n: int = 0, m: int = None, 
                               id_field: str = "questionId", image_field: str = "image", num_threads: int = None):
        """Embed the [n, m) slice of a parquet dataset with worker threads.

        Args:
            parquet_dir: directory containing `{split}-*.parquet` files.
            split: dataset split name used in the file glob.
            n, m: half-open index range to process; m=None means "to end".
            id_field: column used as the cache key.
            image_field: column containing the PIL image.
            num_threads: worker count; defaults to `self.num_threads`.
        """
        import glob
        parquet_files = sorted(glob.glob(os.path.join(parquet_dir, f"{split}-*.parquet")))
        if not parquet_files:
            print(f"未找到 {split} parquet 文件: {parquet_dir}")
            return

        ds = load_dataset(
            "parquet",
            data_files={split: parquet_files},
            split=split
        )
        total = len(ds)
        # Clamp the slice to the dataset bounds so ds.select never receives
        # an out-of-range index (previously m > total raised downstream).
        m = total if m is None else min(m, total)
        n = max(n, 0)
        print(f"处理 {split} 集: 共{total}条, 选取区间[{n}, {m})")
        indices = list(range(n, m))
        if not indices:
            print("无数据需要处理")
            return

        # Thread count: explicit arg wins; never more threads than samples,
        # never fewer than one.
        num_threads = max(1, min(num_threads or self.num_threads, len(indices)))
        print(f"使用 {num_threads} 个线程并行处理")

        # Split the index list into contiguous per-thread chunks.
        step = ceil(len(indices) / num_threads)
        thread_indices = [indices[i * step:(i + 1) * step] for i in range(num_threads)]

        # Launch one worker per chunk, then wait for all of them.
        threads = []
        for idx_subset in thread_indices:
            t = threading.Thread(
                target=self._process_parquet_subset,
                args=(ds, idx_subset, id_field, image_field)
            )
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        print(f"已完成 {split} 集 [{n}, {m}) 区间的处理，共 {len(indices)} 条数据")


    # Thread task: embed one slice of the PDF path list.
    def _process_pdf_subset(self, pdf_paths, dpi, results, lock):
        """Worker task: embed `pdf_paths`, appending into the shared `results`.

        Cache hits are served from `self.pdf_cache`; new PDFs are rasterized,
        embedded batch by batch, persisted under `self.cache_lock`, and then
        published into `results` under the caller-supplied `lock`.
        """
        per_batch = 5 if self.use_cuda else 1
        target_device = "cuda" if self.use_cuda else "cpu"

        for pdf_path in pdf_paths:
            pdf_hash = self._get_file_hash(pdf_path)
            try:
                if pdf_hash in self.pdf_cache:
                    # Cache hit: rebuild tensors from the stored lists.
                    with lock:  # results dict is shared across threads
                        cached = [torch.tensor(e).to(target_device)
                                  for e in self.pdf_cache[pdf_hash]['embeddings']]
                        results['embeddings'].extend(cached)
                        results['page_to_pdf_map'].extend(
                            (pdf_path, page) for page in range(len(cached)))
                    continue

                if self.assert_pooling:
                    raise ValueError(f"PDF {pdf_path} 不在缓存中（assert模式）")

                # New PDF: rasterize pages, then embed them batch by batch.
                pages = convert_from_path(pdf_path, dpi=dpi)
                if not pages:
                    continue

                page_embeddings = []
                for start in range(0, len(pages), per_batch):
                    chunk = pages[start:start + per_batch]
                    processed = self.processor.process_images(chunk).to(self.model.device)
                    with torch.no_grad(), torch.inference_mode():
                        out = self.model(**processed)
                        page_embeddings.extend(self._pool_embeddings(out))
                    # Release per-batch buffers before the next chunk.
                    del processed, out
                    gc.collect()
                    if self.use_cuda:
                        torch.cuda.empty_cache()

                # Persist the new embeddings before publishing them.
                with self.cache_lock:
                    self.pdf_cache[pdf_hash] = {
                        'path': pdf_path,
                        'timestamp': time.time(),
                        'embeddings': [e.cpu().tolist() for e in page_embeddings]
                    }
                    self._save_cache()

                with lock:  # results dict is shared across threads
                    results['embeddings'].extend(page_embeddings)
                    results['page_to_pdf_map'].extend(
                        (pdf_path, page) for page in range(len(page_embeddings)))

            except Exception as e:
                print(f"处理PDF {pdf_path} 失败: {e}，跳过该文件")
                continue


    def process_pdfs(self, pdf_paths: List[str], dpi: int = 100, num_threads: int = None) -> Dict[str, List[torch.Tensor]]:
        """Process PDF files with multiple worker threads.

        NOTE(review): a second `process_pdfs` defined later in this class
        shadows this threaded version at class-creation time — only the
        later definition is live. Confirm which one is intended and remove
        the other.

        Args:
            pdf_paths: PDF file paths to embed (cached ones are reused).
            dpi: rasterization resolution for pdf2image.
            num_threads: worker count; defaults to `self.num_threads`.
        Returns:
            dict with 'embeddings' (page tensors) and 'page_to_pdf_map'
            ((path, page_index) pairs aligned with 'embeddings').
        """
        # Shared result container; `result_lock` guards appends from workers.
        results = {
            'embeddings': [],
            'page_to_pdf_map': []
        }
        result_lock = threading.Lock()

        # Guard: an empty input previously reached ceil(0 / 0) below and
        # raised ZeroDivisionError.
        if not pdf_paths:
            return results

        num_threads = max(1, min(num_threads or self.num_threads, len(pdf_paths)))
        print(f"使用 {num_threads} 个线程并行处理 {len(pdf_paths)} 个PDF文件")

        # Split the path list into contiguous per-thread chunks.
        step = ceil(len(pdf_paths) / num_threads)
        pdf_subsets = [pdf_paths[i * step:(i + 1) * step] for i in range(num_threads)]

        # Launch one worker per chunk, then wait for all of them.
        threads = []
        for subset in pdf_subsets:
            t = threading.Thread(
                target=self._process_pdf_subset,
                args=(subset, dpi, results, result_lock)
            )
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        return results


    # Other pre-existing methods (_load_model, _load_cache, _save_cache, _pool_embeddings, retrieve, ...) unchanged
    def _load_model(self, model_path: str):
        """Load the ColQwen2 model and processor (only called when needed).

        GPU path: fp16 by default, optionally bf16, flash-attention 2 when
        available. CPU path: fp32, optionally bitsandbytes 8-bit loading.
        """
        if self.use_cuda:
            print("使用CUDA设备")
            torch_dtype = torch.float16
            # NOTE(review): torch.int8 is unlikely to be a valid weight dtype
            # for from_pretrained — presumably 8-bit loading (as in the CPU
            # branch below) was intended; confirm before relying on the GPU
            # "int8" option.
            if self.quantization == "int8":
                torch_dtype = torch.int8
            elif self.quantization == "bf16":
                torch_dtype = torch.bfloat16
                
            self.model = ColQwen2.from_pretrained(
                model_path,
                torch_dtype=torch_dtype,
                device_map="cuda:0",
                # Fall back to the default attention impl when FA2 is absent.
                attn_implementation="flash_attention_2" if is_flash_attn_2_available() else None,
            ).eval()
        else:
            print("使用CPU设备")
            torch_dtype = torch.float32
            if self.quantization == "int8":
                self.model = ColQwen2.from_pretrained(
                    model_path,
                    torch_dtype=torch_dtype,
                    low_cpu_mem_usage=True,
                    load_in_8bit=True,
                ).eval()
            else:
                self.model = ColQwen2.from_pretrained(
                    model_path,
                    torch_dtype=torch_dtype,
                    low_cpu_mem_usage=True,
                ).eval()
                
            self.model = self.model.to("cpu")
            
        self.processor = ColQwen2Processor.from_pretrained(model_path)

    def _load_cache(self):
        """从磁盘加载缓存"""
        try:
            with open(self.cache_path, 'r') as f:
                self.pdf_cache = json.load(f)
            print(f"已加载缓存: {len(self.pdf_cache)} 个PDF文件")
        except Exception as e:
            print(f"加载缓存失败: {e}")
            self.pdf_cache = {}

    def _save_cache(self):
        """保存缓存到磁盘"""
        with open(self.cache_path, 'w') as f:
            json.dump(self.pdf_cache, f)
        print(f"已保存缓存到 {self.cache_path}")

    def process_pdfs(self, pdf_paths: List[str], dpi: int = 100) -> Dict[str, List[torch.Tensor]]:
        """Process PDF files, computing or cache-loading page embeddings.

        NOTE(review): this re-definition shadows the threaded
        `process_pdfs(pdf_paths, dpi, num_threads)` defined earlier in the
        class — the later definition wins at class creation, so the
        multithreaded version is unreachable. Confirm which one is intended.

        Args:
            pdf_paths: PDF file paths to embed.
            dpi: rasterization resolution for pdf2image.
        Returns:
            dict with 'embeddings' (one tensor per page) and
            'page_to_pdf_map' ((path, page_index) pairs, same order).
        Raises:
            ValueError: in assert_pooling mode when a PDF is not cached.
        """
        all_embeddings = []
        page_to_pdf_map = []

        for pdf_path in pdf_paths:
            pdf_hash = self._get_file_hash(pdf_path)

            # Cache hit: rebuild tensors on the active device.
            if pdf_hash in self.pdf_cache:
                print(f"从缓存加载: {pdf_path}")
                device = "cuda" if self.use_cuda else "cpu"
                pdf_embeddings = [torch.tensor(e).to(device) for e in self.pdf_cache[pdf_hash]['embeddings']]
                all_embeddings.extend(pdf_embeddings)
                page_to_pdf_map.extend([(pdf_path, i) for i in range(len(pdf_embeddings))])
            else:
                if self.assert_pooling:
                    # Assert mode: a cache miss is a hard error.
                    raise ValueError(f"PDF {pdf_path} 的嵌入值不在缓存中，无法在assert_pooling模式下继续")
                
                print(f"处理新PDF: {pdf_path}")
                # Rasterize the PDF into one PIL image per page.
                images = []
                try:
                    pdf_pages = convert_from_path(pdf_path, dpi=dpi)
                    for page_img in pdf_pages:
                        images.append(page_img)
                except Exception as e:
                    print(f"处理PDF失败: {pdf_path}, 错误: {e}")
                    continue

                # Compute embeddings. (The `not self.assert_pooling` guard is
                # always true here — assert mode already raised above.)
                if images and not self.assert_pooling:
                    batch_size = 5 if self.use_cuda else 1
                    pdf_embeddings = []
                    
                    for i in range(0, len(images), batch_size):
                        batch_images = images[i:i + batch_size]
                        processed_batch = self.processor.process_images(batch_images).to(self.model.device)
                        
                        with torch.no_grad(), torch.inference_mode():
                            outputs = self.model(**processed_batch)
                            batch_embeddings = self._pool_embeddings(outputs)
                            pdf_embeddings.extend(batch_embeddings)

                        # Free per-batch buffers before the next chunk.
                        del processed_batch, outputs, batch_embeddings
                        gc.collect()
                        if self.use_cuda:
                            torch.cuda.empty_cache()

                    # Persist the new embeddings for future runs.
                    self.pdf_cache[pdf_hash] = {
                        'path': pdf_path,
                        'timestamp': time.time(),
                        'embeddings': [e.cpu().tolist() for e in pdf_embeddings]
                    }
                    self._save_cache()

                    all_embeddings.extend(pdf_embeddings)
                    page_to_pdf_map.extend([(pdf_path, i) for i in range(len(pdf_embeddings))])

        # Assert mode: verify every requested PDF had a cached embedding.
        # (Effectively unreachable — the per-PDF check above raises first.)
        if self.assert_pooling:
            missing_pdfs = [os.path.basename(p) for p in pdf_paths if self._get_file_hash(p) not in self.pdf_cache]
            if missing_pdfs:
                raise ValueError(f"以下PDF的嵌入值不在缓存中: {', '.join(missing_pdfs)}")
        
        return {
            'embeddings': all_embeddings,
            'page_to_pdf_map': page_to_pdf_map
        }

    def _pool_embeddings(self, hidden_states: torch.Tensor, strategy: str = "mean") -> List[torch.Tensor]:
        """应用token pooling策略获取固定长度的表示"""
        if len(hidden_states.shape) != 3:
            raise ValueError(
                f"期望嵌入张量形状为 [batch_size, num_patches, embedding_dim]，但得到 {hidden_states.shape}")

        if strategy == "mean":
            return [hidden_states[i].mean(dim=0) for i in range(hidden_states.shape[0])]
        elif strategy == "max":
            return [hidden_states[i].max(dim=0)[0] for i in range(hidden_states.shape[0])]
        else:
            raise ValueError(f"不支持的池化策略: {strategy}")

    def _get_file_hash(self, file_path: str) -> str:
        """生成文件的简单哈希（基于文件名和修改时间）"""
        try:
            mtime = os.path.getmtime(file_path)
            return f"{os.path.basename(file_path)}_{mtime}"
        except:
            return f"{os.path.basename(file_path)}_{time.time()}"

    def retrieve(self, query: str, pdf_data: Dict, k: int = 3) -> List[Tuple[float, int, str]]:
        """Rank cached pages against a text query.

        Args:
            query: free-text query.
            pdf_data: output of process_pdfs ('embeddings' + 'page_to_pdf_map').
            k: number of top pages to return.
        Returns:
            Up to k (score, page_number, pdf_path) tuples, best first.
        Raises:
            RuntimeError: when assert_pooling mode is active.
        """
        if self.assert_pooling:
            raise RuntimeError("在assert_pooling模式下无法执行检索操作")

        # Embed the query with the same pooling used for page embeddings.
        with torch.no_grad(), torch.inference_mode():
            processed = self.processor.process_queries([query]).to(self.model.device)
            query_embedding = self._pool_embeddings(self.model(**processed))[0]

        # Cosine similarity of the query against every page embedding.
        scored = []
        for idx, page_emb in enumerate(pdf_data['embeddings']):
            page_emb = page_emb.to(query_embedding.device)
            score = torch.cosine_similarity(
                query_embedding.unsqueeze(0),
                page_emb.unsqueeze(0)
            ).item()
            scored.append((score, idx))

        # Keep the k best pages and map them back to (path, page_number).
        scored.sort(key=lambda pair: pair[0], reverse=True)

        top_results = []
        for score, page_idx in scored[:k]:
            pdf_path, page_num = pdf_data['page_to_pdf_map'][page_idx]
            top_results.append((score, page_num, pdf_path))

        return top_results


    def encode_images(self, 
                      images: list[Image.Image], 
                      image_ids: list[str],  # unique ids aligned 1:1 with images (e.g. questionId)
                      batch_size: int = 8) -> list[torch.Tensor]:
        """
        Encode a list of images into embedding vectors (cache-first).
        Args:
            images: list of PIL images (one image per element)
            image_ids: unique id per image (aligned with images, e.g. questionId)
            batch_size: batch size for encoding
        Returns:
            list of image embeddings (same order as the input images)
        Raises:
            RuntimeError: when assert_pooling mode is active.
            ValueError: when images and image_ids lengths differ (or images is empty).
        """
        if self.assert_pooling:
            raise RuntimeError("assert_pooling模式下不支持图像编码")
        if not images or len(images) != len(image_ids):
            raise ValueError("images与image_ids长度必须一致")
        
        all_embeddings = []
        device = self.model.device
        uncached_indices = []  # original positions of images still to encode
        uncached_images = []   # images still to encode
        uncached_ids = []      # ids of the images still to encode

        # Step 1: split the input into cached / uncached under the cache lock.
        with self.cache_lock:
            for idx, img_id in enumerate(image_ids):
                if img_id in self.pdf_cache:
                    # Cache hit: rebuild the tensor on the model's device.
                    emb = torch.tensor(self.pdf_cache[img_id]['embedding'], device=device)
                    all_embeddings.append((idx, emb))  # remember original index
                else:
                    # Queue for encoding below.
                    uncached_indices.append(idx)
                    uncached_images.append(images[idx])
                    uncached_ids.append(img_id)

        # Step 2: batch-encode the uncached images.
        if uncached_images:
            print(f"发现{len(uncached_images)}张未缓存图像，开始编码...")
            batch_embeddings = []
            # Process in fixed-size batches.
            for i in range(0, len(uncached_images), batch_size):
                batch_imgs = uncached_images[i:i+batch_size]
                with torch.no_grad(), torch.inference_mode():
                    processed = self.processor.process_images(batch_imgs).to(device)
                    outputs = self.model(** processed)
                    batch_emb = self._pool_embeddings(outputs)  # embeddings of this batch
                    batch_embeddings.extend(batch_emb)
                
                # Release per-batch buffers.
                del processed, outputs
                gc.collect()
                if self.use_cuda:
                    torch.cuda.empty_cache()

            # Store the fresh embeddings in the cache, tagged with their
            # original input positions.
            with self.cache_lock:
                for idx, img_id, emb in zip(uncached_indices, uncached_ids, batch_embeddings):
                    # Cache as plain CPU lists (JSON-serializable).
                    self.pdf_cache[img_id] = {
                        'embedding': emb.cpu().tolist(),
                        'timestamp': time.time(),
                        'meta': {'image_id': img_id}
                    }
                    all_embeddings.append((idx, emb))  # remember original index
                self._save_cache()  # persist once for the whole call

        # Step 3: restore the original input order before returning.
        # Sort by the remembered index, then strip the index.
        all_embeddings.sort(key=lambda x: x[0])  # order of the input images
        return [emb for (idx, emb) in all_embeddings]  # aligned with input images


    # 新增：从缓存中批量加载图像嵌入（可选，用于快速验证）
    def load_cached_image_embeddings(self, image_ids: list[str]) -> list[torch.Tensor or None]:
        """从缓存加载图像嵌入，不存在则返回None"""
        embeddings = []
        with self.cache_lock:
            for img_id in image_ids:
                if img_id in self.pdf_cache:
                    emb = torch.tensor(
                        self.pdf_cache[img_id]['embedding'],
                        device=self.model.device if not self.assert_pooling else "cpu"
                    )
                    embeddings.append(emb)
                else:
                    embeddings.append(None)
        return embeddings

    def encode_text(self, text: str) -> torch.Tensor:
        """Encode one question/text into a single embedding vector.

        Args:
            text: the input question text.
        Returns:
            A pooled embedding tensor of shape [embedding_dim].
        Raises:
            RuntimeError: when assert_pooling mode is active.
        """
        if self.assert_pooling:
            raise RuntimeError("assert_pooling模式下不支持文本编码")

        with torch.no_grad(), torch.inference_mode():
            # The processor expects a batch, so wrap the single text.
            processed = self.processor.process_queries([text]).to(self.model.device)
            output = self.model(**processed)
            # One input -> take the first (and only) pooled vector.
            embedding = self._pool_embeddings(output)[0]

        return embedding

    def compute_similarity(self, text_embedding: torch.Tensor, image_embeddings: list[torch.Tensor]) -> torch.Tensor:
        """
        计算文本嵌入与每个图像嵌入的余弦相似度
        Args:
            text_embedding: 文本（问题）的嵌入向量（shape: [embedding_dim]）
            image_embeddings: 图像嵌入向量列表（每个元素shape: [embedding_dim]）
        Returns:
            相似度张量（shape: [num_images]，值越高越相似）
        """
        if not image_embeddings:
            return torch.tensor([])
        
        # 确保所有向量在同一设备
        device = text_embedding.device
        image_embeddings = [emb.to(device) for emb in image_embeddings]
        
        # 堆叠图像嵌入为矩阵（shape: [num_images, embedding_dim]）
        image_emb_matrix = torch.stack(image_embeddings, dim=0)
        # 文本嵌入扩展为（1, embedding_dim）以匹配矩阵维度
        text_emb_expanded = text_embedding.unsqueeze(0)
        
        # 计算余弦相似度（shape: [num_images]）
        similarities = torch.cosine_similarity(text_emb_expanded, image_emb_matrix, dim=1)
        
        return similarities

        