#!/usr/bin/env python
# coding:utf-8
import logging
import traceback
from typing import List, Tuple

import cupy as cp
import numpy as np

class GPUWorker:
    """Per-GPU worker that scores a query feature against a resident feature shard.

    NOTE(review): depends on two module-level names that must be defined
    elsewhere in this file before ``process_batch`` is called:
      - ``dst_features_gpu_list``: one 2-D CuPy feature matrix per GPU,
        indexed by ``gpu_id`` (rows are destination features).
      - ``batch_size``: rows per shard, used to turn a local row index
        into a global one.
    Confirm both exist before use.
    """

    def __init__(self, gpu_id: int):
        """Bind this worker to a fixed CUDA device.

        Args:
            gpu_id: index of the GPU this worker owns.
        """
        self.gpu_id = gpu_id
        # Make this GPU the current device for this thread so later
        # CuPy allocations land on it.
        cp.cuda.Device(gpu_id).use()

    def process_batch(self, src_feature: np.ndarray, threshold: float) -> List[Tuple[int, float]]:
        """Process a batch of features on the assigned GPU.

        Computes the cosine similarity between ``src_feature`` and every
        row of this GPU's resident shard and returns the matches.

        Args:
            src_feature: 1-D query feature vector; its length must match
                the shard's column count.
            threshold: minimum cosine similarity to report.

        Returns:
            List of ``(global_row_index, score)`` tuples with
            ``score >= threshold``; empty on error, on an empty shard,
            or for a zero-norm query.
        """
        try:
            # Re-assert the device: this method may run on a different
            # thread than __init__, and the current device is per-thread.
            cp.cuda.Device(self.gpu_id).use()
            threshold = float(threshold)

            # Move the query feature to the GPU and take its norm once.
            src_feature_gpu = cp.array(src_feature)
            src_norm = float(cp.linalg.norm(src_feature_gpu))
            # FIX: a zero-norm query would divide by zero below, and any
            # positive dot product / 0 yields +inf, which spuriously
            # passes the threshold. Bail out early instead.
            if src_norm == 0.0:
                logging.warning(f"GPU {self.gpu_id} zero-norm source feature")
                return []

            # Use the resident shard for this GPU (hoisted to a local to
            # avoid repeated global/index lookups).
            with cp.cuda.Device(self.gpu_id):
                dst_features = dst_features_gpu_list[self.gpu_id]
                if dst_features.size == 0:
                    logging.warning(f"GPU {self.gpu_id} 批次为空")
                    return []
                # Cosine similarity in one batched GPU pass.
                dot_products = cp.dot(dst_features, src_feature_gpu)
                dst_norms = cp.linalg.norm(dst_features, axis=1)
                cosine_similarities = dot_products / (dst_norms * src_norm)
                # Copy the scores back to the host.
                cosine_similarities_cpu = cp.asnumpy(cosine_similarities)

            # Release temporary GPU allocations back to the pools so other
            # workers on this device can reuse the memory.
            cp.get_default_memory_pool().free_all_blocks()
            cp.get_default_pinned_memory_pool().free_all_blocks()

            # Convert local row indices into global ones.
            # NOTE(review): assumes each shard holds exactly ``batch_size``
            # rows (except possibly the last) — confirm against the loader.
            return [
                (batch_size * self.gpu_id + j, float(score))
                for j, score in enumerate(cosine_similarities_cpu)
                if score >= threshold
            ]
        except Exception as e:
            # Best-effort contract: log with full traceback, report no
            # matches rather than propagating.
            # FIX: ``traceback`` was referenced here without being imported,
            # which turned any failure into a NameError; the import is now
            # added at the top of the file.
            logging.error(f"GPU {self.gpu_id} 处理错误: {str(e)}\n堆栈跟踪:\n{traceback.format_exc()}")
            return []
