#!/usr/bin/env python
# coding:utf-8
import numpy as np
import math
import logging
import time
import cupy as cp
from typing import List, Tuple
import multiprocessing
import os
from flask import Flask, request, jsonify
import c_datas
import threading
import traceback
from concurrent.futures import ThreadPoolExecutor

app = Flask(__name__)
# Per-GPU shards of the gallery feature matrix: dst_features_gpu_list[i]
# is a CuPy array resident on GPU i. Populated by update_dst_features_gpu().
dst_features_gpu_list = []
# Cached MySQL rows backing the GPU shards. From usage below, row[0] is the
# record id and row[1] the encoded feature — confirm against c_datas.getTable.
mysql_result_cpu = None
# Number of features assigned to each GPU shard; workers use it to map a
# shard-local index back to a global index into mysql_result_cpu.
batch_size=0

class GPUWorker:
    """Scores a query feature against one GPU's shard of the gallery.

    Each worker is pinned to a single CUDA device and reads its shard from
    the module-level ``dst_features_gpu_list[gpu_id]``.
    """

    def __init__(self, gpu_id: int):
        self.gpu_id = gpu_id
        # Bind the constructing thread to this device. process_batch re-binds
        # because CuPy's current-device is per-thread and the method may run
        # on a different (executor) thread.
        cp.cuda.Device(gpu_id).use()

    def process_batch(self, src_feature: np.ndarray, threshold: float) -> List[Tuple[int, float]]:
        """Compute cosine similarities on this GPU and keep scores >= threshold.

        Args:
            src_feature: 1-D query feature vector (length assumed to match
                the shard's feature dimension — confirm with caller).
            threshold: minimum cosine similarity to report.

        Returns:
            List of ``(global_index, score)`` pairs where
            ``global_index = batch_size * gpu_id + local_index`` indexes into
            ``mysql_result_cpu``. Empty list on error or no matches.
        """
        try:
            # Re-bind the device for the executor thread running this call.
            cp.cuda.Device(self.gpu_id).use()
            threshold = float(threshold)

            # Move the query feature onto this GPU.
            src_feature_gpu = cp.array(src_feature)
            src_norm = float(cp.linalg.norm(src_feature_gpu))
            if src_norm == 0.0:
                # A zero query vector has undefined cosine similarity; the
                # old code divided by zero, producing NaN scores that were
                # then silently filtered out. Same net result, no warnings.
                logging.warning(f"GPU {self.gpu_id} zero-norm query feature, skipping")
                return []

            with cp.cuda.Device(self.gpu_id):
                shard = dst_features_gpu_list[self.gpu_id]
                if shard.size == 0:
                    logging.warning(f"GPU {self.gpu_id} 批次为空")
                    return []
                # cosine = dot(a, b) / (|a| * |b|), computed on the GPU.
                dot_products = cp.dot(shard, src_feature_gpu)
                dst_norms = cp.linalg.norm(shard, axis=1)
                cosine_similarities = dot_products / (dst_norms * src_norm)
                # Bring the scores back to host memory.
                cosine_similarities_cpu = cp.asnumpy(cosine_similarities)

            # Release the temporary GPU allocations back to the pool.
            cp.get_default_memory_pool().free_all_blocks()
            cp.get_default_pinned_memory_pool().free_all_blocks()

            # Vectorized threshold filter — the old Python-level loop over
            # every score was the hot spot for large galleries.
            base = batch_size * self.gpu_id
            hits = np.flatnonzero(cosine_similarities_cpu >= threshold)
            return [(base + int(j), float(cosine_similarities_cpu[j])) for j in hits]
        except Exception as e:
            logging.error(f"GPU {self.gpu_id} 处理错误: {str(e)}\n堆栈跟踪:\n{traceback.format_exc()}")
            return []

@app.route('/proc', methods=['POST'])
def proc():
    """HTTP entry point: match one query feature against the cached gallery.

    Expects a JSON body with keys ``feature`` (encoded query feature),
    ``scheme`` (table selector), ``score`` (similarity threshold) and
    ``log_id`` (request correlation id). Returns a JSON list of
    ``{"id": ..., "score": ...}`` sorted by score descending, or ``[]`` on
    any failure.
    """
    global mysql_result_cpu,dst_features_gpu_list,batch_size
    # Assigned before try: previously a failure before the log_id line
    # (e.g. an unparsable JSON body) raised NameError inside the except
    # handler, masking the real error.
    log_id = None
    try:
        global_start_time = time.time()
        data = request.json
        feature_src = data.get('feature')
        scheme = data.get('scheme')
        threshold = data.get('score')
        log_id = data.get('log_id')
        logging.info(f"{log_id}_______开始计时")

        # Lazily load the gallery on the first request if the background
        # updater has not populated it yet.
        if not mysql_result_cpu:
            mysql_result_cpu = c_datas.getTable(scheme, log_id)
        if not mysql_result_cpu:
            logging.error(f"{log_id}___MySQL 数据为空")
            return jsonify([])

        # Decode the query feature into a numpy array.
        src_feature_arr = c_datas.decode_single_feature(feature_src)

        # Validate GPU availability and that the shard list matches.
        num_gpus = cp.cuda.runtime.getDeviceCount()
        if num_gpus == 0:
            logging.error(f"{log_id}___未检测到 GPU")
            return jsonify([])
        if len(dst_features_gpu_list) != num_gpus:
            logging.error(f"{log_id}___dst_features_gpu_list 长度 ({len(dst_features_gpu_list)}) 与 GPU 数量 ({num_gpus}) 不匹配")
            return jsonify([])

        # Fan the same query out to every GPU shard in parallel; each
        # worker returns (global_index, score) pairs above the threshold.
        results = []
        with ThreadPoolExecutor(max_workers=num_gpus) as executor:
            futures = [executor.submit(GPUWorker(gpu_id).process_batch,
                                       src_feature_arr, threshold)
                       for gpu_id in range(num_gpus)]
            for future in futures:
                results.extend(future.result())

        total_time = time.time() - global_start_time

        if not results:
            logging.info(f"{log_id}___无匹配结果")
            return jsonify([])

        # Map shard-global indices back to DB rows; row[0] is the record id.
        all_result = [{"id": mysql_result_cpu[idx][0], "score": score}
                      for idx, score in results]
        results = sorted(all_result, key=lambda x: x['score'], reverse=True)
        logging.info(f"{log_id}___找到 {len(results)} 个匹配项")
        logging.info(f"{log_id}___总计算时间: {total_time:.4f}s")
        logging.info(f"{log_id}___平均处理速度: {len(mysql_result_cpu)/total_time:.2f} 特征/s")
        return jsonify(results)
    except Exception as e:
        logging.error(f"{log_id}___发生错误: {e}")
        return jsonify([])

def update_dst_features_gpu():
    """Refresh the per-GPU gallery shards from MySQL.

    Loads the "album" table, decodes each row's feature, splits the decoded
    features evenly across all visible GPUs, then publishes the new
    ``mysql_result_cpu``, ``batch_size`` and ``dst_features_gpu_list``
    globals together once the shards are fully built.
    """
    global dst_features_gpu_list, mysql_result_cpu,batch_size
    log_id = time.time()
    try:
        logging.info(f"{log_id}___更新 GPU 数据")
        rows = c_datas.getTable("album", log_id)
        if not rows:
            logging.error(f"{log_id}___MySQL 数据为空，无法更新 GPU 数据")
            return

        # Keep rows and decoded features index-aligned: a row whose feature
        # fails to decode is dropped from BOTH lists. Previously only the
        # feature was skipped, which shifted every later global index by one
        # and made the workers return the WRONG record ids.
        kept_rows = []
        dst_features = []
        for i, row in enumerate(rows):
            try:
                dst_features.append(c_datas.decode_single_feature(row[1]))
                kept_rows.append(row)
            except Exception as e:
                logging.warning(f"Failed to decode feature at index {i}: {e}")
        if not dst_features:
            logging.error(f"{log_id}___特征数据为空，无法更新 GPU 数据")
            return

        # Shard the decoded features evenly across the GPUs.
        num_gpus = cp.cuda.runtime.getDeviceCount()
        new_batch_size = math.ceil(len(dst_features) / num_gpus)
        new_dst_features_gpu_list = []
        for gpu_id in range(num_gpus):
            with cp.cuda.Device(gpu_id):
                start_idx = gpu_id * new_batch_size
                end_idx = min(start_idx + new_batch_size, len(dst_features))
                if start_idx >= len(dst_features):
                    logging.warning(f"{log_id}___GPU {gpu_id} 无数据分配")
                    new_dst_features_gpu_list.append(cp.array([]))
                    continue
                batch_features = cp.array(dst_features[start_idx:end_idx])
                new_dst_features_gpu_list.append(batch_features)
                logging.info(f"{log_id}___GPU {gpu_id} 分配 {len(batch_features)} 条特征")

        # Publish all three globals together, AFTER the shards are ready.
        # Previously mysql_result_cpu and batch_size were updated before the
        # shards, so a concurrent request could pair new rows with old shards.
        mysql_result_cpu = kept_rows
        batch_size = new_batch_size
        dst_features_gpu_list = new_dst_features_gpu_list

        logging.info(f"{log_id}___GPU 数据更新完成：{len(dst_features_gpu_list)}")
    except Exception as e:
        logging.error(f"{log_id}___更新 dst_features_gpu 失败: {e}")


def initialize_gpu_data():
    """One-time GPU data setup at process start.

    Installs an empty placeholder shard per visible GPU (so the shard list
    length matches the device count even before data loads), then performs
    the first gallery load via update_dst_features_gpu().
    """
    global dst_features_gpu_list
    log_id = time.time()
    try:
        logging.info(f"{log_id}___初始化 GPU 数据")
        num_gpus = cp.cuda.runtime.getDeviceCount()
        if num_gpus == 0:
            logging.error(f"{log_id}___未检测到 GPU，无法初始化")
            return
        # One empty placeholder array per device.
        dst_features_gpu_list = [cp.array([]) for _ in range(num_gpus)]
        # Initial data load.
        update_dst_features_gpu()
    except Exception as e:
        logging.error(f"{log_id}___初始化 GPU 数据失败: {e}")
def background_updater():
    """Daemon loop: refresh the GPU gallery data once a minute.

    The sleep lives in a ``finally`` clause so that an exception from the
    update does NOT skip it — previously a failing update retried
    immediately, spinning the loop at 100% CPU.
    """
    while True:
        try:
            update_dst_features_gpu()
        except Exception as e:
            logging.error(f"后台更新错误: {e}")
        finally:
            time.sleep(60)  # refresh interval

if __name__ == "__main__":
    logging.basicConfig(
        filename= os.path.expanduser('~')+'/app_c.log',
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    multiprocessing.set_start_method("spawn")

    # 初始化 GPU 数据
    initialize_gpu_data()

    #updater_thread = threading.Thread(target=background_updater, daemon=True)
    #updater_thread.start()

    # 启动 Flask 服务
    app.run(host='0.0.0.0', port=6003)