#!/usr/bin/env python
# coding:utf-8
import numpy as np
import math
import logging
import time
import cupy as cp
from typing import List, Tuple
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
import os
from flask import Flask, request, jsonify
import datas
import threading
import traceback

app = Flask(__name__)
dst_features_gpu_list = []  # one resident destination-feature matrix per GPU (CuPy arrays)
mysql_result_cpu = None  # cached MySQL rows; row[0] is used as the record id in proc()
update_lock = threading.Lock()  # serializes background refreshes of the globals above
batch_size=0  # per-GPU partition size, set by update_dst_features_gpu()

class GPUWorker:
    """Scores a query feature against one GPU-resident slice of the dataset.

    Instances are built in the parent process and pickled into worker
    processes by ProcessPoolExecutor, so ``__init__`` may run on a different
    process than ``process_batch``; ``process_batch`` therefore selects the
    device explicitly instead of relying on ``Device.use()`` from ``__init__``.
    """

    def __init__(self, gpu_id: int):
        self.gpu_id = gpu_id
        cp.cuda.Device(gpu_id).use()

    def process_batch(self, src_feature: np.ndarray, batch_features, batch_size, threshold: float) -> List[Tuple[int, float]]:
        """Compute cosine similarity of ``src_feature`` against this GPU's batch.

        Args:
            src_feature: 1-D query feature vector (host numpy array).
            batch_features: CuPy matrix resident on GPU ``self.gpu_id``.
            batch_size: uniform per-GPU partition size, used to map a local
                row index ``j`` back to the global feature index.
            threshold: minimum cosine similarity to report.

        Returns:
            List of ``(global_index, score)`` with ``score >= threshold``;
            empty list on error or when the batch is empty.
        """
        try:
            threshold = float(threshold)

            # All GPU work must happen on this worker's device.  BUG FIX: the
            # query vector was previously allocated BEFORE entering the device
            # context, landing on the default device and breaking cp.dot for
            # every gpu_id != 0.
            with cp.cuda.Device(self.gpu_id):
                if batch_features.size == 0:
                    logging.warning(f"GPU {self.gpu_id} 批次为空")
                    return []

                src_feature_gpu = cp.array(src_feature)
                src_norm = float(cp.linalg.norm(src_feature_gpu))

                dot_products = cp.dot(batch_features, src_feature_gpu)
                dst_norms = cp.linalg.norm(batch_features, axis=1)
                cosine_similarities = dot_products / (dst_norms * src_norm)
                # Bring the scores back to host memory for thresholding.
                cosine_similarities_cpu = cp.asnumpy(cosine_similarities)

                # Release transient allocations on THIS device (the resident
                # batch_features is still referenced and survives).  The pools
                # are per-device, so this must stay inside the device context.
                cp.get_default_memory_pool().free_all_blocks()
                cp.get_default_pinned_memory_pool().free_all_blocks()

            # Map local row indices back to global indices using the uniform
            # partition size chosen by update_dst_features_gpu().
            results = []
            for j, score in enumerate(cosine_similarities_cpu):
                if score >= threshold:
                    results.append((batch_size * self.gpu_id + j, float(score)))
            return results
        except Exception as e:
            logging.error(f"GPU {self.gpu_id} 处理错误: {str(e)}\n堆栈跟踪:\n{traceback.format_exc()}")
            return []

@app.route('/proc', methods=['POST'])
def proc():
    """HTTP entry point: score one query feature against the resident dataset.

    Expects a JSON body with keys: ``feature`` (encoded query vector),
    ``scheme`` (table name for the lazy fallback load), ``score``
    (similarity threshold) and ``log_id`` (request correlation id).

    Returns a JSON list of ``{"id", "score"}`` objects sorted by score
    descending; an empty JSON list on any error or when nothing matches.
    """
    global mysql_result_cpu, dst_features_gpu_list, batch_size
    # BUG FIX: defined before the try so the except handler cannot raise
    # NameError when the failure happens before the body is parsed.
    log_id = None
    try:
        global_start_time = time.time()
        data = request.json
        feature_src = data.get('feature')
        scheme = data.get('scheme')
        threshold = data.get('score')
        log_id = data.get('log_id')
        logging.info(f"{log_id}_______开始计时")

        # Lazy fallback load if the background updater has not populated it yet.
        if not mysql_result_cpu:
            mysql_result_cpu = datas.getTable(scheme, log_id)
        if not mysql_result_cpu:
            logging.error(f"{log_id}___MySQL 数据为空")
            return jsonify([])
        mysql_time = time.time()
        logging.info(f"{log_id}___获取 MySQL 时间：{mysql_time - global_start_time}, 共：{len(mysql_result_cpu)}")

        # Decode the query into a numpy feature vector.
        src_feature_arr = datas.decode_single_feature(feature_src)
        decode_time = time.time()
        logging.info(f"{log_id}___解码时间：{decode_time - mysql_time}")

        # Sanity-check GPU availability against the resident partitions.
        num_gpus = cp.cuda.runtime.getDeviceCount()
        if num_gpus == 0:
            logging.error(f"{log_id}___未检测到 GPU")
            return jsonify([])
        if len(dst_features_gpu_list) != num_gpus:
            logging.error(f"{log_id}___dst_features_gpu_list 长度 ({len(dst_features_gpu_list)}) 与 GPU 数量 ({num_gpus}) 不匹配")
            return jsonify([])

        # One task per GPU: (gpu_id, query, resident slice, partition size, threshold).
        tasks = [(gpu_id, src_feature_arr, dst_features_gpu_list[gpu_id], batch_size, threshold)
                 for gpu_id in range(num_gpus)]

        # Fan out one worker process per GPU and gather all hits.
        # (Renamed the unpacked partition size to ``bsz`` so it no longer
        # shadows the global ``batch_size`` inside the comprehension.)
        results = []
        with ProcessPoolExecutor(max_workers=num_gpus) as executor:
            futures = [executor.submit(GPUWorker(gpu_id).process_batch, src_feat, sub_data, bsz, thresh)
                       for gpu_id, src_feat, sub_data, bsz, thresh in tasks]
            for future in futures:
                results.extend(future.result())

        end_time = time.time()
        total_time = end_time - global_start_time

        if not results:
            logging.info(f"{log_id}___无匹配结果")
            return jsonify([])

        # Translate global feature indices back into database record ids.
        all_result = []
        for idx, score in results:
            all_result.append({"id": mysql_result_cpu[idx][0], "score": score})

        results = sorted(all_result, key=lambda x: x['score'], reverse=True)
        logging.info(f"{log_id}___找到 {len(results)} 个匹配项")
        logging.info(f"{log_id}___总计算时间: {total_time:.4f}s")
        logging.info(f"{log_id}___平均处理速度: {len(mysql_result_cpu)/total_time:.2f} 特征/s")
        return jsonify(results)
    except Exception as e:
        logging.error(f"{log_id}___发生错误: {e}")
        return jsonify([])

def update_dst_features_gpu():
    """Reload features from MySQL and re-partition them across all GPUs.

    Refreshes the module globals ``mysql_result_cpu``, ``batch_size`` and
    ``dst_features_gpu_list`` under ``update_lock``.  GPU ``g`` receives rows
    ``[g*batch_size, (g+1)*batch_size)`` of the feature matrix; GPUs past the
    end of the data get an empty placeholder array.
    """
    global dst_features_gpu_list, mysql_result_cpu, batch_size
    log_id = time.time()  # set outside try so the except handler can always log it
    try:
        logging.info(f"{log_id}___更新 GPU 数据")
        with update_lock:
            mysql_result_cpu = datas.getTable("album", log_id)
            if not mysql_result_cpu:
                logging.error(f"{log_id}___MySQL 数据为空，无法更新 GPU 数据")
                return
            dst_features = datas.getFeature(mysql_result_cpu, "album_feature", log_id)
            if not dst_features:
                logging.error(f"{log_id}___特征数据为空，无法更新 GPU 数据")
                return

            # Uniform partition size; the last GPU may get a short slice.
            num_gpus = cp.cuda.runtime.getDeviceCount()
            part_size = math.ceil(len(dst_features) / num_gpus)
            new_dst_features_gpu_list = []

            for gpu_id in range(num_gpus):
                with cp.cuda.Device(gpu_id):
                    start_idx = gpu_id * part_size
                    end_idx = min(start_idx + part_size, len(dst_features))
                    if start_idx >= len(dst_features):
                        logging.warning(f"{log_id}___GPU {gpu_id} 无数据分配")
                        new_dst_features_gpu_list.append(cp.array([]))
                        continue
                    batch_features = cp.array(dst_features[start_idx:end_idx])
                    new_dst_features_gpu_list.append(batch_features)
                    logging.info(f"{log_id}___GPU {gpu_id} 分配 {len(batch_features)} 条特征")

            # BUG FIX: publish the partition size and the partition list
            # together, at the end.  Previously the global batch_size was
            # mutated before the new list was swapped in, so a concurrent
            # request could pair the NEW batch_size with the OLD partitions
            # and map result indices to the wrong records.
            batch_size = part_size
            dst_features_gpu_list = new_dst_features_gpu_list

            logging.info(f"{log_id}___GPU 数据更新完成：{len(dst_features_gpu_list)}")
    except Exception as e:
        logging.error(f"{log_id}___更新 dst_features_gpu 失败: {e}")

def background_updater():
    """Background loop: refresh GPU data whenever datas.isUpdate() signals it.

    Runs forever in a daemon thread; any exception is logged and the loop
    continues on the next cycle.
    """
    while True:
        try:
            if datas.isUpdate():
                update_dst_features_gpu()
                datas.updateF()
        except Exception as e:
            logging.error(f"后台更新错误: {e}")
        # BUG FIX: sleep unconditionally.  Previously the sleep lived inside
        # the try, so a persistent error in datas.isUpdate() skipped it and
        # turned the loop into a hot spin that floods the log.
        time.sleep(60)  # check once per minute

def initialize_gpu_data():
    """Populate dst_features_gpu_list once at process startup.

    Installs an empty placeholder array per detected GPU, then triggers the
    first full load via update_dst_features_gpu().  Logs and returns on any
    failure instead of raising.
    """
    global dst_features_gpu_list
    try:
        log_id = time.time()
        logging.info(f"{log_id}___初始化 GPU 数据")
        num_gpus = cp.cuda.runtime.getDeviceCount()
        if num_gpus == 0:
            logging.error(f"{log_id}___未检测到 GPU，无法初始化")
            return
        # One empty placeholder per GPU until the first real load completes.
        placeholders = []
        for _ in range(num_gpus):
            placeholders.append(cp.array([]))
        dst_features_gpu_list = placeholders
        # Perform the initial data load.
        update_dst_features_gpu()
    except Exception as e:
        logging.error(f"{log_id}___初始化 GPU 数据失败: {e}")

if __name__ == "__main__":
    # Log to ~/app_.log via the root logger.
    logging.basicConfig(
        filename= os.path.expanduser('~')+'/app_.log',
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    # NOTE(review): "spawn" is presumably chosen because the per-request
    # ProcessPoolExecutor workers use CUDA, which does not survive fork —
    # confirm before changing.
    multiprocessing.set_start_method("spawn")

    # Load the feature table onto every GPU before serving requests.
    initialize_gpu_data()

    # Periodic refresh of the resident GPU data (daemon: dies with the process).
    updater_thread = threading.Thread(target=background_updater, daemon=True)
    updater_thread.start()

    # Start the Flask service (blocking call).
    app.run(host='0.0.0.0', port=6000)