#!/usr/bin/env python
# coding:utf-8

import argparse
import json
import logging
import math
import multiprocessing
import os
import pickle
import struct
import sys
import time
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import List, Tuple

import cupy as cp
import numpy as np
import pybase64
import pymysql
import redis
from flask import Flask, request, jsonify

from mysql import MYSQL_CONFIG

app = Flask(__name__)

class GPUWorker:
    """Computes cosine similarities for one batch of features on one GPU."""

    def __init__(self, gpu_id: int):
        self.gpu_id = gpu_id
        cp.cuda.Device(gpu_id).use()

    def process_batch(self, src_feature: np.ndarray, batch_features: List[np.ndarray],
                     start_idx: int, threshold: float) -> List[Tuple[int, float]]:
        """Process one batch of features on this worker's GPU.

        Args:
            src_feature: decoded source feature vector.
            batch_features: decoded target feature vectors for this batch.
            start_idx: global index of the first feature in this batch.
            threshold: minimum cosine similarity to keep.

        Returns:
            List of (global_index, score) pairs with score >= threshold;
            [] on any GPU error (best-effort, error is logged).
        """
        try:
            # Re-select the device here: __init__ may have run in a different
            # process (workers are spawned), and CUDA device selection is
            # per-process — without this every batch runs on device 0.
            cp.cuda.Device(self.gpu_id).use()
            threshold = float(threshold)
            # Move the source feature to the GPU and precompute its norm.
            src_feature_gpu = cp.array(src_feature)
            src_norm = float(cp.linalg.norm(src_feature_gpu))
            # Move the target features to the GPU.
            dst_features_gpu = cp.array(batch_features)

            # Cosine similarity: dot(dst, src) / (|dst| * |src|).
            dot_products = cp.dot(dst_features_gpu, src_feature_gpu)
            dst_norms = cp.linalg.norm(dst_features_gpu, axis=1)
            cosine_similarities = dot_products / (dst_norms * src_norm)
            # Bring the scores back to the CPU.
            cosine_similarities_cpu = cp.asnumpy(cosine_similarities)

            # Keep only results at or above the threshold.
            return [(start_idx + j, float(score))
                    for j, score in enumerate(cosine_similarities_cpu)
                    if score >= threshold]

        except Exception as e:
            logging.error(f"GPU {self.gpu_id} error: {str(e)}")
            return []
        finally:
            # Release pooled GPU memory even when the computation failed
            # (the original skipped cleanup on the exception path).
            cp.get_default_memory_pool().free_all_blocks()
            cp.get_default_pinned_memory_pool().free_all_blocks()

def decode_single_feature(feature: str) -> np.ndarray:
    """Decode one base64-encoded feature string into a float vector.

    Skips the first 8 decoded floats (presumably a fixed header — TODO
    confirm against the producer of these strings). Returns None when the
    string cannot be decoded.
    """
    try:
        raw = pybase64.b64decode(feature)
        float_count = len(raw) // 4
        values = struct.unpack(f"{float_count}f", raw)
        return np.array(values[8:])
    except Exception as e:
        logging.warning(f"Failed to decode feature: {e}")
        return None

def decode_feature_batch(feature_dst_list: List[str]) -> List[np.ndarray]:
    """Decode a batch of (id, base64-feature, ...) rows in parallel.

    Returns one entry per input row, in input order: the decoded vector,
    or None for rows that failed to decode.

    Bug fixed: the original flattened the per-row arrays into one list of
    scalars (and raised TypeError on failed rows, since None is not
    iterable); the per-row arrays are now returned as-is.
    """

    def _decode_one(feature: str) -> np.ndarray:
        try:
            raw = pybase64.b64decode(feature)
            float_count = len(raw) // 4
            # Skip the first 8 decoded floats, matching decode_single_feature.
            return np.array(struct.unpack(f"{float_count}f", raw)[8:])
        except Exception as e:
            logging.warning(f"Failed to decode feature: {e}")
            return None

    # One thread per logical core: the heavy work (base64 + struct) happens
    # in C code, so threads overlap usefully despite the GIL.
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        return list(executor.map(_decode_one, [row[1] for row in feature_dst_list]))

def feature_compare_multi_gpu(feature_src: str, feature_dst_list: List[str], scheme: str,
                            threshold: float = 0.0, num_gpus: int = None, log_id: str = "") -> List[Tuple[int, float]]:
    """Compare one source feature against many target features on multiple GPUs.

    Args:
        feature_src: base64-encoded source feature string.
        feature_dst_list: rows of (id, base64-feature, ...) to compare against.
        scheme: scheme name; used as the Redis cache key prefix.
        threshold: minimum cosine similarity to keep a match.
        num_gpus: maximum number of GPUs to use; None means all available.
        log_id: prefix added to every log line for request tracing.

    Returns:
        Matches sorted by score descending, each as
        {"id": ..., "feature": ..., "score": ...}; [] when nothing matches.
    """
    start_time = time.time()

    # Honor the caller's num_gpus, capped at the hardware count (the original
    # unconditionally overwrote the parameter with the device count).
    available_gpus = cp.cuda.runtime.getDeviceCount()
    num_gpus = available_gpus if num_gpus is None else min(num_gpus, available_gpus)

    # Decode the source feature.
    src_feature_arr = decode_single_feature(feature_src)
    decode_time = time.time()
    logging.info(f"{log_id}Source feature decode time: {decode_time - start_time:.4f}s")

    r = redis.Redis(host='localhost', port=6379, db=2)
    # Decode all target features, caching the decoded list in Redis.
    feature_data = r.get(scheme + "_feature")
    logging.info(f"{log_id}开始解码")
    decode_start = time.time()
    dst_features = []
    if feature_data:
        logging.info(f"{log_id}使用redis的feature数据")
        # NOTE(review): pickle.loads on cache contents — safe only while this
        # Redis instance is trusted and unreachable by untrusted writers.
        dst_features = pickle.loads(feature_data)
    else:
        logging.info(f"{log_id}CPU解码")
        for i, feature_dst in enumerate(feature_dst_list):
            try:
                dst_features.append(decode_single_feature(feature_dst[1]))
            except Exception as e:
                logging.warning(f"Failed to decode feature at index {i}: {e}")
        r.set(scheme + '_feature', pickle.dumps(dst_features))
    decode_end = time.time()
    logging.info(f"{log_id}解码结束时间: {decode_end - decode_start:.4f}s  {len(dst_features)}个")

    total_features = len(dst_features)
    if total_features == 0:
        # Guard added: the original divided by num_gpus and built empty GPU
        # arrays when nothing decoded.
        return []

    # Split work evenly across GPUs. Slices come from the CPU-side list:
    # each spawned worker transfers its own slice to its own device, instead
    # of pickling CuPy GPU arrays across process boundaries.
    batch_size = math.ceil(total_features / num_gpus)
    logging.info(f"{log_id}每个gpu大小:{batch_size}")
    tasks = []
    for gpu_id in range(num_gpus):
        start_idx = gpu_id * batch_size
        end_idx = min(start_idx + batch_size, total_features)
        logging.info(f"{log_id}{gpu_id}开始干活:{start_idx} , {end_idx}")
        if start_idx < end_idx:
            tasks.append((gpu_id, src_feature_arr, dst_features[start_idx:end_idx], start_idx, threshold))

    # One process per GPU; workers re-select their device inside process_batch.
    results = []
    with ProcessPoolExecutor(max_workers=num_gpus) as executor:
        futures = [
            executor.submit(GPUWorker(gpu_id).process_batch, src_feat, batch_feat, start_idx, thresh)
            for gpu_id, src_feat, batch_feat, start_idx, thresh in tasks
        ]
        for future in futures:
            results.extend(future.result())

    total_time = time.time() - start_time
    logging.info(f"{log_id}Total processing time: {total_time:.4f}s")
    logging.info(f"{log_id}Average speed: {len(feature_dst_list)/total_time:.2f} features/s")

    if not results:
        # Fixed: the original returned None here; return [] so callers can
        # always treat the result as a sequence.
        return []

    # Map (global_index, score) pairs back to their source rows.
    all_result = []
    for idx, score in results:
        id_value, feature_value = feature_dst_list[idx][:2]
        all_result.append({"id": id_value, "feature": feature_value, "score": score})
    return sorted(all_result, key=lambda x: x['score'], reverse=True)

@app.route('/process', methods=['POST'])
def process():
    """HTTP endpoint: compare a query feature against all features of a scheme.

    Expects JSON with: feature (base64 source feature), scheme (table-name
    suffix, must be a plain identifier), score (similarity threshold),
    log_id (tracing prefix); start_time / end_time are accepted but unused.
    Returns a JSON list of matches, or the literal string "[]" on error or
    when there is no data.
    """
    # Extract parameters from the request JSON.
    data = request.json
    feature_src = data.get('feature')
    scheme = data.get('scheme')
    score = data.get('score')
    start_time = data.get('start_time')
    end_time = data.get('end_time')
    log_id = data.get('log_id')

    # `scheme` comes from the request body and is interpolated into a table
    # name below; reject anything that is not a plain identifier to block
    # SQL injection (table names cannot be parameterized).
    if not scheme or not str(scheme).isidentifier():
        return jsonify({"error": "invalid scheme"}), 400

    try:
        global_start_time = time.time()

        # Fixed: `r` was undefined in this function (NameError on every
        # request, silently converted to "[]" by the broad except below).
        r = redis.Redis(host='localhost', port=6379, db=2)

        redis_data = r.get(scheme)
        if redis_data:
            logging.info("使用redis原始数据")
            # Cached rows are stored as JSON.
            mysql_results = json.loads(redis_data)
        else:
            logging.info("使用mysql数据")
            conn = pymysql.connect(**MYSQL_CONFIG)
            try:
                with conn.cursor() as cursor:
                    logging.info(f"sql:::SELECT id,feature FROM tp_{scheme}_feature where feature!=''")
                    cursor.execute(f"SELECT id,feature FROM tp_{scheme}_feature where feature!=''")
                    mysql_results = cursor.fetchall()
            finally:
                # Fixed: the connection was never closed (leaked per request).
                conn.close()
            r.set(scheme, json.dumps(mysql_results))

        global_midtime = time.time() - global_start_time
        logging.info(f"获取mysql时间：{global_midtime},共：{len(mysql_results)}")

        if not mysql_results:
            logging.error("No destination features found in file")
            return "[]"

        # Run the multi-GPU comparison (1000000 is capped to the real GPU
        # count inside feature_compare_multi_gpu).
        results = feature_compare_multi_gpu(
            feature_src,
            mysql_results,
            scheme,
            score,
            1000000,
            log_id
        )

        if not results:
            return "[]"

        logging.info(f"{log_id}Found {len(results)} matches:")

        total_time = time.time() - global_start_time
        logging.info(f"\n{log_id}Total execution time: {total_time:.4f}s")
        logging.info(f"{log_id}Average processing speed: {len(mysql_results)/total_time:.2f} features/s")
        return jsonify(results)
    except Exception as e:
        logging.error(f"{log_id}Error occurred: {e}")
        return "[]"

if __name__ == "__main__":
    # Log to ~/app.log with timestamped lines.
    log_path = os.path.join(os.path.expanduser('~'), 'app.log')
    logging.basicConfig(
        filename=log_path,
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
    )
    # Spawn fresh worker processes instead of forking — presumably so CUDA
    # state is not inherited across fork (confirm with the cupy usage above).
    multiprocessing.set_start_method("spawn")
    app.run(host='0.0.0.0', port=5000)