import os
import time
import json
import torch
import numpy as np
import pandas as pd
import psutil
import GPUtil
import logging
import signal
import traceback
import pickle
from multiprocessing import Process, Queue, Manager, Value, Lock
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
import pymysql
from pymilvus import connections, Collection
import sys
sys.path.append("../")
from DSSM.run_update_user_item_class import RecEmbeddingUpdater
from tqdm import tqdm

# 使用您的日志系统
import sys, datetime
sys.path.append("../")
from logs.log import setup_custom_logger

# 日志系统初始化
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
LOG_DIR = '../Logfiles'
file_name = str(datetime.datetime.now().strftime('%Y-%m-%d'))
# my_logger = setup_custom_logger(os.path.join(LOG_DIR, '%s.log' % file_name), log_level="INFO")

class DatabaseConnectionPool:
    """管理数据库连接的连接池"""
    def __init__(self, max_connections=10, connection_timeout=30, **db_params):
        self.db_params = db_params
        self.max_connections = max_connections
        self.connection_timeout = connection_timeout
        self.connections = Queue(maxsize=max_connections)
        self.lock = Lock()
        self.active_connections = Value('i', 0)
        
        # 初始化连接池
        for _ in range(max_connections):
            try:
                conn = self._create_connection()
                self.connections.put(conn)
            except Exception as e:
                print(e)
                # my_logger.error(f"Failed to initialize DB connection: {e}")
    
    def _create_connection(self):
        """创建新的数据库连接"""
        try:
            conn = pymysql.connect(**self.db_params)
            return conn
        except Exception as e:
            print(e)
            # my_logger.error(f"Error creating DB connection: {e}")
            raise
    
    @contextmanager
    def get_connection(self):
        """获取连接的上下文管理器"""
        conn = None
        try:
            # 尝试从池中获取连接
            try:
                conn = self.connections.get(timeout=self.connection_timeout)
                with self.lock:
                    self.active_connections.value += 1
            except Exception:
                # 如果池中没有可用连接，创建新连接
                # my_logger.warning("Connection pool exhausted, creating new connection")
                conn = self._create_connection()
                with self.lock:
                    self.active_connections.value += 1
            
            # 检查连接是否有效
            try:
                conn.ping(reconnect=True)
            except:
                # my_logger.warning("Connection lost, reconnecting...")
                conn = self._create_connection()
            
            yield conn
        finally:
            # 归还连接到池中
            if conn:
                try:
                    # 如果连接仍然有效，归还到池中
                    if hasattr(conn, 'open') and conn.open:
                        self.connections.put(conn)
                    else:
                        # 如果连接已关闭，创建新连接放入池中
                        self.connections.put(self._create_connection())
                except:
                    # 如果归还过程出错，确保创建新连接
                    try:
                        self.connections.put(self._create_connection())
                    except:
                        pass
                
                with self.lock:
                    self.active_connections.value -= 1
    
    def close_all(self):
        """关闭所有连接"""
        while not self.connections.empty():
            try:
                conn = self.connections.get(block=False)
                if conn and hasattr(conn, 'close'):
                    conn.close()
            except:
                pass
        # my_logger.info("All database connections closed")

class GPUWorkerProcess(Process):
    def __init__(self, 
                 process_id, 
                 gpu_id, 
                 task_queue, 
                 result_queue, 
                 db_config,  # 改为直接传递配置字典而非文件路径
                 milvus_config,
                 stop_event,
                 health_check_event,
                 worker_heartbeats):
        super(GPUWorkerProcess, self).__init__()
        self.process_id = process_id
        self.gpu_id = gpu_id
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.db_config = db_config  # 存储配置字典
        self.milvus_config = milvus_config
        self.stop_event = stop_event
        self.health_check_event = health_check_event
        self.worker_heartbeats = worker_heartbeats
        self.daemon = True

    def run(self):
        # local_logger = setup_custom_logger(f"worker_{self.process_id}.log", log_level="INFO")
        # local_logger.info(f"Worker {self.process_id} started on GPU {self.gpu_id}")
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu_id)
        signal.signal(signal.SIGTERM, self._handle_sigterm)
        signal.signal(signal.SIGINT, self._handle_sigterm)
        
        try:
            db_pool = DatabaseConnectionPool(max_connections=5, **self.db_config)
            while not self.stop_event.is_set():
                try:
                    self.worker_heartbeats[self.process_id] = time.time()  # Update heartbeat
                    try:
                        task = self.task_queue.get(timeout=1)
                    except Exception:
                        continue
                    if task is None:
                        # my_logger.warning("Received a None task, skipping...")
                        continue
                    if not isinstance(task, dict):
                        # my_logger.error("Task is not a dictionary, skipping...")
                        continue

                    group_name = task["group_name"]
                    task_id = task.get("task_id", "unknown")
                    # my_logger.info(f"Worker {self.process_id} on GPU {self.gpu_id} processing group: {group_name}, task_id: {task_id}")
                    
                    # 在进程内部创建RecEmbeddingUpdater实例
                    updater = RecEmbeddingUpdater(
                        db_config=self.db_config,
                        group_name=group_name,
                        milvus_host=self.milvus_config["host"],
                        milvus_port=self.milvus_config["port"]
                    )
                    
                    start_time = time.time()
                    result = updater.update_embeddings_for_group()
                    elapsed_time = time.time() - start_time
                    
                    # 确保结果是可pickle的基本类型
                    if not isinstance(result, (dict, list, str, int, float, bool, type(None))):
                        result = str(result)
                        
                    self.result_queue.put({
                        "task_id": task_id,
                        "group_name": group_name,
                        "status": "success",
                        "elapsed_time": elapsed_time,
                        "result": result,
                        "worker_id": self.process_id,
                        "gpu_id": self.gpu_id
                    })
                    # my_logger.info(f"Worker {self.process_id} completed task for group {group_name} in {elapsed_time:.2f}s")
                except Exception as e:
                    error_msg = f"Error in worker {self.process_id}: {str(e)}\n{traceback.format_exc()}"
                    print(error_msg)
                    # my_logger.error(error_msg)

                    self.result_queue.put({
                        "task_id": task.get("task_id", "unknown") if 'task' in locals() else "unknown",
                        "group_name": task.get("group_name", "unknown") if 'task' in locals() else "unknown",
                        "status": "error",
                        "error": str(e),
                        "traceback": traceback.format_exc(),
                        "worker_id": self.process_id,
                        "gpu_id": self.gpu_id
                    })
            db_pool.close_all()
            # my_logger.info(f"Worker {self.process_id} on GPU {self.gpu_id} shutting down gracefully")
        except Exception as e:
            print(e)
            # my_logger.error(f"Fatal error in worker {self.process_id}: {str(e)}\n{traceback.format_exc()}")
    
    def _handle_sigterm(self, signum, frame):
        # my_logger.info(f"Worker {self.process_id} received signal {signum}, shutting down")
        self.stop_event.set()

class GPUWorkerPool:
    def __init__(self, milvus_config, db_config_path, gpu_ids=None):
        # 在主进程中读取配置文件
        with open(db_config_path, 'r') as file:
            self.db_config = json.load(file)
            
        self.milvus_config = milvus_config
        available_gpus = GPUtil.getGPUs()
        if not available_gpus:
            raise RuntimeError("No GPU available for processing")
        if gpu_ids is None:
            self.gpu_ids = list(range(len(available_gpus)))
        else:
            self.gpu_ids = gpu_ids
        # my_logger.info(f"Initializing GPU worker pool with GPUs: {self.gpu_ids}")
        self.manager = Manager()
        self.task_queue = self.manager.Queue()
        self.result_queue = self.manager.Queue()
        self.stop_event = self.manager.Event()
        self.worker_heartbeats = self.manager.dict({i: time.time() for i in range(len(self.gpu_ids))})
        self.workers = {}
        self._start_workers()
        monitor_data = {
            "worker_heartbeats": self.worker_heartbeats,
            "stop_event": self.stop_event,
        }
        self.monitor_process = Process(target=monitor_workers, args=(monitor_data,))
        self.monitor_process.daemon = True
        self.monitor_process.start()
        # my_logger.info("GPU worker pool initialized successfully")

    def _start_workers(self):
        for i, gpu_id in enumerate(self.gpu_ids):
            self._start_worker(i, gpu_id)

    def _start_worker(self, process_id, gpu_id):
        health_event = self.manager.Event()
        worker = GPUWorkerProcess(
            process_id=process_id,
            gpu_id=gpu_id,
            task_queue=self.task_queue,
            result_queue=self.result_queue,
            db_config=self.db_config,  # 传递配置字典而非文件路径
            milvus_config=self.milvus_config,
            stop_event=self.stop_event,
            health_check_event=health_event,
            worker_heartbeats=self.worker_heartbeats
        )
        worker.start()
        self.workers[process_id] = worker
        # my_logger.info(f"Started worker {process_id} on GPU {gpu_id} with PID {worker.pid}")

    def submit_task(self, group_name, task_id=None):
        if self.stop_event.is_set():
            raise RuntimeError("Worker pool is shutting down, cannot accept new tasks")
        if task_id is None:
            task_id = f"task_{int(time.time())}_{np.random.randint(10000)}"
        task = {
            "task_id": task_id,
            "group_name": group_name,
            "submit_time": time.time()
        }
        self.task_queue.put(task)
        # my_logger.info(f"Submitted task {task_id} for group {group_name}")
        return task_id

    def get_result(self, timeout=None):
        try:
            return self.result_queue.get(timeout=timeout)
        except Exception:
            return None

    def shutdown(self, wait=True):
        # my_logger.info("Shutting down GPU worker pool...")
        if wait:
            while not self.task_queue.empty():
                time.sleep(1)
        self.stop_event.set()
        for worker in self.workers.values():
            worker.join(timeout=10)
            if worker.is_alive():
                worker.terminate()
        if self.monitor_process.is_alive():
            self.monitor_process.terminate()
            self.monitor_process.join(timeout=5)
        # my_logger.info("GPU worker pool shutdown complete")

def monitor_workers(monitor_data):
    worker_heartbeats = monitor_data["worker_heartbeats"]
    stop_event = monitor_data["stop_event"]
    threshold = 30  # seconds
    while not stop_event.is_set():
        current_time = time.time()
        for process_id, last_beat in worker_heartbeats.items():
            if current_time - last_beat > threshold:
                print(f"Worker {process_id} has not sent heartbeat for {current_time - last_beat} seconds")
                # my_logger.warning(f"Worker {process_id} has not sent heartbeat for {current_time - last_beat} seconds, considering it dead.")
        time.sleep(10)

if __name__ == "__main__":
    import multiprocessing
    multiprocessing.set_start_method('spawn')
    db_config_path = "../global_configs/mysql_db_connect_config.json"
    milvus_config = {"host": "10.240.1.3", "port": "19530"}
    pool = GPUWorkerPool(milvus_config=milvus_config, db_config_path=db_config_path, gpu_ids=[0])
    try:
        groups = ["SUDA"]
        task_ids = []
        for group in groups:
            task_id = pool.submit_task(group)
            task_ids.append(task_id)
            # my_logger.info(f"Submitted task for group {group} with ID {task_id}")
        results = []
        for _ in range(len(task_ids)):
            result = pool.get_result()
            results.append(result)
            # my_logger.info(f"Received result for task {result['task_id']}: {result['status']}")
            if result and result['status'] == 'error':
                print(f"Task {result['task_id']} failed: {result['error']}")
                # my_logger.error(f"Task {result['task_id']} failed: {result['error']}")
        success_count = sum(1 for r in results if r and r['status'] == 'success')
        print(f"Completed {success_count} out of {len(task_ids)} tasks successfully")
        # my_logger.info(f"Completed {success_count} out of {len(task_ids)} tasks successfully")
    finally:
        pool.shutdown()
