import ray
from ray import train, tune
from ray.util import inspect_serializability
from ray.runtime_env import RuntimeEnv
from typing import Dict, Any, List, Optional, Callable
import numpy as np
import pandas as pd
import time
import json
from pathlib import Path
import logging
from datetime import datetime
import psutil
import socket
from functools import partial

# Module-wide logging: timestamped INFO-level records.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
logger = logging.getLogger("DistributedExecutor")

@ray.remote
class JobProcessingActor:
    """Distributed job-processing worker (Ray actor).

    Runs user-supplied processing callables on individual job records and
    accumulates per-task resource-usage statistics for later reporting.
    """

    def __init__(self, node_id: str, config: Dict[str, Any]):
        """
        Args:
            node_id: Logical identifier for this worker instance.
            config: Executor configuration; stored as-is (not interpreted here).
        """
        self.node_id = node_id
        self.config = config
        self.task_count = 0        # tasks dispatched to this actor so far
        self.resource_usage = []   # one usage record per successful task
        self._init_resources()

    def _init_resources(self):
        """Capture static host facts: hostname, CPU count, total RAM (GB)."""
        self.hostname = socket.gethostname()
        self.cpu_count = psutil.cpu_count()
        self.mem_total = psutil.virtual_memory().total / (1024 ** 3)  # GB

    def process_job(self, job_data: Dict[str, Any],
                   processing_fn: Callable) -> Dict[str, Any]:
        """Execute *processing_fn* on one job record and report the outcome.

        Returns:
            On success: dict with status "success", the callable's result,
            this node's id and the wall-clock processing time.
            On failure: dict with status "failed" and the error message
            (the exception is logged, not re-raised).
        """
        started = time.time()
        task_id = f"{self.node_id}_{self.task_count}"
        self.task_count += 1

        try:
            # Snapshot CPU/memory just before the user callable runs.
            cpu_pre = psutil.cpu_percent()
            mem_pre = psutil.virtual_memory().used / (1024 ** 3)

            outcome = processing_fn(job_data)

            # Snapshot again afterwards; the pair approximates per-task cost.
            cpu_post = psutil.cpu_percent()
            mem_post = psutil.virtual_memory().used / (1024 ** 3)

            self.resource_usage.append({
                "task_id": task_id,
                "cpu_usage": (cpu_pre + cpu_post) / 2,
                "mem_usage": mem_post - mem_pre,
                "duration": time.time() - started,
                "timestamp": datetime.now().isoformat(),
            })

            return {
                "status": "success",
                "task_id": task_id,
                "result": outcome,
                "node_id": self.node_id,
                "processing_time": time.time() - started,
            }

        except Exception as e:
            logger.error(f"任务 {task_id} 处理失败: {str(e)}", exc_info=True)
            return {
                "status": "failed",
                "task_id": task_id,
                "error": str(e),
                "node_id": self.node_id,
            }

    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot of this worker's identity and usage history."""
        return {
            "node_id": self.node_id,
            "hostname": self.hostname,
            "cpu_count": self.cpu_count,
            "mem_total": self.mem_total,
            "task_count": self.task_count,
            "resource_usage": self.resource_usage,
        }

class DistributedExecutor:
    """Distributed execution manager.

    Owns a pool of JobProcessingActor workers, distributes job batches to
    them round-robin, persists execution statistics, and exposes a Ray Tune
    entry point for hyperparameter search.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Args:
            config: Executor settings. Recognized keys include
                ray_address, node_ip, redis_password, num_cpus, num_gpus,
                object_store_memory, omp_num_threads, num_actors,
                log_dir, tune_storage.
        """
        self.config = config
        self.actors = []
        self._rr_index = 0  # round-robin cursor for _select_actor
        self._init_ray()
        self._create_actors()

    def _init_ray(self):
        """Initialize the Ray runtime (no-op if already initialized)."""
        if not ray.is_initialized():
            # NOTE(review): when address="auto" connects to an existing
            # cluster, num_cpus/num_gpus/object_store_memory are only
            # meaningful for a locally started instance — confirm intended.
            # _node_ip_address/_redis_password are private Ray parameters
            # and may not exist in newer Ray releases.
            ray.init(
                address=self.config.get("ray_address", "auto"),
                _node_ip_address=self.config.get("node_ip", "127.0.0.1"),
                _redis_password=self.config.get("redis_password"),
                num_cpus=self.config.get("num_cpus", psutil.cpu_count()),
                num_gpus=self.config.get("num_gpus", 0),
                object_store_memory=self.config.get("object_store_memory"),
                runtime_env=self._create_runtime_env()
            )

    def _create_runtime_env(self) -> RuntimeEnv:
        """Create the Ray runtime environment for workers.

        Fix: RuntimeEnv takes the packages list via the ``pip`` keyword;
        ``pip_packages`` is not a valid parameter and raised TypeError.
        """
        return RuntimeEnv(
            pip=[
                "pandas",
                "numpy",
                "psutil"
            ],
            env_vars={
                "OMP_NUM_THREADS": str(self.config.get("omp_num_threads", 1))
            }
        )

    def _create_actors(self):
        """Spawn the configured number of JobProcessingActor workers."""
        num_actors = self.config.get("num_actors", 4)
        for i in range(num_actors):
            actor = JobProcessingActor.remote(
                node_id=f"node_{i}",
                config=self.config
            )
            self.actors.append(actor)

        logger.info(f"创建了 {num_actors} 个处理执行器")

    def process_batch(self, jobs_data: List[Dict[str, Any]],
                     processing_fn: Callable) -> List[Dict[str, Any]]:
        """Process a batch of job records across the actor pool.

        Args:
            jobs_data: Job records to process.
            processing_fn: Serializable callable applied to each record.

        Returns:
            Per-job result dicts, in the same order as ``jobs_data``.

        Raises:
            ValueError: if ``processing_fn`` is not serializable.
        """
        # Fail fast if the callable cannot be shipped to workers.
        self._validate_processing_fn(processing_fn)

        # Dispatch each job to the next actor in round-robin order.
        futures = []
        for job_data in jobs_data:
            actor = self._select_actor()
            future = actor.process_job.remote(job_data, processing_fn)
            futures.append(future)

        # Block until every task has completed.
        results = ray.get(futures)

        self._log_execution_stats()

        return results

    def _validate_processing_fn(self, fn: Callable):
        """Verify the processing function is serializable.

        Fix: ``inspect_serializability`` returns a ``(bool, failure_set)``
        tuple and never raises, so the previous try/except silently accepted
        unserializable functions. We now check the returned flag.
        """
        serializable, failures = inspect_serializability(fn)
        if not serializable:
            logger.error("处理函数无法序列化: %s", failures)
            raise ValueError("处理函数必须可序列化")

    def _select_actor(self):
        """Select the next actor (round-robin).

        Fix: the previous index ``len(self.actors) % len(self.actors)`` was
        always 0, so every task went to the first actor. A cycling cursor
        now spreads tasks across the pool.
        """
        actor = self.actors[self._rr_index % len(self.actors)]
        self._rr_index += 1
        return actor

    def _log_execution_stats(self):
        """Collect per-actor stats and persist them as a timestamped JSON file."""
        stats = ray.get([actor.get_stats.remote() for actor in self.actors])

        log_dir = Path(self.config.get("log_dir", "b/data/logs/ray"))
        log_dir.mkdir(parents=True, exist_ok=True)

        log_file = log_dir / f"execution_stats_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        with open(log_file, "w") as f:
            json.dump(stats, f, indent=2)

        logger.info(f"执行统计已保存到 {log_file}")

    def tune_hyperparameters(self,
                           config_space: Dict[str, Any],
                           training_fn: Callable,
                           num_samples: int = 10) -> tune.ResultGrid:
        """Run a Ray Tune hyperparameter search minimizing "loss".

        Args:
            config_space: Tune search space (e.g. tune.loguniform/choice).
            training_fn: Trainable that reports a "loss" metric.
            num_samples: Number of trials to sample.

        Returns:
            The ResultGrid from ``Tuner.fit()``. The best trial's config is
            also written to ``<tune_storage>/best_config.json``.
        """
        tuner = tune.Tuner(
            tune.with_resources(
                tune.with_parameters(training_fn),
                resources={"cpu": 2, "gpu": 0.5}
            ),
            param_space=config_space,
            tune_config=tune.TuneConfig(
                num_samples=num_samples,
                metric="loss",
                mode="min"
            ),
            run_config=train.RunConfig(
                name="job_recommendation_tuning",
                storage_path=self.config.get("tune_storage", "b/data/tune")
            )
        )

        results = tuner.fit()

        # Persist the winning configuration next to the tune storage.
        # Fix: honor the configured tune_storage (default unchanged) and
        # create the directory — previously a missing dir raised IOError.
        best_config = results.get_best_result().config
        storage_dir = Path(self.config.get("tune_storage", "b/data/tune"))
        storage_dir.mkdir(parents=True, exist_ok=True)
        best_config_file = storage_dir / "best_config.json"
        with open(best_config_file, "w") as f:
            json.dump(best_config, f, indent=2)

        logger.info(f"最佳配置已保存到 {best_config_file}")

        return results

    def shutdown(self):
        """Shut down the Ray runtime."""
        ray.shutdown()
        logger.info("Ray运行时已关闭")

if __name__ == "__main__":
    # Demo configuration for a small local run.
    demo_config = {
        "num_actors": 4,
        "log_dir": "b/data/logs/ray",
        "tune_storage": "b/data/tune",
    }

    def sample_processing(job_data: Dict[str, Any]) -> Dict[str, Any]:
        """Toy per-job handler: sleeps to simulate work, echoes key fields."""
        time.sleep(1)  # simulated processing latency
        return {
            "processed": True,
            "job_id": job_data.get("job_id"),
            "skills": job_data.get("skills", []),
        }

    executor = DistributedExecutor(demo_config)

    try:
        # Build a small synthetic batch of job records.
        sample_jobs = []
        for idx in range(10):
            sample_jobs.append(
                {"job_id": f"job_{idx}", "skills": ["Python", "SQL"]}
            )

        print("处理批量数据...")
        results = executor.process_batch(sample_jobs, sample_processing)
        print("处理结果:", json.dumps(results[:2], indent=2))  # show first two results

        print("\n运行超参数调优...")
        config_space = {
            "lr": tune.loguniform(1e-4, 1e-2),
            "batch_size": tune.choice([16, 32, 64]),
        }

        def sample_train(config: Dict[str, Any]) -> None:
            """Toy trainable: reports a random loss scaled by the learning rate."""
            for _ in range(10):
                train.report({"loss": np.random.rand() * config["lr"]})

        tune_results = executor.tune_hyperparameters(
            config_space, sample_train, num_samples=3
        )
        print("最佳配置:", tune_results.get_best_result().config)

    finally:
        executor.shutdown()