import ray
from ray import tune
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.algorithms import Algorithm
from ray.rllib.env import BaseEnv
from ray.rllib.evaluation import Episode, RolloutWorker
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from typing import Dict, Any, List, Optional
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import random
from datetime import datetime

class JobRecommendationEnv(BaseEnv):
    """Reinforcement-learning environment for job recommendation.

    Each episode is a single step: the agent picks one index into
    ``self.available_jobs`` and receives a reward derived from simulated
    user feedback (click / apply / skill-match score).

    NOTE(review): this class implements the classic gym-style
    ``reset()``/``step()`` interface, yet it subclasses RLlib's
    ``BaseEnv``, whose actual contract is ``poll()``/``send_actions()``.
    It most likely should inherit from ``gym.Env`` (with explicit
    ``observation_space``/``action_space``) — confirm against the RLlib
    version in use before training with it.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the environment from an ``env_config`` dict.

        Recognized config keys:
            data_path: directory containing ``users.json`` / ``jobs.json``
                (defaults to ``b/data``); built-in sample data is used
                when the files are missing.
        """
        self.config = config
        self.current_user: Optional[Dict[str, Any]] = None
        self.available_jobs: List[Dict[str, Any]] = []
        self.reset()

    def reset(self) -> Dict[str, Any]:
        """Start a new episode: sample a random user and reload the job pool.

        Returns:
            The initial observation dict (see ``_observe``).
        """
        users = self._load_user_data()
        self.current_user = random.choice(users)
        self.available_jobs = self._load_job_data()
        return self._observe()

    def step(self, action: int) -> tuple:
        """Recommend job ``action`` and return ``(state, reward, done, info)``.

        Args:
            action: index into the current job pool.

        Raises:
            IndexError: if ``action`` is not a valid job index (guards
                against negative indices silently wrapping around).
        """
        if not 0 <= action < len(self.available_jobs):
            raise IndexError(
                f"action {action} out of range for "
                f"{len(self.available_jobs)} available jobs"
            )
        recommended_job = self.available_jobs[action]

        # Simulated feedback stands in for real user interaction logs.
        feedback = self._simulate_feedback(recommended_job)
        reward = self._calculate_reward(recommended_job, feedback)

        # Single-step episode: every recommendation terminates the episode.
        return self._observe(), reward, True, {"feedback": feedback}

    def _observe(self) -> Dict[str, Any]:
        """Build the observation dict for the current user and job pool."""
        return {
            "user_skills": self.current_user["skills"],
            "user_experience": self.current_user["experience"],
            "target_industry": self.current_user["target_industry"],
            "job_count": len(self.available_jobs),
        }

    def _simulate_feedback(self, job: Dict[str, Any]) -> Dict[str, Any]:
        """Simulate click/apply feedback driven by the skill-match score.

        Higher Jaccard match scores map to higher click/apply
        probabilities; the thresholds are heuristic.
        """
        match_score = self._calculate_match_score(job)

        if match_score > 0.8:
            click_prob, apply_prob = 0.9, 0.7
        elif match_score > 0.6:
            click_prob, apply_prob = 0.6, 0.3
        else:
            click_prob, apply_prob = 0.2, 0.05

        return {
            "clicked": random.random() < click_prob,
            "applied": random.random() < apply_prob,
            "match_score": match_score,
        }

    def _calculate_match_score(self, job: Dict[str, Any]) -> float:
        """Jaccard similarity between user skills and job required skills."""
        user_skills = set(self.current_user["skills"])
        job_skills = set(job["required_skills"])
        union = len(user_skills | job_skills)
        # Return float 0.0 (not int 0) when both skill sets are empty,
        # keeping the declared return type consistent.
        return len(user_skills & job_skills) / union if union > 0 else 0.0

    def _calculate_reward(self, job: Dict[str, Any], feedback: Dict[str, Any]) -> float:
        """Reward = 10*match_score + 5*clicked + 10*applied + 3*industry-match."""
        reward = feedback["match_score"] * 10
        if feedback["clicked"]:
            reward += 5
        if feedback["applied"]:
            reward += 10
        if job["industry"] == self.current_user["target_industry"]:
            reward += 3
        return reward

    def _load_user_data(self) -> List[Dict[str, Any]]:
        """Load users from ``<data_path>/users.json``, or fall back to samples."""
        return self._load_json("users.json", self._generate_sample_users)

    def _load_job_data(self) -> List[Dict[str, Any]]:
        """Load jobs from ``<data_path>/jobs.json``, or fall back to samples."""
        return self._load_json("jobs.json", self._generate_sample_jobs)

    def _load_json(self, filename: str, fallback) -> List[Dict[str, Any]]:
        """Read a JSON list from the configured data dir; call ``fallback`` if absent."""
        data_file = Path(self.config.get("data_path", "b/data")) / filename
        if data_file.exists():
            with open(data_file, "r", encoding="utf-8") as f:
                return json.load(f)
        return fallback()

    def _generate_sample_users(self) -> List[Dict[str, Any]]:
        """Built-in sample users used when no users.json file exists."""
        return [
            {
                "user_id": "user1",
                "skills": ["Python", "Machine Learning", "SQL"],
                "experience": 3,
                "target_industry": "IT"
            },
            {
                "user_id": "user2",
                "skills": ["Java", "Spring", "SQL"],
                "experience": 5,
                "target_industry": "Finance"
            }
        ]

    def _generate_sample_jobs(self) -> List[Dict[str, Any]]:
        """Built-in sample jobs used when no jobs.json file exists."""
        return [
            {
                "job_id": "job1",
                "title": "Data Scientist",
                "company": "TechCorp",
                "required_skills": ["Python", "Machine Learning", "Statistics"],
                "industry": "IT"
            },
            {
                "job_id": "job2",
                "title": "Java Developer",
                "company": "BankInc",
                "required_skills": ["Java", "Spring", "SQL"],
                "industry": "Finance"
            }
        ]

class RLOptimizer:
    """PPO-based trainer/inference wrapper around ``JobRecommendationEnv``.

    Handles Ray initialization, algorithm construction, training with
    periodic checkpointing, checkpoint save/restore, single-state
    recommendation, and a small evaluation loop.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Create the optimizer.

        Recognized config keys:
            num_cpus: CPUs to give Ray (default 2).
            save_path: checkpoint directory (default ``b/models/rl``).
            env_config: dict forwarded to ``JobRecommendationEnv``.
        """
        self.config = config or {}
        self._init_ray()
        self.algorithm = self._setup_algorithm()

    def _init_ray(self):
        """Start Ray once; no-op when a Ray runtime is already up."""
        if not ray.is_initialized():
            ray.init(
                ignore_reinit_error=True,
                include_dashboard=False,
                num_cpus=self.config.get("num_cpus", 2)
            )

    def _setup_algorithm(self) -> Algorithm:
        """Build a torch PPO algorithm bound to the recommendation env."""
        config = (
            PPOConfig()
            .environment(
                env=JobRecommendationEnv,
                env_config=self.config.get("env_config", {})
            )
            .framework("torch")
            .training(
                gamma=0.99,
                lr=0.001,
                train_batch_size=200
            )
            .resources(num_gpus=0)
        )
        return config.build()

    def train(self, num_iterations: int = 10):
        """Run ``num_iterations`` training steps, checkpointing every 5.

        Returns:
            The list of per-iteration RLlib result dicts.
        """
        results = []
        for i in range(num_iterations):
            result = self.algorithm.train()
            results.append(result)

            # 'episode_reward_mean' may be absent before the first complete
            # episode (and moved in newer RLlib versions) — don't let a
            # missing metric key crash the training loop.
            mean_reward = result.get("episode_reward_mean", float("nan"))
            print(f"Iteration {i+1}")
            print(f"Mean reward: {mean_reward:.2f}")
            print("-" * 50)

            # Periodic checkpoint so a long run can be resumed.
            if (i + 1) % 5 == 0:
                self.save_model(f"iteration_{i+1}")

        return results

    def save_model(self, name: str):
        """Checkpoint the algorithm under ``<save_path>/<name>``.

        Returns:
            The checkpoint path reported by RLlib.
        """
        save_path = Path(self.config.get("save_path", "b/models/rl"))
        save_path.mkdir(parents=True, exist_ok=True)

        checkpoint_path = self.algorithm.save(str(save_path / name))
        print(f"Model saved to {checkpoint_path}")
        return checkpoint_path

    def load_model(self, path: str) -> Algorithm:
        """Restore the algorithm from a checkpoint path and return it."""
        self.algorithm.restore(path)
        return self.algorithm

    def recommend(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Pick a job index for ``state`` with the trained policy."""
        action = self.algorithm.compute_single_action(state)
        return {
            "action": int(action),
            "explanation": self._explain_action(state, action)
        }

    def _explain_action(self, state: Dict[str, Any], action: int) -> str:
        """Produce a (currently templated) human-readable explanation."""
        return f"基于用户的技能和经验，推荐了最适合的岗位#{action}"

    def evaluate(self, num_episodes: int = 10) -> Dict[str, Any]:
        """Run ``num_episodes`` one-step episodes against the local env.

        Returns:
            Dict with ``average_reward`` and ``success_rate`` (fraction of
            episodes where the simulated user applied to the job).
        """
        total_reward = 0
        successful_recommendations = 0

        for _ in range(num_episodes):
            # NOTE(review): reaching into workers.local_worker().env is
            # brittle across RLlib versions — confirm it matches the
            # installed version's worker API.
            env = self.algorithm.workers.local_worker().env
            state = env.reset()
            # Greedy (non-exploring) actions for a deterministic evaluation.
            action = self.algorithm.compute_single_action(state, explore=False)
            _, reward, _, info = env.step(action)

            total_reward += reward
            if info["feedback"]["applied"]:
                successful_recommendations += 1

        return {
            "average_reward": total_reward / num_episodes,
            "success_rate": successful_recommendations / num_episodes
        }

def _demo() -> None:
    """Example end-to-end run: train, evaluate, then issue one recommendation."""
    optimizer = RLOptimizer({
        "num_cpus": 2,
        "save_path": "b/models/rl",
        "env_config": {"data_path": "b/data"},
    })

    # Train the policy.
    print("开始训练...")
    optimizer.train(num_iterations=10)

    # Report evaluation metrics.
    print("\n评估模型...")
    metrics = optimizer.evaluate()
    print(f"平均奖励: {metrics['average_reward']:.2f}")
    print(f"成功率: {metrics['success_rate']:.1%}")

    # Issue one sample recommendation for a hand-built state.
    print("\n示例推荐:")
    sample_state = {
        "user_skills": ["Python", "SQL"],
        "user_experience": 2,
        "target_industry": "IT",
        "job_count": 5,
    }
    recommendation = optimizer.recommend(sample_state)
    print(f"推荐动作: {recommendation['action']}")
    print(f"解释: {recommendation['explanation']}")


if __name__ == "__main__":
    _demo()