'''
基于强化学习的制造业成本控制优化方案
一、业务分析与技术思路
1.1 成本控制核心要素分析
在制造业中，成本控制涉及多个维度的优化，需要在保证质量的前提下实现总成本最小化：

成本构成要素：

原材料成本：采购价格、库存成本、损耗率
人工成本：人员配置、加班费用、培训成本
设备成本：设备折旧、维护费用、能耗成本
质量成本：次品返工、质检费用、客户投诉处理
物流成本：运输费用、仓储成本、配送成本
质量约束条件：

产品合格率 ≥ 目标质量标准
客户满意度 ≥ 预设阈值
交付及时性 ≥ 服务水平要求
'''
import json
import logging
import random
from collections import deque, namedtuple
from dataclasses import dataclass, replace
from enum import Enum
from typing import Dict, List, Tuple, Optional

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim

# Configure module-level logging at INFO; all output goes through a named logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class CostComponents:
    """Per-period manufacturing cost breakdown (all values in currency units)."""
    material_cost: float      # raw-material purchasing, inventory, scrap
    labor_cost: float         # staffing, overtime, training
    equipment_cost: float     # depreciation, maintenance, energy
    quality_cost: float       # rework, inspection, complaint handling
    logistics_cost: float     # transport, warehousing, delivery
    overhead_cost: float      # administration

    @property
    def total_cost(self) -> float:
        """Sum of the six cost components."""
        components = (
            self.material_cost,
            self.labor_cost,
            self.equipment_cost,
            self.quality_cost,
            self.logistics_cost,
            self.overhead_cost,
        )
        return sum(components)

@dataclass
class QualityMetrics:
    """Quality KPIs for one production period."""
    defect_rate: float           # fraction of defective units (lower is better)
    customer_satisfaction: float # satisfaction score in [0, 1]
    on_time_delivery: float      # fraction of orders delivered on time
    first_pass_yield: float      # fraction passing without rework

    def meets_quality_constraints(self, constraints: Dict[str, float]) -> bool:
        """Return True when every KPI satisfies its threshold.

        Missing thresholds in `constraints` fall back to defaults:
        defect rate <= 0.05, satisfaction >= 0.85, on-time >= 0.95,
        first-pass yield >= 0.90.
        """
        checks = (
            self.defect_rate <= constraints.get('max_defect_rate', 0.05),
            self.customer_satisfaction >= constraints.get('min_customer_satisfaction', 0.85),
            self.on_time_delivery >= constraints.get('min_on_time_delivery', 0.95),
            self.first_pass_yield >= constraints.get('min_first_pass_yield', 0.90),
        )
        return all(checks)

class OptimizationAction(Enum):
    """Categories of optimization levers the agent can pull.

    NOTE(review): this enum is not referenced by the visible code in this
    file — presumably used by callers elsewhere; confirm before removing.
    """
    ADJUST_MATERIAL_SOURCING = "adjust_material_sourcing"
    OPTIMIZE_WORKFORCE = "optimize_workforce" 
    SCHEDULE_MAINTENANCE = "schedule_maintenance"
    ADJUST_QUALITY_CONTROL = "adjust_quality_control"
    OPTIMIZE_INVENTORY = "optimize_inventory"
    IMPROVE_PROCESS_EFFICIENCY = "improve_process_efficiency"



class CostControlEnvironment:
    """Simulation environment for manufacturing cost-control optimization.

    State (23 dims): 6 normalized cost components, 4 quality metrics,
    5 market-condition indicators and 8 resource-state indicators.
    Action (12 dims): continuous controls in [-1, 1], two parameters per
    optimization lever (sourcing, workforce, maintenance, quality control,
    inventory, process efficiency). Episodes last `planning_horizon` days.

    Fixes vs. the previous revision:
    - `step` was defined at module level (wrong indentation), which made the
      file unparseable and left the class without a step method.
    - cost/quality history appended the live, in-place-mutated dataclass
      instances, so all history entries aliased one object; snapshots are
      stored now.
    """

    def __init__(self, config: Dict):
        """Initialize parameters from `config` and reset to a random state.

        Recognized config keys: 'num_production_units' (default 10),
        'planning_horizon' in days (default 30) and 'quality_constraints'
        (thresholds checked via QualityMetrics.meets_quality_constraints).
        """
        self.config = config

        # Environment parameters.
        self.num_production_units = config.get('num_production_units', 10)
        self.planning_horizon = config.get('planning_horizon', 30)  # days per episode
        self.quality_constraints = config.get('quality_constraints', {
            'max_defect_rate': 0.03,
            'min_customer_satisfaction': 0.90,
            'min_on_time_delivery': 0.95,
            'min_first_pass_yield': 0.92
        })

        # State space: cost components + quality metrics + market + resources.
        self.state_dim = (
            6 +  # cost components
            4 +  # quality metrics
            5 +  # market conditions
            8    # resource states
        )

        # Action space: 6 optimization levers, 2 parameters each.
        self.action_dim = 12

        self.reset()

    def reset(self) -> np.ndarray:
        """Reset the simulation to a random initial state and return it."""
        # Initial cost state (randomized within plausible ranges).
        self.current_costs = CostComponents(
            material_cost=np.random.uniform(8000, 12000),
            labor_cost=np.random.uniform(5000, 8000),
            equipment_cost=np.random.uniform(3000, 5000),
            quality_cost=np.random.uniform(1000, 3000),
            logistics_cost=np.random.uniform(2000, 4000),
            overhead_cost=np.random.uniform(1500, 2500)
        )

        # Initial quality state.
        self.current_quality = QualityMetrics(
            defect_rate=np.random.uniform(0.02, 0.04),
            customer_satisfaction=np.random.uniform(0.85, 0.95),
            on_time_delivery=np.random.uniform(0.90, 0.98),
            first_pass_yield=np.random.uniform(0.88, 0.95)
        )

        # Market conditions (indices around 1.0 = neutral).
        self.market_conditions = {
            'material_price_index': np.random.uniform(0.8, 1.2),
            'labor_market_tightness': np.random.uniform(0.7, 1.3),
            'demand_volatility': np.random.uniform(0.5, 1.5),
            'supplier_reliability': np.random.uniform(0.8, 1.0),
            'competitive_pressure': np.random.uniform(0.6, 1.4)
        }

        # Resource states (roughly fractional utilization/maturity levels).
        self.resource_state = {
            'inventory_level': np.random.uniform(0.6, 1.2),
            'equipment_utilization': np.random.uniform(0.7, 0.95),
            'workforce_efficiency': np.random.uniform(0.8, 1.0),
            'supplier_performance': np.random.uniform(0.85, 0.98),
            'process_automation_level': np.random.uniform(0.6, 0.9),
            'quality_system_maturity': np.random.uniform(0.7, 0.95),
            'maintenance_status': np.random.uniform(0.8, 1.0),
            'technology_advancement': np.random.uniform(0.7, 0.9)
        }

        self.current_step = 0
        self.cost_history = []     # per-step CostComponents snapshots
        self.quality_history = []  # per-step QualityMetrics snapshots

        return self._get_state()

    def _get_state(self) -> np.ndarray:
        """Assemble the current 23-dim observation vector (float32)."""
        state = []

        # Cost components, normalized to roughly [0, 1] by fixed caps.
        state.extend([
            self.current_costs.material_cost / 15000,
            self.current_costs.labor_cost / 10000,
            self.current_costs.equipment_cost / 8000,
            self.current_costs.quality_cost / 5000,
            self.current_costs.logistics_cost / 6000,
            self.current_costs.overhead_cost / 4000
        ])

        # Quality metrics (defect rate flipped into a positive score).
        state.extend([
            1.0 - self.current_quality.defect_rate * 20,
            self.current_quality.customer_satisfaction,
            self.current_quality.on_time_delivery,
            self.current_quality.first_pass_yield
        ])

        # Market conditions, scaled by their upper bounds.
        state.extend([
            self.market_conditions['material_price_index'] / 1.5,
            self.market_conditions['labor_market_tightness'] / 1.5,
            self.market_conditions['demand_volatility'] / 2.0,
            self.market_conditions['supplier_reliability'],
            self.market_conditions['competitive_pressure'] / 1.5
        ])

        # Resource states, raw (dict preserves insertion order).
        state.extend(list(self.resource_state.values()))

        return np.array(state, dtype=np.float32)

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, Dict]:
        """Apply one optimization action and advance the simulation one day.

        Returns (next_state, reward, done, info); done after
        `planning_horizon` steps.
        """
        # Apply the action to costs, quality and resources.
        self._apply_optimization_actions(action)

        # Exogenous market drift and resource decay.
        self._simulate_environment_dynamics()

        reward = self._calculate_reward()

        self.current_step += 1
        done = self.current_step >= self.planning_horizon

        # Store snapshots: the current dataclasses are mutated in place every
        # step, so appending them directly would alias one object.
        self.cost_history.append(replace(self.current_costs))
        self.quality_history.append(replace(self.current_quality))

        info = self._get_info()

        return self._get_state(), reward, done, info

    def _apply_optimization_actions(self, action: np.ndarray):
        """Clip and apply the 12-dim action. Index map (all in [-1, 1]):

        0 material cost/quality trade-off   1 supplier diversification
        2 workforce sizing                  3 training investment
        4 maintenance frequency             5 equipment upgrade
        6 inspection intensity              7 process-control strength
        8 inventory policy                  9 demand-forecast accuracy
        10 automation level                 11 lean implementation
        """
        action = np.clip(action, -1.0, 1.0)

        self._update_costs_based_on_actions(action)
        self._update_quality_based_on_actions(action)
        self._update_resources_based_on_actions(action)

    def _update_costs_based_on_actions(self, action: np.ndarray):
        """Scale each cost component multiplicatively based on the action."""
        # Material: quality orientation raises cost, diversification lowers risk cost.
        material_factor = 1.0 + action[0] * 0.1
        supplier_factor = 1.0 - action[1] * 0.05
        self.current_costs.material_cost *= material_factor * supplier_factor

        # Labor: headcount changes and training spend.
        workforce_factor = 1.0 + action[2] * 0.15
        training_factor = 1.0 + action[3] * 0.08
        self.current_costs.labor_cost *= workforce_factor * training_factor

        # Equipment: maintenance frequency and upgrade investment.
        maintenance_factor = 1.0 + action[4] * 0.12
        upgrade_factor = 1.0 + action[5] * 0.20
        self.current_costs.equipment_cost *= maintenance_factor * upgrade_factor

        # Quality: inspection plus process-control spend.
        quality_investment = (action[6] + action[7]) * 0.1
        self.current_costs.quality_cost *= (1.0 + quality_investment)

        # Logistics: any inventory-policy shift costs; better forecasts save.
        inventory_factor = 1.0 + abs(action[8]) * 0.08
        forecasting_factor = 1.0 - action[9] * 0.05
        self.current_costs.logistics_cost *= inventory_factor * forecasting_factor

        # Overhead: automation and lean both reduce administration cost.
        automation_factor = 1.0 - action[10] * 0.10
        lean_factor = 1.0 - action[11] * 0.08
        self.current_costs.overhead_cost *= automation_factor * lean_factor

    def _update_quality_based_on_actions(self, action: np.ndarray):
        """Shift quality metrics additively based on the action."""
        # Defect rate: note the sign — positive values here INCREASE the rate,
        # while better materials (action[0] < 0... actually action[0] > 0 is
        # quality-oriented sourcing, hence the negative coefficient) reduce it.
        quality_improvement = (
            -action[0] * 0.002 +      # quality-oriented sourcing
            action[3] * 0.003 +       # training
            action[4] * 0.002 +       # maintenance
            action[6] * 0.004 +       # inspection intensity
            action[7] * 0.005 +       # process control
            action[10] * 0.003        # automation
        )
        self.current_quality.defect_rate = max(0.005, 
            self.current_quality.defect_rate + quality_improvement)

        # Customer satisfaction.
        satisfaction_improvement = (
            -action[0] * 0.01 +       # quality/cost trade-off
            action[3] * 0.015 +       # training
            action[7] * 0.02 +        # process control
            action[9] * 0.01          # forecast accuracy
        )
        self.current_quality.customer_satisfaction = np.clip(
            self.current_quality.customer_satisfaction + satisfaction_improvement, 0.7, 1.0)

        # On-time delivery.
        delivery_improvement = (
            action[1] * 0.01 +        # supplier diversification
            action[8] * 0.015 +       # inventory policy
            action[9] * 0.02 +        # demand forecasting
            action[11] * 0.01         # lean implementation
        )
        self.current_quality.on_time_delivery = np.clip(
            self.current_quality.on_time_delivery + delivery_improvement, 0.8, 1.0)

        # First-pass yield.
        yield_improvement = (
            action[3] * 0.01 +        # training
            action[4] * 0.008 +       # maintenance
            action[6] * 0.012 +       # inspection
            action[7] * 0.015 +       # process control
            action[10] * 0.01         # automation
        )
        self.current_quality.first_pass_yield = np.clip(
            self.current_quality.first_pass_yield + yield_improvement, 0.8, 1.0)

    def _update_resources_based_on_actions(self, action: np.ndarray):
        """Nudge resource indicators toward their action-implied levels."""
        self.resource_state['equipment_utilization'] = np.clip(
            self.resource_state['equipment_utilization'] + action[4] * 0.02, 0.5, 1.0)

        self.resource_state['workforce_efficiency'] = np.clip(
            self.resource_state['workforce_efficiency'] + action[3] * 0.03, 0.6, 1.0)

        self.resource_state['process_automation_level'] = np.clip(
            self.resource_state['process_automation_level'] + action[10] * 0.02, 0.5, 1.0)

        self.resource_state['quality_system_maturity'] = np.clip(
            self.resource_state['quality_system_maturity'] + action[7] * 0.015, 0.6, 1.0)

    def _simulate_environment_dynamics(self):
        """Apply random exogenous drift to market conditions and resources."""
        # Market conditions jitter slightly each day.
        self.market_conditions['material_price_index'] *= np.random.uniform(0.98, 1.02)
        self.market_conditions['demand_volatility'] *= np.random.uniform(0.95, 1.05)
        self.market_conditions['competitive_pressure'] *= np.random.uniform(0.97, 1.03)

        # Equipment-related resources decay slowly without intervention.
        for key in self.resource_state:
            if key in ['equipment_utilization', 'maintenance_status']:
                self.resource_state[key] *= np.random.uniform(0.995, 1.0)

    def _calculate_reward(self) -> float:
        """Compute the scalar reward: 60% cost, 30% quality, 10% efficiency/sustainability."""
        # Cost term: fraction saved relative to a fixed baseline (floored at 0).
        baseline_total_cost = 30000
        cost_reward = max(0, (baseline_total_cost - self.current_costs.total_cost) / baseline_total_cost)

        # Quality term: large penalty when constraints are violated, graded
        # bonus otherwise.
        if not self.current_quality.meets_quality_constraints(self.quality_constraints):
            quality_term = -2.0  # flat violation penalty

            # Additional penalties proportional to the violation magnitude.
            if self.current_quality.defect_rate > self.quality_constraints['max_defect_rate']:
                quality_term -= (self.current_quality.defect_rate - 
                                 self.quality_constraints['max_defect_rate']) * 100

            if self.current_quality.customer_satisfaction < self.quality_constraints['min_customer_satisfaction']:
                quality_term -= (self.quality_constraints['min_customer_satisfaction'] - 
                                 self.current_quality.customer_satisfaction) * 10
        else:
            quality_term = (
                (1.0 - self.current_quality.defect_rate * 20) * 0.2 +
                self.current_quality.customer_satisfaction * 0.2 +
                self.current_quality.on_time_delivery * 0.1 +
                self.current_quality.first_pass_yield * 0.1
            )

        # Efficiency term.
        efficiency_reward = (
            self.resource_state['equipment_utilization'] * 0.1 +
            self.resource_state['workforce_efficiency'] * 0.1 +
            self.resource_state['process_automation_level'] * 0.05
        )

        # Sustainability term (long-horizon health).
        sustainability_reward = (
            self.resource_state['quality_system_maturity'] * 0.05 +
            self.resource_state['maintenance_status'] * 0.05
        )

        total_reward = (
            cost_reward * 0.6 +            # cost optimization weight 60%
            quality_term * 0.3 +           # quality constraint weight 30%
            efficiency_reward * 0.05 +     # efficiency weight 5%
            sustainability_reward * 0.05   # sustainability weight 5%
        )

        return total_reward

    def _get_info(self) -> Dict:
        """Return a diagnostics dict for the current step."""
        return {
            'total_cost': self.current_costs.total_cost,
            'cost_breakdown': {
                'material': self.current_costs.material_cost,
                'labor': self.current_costs.labor_cost,
                'equipment': self.current_costs.equipment_cost,
                'quality': self.current_costs.quality_cost,
                'logistics': self.current_costs.logistics_cost,
                'overhead': self.current_costs.overhead_cost
            },
            'quality_metrics': {
                'defect_rate': self.current_quality.defect_rate,
                'customer_satisfaction': self.current_quality.customer_satisfaction,
                'on_time_delivery': self.current_quality.on_time_delivery,
                'first_pass_yield': self.current_quality.first_pass_yield
            },
            'quality_constraints_met': self.current_quality.meets_quality_constraints(
                self.quality_constraints),
            'step': self.current_step
        }

class CostOptimizationAgent:
    """PPO-style actor-critic agent for continuous cost-control actions.

    The actor maps a state to an action mean in [-1, 1] (tanh output);
    exploration adds Gaussian noise with a fixed std of 0.2. Updates use a
    clipped PPO surrogate with GAE advantages over a randomly sampled batch.

    Fixes vs. the previous revision:
    - `get_action` ran the BatchNorm1d layers in training mode on a batch of
      one, which raises ValueError in PyTorch; inference now runs in eval
      mode and training mode is restored afterwards.
    - The GAE loop only zeroed the bootstrap value at the final index; it now
      truncates bootstrapping (and the advantage chain) at every terminal
      transition.
    - Advantages were built as a Python list of 0-dim tensors and passed to
      torch.FloatTensor, which fails on current PyTorch; torch.stack is used.
    """

    def __init__(self, state_dim: int, action_dim: int, lr: float = 3e-4):
        """Build actor/critic networks and their optimizers.

        Args:
            state_dim: dimension of the observation vector.
            action_dim: dimension of the continuous action vector.
            lr: actor learning rate; the critic uses lr * 0.5.
        """
        self.state_dim = state_dim
        self.action_dim = action_dim

        # Actor (policy) network: state -> action mean in [-1, 1].
        self.actor = nn.Sequential(
            nn.Linear(state_dim, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, action_dim),
            nn.Tanh()  # bound outputs to [-1, 1]
        )

        # Critic (value) network: state -> scalar state value.
        self.critic = nn.Sequential(
            nn.Linear(state_dim, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1)
        )

        # Optimizers; the critic learns at half the actor's rate.
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr * 0.5)

        # PPO hyperparameters.
        self.clip_epsilon = 0.2     # surrogate clipping range
        self.gamma = 0.99           # discount factor
        self.gae_lambda = 0.95      # GAE smoothing parameter
        self.entropy_weight = 0.01  # entropy regularization weight

        # Transition buffer (oldest entries evicted past 10k).
        self.memory = deque(maxlen=10000)

    def get_action(self, state: np.ndarray, deterministic: bool = False):
        """Select an action for `state`.

        Returns:
            When deterministic: the action array of shape (action_dim,).
            Otherwise: (sampled action, summed log-probability) tuple.
        """
        if len(state.shape) == 1:
            state = state.reshape(1, -1)  # add a batch dimension

        state_tensor = torch.FloatTensor(state)

        # BatchNorm1d raises on a single-sample batch in training mode;
        # switch to eval for inference (also disables dropout noise).
        self.actor.eval()
        try:
            with torch.no_grad():
                action_mean = self.actor(state_tensor)

                if deterministic:
                    # Strip the batch dimension: (1, action_dim) -> (action_dim,).
                    return action_mean.numpy()[0]

                # Gaussian exploration around the policy mean (fixed std 0.2).
                action_std = torch.ones_like(action_mean) * 0.2
                dist = torch.distributions.Normal(action_mean, action_std)
                action = dist.sample()
                # Joint log-probability: sum per-dimension log densities.
                action_logprob = dist.log_prob(action).sum(dim=1)

                return action.numpy()[0], action_logprob.numpy()[0]
        finally:
            self.actor.train()  # restore training mode for update()

    def store_transition(self, state, action, reward, next_state, done, log_prob):
        """Append one (s, a, r, s', done, log_prob) transition to the buffer."""
        self.memory.append((state, action, reward, next_state, done, log_prob))

    def update(self):
        """Run one PPO update (5 epochs) on a randomly sampled batch."""
        if len(self.memory) < 64:  # wait until a minimum batch is available
            return

        # Sample up to 512 transitions uniformly without replacement.
        batch_size = min(len(self.memory), 512)
        indices = random.sample(range(len(self.memory)), batch_size)
        batch = [self.memory[i] for i in indices]

        states, actions, rewards, next_states, dones, old_log_probs = zip(*batch)

        states = torch.FloatTensor(np.array(states))
        actions = torch.FloatTensor(np.array(actions))
        rewards = torch.FloatTensor(rewards)
        next_states = torch.FloatTensor(np.array(next_states))
        dones = torch.BoolTensor(dones)
        old_log_probs = torch.FloatTensor(old_log_probs)

        with torch.no_grad():
            values = self.critic(states).squeeze()
            next_values = self.critic(next_states).squeeze()

            # GAE over the sampled batch.
            # NOTE(review): random sampling breaks trajectory order, so
            # treating the batch as one sequence is an approximation inherited
            # from the original design; terminal transitions at least truncate
            # bootstrapping correctly now.
            advantages = []
            advantage = 0.0
            last = len(rewards) - 1
            for i in reversed(range(len(rewards))):
                if dones[i]:
                    next_value = 0.0  # no bootstrapping past a terminal state
                    advantage = 0.0   # no credit flows across episode ends
                elif i == last:
                    next_value = next_values[i]
                else:
                    next_value = values[i + 1]

                delta = rewards[i] + self.gamma * next_value - values[i]
                advantage = delta + self.gamma * self.gae_lambda * advantage
                advantages.insert(0, advantage)

            advantages = torch.stack(
                [torch.as_tensor(a, dtype=torch.float32) for a in advantages])
            # Normalize for stable gradients.
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
            returns = advantages + values

        for _ in range(5):  # PPO epochs per update
            # Log-probabilities of the stored actions under the current policy.
            action_mean = self.actor(states)
            action_std = torch.ones_like(action_mean) * 0.2
            dist = torch.distributions.Normal(action_mean, action_std)
            new_log_probs = dist.log_prob(actions).sum(dim=1)

            # Importance ratio and clipped surrogate objective.
            ratio = torch.exp(new_log_probs - old_log_probs)
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages
            actor_loss = -torch.min(surr1, surr2).mean()

            # Entropy bonus encourages exploration.
            entropy = dist.entropy().sum(dim=1).mean()
            actor_loss -= self.entropy_weight * entropy

            # Critic regression toward the GAE returns.
            current_values = self.critic(states).squeeze()
            critic_loss = nn.MSELoss()(current_values, returns)

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
            self.actor_optimizer.step()

            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
            self.critic_optimizer.step()

    def save_model(self, path: str):
        """Save actor/critic weights and optimizer states to `path`."""
        torch.save({
            'actor_state_dict': self.actor.state_dict(),
            'critic_state_dict': self.critic.state_dict(),
            'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),
            'critic_optimizer_state_dict': self.critic_optimizer.state_dict()
        }, path)

    def load_model(self, path: str):
        """Load actor/critic weights and optimizer states from `path`."""
        checkpoint = torch.load(path)
        self.actor.load_state_dict(checkpoint['actor_state_dict'])
        self.critic.load_state_dict(checkpoint['critic_state_dict'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])


class CostControlTrainingSystem:
    """End-to-end training system for cost-control optimization.

    Wires a CostControlEnvironment to a CostOptimizationAgent and records
    per-episode training metrics.
    """

    def __init__(self, config: Dict):
        """Create the environment, the agent and empty metric buffers."""
        self.config = config

        # Environment first — the agent's network sizes depend on its dims.
        self.env = CostControlEnvironment(config)
        self.agent = CostOptimizationAgent(
            state_dim=self.env.state_dim,
            action_dim=self.env.action_dim,
            lr=config.get('learning_rate', 3e-4),
        )

        # Per-episode training metrics, filled in by train().
        self.training_metrics = {
            name: []
            for name in (
                'episode_rewards',
                'total_costs',
                'cost_reductions',
                'quality_violations',
                'constraint_satisfaction_rate',
            )
        }
        
    def train(self, num_episodes: int = 2000):
        """Run the training loop for `num_episodes` episodes.

        Populates self.training_metrics with per-episode reward, final cost,
        cost reduction vs. the first episode, quality violations and the
        constraint-satisfaction rate. The agent is updated every 10 episodes.
        """
        logger.info(f"开始训练成本控制智能体，共 {num_episodes} 个回合")

        baseline_cost = None  # first episode's final cost, used as reduction baseline

        for episode in range(num_episodes):
            state = self.env.reset()
            episode_reward = 0
            episode_cost = 0
            quality_violations = 0

            done = False
            step_count = 0

            while not done:
                # Stochastic policy throughout training. (The previous
                # revision had an if/else here whose branches were identical —
                # a redundant "extra exploration for the first 10 steps" stub.)
                action, log_prob = self.agent.get_action(state, deterministic=False)

                next_state, reward, done, info = self.env.step(action)

                self.agent.store_transition(state, action, reward, next_state, done, log_prob)

                episode_reward += reward
                episode_cost = info['total_cost']  # last step's cost is the episode cost

                if not info['quality_constraints_met']:
                    quality_violations += 1

                state = next_state
                step_count += 1

            # Batched PPO update every 10 episodes.
            if episode % 10 == 0:
                self.agent.update()

            # Record per-episode metrics.
            self.training_metrics['episode_rewards'].append(episode_reward)
            self.training_metrics['total_costs'].append(episode_cost)
            self.training_metrics['quality_violations'].append(quality_violations)

            # Cost reduction relative to the first episode.
            if baseline_cost is None:
                baseline_cost = episode_cost
                cost_reduction = 0
            else:
                cost_reduction = (baseline_cost - episode_cost) / baseline_cost * 100

            self.training_metrics['cost_reductions'].append(cost_reduction)

            # Fraction of steps with all quality constraints satisfied.
            constraint_satisfaction = 1.0 - (quality_violations / step_count)
            self.training_metrics['constraint_satisfaction_rate'].append(constraint_satisfaction)

            # Periodic progress log (rolling 100-episode averages).
            if episode % 100 == 0:
                avg_reward = np.mean(self.training_metrics['episode_rewards'][-100:])
                avg_cost = np.mean(self.training_metrics['total_costs'][-100:])
                avg_cost_reduction = np.mean(self.training_metrics['cost_reductions'][-100:])
                avg_constraint_satisfaction = np.mean(
                    self.training_metrics['constraint_satisfaction_rate'][-100:])

                logger.info(f"Episode {episode}:")
                logger.info(f"  平均奖励: {avg_reward:.3f}")
                logger.info(f"  平均总成本: {avg_cost:.2f}")
                logger.info(f"  平均成本降低: {avg_cost_reduction:.2f}%")
                logger.info(f"  约束满足率: {avg_constraint_satisfaction:.3f}")

        logger.info("训练完成")
    
    def evaluate(self, num_episodes: int = 50) -> Dict:
        """Evaluate the deterministic policy and return summary statistics.

        The cost baseline is the mean cost of the first 100 training
        episodes, so train() should have been run first.
        """
        logger.info("开始评估智能体性能")

        eval_metrics = {
            name: []
            for name in (
                'total_costs',
                'cost_reductions',
                'quality_violations',
                'constraint_satisfaction_rates',
                'episode_rewards',
            )
        }

        # Early-training costs serve as the comparison baseline.
        baseline_cost = np.mean(self.training_metrics['total_costs'][:100])

        for episode in range(num_episodes):
            state = self.env.reset()
            episode_reward = 0
            quality_violations = 0
            step_count = 0

            done = False
            while not done:
                # Deterministic (mean) action — no exploration noise.
                action = self.agent.get_action(state, deterministic=True)
                state, reward, done, info = self.env.step(action)

                episode_reward += reward
                if not info['quality_constraints_met']:
                    quality_violations += 1
                step_count += 1

            # Record the episode's outcomes (info is from the final step).
            eval_metrics['total_costs'].append(info['total_cost'])
            eval_metrics['episode_rewards'].append(episode_reward)
            eval_metrics['quality_violations'].append(quality_violations)

            cost_reduction = (baseline_cost - info['total_cost']) / baseline_cost * 100
            eval_metrics['cost_reductions'].append(cost_reduction)

            constraint_satisfaction = 1.0 - (quality_violations / step_count)
            eval_metrics['constraint_satisfaction_rates'].append(constraint_satisfaction)

        # Aggregate each metric into mean/std/min/max.
        results = {
            key: {
                'mean': np.mean(series),
                'std': np.std(series),
                'min': np.min(series),
                'max': np.max(series),
            }
            for key, series in eval_metrics.items()
        }

        logger.info("评估结果:")
        logger.info(f"  平均总成本: {results['total_costs']['mean']:.2f} ± {results['total_costs']['std']:.2f}")
        logger.info(f"  平均成本降低: {results['cost_reductions']['mean']:.2f}% ± {results['cost_reductions']['std']:.2f}%")
        logger.info(f"  平均约束满足率: {results['constraint_satisfaction_rates']['mean']:.3f}")

        return results
    
    def optimize_production_costs(self, current_state_data: Dict) -> Dict:
        """Produce real-time cost-optimization advice for a business snapshot.

        Maps raw business data to the model's state vector, queries the
        deterministic policy, and returns recommendations, their estimated
        impact and a confidence score.
        """
        # Raw business data -> model state vector.
        state = self._convert_business_data_to_state(current_state_data)

        # Deterministic policy output for this state.
        action = self.agent.get_action(state, deterministic=True)

        return {
            'recommendations': self._convert_action_to_business_recommendations(action),
            'expected_impact': self._estimate_optimization_impact(action, current_state_data),
            'confidence_score': self._calculate_confidence_score(action),
        }
    
    def _convert_business_data_to_state(self, business_data: Dict) -> np.ndarray:
        """将业务数据转换为模型状态"""
        # 这里需要根据实际业务数据格式进行映射
        state = np.zeros(self.env.state_dim)
        
        # 成本要素
        costs = business_data.get('costs', {})
        state[0] = costs.get('material_cost', 10000) / 15000
        state[1] = costs.get('labor_cost', 6000) / 10000
        state[2] = costs.get('equipment_cost', 4000) / 8000
        state[3] = costs.get('quality_cost', 2000) / 5000
        state[4] = costs.get('logistics_cost', 3000) / 6000
        state[5] = costs.get('overhead_cost', 2000) / 4000
        
        # 质量指标
        quality = business_data.get('quality', {})
        state[6] = 1.0 - quality.get('defect_rate', 0.03) * 20
        state[7] = quality.get('customer_satisfaction', 0.9)
        state[8] = quality.get('on_time_delivery', 0.95)
        state[9] = quality.get('first_pass_yield', 0.9)
        
        # 其他状态填充默认值或从business_data获取
        for i in range(10, self.env.state_dim):
            state[i] = 0.8  # 默认值
        
        return state
    
    def _convert_action_to_business_recommendations(self, action: np.ndarray) -> Dict:
        """将模型动作转换为业务建议"""
        recommendations = {}
        
        # 原材料采购建议
        if abs(action[0]) > 0.1:
            recommendations['material_sourcing'] = {
                'action': 'optimize_supplier_mix' if action[0] > 0 else 'focus_on_cost_efficiency',
                'description': f"建议{'优化供应商组合，注重质量' if action[0] > 0 else '聚焦成本效率，优化采购策略'}",
                'priority': 'high' if abs(action[0]) > 0.3 else 'medium'
            }
        
        # 人力资源建议
        if abs(action[2]) > 0.1 or abs(action[3]) > 0.1:
            workforce_action = 'expand' if action[2] > 0 else 'optimize'
            training_action = 'increase' if action[3] > 0 else 'maintain'
            
            recommendations['workforce_management'] = {
                'workforce_sizing': workforce_action,
                'training_investment': training_action,
                'description': f"建议{workforce_action}人员规模，{training_action}培训投资",
                'expected_cost_impact': f"{'增加' if action[2] > 0 else '降低'}人工成本 {abs(action[2])*15:.1f}%"
            }
        
        # 设备维护建议
        if abs(action[4]) > 0.1 or abs(action[5]) > 0.1:
            maintenance_level = 'increase' if action[4] > 0 else 'optimize'
            upgrade_level = 'accelerate' if action[5] > 0 else 'postpone'
            
            recommendations['equipment_management'] = {
                'maintenance_strategy': maintenance_level,
                'upgrade_timeline': upgrade_level,
                'description': f"建议{maintenance_level}维护频率，{upgrade_level}设备升级",
                'risk_mitigation': '预防性维护' if action[4] > 0 else '成本优化导向维护'
            }
        
        # 质量控制建议
        if abs(action[6]) > 0.1 or abs(action[7]) > 0.1:
            inspection_level = 'strengthen' if action[6] > 0 else 'streamline'
            process_control = 'enhance' if action[7] > 0 else 'optimize'
            
            recommendations['quality_management'] = {
                'inspection_strategy': inspection_level,
                'process_control': process_control,
                'description': f"建议{inspection_level}检验强度，{process_control}过程控制",
                'quality_impact': '提升质量保证' if action[6] > 0 or action[7] > 0 else '平衡质量与成本'
            }
        
        # 库存管理建议
        if abs(action[8]) > 0.1 or abs(action[9]) > 0.1:
            inventory_strategy = 'increase_buffer' if action[8] > 0 else 'lean_inventory'
            forecasting = 'improve' if action[9] > 0 else 'maintain'
            
            recommendations['inventory_management'] = {
                'inventory_policy': inventory_strategy,
                'demand_forecasting': forecasting,
                'description': f"建议采用{inventory_strategy}库存策略，{forecasting}需求预测",
                'cash_flow_impact': '增加库存投资' if action[8] > 0 else '优化现金流'
            }
        
        # 流程优化建议
        if abs(action[10]) > 0.1 or abs(action[11]) > 0.1:
            automation = 'accelerate' if action[10] > 0 else 'selective'
            lean = 'intensify' if action[11] > 0 else 'maintain'
            
            recommendations['process_optimization'] = {
                'automation_strategy': automation,
                'lean_implementation': lean,
                'description': f"建议{automation}自动化进程，{lean}精益管理",
                'long_term_benefits': '提升效率和竞争力' if action[10] > 0 or action[11] > 0 else '稳步优化'
            }
        
        return recommendations
    
    def _estimate_optimization_impact(self, action: np.ndarray, current_data: Dict) -> Dict:
        """估算优化效果"""
        current_costs = current_data.get('costs', {})
        total_current_cost = sum(current_costs.values())
        
        # 估算成本影响
        material_impact = action[0] * 0.1 + action[1] * (-0.05)
        labor_impact = action[2] * 0.15 + action[3] * 0.08
        equipment_impact = action[4] * 0.12 + action[5] * 0.20
        quality_impact = (action[6] + action[7]) * 0.1
        logistics_impact = abs(action[8]) * 0.08 + action[9] * (-0.05)
        overhead_impact = action[10] * (-0.10) + action[11] * (-0.08)
        
        estimated_cost_change = (
            current_costs.get('material_cost', 0) * material_impact +
            current_costs.get('labor_cost', 0) * labor_impact +
            current_costs.get('equipment_cost', 0) * equipment_impact +
            current_costs.get('quality_cost', 0) * quality_impact +
            current_costs.get('logistics_cost', 0) * logistics_impact +
            current_costs.get('overhead_cost', 0) * overhead_impact
        )
        
        estimated_total_cost = total_current_cost + estimated_cost_change
        cost_reduction_percentage = (total_current_cost - estimated_total_cost) / total_current_cost * 100
        
        return {
            'estimated_cost_reduction': cost_reduction_percentage,
            'estimated_total_cost': estimated_total_cost,
            'cost_change_breakdown': {
                'material': material_impact * 100,
                'labor': labor_impact * 100,
                'equipment': equipment_impact * 100,
                'quality': quality_impact * 100,
                'logistics': logistics_impact * 100,
                'overhead': overhead_impact * 100
            },
            'implementation_timeline': '1-3个月',
            'risk_assessment': self._assess_implementation_risk(action)
        }
    
    def _assess_implementation_risk(self, action: np.ndarray) -> str:
        """评估实施风险"""
        high_risk_actions = np.sum(np.abs(action) > 0.5)
        total_change_magnitude = np.sum(np.abs(action))
        
        if high_risk_actions > 3 or total_change_magnitude > 4.0:
            return 'high'
        elif high_risk_actions > 1 or total_change_magnitude > 2.0:
            return 'medium'
        else:
            return 'low'
    
    def _calculate_confidence_score(self, action: np.ndarray) -> float:
        """计算建议置信度"""
        # 基于动作的一致性和训练收敛程度
        action_consistency = 1.0 - np.std(action) / (np.mean(np.abs(action)) + 1e-8)
        
        # 基于训练指标的收敛程度
        if len(self.training_metrics['episode_rewards']) > 100:
            recent_rewards = self.training_metrics['episode_rewards'][-100:]
            reward_stability = 1.0 - (np.std(recent_rewards) / (np.mean(recent_rewards) + 1e-8))
        else:
            reward_stability = 0.5
        
        confidence = min((action_consistency * 0.6 + reward_stability * 0.4), 1.0)
        return float(confidence)
    
    def plot_training_progress(self):
        """Render a 2x3 dashboard of training metrics and display it.

        Shows reward, cost, cost-reduction, quality-violation and
        constraint-satisfaction curves; the sixth panel holds a 50-episode
        moving average of rewards once enough history exists.
        """
        fig, axes = plt.subplots(2, 3, figsize=(18, 12))

        # (axes position, metrics key, title, y-label) for the five raw curves.
        panels = [
            ((0, 0), 'episode_rewards', '训练奖励趋势', '奖励'),
            ((0, 1), 'total_costs', '总成本趋势', '总成本'),
            ((0, 2), 'cost_reductions', '成本降低趋势', '成本降低 (%)'),
            ((1, 0), 'quality_violations', '质量约束违规', '违规次数'),
            ((1, 1), 'constraint_satisfaction_rate', '约束满足率', '满足率'),
        ]
        for (row, col), key, title, ylabel in panels:
            ax = axes[row, col]
            ax.plot(self.training_metrics[key])
            ax.set_title(title)
            ax.set_xlabel('回合')
            ax.set_ylabel(ylabel)

        # Moving-average reward panel, only once enough history exists.
        rewards = self.training_metrics['episode_rewards']
        if len(rewards) > 50:
            window_size = 50
            smoothed = pd.Series(rewards).rolling(window=window_size).mean()
            axes[1, 2].plot(smoothed)
            axes[1, 2].set_title(f'奖励移动平均 (窗口={window_size})')
            axes[1, 2].set_xlabel('回合')
            axes[1, 2].set_ylabel('平均奖励')

        plt.tight_layout()
        plt.show()