#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：python_learning 
@File ：生产计划优化神经网络系统.py
@IDE  ：PyCharm 
@Author ：李涵彬
@Date ：2025/7/9 下午1:06 
"""

"""
生产计划优化神经网络系统
工业级实现，包含数据准备、模型训练、结果校验等完整流程
"""

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Dict, List, Tuple, Optional, Any
import logging
import json
import pickle
from datetime import datetime
import warnings

# Silence library warnings (sklearn/pandas deprecation noise) for cleaner logs.
warnings.filterwarnings('ignore')

# Configure logging: INFO level, written both to a log file and to the console.
logging.basicConfig(
	level=logging.INFO,
	format='%(asctime)s - %(levelname)s - %(message)s',
	handlers=[
		logging.FileHandler('production_planning.log'),
		logging.StreamHandler()
	]
)
logger = logging.getLogger(__name__)


class ProductionDataset(Dataset):
	"""Torch dataset wrapping pre-processed feature and target arrays.

	Arrays are converted to float32 tensors once at construction, so
	``__getitem__`` is a cheap index into memory.
	"""

	def __init__(self, features: np.ndarray, targets: np.ndarray):
		self.features = torch.tensor(features, dtype=torch.float32)
		self.targets = torch.tensor(targets, dtype=torch.float32)

	def __len__(self) -> int:
		return self.features.shape[0]

	def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor]:
		return self.features[idx], self.targets[idx]


class ProductionPlanningNN(nn.Module):
	"""Feed-forward regression network for production-plan optimization.

	Architecture: a stack of Linear -> BatchNorm1d -> ReLU -> Dropout
	hidden blocks followed by a plain linear output layer.

	Args:
		input_dim: number of input features.
		hidden_dims: width of each hidden block, in order.
		output_dim: number of regression targets.
		dropout_rate: dropout probability applied after each hidden block.
	"""

	def __init__(self, input_dim: int, hidden_dims: List[int], output_dim: int,
				 dropout_rate: float = 0.2):
		super(ProductionPlanningNN, self).__init__()

		layers = []
		prev_dim = input_dim

		# Build hidden blocks.
		# BUGFIX: the original guarded BatchNorm with
		# `... if self.training else nn.Identity()`, but that expression is
		# evaluated ONCE at construction time, when nn.Module.training is
		# always True — the Identity branch was dead code. BatchNorm1d
		# already switches between batch and running statistics via
		# train()/eval(), so it is used unconditionally here (with the
		# default track_running_stats=True, which also makes eval-mode
		# single-sample inference work).
		for hidden_dim in hidden_dims:
			layers.extend([
				nn.Linear(prev_dim, hidden_dim),
				nn.BatchNorm1d(hidden_dim),
				nn.ReLU(),
				nn.Dropout(dropout_rate)
			])
			prev_dim = hidden_dim

		# Output layer (no activation: raw regression output).
		layers.append(nn.Linear(prev_dim, output_dim))

		self.network = nn.Sequential(*layers)

		# Deterministic, well-scaled starting point for training.
		self._initialize_weights()

	def _initialize_weights(self):
		"""Xavier-uniform weights and zero biases for every linear layer."""
		for m in self.modules():
			if isinstance(m, nn.Linear):
				nn.init.xavier_uniform_(m.weight)
				nn.init.constant_(m.bias, 0)

	def forward(self, x):
		"""Run a (batch, input_dim) tensor through the network."""
		return self.network(x)


class DataPreprocessor:
	"""Fits and applies feature/target scaling plus categorical encoding.

	Object-dtype feature columns are label-encoded; numeric columns are
	standard-scaled. ``fit_transform`` learns the transforms from the
	training frame, ``transform`` re-applies them to new data.
	"""

	def __init__(self):
		self.feature_scaler = StandardScaler()
		self.target_scaler = StandardScaler()
		# One LabelEncoder per categorical feature column, keyed by name.
		self.label_encoders: Dict[str, LabelEncoder] = {}
		self.feature_columns: List[str] = []
		self.target_columns: List[str] = []

	def fit_transform(self, df: pd.DataFrame, feature_cols: List[str],
					  target_cols: List[str]) -> Tuple[np.ndarray, np.ndarray]:
		"""Learn encoders/scalers from *df* and return transformed arrays.

		Args:
			df: source frame containing both feature and target columns.
			feature_cols: column names to use as model inputs.
			target_cols: column names to use as regression targets.

		Returns:
			(features, targets) as scaled 2-D float arrays.
		"""
		self.feature_columns = feature_cols
		self.target_columns = target_cols

		features = self._process_features(df[feature_cols], fit=True)
		targets = self._process_targets(df[target_cols], fit=True)

		return features, targets

	def transform(self, df: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
		"""Apply previously fitted transforms to *df*.

		BUGFIX: prediction-time frames may not carry the target columns;
		in that case an empty (n, 0) array is returned for targets
		instead of raising KeyError.
		"""
		features = self._process_features(df[self.feature_columns], fit=False)

		if all(col in df.columns for col in self.target_columns):
			targets = self._process_targets(df[self.target_columns], fit=False)
		else:
			targets = np.empty((len(df), 0))

		return features, targets

	def _process_features(self, df: pd.DataFrame, fit: bool = False) -> np.ndarray:
		"""Encode categorical columns, fill NaNs with 0, and scale numerics."""
		processed_df = df.copy()

		# Label-encode every object-dtype column; NaNs become 'unknown'.
		categorical_cols = processed_df.select_dtypes(include=['object']).columns
		for col in categorical_cols:
			values = processed_df[col].fillna('unknown')
			if fit:
				self.label_encoders[col] = LabelEncoder()
				processed_df[col] = self.label_encoders[col].fit_transform(values)
			else:
				encoder = self.label_encoders[col]
				# BUGFIX: LabelEncoder.transform raises ValueError on
				# categories unseen during fit; map them to the first known
				# class so inference on new data cannot crash.
				known = set(encoder.classes_)
				values = values.where(values.isin(known), encoder.classes_[0])
				processed_df[col] = encoder.transform(values)

		# After encoding, all feature columns should be numeric; NaNs -> 0.
		numerical_data = processed_df.select_dtypes(include=[np.number]).fillna(0)

		if fit:
			return self.feature_scaler.fit_transform(numerical_data)
		return self.feature_scaler.transform(numerical_data)

	def _process_targets(self, df: pd.DataFrame, fit: bool = False) -> np.ndarray:
		"""Fill NaNs in numeric target columns and standard-scale them."""
		numerical_data = df.select_dtypes(include=[np.number]).fillna(0)

		if fit:
			return self.target_scaler.fit_transform(numerical_data)
		return self.target_scaler.transform(numerical_data)

	def inverse_transform_targets(self, targets: np.ndarray) -> np.ndarray:
		"""Map scaled target values back to the original target scale."""
		return self.target_scaler.inverse_transform(targets)


class ProductionPlanningSystem:
	"""End-to-end production-plan optimization pipeline.

	Wires together Excel data loading, preprocessing, model training,
	evaluation, prediction and production-sequence optimization.
	Configuration is a plain dict; see ``main()`` for the expected keys.
	"""

	def __init__(self, config: Dict[str, Any]):
		"""Store the config and pick a compute device.

		Args:
			config: dict with keys 'batch_size', 'learning_rate', 'epochs',
				'hidden_dims', 'dropout_rate', 'weight_decay',
				'early_stopping_patience'.
		"""
		self.config = config
		self.model: Optional[ProductionPlanningNN] = None
		self.preprocessor = DataPreprocessor()
		self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
		logger.info(f"使用设备: {self.device}")

	def load_data(self, file_path: str) -> pd.DataFrame:
		"""Load production data from an Excel workbook.

		Raises:
			Any exception from pandas (missing file, bad format); it is
			logged and re-raised for the caller to handle.
		"""
		try:
			df = pd.read_excel(file_path)
			logger.info(f"成功加载数据，形状: {df.shape}")
			return df
		except Exception as e:
			logger.error(f"数据加载失败: {e}")
			raise

	def prepare_data(self, df: pd.DataFrame) -> Tuple[DataLoader, DataLoader, DataLoader]:
		"""Fit the preprocessor and split *df* into train/val/test loaders.

		Split is 70/15/15 with a fixed random seed for reproducibility.
		"""
		# Column selection is rule-based; see the two _identify_* helpers.
		feature_cols = self._identify_feature_columns(df)
		target_cols = self._identify_target_columns(df)

		logger.info(f"特征列: {feature_cols}")
		logger.info(f"目标列: {target_cols}")

		# NOTE(review): scalers/encoders are fitted on the FULL frame before
		# splitting, so there is mild train/test leakage — confirm this is
		# acceptable for the use case.
		features, targets = self.preprocessor.fit_transform(df, feature_cols, target_cols)

		X_train, X_temp, y_train, y_temp = train_test_split(
			features, targets, test_size=0.3, random_state=42
		)
		X_val, X_test, y_val, y_test = train_test_split(
			X_temp, y_temp, test_size=0.5, random_state=42
		)

		train_dataset = ProductionDataset(X_train, y_train)
		val_dataset = ProductionDataset(X_val, y_val)
		test_dataset = ProductionDataset(X_test, y_test)

		# Training uses BatchNorm, which needs batches of >= 2 samples,
		# hence the batch-size floor and drop_last on the training loader.
		train_loader = DataLoader(
			train_dataset,
			batch_size=max(2, self.config['batch_size']),
			shuffle=True,
			drop_last=True
		)
		val_loader = DataLoader(
			val_dataset, batch_size=self.config['batch_size'], shuffle=False
		)
		test_loader = DataLoader(
			test_dataset, batch_size=self.config['batch_size'], shuffle=False
		)

		return train_loader, val_loader, test_loader

	def _identify_feature_columns(self, df: pd.DataFrame) -> List[str]:
		"""Return feature columns: everything not on the exclusion list.

		The exclusion list is business-specific (steel-making status and
		marking columns); adjust it when the input schema changes.
		"""
		exclude_cols = ['出钢标记', '牌号', '轧厚', '轧宽', '硬度组', '出钢温度', '表面指数代码', 'Status_filter_a',
						'Status_model_name_sign_all', 'Status_zzc_sign', 'Status_czc_sign', 'Status_gdc_sign',
						'Status_tgc_sign', 'Status_hot_sign', 'Status_unit_func_sign', 'Status_klyc_sign']
		feature_cols = [col for col in df.columns if col not in exclude_cols]
		return feature_cols

	def _identify_target_columns(self, df: pd.DataFrame) -> List[str]:
		"""Return the target column: '序号' (sequence number) if present,
		otherwise the last column of the frame."""
		target_cols = ['序号'] if '序号' in df.columns else [df.columns[-1]]
		return target_cols

	def build_model(self, input_dim: int, output_dim: int) -> ProductionPlanningNN:
		"""Construct the network from config and move it to the device."""
		model = ProductionPlanningNN(
			input_dim=input_dim,
			hidden_dims=self.config['hidden_dims'],
			output_dim=output_dim,
			dropout_rate=self.config['dropout_rate']
		)

		model.to(self.device)
		return model

	def train(self, train_loader: DataLoader, val_loader: DataLoader) -> Dict[str, List]:
		"""Train with Adam + ReduceLROnPlateau and early stopping.

		The best (lowest validation loss) weights are checkpointed to
		'best_model.pth' and restored before returning.

		Returns:
			dict with per-epoch 'train_loss' and 'val_loss' lists.
		"""
		# Infer layer sizes from a single batch (original fetched it twice).
		sample_features, sample_targets = next(iter(train_loader))
		input_dim = sample_features.shape[1]
		output_dim = sample_targets.shape[1]

		self.model = self.build_model(input_dim, output_dim)

		optimizer = optim.Adam(
			self.model.parameters(),
			lr=self.config['learning_rate'],
			weight_decay=self.config['weight_decay']
		)

		criterion = nn.MSELoss()
		scheduler = optim.lr_scheduler.ReduceLROnPlateau(
			optimizer, mode='min', factor=0.5, patience=10
		)
		# (Removed dead code: a lambda was assigned to the private
		# scheduler attribute `_log_lr`, which ReduceLROnPlateau never
		# calls, so it had no effect.)

		history = {'train_loss': [], 'val_loss': []}
		best_val_loss = float('inf')
		patience_counter = 0

		for epoch in range(self.config['epochs']):
			# --- training pass ---
			self.model.train()
			train_loss = 0.0

			for batch_features, batch_targets in train_loader:
				batch_features = batch_features.to(self.device)
				batch_targets = batch_targets.to(self.device)

				optimizer.zero_grad()
				outputs = self.model(batch_features)
				loss = criterion(outputs, batch_targets)
				loss.backward()
				optimizer.step()

				train_loss += loss.item()

			# --- validation pass ---
			self.model.eval()
			val_loss = 0.0

			with torch.no_grad():
				for batch_features, batch_targets in val_loader:
					batch_features = batch_features.to(self.device)
					batch_targets = batch_targets.to(self.device)

					outputs = self.model(batch_features)
					loss = criterion(outputs, batch_targets)
					val_loss += loss.item()

			# Guard against empty loaders (possible with drop_last=True on
			# a very small dataset) to avoid ZeroDivisionError.
			train_loss /= max(len(train_loader), 1)
			val_loss /= max(len(val_loader), 1)

			history['train_loss'].append(train_loss)
			history['val_loss'].append(val_loss)

			scheduler.step(val_loss)

			# Early stopping: checkpoint on improvement, otherwise count
			# toward the patience limit.
			if val_loss < best_val_loss:
				best_val_loss = val_loss
				patience_counter = 0
				torch.save(self.model.state_dict(), 'best_model.pth')
			else:
				patience_counter += 1
				if patience_counter >= self.config['early_stopping_patience']:
					logger.info(f"早停触发，停止训练于epoch {epoch}")
					break

			if epoch % 10 == 0:
				logger.info(f"Epoch {epoch}, Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}")

		# Restore the best checkpoint; map_location keeps this correct even
		# if the checkpoint device differs from the current one.
		self.model.load_state_dict(
			torch.load('best_model.pth', map_location=self.device)
		)

		return history

	def evaluate(self, test_loader: DataLoader) -> Dict[str, float]:
		"""Evaluate on the test set; returns MSE/MAE/RMSE in original units."""
		self.model.eval()
		predictions = []
		actuals = []

		with torch.no_grad():
			for batch_features, batch_targets in test_loader:
				batch_features = batch_features.to(self.device)
				batch_targets = batch_targets.to(self.device)

				outputs = self.model(batch_features)
				predictions.extend(outputs.cpu().numpy())
				actuals.extend(batch_targets.cpu().numpy())

		predictions = np.array(predictions)
		actuals = np.array(actuals)

		# Undo the target scaling so metrics are in the original units.
		predictions_orig = self.preprocessor.inverse_transform_targets(predictions)
		actuals_orig = self.preprocessor.inverse_transform_targets(actuals)

		mse = mean_squared_error(actuals_orig, predictions_orig)
		mae = mean_absolute_error(actuals_orig, predictions_orig)
		rmse = np.sqrt(mse)

		metrics = {
			'mse': mse,
			'mae': mae,
			'rmse': rmse
		}

		logger.info(f"测试集评估结果: {metrics}")
		return metrics

	def predict(self, df: pd.DataFrame) -> np.ndarray:
		"""Predict targets for *df* and return them in the original scale."""
		self.model.eval()

		# Reuse the fitted preprocessor; targets (if any) are discarded.
		features, _ = self.preprocessor.transform(df)

		with torch.no_grad():
			features_tensor = torch.FloatTensor(features).to(self.device)
			predictions = self.model(features_tensor)
			predictions = predictions.cpu().numpy()

		predictions_orig = self.preprocessor.inverse_transform_targets(predictions)

		return predictions_orig

	def optimize_production_sequence(self, df: pd.DataFrame) -> pd.DataFrame:
		"""Rank rows by predicted priority and return them in that order.

		Adds 'predicted_priority' (first predicted target) and
		'optimized_sequence' (dense rank of the priority) columns.
		"""
		predictions = self.predict(df)

		result_df = df.copy()
		result_df['predicted_priority'] = predictions[:, 0]
		result_df['optimized_sequence'] = result_df['predicted_priority'].rank(method='dense')

		result_df = result_df.sort_values('optimized_sequence').reset_index(drop=True)

		return result_df

	def visualize_results(self, history: Dict[str, List], metrics: Dict[str, float]):
		"""Render a 2x2 summary figure and save it to 'training_results.png'."""
		fig, axes = plt.subplots(2, 2, figsize=(15, 10))

		# Top-left: loss curves over epochs.
		axes[0, 0].plot(history['train_loss'], label='Training Loss')
		axes[0, 0].plot(history['val_loss'], label='Validation Loss')
		axes[0, 0].set_title('Training History')
		axes[0, 0].set_xlabel('Epoch')
		axes[0, 0].set_ylabel('Loss')
		axes[0, 0].legend()
		axes[0, 0].grid(True)

		# Top-right: bar chart of the test metrics.
		metric_names = list(metrics.keys())
		metric_values = list(metrics.values())
		axes[0, 1].bar(metric_names, metric_values)
		axes[0, 1].set_title('Evaluation Metrics')
		axes[0, 1].set_ylabel('Value')

		# Bottom-left: parameter count as a proxy for model complexity.
		total_params = sum(p.numel() for p in self.model.parameters())
		axes[1, 0].text(0.5, 0.5, f'Total Parameters: {total_params:,}',
						ha='center', va='center', fontsize=14)
		axes[1, 0].set_title('Model Complexity')
		axes[1, 0].axis('off')

		# Bottom-right: text summary of the key numbers.
		summary_text = f"""
        最佳验证损失: {min(history['val_loss']):.4f}
        最终训练损失: {history['train_loss'][-1]:.4f}
        测试RMSE: {metrics['rmse']:.4f}
        测试MAE: {metrics['mae']:.4f}
        """
		axes[1, 1].text(0.1, 0.5, summary_text, ha='left', va='center', fontsize=12)
		axes[1, 1].set_title('Performance Summary')
		axes[1, 1].axis('off')

		plt.tight_layout()
		plt.savefig('training_results.png', dpi=300, bbox_inches='tight', format='png')
		logger.info("训练结果图表已保存至: training_results.png")
		# Close to release figure memory; plt.show() is intentionally not
		# used (headless/IDE backend compatibility) — view the PNG instead.
		plt.close('all')

	def save_model(self, file_path: str):
		"""Save weights, the fitted preprocessor and the config to one file."""
		torch.save({
			'model_state_dict': self.model.state_dict(),
			'preprocessor': self.preprocessor,
			'config': self.config
		}, file_path)
		logger.info(f"模型已保存到: {file_path}")

	def load_model(self, file_path: str):
		"""Load config, preprocessor and weights, rebuilding the network.

		NOTE(review): the checkpoint contains a pickled preprocessor, so
		torch versions that default to weights_only=True may refuse to load
		it — confirm against the deployed torch version.
		"""
		checkpoint = torch.load(file_path, map_location=self.device)
		self.config = checkpoint['config']
		self.preprocessor = checkpoint['preprocessor']

		# Rebuild the architecture before restoring the weights.
		# NOTE(review): assumes every feature column survives preprocessing
		# as exactly one numeric column — verify for schemas with
		# non-numeric, non-object columns (e.g. datetimes).
		input_dim = len(self.preprocessor.feature_columns)
		output_dim = len(self.preprocessor.target_columns)
		self.model = self.build_model(input_dim, output_dim)
		self.model.load_state_dict(checkpoint['model_state_dict'])

		logger.info(f"模型已从 {file_path} 加载")


def main():
	"""Entry point: run the full train / evaluate / optimize pipeline."""
	# Hyper-parameters and training settings for the whole system.
	cfg = {
		'batch_size': 32,
		'learning_rate': 0.001,
		'epochs': 100,
		'hidden_dims': [128, 64, 32],
		'dropout_rate': 0.2,
		'weight_decay': 1e-4,
		'early_stopping_patience': 15,
	}

	planner = ProductionPlanningSystem(cfg)

	try:
		# Load the raw spreadsheet, then build the three data loaders.
		frame = planner.load_data('your_production_data.xlsx')
		train_loader, val_loader, test_loader = planner.prepare_data(frame)

		# Fit the model and measure it on the held-out test split.
		history = planner.train(train_loader, val_loader)
		metrics = planner.evaluate(test_loader)

		# Derive the optimized production ordering from the predictions.
		optimized = planner.optimize_production_sequence(frame)

		# Persist diagnostics and the trained model.
		planner.visualize_results(history, metrics)
		planner.save_model('production_planning_model.pth')

		logger.info("生产计划优化系统构建完成")
	except Exception as e:
		logger.error(f"系统运行错误: {e}")
		raise


# Script entry point: run the pipeline only when executed directly.
if __name__ == "__main__":
	main()
