#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Fixed "Pai Lie 5" (5-digit lottery) model training script.

Ensures data preprocessing is correct: draw values outside the 0-9 range
are folded into that range before training.
"""

import os
import sys
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
import joblib
from collections import Counter

# Locate the project root (two directories up from this script) and put it
# on sys.path so the shared model definition can be imported.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

try:
    from model import LstmCRFModel
except ImportError as e:
    print(f"导入模型类失败: {e}")
    sys.exit(1)

# Configuration: data, model checkpoint, and scaler files live next to this script.
DATA_FILE = os.path.join(current_dir, "plw_history.csv")
MODEL_PATH = os.path.join(current_dir, "plw_model_fixed.pth")
SCALER_PATH = os.path.join(current_dir, "scaler_X_fixed.pkl")
BATCH_SIZE = 16
EPOCHS = 200           # upper bound; early stopping may end training sooner
LEARNING_RATE = 0.001
WINDOW_SIZE = 5        # number of past draws in one input window

class FixedPLWDataset(Dataset):
    """Fixed Pai Lie 5 dataset: parses draw history from CSV and guarantees
    every digit is folded into the 0-9 range (modulo 10).

    Each sample is a sliding window over the draw history:
      features -- (window_size, 5) MinMax-scaled float tensor
      labels   -- (5,) long tensor of the next draw's digits (0-9)
    """

    def __init__(self, csv_file, window_size):
        """Load the CSV and build windowed features/labels.

        csv_file    -- path to a CSV containing a 'draw_numbers' column
        window_size -- number of past draws used as one input window
        """
        self.data = pd.read_csv(csv_file)
        print(f"原始数据形状: {self.data.shape}")
        print(f"数据列: {self.data.columns.tolist()}")

        # Fitted inside preprocess_data; saved to disk after training so
        # inference can apply the identical scaling.
        self.scaler_X = MinMaxScaler()
        self.features, self.labels = self.preprocess_data(window_size)

    def preprocess_data(self, window_size):
        """Parse draw strings, map digits to 0-9, report distributions, and
        build (window, next-draw) training pairs.

        Returns (features, labels):
          features -- float32 tensor (num_samples, window_size, 5), MinMax-scaled
          labels   -- int64 tensor (num_samples, 5), values in 0-9
        Raises ValueError if 'draw_numbers' is missing or no row parses.
        """
        features, labels = [], []

        if 'draw_numbers' not in self.data.columns:
            raise ValueError("数据文件缺少 'draw_numbers' 列")

        # Parse and convert the raw draw strings.
        number_data = []
        original_numbers = []  # pre-conversion digits, kept for the report below

        for _, row in self.data.iterrows():
            numbers_str = row['draw_numbers']
            if isinstance(numbers_str, str):
                # Strip surrounding quotes and split on commas. Blank tokens
                # (empty cell, trailing comma) previously crashed int('');
                # drop them instead.
                tokens = [t.strip() for t in numbers_str.strip('"').split(',')]
                tokens = [t for t in tokens if t]
                if not tokens:
                    continue
                # Take the first 5 numbers; zero-pad rows that are short.
                original_nums = [int(t) for t in tokens[:5]]
                original_nums += [0] * (5 - len(original_nums))
                # Modulo 10 folds any out-of-range value into 0-9.
                converted_nums = [num % 10 for num in original_nums]
                number_data.append(converted_nums)
                original_numbers.extend(original_nums)

        if not number_data:
            # Fail early with a clear message instead of crashing on
            # min()/max() of an empty array below.
            raise ValueError("未能从 'draw_numbers' 列解析出任何数据")

        number_data = np.array(number_data)

        # Report the effect of the conversion.
        print(f"\n📊 数据转换分析:")
        print(f"总期数: {len(number_data)}")
        print(f"转换后数据形状: {number_data.shape}")
        print(f"转换后数据范围: {number_data.min()} - {number_data.max()}")

        # Compare the distribution before and after the modulo conversion.
        original_counter = Counter(original_numbers)
        converted_counter = Counter(number_data.flatten())

        print(f"\n原始数据分布（前10个）:")
        for num, count in original_counter.most_common(10):
            print(f"  {num}: {count}次")

        print(f"\n转换后数据分布:")
        for digit in range(10):
            count = converted_counter.get(digit, 0)
            percentage = (count / len(number_data.flatten())) * 100
            print(f"  数字{digit}: {count}次 ({percentage:.2f}%)")

        # Class-balance check over the digits actually present.
        max_count = max(converted_counter.values())
        min_count = min(converted_counter.values())
        balance_ratio = max_count / min_count if min_count > 0 else float('inf')
        print(f"\n数据平衡性: 最大{max_count}次, 最小{min_count}次, 比例{balance_ratio:.2f}")

        # Show the first few converted rows.
        print(f"\n前5行转换效果:")
        for i in range(min(5, len(number_data))):
            print(f"  第{i+1}行: {number_data[i]}")

        # Build sliding-window features and next-draw labels.
        for i in range(len(number_data) - window_size):
            feature_window = number_data[i:i + window_size]  # (window_size, 5)
            features.append(feature_window)
            next_numbers = number_data[i + window_size]      # (5,)
            labels.append(next_numbers)

        features_np = np.array(features)  # (num_samples, window_size, 5)
        labels_np = np.array(labels)      # (num_samples, 5)

        print(f"\n特征数组形状: {features_np.shape}")
        print(f"标签数组形状: {labels_np.shape}")
        print(f"标签范围: {labels_np.min()} - {labels_np.max()}")

        # MinMax-scale features per position: flatten the window dimension,
        # fit+transform, then restore the original shape.
        original_shape = features_np.shape
        features_scaled = self.scaler_X.fit_transform(
            features_np.reshape(-1, features_np.shape[-1])
        ).reshape(original_shape)

        print(f"缩放后特征范围: {features_scaled.min():.3f} - {features_scaled.max():.3f}")

        return (
            torch.tensor(features_scaled, dtype=torch.float32),
            torch.tensor(labels_np, dtype=torch.long)
        )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]


def train_fixed_model():
    """Train the fixed Pai Lie 5 model with early stopping.

    Reads DATA_FILE, trains an LstmCRFModel, checkpoints the best epoch to
    MODEL_PATH, and saves the fitted feature scaler to SCALER_PATH.
    Returns True on success, False if the data file is missing.
    """
    print("🚀 开始训练修复版排列5模型")
    print("=" * 60)

    if not os.path.exists(DATA_FILE):
        print(f"❌ 数据文件不存在: {DATA_FILE}")
        return False

    # Dataset and loader
    dataset = FixedPLWDataset(DATA_FILE, WINDOW_SIZE)
    dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

    # Device selection
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"\n使用设备: {device}")

    # Model hyperparameters
    input_dim = 5          # 5 digit positions per draw
    hidden_dim = 64        # reduced hidden size
    output_dim = 10        # digits 0-9
    output_seq_length = 5  # 5 output positions

    model = LstmCRFModel(
        input_dim=input_dim,
        hidden_dim=hidden_dim,
        output_dim=output_dim,
        output_seq_length=output_seq_length,
        num_layers=1  # fewer layers
    ).to(device)

    print(f"\n模型参数:")
    print(f"  输入维度: {input_dim}")
    print(f"  隐藏维度: {hidden_dim}")
    print(f"  输出维度: {output_dim}")
    print(f"  输出序列长度: {output_seq_length}")
    print(f"  总参数数: {sum(p.numel() for p in model.parameters())}")

    # Optimizer and LR schedule.
    # NOTE(review): `verbose=True` is deprecated in recent PyTorch releases —
    # confirm against the pinned torch version.
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=10, verbose=True
    )

    # Training loop with early stopping
    best_loss = float('inf')
    patience_counter = 0
    patience = 20

    for epoch in range(EPOCHS):
        model.train()
        total_loss = 0
        num_batches = 0

        for features, labels in dataloader:
            features = features.to(device)
            labels = labels.to(device)

            # All 5 positions are always valid for Pai Lie 5.
            mask = torch.ones_like(labels, dtype=torch.bool)

            # Forward pass: model returns the (CRF) loss when given labels.
            loss = model(features, labels, mask)

            # Backward pass with gradient clipping
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()

            total_loss += loss.item()
            num_batches += 1

        avg_loss = total_loss / num_batches if num_batches > 0 else 0
        scheduler.step(avg_loss)

        # Evaluate a sample prediction every 10 epochs
        if (epoch + 1) % 10 == 0:
            model.eval()
            with torch.no_grad():
                sample_features = dataset.features[:1].to(device)
                # Without labels, the model presumably decodes predictions —
                # format depends on LstmCRFModel, hence the isinstance check.
                predictions = model(sample_features)

                print(f"Epoch {epoch+1}/{EPOCHS}:")
                print(f"  平均损失: {avg_loss:.4f}")
                print(f"  学习率: {optimizer.param_groups[0]['lr']:.6f}")
                if isinstance(predictions, list) and len(predictions) > 0:
                    print(f"  样本预测: {predictions[0]}")
                else:
                    print(f"  样本预测: {predictions}")

        # Early-stopping bookkeeping: checkpoint on improvement, otherwise
        # count toward patience.
        if avg_loss < best_loss:
            best_loss = avg_loss
            patience_counter = 0

            torch.save({
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'loss': avg_loss,
                'config': {
                    'input_dim': input_dim,
                    'hidden_dim': hidden_dim,
                    'output_dim': output_dim,
                    'output_seq_length': output_seq_length
                }
            }, MODEL_PATH)

        else:
            patience_counter += 1
            if patience_counter >= patience:
                print(f"\n早停触发，最佳损失: {best_loss:.4f}")
                break

    # Persist the fitted scaler for inference-time use
    joblib.dump(dataset.scaler_X, SCALER_PATH)

    print(f"\n✅ 训练完成！")
    print(f"模型保存到: {MODEL_PATH}")
    print(f"缩放器保存到: {SCALER_PATH}")

    # Final smoke test on the first few training samples.
    # NOTE(review): this uses the last-epoch weights in memory, not the best
    # checkpoint saved above — confirm that is intended.
    model.eval()
    with torch.no_grad():
        print(f"\n🎯 最终测试:")
        for i in range(3):
            sample_features = dataset.features[i:i+1].to(device)
            predictions = model(sample_features)
            actual = dataset.labels[i].numpy()

            if isinstance(predictions, list) and len(predictions) > 0:
                pred = predictions[0]
            else:
                pred = "未知格式"

            print(f"  样本{i+1} - 实际: {actual}, 预测: {pred}")

    return True


if __name__ == "__main__":
    train_fixed_model()