#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
快乐8模型训练脚本
"""
import sys
import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from torchcrf import CRF
from sklearn.preprocessing import StandardScaler
import joblib
import logging
from datetime import datetime

# Add the project root (three directory levels up from this file) to the
# Python path so project-local packages resolve when run as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

# Logging configuration: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Device selection: prefer CUDA when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(f"使用设备: {device}")

class KL8LSTMCRF(nn.Module):
    """LSTM-CRF model for KL8 ("Happy 8") draw prediction.

    Flattens per-draw, per-ball features, runs them through a stacked LSTM,
    and projects the last time step's hidden state to emission scores for
    20 number slots with `num_tags` tags each. Because torchcrf's CRF
    handles one tag sequence at a time, loss and decoding are computed
    independently for each of the 20 slots and then combined.
    """
    def __init__(self, input_size, hidden_size, num_layers, num_tags, dropout=0.2):
        """Args:
            input_size: flattened per-time-step feature size (ball_count * features).
            hidden_size: LSTM hidden state size.
            num_layers: number of stacked LSTM layers.
            num_tags: CRF tag count (81 here: 0 as padding plus numbers 1-80).
            dropout: inter-layer LSTM dropout; only applied when num_layers > 1.
        """
        super(KL8LSTMCRF, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_tags = num_tags
        
        # LSTM layer; nn.LSTM only uses dropout between stacked layers.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, 
                           batch_first=True, dropout=dropout if num_layers > 1 else 0)
        
        # Fully connected projection: one emission vector of num_tags scores
        # for each of the 20 number slots.
        self.fc = nn.Linear(hidden_size, num_tags * 20)  # 20 slots x num_tags tags each
        
        # CRF layer, shared across all 20 slots.
        self.crf = CRF(num_tags, batch_first=True)
        
    def forward(self, x, tags=None):
        """Return the mean CRF loss (training) or decoded tag paths (inference).

        Args:
            x: input of shape (batch_size, seq_len, ball_count, features).
            tags: optional gold labels, shape (batch, 20) or (batch, seq, 20);
                when given, the mean per-slot negative log-likelihood is returned.

        Returns:
            Training: scalar loss averaged over the 20 slots.
            Inference: list of 20 decoded paths, one per slot.
        """
        # Unpack the 4D input (batch_size, seq_len, ball_count, features).
        batch_size, seq_len, ball_count, features = x.shape
        
        # Flatten to 3D (batch_size, seq_len, ball_count * features) for the LSTM.
        x = x.view(batch_size, seq_len, ball_count * features)
        
        # Zero-initialized hidden/cell states on the input's device.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        
        out, _ = self.lstm(x, (h0, c0))
        emissions = self.fc(out)
        
        # Reshape emissions to (batch_size, seq_len, 20, num_tags).
        emissions = emissions.view(batch_size, seq_len, 20, self.num_tags)
        
        # Keep only the last time step for prediction: (batch_size, 1, 20, num_tags).
        emissions = emissions[:, -1, :, :].unsqueeze(1)
        
        if tags is not None:
            # Training mode: compute the loss.
            # Use only the last time step's labels when a label sequence is given.
            if tags.dim() == 3:
                tags = tags[:, -1, :]  # labels of the last time step
            # torchcrf does not support multi-slot outputs directly, so each
            # of the 20 number slots is scored separately.
            loss = 0
            for i in range(20):  # 20 number slots
                emissions_i = emissions[:, :, i, :]  # (batch_size, 1, num_tags)
                tags_i = tags[:, i].unsqueeze(1)  # (batch_size, 1)
                loss += -self.crf(emissions_i, tags_i)
            return loss / 20  # mean loss across slots
        else:
            # Inference mode: decode the best path for each slot independently.
            decoded_paths = []
            for i in range(20):  # 20 number slots
                emissions_i = emissions[:, :, i, :]  # (batch_size, 1, num_tags)
                path = self.crf.decode(emissions_i)
                decoded_paths.append(path)
            # One decoded path list per slot.
            return decoded_paths

def load_kl8_data(file_path):
    """Read the KL8 draw history from a CSV file.

    Args:
        file_path: path to the history CSV; columns 3-22 hold the 20 numbers
            drawn in each period.

    Returns:
        A tuple ``(numbers, df)`` where ``numbers`` is an int array of shape
        (periods, 20) and ``df`` is the full DataFrame, or ``(None, None)``
        when loading fails (the error is logged).
    """
    try:
        df = pd.read_csv(file_path, encoding='utf-8')
        logger.info(f"成功加载数据，共 {len(df)} 期")
        # Columns 3..22 (0-based 2:22) are the 20 drawn numbers per period.
        drawn_columns = df.iloc[:, 2:22]
        return drawn_columns.values.astype(int), df
    except Exception as e:
        logger.error(f"加载数据失败: {e}")
        return None, None

def calculate_connection_weights(numbers):
    """Count adjacency ("connection") relations for each drawn number.

    The 80 KL8 numbers are laid out on an 8x10 grid (rows 1-10, 11-20, ...,
    71-80). For every drawn number, its weight is the count of its up-to-8
    neighbouring cells (horizontal, vertical, and both diagonals) that were
    also drawn in the same period.

    Args:
        numbers: draw numbers array of shape (periods, ball_count); values
            outside 1-80 are ignored and keep weight 0.

    Returns:
        float32 weights array of the same shape as ``numbers``.
    """
    periods, ball_count = numbers.shape
    weights = np.zeros_like(numbers, dtype=np.float32)

    # The eight neighbour directions on the grid: horizontal, vertical,
    # and the two diagonals (replaces eight copy-pasted bounds checks).
    offsets = ((0, -1), (0, 1), (-1, 0), (1, 0),
               (-1, -1), (1, 1), (-1, 1), (1, -1))

    for i in range(periods):
        # Boolean occupancy grid for this period's draw.
        grid = np.zeros((8, 10), dtype=bool)
        for num in numbers[i]:
            if 1 <= num <= 80:
                grid[(num - 1) // 10, (num - 1) % 10] = True

        # Weight of each ball = number of occupied neighbouring cells.
        for j, num in enumerate(numbers[i]):
            if not (1 <= num <= 80):
                continue
            row, col = (num - 1) // 10, (num - 1) % 10
            weights[i, j] = sum(
                1
                for dr, dc in offsets
                if 0 <= row + dr < 8 and 0 <= col + dc < 10 and grid[row + dr, col + dc]
            )

    return weights

def prepare_training_data(numbers, sequence_length=10):
    """Build supervised (window, next-draw) pairs from the draw history.

    Args:
        numbers: draw numbers array of shape (periods, ball_count).
        sequence_length: how many past draws form one input window.

    Returns:
        X: float features of shape (samples, sequence_length, ball_count, 2),
           where the last axis holds [drawn number, connection weight] per
           ball. (The previous docstring claimed a 3-D shape; the output has
           always been 4-D, as consumed by the trainer's 4-way unpack.)
        y: labels of shape (samples, ball_count) — the target draw's numbers.
    """
    periods, ball_count = numbers.shape

    # Per-ball connection weights for every period.
    weights = calculate_connection_weights(numbers)

    # Per-period feature matrix of shape (ball_count, 2): [number, weight].
    features = np.stack(
        [np.column_stack([numbers[i], weights[i]]) for i in range(periods)]
    )

    # Sliding windows: `sequence_length` past draws -> the following draw.
    X = np.array([features[i - sequence_length:i]
                  for i in range(sequence_length, periods)])
    y = np.array(numbers[sequence_length:])

    return X, y

def train_lstm_crf_model(X, y, model_path, scaler_path, epochs=1000, batch_size=32, learning_rate=0.001):
    """Train the LSTM-CRF model and persist it together with its scaler.

    Args:
        X: input features of shape (samples, seq_len, ball_count, features).
        y: integer labels of shape (samples, 20); values clamped to [0, 80].
        model_path: path for the final state dict ('.best' suffix for the
            best-loss checkpoint).
        scaler_path: path for the fitted StandardScaler (joblib format).
        epochs, batch_size, learning_rate: training hyper-parameters.

    Returns:
        (model, scaler): the trained model (final-epoch weights) and the
        fitted feature scaler.
    """
    # Standardize features; the scaler is fit over every (sample, step, ball) row.
    samples, seq_len, ball_count, features = X.shape
    X_reshaped = X.reshape(-1, features)
    
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X_reshaped)
    X_scaled = X_scaled.reshape(samples, seq_len, ball_count, features)
    
    # Persist the scaler so inference can apply the same normalization.
    joblib.dump(scaler, scaler_path)
    logger.info(f"标准化器已保存到: {scaler_path}")
    
    # Move data onto the training device.
    X_tensor = torch.FloatTensor(X_scaled).to(device)
    y_tensor = torch.LongTensor(y).to(device)
    
    # Keep labels inside the CRF tag range [0, 80].
    y_tensor = torch.clamp(y_tensor, 0, 80)
    
    # Batch iterator over (features, labels) pairs.
    dataset = TensorDataset(X_tensor, y_tensor)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    
    # Model hyper-parameters.
    input_size = ball_count * features  # e.g. 20 balls x 2 features per ball
    hidden_size = 128
    num_layers = 2
    num_tags = 81  # numbers 1-80 plus 0 as a padding/special tag
    
    model = KL8LSTMCRF(input_size, hidden_size, num_layers, num_tags).to(device)
    
    # NOTE(review): the DataLoader above already captured y_tensor, so this
    # rebinding cannot affect the batches; with the expected 2-D y it is a
    # no-op either way. Left as-is for byte-compatibility.
    y_tensor = y_tensor[:, -1, :] if y_tensor.dim() == 3 else y_tensor  # (batch_size, 20)
    
    # Optimizer; the loss itself comes from the CRF inside model.forward.
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    
    # Decay the learning rate by 10% every 100 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.9)
    
    # Training loop with early stopping on the average epoch loss.
    model.train()
    best_loss = float('inf')
    patience = 50  # early-stopping patience, in epochs
    patience_counter = 0
    
    for epoch in range(epochs):
        total_loss = 0
        for batch_X, batch_y in dataloader:
            optimizer.zero_grad()
            loss = model(batch_X, batch_y)
            loss.backward()
            
            # Clip gradients to guard against exploding gradients.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            
            optimizer.step()
            total_loss += loss.item()
        
        # Step the LR schedule once per epoch.
        scheduler.step()
        
        avg_loss = total_loss / len(dataloader)
        
        # Early stopping: checkpoint on improvement, stop after `patience`
        # epochs without any.
        if avg_loss < best_loss:
            best_loss = avg_loss
            patience_counter = 0
            # Save the best model seen so far.
            torch.save(model.state_dict(), model_path + '.best')
        else:
            patience_counter += 1
            
        if patience_counter >= patience:
            logger.info(f"Early stopping at epoch {epoch+1}")
            break
        
        # Log every 20 epochs to keep output manageable.
        if (epoch + 1) % 20 == 0:
            logger.info(f"Epoch [{epoch+1}/{epochs}], Loss: {avg_loss:.4f}, LR: {scheduler.get_last_lr()[0]:.6f}")
    
    # Save the final-epoch model (may differ from the '.best' checkpoint).
    torch.save(model.state_dict(), model_path)
    logger.info(f"模型已保存到: {model_path}")
    
    return model, scaler

def main():
    """Entry point: load draw history, build features, train and save the model."""
    # All artifacts live next to this script.
    base_dir = os.path.dirname(__file__)
    data_file = os.path.join(base_dir, 'kl8_history.csv')
    model_file = os.path.join(base_dir, 'kl8_lstm_crf_model.pth')
    scaler_file = os.path.join(base_dir, 'kl8_scaler.pkl')

    # Bail out early when the history file is missing.
    if not os.path.exists(data_file):
        logger.error(f"数据文件不存在: {data_file}")
        return

    # Load the raw draw history; loader returns (None, None) on failure.
    numbers, df = load_kl8_data(data_file)
    if numbers is None:
        return

    # Turn history into (window, next-draw) training pairs.
    logger.info("准备训练数据...")
    X, y = prepare_training_data(numbers)
    logger.info(f"训练数据形状: X={X.shape}, y={y.shape}")

    # Fit the model and persist both model and scaler to disk.
    logger.info("开始训练LSTM-CRF模型...")
    model, scaler = train_lstm_crf_model(X, y, model_file, scaler_file)

    logger.info("模型训练完成!")

# Script entry point: run the full training pipeline when executed directly.
if __name__ == "__main__":
    main()