#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
快乐8随机模型脚本
"""
import sys
import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
import joblib
import logging
from datetime import datetime

# Add the project root (three directory levels up from this file) to the
# Python path so project-local packages resolve when running this file directly.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

# Configure module-wide logging: INFO level, timestamped single-line format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def load_kl8_data(file_path):
    """Load KL8 (Happy-8) historical draw data from a CSV file.

    Args:
        file_path: Path to the CSV file holding the draw history.

    Returns:
        Tuple ``(numbers, df)`` where ``numbers`` is an int array of shape
        (periods, 20) with the drawn numbers and ``df`` is the full
        DataFrame; ``(None, None)`` if loading fails for any reason.
    """
    try:
        df = pd.read_csv(file_path, encoding='utf-8')
        logger.info(f"成功加载数据，共 {len(df)} 期")
        # Columns 2..21 hold the 20 drawn numbers of each period.
        numbers = df.iloc[:, 2:22].values.astype(int)
    except Exception as e:
        # Best-effort loader: report the failure and signal it via None.
        logger.error(f"加载数据失败: {e}")
        return None, None
    return numbers, df

def calculate_connection_weights(numbers):
    """Compute adjacency ("connected number") weights for each drawn number.

    Numbers 1-80 are laid out on the standard KL8 ticket as an 8-row by
    10-column grid (row 0 holds 1-10, row 1 holds 11-20, ...).  For every
    drawn number the weight is the count of its up-to-8 neighbouring grid
    cells (horizontal, vertical, and both diagonals) that were also drawn
    in the same period.

    Args:
        numbers: Drawn-number array of shape (periods, balls), values 1-80.
            Out-of-range values get weight 0 and are ignored as neighbours.

    Returns:
        float32 array of the same shape holding each number's neighbour count.
    """
    periods, _ = numbers.shape
    weights = np.zeros_like(numbers, dtype=np.float32)

    # The eight neighbour offsets: horizontal, vertical, and both diagonals.
    # (Replaces eight duplicated bounds-checked if-statements; the unused
    # position_to_number helper from the original version is removed.)
    offsets = ((-1, -1), (-1, 0), (-1, 1),
               (0, -1),           (0, 1),
               (1, -1),  (1, 0),  (1, 1))

    for i in range(periods):
        # Mark this period's drawn numbers on the 8x10 ticket grid.
        grid = np.zeros((8, 10), dtype=bool)
        for num in numbers[i]:
            if 1 <= num <= 80:
                grid[(num - 1) // 10, (num - 1) % 10] = True

        # Count occupied neighbouring cells for each drawn number.
        for j, num in enumerate(numbers[i]):
            if 1 <= num <= 80:
                row, col = (num - 1) // 10, (num - 1) % 10
                weights[i, j] = sum(
                    1
                    for dr, dc in offsets
                    if 0 <= row + dr < 8
                    and 0 <= col + dc < 10
                    and grid[row + dr, col + dc]
                )

    return weights

def prepare_training_data(numbers, sequence_length=10):
    """Build supervised sliding-window samples from the draw history.

    Args:
        numbers: Drawn-number array of shape (periods, 20).
        sequence_length: Number of past periods that form one input sample.

    Returns:
        X: Input features of shape (samples, sequence_length, 40) — for each
           period, the 20 numbers and their connection weights interleaved
           per ball and flattened into one feature row.
        y: Labels of shape (samples, 20): the target period's numbers.

    Note:
        The original version returned a 4-D ``X`` of shape
        (samples, sequence_length, 20, 2), contradicting its own docstring
        and crashing ``train_random_model``'s 3-axis shape unpacking.  The
        per-period features are now flattened so the documented 3-D
        contract actually holds.
    """
    periods, _ = numbers.shape

    # Per-ball feature pair (number, connection weight), flattened so that
    # every period contributes a single 40-element feature vector.
    weights = calculate_connection_weights(numbers)
    features = np.stack([
        np.column_stack([numbers[i], weights[i]]).ravel()
        for i in range(periods)
    ])

    # Sliding window: the previous `sequence_length` periods predict the next.
    X, y = [], []
    for i in range(sequence_length, periods):
        X.append(features[i - sequence_length:i])
        y.append(numbers[i])

    return np.array(X), np.array(y)

def create_random_model(input_size, hidden_size, num_layers, output_size):
    """Create a small untrained LSTM classifier used for smoke testing.

    Args:
        input_size: Number of features per time step.
        hidden_size: LSTM hidden-state size.
        num_layers: Number of stacked LSTM layers.
        output_size: Number of output classes (logits).

    Returns:
        A freshly initialised ``RandomModel`` (an ``nn.Module``).
    """
    # NOTE: the redundant function-local `import torch.nn as nn` was removed;
    # `nn` is already imported at module level.

    class RandomModel(nn.Module):
        """LSTM followed by a linear head over the last time step's output."""

        def __init__(self, input_size, hidden_size, num_layers, output_size):
            super().__init__()
            self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                                batch_first=True)
            self.fc = nn.Linear(hidden_size, output_size)

        def forward(self, x):
            # x: (batch, seq_len, input_size); classify from the final step.
            out, _ = self.lstm(x)
            return self.fc(out[:, -1, :])

    return RandomModel(input_size, hidden_size, num_layers, output_size)

def train_random_model(X, y, model_path, scaler_path, epochs=5, batch_size=16):
    """Train the random LSTM model and persist model + scaler to disk.

    Args:
        X: Input features of shape (samples, seq_len, features); any extra
           trailing dimensions are flattened into the feature axis.
        y: Labels of shape (samples, 20); drawn numbers in 1-80.
        model_path: Destination file for the model state dict.
        scaler_path: Destination file for the fitted StandardScaler.
        epochs: Number of training epochs.
        batch_size: Mini-batch size.

    Returns:
        Tuple ``(model, scaler)``.
    """
    # Collapse any extra trailing dims so X is (samples, seq_len, features);
    # this tolerates the 4-D (samples, seq, 20, 2) layout produced by older
    # versions of prepare_training_data.
    X = X.reshape(X.shape[0], X.shape[1], -1)
    samples, seq_len, features = X.shape

    # Standardise features (fit on all samples, per-feature).
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X.reshape(-1, features))
    X_scaled = X_scaled.reshape(samples, seq_len, features)

    # Persist the scaler so inference can apply the same normalisation.
    joblib.dump(scaler, scaler_path)
    logger.info(f"标准化器已保存到: {scaler_path}")

    # Wrap tensors in a shuffling mini-batch loader.
    X_tensor = torch.FloatTensor(X_scaled)
    y_tensor = torch.LongTensor(y)
    dataloader = DataLoader(TensorDataset(X_tensor, y_tensor),
                            batch_size=batch_size, shuffle=True)

    input_size = features
    hidden_size = 64
    num_layers = 1
    output_size = 80  # one class per possible number 1-80

    model = create_random_model(input_size, hidden_size, num_layers, output_size)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    model.train()
    for epoch in range(epochs):
        total_loss = 0
        for batch_X, batch_y in dataloader:
            optimizer.zero_grad()
            outputs = model(batch_X)
            # Demo objective: predict only the first drawn number.
            # BUGFIX: numbers are 1-80 but CrossEntropyLoss expects class
            # indices 0-79, so shift by one (the original code raised an
            # out-of-range target error whenever number 80 appeared).
            loss = criterion(outputs, batch_y[:, 0] - 1)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        logger.info(f"Epoch [{epoch+1}/{epochs}], Loss: {total_loss/len(dataloader):.4f}")

    # Persist the trained weights.
    torch.save(model.state_dict(), model_path)
    logger.info(f"模型已保存到: {model_path}")

    return model, scaler

def main():
    """Entry point: load draw history, build samples, train the model."""
    base_dir = os.path.dirname(__file__)
    data_file = os.path.join(base_dir, 'kl8_history.csv')
    model_file = os.path.join(base_dir, 'kl8_random_model.pth')
    scaler_file = os.path.join(base_dir, 'kl8_scaler.pkl')

    # Bail out early if the history file is missing.
    if not os.path.exists(data_file):
        logger.error(f"数据文件不存在: {data_file}")
        return

    numbers, df = load_kl8_data(data_file)
    if numbers is None:
        # Loader already logged the failure.
        return

    logger.info("准备训练数据...")
    X, y = prepare_training_data(numbers)
    logger.info(f"训练数据形状: X={X.shape}, y={y.shape}")

    logger.info("开始训练随机模型...")
    model, scaler = train_random_model(X, y, model_file, scaler_file)

    logger.info("随机模型训练完成!")

# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()