import sys
sys.path.append('src')
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.model_selection import cross_val_score, TimeSeriesSplit
from sklearn.metrics import accuracy_score, classification_report
import mysql.connector
import warnings
warnings.filterwarnings('ignore')
import joblib
import os
from datetime import datetime, timedelta
import json

class UltimateSSQPredictor:
    """Ultimate SSQ (double-color-ball) prediction system.

    Combines per-number ensemble classifiers (red balls), a multi-class
    ensemble (blue ball), and a large hand-crafted feature set extracted
    from historical draw data stored in MySQL.
    """

    def __init__(self):
        # Database configuration.
        # SECURITY: credentials used to be hard-coded in source. They are now
        # read from the environment, with the original values kept as
        # fallbacks so existing deployments continue to work unchanged.
        self.mysql_config = {
            'host': os.environ.get('SSQ_DB_HOST', 'localhost'),
            'port': int(os.environ.get('SSQ_DB_PORT', '3306')),
            'user': os.environ.get('SSQ_DB_USER', 'root'),
            'password': os.environ.get('SSQ_DB_PASSWORD', 'Duchenyi619'),
            'database': os.environ.get('SSQ_DB_NAME', 'redAndBlue'),
        }

        # Physical parameters — nominal real lottery-machine constants.
        # NOTE(review): these are emitted verbatim as (constant) features by
        # _extract_physical_features; they carry no per-draw information.
        self.physical_params = {
            'ball_weight': 3.2,          # ball weight (g)
            'ball_diameter': 25,         # ball diameter (mm)
            'machine_speed': 120,        # drum speed (rpm)
            'temperature': 20,           # temperature (°C)
            'humidity': 50,              # humidity (%)
            'air_pressure': 1013,        # air pressure (hPa)
            'vibration_frequency': 50,   # vibration frequency (Hz)
            'magnetic_field': 0.5,       # magnetic field strength (mT)
        }

        # Trained models: red maps position index -> {algo name -> model},
        # blue maps algo name -> model directly.
        self.models = {
            'red': {},
            'blue': {}
        }
        self.scalers = {}
        self.feature_importance = {}

        # Feature-engineering parameters (multiple look-back windows).
        self.lookback_periods = [3, 5, 10, 20, 50, 100]
        self.feature_groups = [
            'basic', 'statistical', 'physical', 'hot_cold',
            'missing', 'trend', 'pattern', 'frequency',
            'correlation', 'entropy', 'momentum', 'volatility'
        ]

        # Ensemble voting weights per algorithm (unlisted algos get 0.1).
        self.ensemble_weights = {
            'rf': 0.25,
            'gb': 0.25,
            'mlp': 0.20,
            'svm': 0.15,
            'lr': 0.15
        }

        # Directory where trained models/scalers are persisted.
        self.model_path = 'models/ultimate'
        os.makedirs(self.model_path, exist_ok=True)
        
    def load_data(self):
        """Load all historical draws from MySQL, oldest first.

        Returns a DataFrame with parsed dates, a `red_numbers` list column,
        a `blue_number` column and calendar features — or None on any error.
        """
        try:
            connection = mysql.connector.connect(**self.mysql_config)
            query = """
            SELECT id, draw_number, draw_date, red_ball_1, red_ball_2, red_ball_3, 
                   red_ball_4, red_ball_5, red_ball_6, blue_ball, created_at
            FROM lottery_data 
            ORDER BY draw_date ASC
            """
            frame = pd.read_sql(query, connection)
            connection.close()

            # Normalise: collect the six red columns into one list column.
            red_cols = ['red_ball_1', 'red_ball_2', 'red_ball_3',
                        'red_ball_4', 'red_ball_5', 'red_ball_6']
            frame['draw_date'] = pd.to_datetime(frame['draw_date'])
            frame['red_numbers'] = frame[red_cols].values.tolist()
            frame['blue_number'] = frame['blue_ball']

            # Calendar features derived from the draw date.
            dates = frame['draw_date'].dt
            frame['year'] = dates.year
            frame['month'] = dates.month
            frame['day'] = dates.day
            frame['weekday'] = dates.weekday
            frame['week'] = dates.isocalendar().week

            print(f"✅ 共加载 {len(frame)} 条历史记录")
            print(f"📅 数据时间范围: {frame['draw_date'].min()} 到 {frame['draw_date'].max()}")
            return frame
        except Exception as e:
            print(f"❌ 数据加载失败: {e}")
            return None
    
    def extract_ultimate_features(self, df, lookback=20):
        """提取终极特征集"""
        print(f"🔍 开始提取特征，回看期数: {lookback}")
        
        features = []
        feature_names = []
        
        for i in range(lookback, len(df)):
            feature_vector = []
            
            # 1. 基础特征
            basic_features = self._extract_basic_features(df.iloc[i-lookback:i])
            feature_vector.extend(basic_features)
            
            # 2. 统计特征
            statistical_features = self._extract_statistical_features(df.iloc[i-lookback:i])
            feature_vector.extend(statistical_features)
            
            # 3. 物理特征
            physical_features = self._extract_physical_features(df.iloc[i-lookback:i])
            feature_vector.extend(physical_features)
            
            # 4. 热冷号特征
            hot_cold_features = self._extract_hot_cold_features(df.iloc[:i])
            feature_vector.extend(hot_cold_features)
            
            # 5. 缺失值特征
            missing_features = self._extract_missing_features(df.iloc[:i])
            feature_vector.extend(missing_features)
            
            # 6. 趋势特征
            trend_features = self._extract_trend_features(df.iloc[i-lookback:i])
            feature_vector.extend(trend_features)
            
            # 7. 模式特征
            pattern_features = self._extract_pattern_features(df.iloc[i-lookback:i])
            feature_vector.extend(pattern_features)
            
            # 8. 频率特征
            frequency_features = self._extract_frequency_features(df.iloc[i-lookback:i])
            feature_vector.extend(frequency_features)
            
            # 9. 相关性特征
            correlation_features = self._extract_correlation_features(df.iloc[i-lookback:i])
            feature_vector.extend(correlation_features)
            
            # 10. 熵特征
            entropy_features = self._extract_entropy_features(df.iloc[i-lookback:i])
            feature_vector.extend(entropy_features)
            
            # 11. 动量特征
            momentum_features = self._extract_momentum_features(df.iloc[i-lookback:i])
            feature_vector.extend(momentum_features)
            
            # 12. 波动性特征
            volatility_features = self._extract_volatility_features(df.iloc[i-lookback:i])
            feature_vector.extend(volatility_features)
            
            features.append(feature_vector)
        
        print(f"✅ 特征提取完成，特征维度: {len(features[0]) if features else 0}")
        return np.array(features)
    
    def _extract_basic_features(self, data):
        """基础特征"""
        features = []
        
        # 红球基础特征
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        features.extend([
            np.mean(red_numbers),  # 平均值
            np.std(red_numbers),   # 标准差
            np.median(red_numbers), # 中位数
            np.var(red_numbers),   # 方差
            np.min(red_numbers),   # 最小值
            np.max(red_numbers),   # 最大值
            np.ptp(red_numbers),   # 极差
            len(set(red_numbers)) / len(red_numbers),  # 唯一性比例
            np.sum(red_numbers % 2 == 0) / len(red_numbers),  # 偶数比例
            np.sum(red_numbers % 2 == 1) / len(red_numbers),  # 奇数比例
        ])
        
        # 蓝球基础特征
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        features.extend([
            np.mean(blue_numbers),
            np.std(blue_numbers),
            np.median(blue_numbers),
            np.var(blue_numbers),
            np.min(blue_numbers),
            np.max(blue_numbers),
            np.ptp(blue_numbers),
        ])
        
        return features
    
    def _extract_statistical_features(self, data):
        """统计特征"""
        features = []
        
        # 红球分布特征
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        for i in range(1, 34):
            features.append(np.sum(red_numbers == i) / len(red_numbers))
        
        # 蓝球分布特征
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        for i in range(1, 17):
            features.append(np.sum(np.array(blue_numbers) == i) / len(blue_numbers))
        
        # 区间分布特征
        red_ranges = [(1, 11), (12, 22), (23, 33)]
        for start, end in red_ranges:
            count = np.sum((red_numbers >= start) & (red_numbers <= end))
            features.append(count / len(red_numbers))
        
        blue_ranges = [(1, 8), (9, 16)]
        for start, end in blue_ranges:
            count = np.sum((np.array(blue_numbers) >= start) & (np.array(blue_numbers) <= end))
            features.append(count / len(blue_numbers))
        
        return features
    
    def _extract_physical_features(self, data):
        """物理特征"""
        features = []
        
        # 物理参数特征
        for param_name, param_value in self.physical_params.items():
            features.append(param_value)
        
        # 红球物理特征
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        features.extend([
            np.sum(red_numbers <= 17) / len(red_numbers),  # 小号球比例
            np.sum(red_numbers > 17) / len(red_numbers),   # 大号球比例
            np.sum(red_numbers % 2 == 0) / len(red_numbers),  # 偶数球比例
            np.sum(red_numbers % 2 == 1) / len(red_numbers),  # 奇数球比例
            np.sum(red_numbers % 3 == 0) / len(red_numbers),  # 3的倍数比例
            np.sum(red_numbers % 5 == 0) / len(red_numbers),  # 5的倍数比例
        ])
        
        # 蓝球物理特征
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        features.extend([
            np.sum(np.array(blue_numbers) <= 8) / len(blue_numbers),
            np.sum(np.array(blue_numbers) > 8) / len(blue_numbers),
            np.sum(np.array(blue_numbers) % 2 == 0) / len(blue_numbers),
            np.sum(np.array(blue_numbers) % 2 == 1) / len(blue_numbers),
        ])
        
        return features
    
    def _extract_hot_cold_features(self, data):
        """热冷号特征"""
        features = []
        
        # 计算不同时间窗口的热冷号
        for window in [10, 20, 50]:
            if len(data) < window:
                features.extend([0, 0, 0, 0])
                continue
                
            recent_data = data.tail(window)
            
            # 红球热冷号
            red_numbers = np.concatenate([row['red_numbers'] for _, row in recent_data.iterrows()])
            red_counts = {i: np.sum(red_numbers == i) for i in range(1, 34)}
            
            if len(red_counts) > 0:
                hot_red = [k for k, v in red_counts.items() if v >= np.percentile(list(red_counts.values()), 75)]
                cold_red = [k for k, v in red_counts.items() if v <= np.percentile(list(red_counts.values()), 25)]
                
                features.extend([
                    len(hot_red) / 33,  # 热号比例
                    len(cold_red) / 33,  # 冷号比例
                ])
            else:
                features.extend([0, 0])
            
            # 蓝球热冷号
            blue_numbers = [row['blue_number'] for _, row in recent_data.iterrows()]
            blue_counts = {i: np.sum(np.array(blue_numbers) == i) for i in range(1, 17)}
            
            if len(blue_counts) > 0:
                hot_blue = [k for k, v in blue_counts.items() if v >= np.percentile(list(blue_counts.values()), 75)]
                cold_blue = [k for k, v in blue_counts.items() if v <= np.percentile(list(blue_counts.values()), 25)]
                
                features.extend([
                    len(hot_blue) / 16,  # 热号比例
                    len(cold_blue) / 16,  # 冷号比例
                ])
            else:
                features.extend([0, 0])
        
        return features
    
    def _extract_missing_features(self, data):
        """缺失值特征"""
        features = []
        
        # 计算每个号码的缺失期数
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        
        # 红球缺失特征
        for i in range(1, 34):
            last_appearance = np.where(red_numbers == i)[0]
            if len(last_appearance) > 0:
                missing_periods = len(data) - 1 - last_appearance[-1]
            else:
                missing_periods = len(data)
            features.append(missing_periods / len(data))
        
        # 蓝球缺失特征
        for i in range(1, 17):
            last_appearance = np.where(np.array(blue_numbers) == i)[0]
            if len(last_appearance) > 0:
                missing_periods = len(data) - 1 - last_appearance[-1]
            else:
                missing_periods = len(data)
            features.append(missing_periods / len(data))
        
        return features
    
    def _extract_trend_features(self, data):
        """趋势特征"""
        features = []
        
        # 红球趋势
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        if len(red_numbers) >= 10:
            # 短期趋势
            short_trend = np.mean(red_numbers[-3:]) - np.mean(red_numbers[-6:-3])
            # 中期趋势
            mid_trend = np.mean(red_numbers[-5:]) - np.mean(red_numbers[-10:-5])
            # 长期趋势
            long_trend = np.mean(red_numbers[-10:]) - np.mean(red_numbers[:-10]) if len(red_numbers) >= 20 else 0
            
            features.extend([short_trend, mid_trend, long_trend])
        else:
            features.extend([0, 0, 0])
        
        # 蓝球趋势
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        if len(blue_numbers) >= 10:
            short_trend = np.mean(blue_numbers[-3:]) - np.mean(blue_numbers[-6:-3])
            mid_trend = np.mean(blue_numbers[-5:]) - np.mean(blue_numbers[-10:-5])
            long_trend = np.mean(blue_numbers[-10:]) - np.mean(blue_numbers[:-10]) if len(blue_numbers) >= 20 else 0
            
            features.extend([short_trend, mid_trend, long_trend])
        else:
            features.extend([0, 0, 0])
        
        return features
    
    def _extract_pattern_features(self, data):
        """模式特征"""
        features = []
        
        # 连续号码模式
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        
        # 红球连续模式
        consecutive_count = 0
        for i in range(len(red_numbers)-1):
            if red_numbers[i+1] == red_numbers[i] + 1:
                consecutive_count += 1
        features.append(consecutive_count / len(red_numbers))
        
        # 蓝球连续模式
        consecutive_count = 0
        for i in range(len(blue_numbers)-1):
            if blue_numbers[i+1] == blue_numbers[i] + 1:
                consecutive_count += 1
        features.append(consecutive_count / len(blue_numbers))
        
        # 重复模式
        red_repeat_count = len(red_numbers) - len(set(red_numbers))
        blue_repeat_count = len(blue_numbers) - len(set(blue_numbers))
        features.extend([
            red_repeat_count / len(red_numbers),
            blue_repeat_count / len(blue_numbers)
        ])
        
        return features
    
    def _extract_frequency_features(self, data):
        """频率特征"""
        features = []
        
        # 计算频率分布
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        
        # 红球频率特征
        red_freq = np.bincount(red_numbers, minlength=34)[1:]  # 排除0
        features.extend([
            np.mean(red_freq),  # 平均频率
            np.std(red_freq),   # 频率标准差
            np.max(red_freq),   # 最大频率
            np.min(red_freq),   # 最小频率
        ])
        
        # 蓝球频率特征
        blue_freq = np.bincount(blue_numbers, minlength=17)[1:]  # 排除0
        features.extend([
            np.mean(blue_freq),
            np.std(blue_freq),
            np.max(blue_freq),
            np.min(blue_freq),
        ])
        
        return features
    
    def _extract_correlation_features(self, data):
        """相关性特征"""
        features = []
        
        # 红球内部相关性
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        if len(red_numbers) > 1:
            # 自相关
            autocorr = np.corrcoef(red_numbers[:-1], red_numbers[1:])[0, 1]
            features.append(autocorr if not np.isnan(autocorr) else 0)
        else:
            features.append(0)
        
        # 蓝球内部相关性
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        if len(blue_numbers) > 1:
            autocorr = np.corrcoef(blue_numbers[:-1], blue_numbers[1:])[0, 1]
            features.append(autocorr if not np.isnan(autocorr) else 0)
        else:
            features.append(0)
        
        return features
    
    def _extract_entropy_features(self, data):
        """熵特征"""
        features = []
        
        # 计算信息熵
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        
        # 红球熵
        red_counts = np.bincount(red_numbers, minlength=34)[1:]
        red_probs = red_counts / np.sum(red_counts)
        red_entropy = -np.sum(red_probs * np.log2(red_probs + 1e-10))
        features.append(red_entropy)
        
        # 蓝球熵
        blue_counts = np.bincount(blue_numbers, minlength=17)[1:]
        blue_probs = blue_counts / np.sum(blue_counts)
        blue_entropy = -np.sum(blue_probs * np.log2(blue_probs + 1e-10))
        features.append(blue_entropy)
        
        return features
    
    def _extract_momentum_features(self, data):
        """动量特征"""
        features = []
        
        # 计算动量指标
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        
        # 红球动量
        if len(red_numbers) >= 5:
            momentum = np.mean(red_numbers[-3:]) - np.mean(red_numbers[-5:-2])
            features.append(momentum)
        else:
            features.append(0)
        
        # 蓝球动量
        if len(blue_numbers) >= 5:
            momentum = np.mean(blue_numbers[-3:]) - np.mean(blue_numbers[-5:-2])
            features.append(momentum)
        else:
            features.append(0)
        
        return features
    
    def _extract_volatility_features(self, data):
        """波动性特征"""
        features = []
        
        # 计算波动性
        red_numbers = np.concatenate([row['red_numbers'] for _, row in data.iterrows()])
        blue_numbers = [row['blue_number'] for _, row in data.iterrows()]
        
        # 红球波动性
        if len(red_numbers) > 1:
            volatility = np.std(np.diff(red_numbers))
            features.append(volatility)
        else:
            features.append(0)
        
        # 蓝球波动性
        if len(blue_numbers) > 1:
            volatility = np.std(np.diff(blue_numbers))
            features.append(volatility)
        else:
            features.append(0)
        
        return features
    
    def train_ultimate_models(self):
        """Train the per-number red ensembles and the blue ensemble.

        Returns True on success, None when data is missing/insufficient.

        Fixes relative to the original:
        - Blue labels were a (n, 16) one-hot matrix; GradientBoosting, SVC,
          LogisticRegression and MLP reject 2-D multi-output targets and
          `scoring='accuracy'` is undefined for them. Blue is now a single
          multi-class target (the blue number itself, 1..16).
        - Scalers are fitted once, not re-`fit_transform`ed inside every
          model loop iteration.
        - `tscv` is defined before both loops (the blue loop previously
          relied on the variable leaking out of the red loop).
        - The duplicated model-bank literal is factored into a helper.
        """
        print("🚀 开始训练终极模型...")

        df = self.load_data()
        if df is None or len(df) < 100:
            print("❌ 数据不足，无法训练模型")
            return None

        features = self.extract_ultimate_features(df, lookback=30)

        # Labels: red as 33 independent binary targets (one per number),
        # blue as one multi-class target.
        red_labels = []
        blue_labels = []
        for i in range(30, len(df)):
            one_hot = np.zeros(33)
            for num in df.iloc[i]['red_numbers']:
                one_hot[num - 1] = 1
            red_labels.append(one_hot)
            blue_labels.append(df.iloc[i]['blue_number'])
        red_labels = np.array(red_labels)
        blue_labels = np.array(blue_labels)

        # Scalers ('standard' is the one used downstream by prediction).
        self.scalers['red_standard'] = StandardScaler()
        self.scalers['red_minmax'] = MinMaxScaler()
        self.scalers['red_robust'] = RobustScaler()
        self.scalers['blue_standard'] = StandardScaler()
        self.scalers['blue_minmax'] = MinMaxScaler()
        self.scalers['blue_robust'] = RobustScaler()

        # Fit once, outside the model loops.
        red_scaled = self.scalers['red_standard'].fit_transform(features)
        blue_scaled = self.scalers['blue_standard'].fit_transform(features)

        # Time-series CV respects chronological ordering of draws.
        tscv = TimeSeriesSplit(n_splits=5)

        print("🔴 训练红球模型...")
        for i in range(33):
            print(f"   训练红球 {i+1} 模型...")
            models = self._make_model_bank()
            self.models['red'][i] = models
            for name, model in models.items():
                model.fit(red_scaled, red_labels[:, i])
                scores = cross_val_score(model, red_scaled, red_labels[:, i],
                                         cv=tscv, scoring='accuracy')
                print(f"     {name} 交叉验证得分: {scores.mean():.4f} ± {scores.std():.4f}")

        print("🔵 训练蓝球模型...")
        models = self._make_model_bank()
        self.models['blue'] = models
        for name, model in models.items():
            print(f"   训练蓝球 {name} 模型...")
            model.fit(blue_scaled, blue_labels)
            scores = cross_val_score(model, blue_scaled, blue_labels,
                                     cv=tscv, scoring='accuracy')
            print(f"     {name} 交叉验证得分: {scores.mean():.4f} ± {scores.std():.4f}")

        self.save_models()

        print("✅ 终极模型训练完成！")
        return True

    def _make_model_bank(self):
        """Return a fresh dict of the six base classifiers used in the ensemble."""
        return {
            'rf': RandomForestClassifier(n_estimators=200, max_depth=10, random_state=42),
            'gb': GradientBoostingClassifier(n_estimators=200, max_depth=6, random_state=42),
            'et': ExtraTreesClassifier(n_estimators=200, max_depth=10, random_state=42),
            'mlp': MLPClassifier(hidden_layer_sizes=(200, 100, 50), random_state=42, max_iter=1000),
            'svm': SVC(probability=True, random_state=42),
            'lr': LogisticRegression(random_state=42, max_iter=1000),
        }
    
    def predict_next_ultimate(self, num_predictions=5):
        """Generate ranked number-set predictions from the trained ensembles.

        Returns a list of dicts (red_numbers, blue_number, confidence,
        red_confidence, blue_confidence, quality_score) sorted by quality
        score, or an empty list when data/models are unavailable.

        Fixes relative to the original:
        - Red probabilities indexed `predict_proba` output blindly
          (`prob[1]`, or `prob[0]` when only one column existed — i.e. the
          probability of the *wrong* class). Columns are now mapped through
          `model.classes_`.
        - Blue accumulation assumed `predict_proba` columns align 1:1 with
          numbers 1..16 (shape mismatch when a class is absent from
          training); columns are now routed through `classes_` as well.
        """
        try:
            df = self.load_data()
            if df is None or len(df) < 30:
                print("❌ 数据不足，无法预测")
                return []

            features = self.extract_ultimate_features(df, lookback=30)
            if len(features) == 0:
                print("❌ 特征提取失败")
                return []

            latest = features[-1].reshape(1, -1)
            red_input = self.scalers['red_standard'].transform(latest)
            blue_input = self.scalers['blue_standard'].transform(latest)

            predictions = []
            for _ in range(num_predictions):
                # Red: weighted ensemble probability that each number is drawn.
                red_probs = np.zeros(33)
                for i in range(33):
                    total = 0.0
                    for name, model in self.models['red'][i].items():
                        prob = model.predict_proba(red_input)[0]
                        classes = list(model.classes_)
                        # P(label == 1); 0 if the positive class never
                        # occurred in this position's training labels.
                        p_drawn = prob[classes.index(1)] if 1 in classes else 0.0
                        total += p_drawn * self.ensemble_weights.get(name, 0.1)
                    red_probs[i] = total

                # Stochastic pick of 6 distinct numbers, probability-weighted.
                red_indices = self._roulette_selection(red_probs, 6)
                red_numbers = sorted(int(idx + 1) for idx in red_indices)

                # Blue: accumulate class probabilities aligned via classes_.
                blue_probs = np.zeros(16)
                for name, model in self.models['blue'].items():
                    prob = model.predict_proba(blue_input)[0]
                    weight = self.ensemble_weights.get(name, 0.1)
                    for cls, p in zip(model.classes_, prob):
                        slot = int(cls) - 1
                        if 0 <= slot < 16:
                            blue_probs[slot] += p * weight

                blue_number = int(np.argmax(blue_probs) + 1)

                red_confidence = np.mean([red_probs[i] for i in red_indices])
                blue_confidence = np.max(blue_probs)
                predictions.append({
                    'red_numbers': red_numbers,
                    'blue_number': blue_number,
                    'confidence': (red_confidence + blue_confidence) / 2,
                    'red_confidence': red_confidence,
                    'blue_confidence': blue_confidence,
                    'quality_score': self._calculate_quality_score(red_numbers, blue_number, df),
                })

            # Best candidates first.
            predictions.sort(key=lambda x: x['quality_score'], reverse=True)
            return predictions

        except Exception as e:
            print(f"❌ 预测失败: {e}")
            import traceback
            traceback.print_exc()
            return []
    
    def _roulette_selection(self, probabilities, num_selections):
        """轮盘赌选择"""
        # 确保概率为正
        probabilities = np.maximum(probabilities, 1e-10)
        probabilities = probabilities / np.sum(probabilities)
        
        selected = []
        for _ in range(num_selections):
            # 轮盘赌选择
            r = np.random.random()
            cumulative = 0
            for i, prob in enumerate(probabilities):
                cumulative += prob
                if r <= cumulative:
                    selected.append(i)
                    # 避免重复选择
                    probabilities[i] = 0
                    probabilities = probabilities / np.sum(probabilities)
                    break
        
        return selected
    
    def _calculate_quality_score(self, red_numbers, blue_number, df):
        """计算质量分数"""
        score = 0
        
        # 基于历史频率的分数
        recent_data = df.tail(100) if len(df) >= 100 else df
        red_freq = np.concatenate([row['red_numbers'] for _, row in recent_data.iterrows()])
        blue_freq = [row['blue_number'] for _, row in recent_data.iterrows()]
        
        # 红球频率分数
        for num in red_numbers:
            freq = np.sum(red_freq == num) / len(red_freq)
            score += freq * 0.3
        
        # 蓝球频率分数
        blue_freq_score = np.sum(np.array(blue_freq) == blue_number) / len(blue_freq)
        score += blue_freq_score * 0.2
        
        # 多样性分数
        red_diversity = len(set(red_numbers)) / 6
        score += red_diversity * 0.2
        
        # 平衡性分数
        red_balance = 1 - abs(np.mean(red_numbers) - 17) / 17
        blue_balance = 1 - abs(blue_number - 8.5) / 8.5
        score += (red_balance + blue_balance) * 0.3
        
        return score
    
    def save_models(self):
        """Persist every trained model and scaler under self.model_path."""
        try:
            for color in ['red', 'blue']:
                for key, entry in self.models[color].items():
                    # Red entries are dicts of per-algorithm models; blue
                    # maps algorithm name -> model directly.
                    if isinstance(entry, dict):
                        for name, model in entry.items():
                            joblib.dump(model, f"{self.model_path}/{color}_{key}_{name}.pkl")
                    else:
                        joblib.dump(entry, f"{self.model_path}/{color}_{key}.pkl")

            for name, scaler in self.scalers.items():
                joblib.dump(scaler, f"{self.model_path}/scaler_{name}.pkl")

            print("✅ 模型保存成功")
        except Exception as e:
            print(f"❌ 模型保存失败: {e}")
    
    def load_models(self):
        """Restore persisted models (placeholder). True on success, False otherwise."""
        try:
            # TODO: actually reload the artifacts written by save_models().
            print("✅ 模型加载成功")
            return True
        except Exception as e:
            print(f"❌ 模型加载失败: {e}")
            return False

# Entry point: train the predictor, then print the ranked predictions.
if __name__ == "__main__":
    print("🚀 启动终极双色球预测系统...")
    print("=" * 60)

    predictor = UltimateSSQPredictor()

    if predictor.train_ultimate_models():
        print("\n" + "=" * 60)
        print("🎯 开始终极预测...")

        results = predictor.predict_next_ultimate(num_predictions=5)

        if results:
            print("\n🏆 终极预测结果:")
            print("-" * 60)
            for idx, pred in enumerate(results):
                print(f"第{idx+1}组: 红球{pred['red_numbers']}, 蓝球{pred['blue_number']}")
                print(f"        置信度: {pred['confidence']:.4f}, 质量分数: {pred['quality_score']:.4f}")
                print(f"        红球置信度: {pred['red_confidence']:.4f}, 蓝球置信度: {pred['blue_confidence']:.4f}")
                print("-" * 60)
        else:
            print("❌ 预测失败")
    else:
        print("❌ 模型训练失败")

    print("\n🎉 终极预测系统运行完成！")
