#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Incremental training script for the 3D lottery.

An incremental-training workflow designed specifically for the 3D lottery:
only the most recent slice of the data is used to update the models.
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from sklearn.model_selection import train_test_split
from datetime import datetime
import joblib
import pandas as pd
import argparse

# Environment variable that would force CPU-only execution (intentionally disabled).
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Add the project root directory to sys.path so project-local imports resolve.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

# Force UTF-8 so console output with non-ASCII text encodes correctly.
os.environ['PYTHONIOENCODING'] = 'utf-8'

from model import LstmCRFModel
from algorithms.fc3d_sequence_lstm import FC3DDataProcessor, FC3DSequenceLSTM

# Configuration parameters (all paths resolved relative to the project root).
CONFIG = {
    'data_file': os.path.join(project_root, 'scripts', 'fc3d', 'fc3d_history.csv'),
    'model_save_path': os.path.join(project_root, 'scripts', 'fc3d', 'fc3d_model.pth'),
    'scaler_save_path': os.path.join(project_root, 'scripts', 'fc3d', 'scaler_X.pkl'),
    'lstm_model_path': os.path.join(project_root, 'scripts', 'fc3d', 'fc3d_lstm_model.pth'),
    'transformer_model_path': os.path.join(project_root, 'scripts', 'fc3d', 'fc3d_transformer_model.pth'),
    'sequence_lstm_model_path': os.path.join(project_root, 'scripts', 'fc3d', '3d_sequence_lstm_model.pth'),
    'enhanced_lstm_crf_model_path': os.path.join(project_root, 'scripts', 'fc3d', 'enhanced_lstm_crf_model.pth'),
    'window_size': 10,
    'hidden_dim': 128,
    'num_layers': 2,
    'batch_size': 32,
    'epochs': 100,
    'learning_rate': 0.0001,  # small learning rate for incremental training
    'sequence_lstm_learning_rate': 0.00005,  # sequence LSTM uses an even smaller learning rate
    'enhanced_lstm_crf_learning_rate': 0.00005,  # enhanced LSTM-CRF uses an even smaller learning rate
    'patience': 10,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'incremental_data_ratio': 0.3  # fraction of the most recent data used for incremental training (last 30%)
}

class Incremental3DDataset:
    """Map-style dataset over the most recent slice of the 3D lottery history.

    Loads the full CSV, keeps only the trailing ``incremental_ratio``
    fraction of rows, and derives two kinds of training samples from it:
    sliding-window sequences for the LSTM-CRF model, and processor-built
    sequences (including zone-transition features) for the sequence LSTM.
    """

    def __init__(self, csv_file, window_size, incremental_ratio=0.001):
        """
        :param csv_file: path to the CSV history data file
        :param window_size: length of the sliding time window
        :param incremental_ratio: trailing fraction of rows kept for training
        """
        self.csv_file = csv_file
        self.window_size = window_size
        self.incremental_ratio = incremental_ratio
        self.scaler = None  # never fitted here; get_scaler() therefore yields None

        # Load the complete history, then keep only the trailing slice.
        self.data = pd.read_csv(csv_file)
        print("原始数据形状: {}".format(self.data.shape))

        n_rows = len(self.data)
        start = int(n_rows * (1 - incremental_ratio))
        self.incremental_data = self.data.iloc[start:].copy()
        print("增量训练数据形状: {}".format(self.incremental_data.shape))

        self.preprocess_data()

        # Sliding-window samples consumed by the LSTM-CRF model.
        self.features, self.labels = self.prepare_sequences()

        # Processor-derived samples (with zone-transition features) for the sequence LSTM.
        self.sequence_features, self.sequence_labels = self.prepare_sequence_lstm_data()

    def preprocess_data(self):
        """Extract the three drawn digits per row into ``self.number_data``."""
        # Validate that the expected columns are present before parsing.
        missing = [c for c in ('num_1', 'num_2', 'num_3')
                   if c not in self.incremental_data.columns]
        if missing:
            raise ValueError("数据文件缺少 '{}' 列".format(missing[0]))

        rows = []
        for _, record in self.incremental_data.iterrows():
            try:
                # Three digits per draw, wrapped into the 0-9 range via modulo.
                digits = [int(record[f'num_{k}']) % 10 for k in (1, 2, 3)]
            except (ValueError, KeyError):
                # Any malformed row falls back to the default [0, 0, 0].
                digits = [0, 0, 0]
            rows.append(digits)

        self.number_data = np.array(rows)
        print("预处理后数据形状: {}".format(self.number_data.shape))
        print("数据范围: {} - {}".format(self.number_data.min(), self.number_data.max()))

    def prepare_sequences(self):
        """Build (window, next-draw) training pairs for the LSTM-CRF model."""
        n_samples = len(self.number_data) - self.window_size

        # Each feature is a full window of draws; its label is the draw right after it.
        windows = [self.number_data[i:i + self.window_size] for i in range(n_samples)]
        targets = [self.number_data[i + self.window_size] for i in range(n_samples)]

        features_np = np.array(windows)  # shape: (num_samples, window_size, 3)
        labels_np = np.array(targets)    # shape: (num_samples, 3)

        print("LSTM-CRF特征形状: {}".format(features_np.shape))
        print("LSTM-CRF标签形状: {}".format(labels_np.shape))

        return features_np, labels_np

    def prepare_sequence_lstm_data(self):
        """Build sequence-LSTM samples (zone-transition features included)."""
        # Delegate feature engineering to the shared FC3D processor.
        processor = FC3DDataProcessor(self.csv_file, self.window_size)
        X, y = processor.load_and_process_data()

        # Keep only the trailing incremental fraction, mirroring __init__.
        cut = int(len(X) * (1 - self.incremental_ratio))
        X_tail = X[cut:]
        y_tail = y[cut:]

        print("序列LSTM特征形状: {}".format(X_tail.shape))
        print("序列LSTM标签形状: {}".format(y_tail.shape))

        return X_tail, y_tail

    def get_scaler(self):
        """Return the data scaler (always None — no scaler is fitted here)."""
        return self.scaler

    def __len__(self):
        # Number of (window, label) pairs available for the LSTM-CRF model.
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]

def load_existing_model(model_path, input_dim, hidden_dim, output_dim, output_seq_length, model_type="lstm_crf"):
    """Load a previously trained model for incremental training.

    Returns the restored model in eval mode, or None when the checkpoint is
    missing or fails to load (callers then construct a fresh model instead).

    :param model_path: checkpoint file path
    :param input_dim: model input dimension
    :param hidden_dim: hidden-layer dimension
    :param output_dim: model output dimension
    :param output_seq_length: length of the predicted output sequence
    :param model_type: one of "lstm_crf", "sequence_lstm" or "enhanced_lstm_crf"
    :return: the loaded model, or None
    """
    if not os.path.exists(model_path):
        print("[WARN] 模型文件不存在: {}，将创建新模型".format(model_path))
        return None

    try:
        # Always restore onto CPU; the caller moves the model to its device.
        checkpoint = torch.load(model_path, map_location=torch.device('cpu'))

        if model_type == "lstm_crf":
            # Infer the layer count from the checkpoint's weight names:
            # a second layer exists iff 'lstm.weight_ih_l1' appears in the state dict.
            has_second_layer = ('lstm.weight_ih_l1' in checkpoint.get('model', {})
                                or 'lstm.weight_ih_l1' in checkpoint.get('lstm_model', {}))
            model = LstmCRFModel(input_dim, hidden_dim, output_dim, output_seq_length,
                                 num_layers=2 if has_second_layer else 1)
            state_keys = ('lstm_model', 'model')
        elif model_type == "enhanced_lstm_crf":
            from algorithms.enhanced_lstm_crf import EnhancedLstmCRFModel

            if 'model_config' in checkpoint:
                # Rebuild exactly as configured when the checkpoint was written.
                model = EnhancedLstmCRFModel(**checkpoint['model_config'])
            else:
                # No stored config: fall back to the default architecture.
                model = EnhancedLstmCRFModel(
                    input_dim=input_dim,
                    hidden_dim=hidden_dim,
                    output_dim=output_dim,
                    output_seq_length=output_seq_length,
                    num_layers=2,
                    dropout=0.2
                )
            state_keys = ('model_state_dict', 'model')
        else:  # sequence_lstm
            # The sequence LSTM always uses a fixed 3-layer architecture.
            model = FC3DSequenceLSTM(
                input_dim=input_dim,
                hidden_dim=hidden_dim,
                num_layers=3,
                dropout=0.3
            )
            state_keys = ('model_state_dict', 'model')

        # Restore weights from the first known key present in the checkpoint.
        for key in state_keys:
            if key in checkpoint:
                model.load_state_dict(checkpoint[key])
                break

        model.eval()
        print("[SUCCESS] 成功加载现有{}模型: {}".format(model_type, model_path))
        return model
    except Exception as e:
        print("[WARN] 加载现有{}模型失败: {}，将创建新模型".format(model_type, e))
        return None

def incremental_train_both_models():
    """Incrementally train the three FC3D models on the most recent data.

    Trains (1) the LSTM-CRF model, (2) the sequence LSTM model and (3) the
    enhanced LSTM-CRF model, using paths and hyper-parameters from the
    module-level CONFIG dict. Each model's checkpoint is re-saved whenever
    its average epoch loss improves; an ensemble checkpoint is written at
    the end.

    :return: True if training completed, False on any error.
    """
    print("开始3D三模型增量训练")
    print("=" * 60)
    
    # Abort early if the history CSV is missing.
    if not os.path.exists(CONFIG['data_file']):
        print("[ERROR] 数据文件不存在: {}".format(CONFIG['data_file']))
        return False
    
    try:
        # 1. Load the incremental training data (trailing slice of the history).
        print("加载增量训练数据...")
        dataset = Incremental3DDataset(CONFIG['data_file'], CONFIG['window_size'], CONFIG['incremental_data_ratio'])
        
        if len(dataset) == 0:
            print("[WARN] 没有足够的数据用于训练")
            return False
        
        # DataLoader for the LSTM-CRF model (window features / next-draw labels).
        train_loader = DataLoader(dataset, batch_size=CONFIG['batch_size'], shuffle=True)
        
        # DataLoader for the sequence LSTM (features include zone-transition columns).
        sequence_train_dataset = TensorDataset(
            torch.FloatTensor(dataset.sequence_features), 
            torch.LongTensor(dataset.sequence_labels)
        )
        sequence_train_loader = DataLoader(sequence_train_dataset, batch_size=CONFIG['batch_size'], shuffle=True)
        
        # 2. Create or load the existing models.
        print("创建或加载现有模型...")
        device = torch.device(CONFIG['device'])
        print("[DEVICE] 使用设备: {}".format(device))
        
        # LSTM-CRF model parameters.
        lstm_crf_input_dim = dataset.features.shape[-1]  # 3 number positions
        lstm_crf_output_dim = 10  # digits 0-9
        lstm_crf_output_seq_length = 3  # 3 digit positions
        
        # Sequence LSTM model parameters.
        sequence_lstm_input_dim = dataset.sequence_features.shape[-1]  # 6 features (3 digits + 3 zone transitions)
        sequence_lstm_output_dim = 10  # digits 0-9
        sequence_lstm_output_seq_length = 3  # 3 digit positions
        
        # Enhanced LSTM-CRF model parameters.
        # NOTE(review): input_dim is taken from dataset.features (3 columns), but the
        # enhanced model is trained below on batches from sequence_train_loader, whose
        # feature dimension is dataset.sequence_features.shape[-1] — confirm these two
        # dimensions are intended to match.
        enhanced_lstm_crf_input_dim = dataset.features.shape[-1]  # 3 number positions
        enhanced_lstm_crf_output_dim = 10  # digits 0-9
        enhanced_lstm_crf_output_seq_length = 3  # 3 digit positions
        
        # Load the existing LSTM-CRF model (or None if unavailable).
        print("加载LSTM-CRF模型...")
        lstm_crf_model = load_existing_model(
            CONFIG['lstm_model_path'], 
            lstm_crf_input_dim, 
            CONFIG['hidden_dim'], 
            lstm_crf_output_dim, 
            lstm_crf_output_seq_length,
            "lstm_crf"
        )
        if lstm_crf_model is None:
            # Build a fresh model when no usable checkpoint exists.
            lstm_crf_model = LstmCRFModel(
                lstm_crf_input_dim, 
                CONFIG['hidden_dim'], 
                lstm_crf_output_dim, 
                lstm_crf_output_seq_length, 
                num_layers=CONFIG['num_layers']
            )
        
        lstm_crf_model = lstm_crf_model.to(device)
        
        # Load the existing sequence LSTM model (or None if unavailable).
        print("加载序列LSTM模型...")
        sequence_lstm_model = load_existing_model(
            CONFIG['sequence_lstm_model_path'], 
            sequence_lstm_input_dim, 
            CONFIG['hidden_dim'], 
            sequence_lstm_output_dim, 
            sequence_lstm_output_seq_length,
            "sequence_lstm"
        )
        if sequence_lstm_model is None:
            # Build a fresh model when no usable checkpoint exists.
            sequence_lstm_model = FC3DSequenceLSTM(
                input_dim=sequence_lstm_input_dim,
                hidden_dim=CONFIG['hidden_dim'],
                num_layers=3,
                dropout=0.3
            )
        
        sequence_lstm_model = sequence_lstm_model.to(device)
        
        # Load the existing enhanced LSTM-CRF model (or None if unavailable).
        print("加载增强版LSTM-CRF模型...")
        enhanced_lstm_crf_model = load_existing_model(
            CONFIG['enhanced_lstm_crf_model_path'], 
            enhanced_lstm_crf_input_dim, 
            CONFIG['hidden_dim'], 
            enhanced_lstm_crf_output_dim, 
            enhanced_lstm_crf_output_seq_length,
            "enhanced_lstm_crf"
        )
        if enhanced_lstm_crf_model is None:
            # Build a fresh model when no usable checkpoint exists.
            from algorithms.enhanced_lstm_crf import EnhancedLstmCRFModel
            enhanced_lstm_crf_model = EnhancedLstmCRFModel(
                input_dim=enhanced_lstm_crf_input_dim,
                hidden_dim=CONFIG['hidden_dim'],
                output_dim=enhanced_lstm_crf_output_dim,
                output_seq_length=enhanced_lstm_crf_output_seq_length,
                num_layers=2,
                dropout=0.2
            )
        
        enhanced_lstm_crf_model = enhanced_lstm_crf_model.to(device)
        
        # 3. Define optimizers and loss functions.
        # LSTM-CRF optimizer + plateau scheduler.
        lstm_crf_optimizer = optim.AdamW(lstm_crf_model.parameters(), lr=CONFIG['learning_rate'], weight_decay=1e-5)
        lstm_crf_scheduler = optim.lr_scheduler.ReduceLROnPlateau(lstm_crf_optimizer, mode='min', factor=0.5, patience=5)
        
        # Sequence LSTM optimizer, scheduler and loss.
        sequence_lstm_optimizer = optim.AdamW(sequence_lstm_model.parameters(), lr=CONFIG['sequence_lstm_learning_rate'], weight_decay=1e-5)
        sequence_lstm_scheduler = optim.lr_scheduler.ReduceLROnPlateau(sequence_lstm_optimizer, mode='min', factor=0.5, patience=5)
        sequence_lstm_criterion = nn.CrossEntropyLoss()  # plain cross-entropy per digit position
        
        # Enhanced LSTM-CRF optimizer + plateau scheduler.
        enhanced_lstm_crf_optimizer = optim.AdamW(enhanced_lstm_crf_model.parameters(), lr=CONFIG['enhanced_lstm_crf_learning_rate'], weight_decay=1e-5)
        enhanced_lstm_crf_scheduler = optim.lr_scheduler.ReduceLROnPlateau(enhanced_lstm_crf_optimizer, mode='min', factor=0.5, patience=5)
        
        # 4. Incremental training loop for all three models.
        print("[RUN] 开始三模型增量训练...")
        best_lstm_crf_loss = float('inf')
        best_sequence_lstm_loss = float('inf')
        best_enhanced_lstm_crf_loss = float('inf')
        lstm_crf_patience_counter = 0
        sequence_lstm_patience_counter = 0
        enhanced_lstm_crf_patience_counter = 0
        
        for epoch in range(CONFIG['epochs']):
            # Switch all three models to training mode.
            lstm_crf_model.train()
            sequence_lstm_model.train()
            enhanced_lstm_crf_model.train()
            
            total_lstm_crf_loss = 0
            total_sequence_lstm_loss = 0
            total_enhanced_lstm_crf_loss = 0
            num_lstm_crf_batches = 0
            num_sequence_lstm_batches = 0
            num_enhanced_lstm_crf_batches = 0
            
            # LSTM-CRF training pass.
            for features, labels in train_loader:
                features = features.float().to(device)
                labels = labels.long().to(device)
                
                # All-True mask: every 3D label position is valid.
                mask = torch.ones_like(labels, dtype=torch.bool)
                
                # Forward pass (the model returns the loss directly).
                lstm_crf_optimizer.zero_grad()
                loss = lstm_crf_model(features, labels, mask)
                
                # Backward pass with gradient clipping for stability.
                loss.backward()
                torch.nn.utils.clip_grad_norm_(lstm_crf_model.parameters(), max_norm=1.0)
                lstm_crf_optimizer.step()
                
                total_lstm_crf_loss += loss.item()
                num_lstm_crf_batches += 1
            
            # Sequence LSTM training pass.
            for features, labels in sequence_train_loader:
                features = features.float().to(device)
                labels = labels.long().to(device)
                
                # Forward pass (the model returns per-position logits).
                sequence_lstm_optimizer.zero_grad()
                predictions = sequence_lstm_model(features)
                
                # Sum an independent cross-entropy loss for each of the 3 positions.
                loss = 0
                for pos in range(3):
                    pos_loss = sequence_lstm_criterion(predictions[:, pos, :], labels[:, pos])
                    loss += pos_loss
                
                # Backward pass with gradient clipping for stability.
                loss.backward()
                torch.nn.utils.clip_grad_norm_(sequence_lstm_model.parameters(), max_norm=1.0)
                sequence_lstm_optimizer.step()
                
                total_sequence_lstm_loss += loss.item()
                num_sequence_lstm_batches += 1
            
            # Enhanced LSTM-CRF training pass (note: iterates sequence_train_loader,
            # i.e. the zone-transition feature set — see NOTE(review) above).
            for features, labels in sequence_train_loader:
                features = features.float().to(device)
                labels = labels.long().to(device)
                
                # All-True mask: every 3D label position is valid.
                mask = torch.ones_like(labels, dtype=torch.bool)
                
                # Forward pass (the model returns the loss directly).
                enhanced_lstm_crf_optimizer.zero_grad()
                loss = enhanced_lstm_crf_model(features, target=labels, mask=mask)
                
                # Backward pass with gradient clipping for stability.
                loss.backward()
                torch.nn.utils.clip_grad_norm_(enhanced_lstm_crf_model.parameters(), max_norm=1.0)
                enhanced_lstm_crf_optimizer.step()
                
                total_enhanced_lstm_crf_loss += loss.item()
                num_enhanced_lstm_crf_batches += 1
            
            avg_lstm_crf_loss = total_lstm_crf_loss / num_lstm_crf_batches if num_lstm_crf_batches > 0 else 0
            avg_sequence_lstm_loss = total_sequence_lstm_loss / num_sequence_lstm_batches if num_sequence_lstm_batches > 0 else 0
            avg_enhanced_lstm_crf_loss = total_enhanced_lstm_crf_loss / num_enhanced_lstm_crf_batches if num_enhanced_lstm_crf_batches > 0 else 0
            
            # Step the plateau schedulers on the epoch-average losses.
            lstm_crf_scheduler.step(avg_lstm_crf_loss)
            sequence_lstm_scheduler.step(avg_sequence_lstm_loss)
            enhanced_lstm_crf_scheduler.step(avg_enhanced_lstm_crf_loss)
            
            # Report progress every 10 epochs.
            if (epoch + 1) % 10 == 0:
                print("Epoch {:3d}/{}: LSTM-CRF Loss: {:.4f}, 序列LSTM Loss: {:.4f}, 增强版LSTM-CRF Loss: {:.4f}, LR: {:.6f}/{:.6f}/{:.6f}".format(
                    epoch+1, CONFIG['epochs'], avg_lstm_crf_loss, avg_sequence_lstm_loss, avg_enhanced_lstm_crf_loss,
                    lstm_crf_optimizer.param_groups[0]['lr'], sequence_lstm_optimizer.param_groups[0]['lr'], enhanced_lstm_crf_optimizer.param_groups[0]['lr']))
            
            # Early-stopping check (LSTM-CRF).
            # NOTE(review): once a model's patience is exceeded, its "[STOP]" message
            # prints on every following epoch and the model keeps training until all
            # three models have triggered — confirm this is the intended behavior.
            if avg_lstm_crf_loss < best_lstm_crf_loss:
                best_lstm_crf_loss = avg_lstm_crf_loss
                lstm_crf_patience_counter = 0
                
                # Persist the best LSTM-CRF checkpoint so far.
                torch.save({
                    'model': lstm_crf_model.state_dict(),
                    'epoch': epoch,
                    'loss': avg_lstm_crf_loss,
                    'config': CONFIG
                }, CONFIG['lstm_model_path'])
                
                print("[SAVE] 保存最佳LSTM-CRF模型 (Epoch {}, Loss: {:.4f})".format(epoch+1, avg_lstm_crf_loss))
                
            else:
                lstm_crf_patience_counter += 1
                if lstm_crf_patience_counter >= CONFIG['patience']:
                    print("[STOP] LSTM-CRF早停触发 (Patience: {})".format(CONFIG['patience']))
            
            # Early-stopping check (sequence LSTM).
            if avg_sequence_lstm_loss < best_sequence_lstm_loss:
                best_sequence_lstm_loss = avg_sequence_lstm_loss
                sequence_lstm_patience_counter = 0
                
                # Persist the best sequence LSTM checkpoint so far.
                torch.save({
                    'model_state_dict': sequence_lstm_model.state_dict(),
                    'epoch': epoch,
                    'loss': avg_sequence_lstm_loss,
                    'config': CONFIG
                }, CONFIG['sequence_lstm_model_path'])
                
                print("[SAVE] 保存最佳序列LSTM模型 (Epoch {}, Loss: {:.4f})".format(epoch+1, avg_sequence_lstm_loss))
                
            else:
                sequence_lstm_patience_counter += 1
                if sequence_lstm_patience_counter >= CONFIG['patience']:
                    print("[STOP] 序列LSTM早停触发 (Patience: {})".format(CONFIG['patience']))
            
            # Early-stopping check (enhanced LSTM-CRF).
            if avg_enhanced_lstm_crf_loss < best_enhanced_lstm_crf_loss:
                best_enhanced_lstm_crf_loss = avg_enhanced_lstm_crf_loss
                enhanced_lstm_crf_patience_counter = 0
                
                # Persist the best enhanced LSTM-CRF checkpoint, including the
                # architecture config so load_existing_model can rebuild it exactly.
                torch.save({
                    'model_state_dict': enhanced_lstm_crf_model.state_dict(),
                    'model_config': {
                        'input_dim': enhanced_lstm_crf_input_dim,
                        'hidden_dim': CONFIG['hidden_dim'],
                        'output_dim': enhanced_lstm_crf_output_dim,
                        'output_seq_length': enhanced_lstm_crf_output_seq_length,
                        'num_layers': 2,
                        'dropout': 0.2
                    },
                    'epoch': epoch,
                    'loss': avg_enhanced_lstm_crf_loss,
                    'config': CONFIG
                }, CONFIG['enhanced_lstm_crf_model_path'])
                
                print("[SAVE] 保存最佳增强版LSTM-CRF模型 (Epoch {}, Loss: {:.4f})".format(epoch+1, avg_enhanced_lstm_crf_loss))
                
            else:
                enhanced_lstm_crf_patience_counter += 1
                if enhanced_lstm_crf_patience_counter >= CONFIG['patience']:
                    print("[STOP] 增强版LSTM-CRF早停触发 (Patience: {})".format(CONFIG['patience']))
            
            # End training only when all three models have triggered early stopping.
            if (lstm_crf_patience_counter >= CONFIG['patience'] and 
                sequence_lstm_patience_counter >= CONFIG['patience'] and
                enhanced_lstm_crf_patience_counter >= CONFIG['patience']):
                print("[STOP] 三模型早停触发，结束训练")
                break
        
        # 5. Save the ensemble checkpoint.
        # NOTE(review): the ensemble config references transformer_model_path, but no
        # transformer model is trained in this script — verify the consumer of this
        # checkpoint handles a stale/absent transformer checkpoint.
        print("[SAVE] 保存集成模型...")
        ensemble_config = {
            "lstm_model_path": CONFIG['lstm_model_path'],
            "transformer_model_path": CONFIG['transformer_model_path'],
            "weights": [0.6, 0.4]  # LSTM weight and Transformer weight
        }
        
        torch.save({
            "ensemble_config": ensemble_config,
            "lstm_model": lstm_crf_model.state_dict()
        }, CONFIG['model_save_path'])
        
        print("[SUCCESS] 三模型增量训练完成！")
        print("集成模型保存到: {}".format(CONFIG['model_save_path']))
        print("LSTM-CRF模型保存到: {}".format(CONFIG['lstm_model_path']))
        print("序列LSTM模型保存到: {}".format(CONFIG['sequence_lstm_model_path']))
        print("增强版LSTM-CRF模型保存到: {}".format(CONFIG['enhanced_lstm_crf_model_path']))
        print("LSTM-CRF最佳损失: {:.4f}".format(best_lstm_crf_loss))
        print("序列LSTM最佳损失: {:.4f}".format(best_sequence_lstm_loss))
        print("增强版LSTM-CRF最佳损失: {:.4f}".format(best_enhanced_lstm_crf_loss))
        
        return True
        
    except Exception as e:
        # Catch-all so the script reports failure via the return value / exit code.
        print("[ERROR] 三模型增量训练过程出错: {}".format(e))
        import traceback
        print(traceback.format_exc())
        return False

def main():
    """Parse command-line arguments, apply them to CONFIG, and run training.

    Exits the process with status 0 on success and 1 on failure.
    """
    parser = argparse.ArgumentParser(description='3D三模型增量训练脚本')
    parser.add_argument('--ratio', type=float, default=0.3, help='增量数据比例 (默认: 0.3)')
    parser.add_argument('--epochs', type=int, default=100, help='训练轮数 (默认: 100)')
    parser.add_argument('--lr', type=float, default=0.0001, help='LSTM-CRF学习率 (默认: 0.0001)')
    parser.add_argument('--sequence-lr', type=float, default=0.00005, help='序列LSTM学习率 (默认: 0.00005)')
    parser.add_argument('--enhanced-lr', type=float, default=0.00005, help='增强版LSTM-CRF学习率 (默认: 0.00005)')

    args = parser.parse_args()

    # Push the CLI overrides into the shared configuration dict.
    CONFIG.update({
        'incremental_data_ratio': args.ratio,
        'epochs': args.epochs,
        'learning_rate': args.lr,
        'sequence_lstm_learning_rate': args.sequence_lr,
        'enhanced_lstm_crf_learning_rate': args.enhanced_lr,
    })

    # Echo the effective configuration before training starts.
    print("   三模型增量训练配置:")
    print("   增量数据比例: {}".format(CONFIG['incremental_data_ratio']))
    print("   训练轮数: {}".format(CONFIG['epochs']))
    print("   LSTM-CRF学习率: {}".format(CONFIG['learning_rate']))
    print("   序列LSTM学习率: {}".format(CONFIG['sequence_lstm_learning_rate']))
    print("   增强版LSTM-CRF学习率: {}".format(CONFIG['enhanced_lstm_crf_learning_rate']))
    print("   批大小: {}".format(CONFIG['batch_size']))
    print("   窗口大小: {}".format(CONFIG['window_size']))

    # Run training and translate the boolean result into an exit code.
    if incremental_train_both_models():
        print("[SUCCESS] 3D三模型增量训练成功！")
        sys.exit(0)
    print("[ERROR] 3D三模型增量训练失败！")
    sys.exit(1)

if __name__ == "__main__":
    main()