# -*- coding: utf-8 -*-
"""
Stock-GPT Transformer Training Script
股票预测Transformer模型训练脚本 - Stock-GPT Phase 1
"""

import sys
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error
import joblib
from datetime import datetime
import argparse

# 导入自定义模块
from stock_tokenizer import StockTokenizer
from stock_transformer import create_model


def _optimize_cpu_threads(verbose=False):
    """Tune TensorFlow/OpenMP threading to use every available CPU core.

    Args:
        verbose: when True, also print the detected core count.

    Returns:
        int: the number of CPU cores detected.
    """
    import multiprocessing
    cpu_count = multiprocessing.cpu_count()
    if verbose:
        print(f"🔧 检测到 {cpu_count} 个CPU核心")
    # Thread pools must be configured before TF executes any op.
    tf.config.threading.set_inter_op_parallelism_threads(cpu_count)
    tf.config.threading.set_intra_op_parallelism_threads(cpu_count)
    os.environ['OMP_NUM_THREADS'] = str(cpu_count)
    print(f"⚡ CPU优化已启用: {cpu_count} 线程并行")
    return cpu_count


def configure_gpu(force_cpu=False):
    """Configure the compute device, falling back to CPU when needed.

    Args:
        force_cpu: when True, hide all GPUs and enable CPU threading tweaks.

    Returns:
        bool: True if a GPU was configured successfully, False for CPU mode.
    """
    if force_cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        print("ℹ️ 强制使用CPU模式")
        cpu_count = _optimize_cpu_threads(verbose=True)
        # Mirror the thread counts into TF's dedicated env vars as well.
        os.environ['TF_NUM_INTEROP_THREADS'] = str(cpu_count)
        os.environ['TF_NUM_INTRAOP_THREADS'] = str(cpu_count)
        return False

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if not gpus:
        print("ℹ️ 未检测到GPU，使用CPU模式")
        _optimize_cpu_threads()
        return False

    try:
        # Memory growth must be set before any GPU memory is allocated.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print(f"✅ GPU配置成功！将使用GPU加速")
        return True
    except Exception as e:
        # Fall back to CPU if the GPU could not be configured.
        print(f"⚠️ GPU配置失败: {e}")
        print("   自动回退到CPU模式")
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        _optimize_cpu_threads()
        return False


class StockTransformerTrainer:
    """Trainer for the single-stock Transformer price-prediction model.

    Orchestrates the full pipeline: load and tokenize the indicator CSV,
    prepare padded token sequences plus scaled [open, close] labels, build
    the Transformer, run a custom training loop with early stopping, then
    evaluate and plot the predictions.
    """

    def __init__(self, stock_code: str, frequency: str = 'daily',
                 force_cpu: bool = False, model_params: dict = None):
        """
        Args:
            stock_code: ticker used to derive data/artifact file paths.
            frequency: sampling frequency, 'daily' or 'hourly'.
            force_cpu: when True, disable GPU usage entirely.
            model_params: optional override of the Transformer hyperparameters.
        """
        self.stock_code = stock_code
        self.frequency = frequency
        self.force_cpu = force_cpu

        # Configure compute device (GPU with automatic CPU fallback).
        self.use_gpu = configure_gpu(force_cpu)

        # Artifact paths derived from stock code and frequency.
        self.data_file = f'output/data/stock_data_{stock_code}_{frequency}_with_indicators.csv'
        self.tokenizer_file = f'output/scalers/tokenizer_{stock_code}_{frequency}.pkl'
        self.model_file = f'output/models/transformer_{stock_code}_{frequency}.weights.h5'

        # Default hyperparameters sized down for single-stock training.
        self.model_params = model_params or {
            'd_model': 128,      # reduced to 128 for single-stock training
            'num_heads': 8,
            'num_layers': 4,     # reduced to 4 layers
            'dff': 256,
            'max_position': 500,
            'dropout_rate': 0.1
        }

        self.tokenizer = None   # set by load_and_tokenize_data()
        self.model = None       # set by create_model()

    def load_and_tokenize_data(self):
        """Load the indicator CSV and convert it into token sequences.

        Returns:
            (sequences, df): list of token-id sequences and the raw DataFrame.

        Raises:
            FileNotFoundError: if the preprocessed data file is missing.
        """
        print(f"📊 加载数据: {self.data_file}")

        if not os.path.exists(self.data_file):
            raise FileNotFoundError(f"数据文件 {self.data_file} 不存在！请先运行数据获取和预处理流程。")

        df = pd.read_csv(self.data_file)
        print(f"   数据形状: {df.shape}")

        # Reuse an existing tokenizer when available so the vocabulary
        # stays consistent across runs.
        self.tokenizer = StockTokenizer()
        if os.path.exists(self.tokenizer_file):
            print(f"📖 加载已有tokenizer: {self.tokenizer_file}")
            self.tokenizer.load(self.tokenizer_file)
        else:
            print("🔤 创建新的tokenizer...")

        print("🔄 对数据进行tokenize...")
        sequences = self.tokenizer.encode_stock_data(df, self.stock_code)

        # Persist the (possibly updated) tokenizer for inference scripts.
        os.makedirs('output/scalers', exist_ok=True)
        self.tokenizer.save(self.tokenizer_file)
        print(f"💾 Tokenizer已保存: {self.tokenizer_file}")

        return sequences, df

    def prepare_training_data(self, sequences, df):
        """Pad token sequences and build scaled [open, close] price labels.

        Fits and saves ``self.price_scaler`` as a side effect.

        Returns:
            (X, y_price_scaled, max_len): int32 token matrix, scaled float32
            labels, and the (clamped) sequence length actually used.

        Raises:
            ValueError: if no valid token sequences were produced.
        """
        print("🔧 准备训练数据...")

        if not sequences:
            raise ValueError("没有生成有效的token序列")

        # Clamp the sequence length to the model's positional capacity.
        max_len = max(len(seq) for seq in sequences)
        max_len = min(max_len, self.model_params['max_position'])

        # BUG FIX: was a hard-coded "29,034"; report the real length.
        print(f"   序列最大长度: {max_len}")

        # Truncate long sequences; right-pad short ones with <PAD>.
        pad_id = self.tokenizer.special_tokens['<PAD>']
        padded_sequences = []
        for seq in sequences:
            if len(seq) > max_len:
                padded_sequences.append(seq[:max_len])
            else:
                padded_sequences.append(seq + [pad_id] * (max_len - len(seq)))

        X = np.array(padded_sequences, dtype=np.int32)

        # Regression targets: [open, close] prices.
        y_price = df[['open', 'close']].values.astype(np.float32)

        # RobustScaler is resilient to price outliers (spikes, splits).
        from sklearn.preprocessing import RobustScaler
        self.price_scaler = RobustScaler()
        y_price_scaled = self.price_scaler.fit_transform(y_price)

        # Persist the scaler so predictions can be de-normalized later.
        scaler_file = f'output/scalers/price_scaler_{self.stock_code}_{self.frequency}.pkl'
        os.makedirs('output/scalers', exist_ok=True)
        joblib.dump(self.price_scaler, scaler_file)
        print(f"💾 价格归一化器已保存: {scaler_file}")

        print(f"   输入数据形状: {X.shape}")
        print(f"   价格标签形状: {y_price_scaled.shape}")
        print(f"   词汇表大小: {self.tokenizer.vocab_size}")

        return X, y_price_scaled, max_len

    def create_model(self, vocab_size, max_len):
        """Build the Transformer, smoke-test a forward pass, attach optimizer.

        Args:
            vocab_size: tokenizer vocabulary size.
            max_len: actual padded sequence length (becomes max_position).

        Returns:
            The built model (also stored on ``self.model``).
        """
        print("🏗️ 创建Transformer模型...")

        # The model only needs positions up to the actual sequence length.
        self.model_params['max_position'] = max_len

        self.model = create_model(vocab_size, self.model_params)

        # Build the graph with a dummy batch on the 'price' task head.
        dummy_input = tf.constant([[1, 2, 3, 0, 0, 0]], dtype=tf.int32)
        test_output = self.model(dummy_input, task='price')
        print(f"🔧 模型测试输出形状: {test_output.shape}")  # expected: [batch_size, 2]

        # Optimizer consumed by the custom loop in train_model().
        self.model.optimizer = keras.optimizers.Adam(learning_rate=1e-4)

        total_params = self.model.count_params()
        print(f"📏 模型参数量: {total_params:,} (~{total_params/1e6:.1f}M)")

        return self.model

    def train_model(self, X, y, validation_split=0.2, epochs=50, batch_size=32):
        """Train with a custom GradientTape loop and simple early stopping.

        NOTE: the previous version also instantiated Keras EarlyStopping /
        ReduceLROnPlateau callbacks that were never wired into this custom
        loop; that dead code has been removed.

        Args:
            X: int32 token matrix [n_samples, seq_len].
            y: scaled float32 price labels [n_samples, 2].
            validation_split: trailing fraction held out for validation.
            epochs: maximum number of epochs.
            batch_size: mini-batch size.

        Returns:
            A lightweight History-like object with 'loss'/'val_loss' lists.

        Raises:
            ValueError: if the training set cannot fill a single batch.
        """
        print("🚀 开始训练Transformer模型...")

        # Chronological split (no shuffling) to avoid look-ahead leakage.
        train_size = int((1 - validation_split) * len(X))
        X_train, X_val = X[:train_size], X[train_size:]
        y_train, y_val = y[:train_size], y[train_size:]

        print(f"   训练集大小: {len(X_train)}")
        print(f"   验证集大小: {len(X_val)}")

        num_batches = len(X_train) // batch_size
        # BUG FIX: guard against a zero-batch epoch, which previously caused
        # a ZeroDivisionError (and unbound avg_loss/val_loss further down).
        if num_batches == 0:
            raise ValueError(
                f"训练集太小: {len(X_train)} 条样本不足一个批次 (batch_size={batch_size})")

        loss_history = []
        val_loss_history = []
        best_val_loss = float('inf')
        patience_counter = 0

        for epoch in range(epochs):
            print(f"\nEpoch {epoch+1}/{epochs}")

            epoch_loss = 0.0
            for i in range(num_batches):
                start_idx = i * batch_size
                batch_X = X_train[start_idx:start_idx + batch_size]
                batch_y = y_train[start_idx:start_idx + batch_size]

                with tf.GradientTape() as tape:
                    predictions = self.model(batch_X, task='price', training=True)
                    loss = tf.reduce_mean(tf.keras.losses.mse(batch_y, predictions))

                gradients = tape.gradient(loss, self.model.trainable_variables)
                self.model.optimizer.apply_gradients(
                    zip(gradients, self.model.trainable_variables))

                epoch_loss += loss.numpy()

            avg_loss = epoch_loss / num_batches

            # Full-dataset validation pass (dropout disabled).
            val_predictions = self.model(X_val, task='price', training=False)
            val_loss = tf.reduce_mean(tf.keras.losses.mse(y_val, val_predictions)).numpy()

            loss_history.append(avg_loss)
            val_loss_history.append(val_loss)

            print(f"Loss: {avg_loss:.4f} - Val_loss: {val_loss:.4f}")

            # Manual early stopping with a patience of 10 epochs.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                patience_counter = 0
            else:
                patience_counter += 1
                if patience_counter >= 10:
                    print("Early stopping!")
                    break

        # BUG FIX: record the full loss curves, not only the last epoch.
        history = type('History', (), {
            'history': {'loss': loss_history, 'val_loss': val_loss_history}
        })()

        # Persist the trained weights.
        os.makedirs('output/models', exist_ok=True)
        self.model.save_weights(self.model_file)
        print(f"💾 模型已保存: {self.model_file}")

        return history

    def evaluate_model(self, X, y):
        """Compute MSE/MAE on de-normalized prices and plot predictions.

        Requires prepare_training_data() to have fit ``self.price_scaler``.

        Returns:
            dict with 'mse_open', 'mae_open', 'mse_close', 'mae_close'.
        """
        print("📊 评估模型性能...")

        y_pred = self.model(X, task='price', training=False).numpy()

        # Undo the RobustScaler so metrics are in actual price units.
        y_pred_original = self.price_scaler.inverse_transform(y_pred)
        y_original = self.price_scaler.inverse_transform(y)

        # Column 0 = open price, column 1 = close price.
        mse_open = mean_squared_error(y_original[:, 0], y_pred_original[:, 0])
        mae_open = mean_absolute_error(y_original[:, 0], y_pred_original[:, 0])
        mse_close = mean_squared_error(y_original[:, 1], y_pred_original[:, 1])
        mae_close = mean_absolute_error(y_original[:, 1], y_pred_original[:, 1])

        print(f"📈 开盘价预测 - MSE: {mse_open:.4f}, MAE: {mae_open:.4f}")
        print(f"📈 收盘价预测 - MSE: {mse_close:.4f}, MAE: {mae_close:.4f}")

        self.plot_predictions(y_original, y_pred_original)

        return {
            'mse_open': mse_open, 'mae_open': mae_open,
            'mse_close': mse_close, 'mae_close': mae_close
        }

    def plot_predictions(self, y_true, y_pred):
        """Save a two-panel PNG comparing true vs. predicted prices."""
        print("📊 生成预测对比图...")

        plt.figure(figsize=(15, 10))

        # Show only the last 200 points to keep the chart readable.
        n_show = min(200, len(y_true))
        indices = range(len(y_true) - n_show, len(y_true))

        # Top panel: open price.
        plt.subplot(2, 1, 1)
        plt.plot(indices, y_true[-n_show:, 0], label='真实开盘价', color='blue', alpha=0.7)
        plt.plot(indices, y_pred[-n_show:, 0], label='预测开盘价', color='red', alpha=0.7)
        plt.title(f'{self.stock_code} - Transformer开盘价预测对比 ({self.frequency})')
        plt.xlabel('时间点')
        plt.ylabel('价格')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Bottom panel: close price.
        plt.subplot(2, 1, 2)
        plt.plot(indices, y_true[-n_show:, 1], label='真实收盘价', color='blue', alpha=0.7)
        plt.plot(indices, y_pred[-n_show:, 1], label='预测收盘价', color='red', alpha=0.7)
        plt.title(f'{self.stock_code} - Transformer收盘价预测对比 ({self.frequency})')
        plt.xlabel('时间点')
        plt.ylabel('价格')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()

        # Persist the figure instead of showing it (headless-friendly).
        plot_filename = f'output/plots/transformer_prediction_{self.stock_code}_{self.frequency}.png'
        os.makedirs('output/plots', exist_ok=True)
        plt.savefig(plot_filename, dpi=300, bbox_inches='tight')
        print(f"📊 预测对比图已保存: {plot_filename}")
        plt.close()

def main():
    """CLI entry point: parse arguments and run the full training pipeline."""
    parser = argparse.ArgumentParser(description='训练Stock-GPT Transformer模型')
    parser.add_argument('stock_code', type=str, help='股票代码')
    parser.add_argument('-f', '--frequency', choices=['daily', 'hourly'],
                       default='daily', help='时间周期')
    parser.add_argument('--force-cpu', action='store_true', help='强制使用CPU模式')
    parser.add_argument('--epochs', type=int, default=50, help='训练轮数')
    parser.add_argument('--batch-size', type=int, default=32, help='批次大小')

    args = parser.parse_args()

    print("🚀 Stock-GPT Transformer Training - Phase 1")
    print("="*50)
    print(f"📊 股票代码: {args.stock_code}")
    print(f"⏰ 时间周期: {args.frequency}")
    print(f"💻 CPU模式: {'是' if args.force_cpu else '否'}")
    print(f"🔄 训练轮数: {args.epochs}")
    print("="*50)

    try:
        # Pipeline: data -> tokens -> model -> train -> evaluate.
        trainer = StockTransformerTrainer(
            stock_code=args.stock_code,
            frequency=args.frequency,
            force_cpu=args.force_cpu
        )

        sequences, df = trainer.load_and_tokenize_data()
        X, y, max_len = trainer.prepare_training_data(sequences, df)
        trainer.create_model(trainer.tokenizer.vocab_size, max_len)
        trainer.train_model(X, y, epochs=args.epochs, batch_size=args.batch_size)
        metrics = trainer.evaluate_model(X, y)

        print("\n🎉 训练完成！")
        print("📊 最终指标:")
        for key, value in metrics.items():
            print(f"   {key}: {value:.4f}")

    except Exception as e:
        # BUG FIX: preserve the traceback instead of printing only str(e),
        # so failures deep in the pipeline stay debuggable.
        import traceback
        traceback.print_exc()
        print(f"❌ 训练过程出错: {str(e)}")
        sys.exit(1)

if __name__ == "__main__":
    main()