# -*- coding: utf-8 -*-
"""
Stock Transformer Model - 股票预测专用Transformer架构
基于GPT架构，专为金融时间序列数据优化
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import math
from typing import Dict, List, Tuple, Optional
import os


class MultiHeadAttention(layers.Layer):
    """Multi-head attention layer for stock time-series inputs.

    Projects queries/keys/values into ``num_heads`` subspaces of size
    ``d_model // num_heads``, runs scaled dot-product attention per head,
    concatenates the heads, and finishes with a linear projection + dropout.
    """

    def __init__(self, d_model: int, num_heads: int, dropout_rate: float = 0.1, **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        self.d_model = d_model
        self.dropout_rate = dropout_rate

        # d_model must divide evenly across the heads.
        assert d_model % num_heads == 0
        self.depth = d_model // num_heads

        # Q/K/V projections plus the final output projection.
        self.wq = layers.Dense(d_model)
        self.wk = layers.Dense(d_model)
        self.wv = layers.Dense(d_model)
        self.dense = layers.Dense(d_model)
        self.dropout = layers.Dropout(dropout_rate)

    def split_heads(self, x, batch_size):
        """Reshape (batch, seq, d_model) -> (batch, heads, seq, depth)."""
        reshaped = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(reshaped, perm=[0, 2, 1, 3])

    def call(self, q, k, v, mask=None, training=None):
        n = tf.shape(q)[0]

        # Linear projections followed by the per-head split.
        heads_q = self.split_heads(self.wq(q), n)
        heads_k = self.split_heads(self.wk(k), n)
        heads_v = self.split_heads(self.wv(v), n)

        # Scaled dot-product attention over every head at once.
        attended = self.scaled_dot_product_attention(heads_q, heads_k, heads_v, mask)

        # Merge heads back: (batch, heads, seq, depth) -> (batch, seq, d_model).
        attended = tf.transpose(attended, perm=[0, 2, 1, 3])
        merged = tf.reshape(attended, (n, -1, self.d_model))

        # Output projection, then dropout (active only while training).
        return self.dropout(self.dense(merged), training=training)

    def scaled_dot_product_attention(self, q, k, v, mask):
        """Compute softmax(q·kᵀ / sqrt(dk) + mask) · v."""
        logits = tf.matmul(q, k, transpose_b=True)

        # Scale by sqrt of the key dimension to stabilise gradients.
        dk = tf.cast(tf.shape(k)[-1], tf.float32)
        logits = logits / tf.math.sqrt(dk)

        # Masked positions get a huge negative logit so softmax zeroes them.
        if mask is not None:
            logits += (mask * -1e9)

        weights = tf.nn.softmax(logits, axis=-1)
        return tf.matmul(weights, v)


class PositionalEncoding(layers.Layer):
    """Sinusoidal positional encoding injecting time-step information
    into stock sequence embeddings."""

    def __init__(self, max_position: int, d_model: int, **kwargs):
        super().__init__(**kwargs)
        self.max_position = max_position
        self.d_model = d_model
        # Precompute the full table once; call() slices it per sequence.
        self.pos_encoding = self.positional_encoding(max_position, d_model)

    def positional_encoding(self, position, d_model):
        """Build the (1, position, d_model) sin/cos encoding table."""
        positions = np.arange(position)[:, np.newaxis]
        dims = np.arange(d_model)[np.newaxis, :]
        table = self.get_angles(positions, dims, d_model)

        # sin on even feature indices ...
        table[:, 0::2] = np.sin(table[:, 0::2])
        # ... cos on odd feature indices.
        table[:, 1::2] = np.cos(table[:, 1::2])

        return tf.cast(table[np.newaxis, ...], dtype=tf.float32)

    def get_angles(self, pos, i, d_model):
        """Angle values for each (position, dimension) pair."""
        rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
        return pos * rates

    def call(self, x):
        # Add the encodings for the first seq_len positions to the input.
        seq_len = tf.shape(x)[1]
        return x + self.pos_encoding[:, :seq_len, :]


class TransformerBlock(layers.Layer):
    """Post-norm Transformer encoder block.

    Self-attention and a position-wise feed-forward network, each wrapped
    in dropout + residual connection + LayerNormalization.
    """

    def __init__(self, d_model: int, num_heads: int, dff: int,
                 dropout_rate: float = 0.1, **kwargs):
        super().__init__(**kwargs)

        self.attention = MultiHeadAttention(d_model, num_heads, dropout_rate)

        # Two-layer position-wise feed-forward network.
        self.ffn = keras.Sequential([
            layers.Dense(dff, activation='relu'),
            layers.Dense(d_model),
        ])

        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(dropout_rate)
        self.dropout2 = layers.Dropout(dropout_rate)

    def call(self, x, mask=None, training=None):
        # Self-attention sub-layer: dropout -> residual add -> LayerNorm.
        attended = self.attention(x, x, x, mask=mask, training=training)
        normed = self.layernorm1(x + self.dropout1(attended, training=training))

        # Feed-forward sub-layer: dropout -> residual add -> LayerNorm.
        transformed = self.ffn(normed)
        return self.layernorm2(normed + self.dropout2(transformed, training=training))


class StockTransformer(keras.Model):
    """
    Transformer model for stock prediction.

    Architecture:
    - Token embedding + positional encoding
    - N x Transformer encoder blocks
    - Pre-training head: masked language model (MLM)
    - Downstream head: price prediction ([open, close])
    """

    def __init__(self, vocab_size: int, d_model: int = 256, num_heads: int = 8,
                 num_layers: int = 6, dff: int = 512, max_position: int = 1000,
                 dropout_rate: float = 0.1, **kwargs):
        super().__init__(**kwargs)

        # Store EVERY constructor argument so get_config() can round-trip the
        # model.  (Previously only vocab_size/d_model/num_layers were saved,
        # so a model rebuilt from config silently fell back to defaults for
        # num_heads, dff, max_position and dropout_rate.)
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.dff = dff
        self.max_position = max_position
        self.dropout_rate = dropout_rate

        # Token embedding layer.
        self.embedding = layers.Embedding(vocab_size, d_model)

        # Positional encoding (precomputed sin/cos table).
        self.pos_encoding = PositionalEncoding(max_position, d_model)

        # Stack of Transformer encoder blocks.
        self.transformer_blocks = [
            TransformerBlock(d_model, num_heads, dff, dropout_rate)
            for _ in range(num_layers)
        ]

        self.dropout = layers.Dropout(dropout_rate)

        # Pre-training head: masked language model over the token vocab.
        self.mlm_head = layers.Dense(vocab_size, name='mlm_head')

        # Downstream head: price prediction.
        self.price_head = keras.Sequential([
            layers.Dense(128, activation='relu'),
            layers.Dropout(dropout_rate),
            layers.Dense(64, activation='relu'),
            layers.Dense(2, name='price_prediction')  # [open, close]
        ], name='price_head')

    def create_padding_mask(self, seq):
        """Build a padding mask: 1.0 where the token id is 0 (padding).

        Returned shape (batch, 1, 1, seq) broadcasts over heads and the
        query dimension inside the attention logits.
        """
        seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
        return seq[:, tf.newaxis, tf.newaxis, :]

    def create_look_ahead_mask(self, size):
        """Build a look-ahead mask so position i cannot attend to j > i."""
        # Upper triangle (above the diagonal) is 1 = masked.
        mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
        return mask

    def call(self, inputs, training=None, task='mlm'):
        """
        Forward pass.

        Args:
            inputs: [batch_size, seq_len] token ids
            training: whether in training mode (enables dropout)
            task: 'mlm' (pre-training) or 'price' (price prediction)

        Returns:
            'mlm': [batch, seq_len, vocab_size] logits;
            'price': [batch, 2] predictions; otherwise the raw
            [batch, seq_len, d_model] sequence representation.
        """
        seq_len = tf.shape(inputs)[1]

        # Token embedding, scaled by sqrt(d_model) per the Transformer paper.
        x = self.embedding(inputs)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))

        # Add positional information, then input dropout.
        x = self.pos_encoding(x)
        x = self.dropout(x, training=training)

        # Build the attention mask.
        padding_mask = self.create_padding_mask(inputs)
        if task == 'price':
            # Causal masking for price prediction: no peeking at the future.
            look_ahead_mask = self.create_look_ahead_mask(seq_len)
            combined_mask = tf.maximum(padding_mask, look_ahead_mask)
        else:
            combined_mask = padding_mask

        # Run the encoder stack.
        for transformer_block in self.transformer_blocks:
            x = transformer_block(x, mask=combined_mask, training=training)

        # Task-specific head.
        if task == 'mlm':
            # Pre-training: predict the masked tokens.
            return self.mlm_head(x)
        elif task == 'price':
            # Price prediction: use the last token's representation.
            last_token_output = x[:, -1, :]
            return self.price_head(last_token_output)
        else:
            # Default: return the full sequence representation.
            return x

    def get_config(self):
        # Complete config so keras can reconstruct the model exactly.
        config = super().get_config()
        config.update({
            'vocab_size': self.vocab_size,
            'd_model': self.d_model,
            'num_heads': self.num_heads,
            'num_layers': self.num_layers,
            'dff': self.dff,
            'max_position': self.max_position,
            'dropout_rate': self.dropout_rate,
        })
        return config


def create_model(vocab_size: int, model_params: Optional[Dict] = None) -> StockTransformer:
    """Build a StockTransformer, merging caller overrides into the defaults."""
    params = {
        'd_model': 256,
        'num_heads': 8,
        'num_layers': 6,
        'dff': 512,
        'max_position': 1000,
        'dropout_rate': 0.1,
    }

    # Caller-supplied parameters win over the defaults.
    if model_params:
        params.update(model_params)

    return StockTransformer(vocab_size=vocab_size, **params)


def test_model():
    """Smoke-test the model architecture on random input (CPU only)."""
    print("🧪 测试Stock Transformer模型...")

    # Force CPU to avoid RTX 5090 compatibility issues.  Setting
    # CUDA_VISIBLE_DEVICES here is too late once `tensorflow` has already
    # been imported at module top, so also hide GPUs via the runtime
    # tf.config API (valid as long as no GPU has been initialised yet).
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    try:
        tf.config.set_visible_devices([], 'GPU')
    except RuntimeError:
        # GPUs were already initialised; proceed on whatever device is active.
        pass
    print("ℹ️ 强制使用CPU模式进行测试")

    # Model/test parameters.
    vocab_size = 1000
    batch_size = 2
    seq_len = 50

    # Build the model with default hyper-parameters.
    model = create_model(vocab_size)

    # Random token ids as a stand-in for encoded stock data.
    dummy_input = tf.random.uniform((batch_size, seq_len), maxval=vocab_size, dtype=tf.int32)

    # Pre-training task.
    print("📝 测试预训练任务 (MLM)...")
    mlm_output = model(dummy_input, task='mlm')
    print(f"   MLM输出形状: {mlm_output.shape}")  # [batch_size, seq_len, vocab_size]

    # Downstream price-prediction task.
    print("💰 测试价格预测任务...")
    price_output = model(dummy_input, task='price')
    print(f"   价格预测输出形状: {price_output.shape}")  # [batch_size, 2]

    # Parameter count (model is already built by the calls above).
    model.build((None, seq_len))
    total_params = model.count_params()
    print(f"🔢 模型总参数量: {total_params:,}")

    # Architecture summary.
    print("\n📊 模型架构摘要:")
    print(f"   词汇表大小: {vocab_size}")
    print(f"   隐藏层维度: 256")
    print(f"   注意力头数: 8")
    print(f"   Transformer层数: 6")
    print(f"   总参数量: {total_params:,} (~{total_params/1e6:.1f}M)")

    print("✅ 模型架构测试完成！")


# Run the architecture smoke test when executed as a script.
if __name__ == "__main__":
    test_model()