#!/usr/bin/env python3
"""
Transformer文本生成器
从文本数据生成新的文本内容
支持多种语言和文本格式
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from models.model import TransformerModel

class TextGenerator:
    """Autoregressive text generator built on a Transformer backbone.

    Combines a token embedding, fixed sinusoidal positional encodings and a
    ``TransformerModel`` to produce token sequences one position at a time.
    """

    def __init__(self, vocab_size, embedding_dim, nhead, num_layers, max_length=512):
        """Initialize the generator.

        Args:
            vocab_size: Size of the token vocabulary.
            embedding_dim: Dimensionality of the token embeddings.
            nhead: Number of attention heads.
            num_layers: Number of Transformer layers.
            max_length: Maximum total sequence length that can be generated.
        """
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.max_length = max_length

        # Token embedding plus a precomputed (max_length, embedding_dim)
        # positional-encoding table shared by every generate() call.
        self.token_embedding = nn.Embedding(vocab_size, embedding_dim)
        self.positional_encoding = self._create_positional_encoding(max_length, embedding_dim)

        self.model = TransformerModel(
            input_dim=embedding_dim,
            output_dim=vocab_size,
            nhead=nhead,
            num_layers=num_layers
        )

    def _create_positional_encoding(self, max_length, d_model):
        """Build the standard sinusoidal positional-encoding table.

        Returns a (max_length, d_model) tensor where even columns hold
        sin(pos / 10000^(2i/d_model)) and odd columns the matching cosine.
        Handles odd d_model (one extra sine column) without index overflow.
        """
        # position: (max_length, 1) so it broadcasts against div_term.
        position = torch.arange(0, max_length, dtype=torch.float).unsqueeze(1)

        # One frequency per sin/cos pair.  NOTE: the exponent must use the
        # even dimension index 2i (arange step of 2) per the standard
        # formulation; the previous code used the bare pair index, which
        # made every frequency the square root of the intended value.
        div_term = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float) * -(math.log(10000.0) / d_model)
        )  # shape: (ceil(d_model / 2),)

        pos_encoding = torch.zeros(max_length, d_model)
        pos_encoding[:, 0::2] = torch.sin(position * div_term)
        # An odd d_model has one more sine column than cosine column, so the
        # cosine fill only consumes the first d_model // 2 frequencies.
        pos_encoding[:, 1::2] = torch.cos(position * div_term[: d_model // 2])

        return pos_encoding

    def _preprocess_input(self, input_tokens):
        """Normalize input tokens to a (batch_size, seq_len) int64 tensor.

        Accepts a list of numbers, a numpy array, or a torch tensor.

        Raises:
            TypeError: if the input type (or list element type) is unsupported.
            ValueError: if the shape is not 1-D/2-D or ids are out of range.
        """
        if isinstance(input_tokens, list):
            # Every list element must be numeric before tensor conversion.
            if not all(isinstance(x, (int, float)) for x in input_tokens):
                raise TypeError("Unsupported input type: list elements must be numbers")
            input_tokens = torch.tensor(input_tokens)
        elif isinstance(input_tokens, np.ndarray):
            input_tokens = torch.from_numpy(input_tokens)
        elif not isinstance(input_tokens, torch.Tensor):
            raise TypeError(f"Unsupported input type: {type(input_tokens)}")

        # Embedding lookups require integer ids; cast every accepted input
        # uniformly.  (Previously only the tensor branch was cast, so
        # list/ndarray inputs could reach nn.Embedding as floats and fail.)
        input_tokens = input_tokens.to(torch.int64)

        # Promote (seq_len,) to a single-item batch (1, seq_len).
        if input_tokens.dim() == 1:
            input_tokens = input_tokens.unsqueeze(0)

        if input_tokens.dim() != 2:
            raise ValueError("Input tensor must have shape (batch_size, seq_len) or (seq_len,)")

        # Range-check ids; skip empty tensors (min()/max() would raise).
        if input_tokens.numel() > 0:
            if input_tokens.min() < 0 or input_tokens.max() >= self.vocab_size:
                raise ValueError(f"Token IDs must be in range [0, {self.vocab_size-1}]")

        return input_tokens

    def _next_token(self, last_logits, temperature, top_k):
        """Select the next token id from (batch_size, vocab_size) logits.

        temperature == 0 selects the argmax (greedy decoding); otherwise the
        logits are temperature-scaled, optionally top-k filtered, and sampled.
        """
        if temperature == 0.0:
            # Greedy path.  Dividing by a zero temperature would produce
            # inf/nan logits and crash torch.multinomial downstream.
            return last_logits.argmax(dim=-1, keepdim=True)

        if temperature != 1.0:
            last_logits = last_logits / temperature

        if top_k is not None:
            k = min(top_k, last_logits.size(-1))
            # k-th largest logit per row, kept broadcastable: (batch, 1).
            kth_value = torch.topk(last_logits, k)[0][..., -1, None]
            # masked_fill (out-of-place) so a view of the model's logits is
            # never mutated by the filter.
            last_logits = last_logits.masked_fill(last_logits < kth_value, float('-inf'))

        probs = F.softmax(last_logits, dim=-1)  # (batch_size, vocab_size)
        return torch.multinomial(probs, num_samples=1)  # (batch_size, 1)

    def generate(self, input_tokens, max_length=None, temperature=1.0, top_k=None):
        """Autoregressively generate tokens from a prompt.

        Args:
            input_tokens: Prompt token ids, shape (batch_size, seq_len) or (seq_len,).
            max_length: Total sequence length to generate (defaults to self.max_length).
            temperature: Sampling temperature; 0 means greedy (argmax),
                >1 increases randomness, <1 decreases it.
            top_k: If set, sample only among the k highest-probability tokens.

        Returns:
            Generated token ids, shape (batch_size, generated_seq_len).

        Raises:
            ValueError: if max_length is non-positive, exceeds the model limit,
                temperature is negative, or the prompt is malformed.
        """
        if max_length is None:
            max_length = self.max_length

        if max_length <= 0:
            raise ValueError("max_length must be positive")

        # Reject oversized requests outright.  (The previous code assigned
        # max_length = self.max_length and then raised anyway — dead store.)
        if max_length > self.max_length:
            raise ValueError(f"max_length must be less than or equal to {self.max_length}")

        if temperature < 0:
            raise ValueError("temperature must be non-negative")

        input_tokens = self._preprocess_input(input_tokens)
        batch_size, seq_len = input_tokens.shape

        # Prompt already at or beyond the requested length: nothing to do.
        if seq_len >= max_length:
            return input_tokens

        if seq_len == 0:
            # Empty prompt: seed generation with token id 0 as a start symbol.
            generated_tokens = torch.zeros((batch_size, 1), dtype=torch.long)
            start_pos = 1
        else:
            generated_tokens = input_tokens.clone()  # (batch_size, seq_len)
            start_pos = seq_len

        with torch.no_grad():
            for cur_len in range(start_pos, max_length):
                current_sequence = generated_tokens[:, :cur_len]

                # Embed tokens and add the fixed positional encodings.
                embeddings = self.token_embedding(current_sequence)  # (B, cur_len, E)
                pos_encoding = self.positional_encoding[:cur_len].unsqueeze(0).to(embeddings.device)
                embedded_input = embeddings + pos_encoding  # (B, cur_len, E)

                # Model consumes (batch, seq, embedding_dim) — batch_first=True.
                logits = self.model(embedded_input)  # (B, cur_len, vocab_size)
                last_logits = logits[:, -1, :]  # next-position prediction

                next_token = self._next_token(last_logits, temperature, top_k)
                generated_tokens = torch.cat([generated_tokens, next_token], dim=1)

                # Optional early stop on an end-of-sequence token could go here.

        return generated_tokens

    def generate_greedy(self, input_tokens, max_length=None):
        """Greedy (argmax) decoding; equivalent to temperature=0 sampling."""
        return self.generate(input_tokens, max_length, temperature=0.0, top_k=1)
