#!/usr/bin/env python3
"""
Transformer图像生成器
从图像数据生成新的图像内容
支持多种图像格式和特征提取
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from PIL import Image
from models.model import TransformerModel

class ImageGenerator:
    """Transformer-based image generator operating on patch sequences.

    Splits an input image into fixed-size square patches, embeds the flattened
    patches with a learned linear layer plus sinusoidal positional encodings,
    iteratively refines the patch sequence with a Transformer, and reassembles
    the result into an image.
    """

    def __init__(self, image_size=(64, 64), patch_size=8, embedding_dim=512,
                 nhead=8, num_layers=6, num_channels=3, max_patches=256):
        """
        Image generator - generates new images from raw image data.

        Args:
            image_size: input image size as (height, width); both dimensions
                should be divisible by ``patch_size``
            patch_size: side length of each square image patch
            embedding_dim: embedding dimension
            nhead: number of attention heads
            num_layers: number of Transformer layers
            num_channels: number of image channels (3 for RGB, 1 for grayscale)
            max_patches: maximum number of image patches (stored for callers;
                not enforced by this class)
        """
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_patches = max_patches

        # Patch grid dimensions; image_size is (height, width).
        self.patches_per_row = image_size[1] // patch_size
        self.patches_per_col = image_size[0] // patch_size
        self.num_patches = self.patches_per_row * self.patches_per_col

        # Patch embedding: flattened patch pixels -> embedding vector.
        self.patch_dim = num_channels * patch_size * patch_size
        self.patch_embedding = nn.Linear(self.patch_dim, embedding_dim)

        # Fixed sinusoidal positional encoding over patch positions.
        self.positional_encoding = self._create_positional_encoding(self.num_patches, embedding_dim)

        # Transformer backbone; its output dimension equals the flattened
        # patch size so predicted patches can be reassembled into an image.
        self.model = TransformerModel(
            input_dim=embedding_dim,
            output_dim=self.patch_dim,
            nhead=nhead,
            num_layers=num_layers
        )

    def _create_positional_encoding(self, num_patches, d_model):
        """Create sinusoidal positional encodings of shape (num_patches, d_model)."""
        position = torch.arange(0, num_patches).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))

        pos_encoding = torch.zeros(num_patches, d_model)
        pos_encoding[:, 0::2] = torch.sin(position * div_term)
        # Slice div_term so odd d_model does not raise a shape mismatch:
        # the odd columns number d_model // 2, one fewer than div_term entries
        # when d_model is odd.
        pos_encoding[:, 1::2] = torch.cos(position * div_term[: d_model // 2])

        return pos_encoding

    def _image_to_patches(self, image_tensor):
        """Split an image batch into a sequence of flattened patches.

        Args:
            image_tensor: tensor of shape (batch_size, channels, height, width)

        Returns:
            Tensor of shape (batch_size, num_patches, patch_dim).

        Raises:
            ValueError: if height or width is not divisible by the patch size.
        """
        batch_size, channels, height, width = image_tensor.shape

        # Validate with a real exception; `assert` would be stripped under -O.
        if height % self.patch_size != 0 or width % self.patch_size != 0:
            raise ValueError(
                f"Image size {height}x{width} must be divisible by patch size {self.patch_size}")

        # Extract non-overlapping patches with unfold.
        patches = image_tensor.unfold(2, self.patch_size, self.patch_size).unfold(3, self.patch_size, self.patch_size)
        # patches shape: (batch_size, channels, patches_per_col, patches_per_row, patch_size, patch_size)

        # Reorder to (batch, row, col, channel, py, px), then flatten each patch.
        patches = patches.permute(0, 2, 3, 1, 4, 5).contiguous()
        patches = patches.view(batch_size, self.num_patches, -1)  # (batch_size, num_patches, patch_dim)

        return patches

    def _patches_to_image(self, patches):
        """Reassemble a sequence of flattened patches into an image batch.

        Inverse of :meth:`_image_to_patches`.

        Args:
            patches: tensor of shape (batch_size, num_patches, patch_dim)

        Returns:
            Tensor of shape (batch_size, num_channels, height, width).
        """
        batch_size, num_patches, patch_dim = patches.shape

        # Unflatten each patch back to (row, col, channel, py, px).
        patches = patches.view(batch_size, self.patches_per_col, self.patches_per_row,
                             self.num_channels, self.patch_size, self.patch_size)
        # Interleave patch-grid and within-patch axes so a single view yields
        # contiguous image rows: (batch, channel, row, py, col, px).
        patches = patches.permute(0, 3, 1, 4, 2, 5).contiguous()

        # Merge the patch grid into full image height/width.
        image = patches.view(batch_size, self.num_channels,
                           self.patches_per_col * self.patch_size,
                           self.patches_per_row * self.patch_size)

        return image

    def preprocess_image(self, image_input):
        """Convert a file path, numpy array, or tensor into a (N, C, H, W) tensor.

        File inputs are resized to ``image_size`` and scaled to [0, 1]; array
        and tensor inputs are assumed to already be scaled to [0, 1] (not
        verified here). The result is normalized to [-1, 1].

        Raises:
            Exception: if a file path cannot be loaded.
            ValueError: for unsupported input types or dimensions.
        """
        if isinstance(image_input, str):
            try:
                # Load from a file path; PIL resize takes (width, height).
                image = Image.open(image_input).convert('RGB' if self.num_channels == 3 else 'L')
                image = image.resize((self.image_size[1], self.image_size[0]))
                image_array = np.array(image) / 255.0
                if image_array.ndim == 2:
                    # Grayscale ('L') images come back as (H, W); add batch and
                    # channel axes instead of permuting (permute on a 2-D array
                    # would raise).
                    image_tensor = torch.tensor(image_array, dtype=torch.float32).unsqueeze(0).unsqueeze(0)
                else:
                    image_tensor = torch.tensor(image_array, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
            except Exception as e:
                # Preserve the original cause for debugging.
                raise Exception(f"Error loading image from {image_input}: {e}") from e
        elif isinstance(image_input, np.ndarray):
            # Load from a numpy array.
            if image_input.ndim == 3:
                # 3-D array: a single image.
                # Heuristic: a trailing axis of size 1 or 3 is taken as the
                # channel axis (H, W, C); otherwise assume channels-first.
                if image_input.shape[2] in [1, 3]:
                    image_tensor = torch.tensor(image_input, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
                else:  # channels-first (C, H, W)
                    image_tensor = torch.tensor(image_input, dtype=torch.float32).unsqueeze(0)
            elif image_input.ndim == 2:
                # 2-D array: grayscale image (H, W) -> (1, 1, H, W).
                image_tensor = torch.tensor(image_input, dtype=torch.float32).unsqueeze(0).unsqueeze(0)
            elif image_input.ndim == 4:
                # 4-D array: a batch of images; same channel-axis heuristic.
                if image_input.shape[3] in [1, 3]:  # channels-last (N, H, W, C)
                    image_tensor = torch.tensor(image_input, dtype=torch.float32).permute(0, 3, 1, 2)
                else:  # channels-first (N, C, H, W)
                    image_tensor = torch.tensor(image_input, dtype=torch.float32)
            else:
                raise ValueError(f"Unsupported numpy array dimension: {image_input.ndim}")
        elif isinstance(image_input, torch.Tensor):
            # Already a tensor; only normalize the layout.
            image_tensor = image_input
            if image_tensor.dim() == 3:
                image_tensor = image_tensor.unsqueeze(0)
            elif image_tensor.dim() == 2:
                image_tensor = image_tensor.unsqueeze(0).unsqueeze(0)
            elif image_tensor.dim() == 4:
                # NOTE(review): this heuristic misfires for channels-first
                # batches whose width happens to be 1 or 3 — confirm callers.
                if image_tensor.shape[3] in [1, 3]:
                    image_tensor = image_tensor.permute(0, 3, 1, 2)
            else:
                raise ValueError(f"Unsupported tensor dimension: {image_tensor.dim()}")
        else:
            raise ValueError("Unsupported image input type")

        # Normalize [0, 1] -> [-1, 1].
        image_tensor = image_tensor * 2 - 1

        return image_tensor

    def generate(self, input_image, is_noise=False, num_steps=10, temperature=1.0):
        """
        Generate a new image from an input image by iterative patch refinement.

        Args:
            input_image: input image (file path, numpy array, or tensor); when
                ``is_noise`` is True it must be a tensor of shape
                (batch_size, num_patches, patch_dim)
            is_noise: treat ``input_image`` as pre-flattened noise patches and
                skip preprocessing
            num_steps: number of refinement iterations over the patch sequence
            temperature: softmax temperature controlling sampling randomness

        Returns:
            generated_image: generated image tensor with values in [0, 1]
        """
        with torch.no_grad():
            if is_noise:
                # Noise is already in flattened-patch form; skip preprocessing.
                current_patches = input_image.clone()
            else:
                # Preprocess and split into patches: (batch, num_patches, patch_dim).
                input_tensor = self.preprocess_image(input_image)
                current_patches = self._image_to_patches(input_tensor)

            # Embed patches and add positional encodings.
            embeddings = self.patch_embedding(current_patches)
            embeddings = embeddings + self.positional_encoding.unsqueeze(0)

            for step in range(num_steps):
                # Transformer forward pass.
                output_logits = self.model(embeddings)

                # Temperature scaling.
                if temperature != 1.0:
                    output_logits = output_logits / temperature

                # NOTE(review): this samples ONE index per patch from a softmax
                # over the flattened pixel dimension and broadcasts it across
                # the whole patch, so every pixel in a patch gets the same
                # value — likely not the intended per-pixel sampling; confirm
                # with the model's training objective before changing.
                output_probs = F.softmax(output_logits.view(-1, self.patch_dim), dim=-1)
                next_patch = torch.multinomial(output_probs, 1).view(output_logits.shape[0], self.num_patches, 1)

                # Replace current patches with the sampled (broadcast) values.
                current_patches = next_patch.expand_as(current_patches).float()

                # Re-embed for the next refinement step.
                embeddings = self.patch_embedding(current_patches)
                embeddings = embeddings + self.positional_encoding.unsqueeze(0)

        # Reassemble the patch sequence into an image.
        generated_image = self._patches_to_image(current_patches)

        # Sampled values are discrete indices in [0, patch_dim - 1]; rescale
        # them into the [0, 1] pixel range.
        generated_image = generated_image / (self.patch_dim - 1)
        generated_image = torch.clamp(generated_image, 0.0, 1.0)

        return generated_image

    def generate_from_noise(self, noise=None, batch_size=1, num_steps=20):
        """Generate images starting from Gaussian noise patches.

        Args:
            noise: optional pre-built noise tensor of shape
                (batch_size, num_patches, patch_dim); sampled here when None
            batch_size: number of images when noise is sampled internally
            num_steps: number of refinement iterations

        Returns:
            Generated image tensor with values in [0, 1].
        """
        if noise is None:
            noise = torch.randn(batch_size, self.num_patches, self.patch_dim)

        return self.generate(noise, is_noise=True, num_steps=num_steps)