import os
import torch
import torch.nn as nn
from PIL import Image
import random, string
from torchvision import transforms
import numpy as np
from torchvision.utils import save_image
from network.noise_layers.ganimation.model import Generator


class GANimation(nn.Module):
    """Noise layer that re-animates face images with a pretrained GANimation generator.

    Each image in the batch is written to a temporary PNG, pushed through the
    generator conditioned on a randomly chosen action-unit (AU) target read
    from ``attributes.txt``, then read back as the "noised" output.

    The generator is loaded on CPU at construction time and moved lazily to
    the device of the incoming batch in :meth:`forward`, so the module works
    with multi-GPU replicas (e.g. ``DataParallel`` on cuda:0/cuda:1/...)
    without pinning a fixed device.
    """

    # Pretrained generator checkpoint and AU annotation file (Linux paths).
    _G_CKPT = '/home/fang_guotong/projects/3090server/network/noise_layers/ganimation/eric_andre/pretrained_models/7001-37-G.ckpt'
    _ATTR_TXT = '/home/fang_guotong/projects/3090server/network/noise_layers/ganimation/eric_andre/attributes.txt'

    def __init__(self, temp="temp/", target="/home/fang_guotong/datasets/watermark_dataset/CelebA-HQ128/test_128"):
        """
        Args:
            temp: directory for intermediate PNG files; created if absent.
            target: directory of CelebA-HQ target images named ``dddddd.png``.
        """
        super(GANimation, self).__init__()
        self.temp = temp
        self.target = target

        # Load the generator on CPU; forward() relocates it to the device of
        # the incoming batch, so no device is fixed here.
        self.G = Generator(64, 17, 6)
        self.G.load_state_dict(torch.load(self._G_CKPT, map_location=torch.device('cpu')))

        # Race-free creation: exists()-then-mkdir can fail if another
        # process creates the directory in between.
        os.makedirs(temp, exist_ok=True)

        # Parsed AU targets from attributes.txt, cached on first use.
        # (Previously the file was re-read and re-parsed for every image.)
        self._targets_cpu = None

        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

    def get_temp(self):
        """Return a unique (currently unused) temporary PNG path under ``self.temp``."""
        while True:
            stem = ''.join(random.sample(string.ascii_letters + string.digits, 16))
            candidate = self.temp + stem + ".png"
            if not os.path.exists(candidate):
                return candidate

    def get_target(self):
        """Return a random target image path (CelebA test-split ids, ``dddddd.png``)."""
        idx = random.randint(162771, 182637)
        return os.path.join(self.target, f"{idx:06d}.png")

    def imFromAttReg(self, att, reg, x_real):
        """Blend the colour regression ``reg`` with the real image via attention ``att``."""
        return (1 - att) * reg + att * x_real

    def _load_targets(self):
        """Parse ``attributes.txt`` into an (N, 17) CPU AU-target tensor, cached.

        Each line is ``<image-name> <17 AU values>``; values are divided by 5
        to match the generator's expected conditioning range.  The attribute
        *images* listed in the file were previously loaded too but never used,
        so that dead work is skipped.
        """
        if self._targets_cpu is None:
            rows = []
            with open(self._ATTR_TXT, 'r') as txt_file:
                for line in txt_file:
                    parts = line.split(' ')
                    rows.append([float(v) / 5. for v in parts[1:]])
            self._targets_cpu = torch.tensor(rows)
        return self._targets_cpu

    def forward(self, image_cover_mask):
        """Apply GANimation to every image in the batch.

        Args:
            image_cover_mask: sequence whose first element is the batch of
                images in [-1, 1]; the second element (cover image) is unused.

        Returns:
            Tensor with the same shape and device as the input batch,
            holding the re-animated images normalised to [-1, 1].
        """
        image = image_cover_mask[0]  # element 1 (cover image) is not needed
        noised_image = torch.zeros_like(image)

        # Take the device from the input so each DataParallel replica
        # (cuda:0, cuda:1, ...) stays self-contained.
        current_device = image.device

        for i in range(image.shape[0]):
            # [-1, 1] float CHW tensor -> HWC uint8 numpy -> PIL (CPU only).
            single_image = ((image[i].clamp(-1, 1).permute(1, 2, 0) + 1) / 2 * 255).add(0.5).clamp(0, 255).to('cpu', torch.uint8).numpy()
            im = Image.fromarray(single_image)
            file = self.get_temp()

            try:
                self.animation(self.get_target(), im, file, current_device)
                fake = np.array(Image.open(file), dtype=np.uint8)
                # transform() yields (3, H, W); assign directly — the old
                # extra unsqueeze(0) only relied on implicit broadcasting.
                noised_image[i] = self.transform(fake).to(current_device)
            finally:
                # Always remove the temp PNG, even when animation or
                # decoding fails (the original leaked it on error).
                if os.path.exists(file):
                    os.remove(file)
        return noised_image

    def animation(self, face_image_path, body_image, output_path, current_device):
        """Animate ``body_image`` with a random AU target and write a PNG.

        Args:
            face_image_path: kept for interface compatibility; currently not
                used by the generation pipeline.
            body_image: PIL image to animate.
            output_path: destination for the resulting PNG.
            current_device: device of the calling replica's input batch.
        """
        regular_image_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])

        # Move the generator to the caller's device and force eval mode so
        # BatchNorm/Dropout behave deterministically during inference.
        self.G = self.G.to(current_device)
        self.G.eval()

        with torch.no_grad():
            image_to_animate = regular_image_transform(body_image).unsqueeze(0).to(current_device)

            # AU targets are parsed once and cached on CPU; only the randomly
            # selected row is moved to the current device.
            targets = self._load_targets()
            target_idx = random.randint(0, targets.size(0) - 1)
            targets_au = targets[target_idx, :].unsqueeze(0).to(current_device)

            # Generator inference; all inputs share current_device.
            resulting_images_att, resulting_images_reg = self.G(image_to_animate, targets_au)
            resulting_image = self.imFromAttReg(resulting_images_att, resulting_images_reg, image_to_animate)

            # Map [-1, 1] back to [0, 1] before writing the PNG.
            save_image((resulting_image + 1) / 2, output_path)