import torch
from torch import nn
from PIL import Image
from torchvision import transforms
from models.encoders.psp import pSp
from models.stylegan2.model import Generator
from  models.encoders.psp_encoders import Encoder4Editing


# --- Load the source image and prepare it as a normalized tensor ---
# FIX: use a raw string for the Windows path so backslashes are not treated
# as escape sequences ('\p', '\T', etc. trigger a SyntaxWarning on
# Python 3.12+ and would silently corrupt the path for sequences like '\n').
image_path = r'G:\pythonProject\pythonProject\ThinkStation\optimization\img.png'
image = Image.open(image_path).convert("RGB")
# Upscale to 1024x1024 first.
# NOTE(review): likely redundant — the transform below resizes to 512x512
# anyway, so this intermediate step only slightly changes interpolation
# results. Kept to preserve behavior; confirm whether it can be dropped.
image = image.resize((1024, 1024), Image.BICUBIC)
# Resize to the encoder input size and normalize each channel to [-1, 1]
# (mean 0.5 / std 0.5), the input range StyleGAN-family encoders expect.
transform = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
image = transform(image)
image = image.unsqueeze(0)  # add batch dimension -> (1, 3, 512, 512)
# --- Configure and instantiate the pSp encoder wrapper ---
# NOTE(review): many pSp implementations access options via attributes
# (opts.encoder_type), i.e. they expect an argparse.Namespace rather than a
# plain dict — confirm this dict is accepted by models.encoders.psp.pSp.
opts = {
    'encoder_type': 'Encoder4Editing',  # alternatives: 'GradualStyleEncoder', 'SingleStyleCodeEncoder'
    'stylegan_size': 1024,
    'return_latents': True,
    'stylegan_weights': 'stylegan2-ffhq-config-f.pt',
    'checkpoint_path': 'e4e_ffhq_encode.pt',
    'start_from_latent_avg': True,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
}
print(opts)
# FIX: move the model to the device computed above instead of calling
# .cuda() unconditionally — the original crashed on CPU-only machines even
# though opts['device'] already falls back to 'cpu'.
psp = pSp(opts).to(opts['device'])
# --- Encode the image to a latent code and save the averaged vector ---
# FIX: run inference under no_grad — no autograd graph is needed here, and
# building one wastes memory during pure encoding.
with torch.no_grad():
    latents = psp(image.to(opts['device']))
print(latents.shape)  # assumed (batch, n_styles, 512), e.g. (1, 18, 512) — TODO confirm
# Compute the flattened shape: merge batch and style dims -> (B * n_styles, 512).
new_shape = (latents.shape[0] * latents.shape[1], latents.shape[2])

# Reshape the tensor.
latents2 = latents.contiguous().view(new_shape)

print(latents2.shape)
# Average over the first dimension; the result has shape (1, 512).
latents3 = latents2.mean(dim=0, keepdim=True)
print(latents3.shape)

# Save the averaged latent.
# FIX: detach and move to CPU first so the saved file carries neither
# autograd state nor a CUDA device requirement — otherwise torch.load on a
# CPU-only machine fails without map_location.
torch.save(latents3.detach().cpu(), 'latent_code668.pt')
print(latents3, "保存成功")

#num_elements = latents.nelement()
#fc = nn.Linear(num_elements, 512).to(opts['device'])
# Alternative approach (disabled): reduce the tensor to 512 elements via a
# fully-connected layer instead of averaging.
# x_compressed = latent_code.view(1, -1)
# print(x_compressed.shape)
# # Use the fully-connected layer to reduce the element count to 512
# x_transformed = fc(x_compressed)
# print(x_transformed.shape)  # torch.Size([1, 512])
# # Save the variable
# torch.save(latent_code, 'latent_code667.pt')
# print(latent_code,"保存成功")
