import matplotlib.pyplot as plt
from PIL import Image
import torchvision.transforms as transforms
import torch
import torch.nn as nn
import torch.optim as optim

# Preprocessing: resize to a fixed 256x256, then convert to a CHW float
# tensor with values scaled to [0, 1] (ToTensor).
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor()
])

# Force RGB so the tensor always has exactly 3 channels, matching the
# Conv2d(3, ...) input of the autoencoder below — JPEG files may also be
# grayscale or CMYK, which would otherwise crash the model.
image = Image.open('E:/pydeeplearning/deeplearning/image_processing/test/Apple.jpg').convert('RGB')
image_tensor = transform(image)  # shape (3, 256, 256)

# CHW -> HWC copy for an optional matplotlib preview of the input image.
image_numpy = image_tensor.numpy().transpose(1, 2, 0)
# print(image_numpy.shape)
# plt.imshow(image_numpy)
# plt.axis('off')
# plt.show()

class Autoencoder(nn.Module):
    """Small convolutional autoencoder for 3-channel images.

    The encoder halves the spatial resolution twice (total /4) while mapping
    3 -> 16 -> 8 channels; the decoder mirrors it with stride-2 transposed
    convolutions, so the output has the same shape as the input. A final
    Sigmoid keeps reconstructed pixel values in [0, 1].
    """

    def __init__(self):
        super().__init__()
        # Encoder: two conv + ReLU + 2x2 max-pool stages.
        encoder_layers = [
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(16, 8, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        # Decoder: transposed convolutions undo the two pooling steps
        # (output_padding=1 restores the exact even input size).
        decoder_layers = [
            nn.ConvTranspose2d(8, 16, kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 3, kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.Sigmoid(),
        ]
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        """Encode then decode; returns a tensor with the same shape as `x`."""
        return self.decoder(self.encoder(x))
    
model = Autoencoder()

# Pixel-wise MSE reconstruction loss, Adam optimizer.
loss = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
epochs = 400

# Deliberately overfit the autoencoder on the single image: each epoch is
# one full-image forward/backward pass.
for epoch in range(epochs):
    optimizer.zero_grad()  # clear gradients before accumulating new ones
    output = model(image_tensor)
    loss_value = loss(output, image_tensor)
    loss_value.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss_value.item():.4f}')

# Reconstruct the trained image and display it; no gradients are needed
# for inference, so wrap in no_grad to skip autograd bookkeeping.
with torch.no_grad():
    reconstruction = model(image_tensor)
    # CHW -> HWC so matplotlib can render the tensor as an image.
    reconstruction_np = reconstruction.numpy().transpose(1, 2, 0)
    plt.imshow(reconstruction_np)
    plt.axis('off')
    plt.show()