# Papercut 风格迁移 - Fast Neural Style Transfer

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets, models
from torch.utils.data import DataLoader
from PIL import Image
import matplotlib.pyplot as plt

import os

# Select GPU when available; every tensor/module below is moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# -----------------------------
# 1. Load the content dataset
# -----------------------------
# 256x256 center crops; ToTensor yields pixel values in [0, 1] (no normalization).
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(256),
    transforms.ToTensor()
])

# Put COCO, ImageNet, or any large image folder under './content_images'.
# NOTE(review): ImageFolder requires at least one class subdirectory inside
# './content_images'. Also, num_workers=2 at module level without an
# `if __name__ == "__main__"` guard can crash on spawn-based platforms
# (Windows/macOS) -- confirm the target OS.
content_dataset = datasets.ImageFolder('./content_images', transform=transform)
content_loader = DataLoader(content_dataset, batch_size=4, shuffle=True, num_workers=2)

# -----------------------------
# 2. Load the papercut style image
# -----------------------------
# BUG FIX: .convert("RGB") guards against grayscale or RGBA style files,
# which would otherwise yield the wrong channel count (1 or 4) for the
# 3-channel pipeline and crash the VGG feature extraction.
style_img = Image.open("style.jpg").convert("RGB")
style_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.ToTensor()  # values in [0, 1]
])
style_img = style_transform(style_img).unsqueeze(0).to(device)

# Preview the style image (move back to CPU and HWC layout for matplotlib).
plt.imshow(style_img.cpu().squeeze(0).permute(1,2,0))
plt.title("剪纸风格图")
plt.axis("off")
plt.show()

# -----------------------------
# 3. Transformer network definition
# -----------------------------
class ResidualBlock(nn.Module):
    """Two 3x3 conv + instance-norm stages wrapped in an identity skip.

    Output = input + residual(input); spatial size and channel count are
    preserved (stride 1, padding 1).
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        conv_opts = dict(kernel_size=3, stride=1, padding=1)
        # Kept as a single positional Sequential so state_dict keys match
        # the rest of the file's convention.
        self.block = nn.Sequential(
            nn.Conv2d(channels, channels, **conv_opts),
            nn.InstanceNorm2d(channels, affine=True),
            nn.ReLU(),
            nn.Conv2d(channels, channels, **conv_opts),
            nn.InstanceNorm2d(channels, affine=True),
        )

    def forward(self, inputs):
        # Identity skip connection.
        return inputs + self.block(inputs)

class TransformerNet(nn.Module):
    """Image transformation network: downsample -> 5 residual blocks -> upsample.

    Takes a 3-channel image batch and returns a 3-channel batch of the same
    spatial size; the final Tanh bounds the output to [-1, 1].
    """

    def __init__(self):
        super(TransformerNet, self).__init__()
        # Encoder: 9x9 stem, then two stride-2 convs (4x spatial reduction).
        encoder = [
            nn.Conv2d(3, 32, 9, stride=1, padding=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, 3, stride=2, padding=1),
            nn.ReLU(),
        ]
        # Bottleneck: five residual blocks at 128 channels.
        bottleneck = [ResidualBlock(128) for _ in range(5)]
        # Decoder: two stride-2 transposed convs restore the original size.
        decoder = [
            nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 3, 9, stride=1, padding=4),
            nn.Tanh(),
        ]
        # Same positional Sequential layout as before, so state_dict keys
        # are unchanged.
        self.model = nn.Sequential(*encoder, *bottleneck, *decoder)

    def forward(self, image):
        return self.model(image)

# The image transformation network being trained.
model = TransformerNet().to(device)

# -----------------------------
# 4. Loss network (VGG16)
# -----------------------------
# Frozen pretrained VGG16, used only to extract perceptual features.
# NOTE(review): `pretrained=True` is deprecated in newer torchvision in
# favor of `weights=VGG16_Weights.IMAGENET1K_V1` -- confirm library version.
vgg = models.vgg16(pretrained=True).features.to(device).eval()
for param in vgg.parameters():
    param.requires_grad = False

def gram_matrix(y):
    """Return the batched Gram matrix of a feature map.

    y is a (b, ch, h, w) tensor; the result is (b, ch, ch), normalized by
    the number of elements per channel map times the channel count.
    """
    b, ch, h, w = y.size()
    flat = y.reshape(b, ch, h * w)
    gram = flat.bmm(flat.transpose(1, 2))
    return gram / (ch * h * w)

def get_features(x, model, layers):
    """Run x through model's children in order, collecting named outputs.

    `layers` is a collection of child-module names (strings, e.g. '8');
    returns the intermediate activations in network traversal order --
    NOT in the order the names appear in `layers`.
    """
    collected = []
    out = x
    for name, child in model.named_children():
        out = child(out)
        if name in layers:
            collected.append(out)
    return collected

# VGG16 `features` indices. The comments below name the activation actually
# at each index -- the original labels were off (index 8 is relu2_2, not
# relu3_3, per torchvision's VGG16 layer layout).
content_layers = ['8']   # relu2_2
style_layers = ['1', '6', '11', '20']  # relu1_1, relu2_1, relu3_1, relu4_2

# Precompute style Gram matrices once; the style image is fixed.
style_features = get_features(style_img, vgg, style_layers)
style_grams = [gram_matrix(f) for f in style_features]

# -----------------------------
# 5. Training
# -----------------------------
optimizer = optim.Adam(model.parameters(), lr=1e-3)
mse_loss = nn.MSELoss()

num_epochs = 2
print("开始训练...")
for epoch in range(num_epochs):
    for i, (content_imgs, _) in enumerate(content_loader):
        content_imgs = content_imgs.to(device)

        optimizer.zero_grad()
        output = model(content_imgs)

        # BUG FIX: get_features returns activations in network traversal
        # order, so the old single call with content_layers + style_layers
        # (['8', '1', '6', '11', '20']) put the relu1_1 *style* feature at
        # index 0 -- it was then compared against content_features[0]
        # (relu2_2, a different shape), and output_features[1:] was
        # misaligned with style_grams. Separate passes per layer group keep
        # every tensor paired with the right loss term.
        output_content = get_features(output, vgg, content_layers)
        output_style = get_features(output, vgg, style_layers)
        content_features = get_features(content_imgs, vgg, content_layers)

        # Content loss: match VGG features of output vs. the content image.
        content_loss = mse_loss(output_content[0], content_features[0])

        # Style loss: match Gram matrices at each style layer.
        style_loss = 0
        for of, sg in zip(output_style, style_grams):
            og = gram_matrix(of)
            style_loss += mse_loss(og, sg.expand_as(og))

        # NOTE(review): model outputs are in [-1, 1] (Tanh) while the VGG
        # targets come from [0, 1] inputs; consider rescaling `output`
        # before the VGG pass -- confirm the intended range convention.
        total_loss = content_loss + 5e5 * style_loss
        total_loss.backward()
        optimizer.step()

        if i % 20 == 0:
            print(f"Epoch [{epoch+1}/{num_epochs}], Step [{i}/{len(content_loader)}], Loss: {total_loss.item():.4f}")

# -----------------------------
# 6. Save the trained model
# -----------------------------
# Only the state_dict is saved; reload with
# TransformerNet().load_state_dict(torch.load("papercut_style_net.pth")).
torch.save(model.state_dict(), "papercut_style_net.pth")
print("已保存模型到 papercut_style_net.pth")

# -----------------------------
# 7. Inference & display
# -----------------------------
def stylize_and_show(img_path):
    """Stylize the image at img_path with the trained model and display it.

    Uses the module-level `transform` (resize + 256x256 center crop),
    `model`, and `device`; shows the result with matplotlib.
    """
    # BUG FIX: .convert("RGB") guards against grayscale or RGBA inputs,
    # which would otherwise fail the 3-channel first conv layer.
    image = Image.open(img_path).convert("RGB")
    image = transform(image).unsqueeze(0).to(device)
    with torch.no_grad():
        output = model(image).cpu().clamp(-1, 1)
    # Map the Tanh output from [-1, 1] back to [0, 1] for display.
    output = (output + 1) / 2
    plt.imshow(output.squeeze(0).permute(1, 2, 0))
    plt.axis("off")
    plt.show()

stylize_and_show("content.jpg")
