import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, models, datasets
from torch.utils.data import DataLoader
from PIL import Image
import os

# Train on GPU when available; every tensor/module below is moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# -------------------
# 1. Preprocessing & content dataset loading
# -------------------
# Resize the short side to 256, then center-crop to a fixed 256x256 square so
# every batch stacks cleanly; ToTensor scales pixels to [0, 1] floats.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(256),
    transforms.ToTensor()
])

# NOTE(review): ImageFolder requires at least one class subdirectory under the
# root ('.'); the class labels are ignored during training (images only).
content_dataset = datasets.ImageFolder('.', transform=transform)
content_loader = DataLoader(content_dataset, batch_size=4, shuffle=True)

# -------------------
# 2. Load the style image
# -------------------
# BUG FIX: convert to RGB so grayscale/CMYK/RGBA files cannot produce a tensor
# whose channel count differs from the 3 channels the networks expect.
style_image = Image.open("style.jpg").convert("RGB")
style_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.ToTensor()
])
# Add a batch dimension -> (1, 3, H, W) and move to the training device.
style_image = style_transform(style_image).unsqueeze(0).to(device)

# -------------------
# 3. Transformation Network
# -------------------
class TransformerNet(nn.Module):
    """Feed-forward image transformation network (Johnson-style).

    Encoder (two stride-2 convs) -> residual bottleneck -> decoder (two
    transposed convs). Input/output are (B, 3, H, W) tensors; shapes
    round-trip for the 256x256 crops used here (H, W divisible by 4 —
    presumably required in general, confirm before feeding other sizes).
    """

    def __init__(self):
        super().__init__()
        layers = [
            # Encoder: wide 9x9 receptive field first, then downsample twice.
            nn.Conv2d(3, 32, 9, stride=1, padding=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, 3, stride=2, padding=1),
            nn.ReLU(),
        ]
        # Three residual blocks at the bottleneck resolution.
        layers.extend(ResidualBlock(128) for _ in range(3))
        layers += [
            # Decoder: upsample back to the input resolution.
            nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 3, 9, stride=1, padding=4),
        ]
        # Same module order as before, so state_dict keys are unchanged.
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Stylize a batch of images."""
        return self.model(x)

class ResidualBlock(nn.Module):
    """Residual block: two 3x3 convs with affine instance norm, identity skip.

    Preserves shape; forward(x) = x + F(x).
    """

    def __init__(self, channels):
        super().__init__()
        conv_in = nn.Conv2d(channels, channels, 3, stride=1, padding=1)
        conv_out = nn.Conv2d(channels, channels, 3, stride=1, padding=1)
        self.block = nn.Sequential(
            conv_in,
            nn.InstanceNorm2d(channels, affine=True),
            nn.ReLU(),
            conv_out,
            nn.InstanceNorm2d(channels, affine=True),
        )

    def forward(self, x):
        # Identity skip connection around the conv branch.
        residual = self.block(x)
        return x + residual

# -------------------
# 4. Content & style losses
# -------------------
# Frozen VGG16 feature extractor used as the perceptual-loss network.
# NOTE(review): `pretrained=True` is the legacy torchvision API (newer
# releases use the `weights=` enum); also, VGG was trained on ImageNet-
# normalized inputs while this script feeds raw [0, 1] tensors — presumably
# an intentional simplification, but worth confirming.
vgg = models.vgg16(pretrained=True).features.to(device).eval()
for param in vgg.parameters():
    param.requires_grad = False

def gram_matrix(y):
    """Return the (b, ch, ch) Gram matrix of a (b, ch, h, w) feature map.

    Each entry is the inner product of a pair of channel maps, normalized by
    ch*h*w so the statistic does not scale with spatial resolution.
    """
    b, ch, h, w = y.shape
    flat = y.reshape(b, ch, h * w)
    gram = flat @ flat.transpose(1, 2)
    return gram / (ch * h * w)

# VGG16 `features` indices for the perceptual-loss layers (Johnson et al.).
# BUG FIX: the original indices did not match the layers named in their own
# comments — in torchvision's VGG16 feature stack, relu1_2 is index 3,
# relu2_2 is 8, relu3_3 is 15 and relu4_3 is 22 (the old '8' was relu2_2,
# and '1'/'6'/'20' were relu1_1/relu2_1/relu4_2).
content_layers = ['15']  # relu3_3
style_layers = ['3', '8', '15', '22']  # relu1_2, relu2_2, relu3_3, relu4_3

def get_features(x, model, layers):
    """Run `x` through `model`, collecting the named intermediate activations.

    Args:
        x: input tensor fed through the network.
        model: a container module (e.g. VGG16 `.features`, an nn.Sequential)
            whose direct children are named by integer position ('0', '1', ...).
        layers: collection of child names whose outputs should be captured.

    Returns:
        List of feature tensors in *model execution order* — NOT in the order
        of `layers`; callers must not assume argument-order alignment.
    """
    features = []
    # named_children() is the public API for iterating direct submodules
    # (the original reached into the private `_modules` dict).
    for name, layer in model.named_children():
        x = layer(x)
        if name in layers:
            features.append(x)
    return features

# -------------------
# 5. Training
# -------------------
model = TransformerNet().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
mse_loss = nn.MSELoss()

num_epochs = 2
# The style target is fixed, so compute its style-layer Gram matrices once up
# front (batch size 1; they are expanded across each batch inside the loop).
style_features = get_features(style_image, vgg, style_layers)
style_grams = [gram_matrix(sf) for sf in style_features]

for epoch in range(num_epochs):
    for batch in content_loader:
        content_imgs, _ = batch  # ImageFolder labels are unused
        content_imgs = content_imgs.to(device)

        optimizer.zero_grad()
        output = model(content_imgs)

        # BUG FIX: the original code extracted content + style features with a
        # single get_features(output, vgg, content_layers + style_layers) call,
        # but get_features returns activations in *model* order, not argument
        # order — so output_features[0] was a shallow style layer, making the
        # content loss compare mismatched feature maps and misaligning the
        # remaining features with style_grams. Extract the two sets separately.
        output_content = get_features(output, vgg, content_layers)
        output_styles = get_features(output, vgg, style_layers)
        content_features = get_features(content_imgs, vgg, content_layers)

        content_loss = mse_loss(output_content[0], content_features[0])

        # style_layers is listed in model order, so output_styles aligns
        # one-to-one with the precomputed style_grams.
        style_loss = 0
        for o_feat, t_gram in zip(output_styles, style_grams):
            o_gram = gram_matrix(o_feat)
            # Expand the single-image target Gram across the batch dimension.
            style_loss += mse_loss(o_gram, t_gram.expand_as(o_gram))

        total_loss = content_loss + 5e5 * style_loss  # 5e5 = style weight
        total_loss.backward()
        optimizer.step()

    # Reports the loss of the final batch of the epoch only.
    print(f"Epoch {epoch+1}, Loss: {total_loss.item()}")

# -------------------
# 6. Save the model
# -------------------
torch.save(model.state_dict(), "papercut_style_net.pth")
print("模型已保存：papercut_style_net.pth")
# -------------------
# 7. Test the model
# -------------------
model = TransformerNet().to(device)
# BUG FIX: map_location keeps the load working even when the checkpoint was
# written on a different device (e.g. trained on GPU, loaded on CPU).
model.load_state_dict(torch.load("papercut_style_net.pth", map_location=device))
model.eval()

# BUG FIX: convert to RGB so RGBA/grayscale photos don't yield a tensor whose
# channel count differs from the 3 channels the network expects.
img = Image.open("your_photo.jpg").convert("RGB")
img = transform(img).unsqueeze(0).to(device)
# Inference only: no_grad avoids building a needless autograd graph.
with torch.no_grad():
    output = model(img).cpu().squeeze(0).clamp(0, 1)
output_img = transforms.ToPILImage()(output)
output_img.save("styled_output.png")
