import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import math
import hashlib


# Sinusoidal timestep embedding layer.
class TimeEmbedding(nn.Module):
    """Map integer diffusion timesteps to sinusoidal embeddings of size `dim`.

    Uses the Transformer-style geometric frequency ladder: dim/2 frequencies,
    each contributing a sin and a cos component.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        # exp(-log(10000) * k / dim) for even k — a geometric frequency ladder.
        exponents = torch.arange(0, dim, 2, dtype=torch.float32)
        inv_freq = torch.exp(exponents * (-math.log(10000) / dim))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, input):
        # Broadcast timesteps against frequencies: (B,) -> (B, dim/2).
        scaled = input.unsqueeze(-1).type_as(self.inv_freq) * self.inv_freq
        # Concatenate the sin and cos halves -> (B, dim).
        return torch.cat([torch.sin(scaled), torch.cos(scaled)], dim=-1)


# Minimal UNet-like noise predictor.
class SimpleUNet(nn.Module):
    """Small convolutional denoiser for DDPM training.

    Bug fix: the time embedding was computed in ``forward`` but never used,
    so the model could not condition on the diffusion timestep at all. It is
    now projected to the bottleneck width and added to the feature map.
    """

    def __init__(self, in_channels=1, out_channels=1, time_dim=128):
        super().__init__()
        self.time_embedding = TimeEmbedding(time_dim)
        # Project the time embedding to the bottleneck channel count (128).
        self.time_proj = nn.Linear(time_dim, 128)
        self.down1 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1)
        self.down2 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.up1 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.out = nn.Conv2d(64, out_channels, kernel_size=3, padding=1)

    def forward(self, x, t):
        # (B, time_dim) -> (B, 128, 1, 1) so it broadcasts over H x W.
        t_emb = self.time_proj(self.time_embedding(t))
        t_emb = t_emb.unsqueeze(-1).unsqueeze(-1)
        x = F.relu(self.down1(x))
        x = F.relu(self.down2(x) + t_emb)  # inject timestep conditioning
        x = F.relu(self.up1(x))
        return self.out(x)


# Diffusion (DDPM) noise-schedule constants, precomputed once at import time.
T = 1000  # number of diffusion steps
beta_start = 0.0001
beta_end = 0.02
# Linear beta schedule and the derived quantities used by q_sample
# (forward process) and p_sample (reverse process).
betas = torch.linspace(beta_start, beta_end, T)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
# alpha-bar shifted right by one, with alpha-bar at "t = -1" defined as 1.
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.0)
sqrt_recip_alphas = torch.sqrt(1.0 / alphas)
sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)
sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod)
# Variance of the posterior q(x_{t-1} | x_t, x_0).
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)


# Gather per-sample schedule values for timesteps `t`, shaped for broadcasting.
def extract(a, t, x_shape):
    """Pick a[t] per batch element and reshape to (B, 1, ..., 1) matching x_shape's rank."""
    values = a.gather(-1, t)
    broadcast_shape = (t.shape[0],) + (1,) * (len(x_shape) - 1)
    return values.reshape(broadcast_shape)


# Forward diffusion: sample x_t ~ q(x_t | x_0) in closed form.
def q_sample(x_start, t, noise=None):
    """Return sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps for per-sample timesteps t."""
    if noise is None:
        noise = torch.randn_like(x_start)
    mean_coef = extract(sqrt_alphas_cumprod, t, x_start.shape)
    noise_coef = extract(sqrt_one_minus_alphas_cumprod, t, x_start.shape)
    return mean_coef * x_start + noise_coef * noise


# Convert text into a fixed-size watermark plane via a SHA-256 digest.
def text_to_watermark(text, image_shape):
    """Derive a deterministic watermark tensor from `text`.

    The 32 SHA-256 digest bytes are mapped to [-1, 1] and zero-padded to
    fill one H x W plane, where H and W are taken from `image_shape`
    (expected NCHW).

    Bug fix: the original ``view(image_shape)`` crashed whenever
    ``image_shape[0] > 1`` because the vector only has H*W elements. The
    watermark is now returned with shape (1, 1, H, W), which broadcasts over
    any batch size in ``embed_watermark`` and is identical to the old result
    for the batch-1 usage in this file.
    """
    digest = hashlib.sha256(text.encode()).digest()

    height, width = image_shape[-2], image_shape[-1]
    num_elements = height * width

    # Map each digest byte (0..255) to [-1, 1]; iterating bytes yields ints.
    values = [b / 255.0 * 2 - 1 for b in digest[:num_elements]]
    watermark_vector = torch.tensor(values, dtype=torch.float32)

    # The digest is only 32 bytes; pad the rest of the plane with zeros.
    if watermark_vector.numel() < num_elements:
        padding = torch.zeros(num_elements - watermark_vector.numel())
        watermark_vector = torch.cat([watermark_vector, padding])

    # Shape (1, 1, H, W): one watermark plane, broadcastable over the batch.
    return watermark_vector.view(1, 1, height, width)


# Additively embed a (broadcastable) watermark into an image batch.
def embed_watermark(image, watermark, strength=0.01):
    """Return image + strength * watermark, expanding the watermark to image's shape."""
    expanded = watermark.expand_as(image)
    return image + expanded * strength


# Recover the flattened watermark by differencing against the clean image.
def extract_watermark(image_with_watermark, original_image, strength=0.01):
    """Invert embed_watermark: (marked - original) / strength, flattened to 1-D."""
    delta = image_with_watermark - original_image
    return (delta / strength).view(-1)


# Training loop: fit the UNet to predict the noise added to watermarked MNIST.
def train():
    """Train SimpleUNet as a DDPM noise predictor on watermarked MNIST.

    Downloads MNIST to ./data on first run. Returns the trained model.

    Improvement: the watermark depends only on the constant text and the
    fixed 28x28 image size, so it is computed once before the loop instead
    of re-hashing on every batch.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = SimpleUNet().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.MSELoss()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_dataset = datasets.MNIST(root='./data', train=True,
                                   download=True, transform=transform)
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)

    # Loop-invariant: hash the watermark text once, not per batch.
    text = "这是一段水印文字"  # custom watermark text
    watermark = text_to_watermark(text, (1, 1, 28, 28)).to(device)

    num_epochs = 10
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        for images, _ in train_loader:
            images = images.to(device)
            images_with_watermark = embed_watermark(images, watermark)

            optimizer.zero_grad()
            # Random timestep per sample; the model learns to predict the injected noise.
            t = torch.randint(0, T, (images.shape[0],), device=device).long()
            noise = torch.randn_like(images_with_watermark)
            noisy_images = q_sample(images_with_watermark, t, noise)
            predicted_noise = model(noisy_images, t)
            loss = criterion(predicted_noise, noise)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {running_loss / len(train_loader)}')
    return model


# One reverse-diffusion step: sample x_{t-1} given x_t.
@torch.no_grad()
def p_sample(model, x, t, t_index):
    """Draw x_{t-1} from p(x_{t-1} | x_t) using the model's noise prediction."""
    betas_t = extract(betas, t, x.shape)
    sqrt_one_minus_abar_t = extract(sqrt_one_minus_alphas_cumprod, t, x.shape)
    recip_sqrt_alpha_t = extract(sqrt_recip_alphas, t, x.shape)

    # Posterior mean: 1/sqrt(alpha_t) * (x_t - beta_t / sqrt(1 - abar_t) * eps_theta).
    predicted_noise = model(x, t)
    model_mean = recip_sqrt_alpha_t * (
        x - betas_t * predicted_noise / sqrt_one_minus_abar_t
    )

    # The final step (t == 0) is deterministic; otherwise add posterior noise.
    if t_index == 0:
        return model_mean
    variance_t = extract(posterior_variance, t, x.shape)
    return model_mean + torch.sqrt(variance_t) * torch.randn_like(x)


# Full ancestral sampling: start from Gaussian noise and denoise over T steps.
@torch.no_grad()
def p_sample_loop(model, shape):
    """Run the reverse process from pure noise; return all intermediates as numpy arrays."""
    device = next(model.parameters()).device
    batch = shape[0]
    img = torch.randn(shape, device=device)
    history = []
    for step in reversed(range(T)):
        t = torch.full((batch,), step, device=device, dtype=torch.long)
        img = p_sample(model, img, t, step)
        history.append(img.cpu().numpy())
    return history


def watermark_to_text(extracted_watermark, original_text, image_shape):
    """Verify an extracted watermark against the one derived from `original_text`.

    Bug fix: the original body recursively called itself (infinite recursion)
    with undefined globals (`text`, `shape`, `original_images`), so it could
    never return. Those dead lines are removed; the function now just
    measures cosine similarity against the expected watermark and thresholds
    it.

    Returns `original_text` when the cosine similarity exceeds the threshold,
    otherwise a mismatch message.
    """
    # Regenerate the expected watermark from the candidate text.
    expected_watermark = text_to_watermark(original_text, image_shape)
    expected_watermark = expected_watermark.view(-1).to(extracted_watermark.device)

    # Cosine similarity between the flattened watermarks.
    similarity = F.cosine_similarity(
        extracted_watermark.unsqueeze(0),
        expected_watermark.unsqueeze(0)
    )

    # Threshold chosen empirically; tune for the deployment setting.
    threshold = 0.8
    if similarity.item() > threshold:
        return original_text
    return "水印不匹配或已损坏"

# Entry point: train, sample, then embed/extract and verify the watermark.
if __name__ == "__main__":
    trained_model = train()
    shape = (1, 1, 28, 28)
    original_images = torch.randn(shape)
    text = "这是一段水印文字"
    watermark = text_to_watermark(text, original_images.shape).to(original_images.device)
    images_with_watermark = embed_watermark(original_images, watermark)

    sampled_images = p_sample_loop(trained_model, shape)
    # p_sample_loop returns numpy arrays; take the final denoised image.
    last_sampled_image = torch.from_numpy(sampled_images[-1])

    extracted_watermark = extract_watermark(last_sampled_image, original_images)
    # Bug fix: the extracted watermark was never verified; decode and report it.
    recovered_text = watermark_to_text(extracted_watermark, text, shape)
    print(f"提取的水印文本: {recovered_text}")
    print("Sampling completed and watermark extracted.")
