from .DW_EncoderDecoder import *
from .Patch_Discriminator import Patch_Discriminator
import torch
import kornia.losses
import lpips
from transformers import CLIPProcessor, CLIPModel
import sys, os
import torch.nn.functional as F
from typing import Optional
import math
# NOTE(review): hard-coded absolute Windows path to pretrained AlexNet weights;
# not referenced anywhere in this file -- presumably consumed elsewhere, confirm before removing.
lpip_model_path = "H:/watermark/backbone/checkpoints/alexnet-owt-7be5be79.pth"


class Network(nn.Module):
    """End-to-end training wrapper for the dual-watermark system.

    Bundles the DW encoder/decoder (with frozen noise layers), a PatchGAN
    discriminator trained with a relativistic least-squares objective, a
    projector mapping CLIP text embeddings into the watermark-message space,
    and a transformer text decoder that recovers token logits from the
    decoded messages.
    """

    def __init__(self, message_length, noise_layers_R, noise_layers_F, device, batch_size, lr, beta1, attention_encoder,
                 attention_decoder, weight):
        """Build sub-networks, loss criteria and the two Adam optimizers.

        weight: sequence of four loss weights --
        [encoder MSE, discriminator, text-R, text-F].
        """
        super(Network, self).__init__()
        # device
        self.device = device
        # loss function
        self.criterion_MSE = nn.MSELoss().to(device)
        # self.criterion_LPIPS = lpips.LPIPS(net='alex').to(device)
        self.criterion_LPIPS = lpips.LPIPS().to(device)
        self.criterion_CE = torch.nn.CrossEntropyLoss().to(device)
        # weight of encoder-decoder loss
        self.encoder_weight = weight[0]
        self.discriminator_weight = weight[1]
        self.text_weight_R = weight[2]
        self.text_weight_F = weight[3]
        # network
        self.encoder_decoder = DW_EncoderDecoder(message_length, noise_layers_R, noise_layers_F, attention_encoder,
                                                 attention_decoder).to(device)
        # patchGAN
        self.discriminator = Patch_Discriminator().to(device)
        # CLIP projection head (CLIP text embedding -> message space)
        self.text_projector = TextProjector().to(device)
        # text_decoder (wrapper around ClipConditionedTransformerDecoder)
        # NOTE(review): the TextDecoder defined later in this file is a
        # standalone module with no inner ".model" attribute -- see the note
        # in load_model_txtdec below.
        self.text_decoder = TextDecoder().to(device)
        # available_devices = list(range(torch.cuda.device_count()))
        available_devices = ['cuda:0']
        print(f"available_devices：{available_devices}")

        self.encoder_decoder = torch.nn.DataParallel(self.encoder_decoder, device_ids=available_devices)
        self.discriminator = torch.nn.DataParallel(self.discriminator, device_ids=available_devices)

        # mark "cover" as 1, "encoded" as -1
        self.label_cover = 1.0
        self.label_encoded = - 1.0

        # the noise layers are fixed -- never trained
        for p in self.encoder_decoder.module.noise.parameters():
            p.requires_grad = False

        # optimizer
        # projector and text decoder are optimized jointly with the encoder/decoder
        self.opt_encoder_decoder = torch.optim.Adam(
            list(self.encoder_decoder.parameters()) +
            list(self.text_projector.parameters()) +
            list(self.text_decoder.parameters()),
            lr=lr, betas=(beta1, 0.999)
        )
        self.opt_discriminator = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(beta1, 0.999))

    def train(self, images: torch.Tensor, messages: torch.Tensor, masks: torch.Tensor, ids: torch.Tensor):
        """Run one optimization step: discriminator first, then generator.

        Returns a dict of scalar loss tensors plus PSNR/SSIM of the encoded
        images.

        NOTE(review): this method shadows nn.Module.train(mode=True), so the
        standard train/eval mode-switching API is hidden on this class --
        confirm no caller relies on the inherited behavior.
        """
        self.encoder_decoder.train()
        self.discriminator.train()

        with torch.enable_grad():
            # use device to compute
            images, masks, messages, ids = images.to(self.device), masks.to(self.device), messages.to(self.device), ids.to(self.device)
            # project the CLIP text embedding into the watermark-message space
            messages = self.text_projector(messages)
            # sys.stdout = sys.old_stdout
            # print(f"\n############in {self.__class__}###############\n")
            # print(self.encoder_decoder.module.noise.noise[14].G.state_dict().keys())
            # Suppress prints emitted during the forward pass.
            # NOTE(review): open(os.devnull) leaks one file handle per call and
            # is not exception-safe; contextlib.redirect_stdout would be safer.
            sys.stdout = open(os.devnull, 'w')
            encoded_images, decoded_messages_R, decoded_messages_F = self.encoder_decoder(
                images, messages, masks)
            sys.stdout = sys.__stdout__

            '''
            train discriminator
            '''
            for p in self.discriminator.parameters():
                p.requires_grad = True

            self.opt_discriminator.zero_grad()

            # The discriminator separates cover images from watermarked images.
            # RAW : target label for image should be "cover"(1)
            d_label_cover = self.discriminator(images)
            #d_cover_loss = self.criterion_MSE(d_label_cover, torch.ones_like(d_label_cover))
            #d_cover_loss.backward()

            # GAN : target label for encoded image should be "encoded"(-1)
            d_label_encoded = self.discriminator(encoded_images.detach())
            #d_encoded_loss = self.criterion_MSE(d_label_encoded, torch.zeros_like(d_label_encoded))
            #d_encoded_loss.backward()

            # relativistic (average) least-squares discriminator loss
            d_loss = self.criterion_MSE(d_label_cover - torch.mean(d_label_encoded),
                                        self.label_cover * torch.ones_like(d_label_cover)) + \
                     self.criterion_MSE(d_label_encoded - torch.mean(d_label_cover),
                                        self.label_encoded * torch.ones_like(d_label_encoded))
            d_loss.backward()

            self.opt_discriminator.step()

            '''
            train encoder and decoder
            '''
            # Make it a tiny bit faster
            for p in self.discriminator.parameters():
                p.requires_grad = False

            self.opt_encoder_decoder.zero_grad()

            # GAN : the generator wants encoded images to score like covers,
            # so the relativistic targets are swapped relative to d_loss.
            g_label_cover = self.discriminator(images)
            g_label_encoded = self.discriminator(encoded_images)
            g_loss_on_discriminator = self.criterion_MSE(g_label_cover - torch.mean(g_label_encoded),
                                                         self.label_encoded * torch.ones_like(g_label_cover)) + \
                                      self.criterion_MSE(g_label_encoded - torch.mean(g_label_cover),
                                                         self.label_cover * torch.ones_like(g_label_encoded))

            # RAW : the encoded image should be similar to cover image
            g_loss_on_encoder_MSE = self.criterion_MSE(encoded_images, images)
            # LPIPS is computed for logging only -- it is NOT part of g_loss below.
            g_loss_on_encoder_LPIPS = torch.mean(self.criterion_LPIPS(encoded_images, images))
            # decode text from the recovered messages (teacher forcing with ids)
            decoded_text_R = self.text_decoder(decoded_messages_R, ids)
            decoded_text_F = self.text_decoder(decoded_messages_F, ids)
            # flatten (T, B, V) -> (B*T, V) for cross-entropy
            decoded_text_R = decoded_text_R.transpose(0, 1).contiguous().view(-1, decoded_text_R.size(-1))  # [B*T, V]
            decoded_text_F = decoded_text_F.transpose(0, 1).contiguous().view(-1, decoded_text_F.size(-1))

            labels = ids.contiguous().view(-1).long()                                 # [B*T]
            # text losses
            g_loss_on_TEXT_decoder_R = self.criterion_CE(decoded_text_R, labels)
            # F branch: push the decoded-F token distribution toward uniform
            # (KL between predicted log-probs and a uniform target), i.e. the
            # fake branch should carry no recoverable text.

            p = F.log_softmax(decoded_text_F, dim=-1)
            uniform_target = torch.full_like(p, 1.0 / p.size(-1))
            g_loss_on_TEXT_decoder_F = F.kl_div(p, uniform_target, reduction='batchmean')
            # full loss
            g_loss = self.discriminator_weight * g_loss_on_discriminator + self.encoder_weight * g_loss_on_encoder_MSE + \
                        self.text_weight_R * g_loss_on_TEXT_decoder_R + self.text_weight_F * g_loss_on_TEXT_decoder_F

            g_loss.backward()
            self.opt_encoder_decoder.step()

            # psnr (max_val=2 -- presumably images are in [-1, 1]; confirm)
            psnr = - kornia.losses.psnr_loss(encoded_images.detach(), images, 2)

            # ssim (kornia's ssim_loss is (1 - SSIM) / 2, so this recovers SSIM)
            ssim = 1 - 2 * kornia.losses.ssim_loss(encoded_images.detach(), images, window_size=11, reduction="mean")


        result = {
            "g_loss": g_loss,
            "psnr": psnr,
            "ssim": ssim,
            "g_loss_on_discriminator": g_loss_on_discriminator,
            "g_loss_on_encoder_MSE": g_loss_on_encoder_MSE,
            "g_loss_on_encoder_LPIPS": g_loss_on_encoder_LPIPS,
            "g_loss_on_TEXT_decoder_R": g_loss_on_TEXT_decoder_R,
            "g_loss_on_TEXT_decoder_F": g_loss_on_TEXT_decoder_F,
            "d_loss": d_loss
        }
        return result

    def validation(self, images: torch.Tensor, messages: torch.Tensor, masks: torch.Tensor, ids: torch.Tensor):
        """Evaluate one batch without gradient updates.

        Returns (metrics dict, (cover images, encoded images)).

        NOTE(review): unlike train(), ids is never moved to self.device here;
        this only works if the caller already placed ids correctly -- confirm.
        """
        self.encoder_decoder.eval()
        self.discriminator.eval()

        with torch.no_grad():
            # use device to compute
            images, messages, masks = images.to(self.device), messages.to(self.device), masks.to(self.device)
            messages = self.text_projector(messages)
            encoded_images, decoded_messages_R, decoded_messages_F = self.encoder_decoder(
                images, messages, masks)

            '''
            validate discriminator
            '''
            # RAW : target label for image should be "cover"(1)
            d_label_cover = self.discriminator(images)
            #d_cover_loss = self.criterion_MSE(d_label_cover, torch.ones_like(d_label_cover))

            # GAN : target label for encoded image should be "encoded"(-1)
            d_label_encoded = self.discriminator(encoded_images.detach())
            #d_encoded_loss = self.criterion_MSE(d_label_encoded, torch.zeros_like(d_label_encoded))

            # relativistic (average) least-squares discriminator loss
            d_loss = self.criterion_MSE(d_label_cover - torch.mean(d_label_encoded),
                                        self.label_cover * torch.ones_like(d_label_cover)) + \
                     self.criterion_MSE(d_label_encoded - torch.mean(d_label_cover),
                                        self.label_encoded * torch.ones_like(d_label_encoded))

            '''
            validate encoder and decoder
            '''

            # GAN : generator-side relativistic loss (targets swapped vs d_loss)
            g_label_cover = self.discriminator(images)
            g_label_encoded = self.discriminator(encoded_images)
            g_loss_on_discriminator = self.criterion_MSE(g_label_cover - torch.mean(g_label_encoded),
                                                         self.label_encoded * torch.ones_like(g_label_cover)) + \
                                      self.criterion_MSE(g_label_encoded - torch.mean(g_label_cover),
                                                         self.label_cover * torch.ones_like(g_label_encoded))

            # RAW : the encoded image should be similar to cover image
            g_loss_on_encoder_MSE = self.criterion_MSE(encoded_images, images)
            g_loss_on_encoder_LPIPS = torch.mean(self.criterion_LPIPS(encoded_images, images))


            # full loss
            # unstable g_loss_on_discriminator is not used during validation
            # NOTE(review): the comment above is stale -- g_loss below DOES
            # include g_loss_on_discriminator.
            # decode text from the recovered messages (teacher forcing with ids)
            decoded_text_R = self.text_decoder(decoded_messages_R,ids)
            decoded_text_F = self.text_decoder(decoded_messages_F,ids)
            # flatten (T, B, V) -> (B*T, V) for cross-entropy
            decoded_text_R = decoded_text_R.transpose(0, 1).contiguous().view(-1, decoded_text_R.size(-1))  # [B*T, V]
            decoded_text_F = decoded_text_F.transpose(0, 1).contiguous().view(-1, decoded_text_F.size(-1))

            labels = ids.contiguous().view(-1).long()                                 # [B*T]
            # text losses
            g_loss_on_TEXT_decoder_R = self.criterion_CE(decoded_text_R, labels)
            # NOTE(review): the F metric here is negative cross-entropy, while
            # train() uses KL-to-uniform for the F branch -- the two are not
            # directly comparable; confirm this asymmetry is intentional.
            g_loss_on_TEXT_decoder_F = -self.criterion_CE(decoded_text_F, labels)
            # full loss
            g_loss = self.discriminator_weight * g_loss_on_discriminator + self.encoder_weight * g_loss_on_encoder_MSE + \
                        self.text_weight_R * g_loss_on_TEXT_decoder_R + self.text_weight_F * g_loss_on_TEXT_decoder_F

            # psnr (max_val=2 -- presumably images are in [-1, 1]; confirm)
            psnr = - kornia.losses.psnr_loss(encoded_images.detach(), images, 2)

            # ssim (kornia's ssim_loss is (1 - SSIM) / 2, so this recovers SSIM)
            ssim = 1 - 2 * kornia.losses.ssim_loss(encoded_images.detach(), images, window_size=11, reduction="mean")



        result = {
            "g_loss": g_loss,
            "psnr": psnr,
            "ssim": ssim,
            "g_loss_on_discriminator": g_loss_on_discriminator,
            "g_loss_on_encoder_MSE": g_loss_on_encoder_MSE,
            "g_loss_on_encoder_LPIPS": g_loss_on_encoder_LPIPS,
            "g_loss_on_TEXT_decoder_R": g_loss_on_TEXT_decoder_R,
            "g_loss_on_TEXT_decoder_F": g_loss_on_TEXT_decoder_F,
            "d_loss": d_loss
        }

        return result, (images, encoded_images)

    def get_logits(self, images: torch.Tensor, messages: torch.Tensor,masks:torch.Tensor, ids: torch.Tensor):
        """Return raw text-decoder logits for the R and F branches (no grads).

        NOTE(review): as in validation(), ids is not moved to self.device.
        """
        self.encoder_decoder.eval()
        self.discriminator.eval()
        with torch.no_grad():
            # use device to compute
            images, messages, masks = images.to(self.device), messages.to(self.device), masks.to(self.device)
            messages = self.text_projector(messages)
            encoded_images, decoded_messages_R, decoded_messages_F = self.encoder_decoder(
                images, messages, masks)
            decoded_text_R = self.text_decoder(decoded_messages_R,ids)
            decoded_text_F = self.text_decoder(decoded_messages_F,ids)
        return decoded_text_R,decoded_text_F

    def decoded_message_error_rate(self, message, decoded_message):
        """Bit error rate for one message pair.

        Both tensors are binarised by sign (> 0) and the fraction of
        differing bits over the message length is returned as a float.
        """
        length = message.shape[0]

        message = message.gt(0)
        decoded_message = decoded_message.gt(0)
        error_rate = float(sum(message != decoded_message)) / length
        return error_rate

    def decoded_message_error_rate_batch(self, messages, decoded_messages):
        """Mean bit error rate over a batch of message pairs."""
        error_rate = 0.0
        batch_size = len(messages)
        for i in range(batch_size):
            error_rate += self.decoded_message_error_rate(messages[i], decoded_messages[i])
        error_rate /= batch_size
        return error_rate

    def save_model(self, path_encoder_decoder: str, path_discriminator: str, path_text_projector: str, path_text_decoder: str):
        """Persist all four sub-networks as plain state dicts."""
        # save state dicts (projector and decoder saved as state_dict)
        # encoder_decoder and discriminator are DataParallel modules -> save module.state_dict()
        torch.save(self.encoder_decoder.module.state_dict(), path_encoder_decoder)
        torch.save(self.discriminator.module.state_dict(), path_discriminator)
        torch.save(self.text_projector.state_dict(), path_text_projector)
        torch.save(self.text_decoder.state_dict(), path_text_decoder)

    def load_model(self, path_encoder_decoder: str, path_discriminator: str, path_text_projector: str, path_text_decoder: str):
        """Load all four sub-networks from their checkpoint paths.

        NOTE: torch.load unpickles arbitrary objects -- only load trusted files.
        """
        self.load_model_ed(path_encoder_decoder)
        self.load_model_dis(path_discriminator)
        self.load_model_pro(path_text_projector)
        self.load_model_txtdec(path_text_decoder)

    def load_model_ed(self, path_encoder_decoder: str):
        """Load encoder/decoder weights (non-strict: extra/missing keys allowed)."""
        self.encoder_decoder.module.load_state_dict(torch.load(path_encoder_decoder), strict=False)

    def load_model_txtdec(self, path_text_decoder: str):
        """
        Load a checkpoint exported by the text-decoder training script
        (may wrap the weights under a "model_state" key); handles
        text_decoder with or without a DataParallel wrapper.

        NOTE(review): both branches load into a ".model" attribute, but the
        TextDecoder class defined in this file has no such attribute -- this
        would raise AttributeError; confirm which TextDecoder implementation
        is actually in use.
        """
        ckpt = torch.load(path_text_decoder, map_location="cpu")
        if "model_state" in ckpt:
            state = ckpt["model_state"]
        else:
            state = ckpt
        # if text_decoder wrapped in DataParallel
        if hasattr(self.text_decoder, "module"):
            self.text_decoder.module.model.load_state_dict(state, strict=False)
        else:
            # our TextDecoder wrapper holds inner .model
            self.text_decoder.model.load_state_dict(state, strict=False)
        print(f"[INFO] Loaded text decoder weights from {path_text_decoder}")

    def load_model_dis(self, path_discriminator: str):
        """Load discriminator weights (strict key match)."""
        self.discriminator.module.load_state_dict(torch.load(path_discriminator))

    def load_model_pro(self, path_text_projector: str):
        """
        Load projector weights, whether or not text_projector is wrapped in
        DataParallel (i.e. has a .module attribute).
        """
        state = torch.load(path_text_projector, map_location="cpu")
        if hasattr(self.text_projector, "module"):
            self.text_projector.module.load_state_dict(state)
        else:
            self.text_projector.load_state_dict(state)


class TextProjector(nn.Module):
    """Project a CLIP text embedding down to the watermark-message dimension.

    A two-layer MLP (Linear -> ReLU -> Linear) followed by L2 normalization,
    so the projected message is always a unit vector (keeps cosine/semantic
    similarity numerically stable).
    """

    def __init__(self, in_dim=768, out_dim=128, hidden_dim=256):
        super().__init__()
        # Keep the attribute name "net" and the layer order stable:
        # existing checkpoints address the weights as "net.0.*" / "net.2.*".
        mlp_layers = [
            nn.Linear(in_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, out_dim),
        ]
        self.net = nn.Sequential(*mlp_layers)

    def forward(self, x):
        """Map x of shape [B, in_dim] to a unit-norm tensor [B, out_dim]."""
        projected = self.net(x)
        return F.normalize(projected, dim=-1)


"""
Standalone TextDecoder model (not a wrapper).
Accepts:
  - emb: (B, clip_emb_dim) float tensor  -- clip-like embedding from encoder
  - tgt_emb: either
       * float tensor (T, B, H) token embeddings (teacher forcing)
       * long tensor (B, T) or (T, B) token ids
Returns:
  - logits: (T, B, V)  (matches original Dual_Mark expectation)
"""



class TextDecoder(nn.Module):
    """Standalone transformer text decoder conditioned on a clip-like embedding.

    forward() takes the conditioning embedding (B, clip_emb_dim) and either
    teacher-forcing token ids or pre-computed token embeddings, and returns
    token logits shaped (T, B, V).
    """

    def __init__(
        self,
        vocab_size: int = 50257,
        d_model: int = 256,
        nhead: int = 4,
        num_layers: int = 3,
        dim_feedforward: int = 1024,
        clip_emb_dim: int = 128,
        max_len: int = 64,
        dropout: float = 0.3,
        pad_token_id: int = 0,
    ):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.pad_token_id = pad_token_id
        self.max_len = max_len

        # token & positional embeddings
        self.token_emb = nn.Embedding(vocab_size, d_model, padding_idx=pad_token_id)
        self.pos_emb = nn.Embedding(max_len, d_model)
        self.dropout = nn.Dropout(dropout)

        # Transformer decoder stack
        # (default batch_first=False: all decoder tensors are sequence-first, (T, B, d))
        decoder_layer = nn.TransformerDecoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation="gelu",
        )
        self.decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)

        # project clip embedding to decoder memory dim
        self.clip_to_mem = nn.Linear(clip_emb_dim, d_model)

        # final projection to vocabulary
        self.output_fc = nn.Linear(d_model, vocab_size)

        # init params
        self._reset_parameters()

    def _reset_parameters(self):
        """Xavier-initialise every parameter with more than one dimension
        (including the embedding tables), overriding PyTorch's defaults."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    @staticmethod
    def generate_causal_mask(sz: int, device=None):
        """
        Create standard causal mask (float -inf on forbidden positions) of shape (sz, sz)
        Suitable to pass as tgt_mask to TransformerDecoder (PyTorch expects float mask with -inf).
        """
        # Upper triangle (excluding the diagonal) marks "future" positions.
        mask = torch.triu(torch.ones(sz, sz, device=device), diagonal=1).bool()
        # Additive mask: -inf where attention is forbidden, 0 elsewhere.
        mask = mask.float().masked_fill(mask, float("-inf")).masked_fill(~mask, 0.0)
        return mask

    def forward(
        self,
        emb: torch.Tensor,            # (B, clip_emb_dim)
        tgt_emb: torch.Tensor,        # (T,B,H) float embeddings OR (B,T)/(T,B) long token ids
        causal_mask: Optional[torch.Tensor] = None,
        key_padding_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        emb: (B, clip_emb_dim)
        tgt_emb: (T,B,H) float OR (B,T)/(T,B) long
        Returns logits shaped (T, B, V)
        """
        device = next(self.parameters()).device
        emb = emb.to(device)

        if tgt_emb is None:
            raise ValueError("tgt_emb is None: expected token ids or token embeddings (teacher forcing).")

        # Case A: float embeddings provided (T, B, H) -> map back to token ids
        if tgt_emb.dtype.is_floating_point:
            if tgt_emb.dim() != 3:
                raise ValueError(f"Expected float tgt_emb shape (T,B,H), got {tuple(tgt_emb.shape)}")
            T, B, H = tgt_emb.shape
            if H != self.d_model:
                raise ValueError(f"Embedding dim mismatch: got H={H}, model d_model={self.d_model}")

            # permute -> (B, T, H) and flatten to (B*T, H)
            emb_tokens = tgt_emb.permute(1, 0, 2).contiguous()  # (B, T, H)
            flat = emb_tokens.view(-1, H)  # (B*T, H)

            # vocab embeddings (V, H)
            vocab_emb = self.token_emb.weight.detach().to(device)  # (V, H)

            # similarity dot product and argmax to map to token ids (no grad)
            # WARNING: this computes (B*T, V) matrix -> memory/cost ~ V; costly for big vocabs.
            # NOTE: token recovery runs under no_grad, so no gradient flows
            # back into tgt_emb through this path.
            with torch.no_grad():
                sims = torch.matmul(flat.to(device), vocab_emb.t())  # (B*T, V)
                ids_flat = sims.argmax(dim=-1)  # (B*T)
                tokens_in = ids_flat.view(B, T).to(device).long()  # (B, T)

        else:
            # Case B: integer token ids provided
            ids = tgt_emb
            if ids.dim() != 2:
                raise ValueError(f"Expected token ids of shape (B,T) or (T,B), got {tuple(ids.shape)}")
            batch_size = emb.shape[0]
            # Detect orientation: if first dim equals batch_size -> (B,T)
            # NOTE: ambiguous when the tensor is square (T == B); the (B, T)
            # interpretation wins.
            if ids.shape[0] == batch_size:
                tokens_in = ids.to(device).long()
            elif ids.shape[1] == batch_size:
                # assume (T,B) -> permute to (B,T)
                tokens_in = ids.permute(1, 0).contiguous().to(device).long()
            else:
                # ambiguous: assume (B,T)
                tokens_in = ids.to(device).long()

            B, T = tokens_in.shape

        # Build token + pos embeddings for decoder query (B,T,d_model)
        pos = torch.arange(0, T, device=device).unsqueeze(0).expand(emb.shape[0], T)
        token_embeddings = self.token_emb(tokens_in) * math.sqrt(self.d_model)  # (B, T, d)
        pos_embeddings = self.pos_emb(pos)  # (B, T, d)
        query_seq = self.dropout(token_embeddings + pos_embeddings).transpose(0, 1)  # (T, B, d)

        # Project clip embedding -> memory (1, B, d)
        clip_memory = self.clip_to_mem(emb)  # (B, d)
        clip_memory = clip_memory.unsqueeze(0)  # (1, B, d)

        # Create causal mask if not provided
        if causal_mask is None:
            causal_mask = self.generate_causal_mask(T, device=device)

        # Forward through TransformerDecoder
        decoder_output = self.decoder(query_seq, clip_memory, tgt_mask=causal_mask, tgt_key_padding_mask=key_padding_mask)
        decoder_output = decoder_output.transpose(0, 1)  # (B, T, d)
        logits = self.output_fc(decoder_output)  # (B, T, V)

        # Return (T, B, V) to match expected format
        return logits.transpose(0, 1).contiguous()  # (T, B, V)

    def load_pretrained(self, ckpt_path: str):
        """
        Load checkpoint exported from training script.
        Supports dict with keys 'model_state' or 'state_dict', or raw state_dict.

        NOTE: torch.load unpickles arbitrary objects -- only load trusted
        checkpoints.
        """
        ckpt = torch.load(ckpt_path, map_location="cpu")
        if isinstance(ckpt, dict) and "model_state" in ckpt:
            state = ckpt["model_state"]
        elif isinstance(ckpt, dict) and "state_dict" in ckpt:
            state = ckpt["state_dict"]
        else:
            state = ckpt
        # load into this module (allow missing keys)
        self.load_state_dict(state, strict=False)
        print(f"[INFO] Loaded pretrained text decoder from {ckpt_path}")
