import torch
import numpy as np
import hashlib
import logging
from typing import Tuple, Optional

class WatermarkEmbedder:
    """
    Embed an imperceptible watermark into a label-noise transition matrix T.

    The watermark is a deterministic 64-bit pattern derived from a secret key
    via SHA-256. Each bit is encoded as an ordering constraint between two
    near-diagonal entries of one row of T, so the matrix stays row-stochastic
    while carrying a recoverable signature.
    """
    def __init__(self, watermark_key: str, watermark_strength: float = 0.01):
        """
        Args:
            watermark_key: Secret key used to derive the watermark pattern.
            watermark_strength: Magnitude of the perturbation applied to T.
        """
        self.watermark_key = watermark_key
        self.watermark_strength = watermark_strength
        self.watermark_pattern = self._generate_watermark_pattern(watermark_key)
        logging.info(f"Watermark pattern generation completed, length: {len(self.watermark_pattern)}")

    def _generate_watermark_pattern(self, key: str) -> np.ndarray:
        """Derive a deterministic 64-bit watermark pattern from the key."""
        digest = hashlib.sha256(key.encode()).digest()
        # Unpack every digest byte into its bits, least-significant bit first.
        all_bits = [(byte >> shift) & 1 for byte in digest for shift in range(8)]
        # Only the first 64 bits serve as the watermark.
        return np.array(all_bits[:64])

    def embed_watermark(self, T: torch.Tensor, num_classes: int) -> torch.Tensor:
        """
        Embed the watermark into transition matrix T.

        Args:
            T: Transition matrix of shape [num_classes, num_classes].
            num_classes: Number of classes (rows/columns of T).

        Returns:
            A watermarked copy of T; every touched row is re-normalized so it
            still sums to 1. The input tensor is not modified.
        """
        marked = T.clone()
        strength = self.watermark_strength
        pattern = self.watermark_pattern
        n_bits = len(pattern)

        # One bit per row, placed next to the diagonal so the perturbation
        # stays small. Two cells are pushed in opposite directions so that
        # their ordering encodes the bit.
        for row in range(min(num_classes, n_bits)):
            bit = pattern[row % n_bits]
            hi_col = (row + 1) % num_classes  # boosted when the bit is 1
            lo_col = (row - 1) % num_classes  # boosted when the bit is 0

            if bit == 1:
                marked[row, hi_col] += strength
                marked[row, lo_col] = torch.clamp(
                    marked[row, lo_col] - strength * 0.5, min=1e-12
                )
            else:
                marked[row, hi_col] = torch.clamp(
                    marked[row, hi_col] - strength * 0.5, min=1e-12
                )
                marked[row, lo_col] += strength

            # Keep the row a valid probability distribution.
            marked[row, :] = torch.clamp(marked[row, :], min=1e-12)
            total = marked[row, :].sum()
            if total > 0:
                marked[row, :] /= total

        return marked

    def extract_watermark(self, T: torch.Tensor, num_classes: int) -> np.ndarray:
        """
        Extract the watermark bit sequence from transition matrix T.

        Args:
            T: Transition matrix.
            num_classes: Number of classes.

        Returns:
            Array of recovered bits, one per inspected row.
        """
        usable = min(num_classes, len(self.watermark_pattern))
        # Mirror the embedding rule: the bit is 1 exactly when the "hi"
        # cell dominates the "lo" cell within its row.
        recovered = [
            1
            if T[row, (row + 1) % num_classes].item()
            > T[row, (row - 1) % num_classes].item()
            else 0
            for row in range(usable)
        ]
        return np.array(recovered)

    def verify_watermark(self, T: torch.Tensor, num_classes: int, 
                        threshold: float = 0.6) -> Tuple[bool, float]:
        """
        Check whether transition matrix T carries the expected watermark.

        Args:
            T: Transition matrix.
            num_classes: Number of classes.
            threshold: Minimum bit-match rate (0-1) required for a positive.

        Returns:
            (matched, match_rate) tuple.
        """
        recovered = self.extract_watermark(T, num_classes)
        compare_len = min(len(self.watermark_pattern), len(recovered))

        if compare_len == 0:
            return False, 0.0

        hits = np.sum(recovered[:compare_len] == self.watermark_pattern[:compare_len])
        match_rate = hits / compare_len

        return match_rate >= threshold, match_rate


class WatermarkDetector:
    """
    Detect watermark from model outputs (for verifying stolen models).
    """
    def __init__(self, watermark_key: str, victim_model_path: str, 
                 num_classes: int, device: torch.device):
        """
        Args:
            watermark_key: Watermark key
            victim_model_path: Victim model path
            num_classes: Number of classes
            device: Device on which loaded tensors should live
        """
        self.watermark_key = watermark_key
        self.victim_model_path = victim_model_path
        self.num_classes = num_classes
        self.device = device
        # Re-derive the expected watermark pattern from the key so that
        # verification does not depend on the original embedding-time object.
        self.embedder = WatermarkEmbedder(watermark_key)

    def detect_from_model_outputs(self, model, test_loader, T_path: Optional[str] = None) -> Tuple[bool, float]:
        """
        Detect watermark by analyzing model outputs.

        Args:
            model: Model to be detected (unused until output-based detection
                is implemented)
            test_loader: Test data loader (unused until output-based
                detection is implemented)
            T_path: Transition matrix path (if known)

        Returns:
            (Whether watermark detected, confidence/match rate)
        """
        if T_path is not None:
            # If the transition matrix is known, verify it directly.
            # map_location makes a CUDA-saved matrix loadable on CPU-only
            # hosts and places it straight on the target device; the previous
            # `torch.load(T_path).to(self.device)` failed inside torch.load
            # before .to() was ever reached.
            # SECURITY NOTE: torch.load unpickles arbitrary objects — only
            # load T_path files from trusted sources (on torch >= 1.13,
            # prefer weights_only=True).
            T = torch.load(T_path, map_location=self.device)
            return self.embedder.verify_watermark(T, self.num_classes)

        # Output-only detection (e.g. statistical analysis of the model's
        # predictions) is not implemented yet; report "no watermark" with
        # zero confidence.
        return False, 0.0

