import enum
from typing import Optional

import scipy.stats
import torch
import os
from transformers import AutoTokenizer

DEFAULT_SEED = 42

def _get_uniform(seed, vocab_size, rng, prev_token: torch.LongTensor, device, eps=1e-20):
    """Seed RNG from local context. Not batched, because the generators we use (like cuda.random) are not batched."""
    # Need to have enough context for seed generation
    prf_key = (prev_token * seed).long().item()
    # enable for long, interesting streams of pseudorandom numbers: print(prf_key)
    rng.manual_seed(prf_key % (2**64 - 1))  # safeguard against overflow from long
    uniform = torch.rand((vocab_size), generator=rng, dtype=torch.float32, device=device) # [vocab_size]
    return uniform

def _get_gumbel(seed, vocab_size, rng, prev_token: torch.LongTensor, device, eps=1e-20):
    """Draw a (vocab_size,) tensor of Gumbel(0, 1) values from an RNG seeded
    by the local context.

    Not batched, because the generators used (e.g. cuda.random) are not batched.
    """
    u = _get_uniform(seed, vocab_size, rng, prev_token, device, eps)
    # Inverse-transform sampling: G = -log(-log(U)); the clamp keeps the
    # inner log finite when a uniform draw is ~0.
    clamped = torch.clamp(u, min=eps)
    return -torch.log(-torch.log(clamped))

class AarWatermark:
    """Logits processor implementing the Aar (Aaronson-style) watermark.

    On each decoding step it adds context-seeded Gumbel noise to the model
    scores, so greedy selection over the noised scores performs the
    Gumbel-max trick with pseudorandomness the detector can reproduce.
    """

    def __init__(
        self,
        vocab_size: int,
        k: int,
        seed: int = DEFAULT_SEED,
        device: Optional[str] = None,
    ):
        # Floor used when clamping inside log() so the Gumbel transform stays finite.
        eps = 1e-20

        self.k = k  # number of trailing context tokens hashed into the PRF key
        self.vocab_size = vocab_size
        self.seed = seed
        self.eps = eps
        self.device = device
        # NOTE(review): the generator lives on `device` (CPU when None); the
        # original "always cuda" comment did not match the code.
        self.rng = torch.Generator(device=device)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        """Add context-seeded Gumbel noise to `scores` in place and return it.

        input_ids: (batch_size, seq_len) context token ids.
        scores: (batch_size, vocab_size) next-token logits.
        Returns `scores` unchanged when fewer than k context tokens exist.
        """
        # Not enough context yet to derive a PRF key — leave scores untouched.
        if input_ids.shape[-1] < self.k:
            return scores
        # One PRF key per batch row: sum of the last k context tokens.
        batch_prev_token = torch.sum(input_ids[:, -self.k:], dim=-1)  # (batch_size,)
        # _get_uniform is not batched, so draw one row at a time.
        uniform_rows = [
            _get_uniform(self.seed, self.vocab_size, self.rng, prev_token, self.device, self.eps)
            for prev_token in batch_prev_token
        ]
        batch_uniform = torch.stack(uniform_rows, dim=0)  # (batch_size, vocab_size)
        batch_gumbel = -torch.log(-torch.log(torch.clamp(batch_uniform, min=self.eps)))  # (batch_size, vocab_size)
        # BUG FIX: the original `scores[..., ...] += batch_gumbel` raises
        # IndexError — PyTorch allows only a single Ellipsis per index.
        # In-place add over the whole tensor is what was intended.
        scores += batch_gumbel

        return scores

class AarWatermarkDetector:
    """Detector for the Aar (Aaronson-style) watermark.

    Re-derives the per-position uniform draws from the same context-seeded
    PRF used at generation time and tests whether the observed tokens have
    suspiciously large u-values.
    """

    def __init__(
        self,
        tokenizer: AutoTokenizer,
        k: int = 1,
        seed: int = DEFAULT_SEED,
        vocab_size: Optional[int] = None,
        device: Optional[str] = None,
    ):
        # Floor used when indexing the uniforms, mirroring AarWatermark's eps.
        eps = 1e-20
        self.tokenizer = tokenizer
        self.k = k  # number of context tokens hashed into the PRF key
        self.seed = seed
        self.eps = eps
        self.vocab_size = vocab_size
        self.device = device
        # NOTE(review): the generator lives on `device` (CPU when None); the
        # original "always cuda" comment did not match the code.
        self.rng = torch.Generator(device=device)

    def detect(self, text: str) -> float:
        """Return a p-value; the null hypothesis is that `text` is NOT watermarked.

        Under the null hypothesis each u is Uniform(0, 1), so each per-token
        score -log(1 - u) is Exp(1), and the sum over the (seq_len - k)
        scored tokens is distributed as Gamma(seq_len - k, 1).
        """
        tokens = self.tokenizer.encode(text, return_tensors="pt", add_special_tokens=False)[0]  # (seq_len,)
        seq_len = tokens.shape[0]
        # ROBUSTNESS FIX: with k or fewer tokens there is nothing to score and
        # gamma.sf would be called with a non-positive shape parameter (NaN).
        # Report a p-value of 1.0: no evidence of watermarking.
        if seq_len <= self.k:
            return 1.0
        score = 0.0
        for i in range(self.k, seq_len):
            # PRF context: sum of the k tokens preceding position i.
            prev_token = torch.sum(tokens[i - self.k:i], dim=-1)
            token = tokens[i]
            u = _get_uniform(self.seed, self.vocab_size, self.rng, prev_token, self.device, self.eps)[token]
            score += -torch.log(1 - u).item()
        # Survival function of Gamma(n_scored_tokens, 1) at the observed total.
        p_value = scipy.stats.gamma.sf(score, seq_len - self.k, loc=0, scale=1)
        return p_value