import gc
import token
import traceback

from AAR.aar_watermark import AarWatermark, AarWatermarkDetector
from KGW import WatermarkLogitsProcessor,WatermarkDetector
from unigram import GPTWatermarkLogitsWarper, GPTWatermarkDetector
from utils.const_utils import MIN_NEW_TOKENS, PROMPT_LEN
from watermarking import generate,generate_rnd,transform_key_func,transform_sampling, gumbel_key_func,gumbel_sampling,get_EXP_ITS_res
from utils import read_json,write_json,MAX_NEW_TOKENS
from utils.const_utils import *

from functools import partial
import concurrent
from transformers import LogitsProcessorList
import torch
import numpy as np
from tqdm import tqdm
import scipy.stats


DEFAULT_SCHEME = "lefthash"


class BaselineLogitsProcessor:
    """Factory for the logits-processor list of a watermarking baseline.

    Holds the model/tokenizer and the shared watermark hyper-parameters
    (gamma, delta, seeding scheme) and builds a ``LogitsProcessorList``
    on demand via :meth:`get`.
    """

    def __init__(self, model, tokenizer, gamma=0.25, delta=2.0, seeding_scheme=DEFAULT_SCHEME):
        # Shared configuration applied to whichever baseline processor is built.
        self.model = model
        self.tokenizer = tokenizer
        self.gamma = gamma
        self.delta = delta
        self.seeding_scheme = seeding_scheme

    def get(self, baseline_name):
        """Return a ``LogitsProcessorList`` for ``baseline_name`` ("KGW" or "unigram")."""
        assert baseline_name in ["KGW","unigram"]
        if baseline_name == "KGW":
            processor = WatermarkLogitsProcessor(
                vocab=list(self.tokenizer.get_vocab().values()),
                gamma=self.gamma,
                delta=self.delta,
                seeding_scheme=self.seeding_scheme,
            )
        else:  # "unigram"
            processor = GPTWatermarkLogitsWarper(
                fraction=self.gamma,
                strength=self.delta,
                vocab_size=self.model.config.vocab_size,
                watermark_key=0,
            )
        return LogitsProcessorList([processor])

class WatermarkBaseline:
    """Unified generate/detect front-end for the supported watermark baselines.

    ``baseline`` selects the scheme (one of ``WM_KGW``, ``WM_UNIGRAM``,
    ``WM_AAR``, ``WM_EXP``); :meth:`generate` and :meth:`detect` dispatch to the
    matching logits processor / detector implementation.
    """

    def __init__(self, model, tokenizer, vocab_size, baseline):
        assert baseline in [WM_KGW, WM_UNIGRAM, WM_AAR, WM_EXP]
        self.model = model
        self.tokenizer = tokenizer
        self.vocab_size = vocab_size
        self.baseline = baseline

    def generate(self, **args):
        """Generate watermarked continuations for ``args["prompts"]`` in batches.

        Expected ``args`` keys: ``prompts``, ``batch_size``, plus the scheme
        parameters (``gamma``/``delta``/``seeding_scheme`` for KGW & unigram,
        ``khash``/``seed`` for AAR, ``wmkey_len``/``seed``/``offset``/
        ``num_shifts`` for EXP).

        Returns a list of decoded generations with the prompt tokens stripped.
        On a per-batch failure or KeyboardInterrupt, the partial list collected
        so far is returned instead of raising.
        """
        outputs = []
        prompts = args["prompts"]
        prompts_num = len(prompts)
        batch_size = args['batch_size']

        if self.baseline in ["KGW","unigram"]:
            assert self.model.device.type == "cuda", "model must be on cuda device"
            # Build the watermark logits-processor factory once; reused per batch.
            general_logits_processor = BaselineLogitsProcessor(self.model,self.tokenizer,args["gamma"],args["delta"],args["seeding_scheme"])
            for batch_first_idx in tqdm(range(0, prompts_num, batch_size)):
                batch_last_idx = min(batch_first_idx + batch_size, prompts_num)
                try:
                    tokenized_input = self.tokenizer(prompts[batch_first_idx:batch_last_idx], return_tensors="pt", padding=True, truncation=True, max_length=PROMPT_LEN).to(self.model.device)
                    generate_args = {
                        **tokenized_input,
                        'logits_processor': general_logits_processor.get(self.baseline),
                        'do_sample': True,
                        'top_k': 50,
                        'top_p': 0.9,
                        'max_new_tokens': MAX_NEW_TOKENS,
                        'min_new_tokens': MIN_NEW_TOKENS,
                    }
                    prompt_len = tokenized_input["input_ids"].size(1)
                    generation = self.model.generate(**generate_args)
                    # Strip the (padded) prompt prefix before decoding.
                    gen_text = self.tokenizer.batch_decode([x[prompt_len:] for x in generation], skip_special_tokens=True)
                    outputs.extend(gen_text)
                except KeyboardInterrupt:
                    # Listed before Exception for clarity; KeyboardInterrupt does
                    # not inherit from Exception, so ordering is also correct.
                    print("[WARNING] KeyboardInterrupt")
                    return outputs
                except Exception as e:
                    print(f"[ERROR] Failed to generate watermarked text: {e}")
                    return outputs

        elif self.baseline in ["EXP"]:
            (key_func,sampling) = (gumbel_key_func,gumbel_sampling) if self.baseline == "EXP" else (transform_key_func, transform_sampling)
            # Watermark key length; default 256 follows the original EXP setup.
            n = args["wmkey_len"] if args["wmkey_len"] is not None else 256
            vocab_size = self.model.get_output_embeddings().weight.shape[0]
            generate_watermark = lambda batch_prompt : generate(self.model,
                                                    batch_prompt,
                                                    vocab_size,
                                                    n,
                                                    MAX_NEW_TOKENS,
                                                    args['seed'],
                                                    key_func,
                                                    sampling,
                                                    random_offset=args["offset"],
                                                    num_shifts=args["num_shifts"]
                                                    )

            # BUGFIX: attribute was previously misspelled "trucation_side", so the
            # intended left-side truncation of long prompts never took effect.
            self.tokenizer.truncation_side = "left"
            try:
                for batch_first_idx in tqdm(range(0, prompts_num, batch_size)):
                    try:
                        batch_last_idx = min(batch_first_idx + batch_size, prompts_num)
                        tokenized_prompts = self.tokenizer(prompts[batch_first_idx:batch_last_idx], return_tensors="pt", padding=True, truncation=True, max_length=PROMPT_LEN)
                        # generate_watermark() returns output tokens only (no prompt).
                        generation = generate_watermark(tokenized_prompts)
                        gen_text = self.tokenizer.batch_decode(generation, skip_special_tokens=True)
                        outputs.extend(gen_text)
                    except KeyboardInterrupt:
                        print("[WARNING] KeyboardInterrupt")
                        return outputs
                    except Exception:
                        traceback.print_exc()
                        return outputs
            finally:
                # Restore the default side even on an early return above.
                self.tokenizer.truncation_side = "right"

        elif self.baseline == WM_AAR:
            assert self.model.device.type == "cuda", "model must be on cuda device"
            # Build the AAR watermark logits processor once; reused per batch.
            aar_logits_processor = AarWatermark(vocab_size=self.vocab_size, k=args['khash'], seed=args['seed'], device=self.model.device)
            # BUGFIX: attribute was previously misspelled "trucation_side" (see EXP branch).
            self.tokenizer.truncation_side = "left"
            try:
                for batch_first_idx in tqdm(range(0, prompts_num, batch_size)):
                    batch_last_idx = min(batch_first_idx + batch_size, prompts_num)
                    try:
                        tokenized_input = self.tokenizer(prompts[batch_first_idx:batch_last_idx], return_tensors="pt", padding=True, truncation=True, max_length=PROMPT_LEN).to(self.model.device)
                        generate_args = {
                            **tokenized_input,
                            'logits_processor': LogitsProcessorList([aar_logits_processor]),
                            'do_sample': False,  # AAR decodes greedily; hash-based PRF supplies the randomness
                            'max_new_tokens': MAX_NEW_TOKENS,
                            'min_new_tokens': MIN_NEW_TOKENS,
                        }
                        prompt_len = tokenized_input["input_ids"].size(1)
                        generation = self.model.generate(**generate_args)
                        gen_text = self.tokenizer.batch_decode([x[prompt_len:] for x in generation], skip_special_tokens=True)
                        outputs.extend(gen_text)
                    except KeyboardInterrupt:
                        print("[WARNING] KeyboardInterrupt")
                        return outputs
                    except Exception as e:
                        traceback.print_exc()
                        print(f"Failed to generate watermarked text: {e}")
                        return outputs
            finally:
                # Restore the default side even on an early return above.
                self.tokenizer.truncation_side = "right"

        return outputs

    def detect(self, device, **args):
        """Score watermarked and unwatermarked texts with the matching detector.

        Expected ``args`` keys: ``texts`` (watermarked), ``unwm_texts``,
        ``z_threshold``, plus the per-scheme detector parameters.

        Returns a dict with per-text scores and binary predictions under
        ``"generations"`` and ``"unwatermarked"``, plus the thresholds used
        under ``"config"``.
        """
        wm_texts,unwm_texts = args["texts"],args["unwm_texts"]
        score_dict = {}
        # One-sided p-value equivalent of the z-score threshold.
        p_threshold = scipy.stats.norm.sf(args["z_threshold"])
        if self.baseline == "KGW":
            assert device.type == "cuda", "model must be on cuda device"
            watermark_detector = WatermarkDetector(vocab=list(self.tokenizer.get_vocab().values()),
                                        gamma=args["gamma"], # should match original setting
                                        seeding_scheme=args["seeding_scheme"], # should match original setting
                                        device=device, # must match the original rng device type
                                        tokenizer=self.tokenizer,
                                        z_threshold=args["z_threshold"],
                                        normalizers=[],
                                        ignore_repeated_ngrams=True)

            def _batch_detect(texts):
                # Collect per-text z-scores and binary watermark predictions.
                z_score_list, prediction_list = [],[]
                for text in texts:
                    res = watermark_detector.detect(text)
                    z_score_list.append(res["z_score"])
                    prediction_list.append(int(res["prediction"]))
                return {"z_score":z_score_list, "wm_pred":prediction_list}

            score_dict["generations"] = _batch_detect(wm_texts)
            score_dict["unwatermarked"] = _batch_detect(unwm_texts)

        elif self.baseline == "unigram":
            assert device.type == "cuda", "model must be on cuda device"
            detector = GPTWatermarkDetector(fraction=args["gamma"],
                                            strength=args["delta"],
                                            vocab_size=self.vocab_size,
                                            watermark_key=0)

            def get_unigram_res(texts):
                # z-score per text; prediction is a simple threshold on z.
                z_score_list = []
                for text in tqdm(texts):
                    gen_tokens = self.tokenizer(text, add_special_tokens=False)["input_ids"]
                    z_score_list.append(detector.detect(gen_tokens))

                return {
                    'z_score': z_score_list,
                    'wm_pred': [1 if z > args["z_threshold"] else 0 for z in z_score_list]
                }

            score_dict["generations"] = get_unigram_res(wm_texts)
            score_dict["unwatermarked"] = get_unigram_res(unwm_texts)

        elif self.baseline in ["EXP"]:
            # NOTE(review): debug override left in — this clobbers the caller's
            # n_runs setting. TODO: confirm whether 500 is intended, then remove.
            args['n_runs'] = 500 # DEBUG
            # Watermark key length; default 256 must match generation.
            n = args["wmkey_len"] if args["wmkey_len"] is not None else 256
            vocab_size = self.vocab_size
            k = 0  # block size. when set to 0, block size = max new tokens
            seed = args["seed"]
            print("detecting watermarked generations")
            tokenized_texts = self.tokenizer(wm_texts,
                        truncation=True,
                        padding=False,
                        max_length=MAX_NEW_TOKENS).input_ids
            score_dict["generations"] = get_EXP_ITS_res(tokenized_texts, seed, self.baseline, vocab_size, n, k, args["n_runs"], args["penalty"], self.tokenizer, PROCESS_NUM,p_threshold)
            print("detecting unwatermarked generations")
            unwm_tokenized_texts = self.tokenizer(unwm_texts,
                        truncation=True,
                        padding=False,
                        max_length=MAX_NEW_TOKENS).input_ids
            score_dict["unwatermarked"] = get_EXP_ITS_res(unwm_tokenized_texts, seed, self.baseline, vocab_size, n, k, args["n_runs"], args["penalty"], self.tokenizer, PROCESS_NUM,p_threshold)

        elif self.baseline == "AAR":
            assert device.type == "cuda", "model must be on cuda device"
            aar_detector = AarWatermarkDetector(tokenizer=self.tokenizer, k=args["khash"], seed=args["seed"], vocab_size=self.vocab_size, device=device)
            def get_aar_res(texts):
                # p-value per text; prediction thresholds on the converted p_threshold.
                p_value_list = []
                for text in tqdm(texts):
                    p_value_list.append(aar_detector.detect(text))
                return {
                    'p_value': p_value_list,
                    'wm_pred': [1 if p < p_threshold else 0 for p in p_value_list]
                }

            score_dict["generations"] = get_aar_res(wm_texts)
            score_dict["unwatermarked"] = get_aar_res(unwm_texts)

        score_dict["config"]={"z_threshold":args["z_threshold"],"p_threshold":p_threshold}
        return score_dict



