# Description: Main entry point
# Generate watermarked text and detect watermark in the generated text.

import json
import time

from sklearn.metrics import roc_auc_score

from baselines import WatermarkBaseline
from utils.const_utils import WM_KGW, WM_UNIGRAM, WM_AAR, WM_EXP
from utils.data_utils import load_prompt_dataset, read_json,write_json
from attack.entry import attack

import argparse
import os
import torch
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer

# Pin every BLAS / OpenMP backend to a single thread to avoid CPU
# oversubscription when several worker processes run on one machine.
# NOTE(review): these are set after numpy/torch/sklearn are imported; some
# backends only read them at import time — consider moving above the imports.
for _thread_env_var in (
    'OMP_NUM_THREADS',
    'OPENBLAS_NUM_THREADS',
    'NUMEXPR_NUM_THREADS',
    'MKL_NUM_THREADS',
):
    os.environ[_thread_env_var] = '1'

def get_parser():
    """Build the CLI parser shared by the generate / detect / attack modes.

    Returns:
        argparse.ArgumentParser: fully configured parser; callers invoke
        ``parse_args()`` themselves.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action='store_true')

    # --- I/O paths ---
    parser.add_argument("--wm_path", type=str)  # wm_path is the path of watermarked texts, for `detect` and `attack` mode
    parser.add_argument("--unwm_path", type=str)  # unwm_path is the path for non-watermarked texts
    parser.add_argument("--output_path", type=str)  # output_path is the path for saving generated text in ``generate`` mode and  `attack`

    parser.add_argument("--model_name", type=str, help="the foundation model name. Choose from: facebook/opt-1.3b, meta-llama/Llama-2-7b-hf, meta-llama/Meta-Llama-3-70B")
    # Fix: the help text previously advertised "KGW, unigram, EXP, ITS", but ITS
    # is not among the actual choices and AAR was missing — keep it in sync with
    # the `choices` list below.
    parser.add_argument("--baseline", type=str, default=None, help="choose from: KGW, unigram, AAR, EXP; None indicates pure decoding", choices=[WM_KGW, WM_UNIGRAM, WM_AAR, WM_EXP, None])
    parser.add_argument("--dataset", type=str, default="en")  # en used for generating training data, realnews used for detection

    # --- params for generation ---
    parser.add_argument("--batch_size", type=int, default=200)
    # params for KGW and unigram
    parser.add_argument("--gamma", type=float, default=0.5)
    parser.add_argument("--delta", type=float, default=2.0)
    parser.add_argument("--seeding_scheme", type=str, default="lefthash")
    # params for AAR
    parser.add_argument("--khash", type=int, default=1)
    # params for EXP
    parser.add_argument("--wmkey_len", type=int, default=256)
    parser.add_argument('--offset', action='store_true')
    parser.add_argument("--num_shifts", type=int, default=1)
    parser.add_argument("--n_runs", type=int, default=10000)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--penalty", type=float, default=0.0)

    # --- params for detection ---
    parser.add_argument("--z_threshold", type=float, default=4.0)

    # --- params for B^4 attack ---
    parser.add_argument("--paraphrase_model", type=str, default=None)  # the model P_f
    parser.add_argument("--prompt_id", type=int, default=4)
    parser.add_argument("--num_beams", type=int, default=10)
    parser.add_argument("--sample", action='store_true')
    parser.add_argument("--amateur_model", type=str, default=None)  # the model P_w
    parser.add_argument("--origin_model", type=str, default=None)  # the original ckpt of P_w, used for Approximation Error Adjustment
    parser.add_argument("--closed_form", action='store_true')  # whether to derive \lambda in Eq 2
    parser.add_argument("--kl_threshold", type=float, default=0.0)  # \epsilon in Eq 2
    parser.add_argument("--coef", type=float, default=1.0)  # For not `closed_form`

    # --- mode selection; choose one of the following ---
    parser.add_argument('--do_generate', action='store_true')
    parser.add_argument('--do_detect', action='store_true')
    parser.add_argument('--do_attack', action='store_true')

    return parser


# Module-level cache holding the most recently loaded model/tokenizer pair.
# NOTE(review): the companion cache key `name` is only assigned inside
# load_model(); the `model is not None` check short-circuits before `name` is
# read on the first call, so no NameError occurs — but initializing
# `name = None` here would be clearer.
model, tokenizer = None, None
def load_model(model_name, device_id='auto', tokenizer_only=False):
    """Load (and cache) a Hugging Face causal LM and/or its tokenizer.

    Args:
        model_name: Hub id or local path of the checkpoint.
        device_id: value passed to ``device_map`` (default ``'auto'``).
        tokenizer_only: if True, load and return only the tokenizer.

    Returns:
        ``tokenizer`` when ``tokenizer_only`` is True, otherwise the tuple
        ``(model, tokenizer)``.
    """
    global model,tokenizer,name
    # Cache hit: the same checkpoint is already loaded in this process.
    if model is not None and tokenizer is not None and name == model_name:
        if tokenizer_only:
            return tokenizer
        else:
            return model,tokenizer
    elif tokenizer_only:
        # Tokenizer-only path: loads fresh every time (it neither checks nor
        # updates the cache key `name`).
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        if 'llama' in str(model_name).lower():
            # Llama tokenizers ship without a pad token; reuse EOS for padding.
            print("[INFO] add pad token to ", model_name)
            tokenizer.pad_token = tokenizer.eos_token
        # tokenizer.padding_side = "right" if "gemma" in model_name else "left"
        tokenizer.padding_side = "left"
        return tokenizer
    else:
        # load model
        model_path = model_name
        tokenizer = AutoTokenizer.from_pretrained(model_path,trust_remote_code=True)
        if "Meta-Llama-3-70B" == model_path.split('/')[-1]:
            # 70B checkpoint: load in bfloat16 and let HF shard it across devices.
            print("[INFO] Load Meta-Llama-3-70B in bfloat16")
            #from transformers import BitsAndBytesConfig
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                #quantization_config = BitsAndBytesConfig(load_in_4bit=True,bnb_4bit_compute_dtype=torch.float16),
                device_map=device_id,
                torch_dtype=torch.bfloat16
            )
            tokenizer.pad_token_id = tokenizer.eos_token_id

        elif "Qwen2-72B" == model_path.split('/')[-1]:
            # 72B Qwen checkpoint: bfloat16, remote code enabled for custom modeling files.
            print("[INFO] Load Qwen2-72B in bfloat16")
            # from transformers import BitsAndBytesConfig
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                # quantization_config = BitsAndBytesConfig(load_in_4bit=True,bnb_4bit_compute_dtype=torch.bfloat16),
                device_map=device_id,
                trust_remote_code=True,
                torch_dtype=torch.bfloat16,
            )
            tokenizer.add_special_tokens({"pad_token":'<|extra_0|>',"eos_token":'<|endoftext|>'})

        else:
            # Default path: gemma checkpoints in float16, everything else in bfloat16.
            model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device_id, torch_dtype=torch.bfloat16 if 'gemma' not in model_name else torch.float16)

        model.eval()
        # NOTE(review): this branch keys on the *class name* of the loaded model,
        # while the tokenizer-only branch above keys on the checkpoint name string.
        if 'llama' in str(type(model)).lower():
            tokenizer.pad_token = tokenizer.eos_token
        # tokenizer.padding_side = "right" if "gemma" in model_name else "left"
        tokenizer.padding_side = "left"

        # Record the cache key so subsequent calls can reuse this pair.
        name = model_name
        print("[INFO] Load model: ", model_name)

        return model,tokenizer


def _do_generate(args, gen_path):
    """Generate watermarked text for every prompt in the dataset and write the
    results to *gen_path* as a single JSON file."""
    lm, tok = load_model(args.model_name)
    # All prompts come from the dataset named on the command line.
    # TODO: download the dataset.
    prompts = load_prompt_dataset(args.dataset)

    # Forward every CLI argument to the generator, plus the prompts themselves.
    gen_kwargs = dict(vars(args))
    gen_kwargs["prompts"] = prompts
    print("[INFO] a total of ", len(prompts), " prompts to generate")
    print(f"Start to generate watermarked text: {args.baseline}")

    vocab = lm.get_output_embeddings().weight.shape[0]
    generate_fn = WatermarkBaseline(lm, tok, vocab_size=vocab, baseline=args.baseline).generate

    t0 = time.time()
    outputs = list(generate_fn(**gen_kwargs))
    print(f"[DEBUG] Time elapsed: {time.time()-t0}")

    # One JSON file per watermarking method.
    write_json(outputs, gen_path)
    print(f"{args.baseline} watermarked text generated and written to: {gen_path}")

def _do_detect(args, watermarked_path, unwatermarked_path, output_path):
    """Score watermarked and non-watermarked texts, save the scores as JSON,
    and print the resulting ROC-AUC.

    Args:
        args: parsed CLI namespace (uses model_name, baseline, detection params).
        watermarked_path: JSON file of watermarked generations.
        unwatermarked_path: JSON file of non-watermarked reference texts.
        output_path: destination JSON file for the detection scores.
    """
    # Vocab sizes hardcoded for known checkpoints so detection can skip loading
    # the full weights just to read the embedding-matrix shape
    # (= model.get_output_embeddings().weight.shape[0]).
    # Fix: the original guarded these behind `model_name in [70B, 72B]`, which
    # made the Llama-2-7b-hf entry unreachable dead code; the table makes every
    # entry take the intended tokenizer-only fast path.
    known_vocab_sizes = {
        'meta-llama/Meta-Llama-3-70B': 128256,
        'Qwen/Qwen2-72B': 152064,
        'meta-llama/Llama-2-7b-hf': 32000,
    }
    if args.model_name in known_vocab_sizes:
        vocab_size = known_vocab_sizes[args.model_name]
        tokenizer = load_model(args.model_name, tokenizer_only=True)
    else:
        model, tokenizer = load_model(args.model_name)
        vocab_size = model.get_output_embeddings().weight.shape[0]
        del model  # free the weights; only the tokenizer is needed below
    print("Vocab size ", vocab_size)
    print(f"Start to detect texts on {watermarked_path}")
    watermark_baseline = WatermarkBaseline(model=None, tokenizer=tokenizer, vocab_size=vocab_size, baseline=args.baseline)  # No need to use the whole model

    kwargs = dict(vars(args))
    kwargs["texts"] = read_json(watermarked_path)
    kwargs["unwm_texts"] = read_json(unwatermarked_path)
    scores = watermark_baseline.detect(torch.device('cuda'), **kwargs)

    def switch_dict(scores):
        """Recursively convert tensors/ndarrays in the score dict to
        JSON-serializable Python types (in place)."""
        for k, v in scores.items():
            if type(v) == dict:
                scores[k] = switch_dict(v)
            if torch.is_tensor(v):
                scores[k] = v.tolist()
            if type(v) == np.ndarray:
                # NOTE(review): .item() only works on size-1 arrays — assumes
                # detect() never returns multi-element ndarrays; confirm.
                scores[k] = v.item()
        return scores

    scores = switch_dict(scores)

    write_json(scores, output_path)
    print(f"{args.baseline} detected, scores written to: {output_path}")

    num_samples = len(kwargs["texts"])
    if args.baseline in [WM_KGW, WM_UNIGRAM]:
        # z-score: higher means more likely watermarked, so the watermarked
        # half is the positive class.
        score_label = 'z_score'
        labels = [1] * num_samples + [0] * num_samples
    else:
        # p-value: lower means more likely watermarked, so labels are inverted
        # to keep the AUC oriented the same way.
        score_label = 'p_value'
        labels = [0] * num_samples + [1] * num_samples
    # NOTE(review): only the unwatermarked scores are truncated to num_samples;
    # this assumes "generations" holds exactly num_samples entries — verify.
    preds = scores["generations"][score_label] + scores["unwatermarked"][score_label][:num_samples]
    auc_score = roc_auc_score(labels, preds)
    print(f"AUC score calculated from {num_samples} samples: {auc_score}")




def main(args):
    """Echo the parsed arguments, sanity-check them, then dispatch to the
    requested mode(s): generate, attack, and/or detect."""
    print(json.dumps(vars(args), indent=4))

    # --- argument validity checks ---
    assert not (args.do_detect and args.baseline is None), "Detection requires a baseline!"
    if args.baseline == WM_EXP and args.penalty != 0.0:
        # EXP does not support a repetition penalty; force it off.
        print("[ERROR] Penalty should be 0.0 for EXP!")
        args.penalty = 0.0

    # --- mode dispatch (modes are independent and may be combined) ---
    if args.do_generate:
        _do_generate(args, args.output_path)

    if args.do_attack:
        loaded_prompts = load_prompt_dataset(args.dataset)
        print(f"[INFO] A total of {len(loaded_prompts)} prompts loaded.")
        attack(args, args.wm_path, args.output_path, loaded_prompts)

    if args.do_detect:
        _do_detect(args, args.wm_path, args.unwm_path, args.output_path)




# Script entry point: parse the CLI arguments and hand them to main().
if __name__ == "__main__":
    cli_args = get_parser().parse_args()
    main(cli_args)