import concurrent
import concurrent.futures  # explicit: `import concurrent` alone does not guarantee the futures submodule is loaded
import os
import time
from functools import partial
from turtle import pen  # NOTE(review): appears unused/accidental — candidate for removal

import numpy as np
import scipy
import torch
from tqdm import tqdm

from utils.const_utils import MAX_NEW_TOKENS

from .gumbel.key import gumbel_key_func
from .gumbel.score import gumbel_edit_score
from .transform.key import transform_key_func
from .transform.score import transform_edit_score

# Module-level cache for the non-watermarked reference dataset; lazily loaded
# inside get_null_results so repeated calls reuse the same dataset object.
ref_dataset=None
def get_null_results(wm_type, vocab_size, n, k, token_len, n_runs, penalty, tokenizer, process_num, test_stat, check_only=False):
    """Load (or generate and cache) the sorted null distribution of a test statistic.

    The null distribution is built by evaluating ``test_stat`` on ``n_runs``
    snippets of non-watermarked reference text (C4 realnewslike), each truncated
    to ``token_len`` tokens. Results are sorted ascending and cached on disk
    under ``.runtime/`` so subsequent calls just load them.

    Args:
        wm_type: watermark scheme name; used only in the cache file name.
        vocab_size: tokenizer vocabulary size, forwarded to ``test_stat``.
        n: watermark key length, forwarded to ``test_stat``.
        k: block size, forwarded to ``test_stat`` (also part of the cache name).
        token_len: number of tokens each reference snippet is truncated to.
        n_runs: number of null samples to draw.
        penalty: edit-distance penalty (gamma), forwarded to ``test_stat``.
        tokenizer: HuggingFace-style tokenizer used to tokenize reference text.
        process_num: number of worker processes for parallel computation.
        test_stat: callable computing the statistic (e.g. ``exp_phi`` / ``its_phi``).
        check_only: when False, a missing cache file is a hard error; when True,
            a missing cache is generated on the fly (used to pre-warm caches).

    Returns:
        1-D torch tensor of null statistics, sorted in ascending order.
    """
    null_result_lst_path = f".runtime/{wm_type}_null_results_vocab{vocab_size}_n{n}_k{k}_tokenlen{token_len}_nruns{n_runs}_penalty{penalty}_noshift.pt" # already sorted
    assert check_only or os.path.exists(null_result_lst_path), f"Missing null results {null_result_lst_path}"
    if not os.path.exists(null_result_lst_path):
        print(f"=== Missing null results {null_result_lst_path} ===")
        null_results = []
        from datasets import load_from_disk, load_dataset
        global ref_dataset  # module-level cache: load the reference dataset at most once
        if ref_dataset is None:
            try:
                print("=== Loading alternative text dataset from local cache ===")
                ref_dataset = load_from_disk('dataset/realnews/realnewslike')
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
                # dataset = load_dataset('allenai/c4','realnewslike',steam=True,trust_remote_code=True)
                print("=== Loading alternative text dataset from remote ===")
                ref_dataset = load_dataset('allenai/c4','realnewslike',split='train',trust_remote_code=True)

        dataset = ref_dataset.shuffle()
        tokens_alternative_lst = []
        for item in tqdm(dataset):
            text = item['text']
            tokens_alternative = tokenizer(text,return_tensors='pt')['input_ids'][0]
            if len(tokens_alternative) < token_len:
                continue  # snippet too short to truncate to token_len; skip it
            tokens_alternative = tokens_alternative[-token_len:]  # truncate to same length as tokens

            tokens_alternative_lst.append(tokens_alternative)
            if len(tokens_alternative_lst) == n_runs:
                break

        futures = []
        os.environ['TOKENIZERS_PARALLELISM'] = 'true'
        s_time = time.time()
        with concurrent.futures.ProcessPoolExecutor(process_num) as executor:
            # One fresh random key per cache file: the null statistics must not
            # depend on the watermark key actually used for generation.
            seed_alternative = torch.randint(high=100000,size=(1,)).item()
            for tokens_alternative in tqdm(tokens_alternative_lst):
                """Multi-Processing"""
                future = executor.submit(test_stat,tokens=tokens_alternative,n=n,k=k,seed=seed_alternative,vocab_size=vocab_size,null=True,penalty=penalty)
                futures.append(future)

                """Single-Processing"""
                # null_results.append(test_stat(tokens=tokens_alternative,n=n,k=k,seed=seed_alternative,vocab_size=vocab_size,null=True,penalty=penalty))

            for future in tqdm(concurrent.futures.as_completed(futures)):
                null_results.append(future.result())
        print(f"=== Time elapsed: {time.time()-s_time} ===")
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'

        null_results = torch.sort(torch.tensor(null_results))[0] # sorted in ascending order
        torch.save(null_results,null_result_lst_path)
    else:
        null_results = torch.load(null_result_lst_path)

    return null_results

            
def get_EXP_ITS_res(tokenized_texts, seed, wm_type, vocab_size, n, k, n_runs, penalty, tokenizer, process_num, p_threshold):
    """Compute watermark-detection p-values for a batch of tokenized texts.

    Selects the EXP (Gumbel) or ITS (inverse-transform) statistic, verifies that
    a null-distribution cache exists for every distinct text length, then runs
    the fast permutation test for each text in a process pool. A smaller p-value
    indicates the text is more likely to be watermarked.

    Args:
        tokenized_texts: list of token-id sequences to test.
        seed: watermark key seed used when the texts were generated.
        wm_type: 'EXP' selects ``exp_phi``; anything else selects ``its_phi``.
        vocab_size, n, n_runs, penalty: forwarded to the statistic / null cache.
        k: nominal block size; overwritten per text with the text length.
        tokenizer: used for null-cache generation and for error messages.
        process_num: number of worker processes.
        p_threshold: texts with p-value below this are predicted watermarked.

    Returns:
        dict with 'p_value' (floats, one per text, in input order) and
        'wm_pred' (1 if p < p_threshold else 0).
    """
    scores = []
    futures = []

    test_stat = exp_phi if wm_type == 'EXP' else its_phi
    os.environ['TOKENIZERS_PARALLELISM'] = 'true'

    # Pre-warm the null-result caches: one cache file per distinct text length
    # (check_only=True generates missing caches instead of asserting).
    length_set = {len(tokenized_text) for tokenized_text in tokenized_texts}
    for token_len in length_set:
        k = token_len
        get_null_results(wm_type, vocab_size, n, k, token_len, n_runs, penalty, tokenizer, process_num, test_stat, check_only=True)

    with concurrent.futures.ProcessPoolExecutor(process_num) as executor:
        for tokenized_text in tqdm(tokenized_texts):
            token_len = len(tokenized_text)
            k = token_len  # block size always equals the full text length (see adjacency)
            assert k <= MAX_NEW_TOKENS and k > 0, f"[ERROR] Token length {k} is invalid, here are the text: \n{tokenizer.decode(tokenized_text)}"

            # prepare null results, a sorted tensor of length `n_runs`
            null_results = get_null_results(wm_type, vocab_size, n, k, token_len, n_runs, penalty, tokenizer, process_num, test_stat)

            # prepare test function
            test = partial(fast_permutation_test, vocab_size=vocab_size, n=n, k=k, test_stat=test_stat, null_results=null_results,penalty=penalty)

            future = executor.submit(test,torch.tensor(tokenized_text),seed)
            futures.append(future)

        # Collect in submission order so scores stay aligned with tokenized_texts.
        for future in futures:
            pval = future.result()
            scores.append(pval)

    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    res = {"p_value":scores,"wm_pred":[1 if p < p_threshold else 0 for p in scores]}
    return res

def fast_permutation_test(tokens,seed,vocab_size,n,k,test_stat,penalty,null_results):
    test_result = test_stat(tokens=tokens,
                            n=n,
                            k=k,
                            vocab_size=vocab_size,
                            seed=seed,
                            penalty=penalty)
    # breakpoint()
    p_val = torch.searchsorted(null_results,test_result,right=True) / len(null_results)
    # breakpoint()
    return p_val.item()

def its_phi(tokens,n,k,seed,vocab_size,penalty,null=False):
    """ITS (inverse-transform sampling) detection statistic.

    Maps tokens through the inverse key permutation, normalizes them into
    [0, 1), and returns the minimum edit-score distance between the token
    sequence and the key sequence `xi`.

    When `null` is True a fresh random key (permutation + uniforms) is drawn,
    ignoring `seed`; otherwise the key is derived deterministically from `seed`
    via `transform_key_func`.
    """
    if null:
        # Null hypothesis: a random key unrelated to any watermark seed.
        pi = torch.randperm(vocab_size)
        xi = torch.rand((n,1))
    else:
        gen = torch.Generator()
        gen.manual_seed(int(seed))
        xi, pi = transform_key_func(gen, n, vocab_size)

    # Apply the inverse permutation to each token, then scale into [0, 1).
    inverse_pi = torch.argsort(pi)
    normalized = inverse_pi[tokens].float() / vocab_size

    score_fn = partial(transform_edit_score, gamma=penalty)
    dist_matrix = adjacency(normalized, xi, score_fn, k)
    per_row_min = torch.min(dist_matrix, axis=1)[0]
    return torch.min(per_row_min)


def exp_phi(tokens,n,k,seed,vocab_size,penalty,null=False):
    """EXP (Gumbel) detection statistic.

    Returns the minimum Gumbel edit-score distance between the token sequence
    and the key sequence `xi`. When `null` is True a fresh uniform key is drawn,
    ignoring `seed`; otherwise the key comes from `gumbel_key_func(seed)`.
    """
    if null:
        xi = torch.rand((n,vocab_size))
    else:
        gen = torch.Generator()
        gen.manual_seed(int(seed))
        xi, pi = gumbel_key_func(gen, n, vocab_size, None)

    # Speed-up: keep only the xi columns for token ids that actually occur and
    # remap tokens to indices into that reduced key. Per the original code's
    # (now removed) "Version 1" equivalence check, this does not change the
    # resulting statistic.
    unique_ids, remapped_tokens = torch.unique(tokens, return_inverse=True, sorted=False)
    reduced_xi = xi[:, unique_ids]

    score_fn = partial(gumbel_edit_score, gamma=penalty)
    dist_matrix = adjacency(remapped_tokens, reduced_xi, score_fn, k)
    per_row_min = torch.min(dist_matrix, axis=1)[0]
    return torch.min(per_row_min)

def adjacency(tokens,xi,dist,k):
    """Return the matrix of distances between token blocks and key blocks.

    The general algorithm would compare every length-k window of `tokens`
    against every rotation of `xi`, producing an (m-k+1, n) matrix. This
    codebase always calls it with k == len(tokens) (the block size equals the
    full text length), so the matrix degenerates to a single entry:
    dist(tokens, xi).

    Args:
        tokens: 1-D tensor of (transformed) token values.
        xi: watermark key tensor; its first dimension indexes key positions.
        dist: callable (token_block, key_block) -> scalar distance.
        k: block size; must equal len(tokens).

    Returns:
        (1, 1) float tensor holding the single distance.
    """
    m = len(tokens)
    # Block size is always the full text length in this codebase; the windowed
    # O(m*n) variant was removed along with its loop over rotations of xi.
    assert k == m, f"block size k ({k}) must equal text length ({m})"
    A = torch.empty(size=(1, 1))
    A[0][0] = dist(tokens, xi)
    return A
