import numpy as np
from tqdm import tqdm
import math
import csv
import torch

def load_probability(model_path):
    """
    Load the per-model n-gram probability dictionaries required by LLMDet
    from `model_path` and publish each as a module-level global named after
    its model (e.g. `gpt2`, `opt`, ...), so `calculate_perplexity_fast` can
    resolve a table by name via `globals()`.

    The initial load is slow because every .npz archive is read from disk.
    """

    # The upstream implementation downloaded the archives from the
    # HuggingFace Hub and cached them locally; this variant reads them from
    # a local directory instead (expects `{model_path}/npz/{model}.npz`).
    # dm = datasets.DownloadManager()
    # files = dm.download_and_extract('https://huggingface.co/datasets/TryMore/n_grams_probability/resolve/main/n-grams_probability.tar.gz')
    files = model_path
    model = ["gpt2", "opt", "unilm", "llama", "bart", "t5", "bloom", "neo", "vicuna" , "gpt2_large", "opt_3b"]
    global_vars = globals()
    for item in tqdm(model):
        # allow_pickle is required because the archives store Python dicts.
        # NOTE(review): allow_pickle=True on an untrusted archive can execute
        # arbitrary code at load time — only point this at trusted files.
        n_grams = np.load(f'{files}/npz/{item}.npz', allow_pickle=True)
        # NOTE(review): every archive's payload appears to be stored under
        # the key "t5" regardless of model — confirm against the dataset
        # layout before changing.
        global_vars[item] = n_grams["t5"]

def calculate_perplexity_fast(text_set_token_ids, n_grams_probability, vocab_size):
    """
    Compute a proxy perplexity for each token-id sequence using the n-gram
    probability tables loaded by `load_probability()`.

    Parameters
    ----------
    text_set_token_ids : list of token-id sequences (one per text).
    n_grams_probability : indexable container of six dicts, or the name (str)
        of a module-level global populated by `load_probability()`.
        Layout: [0]/[1] = 2-gram next-token ids / probabilities keyed by a
        1-token tuple; [2]/[3] = 3-gram tables keyed by 2-token tuples;
        [4]/[5] = 4-gram tables keyed by 3-token tuples. The id entries are
        numpy arrays of the stored top-k candidate next tokens, aligned with
        the probability arrays.
    vocab_size : vocabulary size of the proxied model; used to spread the
        residual probability mass over tokens outside the stored top-k.

    Returns
    -------
    list[float]
        Negated mean log2 probability (proxy perplexity) per input text.
    """
    # Allow passing the name of a table previously loaded into globals().
    if isinstance(n_grams_probability, str):
        n_grams_probability = globals()[n_grams_probability]

    def _log2_contribution(candidate_ids, candidate_probs, next_token):
        """Log2-probability credit for `next_token` under one n-gram entry.

        If `next_token` is among the stored top-k candidates (with a
        positive stored probability), return log2 of that probability.
        Otherwise return log2 of the residual mass (1 - sum of stored
        probabilities) spread uniformly over the (vocab_size - top_k)
        unstored tokens. Returns 0.0 when no positive mass is available.
        """
        candidates = candidate_ids.tolist()
        if next_token in candidates:
            prob = candidate_probs[candidates.index(next_token)]
            if prob > 0:
                return math.log2(prob)
        else:
            residual = 1 - sum(candidate_probs)
            if residual > 0:
                return math.log2(residual / (vocab_size - len(candidate_ids)))
        return 0.0

    test_perplexity = []
    for text_token_ids in text_set_token_ids:
        ppl = 0.0
        matched_grams = 0
        # Predict token i+1 from the longest matching stored context
        # ending at position i (4-gram first, then 3-gram, then 2-gram).
        for i in range(2, len(text_token_ids) - 1):
            next_token = text_token_ids[i + 1]
            key4 = (text_token_ids[i - 2], text_token_ids[i - 1], text_token_ids[i])
            key3 = (text_token_ids[i - 1], text_token_ids[i])
            key2 = (text_token_ids[i],)
            if key4 in n_grams_probability[4]:
                ppl += _log2_contribution(
                    n_grams_probability[4][key4], n_grams_probability[5][key4], next_token)
                matched_grams += 1
            elif key3 in n_grams_probability[2]:
                ppl += _log2_contribution(
                    n_grams_probability[2][key3], n_grams_probability[3][key3], next_token)
                matched_grams += 1
            elif key2 in n_grams_probability[0]:
                ppl += _log2_contribution(
                    n_grams_probability[0][key2], n_grams_probability[1][key2], next_token)
                matched_grams += 1

        # +1 keeps the division defined when no n-gram matched at all.
        test_perplexity.append(-(ppl / (matched_grams + 1)))

    return test_perplexity


def calculate_perplexity(text, model, tokenizer, device):
    """
    Summary statistics of the per-token negative log-likelihood of `text`
    under `model`.

    The text is tokenized (truncated to 400 tokens) and, for every position,
    the probability the model assigns to the actual next token is converted
    to a negative log value.

    Returns
    -------
    list[float] : [mean, std, max, min] of the negative log-likelihoods.
    """
    token_ids = tokenizer.encode(
        text, return_tensors="pt", max_length=400, truncation=True
    ).to(device)
    n_tokens = token_ids.size(1)

    model.eval()
    with torch.no_grad():
        out_logits = model(token_ids).logits            # [batch, seq_len, vocab]
        all_probs = torch.softmax(out_logits, dim=-1)
        # Probability assigned at position t to the true token at t+1.
        next_token_probs = all_probs[0, torch.arange(n_tokens - 1), token_ids[0][1:]]
        nll = -torch.log(next_token_probs)              # [seq_len - 1]

    return [
        nll.mean().item(),
        nll.std().item(),
        nll.max().item(),
        nll.min().item(),
    ]

def write_to_csv(data, filename):
    """
    Append `data` (an iterable of rows, each an iterable of fields) to the
    CSV file `filename`, creating the file if it does not exist.

    `newline=''` is required by the csv module so that it controls line
    endings itself; utf-8 is forced so the output does not depend on the
    platform's default encoding.
    """
    with open(filename, 'a', newline='', encoding='utf-8') as file:
        csv.writer(file).writerows(data)