from copy import deepcopy
import traceback
from typing import List
from tqdm import tqdm
from transformers import LogitsProcessorList, LogitsProcessor, pipeline
from transformers.cache_utils import DynamicCache
import json
import os
import torch
import torch.nn as nn
from transformers.generation.logits_process import MinNewTokensLengthLogitsProcessor, TemperatureLogitsWarper, TopPLogitsWarper
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, LogitsProcessorList

from utils.func_utils import optimize_distribution
from .prompts import get_prompt




class ContrastiveProcessor(LogitsProcessor):
    """Beam-search logits processor for contrastive decoding.

    At each generation step it combines the expert scores handed in by
    ``generate()`` with log-probabilities from an "amateur" model and a frozen
    "origin" model, penalizing tokens favored by the amateur. The processor
    holds per-call mutable state (KV caches, previous beam input ids, step
    counter), so ``prepare_before_generate`` must be called before every
    ``generate()`` that uses it.
    """

    def __init__(self, 
                 args,
                 amateur_model, 
                 amateur_tokenizer,
                 generation_config,
                 num_beams=10,
                 closed_form=False,
                 kl_threshold=0.0, # not used unless closed_form is True
                 coef=0.5, 
                 use_cache=False,
                 ):
        """
        Created once for each model.

        Args:
        - args: namespace providing ``origin_model`` (checkpoint loaded here)
          and ``amateur_model`` (name used by the dtype heuristic below).
        - amateur_model: already-loaded amateur model used for the contrast term.
        - amateur_tokenizer: tokenizer paired with the amateur model.
        - generation_config: used to build the amateur-side logits processors.
        - num_beams: beam width; must match ``num_beams`` passed to ``generate()``.
        - closed_form: if True, derive the output distribution via
          ``optimize_distribution`` under a KL budget instead of the linear
          contrastive combination.
        - kl_threshold: KL budget for the closed-form path (ignored otherwise).
        - coef: weight of the contrast term in the linear combination.
        - use_cache: enable KV caching for the amateur/origin forward passes.
        """

        self.amateur_model = amateur_model
        self.amateur_tokenizer = amateur_tokenizer
        self.generation_config = generation_config
        self.coef = coef
        self.use_cache = use_cache
        self.amateur_temperature = 1.0
        self.normalize_logits = True
        self.closed_form = closed_form
        self.kl_threshold = kl_threshold
        origin_model_name = args.origin_model

        self.num_beams = num_beams

        print("[INFO] Load origin model from", origin_model_name)
        # NOTE(review): the dtype choice keys off args.amateur_model rather
        # than args.origin_model — confirm this is intentional.
        self.origin_model = AutoModelForCausalLM.from_pretrained(
                origin_model_name,
                torch_dtype=torch.bfloat16 if 'gemma' not in args.amateur_model.lower() else torch.float16
                ).to(self.amateur_model.device)
        
        # Qwen2 needs special handling: an explicit "<|endoftext|>" BOS text in
        # prepare_before_generate, and padding of its logits up to the expert's
        # vocab width in __call__.
        self.is_Qwen = 'Qwen2' in str(self.amateur_model.__class__)


    def _initialize_cache(self, input_ids):
        """(Re)create empty KV caches for both auxiliary models (or disable
        caching), then let each model compute its initial cache position for
        ``input_ids``. Mutates ``model_kwargs_amateur`` / ``model_kwargs_origin``."""
        if self.use_cache:
            self.model_kwargs_amateur["use_cache"] = True
            self.model_kwargs_amateur["past_key_values"] = (
                    DynamicCache()
                )
            self.model_kwargs_origin["use_cache"] = True
            self.model_kwargs_origin["past_key_values"] = (
                    DynamicCache()
                )
        else:
            self.model_kwargs_amateur["use_cache"] = False
            self.model_kwargs_amateur["past_key_values"] = None
            self.model_kwargs_origin["use_cache"] = False
            self.model_kwargs_origin["past_key_values"] = None
        self.model_kwargs_amateur = self.amateur_model._get_initial_cache_position(input_ids.to(self.amateur_model.device), self.model_kwargs_amateur)
        self.model_kwargs_origin = self.origin_model._get_initial_cache_position(input_ids.to(self.origin_model.device), self.model_kwargs_origin)


    def prepare_before_generate(self, inputs):
        """
        Called before each generate().

        Resets all per-call state: replaces the expert prompt with an (almost)
        empty amateur-side context, expands inputs to ``num_beams`` rows per
        sample, reinitializes the KV caches, and zeroes the step counter.

        Args:
        - inputs: tokenizer output dict with 'input_ids' and 'attention_mask'
          for the expert prompt (only its batch size and device are used).
        """
        inputs = deepcopy(inputs)


        # Overwrite the input_ids for the first generation step.
        # The amateur/origin models deliberately do NOT see the expert's
        # prompt — they start from an empty (or BOS-only) context.
        bsz = inputs['input_ids'].shape[0]
        # inputs['input_ids'] = torch.empty((bsz, 0), dtype=torch.long).to(inputs['input_ids'].device)
        # inputs['attention_mask'] = torch.empty((bsz, 0), dtype=torch.long).to(inputs['attention_mask'].device)
        if self.is_Qwen:
            input_sequence = ["<|endoftext|>"] * bsz
            inputs = self.amateur_tokenizer(input_sequence, return_tensors="pt").to(inputs['input_ids'].device)
        else:
            input_sequence = [""] * bsz
            inputs = self.amateur_tokenizer(input_sequence, return_tensors="pt").to(inputs['input_ids'].device)


        # Update all states including past_key_values.
        # Each of the bsz rows is repeated num_beams times so the auxiliary
        # models run one forward pass per beam hypothesis.
        self.model_kwargs_amateur = {
            'attention_mask': inputs['attention_mask'].to(self.amateur_model.device),
        }
        self.first_input_ids, self.model_kwargs_amateur = self.amateur_model._expand_inputs_for_generation(
            input_ids=inputs['input_ids'],
            expand_size=self.num_beams,
            is_encoder_decoder=False,
            **self.model_kwargs_amateur,
        )
        # Prepare necessary things for origin_model
        self.model_kwargs_origin = {
            'attention_mask': inputs['attention_mask'].to(self.origin_model.device),
        }
        self.first_input_ids_origin, self.model_kwargs_origin = self.origin_model._expand_inputs_for_generation(
            input_ids=inputs['input_ids'],
            expand_size=self.num_beams,
            is_encoder_decoder=False,
            **self.model_kwargs_origin,
        )
        
        self._initialize_cache(inputs['input_ids'].to(self.amateur_model.device))

        # Initialize amateur logits processor
        input_ids_seq_length = inputs["input_ids"].shape[-1]
        self.amateur_logits_processor = self._get_amateur_logits_processor(input_ids_seq_length)

        # Maintain some states:
        # prev_input_ids — amateur-side input of the previous step, used to
        # recover the beam reordering; step_num — generation step counter.
        self.prev_input_ids = None
        self.step_num = 0


    def _get_amateur_logits_processor(self, input_ids_seq_length):
        """Build the processors applied to the amateur scores. Currently only
        EOS suppression until ``min_new_tokens`` tokens have been generated."""
        processors = LogitsProcessorList()
        generation_config = self.generation_config

        if (
            generation_config.min_new_tokens is not None
            and generation_config.min_new_tokens > 0
        ):
            assert generation_config.eos_token_id is not None, "eos_token_id has to be defined in the generation config"
            processors.append(
                MinNewTokensLengthLogitsProcessor(
                    input_ids_seq_length, generation_config.min_new_tokens, generation_config.eos_token_id
                )
            )
        
        return processors

    def _infer_beam_idx(self, input_ids, prev_input_ids):
        """
        Infer the beam index from the input_ids and previous_input_ids.

        ``generate()`` reorders beams between steps but gives no reorder index
        to logits processors, so we recover it by matching each current row's
        prefix (everything except its newest token) against the rows of the
        previous step. O(bsz^2) comparisons; first match wins.

        Args:
        - input_ids: The input ids for the current generation step. [bsz, seq_len + 1]
        - previous_input_ids: The input ids for the previous generation step. [bsz, seq_len]

        Returns:
        - 1-D LongTensor of length bsz mapping each current row to its source row.

        Raises:
        - ValueError if some row has no matching predecessor (debug tensors are
          dumped first). NOTE(review): assumes the ".runtime/" directory exists.
        """
        beam_idx = []
        prev_input_ids_length = prev_input_ids.shape[1]
        last_outputs = input_ids[:, -1-prev_input_ids_length:-1] # [bsz, seq_len]
        for i in range(last_outputs.shape[0]):
            for j in range(prev_input_ids.shape[0]):
                if torch.all(last_outputs[i] == prev_input_ids[j]):
                    beam_idx.append(j)
                    break
            else:
                torch.save(input_ids, ".runtime/tmp_input_ids.pt")
                torch.save(prev_input_ids, ".runtime/tmp_prev_input_ids.pt")
                raise ValueError("Cannot find the corresponding beam index")
        
        return torch.tensor(beam_idx)


    

    # Type note: input_ids is [bsz * num_beams, cur_len] on the expert model's
    # device; scores is [bsz * num_beams, vocab_size] of log-probs.
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        """
        Called during each generation step within generate()
        Args:
        - input_ids: The input ids for the current generation step.
        - scores: The log(prob) for the current generation step.
        Returns:
        - Adjusted, re-normalized log-probabilities of the same shape as scores.
        """

        
        # Overwrite input_ids based on the context_windows
        # on the first run, overwrite the input_ids directly
        if self.step_num == 0:
            input_ids = self.first_input_ids
        # on the second runs, keep only the generated tokens. Overwrite cache to remove '<bos_token>'
        elif self.step_num == 1:
            input_ids = input_ids[:,-1:]
            # NOTE(review): the attention mask is truncated to its last column
            # here to match the single-token input — verify against cache reset.
            self.model_kwargs_amateur['attention_mask'] = self.model_kwargs_amateur['attention_mask'][:,-1:]
            self.model_kwargs_origin['attention_mask'] = self.model_kwargs_origin['attention_mask'][:,-1:]
            self._initialize_cache(input_ids)
        # on the other runs, keep only the generated tokens
        else:
            input_ids = input_ids[:,-self.step_num:]


        # Beam search
        assert self.num_beams > 1, "Only support beam search"
        """ 1. Calculate logits of amateur model (On amateur model device)"""
        # Get amateur inputs
        model_inputs_amateur = self.amateur_model.prepare_inputs_for_generation(input_ids.to(self.amateur_model.device), **self.model_kwargs_amateur)
        model_inputs_origin = self.origin_model.prepare_inputs_for_generation(input_ids.to(self.origin_model.device), **self.model_kwargs_origin)
        
        # reorder past_key_values based on beam_idx, so the caches follow the
        # same beam permutation generate() applied between steps
        if self.prev_input_ids is not None and self.model_kwargs_amateur["past_key_values"] is not None:
            # TODO: check whether the beam_idx is correct
            beam_idx = self._infer_beam_idx(input_ids, self.prev_input_ids)
            beam_idx = beam_idx.to(self.amateur_model.device)
            self.model_kwargs_amateur["past_key_values"] = self.amateur_model._temporary_reorder_cache(
                self.model_kwargs_amateur["past_key_values"], beam_idx
            )
            self.model_kwargs_origin["past_key_values"] = self.origin_model._temporary_reorder_cache(
                self.model_kwargs_origin["past_key_values"], beam_idx
            )


        outputs_amateur = self.amateur_model(**model_inputs_amateur)
        next_token_logits_amateur = outputs_amateur.logits[:, -1, :] # [bsz * num_beams, vocab_size]
        next_token_scores_amateur = torch.nn.functional.log_softmax(next_token_logits_amateur / self.amateur_temperature, dim=-1).to(scores.device) # [bsz * num_beams, vocab_size]
        if self.is_Qwen:
            # Pad the amateur vocab up to the expert's vocab width with -inf.
            padded_tensor = torch.full((next_token_scores_amateur.size(0), scores.size(1)), float("-inf"), device=scores.device)
            padded_tensor[:, :next_token_scores_amateur.size(1)] = next_token_scores_amateur
            next_token_scores_amateur = padded_tensor


        # Update model_kwargs_amateur (Optional)
        self.prev_input_ids = input_ids # update prev_input_ids 
        model_kwargs = self.amateur_model._update_model_kwargs_for_generation(
            outputs_amateur, self.model_kwargs_amateur, is_encoder_decoder=self.amateur_model.config.is_encoder_decoder
        )
        self.model_kwargs_amateur = model_kwargs

            
        outputs_origin = self.origin_model(**model_inputs_origin)
        next_token_logits_origin = outputs_origin.logits[:, -1, :]
        next_token_scores_origin = torch.nn.functional.log_softmax(next_token_logits_origin, dim=-1).to(scores.device)
        if self.is_Qwen:
            padded_tensor = torch.full((next_token_scores_origin.size(0), scores.size(1)), float("-inf"), device=scores.device)
            padded_tensor[:, :next_token_scores_origin.size(1)] = next_token_scores_origin
            next_token_scores_origin = padded_tensor

        # Update model_kwargs_origin (Optional)
        model_kwargs_origin = self.origin_model._update_model_kwargs_for_generation(
            outputs_origin, self.model_kwargs_origin, is_encoder_decoder=self.origin_model.config.is_encoder_decoder
        )
        self.model_kwargs_origin = model_kwargs_origin






        """ 2. Add logits processor (after this step, scores maybe unnormalized)"""
        # Amateur logits processor
        next_token_scores_amateur = self.amateur_logits_processor(input_ids, next_token_scores_amateur)

        # Only keep topk tokens of expert `scores` (Optional) (Original implementation in Contra-Decode)
        # Tokens where amateur and origin probabilities differ by less than a
        # uniform-probability margin are left untouched (indices_to_keep);
        # tokens outside the expert's top-(k-2)*num_beams are dropped.
        vocab_size = next_token_scores_amateur.shape[-1]
        uniform_prob = 1/vocab_size
        k = 10
        topk_value = torch.topk(scores, k*self.num_beams)[0]
        indices_to_remove = (scores < topk_value[..., -(k-2)*self.num_beams, None]) # only keep top 2 * beam tokens
        indices_to_keep = (torch.abs(torch.exp(next_token_scores_amateur) - torch.exp(next_token_scores_origin)) < uniform_prob) # small difference token won't be optimized
        indices_to_keep |= (scores < topk_value[..., -1, None]) # removed tokens should not be optimized
        backup_scores = scores.clone()
        # scores = scores.masked_fill(indices_to_remove | indices_to_keep, float("-inf"))
        scores = scores.masked_fill(indices_to_keep, float("-inf"))




        """ 3. Calculate contrastive logits (On paraphrase model device)"""
        if not self.closed_form:
            next_token_scores_amateur[next_token_scores_amateur == float("-inf")] = float("inf") # reverse the tokens with 0 probability, in case of "-inf - inf = nan"
            new_scores = (self.coef + 1) * scores - self.coef * next_token_scores_amateur
        else:
            # re-normalize the scores
            p_scores = nn.functional.log_softmax(scores, dim=-1)
            q_scores = nn.functional.log_softmax(next_token_scores_amateur, dim=-1)

            # Skip rows where every token was excluded from optimization.
            valid_batch_mask = ~(indices_to_keep.all(dim=-1))
            p_scores = p_scores[valid_batch_mask]
            q_scores = q_scores[valid_batch_mask]


            alpha, h_scores = optimize_distribution(p_scores, q_scores, self.kl_threshold)
            new_scores = torch.full_like(scores, float("-inf"))
            new_scores[valid_batch_mask] = h_scores



        """ 2. Add logits processor (after this step, scores maybe unnormalized)"""
        # Untouched tokens keep their original expert scores; dropped tokens
        # are forced to -inf.
        new_scores = torch.where(indices_to_keep, backup_scores, new_scores)
        new_scores = new_scores.masked_fill(indices_to_remove, float("-inf"))

        """ 4. normalize the scores before beam score aggregation"""
        new_scores = nn.functional.log_softmax(new_scores, dim=-1)
        

        del outputs_amateur, next_token_logits_amateur, next_token_scores_amateur

        self.step_num += 1

        return new_scores
        




def _batch_decoding(para_model, tokenizer, messages, config, logits_processors):
    """Run contrastive beam-search decoding on one batch of chat messages.

    Tokenizes the chat-templated prompts, resets the contrastive processor's
    per-call state, generates with the expert model, and returns only the
    newly generated continuations as decoded strings.
    """
    encoded = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        padding=True,
        add_generation_prompt=True,
        return_dict=True,
    ).to(para_model.device)
    prompt_len = encoded.input_ids.shape[-1]

    # Contrastive decoding: the processor must be re-armed before every call.
    logits_processors[0].prepare_before_generate(encoded)
    generated = para_model.generate(
        input_ids=encoded.input_ids,
        attention_mask=encoded.attention_mask,
        generation_config=config,
        logits_processor=logits_processors,
    )
    # Drop the prompt tokens; decode only what was generated.
    return tokenizer.batch_decode(generated[:, prompt_len:], skip_special_tokens=True)


def bi_contrastive_paraphrase(args, target_texts):
    """Paraphrase ``target_texts`` with bidirectional contrastive decoding.

    Loads the paraphrase (expert) and amateur models, builds chat-style
    prompts per model family, and decodes batch by batch. Best-effort: on any
    decoding error or Ctrl-C it returns whatever paraphrases were completed.

    Args:
    - args: namespace with ``paraphrase_model``, ``amateur_model``,
      ``origin_model``, ``coef``, ``closed_form``, ``kl_threshold``,
      ``batch_size``, ``debug``.
    - target_texts: list of strings to paraphrase.

    Returns:
    - list of paraphrased strings (may be shorter than input on early stop).
    """
    if args.debug:
        target_texts = target_texts[:100]

    # Put expert and amateur on separate GPUs when two are available.
    if torch.cuda.device_count() == 1:
        device_0 = torch.device('cuda')
        device_1 = torch.device('cuda')
    else:
        device_0 = torch.device("cuda:0")
        device_1 = torch.device("cuda:1")

    if 'gemma' in args.paraphrase_model.lower() or 'gemma' in args.amateur_model.lower():
        print("[WARNING] use float16 for gemma model")

    # Load the models and tokenizers (gemma uses float16 instead of bfloat16).
    paraphrase_model = AutoModelForCausalLM.from_pretrained(
        args.paraphrase_model,
        torch_dtype=torch.bfloat16 if 'gemma' not in args.paraphrase_model.lower() else torch.float16
        ).to(device_0)
    amateur_model = AutoModelForCausalLM.from_pretrained(
        args.amateur_model,
        torch_dtype=torch.bfloat16 if 'gemma' not in args.amateur_model.lower() else torch.float16
        ).to(device_1)
    tokenizer = AutoTokenizer.from_pretrained(args.paraphrase_model)
    amateur_tokenizer = AutoTokenizer.from_pretrained(args.amateur_model)

    # Left padding so every row's generation starts right after its prompt.
    padding_side = "left"
    tokenizer.padding_side = padding_side
    amateur_tokenizer.padding_side = padding_side

    # Encode the input text. Family checks are case-insensitive for
    # consistency with the dtype heuristics above.
    prompt = get_prompt(args)
    if 'gemma' in args.paraphrase_model.lower():
        # gemma chat templates reject a system role; user turn only.
        messages = [
            [{'role': 'user', 'content': prompt.format(t)}] for t in target_texts
        ]
    elif 'qwen' in args.paraphrase_model.lower():
        messages = [
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {'role': 'user', 'content': prompt.format(t)}
            ] for t in target_texts
        ]
    else:
        # BUGFIX: `messages` was previously left undefined for any other model
        # family, crashing later with a NameError. Fall back to a plain user
        # turn, which every chat template accepts.
        messages = [
            [{'role': 'user', 'content': prompt.format(t)}] for t in target_texts
        ]

    # Prepare generation config and the contrastive logits processor.
    config = GenerationConfig(max_new_tokens=300,
                        do_sample=False,
                        num_beams=10) # beam=10
    logits_processors = LogitsProcessorList([
        ContrastiveProcessor(args,
                            amateur_model,
                            amateur_tokenizer,
                            config,
                            num_beams=config.num_beams,
                            coef=args.coef,
                            closed_form=args.closed_form,
                            kl_threshold=args.kl_threshold,
                            use_cache=True)
    ])

    all_con = []
    for i in tqdm(range(0, len(target_texts), args.batch_size)):
        batch_input_texts = messages[i:i+args.batch_size]
        try:
            batch_con = _batch_decoding(paraphrase_model, tokenizer, batch_input_texts, config, logits_processors)
        except KeyboardInterrupt:
            # User-requested early stop: return the partial results.
            print("[WARNING] Early stop by user")
            return all_con
        except Exception:
            # Deliberate best-effort: log the failure and return what we have.
            traceback.print_exc()
            print("[ERROR] Error occurs in batch decoding, early stop")
            return all_con
        all_con.extend(batch_con)

    return all_con