import os
import logging
import argparse
import torch
import numpy as np
from typing import List, Optional

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import StoppingCriteria, StoppingCriteriaList

class StopWordsCriteria(StoppingCriteria):
    """Stopping criterion that halts generation on stop-word token sequences.

    NOTE(review): the check uses an all-of-batch comparison, so generation
    stops only when *every* sequence in the batch ends with the same stop
    word — confirm this is the intended batch semantics.
    """

    def __init__(self, stop_word_ids: List[torch.LongTensor]):
        """
        Args:
            stop_word_ids: list of [1, N] LongTensors, each holding the
                token ids of one stop word/phrase.
        """
        super().__init__()  # fix: base-class init was never invoked
        self.stop_word_ids = stop_word_ids

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # input_ids: [batch, seq_len]; compare each sequence's trailing
        # tokens against every configured stop word.
        batch_size = input_ids.shape[0]
        for stop_word in self.stop_word_ids:
            word_len = stop_word.shape[-1]
            tail = input_ids[:, -word_len:]
            expected = stop_word.expand(batch_size, -1)
            # The shape guard covers sequences still shorter than the
            # stop word; torch.equal is the exact-match comparison.
            if tail.shape == expected.shape and torch.equal(tail, expected):
                return True

        return False

class GptEngine(object):
    """Causal-LM generation engine wrapping a HuggingFace model + tokenizer.

    Loads the model onto a single device (optionally in fp16), seeds the
    RNGs for reproducible sampling, and exposes :meth:`generate` which
    samples continuations for a prompt and truncates them at stop words.
    """

    def __init__(self,
            model_name_or_path='EleutherAI/gpt-neo-1.3B',
            tokenizer_path=None,
            device='cuda:0',
            fp16=True,
            seed=42,
            eos_token_id=None,
            pad_token_id=None):
        """
        Args:
            model_name_or_path: HF hub id or local path of the model.
            tokenizer_path: tokenizer location; defaults to the model path.
            device: torch device string the model is moved to.
            fp16: load weights as float16 with low_cpu_mem_usage.
            seed: RNG seed applied to numpy and torch.
            eos_token_id: override for the tokenizer's EOS id.
            pad_token_id: override for padding; defaults to the EOS id.
        """
        self.device = torch.device(device)
        self.reset_seed(seed)
        self.fp16 = fp16
        logging.debug('start to load model')
        if fp16:
            model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
                    torch_dtype=torch.float16, low_cpu_mem_usage=True)
        else:
            model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
        model = model.to(self.device)
        model.eval()
        self.model = model
        self.tokenizer_path = tokenizer_path or model_name_or_path
        self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path)
        logging.debug('load model completely')
        self.eos_token_id = eos_token_id or self.tokenizer.eos_token_id
        self.pad_token_id = pad_token_id or self.eos_token_id

    def reset_seed(self, seed):
        """Re-seed numpy and torch RNGs so sampling is reproducible."""
        self.seed = seed
        np.random.seed(self.seed)
        torch.random.manual_seed(self.seed)
        # fix: seed every CUDA device, not just the current one
        torch.cuda.manual_seed_all(self.seed)

    def _parse_gen_out(self, out_ids, start_idx, stop_ids):
        """Return out_ids[start_idx:], truncated where a stop sequence ends.

        Args:
            out_ids: 1-D LongTensor of generated token ids.
            start_idx: index where the generated continuation begins.
            stop_ids: list of [1, N] LongTensors of stop-word token ids.

        NOTE(review): the cut index is the position *after* the stop word,
        so the stop tokens themselves are kept — confirm that is intended.
        """
        def _check_stop_ids(out_ids, i, stop_ids):
            # Does any stop word end exactly at position i?
            for stop_word in (s[0] for s in stop_ids):
                slen = stop_word.shape[-1]
                window = out_ids[max(i - slen, 0):i]
                # fix: elementwise `==` on a multi-token window raised
                # "Boolean value of Tensor ... is ambiguous"; guard the
                # shape and use torch.equal for an exact match.
                if window.shape == stop_word.shape and torch.equal(window, stop_word):
                    return True

            return False

        end_idx = out_ids.shape[-1]
        for i in range(start_idx, end_idx):
            if _check_stop_ids(out_ids, i, stop_ids):
                end_idx = i
                break
        return out_ids[start_idx: end_idx]

    def _truncate_stop_word(self, answer, stop_words):
        """Cut the decoded string at the first occurrence of each stop word."""
        for stop_word in stop_words:
            if stop_word in answer:
                ind = answer.index(stop_word)
                answer = answer[:ind]
        return answer

    @torch.no_grad()
    def generate(self, prompt, top_k=50, top_p=0.95, max_len=30, min_len=5,
            temperature=0.9, length_penalty=1.0, repetition_penalty=1.0,
            do_sample=True, num_return_sequences=1, stop_words=None):
        """Sample continuations of `prompt`, truncated at `stop_words`.

        Args:
            prompt: input text; non-str values are rejected with [].
            max_len/min_len: continuation length bounds (prompt excluded).
            stop_words: strings at which generation stops / output is cut.

        Returns:
            List of `num_return_sequences` decoded continuation strings.
        """
        # fix: avoid the shared-mutable-default-argument pitfall
        if stop_words is None:
            stop_words = []

        if not isinstance(prompt, str):
            # fix: logging.warn is deprecated in favor of logging.warning
            logging.warning('invalid prompt %s', prompt)
            return []

        logging.debug('prompt: %s', prompt)
        input_ids = self.tokenizer.encode(prompt)
        input_ids = torch.tensor(input_ids).to(self.device).unsqueeze(0)
        input_ids_size = input_ids.shape[-1]
        stop_ids = [self.tokenizer.encode(i, return_tensors='pt').to(self.device) for i in stop_words]
        stopping_criteria = StoppingCriteriaList([StopWordsCriteria(stop_ids)])

        beam_outputs = self.model.generate(
                input_ids,
                max_length=max_len + input_ids_size,
                min_length=min_len + input_ids_size,
                do_sample=do_sample,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                length_penalty=length_penalty,
                repetition_penalty=repetition_penalty,
                num_return_sequences=num_return_sequences,
                pad_token_id=self.pad_token_id,
                eos_token_id=self.eos_token_id,
                stopping_criteria=stopping_criteria
        )

        answers = []
        for idx, beam_output in enumerate(beam_outputs):
            logging.debug('stoped out[%s] size is %s', idx, len(beam_output))
            # drop the prompt tokens, keep only the continuation
            out_ids = beam_output[input_ids_size:]
            answer = self.tokenizer.decode(out_ids)
            logging.debug('decoded str %s', answer)
            answer = self._truncate_stop_word(answer, stop_words)
            logging.debug('truncated str %s', answer)
            answers.append(answer)
        return answers
    
def _main():
    """CLI smoke test: load the engine and time a few generations.

    Uses the file's (previously unused) argparse import so the device and
    prompt are no longer hard-coded; defaults preserve the old behavior.
    """
    import time

    parser = argparse.ArgumentParser(description='GptEngine smoke test')
    parser.add_argument('--device', default='cuda:1',
            help='torch device to load the model on')
    parser.add_argument('--prompt', default='hello',
            help='prompt text to generate from')
    args = parser.parse_args()

    engine = GptEngine(device=args.device)
    ans = engine.generate(args.prompt)
    print(ans)

    # Time generation for one and for several returned sequences.
    for num_return_sequences in (1, 3):
        start = time.time()
        ans = engine.generate(args.prompt,
                num_return_sequences=num_return_sequences)
        print(ans, f'cost: {time.time() - start}s')


if __name__ == '__main__':
    _main()