import copy
import logging
from typing import Dict, List, Optional, Union

from lagent.schema import ModelStatusCode
from .base_api import APITemplateParser
from .base_llm import BaseLLM

logger = logging.getLogger(__name__)


class HFTransformer(BaseLLM):
    """Wrapper around generic HuggingFace transformer models.

    Loads a HuggingFace tokenizer/model pair and implements token-by-token
    streaming generation on top of it.

    Args:
        path (str): name or path of the HuggingFace model.
        tokenizer_path (str, optional): tokenizer path; defaults to ``path``
            when not given.
        tokenizer_kwargs (dict, optional): extra keyword arguments for the
            tokenizer. Defaults to ``{}``.
        tokenizer_only (bool): if True, only the tokenizer is initialized.
        model_kwargs (dict, optional): extra keyword arguments forwarded to
            ``from_pretrained``. Defaults to ``{'device_map': 'auto'}``.
        meta_template (dict, optional): meta prompt template of the model.
        stop_words_id (Union[List[int], int], optional): token id(s) treated
            as additional end-of-sequence markers.
    """

    def __init__(self,
                 path: str,
                 tokenizer_path: Optional[str] = None,
                 tokenizer_kwargs: Optional[dict] = None,
                 tokenizer_only: bool = False,
                 model_kwargs: Optional[dict] = None,
                 meta_template: Optional[Dict] = None,
                 stop_words_id: Optional[Union[List[int], int]] = None,
                 **kwargs):
        """Initialize the tokenizer and (unless ``tokenizer_only``) the model."""
        super().__init__(
            path=path,
            tokenizer_only=tokenizer_only,
            meta_template=meta_template,
            **kwargs)

        # Normalize stop-word ids to a list.
        if isinstance(stop_words_id, int):
            stop_words_id = [stop_words_id]
        self.gen_params.update(stop_words_id=stop_words_id)

        # Materialize defaults here instead of in the signature so mutable
        # dicts are never shared across instances (mutable-default pitfall).
        if tokenizer_kwargs is None:
            tokenizer_kwargs = {}
        if model_kwargs is None:
            model_kwargs = dict(device_map='auto')

        # Load tokenizer and, unless tokenizer-only mode, the model.
        self._load_tokenizer(path, tokenizer_path, tokenizer_kwargs)
        if not tokenizer_only:
            self._load_model(path=path, model_kwargs=model_kwargs)

        # Default generation hooks (kept for API compatibility).
        from transformers.generation.utils import (LogitsProcessorList,
                                                   StoppingCriteriaList)
        self.logits_processor = LogitsProcessorList()
        self.stopping_criteria = StoppingCriteriaList()
        self.prefix_allowed_tokens_fn = None

        # Resolve stop words to token ids: explicit ids take precedence,
        # otherwise the last token id of each stop-word string is used.
        stop_words_id = []
        if self.gen_params.get('stop_words_id'):
            stop_words_id = self.gen_params.get('stop_words_id')
        elif self.gen_params.get('stop_words'):
            for sw in self.gen_params.get('stop_words'):
                stop_words_id.append(self.tokenizer(sw)['input_ids'][-1])
        self.additional_eos_token_id = stop_words_id

    def _load_tokenizer(self, path: str, tokenizer_path: Optional[str],
                        tokenizer_kwargs: dict):
        """Load the tokenizer and ensure a pad token is configured.

        Raises:
            ValueError: if no pad token id can be derived from either the
                tokenizer or the model's generation config.
        """
        from transformers import AutoTokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_path if tokenizer_path else path,
            trust_remote_code=True,
            **tokenizer_kwargs)

        # The generation config is fetched lazily below only when needed to
        # fill in a missing pad token; set an explicit default so attribute
        # access elsewhere (e.g. in ``stream_generate``) is always safe.
        self.gcfg = None

        if self.tokenizer.pad_token_id is None:
            if self.tokenizer.eos_token is not None:
                # Fall back to the EOS token for padding.
                self.tokenizer.pad_token = self.tokenizer.eos_token
            else:
                from transformers.generation import GenerationConfig
                self.gcfg = GenerationConfig.from_pretrained(path)
                if self.gcfg.pad_token_id is not None:
                    self.tokenizer.pad_token_id = self.gcfg.pad_token_id
                else:
                    raise ValueError('需要设置 pad_token_id')

    def _load_model(self, path: str, model_kwargs: dict):
        """Load the model with ``AutoModel`` and switch it to eval mode."""
        import torch
        from transformers import AutoModel
        # Copy before mutating so a caller-owned (or shared-default) dict is
        # left untouched.
        model_kwargs = dict(model_kwargs)
        model_kwargs.setdefault('torch_dtype', torch.float16)
        self.model = AutoModel.from_pretrained(
            path, trust_remote_code=True, **model_kwargs)
        self.model.eval()

    def tokenize(self, inputs: str):
        """Tokenize a single string and return its token ids as a list."""
        assert isinstance(inputs, str)
        inputs = self.tokenizer(
            inputs, return_tensors='pt', return_length=True)
        return inputs['input_ids'].tolist()

    def generate(
        self,
        inputs: Union[str, List[str]],
        do_sample: bool = True,
        **kwargs,
    ):
        """Return the full completion by draining the streaming generator.

        Args:
            inputs (Union[str, List[str]]): input text(s) to complete.
            do_sample (bool): whether to use sampling; defaults to True.
            **kwargs: additional generation parameters.

        Returns:
            Union[str, List[str]]: the generated text(s); an empty string if
            the stream yields no chunks.
        """
        # Each streamed chunk carries the full decoded output so far, so the
        # last chunk is the complete response.
        response = ''
        for _, chunk, _ in self.stream_generate(inputs, do_sample, **kwargs):
            response = chunk
        return response

    def stream_generate(
        self,
        inputs: Union[str, List[str]],
        do_sample: bool = True,
        **kwargs,
    ):
        """Token-by-token streaming text generation.

        Args:
            inputs (Union[str, List[str]]): input text(s).
            do_sample (bool): sample from the distribution if True, otherwise
                use greedy decoding.
            **kwargs: additional generation parameters.

        Yields:
            tuple: ``(ModelStatusCode, response, None)`` where ``response`` is
            the decoded text generated so far (a list when input was batched).
        """
        import torch
        from torch import nn

        with torch.no_grad():
            # A bare string is treated as a batch of one; remember whether to
            # unwrap the response at the end.
            batched = True
            if isinstance(inputs, str):
                inputs = [inputs]
                batched = False

            # Tokenize with padding so the batch forms a rectangular tensor.
            inputs = self.tokenizer(
                inputs, padding=True, return_tensors='pt', return_length=True)
            input_length = inputs['length']

            # Move inputs onto the model's device instead of hard-coding
            # CUDA, so CPU-only runs also work.
            device = next(self.model.parameters()).device
            for k, v in inputs.items():
                inputs[k] = v.to(device)
            input_ids = inputs['input_ids']
            attention_mask = inputs['attention_mask']

            # Merge the model's default generation config with stored
            # parameters and per-call overrides.
            generation_config = copy.deepcopy(self.model.generation_config)
            new_gen_params = self.update_gen_params(**kwargs)
            generation_config.update(**new_gen_params)
            generation_config.update(**kwargs)

            model_kwargs = generation_config.to_dict()
            model_kwargs['attention_mask'] = attention_mask

            # Resolve EOS token ids, falling back to the lazily fetched
            # generation config (if any) when the model config lacks them.
            eos_token_id = generation_config.eos_token_id
            if eos_token_id is None:
                gcfg = getattr(self, 'gcfg', None)
                if gcfg is not None and gcfg.eos_token_id is not None:
                    eos_token_id = gcfg.eos_token_id
                else:
                    eos_token_id = []
            if isinstance(eos_token_id, int):
                eos_token_id = [eos_token_id]
            else:
                # Copy so extending below cannot mutate config state that is
                # reused across calls.
                eos_token_id = list(eos_token_id)
            if self.additional_eos_token_id is not None:
                eos_token_id.extend(self.additional_eos_token_id)

            # One flag per sequence: 1 while still generating, 0 when done.
            batch_size = input_ids.shape[0]
            unfinished_sequences = torch.ones(
                batch_size, dtype=torch.long, device=input_ids.device)

            while True:
                model_inputs = self.model.prepare_inputs_for_generation(
                    input_ids, **model_kwargs)

                # Forward pass; only the logits of the last position matter.
                outputs = self.model(**model_inputs, return_dict=True)
                next_token_logits = outputs.logits[:, -1, :]

                probs = nn.functional.softmax(next_token_logits, dim=-1)
                if do_sample:
                    # Sample from the distribution for diverse outputs.
                    next_tokens = torch.multinomial(
                        probs, num_samples=1).squeeze(1)
                else:
                    # Greedy decoding: always pick the most likely token.
                    next_tokens = torch.argmax(probs, dim=-1)

                # Append the new token and grow the attention mask in step.
                input_ids = torch.cat(
                    [input_ids, next_tokens[:, None]], dim=-1)
                attention_mask = torch.cat([
                    attention_mask,
                    attention_mask.new_ones((attention_mask.shape[0], 1))
                ], dim=1)
                model_kwargs['attention_mask'] = attention_mask

                # A sequence is finished once it emits any EOS id: the
                # elementwise multiply zeroes its flag permanently.
                if eos_token_id:
                    for eos_id in eos_token_id:
                        unfinished_sequences = unfinished_sequences.mul(
                            (next_tokens != eos_id).long())

                output_token_ids = input_ids.cpu().tolist()
                for i in range(len(output_token_ids)):
                    # Strip the prompt; keep only newly generated tokens.
                    # input_length[i] is the original length of sequence i.
                    output_token_ids[i] = output_token_ids[i][
                        input_length[i]:]

                    if eos_token_id:
                        # Truncate at the first EOS token (exclusive); None
                        # means no EOS was generated yet.
                        first_eos_idx = next(
                            (idx for idx, token_id in enumerate(
                                output_token_ids[i])
                             if token_id in eos_token_id), None)
                        if first_eos_idx is not None:
                            output_token_ids[i] = output_token_ids[
                                i][:first_eos_idx]

                response = self.tokenizer.batch_decode(output_token_ids)
                if not batched:
                    response = response[0]
                yield ModelStatusCode.STREAM_ING, response, None

                # Stop once every sequence in the batch has finished.
                if unfinished_sequences.max() == 0:
                    break

            yield ModelStatusCode.END, response, None

    def stream_chat(
        self,
        inputs: List[dict],
        do_sample: bool = True,
        **kwargs,
    ):
        """Return the chat completions in stream mode.

        Args:
            inputs (List[dict]): input messages to be completed.
            do_sample (bool): do sampling if enabled.

        Yields:
            the streaming text/chat completion.
        """
        prompt = self.template_parser(inputs)
        yield from self.stream_generate(prompt, do_sample, **kwargs)


class HFTransformerCasualLM(HFTransformer):
    """HFTransformer variant that loads causal language models."""

    def _load_model(self, path: str, model_kwargs: dict):
        """Load the model with ``AutoModelForCausalLM`` and set eval mode."""
        import torch
        from transformers import AutoModelForCausalLM
        # Copy before mutating so the caller's dict (which may be a shared
        # default) is left untouched by ``setdefault``.
        model_kwargs = dict(model_kwargs)
        model_kwargs.setdefault('torch_dtype', torch.float16)
        self.model = AutoModelForCausalLM.from_pretrained(
            path, trust_remote_code=True, **model_kwargs)
        self.model.eval()


class HFTransformerChat(HFTransformerCasualLM):
    """Causal-LM wrapper that delegates chat to the model's own ``chat`` API."""

    def __init__(self, template_parser=APITemplateParser, **kwargs):
        super().__init__(template_parser=template_parser, **kwargs)

    def chat(self,
             inputs: Union[List[dict], List[List[dict]]],
             do_sample: bool = True,
             **kwargs):
        """Return the chat completion (blocking, non-streaming).

        Args:
            inputs (Union[List[dict], List[List[dict]]]): input messages to
                be completed; a list of lists is treated as a batch of
                conversations.
            do_sample (bool): accepted for interface compatibility; the
                underlying ``model.chat`` call currently ignores it.

        Returns:
            the text/chat completion, or a list of completions for batched
            input. An empty string is returned when ``model.chat`` fails.
        """
        # Handle batch inference with a vanilla per-conversation loop.
        if isinstance(inputs[0], list):
            return [self.chat(conv, do_sample, **kwargs) for conv in inputs]
        prompt = self.template_parser(inputs)
        # The last message is the query; everything before it is history.
        query = prompt[-1]['content']
        history = prompt[:-1]
        try:
            response, history = self.model.chat(
                self.tokenizer, query, history=history)
        except Exception as e:
            # Best-effort: over-length input (and other chat failures) fall
            # back to an empty response instead of raising.
            logger.warning(str(e))
            response = ''
        return response
