from pydantic import BaseModel
from llmapi.base import *
from typing import List, Optional, Any
import torch
# Default generation length (ChatGLM `max_length`) used when a caller passes
# a falsy max_tokens value.
_max_tokens = 2048

class GLMModel(BaseModel, ChatModel):
    """OpenAI-compatible wrapper around a ChatGLM model/tokenizer pair.

    ``model`` must expose ChatGLM's ``chat`` and ``stream_chat`` methods;
    ``tokenizer`` is the matching HF-style tokenizer (callable with
    ``return_tensors="pt"``).  Both completion entry points are generators:
    the streaming path yields incremental chunks, the non-streaming path
    yields exactly one complete payload.
    """

    model: Any        # loaded ChatGLM model (provides chat / stream_chat)
    tokenizer: Any    # matching tokenizer for the model above
    model_name: Optional[str] = "chatglm"  # name reported in responses

    @property
    def name(self) -> str:
        """Model name used in response payloads."""
        return self.model_name or "chatglm"

    # ------------------------------------------------------------------ #
    # internal helpers                                                   #
    # ------------------------------------------------------------------ #
    @staticmethod
    def _resolve_do_sample(kwargs: dict) -> bool:
        """Honor an explicit ``do_sample`` override from ``**kwargs``.

        Fixes the former ``kwargs["do_sample"] or True`` expression, which
        always evaluated to True and silently ignored ``do_sample=False``.
        """
        return bool(kwargs.get("do_sample", True))

    @staticmethod
    def _truncate_at_stop(response, stop):
        """Cut ``response`` at the first stop string it contains.

        Returns:
            ``(text, "stop")`` when a stop string was found (``text`` is the
            part before it, stripped); otherwise ``(response, None)``.
        """
        # FIXME: in streaming mode a chunk may end with only a *prefix* of a
        # stop string; adjacent chunks should be joined before this test.
        for marker in stop:
            if marker in response:
                return response.split(marker)[0].strip(), "stop"
        return response, None

    def _token_count(self, text: str) -> int:
        """Best-effort token count of ``text`` for usage reporting.

        ``len()`` on a HF ``BatchEncoding`` counts its dict keys (e.g. 2),
        not tokens, so the length of ``input_ids`` is used instead.
        """
        encoded = self.tokenizer([text], return_tensors="pt")
        try:
            return int(encoded["input_ids"].shape[-1])
        except Exception:
            # Unknown tokenizer output layout -- keep usage best-effort
            # rather than failing the whole completion.
            return 0

    # ------------------------------------------------------------------ #
    # public API                                                         #
    # ------------------------------------------------------------------ #
    def chat_completion(self,
            messages: List[ChatCompletionMessage],
            temperature: float = 0.2,
            top_p: float = 0.95,
            top_k: int = 40,
            stream: bool = False,
            stop: Optional[List[str]] = None,
            max_tokens: int = 2048,
            verbose: Optional[bool] = False,
            **kwargs) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
        """Generate a chat completion from a list of messages.

        Args:
            messages: Messages to build the prompt from; ``role == "user"``
                is rendered as ``###Human:``, everything else as
                ``###Assistant:``.
            temperature: Sampling temperature.
            top_p: Nucleus-sampling probability mass.
            top_k: Accepted for API compatibility; not used by ChatGLM here.
            stream: Whether to yield incremental chunks.
            stop: Extra stop strings (the prompt role markers are always
                added so the model does not continue the dialogue alone).
            max_tokens: Maximum generation length (ChatGLM ``max_length``).
            verbose: Accepted for API compatibility; unused.
            **kwargs: ``do_sample`` may be passed to disable sampling.

        Returns:
            A generator yielding chat-completion chunks (``stream=True``) or
            a single chat-completion payload (``stream=False``).
        """
        stop = list(stop) if stop else []
        prompt = "".join(
            f'###{"Human" if message["role"] == "user" else "Assistant"}:{message["content"]}'
            for message in messages
        ) + "###Assistant:"
        stop = ["###Assistant:", "###Human:"] + stop

        completion_id: str = f"chatcmpl-{str(uuid.uuid4())}"
        created: int = int(time.time())
        finish_reason: Optional[str] = None
        do_sample = self._resolve_do_sample(kwargs)

        if stream:
            sent = 0          # chars of `response` already emitted
            first = True      # role-only delta must open the stream
            for response, _ in self.model.stream_chat(
                    self.tokenizer, prompt,
                    do_sample=do_sample,
                    temperature=temperature,
                    top_p=top_p,
                    max_length=max_tokens or _max_tokens):
                created = int(time.time())
                response, finish_reason = self._truncate_at_stop(response, stop)
                ret = response[sent:]
                # https://github.com/THUDM/ChatGLM-6B/issues/478
                # An emoji may arrive split across tokens; wait for the next
                # chunk instead of emitting U+FFFD.
                if "\uFFFD" == ret[-1:]:
                    continue
                sent = len(response)
                if first:
                    first = False
                    # OpenAI-style streams open with a role-only delta; the
                    # first fragment's text is still emitted below (it used
                    # to be silently dropped).
                    yield {
                        "id": completion_id,
                        "model": self.name,
                        "created": created,
                        "object": "chat.completion.chunk",
                        "choices": [
                            {
                                "index": 0,
                                "delta": {
                                    "role": "assistant",
                                },
                                "finish_reason": None,
                            }
                        ],
                    }
                yield {
                    "id": completion_id,
                    "model": self.name,
                    "created": created,
                    "object": "chat.completion.chunk",
                    "choices": [
                        {
                            "index": 0,
                            "delta": {
                                "content": ret,
                            },
                            "finish_reason": finish_reason,
                        }
                    ],
                }
                if finish_reason == "stop":
                    break
            if finish_reason is None:
                # Generation ended without a stop string: close the stream
                # with an explicit "length" chunk.
                finish_reason = "length"
                yield {
                    "id": completion_id,
                    "model": self.name,
                    "created": created,
                    "object": "chat.completion.chunk",
                    "choices": [
                        {
                            "index": 0,
                            "delta": {
                                "content": '',
                            },
                            "finish_reason": finish_reason,
                        }
                    ],
                }
            return

        # -------------------- non-streaming path ---------------------- #
        response, _ = self.model.chat(
            self.tokenizer, prompt,
            do_sample=do_sample,
            temperature=temperature,
            top_p=top_p,
            max_length=max_tokens or _max_tokens)
        response, finish_reason = self._truncate_at_stop(response, stop)
        if finish_reason is None:
            # Only report "length" when no stop string fired (previously the
            # detected "stop" reason was unconditionally overwritten).
            finish_reason = "length"
        prompt_tokens = self._token_count(prompt)
        completion_tokens = self._token_count(response)
        yield {
            "id": completion_id,
            "object": "chat.completion",
            "created": created,
            "model": self.name,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response,
                    },
                    "finish_reason": finish_reason,
                }
            ],
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
        }

    def completion(self,
                   prompt: str,
                   max_tokens: int = 16,
                   temperature: float = 0.7,
                   top_p: float = 1.0,
                   top_k: int = 0,
                   presence_penalty: float = 0.0,
                   frequency_penalty: float = 0.0,
                   stop: Optional[List[str]] = None,
                   echo: bool = False,
                   logprobs: int = 0,
                   stream: bool = False,
                   verbose: Optional[bool] = False,
                   **kwargs) -> Union[Completion, Iterator[CompletionChunk]]:
        """Generate a text completion for ``prompt``.

        Args:
            prompt: Raw prompt text passed straight to the model.
            max_tokens: Maximum generation length (ChatGLM ``max_length``).
            temperature: Sampling temperature.
            top_p: Nucleus-sampling probability mass.
            top_k, presence_penalty, frequency_penalty, echo, logprobs,
                verbose: Accepted for API compatibility; not used by ChatGLM.
            stop: Stop strings that truncate the generation.
            stream: Whether to yield incremental chunks.
            **kwargs: ``do_sample`` may be passed to disable sampling.

        Returns:
            A generator yielding completion chunks (``stream=True``) or a
            single completion payload (``stream=False``).
        """
        stop = list(stop) if stop else []
        logprobs_or_none: Optional[CompletionLogprobs] = None
        completion_id: str = f"chatcmpl-{str(uuid.uuid4())}"
        created: int = int(time.time())
        finish_reason: Optional[str] = None
        do_sample = self._resolve_do_sample(kwargs)

        if stream:
            sent = 0  # chars of `response` already emitted
            for response, _ in self.model.stream_chat(
                    self.tokenizer, prompt,
                    do_sample=do_sample,
                    temperature=temperature,
                    top_p=top_p,
                    max_length=max_tokens or _max_tokens):
                created = int(time.time())
                # Truncate *before* slicing so stop-marker text is never
                # streamed out (the original sliced first and could leak it).
                response, finish_reason = self._truncate_at_stop(response, stop)
                ret = response[sent:]
                # https://github.com/THUDM/ChatGLM-6B/issues/478
                # An emoji may arrive split across tokens; wait for the next
                # chunk instead of emitting U+FFFD.
                if "\uFFFD" == ret[-1:]:
                    continue
                sent = len(response)
                yield {
                    "id": completion_id,
                    "object": "text_completion",
                    "created": created,
                    "model": self.name,
                    "choices": [
                        {
                            "text": ret,
                            "index": 0,
                            "logprobs": logprobs_or_none,
                            "finish_reason": finish_reason,
                        }
                    ],
                }
                if finish_reason == "stop":
                    break
            if finish_reason is None:
                # Close the stream with an explicit "length" chunk.
                finish_reason = "length"
                yield {
                    "id": completion_id,
                    "object": "text_completion",
                    "created": created,
                    "model": self.name,
                    "choices": [
                        {
                            "text": '',
                            "index": 0,
                            "logprobs": logprobs_or_none,
                            "finish_reason": finish_reason,
                        }
                    ],
                }
            return

        # -------------------- non-streaming path ---------------------- #
        response, _ = self.model.chat(
            self.tokenizer, prompt,
            do_sample=do_sample,
            temperature=temperature,
            top_p=top_p,
            max_length=max_tokens or _max_tokens)
        response, finish_reason = self._truncate_at_stop(response, stop)
        if finish_reason is None:
            # Only report "length" when no stop string fired (previously the
            # detected "stop" reason was unconditionally overwritten).
            finish_reason = "length"
        prompt_tokens = self._token_count(prompt)
        completion_tokens = self._token_count(response)
        yield {
            "id": completion_id,
            "object": "text_completion",
            "created": created,
            "model": self.name,
            "choices": [
                {
                    "text": response,
                    "index": 0,
                    "logprobs": logprobs_or_none,
                    "finish_reason": finish_reason,
                }
            ],
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
        }

