from pydantic import BaseModel
from llmapi.base import *
from typing import List, Optional, Union, Generator, Sequence, Iterator, Deque
import llama_cpp 
class LLamaCPPModel(BaseModel, ChatModel, EmbeddingModel):
    """Adapter exposing a ``llama_cpp.Llama`` instance through the project's
    ``ChatModel`` / ``EmbeddingModel`` interfaces.

    The wrapped handle does all the work; this class only translates the
    project-level call signatures onto ``llama_cpp``'s API.
    """

    # Wrapped llama.cpp handle. Pydantic cannot validate this type, hence
    # arbitrary_types_allowed in Config below.
    llama: llama_cpp.Llama
    # Display name; falls back to "llama" when unset (see `name`).
    model_name: Optional[str] = "llama"

    class Config:
        arbitrary_types_allowed = True

    @property
    def name(self) -> str:
        """Human-readable model name, defaulting to ``"llama"``."""
        return self.model_name or "llama"

    def embedding(self, input: str, **kwargs) -> Embedding:
        """Return the embedding of *input* as computed by llama.cpp.

        Extra ``kwargs`` are accepted for interface compatibility but ignored.
        """
        return self.llama.create_embedding(input)

    def chat_completion(self,
                        messages: List[ChatCompletionMessage],
                        temperature: float = 0.2,
                        top_p: float = 0.95,
                        top_k: int = 40,
                        stream: bool = False,
                        stop: Optional[List[str]] = None,
                        max_tokens: int = 2048,
                        verbose: Optional[bool] = False,
                        **kwargs) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
        """Run a chat completion on the wrapped model.

        Args:
            messages: Conversation history in chat-message format.
            temperature / top_p / top_k: Sampling parameters.
            stream: When True, an iterator of chunks is returned instead of a
                single completion object.
            stop: Optional stop strings (``None`` behaves like ``[]``).
            max_tokens: Generation cap; falsy values fall back to 2048.
            verbose: Accepted for interface compatibility; not used here.

        Returns:
            A ``ChatCompletion`` or, when ``stream`` is True, an iterator of
            ``ChatCompletionChunk``.
        """
        # Keyword arguments are essential: llama_cpp's create_chat_completion
        # has additional parameters (e.g. functions, min_p) interleaved with
        # these, so positional passing can silently bind values to the wrong
        # parameters across llama-cpp-python versions.
        return self.llama.create_chat_completion(
            messages,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            stream=stream,
            stop=stop or [],
            max_tokens=max_tokens or 2048,
        )

    def completion(self,
                   prompt: str,
                   max_tokens: int = 16,
                   temperature: float = 0.7,
                   top_p: float = 1.0,
                   top_k: int = 0,
                   presence_penalty: float = 0.0,
                   frequency_penalty: float = 0.0,
                   stop: Optional[List[str]] = None,
                   echo: bool = False,
                   logprobs: int = 0,
                   stream: bool = False,
                   verbose: Optional[bool] = False,
                   **kwargs) -> Union[Completion, Iterator[CompletionChunk]]:
        """Run a plain text completion on the wrapped model.

        Args:
            prompt: Text to continue.
            max_tokens: Generation cap.
            temperature / top_p / top_k: Sampling parameters.
            presence_penalty / frequency_penalty: Repetition penalties.
            stop: Optional stop strings (``None`` behaves like ``[]``).
            echo: Whether the prompt is echoed back in the output.
            logprobs: Number of top logprobs to return; 0 means none.
            stream: When True, an iterator of chunks is returned.
            verbose: Accepted for interface compatibility; not used here.

        Returns:
            A ``Completion`` or, when ``stream`` is True, an iterator of
            ``CompletionChunk``.
        """
        # Keyword arguments fix a positional-binding bug: llama_cpp's
        # create_completion takes `suffix` as its second positional parameter,
        # so the original positional call bound max_tokens to suffix and
        # shifted every subsequent argument onto the wrong parameter.
        # logprobs: llama_cpp treats None (not 0) as "no logprobs".
        return self.llama.create_completion(
            prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            stop=stop or [],
            echo=echo,
            logprobs=logprobs or None,
            stream=stream,
        )