from abc import abstractmethod
# BUGFIX: `Sequence` must come from `collections.abc` — the `collections.Sequence`
# alias was removed in Python 3.10 and importing it raises ImportError there.
from collections.abc import Sequence
from typing import Any

from llama_index.core.base.llms.types import (
    ChatMessage,
    ChatResponse,
    ChatResponseAsyncGen,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseAsyncGen,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.core.base.query_pipeline.query import ChainableMixin
from llama_index.core.schema import BaseComponent


class BaseLLM(ChainableMixin, BaseComponent):
    """Abstract interface every LLM implementation must satisfy.

    Declares sync, async, and streaming entry points for both chat-style
    calls (a sequence of messages) and completion-style calls (a raw
    prompt string).  Subclasses implement all abstract members.
    """

    @abstractmethod
    def metadata(self) -> LLMMetadata:
        """Return metadata describing this LLM."""

    @abstractmethod
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Send a sequence of chat messages and return the full response."""

    @abstractmethod
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Complete a raw prompt and return the full response.

        ``formatted`` indicates the prompt is already formatted for the model.
        """

    @abstractmethod
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        """Send a sequence of chat messages and stream the response."""

    @abstractmethod
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """Complete a raw prompt and stream the response."""

    @abstractmethod
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        """Async variant of ``chat``."""

    @abstractmethod
    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Async variant of ``complete``."""

    @abstractmethod
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        """Async variant of ``stream_chat``."""

    @abstractmethod
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        """Async variant of ``stream_complete``."""
# NOTE(review): everything from here down looks like an accidental,
# broken duplicate of the class methods above.  This `acomplete` is
# defined at MODULE level (yet still takes `self`), its entire body is a
# nested `astream_chat` that is discarded as soon as the function
# returns, and the trailing commented-out lines duplicate
# `BaseLLM.astream_complete`.  Presumably this whole section should be
# deleted — confirm no caller imports a module-level `acomplete` first.
async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
    # NOTE(review): `@abstractmethod` is meaningless here — it only has
    # effect on methods collected by ABCMeta at class creation; this
    # decorated function is local to `acomplete` and never used.
    @abstractmethod
    async def astream_chat(
            self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        pass
    # @abstractmethod
    # async def astream_complete(
    #         self, prompt: str, formatted: bool = False, **kwargs: Any
    # ) -> CompletionResponseAsyncGen:
