from typing import List, Tuple
from tqdm.asyncio import tqdm as tqdm_async

import os
import asyncio

from langchain_openai import OpenAIEmbeddings
from langchain_core.embeddings import Embeddings

from utils import divide_chunks

class Embedder:
    """Thin wrapper around an OpenAI-compatible embeddings client.

    Connection settings are read from the environment:
    ``OPENAI_EMBEDDING_ENDPOINT``, ``OPENAI_EMBEDDING_MODEL``, ``OPENAI_KEY``.
    """

    def __init__(self):
        self.embedder = self._create_embedding_func()
        self._dimension = None  # cached embedding size, filled lazily

    def _create_embedding_func(self) -> Embeddings:
        """Build the embeddings client from environment variables.

        Raises:
            KeyError: if any required environment variable is unset
                (fail-fast at construction time).
        """
        openai_api_base = os.environ['OPENAI_EMBEDDING_ENDPOINT']
        model = os.environ['OPENAI_EMBEDDING_MODEL']
        key = os.environ['OPENAI_KEY']

        return OpenAIEmbeddings(
            openai_api_base=openai_api_base,
            model=model,
            openai_api_key=key,
        )

    def get_embedding_dimension(self) -> int:
        """Return the embedding vector length for the configured model.

        The dimension is a per-model constant, so it is probed with one
        remote call on first use and cached for every later call.
        """
        if self._dimension is None:
            self._dimension = len(self.embedder.embed_query("foo"))
        return self._dimension

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronously embed a batch of documents."""
        return await self.embedder.aembed_documents(texts)

    async def aembed_query(self, text: str) -> List[float]:
        """Asynchronously embed a single query string."""
        return await self.embedder.aembed_query(text)
    
class EmbeddingService:
    """Lazily pre-embeds a fixed set of texts and serves embeddings from cache.

    Unknown strings are embedded on demand and cached as well.
    """

    def __init__(self, pretexts: List[str]):
        self.embedder = Embedder()
        self.texts = pretexts
        self.cache = {}  # text -> embedding; TODO: use redis for persistent caching
        # Guards the one-time warm-up so concurrent get() calls cannot
        # both launch the batch pre-embedding.
        self._warm_lock = asyncio.Lock()

    async def _pre_embedding(self):
        """Batch-embed the unique pretexts and populate the cache."""
        texts = list(set(self.texts))

        # Register placeholders up front: a non-empty cache marks the
        # warm-up as started.  A None value left behind (e.g. a failed
        # chunk) is simply re-embedded on demand by get().
        self.cache.update(dict.fromkeys(texts))

        chunks = list(divide_chunks(texts, 500))

        async def ainvoke(chunk: List[str]) -> Tuple[List[str], List[List[float]]]:
            return chunk, await self.embedder.aembed_documents(chunk)

        for result in tqdm_async(
            asyncio.as_completed([ainvoke(chunk) for chunk in chunks]),
            total=len(chunks),
            desc="Embedding",
            unit="Chunk",
        ):
            chunk, embeddings = await result
            for text, embedding in zip(chunk, embeddings):
                self.cache[text] = embedding

    async def get(self, s: str) -> List[float]:
        """Return the embedding for *s*, warming the pretext cache on first use."""
        # Lazily warm the cache exactly once, even under concurrent calls.
        if self.texts and not self.cache:
            async with self._warm_lock:
                if not self.cache:  # re-check: another waiter may have warmed it
                    await self._pre_embedding()

        # None placeholders are falsy, so they fall through to on-demand embedding.
        if res := self.cache.get(s):
            return res

        self.cache[s] = await self.embedder.aembed_query(s)
        return self.cache[s]
