# The pysqlite3 shim must run BEFORE chromadb is imported: chromadb binds to
# the sqlite3 module at import time, and pysqlite3 bundles the newer SQLite
# version chromadb requires. (Originally the swap happened after
# `import chromadb`, which made it a no-op.)
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')

import os
import uuid

import chromadb
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.vectorstores import Chroma
from zhipuai import ZhipuAI

import utils

# SECURITY: an API key was hard-coded here. Prefer the ZHIPUAI_API_KEY
# environment variable; the literal fallback preserves existing behavior,
# but the key should be rotated and removed from source control.
zhipu_client = ZhipuAI(
    api_key=os.environ.get("ZHIPUAI_API_KEY",
                           "03e70022bf97812e0fceb66eae340fd8.kW6JFsPd3vZBf9lM")
)

class ZhipuEmbeddingFunction(EmbeddingFunction):
    """Chroma embedding function backed by ZhipuAI's embedding-2 model."""

    def __call__(self, input: Documents) -> Embeddings:
        """Embed every text in *input* via :func:`embed_query`.

        Returns one embedding vector per input text, in order.

        Raises ValueError when an API response cannot be parsed, instead of
        silently skipping the text: a skipped text would leave the returned
        list shorter than *input* and misalign embeddings against the
        documents/ids stored in the Chroma collection.
        """
        embeddings = []
        for text in input:
            response = embed_query(text)
            try:
                embeddings.append(response[0].embedding)
            except (IndexError, TypeError, KeyError) as e:
                # Fail loudly rather than returning a desynchronized list.
                raise ValueError(
                    f"Error processing text: {text}, Error: {e}") from e
        return embeddings

def embed_query(content):
    """Request an embedding for *content* from ZhipuAI's embedding-2 model.

    Returns the raw ``data`` list of the API response; each element exposes
    an ``.embedding`` vector.
    """
    api_response = zhipu_client.embeddings.create(
        model="embedding-2",
        input=content,
    )
    return api_response.data

# In-memory registry of {"id": ..., "collection": ...} entries; appended to
# by update_embedding and searched by get_collection_by_id. Not persisted.
collection_list = []
def update_embedding(documents, source, unique_embedding_id):
    """(Re)build the Chroma collection for *source* from *documents*.

    Splits the LangChain documents into ~1000-char chunks, embeds them with
    ZhipuEmbeddingFunction, stores them in a persistent Chroma collection
    named after *source*, and registers the collection in ``collection_list``
    under *unique_embedding_id*.
    """
    chroma_client = chromadb.PersistentClient(path="chromac")

    collection_name = utils.remove_non_alphanumeric(source)
    # Best-effort drop of any stale collection for this source; deleting a
    # nonexistent collection raises, which we log and ignore.
    try:
        chroma_client.delete_collection(collection_name)
    except Exception as e:
        print(e)

    collection = chroma_client.get_or_create_collection(
        name=collection_name, embedding_function=ZhipuEmbeddingFunction())
    collection_list.append({
        "id": unique_embedding_id,
        "collection": collection
    })

    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=20)
    docs = text_splitter.split_documents(documents)

    print("开始 embedding")
    # BUG FIX: the original called collection.add() INSIDE the loop with the
    # ever-growing lists, re-submitting every earlier chunk on each iteration
    # (duplicate ids, O(n^2) embedding calls). Build the batch once and add
    # it with a single call. (Also removed a redundant chromadb.Client() that
    # was immediately overwritten by the PersistentClient above.)
    docs_list = [item.page_content for item in docs]
    metadatas = [{"source": source} for _ in docs]
    ids = [str(uuid.uuid4()) for _ in docs]
    if docs_list:
        collection.add(
            documents=docs_list,
            metadatas=metadatas,
            ids=ids
        )
    print("结束 embedding")

def get_collection_by_id(id):
    """Return the first registry entry whose "id" matches *id*, else None."""
    return next(
        (entry for entry in collection_list if entry.get("id") == id),
        None,
    )

def get_vector_search(query, unique_embedding_id):
    """Return the top-2 matching document chunks for *query*.

    Looks up the Chroma collection registered under *unique_embedding_id*
    and queries it for the two nearest chunks.

    Raises ValueError when no collection is registered for the id (the
    original crashed with an opaque ``TypeError: 'NoneType' object is not
    subscriptable`` instead).
    """
    entry = get_collection_by_id(unique_embedding_id)
    if entry is None:
        raise ValueError(
            f"No collection registered for id {unique_embedding_id}")
    collection = entry['collection']
    results = collection.query(
        query_texts=[query],
        n_results=2
    )
    return results["documents"][0]

def getTextLLMResponse(messages):
    """Send *messages* to the glm-4 chat model and return the reply text."""
    completion = zhipu_client.chat.completions.create(
        model="glm-4",  # name of the model to invoke
        messages=messages,
    )
    answer = completion.choices[0].message.content
    print(answer)
    return answer

def getImageLLMResponse(prompt):
    """Generate an image for *prompt* with cogview-3 and return its URL."""
    generation = zhipu_client.images.generations(
        model="cogview-3",  # name of the model to invoke
        prompt=prompt,
    )
    image_url = generation.data[0].url
    print(image_url)
    return image_url