import uuid
from typing import Iterable, Optional, Any, Type

import dotenv
import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from langchain_openai import OpenAIEmbeddings
from openai.types import embedding


# Sample documents (Chinese sentences) used to seed the vector store below.
text= [
    "笨笨是一只很喜欢睡觉的猫咪",
    "我喜欢在夜晚听音乐，这让我感到放松。",
    "猫咪在窗台上打盹，看起来非常可爱。",
    "学习新技能是每个人都应该追求的目标。",
    "我最喜欢的食物是意大利面，尤其是番茄酱的那种。",
    "昨晚我做了一个奇怪的梦，梦见自己在太空飞行。",
    "我的手机突然关机了，让我有些焦虑。",
    "阅读是我每天都会做的事情，我觉得很充实。",
    "他们一起计划了一次周末的野餐，希望天气能好。",
    "我的狗喜欢追逐球，看起来非常开心。",
]

# Per-document metadata, paired with `text` by position (must stay the same
# length as `text`). Entry 6 carries an extra `account_id` field as an example
# of heterogeneous metadata.
metadata= [
    {"page": 1},
    {"page": 2},
    {"page": 3},
    {"page": 4},
    {"page": 5},
    {"page": 6, "account_id": 1},
    {"page": 7},
    {"page": 8},
    {"page": 9},
    {"page": 10},
]


class MemoryVectorStore(VectorStore):
    """A minimal in-memory vector store.

    Each record maps a string id to a dict holding the original text, its
    embedding vector, and optional metadata. Similarity search ranks records
    by Euclidean distance between the query embedding and stored vectors
    (smaller distance = more similar).
    """

    def __init__(self, embedding: Embeddings, **kwargs: Any):
        """Initialize the store with the embedding model to use.

        Args:
            embedding: Model used to embed both documents and queries.
        """
        # Per-instance storage. NOTE: this was previously a class-level
        # attribute, which made every instance share one dict (a classic
        # mutable-class-attribute bug).
        self.store: dict[str, dict] = {}
        self._embedding = embedding

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[list[dict]] = None,
        *,
        ids: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> list[str]:
        """Embed *texts* and add them to the in-memory store.

        Args:
            texts: Texts to embed and store (any iterable; materialized once).
            metadatas: Optional per-text metadata, same length as *texts*.
            ids: Optional caller-supplied ids; UUID4 strings are generated
                when omitted.

        Returns:
            The list of ids the texts were stored under.

        Raises:
            ValueError: If *metadatas* or *ids* length differs from *texts*.
        """
        # Materialize so len() and repeated iteration work for generators too.
        texts = list(texts)
        # 1. Validate that metadata (and ids, if given) align with the texts.
        if metadatas is not None and len(metadatas) != len(texts):
            raise ValueError('元数据的长度必须和文本数据保持一致')
        if ids is not None and len(ids) != len(texts):
            raise ValueError('元数据的长度必须和文本数据保持一致')
        # 2. Embed all texts in a single batch call.
        embeddings = self._embedding.embed_documents(texts)
        # 3. Honor caller-supplied ids; previously the `ids` argument was
        #    silently ignored and fresh UUIDs were always generated.
        if ids is None:
            ids = [str(uuid.uuid4()) for _ in texts]
        # 4. Store text, vector, metadata, and id keyed by id.
        for idx, text in enumerate(texts):
            self.store[ids[idx]] = {
                'id': ids[idx],
                'vector': embeddings[idx],
                "text": text,
                'metadata': metadatas[idx] if metadatas is not None else {},
            }
        return ids

    def similarity_search(
            self,
            query: str,
            k: int = 4,
            **kwargs: Any
    ) -> list[Document]:
        """Return the *k* stored documents closest to *query*.

        Args:
            query: Natural-language query string.
            k: Number of documents to return (default 4).

        Returns:
            Documents ordered by ascending Euclidean distance; each document's
            metadata carries the distance under the ``score`` key.
        """
        # 1. Embed the query.
        embedding = self._embedding.embed_query(query)
        # 2. Compute the Euclidean distance to every stored vector.
        result: list = []
        for record in self.store.values():
            distance = self._euclidean_distance(embedding, record['vector'])
            result.append({
                'distance': distance,
                **record
            })
        # 3. Keep the k records with the smallest distance.
        sorted_result = sorted(result, key=lambda x: x['distance'])
        result_k = sorted_result[:k]
        # 4. Wrap the hits as Documents, attaching the distance as a score.
        documents = [
            Document(page_content=item['text'], metadata={**item['metadata'], 'score': item['distance']})
            for item in result_k
        ]
        return documents

    @classmethod
    def from_texts(
            cls: Type['MemoryVectorStore'],
            texts: list[str],
            embeddings: Embeddings,
            metadata: Optional[list[dict]] = None,
            **kwargs: Any
    ) -> 'MemoryVectorStore':
        """Build a store from texts, an embedding model, and optional metadata.

        Parameter names differ from the LangChain base signature
        (``embedding``/``metadatas``) but are kept for backward compatibility
        with existing keyword callers.
        """
        memory_vector_store = cls(embeddings, **kwargs)
        memory_vector_store.add_texts(texts, metadata)
        return memory_vector_store

    @classmethod
    def _euclidean_distance(cls, vec1, vec2) -> float:
        """Return the Euclidean (L2) distance between two vectors."""
        return float(np.linalg.norm(np.array(vec1) - np.array(vec2)))

# Load the OpenAI API key (and other settings) from the local .env file.
dotenv.load_dotenv()

# Embedding model used both for indexing the sample texts and for queries.
embeddings = OpenAIEmbeddings(model='text-embedding-3-small')

# Build the in-memory vector store from the sample texts and their metadata.
db = MemoryVectorStore.from_texts(texts=text, embeddings=embeddings, metadata=metadata)

# Run a similarity search and print each matching document.
results = db.similarity_search('有叫笨笨的猫吗?')
for doc in results:
    print(doc)