from MySK import *
from semantic_kernel.connectors.ai.open_ai import OpenAITextEmbedding
from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin
from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory
from semantic_kernel.memory.volatile_memory_store import VolatileMemoryStore
from semantic_kernel.text import split_markdown_lines

# Build the OpenAI embedding model used to vectorize text.
# `api_key` comes from the star import of MySK at the top of the file.
embedding_gen = OpenAITextEmbedding(
    api_key=api_key,
    ai_model_id="text-embedding-ada-002",
)

# Register the embedding service on the kernel so plugins can use it.
kernel.add_service(embedding_gen)

# Set up an in-memory (volatile) vector store backed by the embedding model.
memory = SemanticTextMemory(
    storage=VolatileMemoryStore(),
    embeddings_generator=embedding_gen,
)

# Expose the memory store to the kernel as a plugin.
kernel.add_plugin(TextMemoryPlugin(memory), "TextMemoryPlugin")

# Read the source document. Explicit UTF-8 matters here: the file contains
# Chinese text, and relying on the platform default encoding (e.g. cp936 on
# Chinese Windows, cp1252 elsewhere) can raise UnicodeDecodeError or silently
# mangle the content.
with open('../ChatALL.md', 'r', encoding='utf-8') as f:
    content = f.read()

# Split the content into chunks of at most 100 tokens each.
# NOTE: SK's text-splitting currently handles Chinese less well than English.
lines = split_markdown_lines(content, 100)

# Memory collection all chunks are stored under.
collection_id = "generic"


async def save_function(collection, id, text):
    """Store a single text chunk in the vector memory under the given collection/id."""
    pending = memory.save_information(collection=collection, id=id, text=text)
    return await pending


async def _save_all(chunks):
    """Persist every chunk concurrently inside a single event loop.

    The chunk's position in the list is used as its memory id.
    NOTE(review): SK's save_information annotates `id` as str — confirm
    whether the int index should be wrapped in str().
    """
    await asyncio.gather(*(
        save_function(collection=collection_id, id=index, text=chunk)
        for index, chunk in enumerate(chunks)
    ))


# Store the split chunks into memory. One asyncio.run() for the whole batch:
# the original called asyncio.run() per chunk, which creates and tears down
# a fresh event loop on every iteration.
asyncio.run(_save_all(lines))
print("存储完成")


async def run_function(*args):
    """Query the vector memory, forwarding all positional arguments to search()."""
    search_coro = memory.search(*args)
    return await search_coro


# Run a semantic search for the question against the stored chunks.
result = asyncio.run(
    run_function(
        collection_id, "ChatALL怎么下载？"
    )
)

# search() can legitimately return no hits; guard before indexing so an
# empty result does not raise IndexError.
if result:
    print(result[0].text)
else:
    print("未找到相关内容")
