import asyncio
import os

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAITextEmbedding
from semantic_kernel.core_plugins import TextMemoryPlugin
from semantic_kernel.functions import KernelArguments
from semantic_kernel.memory import SemanticTextMemory, VolatileMemoryStore
from semantic_kernel.prompt_template import PromptTemplateConfig, InputVariable

# Name of the memory collection used for both ingestion (initData) and retrieval.
collection_id = "generic"

async def run_function(*args):
    """Await a kernel function invocation, forwarding all positional arguments.

    NOTE(review): relies on the module-level ``kernel`` created in the
    ``__main__`` block existing before this coroutine is awaited.
    """
    result = await kernel.invoke(*args)
    return result

async def initData(memory):
    """Chunk the demo markdown file and store each chunk in semantic memory.

    Args:
        memory: the SemanticTextMemory instance to save chunks into; chunks are
            written to the module-level ``collection_id`` collection.
    """
    from semantic_kernel.text import split_markdown_lines

    # Read explicitly as UTF-8 so non-ASCII markdown content does not depend
    # on the platform's default encoding.
    with open('./markdowns/demo.md', 'r', encoding='utf-8') as f:
        content = f.read()

    # Split into chunks of at most 100 tokens each.
    lines = split_markdown_lines(content, 100)

    for index, line in enumerate(lines):
        # save_information expects a string id; enumerate yields an int.
        await memory.save_information(collection=collection_id, id=str(index), text=line)

if __name__ == "__main__":
    # Route the OpenAI SDK through the Zhihu proxy credentials via the
    # standard environment variables the connector reads.
    os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY_ZHIHU"]
    os.environ["OPENAI_BASE_URL"] = os.environ["OPENAI_API_BASE_ZHIHU"]

    kernel = Kernel()

    llm_service_id = "llm_service"
    embedding_service_id = "embedding_service"

    # LLM (chat completion) service
    llm_service = OpenAIChatCompletion(
        api_key=os.environ["OPENAI_API_KEY_ZHIHU"],
        service_id=llm_service_id,
        ai_model_id="gpt-4o")

    # Embedding service.  Fix: previously created without its service_id,
    # leaving ``embedding_service_id`` unused, unlike the LLM service above.
    embedding_service = OpenAITextEmbedding(
        api_key=os.environ["OPENAI_API_KEY_ZHIHU"],
        service_id=embedding_service_id,
        ai_model_id="text-embedding-ada-002")

    kernel.add_service(llm_service)
    kernel.add_service(embedding_service)

    # In-memory vector store backed by the embedding service; exposed to
    # prompts via the TextMemoryPlugin (used by {{TextMemoryPlugin.recall}}).
    memory = SemanticTextMemory(storage=VolatileMemoryStore(), embeddings_generator=embedding_service)
    kernel.add_plugin(TextMemoryPlugin(memory), "TextMemoryPlugin")

    # Ingest the markdown document into the memory collection.
    asyncio.run(initData(memory))

    # Sanity check: direct similarity search against the memory store.
    result = asyncio.run(memory.search(collection=collection_id, query='What is Sui?'))
    print(result[0].text)

    # RAG prompt: recall relevant chunks for $input, then answer strictly
    # from that context.
    prompt = """
    基于下面的背景信息回答问题。如果背景信息为空，或者和问题不相关，请回答“我不知道”
    [背景信息开始]
    {{TextMemoryPlugin.recall $input}}
    [背景信息结束]
    
    问题：{{$input}}
    回答：
    """

    req_setting = kernel.get_service(service_id=llm_service_id).get_prompt_execution_settings_class()(service_id=llm_service_id)
    # Fix: the keyword is ``execution_settings`` (was misspelled
    # ``execute_settings``, so the settings never reached the template config),
    # and InputVariable's flag is ``is_required`` (was misspelled
    # ``is_reqired``).
    prompt_template_config = PromptTemplateConfig(
        template=prompt,
        description="RAG回答",
        execution_settings={llm_service_id: req_setting},
        input_variables=[InputVariable(name="input", description="用户输入问题", is_required=True)])

    rag_function = kernel.add_function(
        plugin_name="TextMemoryPlugin",
        function_name="rag_function",
        description="RAG Function",
        prompt_template_config=prompt_template_config)
    result = asyncio.run(run_function(rag_function, KernelArguments(input="What is Sui?")))

    print(result)