import asyncio
import os

import semantic_kernel as sk
from dotenv import load_dotenv, find_dotenv
from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion
from semantic_kernel.functions import KernelArguments
from semantic_kernel.prompt_template import PromptTemplateConfig, InputVariable

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file, if one exists.
_ = load_dotenv(find_dotenv())
# Create the semantic kernel instance.
kernel = sk.Kernel()
api_key = os.getenv("OPENAI_API_KEY")
serviceId = "default"
# Register the OpenAI chat-completion service with the kernel under this service id.
kernel.add_service(
    OpenAIChatCompletion(
        service_id=serviceId,
        ai_model_id="gpt-3.5-turbo-1106",
        api_key=api_key
    ),
)

# Obtain the default prompt-execution settings for the registered service
# (looks up the service's settings class and instantiates it for this service id).
req_settings = kernel.get_service(serviceId).get_prompt_execution_settings_class()(service_id=serviceId)

# Define the prompt template.
# Inside the template, variables are written as {{$variable_name}}.
# The template text asks (in Chinese) for a joke about {{$topic}}.
prompt_template_config = PromptTemplateConfig(
    template="讲个关于{{$topic}}的笑话",
    description="Generate a joke about a specific topic",
    execution_settings={serviceId: req_settings},
    # Declares a single required input parameter named "topic".
    input_variables=[
        InputVariable(name="topic", description="The topic", is_required=True),
    ],
)
# Register the prompt template as a callable kernel function.
topical_joke_function = kernel.add_function(
    function_name="topical_joke",
    plugin_name="DemoPlugin",
    prompt_template_config=prompt_template_config
)


# Async entry point: forwards its arguments to the kernel and awaits the result,
# so the call can be driven from synchronous code via asyncio.run().
async def runAsyncFunc(*args):
    """Invoke the kernel with the given positional arguments and return the result."""
    pending = kernel.invoke(*args)
    return await pending


# Build the arguments, run the async invocation to completion, and print the joke.
kernel_args = KernelArguments(topic="小明")
result = asyncio.run(runAsyncFunc(topical_joke_function, kernel_args))
print(result)
