import asyncio

from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig
from semantic_kernel.prompt_template.input_variable import InputVariable 
from semantic_kernel.functions import KernelArguments
import semantic_kernel as sk
import os


async def run_function(*invoke_args):
   """Invoke a kernel function asynchronously.

   Thin wrapper around the module-level ``kernel`` so the call can be
   driven by ``asyncio.run`` from the synchronous entry point.
   Positional arguments are forwarded unchanged to ``kernel.invoke``.
   """
   result = await kernel.invoke(*invoke_args)
   return result


if __name__ == '__main__':
   # Map the Zhihu-proxy credentials onto the standard OpenAI environment
   # variables that the connector reads by default. Indexing (not .get())
   # fails fast with a clear KeyError if the variables are missing.
   os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY_ZHIHU"]
   os.environ["OPENAI_BASE_URL"] = os.environ["OPENAI_API_BASE_ZHIHU"]

   kernel = sk.Kernel()

   service_id = "default1"
   plugin_name = "MyDemoPlugin1"
   # Read the key from the standard variable set above so there is a single
   # source of truth (previously this re-read OPENAI_API_KEY_ZHIHU directly,
   # and .get() would have passed None through silently if it were unset).
   service = OpenAIChatCompletion(
      service_id=service_id,
      api_key=os.environ["OPENAI_API_KEY"],
      ai_model_id="gpt-4o",
   )
   kernel.add_service(service=service)

   # Fetch the default prompt-execution settings class for this service and
   # instantiate it with our service id.
   req_settings = kernel.get_service(service_id).get_prompt_execution_settings_class()(service_id=service_id)

   # Define the prompt template; template variables use the {{$name}} syntax.
   prompt_template_config = PromptTemplateConfig(
      template="讲个关于{{$topic}}的笑话",
      description="Generate a joke about a specific topic",
      execution_settings={service_id: req_settings},
      input_variables=[InputVariable(name="topic", description="The topic", is_required=True)],
   )

   # Register the prompt template as a callable kernel function.
   topical_joke_function = kernel.add_function(
      function_name="topical_joke",
      plugin_name=plugin_name,
      prompt_template_config=prompt_template_config,
   )

   result = asyncio.run(run_function(topical_joke_function, KernelArguments(topic="小明")))
   print(result)