# coding=utf-8
# @Time   : 2023/11/26
# @Author : wangjl
# @Email  : 1975039138@qq.com
import os
import re
import time
import traceback
from typing import Dict, Iterable
from functools import wraps

import semantic_kernel as sk
import tiktoken
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_kernel.orchestration.sk_function_base import SKFunctionBase


def build_params(params: Dict) -> sk.ContextVariables:
    """Convert a plain dict into sk.ContextVariables, stringifying every value."""
    variables = sk.ContextVariables()
    for key, value in params.items():
        variables[key] = str(value)
    return variables


def create_semantic_kernel(model_id="gpt-4-0613", api_key=None, endpoint=None):
    """Build an sk.Kernel wired to an OpenAI chat-completion service.

    :param model_id: OpenAI chat model name.
    :param api_key: API key for the service; required.
    :param endpoint: service endpoint (something like '/v1/completion'); required.
    :return: a configured sk.Kernel.
    :raises ValueError: if api_key or endpoint is missing.
    """
    # `assert` is stripped when Python runs with -O, so validate explicitly.
    if api_key is None:
        raise ValueError(f"## must fill api_key: {api_key}")
    if endpoint is None:
        raise ValueError(f"## must fill endpoint (something like '/v1/completion'): {endpoint}")
    service_id = "completion_model"
    service = OpenAIChatCompletion(model_id=model_id, api_key=api_key, endpoint=endpoint)
    kernel = sk.Kernel()
    kernel.add_chat_service(service_id=service_id, service=service)
    return kernel


def load_native_kernel_func(kernel: sk.Kernel, parent_dir: str, func_dir: str, func_name: str):
    """Load a named semantic function from a skill directory on disk.

    :raises RuntimeError: if the skill directory does not define func_name.
    """
    funcs = kernel.import_semantic_skill_from_directory(
        parent_directory=parent_dir, skill_directory_name=func_dir
    )
    if func_name not in funcs:
        skill_dir = os.path.abspath(os.path.join(parent_dir, func_dir))
        raise RuntimeError(f"## cannot find semantic kernel func {func_name} in native dir {skill_dir}.")
    return funcs[func_name]


def load_inline_kernel_func(kernel: sk.Kernel, prompt: str, max_tokens=2000, temperature=0.0, top_p=0.0):
    """Create a semantic function directly from an inline prompt template."""
    return kernel.create_semantic_function(
        prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p
    )


def template_convert(prompt_template):
    """Rewrite semantic-kernel placeholders as positional str.format slots.

    Each ``{{$param}}`` occurrence becomes ``{0}``, ``{1}``, ... in order of
    appearance (duplicated names still get distinct indices).

    :param prompt_template: template containing ``{{$param}}`` placeholders.
    :return: template with numbered ``{}`` placeholders.
    """
    pattern = r"\{\{\$(.*?)\}\}"
    counter = [0]  # mutable cell so the callback can keep state

    def _next_slot(_match):
        slot = "{" + str(counter[0]) + "}"
        counter[0] += 1
        return slot

    return re.sub(pattern, _next_slot, prompt_template)


def load_native_prompt_template(parent_dir: str, func_dir: str, func_name: str, raw_template=False):
    """Read a skill's ``skprompt.txt`` and optionally convert its placeholders.

    :param parent_dir: root directory holding skill directories.
    :param func_dir: skill directory name under parent_dir.
    :param func_name: function directory name containing skprompt.txt.
    :param raw_template: if True, return the file contents untouched;
        otherwise convert ``{{$param}}`` placeholders via template_convert.
    :return: the (possibly converted) prompt template string.
    """
    abs_func_dir = os.path.abspath(os.path.join(parent_dir, func_dir, func_name))
    # Explicit encoding: prompt files may contain non-ASCII text, and the
    # platform default encoding is not guaranteed to be UTF-8.
    with open(os.path.join(abs_func_dir, "skprompt.txt"), "r", encoding="utf-8") as fp:
        skprompts = fp.read()  # read() instead of "".join(readlines())
    return skprompts if raw_template else template_convert(skprompts)


def tiktoken_count(prompt, model="gpt-4"):
    """Return the number of tokens *prompt* encodes to for *model*."""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(prompt))


def default_appr_startswith(result: str, start_content: str):
    """Default approval check: True iff *result* begins with *start_content*."""
    prefix = result[:len(start_content)]
    return prefix == start_content


def invoke_with_retry(max_retries=5, appr_startswith=default_appr_startswith):
    """Decorator factory: retry an LLM call until it yields an acceptable result.

    A result is rejected (and the call retried, after a 2s sleep) when it is
    falsy, or when the caller passed a truthy ``start_content`` kwarg and the
    result does not satisfy *appr_startswith*.

    :param max_retries: maximum number of attempts.
    :param appr_startswith: predicate ``(result, start_content) -> bool``.
    :return: decorator; the wrapped function returns the accepted result, or
        None if all attempts fail or an exception is raised.
    """
    def invoke_decorator(llm_func):
        """Decorator applying the retry loop to llm_func."""
        @wraps(llm_func)
        def wrapped_function(*args, **kwargs):
            try:
                num_retry = 0
                result = None
                while num_retry < max_retries:
                    result = llm_func(*args, **kwargs)
                    # .get(): decorated functions are not required to accept a
                    # start_content kwarg; indexing raised KeyError here and
                    # silently aborted the retry loop.
                    start_content = kwargs.get("start_content")
                    if start_content and not appr_startswith(result, start_content):
                        print(f"## start_content = {start_content}\ngenerated result = {result}")
                        result = None
                    if result:
                        break
                    # Only reset request_key when the caller supplied one;
                    # injecting it unconditionally passed an unexpected kwarg
                    # to functions without a request_key parameter.
                    if "request_key" in kwargs:
                        kwargs["request_key"] = None
                    num_retry += 1
                    time.sleep(2)
                    print(f"## num_retry: 第{num_retry}次重试！")
                return result
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate.
                traceback.print_stack()
                traceback.print_exc()
                return None

        return wrapped_function
    return invoke_decorator


@invoke_with_retry(max_retries=5)
def invoke_kernel_func(sk_func: SKFunctionBase, params: sk.ContextVariables, verify_func=None, prompt=None):
    """Invoke a semantic-kernel function and return its (optionally verified) text.

    :param sk_func: the semantic-kernel function to run.
    :param params: context variables fed into the function.
    :param verify_func: optional post-processor applied to the result text.
    :param prompt: optional raw prompt; when given, token counts are logged.
    :return: verify_func(result) when verify_func is set, else the raw result text.
    """
    result = sk_func.invoke(variables=params)
    print(f"## result: {result.result}")

    # Surface any error the kernel recorded for this invocation.
    if result.last_error_description:
        print(f"## kernel func last err: {result.last_error_description}")
        print(f"## kernel func last exception: {result.last_exception}")

    if prompt:
        input_tokens = tiktoken_count(prompt)
        output_tokens = tiktoken_count(result.result)
        print(f"## prompt = {prompt}")
        print(f"## input_tiktoken = {input_tokens}, output_tiktokens = {output_tokens}")

    return verify_func(result.result) if verify_func else result.result


@invoke_with_retry(max_retries=5)
def invoke_langchain_func(llm: ChatOpenAI, params: Iterable, verify_func=None, prompt_template=None,
                          template_type=None, request_key=None, max_tokens=200):
    """Format a prompt, call a langchain chat model, and return the stripped text.

    :param llm: langchain chat model to call.
    :param params: positional values substituted into prompt_template.
    :param verify_func: optional post-processor applied to the result text.
    :param prompt_template: str.format-style template (positional slots).
    :param template_type: None for the plain chat path, "qwen" for vLLM-Qwen
        generation args; anything else raises RuntimeError.
    :param request_key: passed through to the qwen path only.
    :param max_tokens: generation cap for the qwen path only.
    :return: verify_func(result) when verify_func is set, else the result text.
    :raises RuntimeError: on an unsupported template_type.
    """
    request_prompt = prompt_template.format(*params)
    if template_type == "qwen":
        # vLLM-Qwen-specific sampling/stop configuration.
        generate_args = {
            "n": 1,
            "temperature": 0.3,
            "top_p": 0.8,
            "top_k": -1,
            "use_beam_search": False,
            "ignore_eos": False,
            "frequency_penalty": 1.2,
            "max_tokens": max_tokens,
            "stop": ["<|im_end|>", "<|endoftext|>"],
            "prompt_type": "vllm_qwen",
            "request_key": request_key,
        }
        result = llm([SystemMessage(content=request_prompt)], **generate_args).content.strip()
    elif not template_type:
        chat_messages = [
            SystemMessage(content=request_prompt),
            HumanMessage(content=""),
        ]
        result = llm(messages=chat_messages).content.strip()
    else:
        raise RuntimeError(f"## unsupported template_type = {template_type}")

    print(f"## result: {result}")
    return verify_func(result) if verify_func else result
