import os
import random
import re
from typing import List, Tuple

from sparkai.core.messages import ChatMessage

from iflytech_assistant.es import TextNode, search
from iflytech_assistant.spark_client import GenerationResult
from iflytech_assistant.spark_client import instruct_with_profile as instruct

from .dataclasses import FiveSuggestions, PreferenceTag
from .markdown_prompt_parser import PromptTemplate

__all__ = ["generate", "generate_response", "refine"]

# Elasticsearch index names: communication-skill knowledge base and tagged examples.
INDEX_NAME = "heqc"
EXAMPLE_INDEX_NAME = "heqc_examples_20240924"

# Load the markdown prompt template shipped next to this module.
templates_dir = os.path.join(os.path.dirname(__file__), "templates")
template_file = os.path.join(templates_dir, "rag_heqc.md")
# Use a context manager and an explicit encoding: the template contains
# Chinese text, so relying on the platform default encoding is unsafe,
# and the original bare open() leaked the file handle.
with open(template_file, encoding="utf-8") as _template_fh:
    prompt_template = PromptTemplate.from_markdown_text(_template_fh.read())

# Raw template strings used by build_prompt()/refine() below.
SYSTEM_PROMPT = prompt_template.system_prompt_template.raw_str
REPLY_USER_PROMPT = prompt_template.reply_user_prompt_template.raw_str
POLISH_USER_PROMPT = prompt_template.polish_user_prompt_template.raw_str
REFINE_PROMPT = prompt_template.refine_user_prompt_template.raw_str


def build_prompt(
    user_input: str, target: str, tags: List[PreferenceTag], mode: str
) -> Tuple[str, str]:
    """Assemble the (system_prompt, user_prompt) pair for one generation call.

    Builds tone/style/personality bullet lists from the preference tags,
    retrieves one few-shot example block per tag from the example index,
    then fills the system and user prompt templates.

    Args:
        user_input: The raw text the end user typed.
        target: Who the message is addressed to (e.g. a relationship role).
        tags: Preference tags carrying tone/style/personality descriptions.
        mode: "reply" to answer the target, anything else to polish
            the user's own wording.

    Returns:
        A ``(system_prompt, user_prompt)`` tuple of fully formatted strings.
    """
    tone_prompt = "\n".join(f"- {t.tag}: {t.tone}" for t in tags if t.tone)
    style_prompt = "\n".join(f"- {t.tag}: {t.style}" for t in tags if t.style)
    personality_prompt = "\n".join(
        f"- {t.tag}: {t.personality}" for t in tags if t.personality
    )

    # One few-shot block per tag, retrieved from the example index.
    example_blocks = []
    for t in tags:
        hits: List[TextNode] = search(
            user_input,
            index_name=EXAMPLE_INDEX_NAME,
            filters={"target": target, "tag": t.tag, "mode": mode},
            threshold=0,
        )
        if not hits:
            continue
        best = hits[0]
        # Strip any leading "1. " style enumeration from stored examples,
        # then show at most five of them in random order.
        cleaned = [re.sub(r"\d+\.\s*", "", x) for x in best.metadata["examples"]]
        sampled = random.sample(cleaned, k=min(5, len(cleaned)))
        header = f"##### {t.tag}" + "\n" + f"当用户输入“{best.text}”\n"
        example_blocks.append(header + "\n".join(f"- {s}" for s in sampled))

    example_prompt = "## Examples\n" + "\n\n".join(example_blocks)

    # Pick the user-prompt template by mode; both take identical fields.
    template = REPLY_USER_PROMPT if mode == "reply" else POLISH_USER_PROMPT
    user_prompt = template.format(
        target=target,
        user_input=user_input,
        preferenced_style="、".join(t.tag for t in tags),
    )
    # Retrieval of communication skills is currently disabled (previously a
    # search() against INDEX_NAME); the normalization branch below is kept
    # so re-enabling it only requires restoring the search call.
    communication_skills = ""

    if communication_skills:
        # Collapse runs of newlines in the top retrieved passage.
        communication_skills = re.sub(r"[\n\r]+", "\n", communication_skills[0].text)
    else:
        communication_skills = ""

    system_prompt = SYSTEM_PROMPT.format(
        tone=tone_prompt,
        style=style_prompt,
        personality=personality_prompt,
        communication_skills=communication_skills,
        examples=example_prompt,
    )

    return system_prompt, user_prompt


def generate(user_input, target: str, tags: List[PreferenceTag], mode: str):
    """Run one end-to-end generation: build prompts, call the LLM, parse.

    Returns:
        ``(system_prompt, user_prompt, llm_response, suggestions)`` where
        ``suggestions`` is the parsed :class:`FiveSuggestions`.
    """
    sys_prompt, usr_prompt = build_prompt(user_input, target, tags, mode)
    result = instruct(usr_prompt, sys_prompt, prefix="")
    parsed = FiveSuggestions.from_generation_result(result)
    return sys_prompt, usr_prompt, result, parsed


def refine(
    user_input: str,
    system_prompt: str,
    refine_words: str,
    llm_response: GenerationResult,
):
    """Ask the LLM to refine its previous answer per the user's instruction.

    Replays the first exchange as chat history, then sends the refine
    instruction. On any LLM failure the empty string is fed to the parser
    as a best-effort fallback.

    Returns:
        ``(refine_suggestions, refine_prompt)``.
    """
    conversation = [
        ChatMessage(role="user", content=user_input),
        ChatMessage(role="assistant", content=llm_response.llm_response),
    ]

    refine_prompt = REFINE_PROMPT.format(refine_words=refine_words)
    try:
        refined = instruct(refine_prompt, system_prompt, prefix="", historys=conversation)
    except Exception:
        # NOTE(review): deliberately swallows all errors and parses "" —
        # confirm FiveSuggestions.from_generation_result tolerates a string.
        refined = ""

    return FiveSuggestions.from_generation_result(refined), refine_prompt


def generate_response(
    user_input: str,
    target: str,
    tags: List[PreferenceTag],
    refine_words: str,
    mode: str,
) -> Tuple[FiveSuggestions, FiveSuggestions, str, str, str]:
    """Generate suggestions, then refine them, returning both stages.

    Args:
        user_input: The raw text the end user typed.
        target: Who the message is addressed to.
        tags: Preference tags steering tone/style/personality.
        refine_words: The user's refinement instruction for the second pass.
        mode: Passed through to :func:`build_prompt` ("reply" or polish).

    Returns:
        A 5-tuple ``(suggestions, refine_suggestions, system_prompt,
        user_prompt, refine_prompt)``.  The original annotation declared a
        2-tuple, which did not match the actual return value — fixed here.
    """
    system_prompt, user_prompt, llm_response, suggestions = generate(
        user_input, target, tags, mode
    )

    refine_suggestions, refine_prompt = refine(
        user_input, system_prompt, refine_words, llm_response
    )
    return (
        suggestions,
        refine_suggestions,
        system_prompt,
        user_prompt,
        refine_prompt,
    )
