import json
import re
from typing import List

from llama_index.core.schema import TextNode
from pydantic import BaseModel, field_validator, model_validator

from iflytech_assistant.spark_client import GenerationResult


class LianaiRagData(BaseModel):
    """One romance-chat RAG record: an input utterance plus persona metadata.

    All metadata fields fall back to "default" when absent; ``examples``
    defaults to an empty list (pydantic copies the default per instance).
    """

    input: str
    gender: str = "default"
    tag: str = "default"
    stage: str = "default"
    examples: List[str] = []

    def to_text_node(self):
        """Wrap this record as a llama-index TextNode.

        The utterance becomes the node text; every other field rides along
        as metadata.
        """
        meta = {
            "gender": self.gender,
            "tag": self.tag,
            "stage": self.stage,
            "examples": self.examples,
        }
        return TextNode(text=self.input, metadata=meta)


class RagData(BaseModel):
    """One polished-chat RAG record parsed from a prompt/answer jsonline.

    Fields:
        input:    the raw utterance extracted from the prompt.
        target:   relationship of the chat partner to the user.
        tag:      the user's persona tag.
        mode:     normalized to "reply" or "polish".
        examples: numbered example sentences mined from the LLM answer.
    """

    input: str
    target: str
    tag: str
    mode: str

    examples: List[str]

    @field_validator("mode")
    def mode_validator(cls, v):
        """Normalize Chinese/English mode labels to canonical English values."""
        if v in ["回复", "reply"]:
            return "reply"
        elif v in ["表达", "polish"]:
            return "polish"
        else:
            raise ValueError(f"Unknown mode: {v}")

    @field_validator("examples", mode="before")
    def check_examples(cls, v):
        """Accept either a ready list or a newline-joined numbered string.

        Strings are split on newlines, stripped, and the leading "N. "
        enumeration removed. Duplicates are dropped while preserving
        first-seen order: the previous ``list(set(...))`` produced a
        nondeterministic ordering (string hashing is randomized per process).
        """
        if isinstance(v, list):
            return v
        lines = [line.strip() for line in re.split(r"[\n\r]+", v)]
        lines = [re.sub(r"^\d+\.\s", "", line) for line in lines]
        # deduplicate, keeping first occurrence (dict preserves insertion order)
        return list(dict.fromkeys(lines))

    @classmethod
    def from_jsonline(cls, line: str) -> "RagData":
        """Parse one JSON line with "query" and "answer" keys into a RagData.

        "query"/"answer" may each be a plain string or a chat-message list;
        for lists, the prompt is the first query entry, the main answer is
        message index 1, and the refined answer message index 3 — TODO
        confirm this message layout against the producer of these files.

        Raises:
            AttributeError: if an expected pattern is missing from the query
                (``.search`` returns None).
            KeyError / IndexError / json.JSONDecodeError: on malformed lines.
        """
        tag_re = re.compile(r"用户的人设是(.+?)-")
        target_re = re.compile(r"聊天对象是用户的(.+?)。")
        input_re = re.compile(r"当(用户|对方)输入“(.+?)”时")
        mode_re = re.compile(r"高情商的(回复|表达)")

        raw_json = json.loads(line)

        if isinstance(raw_json["query"], str):
            query_str = raw_json["query"]
        else:
            query_str = raw_json["query"][0]
        tag = tag_re.search(query_str).group(1)
        target = target_re.search(query_str).group(1)
        # group(2): group(1) only captures who is speaking (用户/对方)
        input_text = input_re.search(query_str).group(2)
        mode = mode_re.search(query_str).group(1)

        if isinstance(raw_json["answer"], str):
            answer_str = raw_json["answer"]
        else:
            answer_str = raw_json["answer"][1]["content"]
        answer_lines = [s.strip() for s in re.split(r"[\n\r]+", answer_str)]
        # keep only the enumerated suggestion lines ("1. ...", "2. ...", ...)
        examples = [s for s in answer_lines if re.match(r"^\d+\.\s", s)]

        if isinstance(raw_json["answer"], list):
            refine_str = raw_json["answer"][3]["content"]
            refine_lines = [s.strip() for s in re.split(r"[\n\r]+", refine_str)]
            refine_examples = [
                s for s in refine_lines if re.match(r"^\d+\.\s", s)
            ]
            # cap at the last 10 refined examples
            refine_examples = refine_examples[-10:]
        else:
            refine_examples = []

        return cls(
            input=input_text,
            target=target,
            tag=tag,
            examples=examples + refine_examples,
            mode=mode,
        )

    def to_text_node(self):
        """Wrap the record as a llama-index TextNode; everything except the
        input text travels in the node metadata."""
        return TextNode(
            text=self.input,
            metadata={
                "target": self.target,
                "tag": self.tag,
                "examples": self.examples,
                "mode": self.mode,
            },
        )


class PreferenceTag(BaseModel):
    """User preference profile; only ``tag`` is mandatory and non-empty."""

    tag: str
    target: str = ""
    tone: str = ""
    style: str = ""
    personality: str = ""

    @field_validator("tag", "target", "tone", "style", "personality", mode="before")
    def check_empty(cls, v):
        """Coerce non-string inputs to "" and strip surrounding whitespace."""
        # NOTE: the former third parameter named `field` actually received a
        # pydantic-v2 ValidationInfo, not a field, and was unused — removed.
        if not isinstance(v, str):
            return ""
        return v.strip()

    @model_validator(mode="after")
    def check_tag(cls, v):
        """Reject models whose tag stripped down to an empty string."""
        if not v.tag:
            raise ValueError("Tag cannot be empty.")
        return v


class RefineWord(BaseModel):
    """A refinement keyword labelled with the preference slot it fills."""

    word: str
    type: str

    @field_validator("type", mode="before")
    def check_type(cls, v):
        """Normalize ``type`` to lowercase and restrict it to the known slots.

        Raises ValueError (surfaced by pydantic as a ValidationError) for
        unknown slots and for non-string input — previously a non-string
        escaped as a bare AttributeError from ``.strip()``.
        """
        if not isinstance(v, str):
            raise ValueError("Type must be one of tone, style, personality.")
        v = v.strip().lower()
        if v not in ["tone", "style", "personality"]:
            raise ValueError("Type must be one of tone, style, personality.")
        return v


class FiveSuggestions(GenerationResult):
    """A GenerationResult whose LLM text has been parsed into paired
    suggestion/explanation entries."""

    class Suggestion(BaseModel):
        suggestion: str
        explanation: str

    suggestions: List[Suggestion]
    llm_response: str = ""

    def __str__(self):
        """All suggestion texts, one per line."""
        return "\n".join(item.suggestion for item in self.suggestions)

    @property
    def explanations(self):
        """All explanation texts, one per line."""
        return "\n".join(item.explanation for item in self.suggestions)

    @classmethod
    def from_generation_result(cls, result: GenerationResult) -> "FiveSuggestions":
        """Re-wrap a plain GenerationResult, parsing its llm_response text."""
        parsed = cls.from_llm_response(result.llm_response)
        return cls(**result.model_dump(), suggestions=parsed.suggestions)

    @classmethod
    def from_llm_response(cls, response: str) -> "FiveSuggestions":
        """Parse the raw LLM text into suggestion/explanation pairs.

        Expected LLM response format:
        1. xxx
        2. xxx
        3. xxx
        4. xxx
        5. xxx

        每条建议的解释：
        a. xxx
        b. xxx
        c. xxx
        d. xxx
        e. xxx

        Suggestions are the "N. " lines, explanations the "a."–"e." lines;
        a suggestion with no matching explanation gets "".
        """
        stripped = [part.strip() for part in re.split(r"[\n\r]+", response)]

        numbered = [part for part in stripped if re.match(r"^\d+\.\s", part)]
        lettered = [part for part in stripped if re.match(r"^[a-eA-E]\.\s", part)]

        pairs = [
            cls.Suggestion(
                suggestion=sug,
                explanation=lettered[idx] if idx < len(lettered) else "",
            )
            for idx, sug in enumerate(numbered)
        ]
        return cls(suggestions=pairs, llm_response=response)
