import hashlib
import typing

from pydantic import BaseModel, ConfigDict, Field

if typing.TYPE_CHECKING:
    from .api import Knowledge


class DifyKnowledge(BaseModel):
    """A single retrieved knowledge chunk in Dify's external-retriever format.

    Wraps one Q&A record (``Knowledge``) as the ``title``/``content``/
    ``metadata`` structure Dify expects from an external knowledge API.
    """

    class Metadata(BaseModel):
        """Per-chunk retrieval metadata following Dify's retriever schema."""

        # Allow constructing with either the field name (``source=...``) or
        # the wire name (``_source=...``).
        model_config = ConfigDict(populate_by_name=True)

        # BUG FIX: an attribute literally named ``_source`` is treated by
        # pydantic v2 as a *private attribute* — it is never validated or
        # serialized, so the ``"_source"`` key was silently dropped from both
        # input and output. Declaring it as ``source`` with an ``_source``
        # alias restores the intended JSON key.
        source: str = Field(alias="_source", serialization_alias="_source")
        position: int
        dataset_id: str
        dataset_name: str
        document_id: str
        document_name: str
        document_data_source_type: str
        segment_id: str
        retriever_from: str
        score: float
        segment_hit_count: int
        segment_word_count: int
        segment_position: int
        segment_index_node_hash: str

    metadata: Metadata
    title: str
    content: str

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "title": "东莞QA",
                "content": "养老保险-关系转移-企业职工-灵活就业人员-跨省（广东省外）-关系转入-办理指南\n企业职工（含灵活就业人员）养老保险跨省如何转移？...",
                "metadata": {
                    "_source": "knowledge",
                    "position": 1,
                    "dataset_id": "30a66f85-9a34-4807-821a-84bf98799cc5",
                    "dataset_name": "东莞QA",
                    "document_id": "0504b66d-824c-4f2a-abbf-ea0f66bf47e5",
                    "document_name": "东莞QA",
                    "document_data_source_type": "upload_file",
                    "segment_id": "7c667389-ca86-91ab-2adf-ee119d50daa5",
                    "retriever_from": "workflow",
                    "score": 0.9996366500854492,
                    "segment_hit_count": 1885,
                    "segment_word_count": 1125,
                    "segment_position": 289,
                    "segment_index_node_hash": "1540acad2146b16b5dbcf7b253dae28d51481d1f271b9ebdadfb9d5e40d6bcae",
                },
            }
        }
    )

    @classmethod
    def from_knowledge(cls, knowledge: "Knowledge") -> "DifyKnowledge":
        """Build a ``DifyKnowledge`` from a project ``Knowledge`` record.

        The title is the question; the content concatenates question and
        answer. Fields Dify requires but the source record lacks are filled
        with fixed placeholders (``position``, ``segment_hit_count``, ...).

        Args:
            knowledge: The matched knowledge record to convert.

        Returns:
            A validated ``DifyKnowledge`` instance.
        """
        return cls.model_validate(
            {
                "title": knowledge.question,
                "content": f"{knowledge.question}\n{knowledge.answer}",
                "metadata": {
                    "_source": "knowledge",
                    "position": 1,
                    "dataset_id": knowledge.dataset_id,
                    # NOTE(review): dataset_name/document_name reuse dataset_id
                    # and tenant_id — presumably no human-readable names exist
                    # on ``Knowledge``; confirm this is intentional.
                    "dataset_name": knowledge.dataset_id,
                    "document_id": knowledge.tenant_id,
                    "document_name": knowledge.tenant_id,
                    "document_data_source_type": "upload_file",
                    # Note: here we use desheng_id as segment_id
                    "segment_id": knowledge.desheng_id,
                    "retriever_from": "workflow",
                    "score": knowledge.matched_score,
                    "segment_hit_count": 1,
                    # NOTE(review): this is a *character* count of the answer,
                    # not a word count — acceptable for CJK text, but verify.
                    "segment_word_count": len(knowledge.answer),
                    "segment_position": 1,
                    # Non-cryptographic fingerprint of the answer text, used
                    # only as Dify's index-node hash.
                    "segment_index_node_hash": hashlib.md5(
                        knowledge.answer.encode()
                    ).hexdigest(),
                },
            }
        )