# -*- coding: utf-8 -*-
"""
    @Author：SimpleTree
    @date：2025/6/5 10:14
    @desc:
"""
import logging
import traceback

from celery_once import QueueOnce

from common.config.embedding_config import ModelManage
from ops import celery_app
from django.db.models import QuerySet

from common.event.listener_manage import ListenerManagement
from dataset.models import Document, State, TaskType
from django.utils.translation import gettext_lazy as _

from setting.models import Model
from setting.models_provider import get_model

max_kb_error = logging.getLogger("max_kb_error")
max_kb = logging.getLogger("max_kb")

def get_embedding_model(model_id, exception_handler=lambda e: max_kb_error.error(
    _('Failed to obtain vector model: {error} {traceback}').format(
        error=str(e),
        traceback=traceback.format_exc()
    ))):
    """
    Load the embedding model registered under ``model_id``.

    Looks up the ``Model`` row by id, then asks ``ModelManage`` for the
    (possibly cached) embedding model instance, lazily constructing it via
    ``get_model(model)`` on a cache miss.

    @param model_id: primary key of the ``Model`` row to load
    @param exception_handler: callback invoked with the exception before it is
        re-raised; defaults to logging the error with a traceback
    @return: the embedding model instance
    @raise Exception: whatever lookup/loading raised, after ``exception_handler``
        has been called
    """
    try:
        # NOTE(review): if no row matches, ``model`` is None and the loader
        # lambda passes None to get_model — presumably handled upstream; verify.
        model = QuerySet(Model).filter(id=model_id).first()
        embedding_model = ModelManage.get_model(model_id, lambda _id: get_model(model))
    except Exception as e:
        exception_handler(e)
        # Bare ``raise`` re-raises the active exception without appending an
        # extra frame the way ``raise e`` would.
        raise
    return embedding_model


@celery_app.task(base=QueueOnce, once={"keys": ["dataset_id"]}, name="celery:embedding_by_dataset")
def embedding_by_dataset(dataset_id, model_id):
    """
    Vectorize a whole dataset (knowledge base): drop its existing vectors,
    then queue one embedding task per document.

    @param dataset_id: dataset (knowledge base) id
    @param model_id: embedding model id
    """
    # Pass a plain template (not an f-string) to gettext: an f-string would be
    # interpolated before translation lookup, so the key would never match and
    # the trailing .format() would be a no-op.
    max_kb.info(_("Start--->Vectorizated dataset: {dataset_id}").format(dataset_id=dataset_id))
    try:
        ListenerManagement.delete_embedding_by_dataset(dataset_id)
        document_list = QuerySet(Document).filter(dataset_id=dataset_id)
        max_kb.info(_("Dataset documentation: {document_names}").format(
            document_names=", ".join([d.name for d in document_list])))
        for document in document_list:
            try:
                embedding_by_document.delay(document.id, model_id)
            except Exception as e:
                # Best-effort fan-out: log the failed dispatch and keep
                # queuing the remaining documents instead of swallowing it.
                max_kb_error.error(
                    _('Vectorized dataset: {dataset_id} error {error} {traceback}').format(
                        dataset_id=dataset_id,
                        error=str(e),
                        traceback=traceback.format_exc()))
    except Exception as e:
        # Translate first, then format, so the untranslated message key stays
        # stable for the gettext catalog.
        max_kb_error.error(
            _('Vectorized dataset: {dataset_id} error {error} {traceback}').format(
                dataset_id=dataset_id,
                error=str(e),
                traceback=traceback.format_exc()))
    finally:
        max_kb.info(_('End--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id))


@celery_app.task(base=QueueOnce, once={"keys": ["document_id"]}, name="celery:embedding_by_document")
def embedding_by_document(document_id, model_id, state_list=None):
    """
    Vectorize a single document.

    @param document_id: document id
    @param model_id: embedding model id
    @param state_list: task states whose paragraphs should be (re)embedded;
        defaults to every known state
    :return: None
    """
    if state_list is None:
        # Default: re-embed regardless of the paragraphs' current task state.
        state_list = [
            State.PENDING.value,
            State.STARTED.value,
            State.SUCCESS.value,
            State.FAILURE.value,
            State.REVOKE.value,
            State.REVOKED.value,
            State.IGNORED.value,
        ]

    def on_model_error(error):
        # Mark this document's embedding task as failed, then log the cause.
        ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING, State.FAILURE)

        max_kb_error.error(
            _('Failed to obtain vector model: {error} {traceback}').format(
                error=str(error),
                traceback=traceback.format_exc()
            ))

    model = get_embedding_model(model_id, on_model_error)
    ListenerManagement.embedding_by_document(document_id, model, state_list)


def delete_embedding_by_dataset_id_list(dataset_id_list):
    """
    Delete vector data for each dataset in the given id list.

    @param dataset_id_list: list of dataset ids
    @return: None
    """
    ListenerManagement.delete_embedding_by_dataset_id_list(dataset_id_list)


def delete_embedding_by_dataset(dataset_id):
    """
    Delete the vector data of the specified dataset.

    @param dataset_id: dataset id
    @return: None
    """
    ListenerManagement.delete_embedding_by_dataset(dataset_id)
