"""
Celery异步任务
用于处理文档的文本提取、分块、向量化
"""
import logging
from typing import List

from celery import shared_task
from django.conf import settings

from .models import Document, DocumentChunk
from .utils import extract_text_from_document, split_text_into_chunks
from .vector_store import VectorStoreService

logger = logging.getLogger('pioneer_xiaoke')


@shared_task(bind=True, max_retries=3, default_retry_delay=60)
def process_document_task(self, document_id: int):
    """Asynchronously process one document end to end.

    Pipeline:
      1. extract the raw text,
      2. split it into chunks,
      3. embed the chunks and write them into Chroma.

    Retries with exponential backoff (60s, 120s, 240s) on any failure;
    once retries are exhausted the document is marked ``failed`` and the
    exception is re-raised so Celery records the task as failed.
    """
    try:
        document = Document.objects.select_related('knowledge_base').get(id=document_id)
    except Document.DoesNotExist:
        # Deleted before a worker picked the task up — nothing to do.
        logger.warning('文档不存在，忽略。document_id=%s', document_id)
        return

    store = VectorStoreService()

    try:
        _mark_processing(document)
        _rebuild_document_vectors(document, store)

        # Success: record completion and remember which Chroma collection
        # now holds this document's vectors.
        document.status = 'completed'
        document.vector_indexed = True
        document.vector_id = store.get_collection_name(document.knowledge_base_id)
        document.error_message = None
        document.save(update_fields=['status', 'vector_indexed', 'vector_id', 'error_message', 'updated_at'])
        logger.info('文档处理完成 document_id=%s chunks=%s', document.id, document.chunks.count())

    except Exception as exc:
        logger.exception('文档处理失败 document_id=%s error=%s', document.id, exc)

        if self.request.retries >= self.max_retries:
            # Out of retries: persist the terminal failure state.
            document.status = 'failed'
            document.vector_indexed = False
            document.error_message = str(exc)
            document.save(update_fields=['status', 'vector_indexed', 'error_message', 'updated_at'])
            raise

        # Keep the latest error visible while waiting for the next attempt,
        # then reschedule with exponential backoff.
        document.error_message = str(exc)
        document.save(update_fields=['error_message', 'updated_at'])
        raise self.retry(exc=exc, countdown=2 ** self.request.retries * 60)


def _mark_processing(document: Document):
    """Move *document* into the transient 'processing' state and clear any
    error message left over from a previous attempt."""
    updates = {'status': 'processing', 'error_message': None}
    for field, value in updates.items():
        setattr(document, field, value)
    document.save(update_fields=[*updates, 'updated_at'])


def _rebuild_document_vectors(document: Document, vector_service: VectorStoreService):
    """Re-extract, re-chunk and re-embed *document* from scratch.

    Replaces both the relational chunk rows and the vectors in Chroma.
    Raises ValueError when extraction yields no text or chunking yields
    no chunks.
    """
    # Fresh extraction; persist the text plus page/word statistics.
    text, stats = extract_text_from_document(document)
    if not text:
        raise ValueError('未提取到有效文本内容')

    document.content = text
    document.page_count = stats.get('page_count')
    document.word_count = stats.get('word_count')
    document.save(update_fields=['content', 'page_count', 'word_count', 'updated_at'])

    # Split into overlapping chunks using the project-wide settings.
    pieces = split_text_into_chunks(
        text=text,
        chunk_size=settings.DOCUMENT_CHUNK_SIZE,
        overlap=settings.DOCUMENT_CHUNK_OVERLAP,
    )
    if not pieces:
        raise ValueError('文本长度不足，无法生成分块')

    # Drop stale state: vectors first (their ids live on the old chunk
    # rows), then the chunk rows themselves.
    stale_vector_ids = list(
        document.chunks.exclude(vector_id__isnull=True).values_list('vector_id', flat=True)
    )
    if stale_vector_ids:
        vector_service.delete_vectors(document.knowledge_base_id, stale_vector_ids)
    DocumentChunk.objects.filter(document=document).delete()

    # Persist the new chunk rows, then mirror them into the vector store.
    instances, ids, docs, metas = _build_chunk_payload(document, pieces)
    DocumentChunk.objects.bulk_create(instances)

    vector_service.upsert_chunks(
        knowledge_base_id=document.knowledge_base_id,
        chunk_ids=ids,
        documents=docs,
        metadatas=metas,
    )


def _build_chunk_payload(document: Document, chunks: List[str]):
    """Prepare the per-chunk payload for the DB and vector-store writes.

    Returns four parallel sequences: unsaved DocumentChunk instances,
    vector ids, raw chunk texts, and Chroma metadata dicts. Chunk indices
    are 1-based.
    """
    instances = []
    ids = []
    docs = []
    metas = []

    for index, text in enumerate(chunks, start=1):
        vid = f'doc_{document.id}_chunk_{index}'

        instances.append(DocumentChunk(
            document=document,
            chunk_index=index,
            content=text,
            # NOTE(review): whitespace-separated word count, not model
            # tokens, despite the field name.
            token_count=len(text.split()),
            vector_id=vid,
        ))
        ids.append(vid)
        docs.append(text)
        metas.append({
            'user_id': document.user_id,
            'knowledge_base_id': document.knowledge_base_id,
            'document_id': document.id,
            'chunk_index': index,
            'title': document.title,
        })

    return instances, ids, docs, metas
