import datetime
import logging
import time

import click
from celery import shared_task

from core.indexing_runner import IndexingRunner
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Dataset, Document, DocumentSegment
from services.feature_service import FeatureService


@shared_task(queue="dataset")
def sync_website_document_indexing_task(dataset_id: str, document_id: str):
    """
    Async process document
    :param dataset_id:
    :param document_id:

    Usage: sync_website_document_indexing_task.delay(dataset_id, document_id)
    """
    start_at = time.perf_counter()

    dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
    if not dataset:
        # guard against a missing dataset before accessing its tenant
        logging.info(click.style("Dataset not found: {}".format(dataset_id), fg="red"))
        return

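    # Redis flag marking that a sync is in progress for this document; cleared when the task finishes or fails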
    sync_indexing_cache_key = "document_{}_is_sync".format(document_id)
    # check the tenant's vector space limit before re-indexing
    features = FeatureService.get_features(dataset.tenant_id)
    try:
        if features.billing.enabled:
            vector_space = features.vector_space
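            # a limit of 0 means unlimited; otherwise block the sync once usage has reached the quota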
            if 0 < vector_space.limit <= vector_space.size:
                raise ValueError(
                    "Your total number of documents plus the number of uploads has exceeded the limit of "
                    "your subscription."
                )
    except Exception as e:
        document = (
            db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()
        )
        if document:
            document.indexing_status = "error"
            document.error = str(e)
            document.stopped_at = datetime.datetime.utcnow()
            db.session.add(document)
            db.session.commit()
        redis_client.delete(sync_indexing_cache_key)
        return

    logging.info(click.style("Start sync website document: {}".format(document_id), fg="green"))
    document = (
        db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()
    )
    try:
        if document:
            # clean old data
            index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()

            segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
            if segments:
                index_node_ids = [segment.index_node_id for segment in segments]
                # delete from vector index
                index_processor.clean(dataset, index_node_ids)

                for segment in segments:
                    db.session.delete(segment)
                db.session.commit()

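            # mark the document as parsing again and record when re-processing started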
            document.indexing_status = "parsing"
            document.processing_started_at = datetime.datetime.utcnow()
            db.session.add(document)
            db.session.commit()

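            # re-run the full indexing pipeline for the refreshed website document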
            indexing_runner = IndexingRunner()
            indexing_runner.run([document])
            redis_client.delete(sync_indexing_cache_key)
    except Exception as ex:
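        # on failure, mark the document as errored and release the sync flag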
        document.indexing_status = "error"
        document.error = str(ex)
        document.stopped_at = datetime.datetime.utcnow()
        db.session.add(document)
        db.session.commit()
        logging.info(click.style(str(ex), fg="yellow"))
        redis_client.delete(sync_indexing_cache_key)
    end_at = time.perf_counter()
    logging.info(click.style("Sync document: {} latency: {}".format(document_id, end_at - start_at), fg="green"))