Sparrow ML new services
- config.py +5 -0
- data/donut_evaluate_stats.json +1 -0
- data/donut_inference_stats.json +1 -1
- data/donut_training_stats.json +1 -26
- endpoints.py +4 -0
- requirements-fastapi.txt +1 -1
- routers/donut_evaluate.py +90 -0
- routers/donut_inference.py +17 -6
- routers/donut_training.py +393 -0
- routers/inference.py +2 -2
- routers/training.py +64 -6
config.py
CHANGED
@@ -7,8 +7,13 @@ class Settings(BaseSettings):
     sparrow_key: str = os.environ.get("sparrow_key")
     processor: str = "katanaml-org/invoices-donut-model-v1"
     model: str = "katanaml-org/invoices-donut-model-v1"
+    dataset: str = "katanaml-org/invoices-donut-data-v1"
+    base_config: str = "naver-clova-ix/donut-base"
+    base_processor: str = "naver-clova-ix/donut-base"
+    base_model: str = "naver-clova-ix/donut-base"
     inference_stats_file: str = "data/donut_inference_stats.json"
     training_stats_file: str = "data/donut_training_stats.json"
+    evaluate_stats_file: str = "data/donut_evaluate_stats.json"


 settings = Settings()
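The new Settings fields point training and evaluation at the public invoices dataset and the Donut base checkpoints, and give evaluation runs their own stats file. A minimal sketch (not part of this commit) of reading these fields, assuming config.py exposes the module-level settings instance shown above:

# Sketch only: confirm which dataset and checkpoints the services will use.
# Assumes this runs from the service root so config.py is importable.
from config import settings

print(settings.dataset)              # "katanaml-org/invoices-donut-data-v1"
print(settings.base_model)           # "naver-clova-ix/donut-base"
print(settings.evaluate_stats_file)  # "data/donut_evaluate_stats.json"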
data/donut_evaluate_stats.json
ADDED
@@ -0,0 +1 @@
[[498.8315510749817, {"accuracies": [0.9903846153846154, 0.6744186046511628, 0.9920948616600791, 0.981675392670157, 0.9728555917480999, 0.9886363636363636, 0.9846796657381616, 0.9858156028368794, 0.996031746031746, 0.9768115942028985, 0.9777227722772277, 0.9886363636363636, 0.9955555555555555, 0.9894847528916929, 0.9964028776978417, 0.9970238095238095, 0.975609756097561, 0.9941176470588236, 0.9921259842519685, 0.9533898305084746, 0.8410174880763116, 0.9781609195402299, 0.8535825545171339, 0.9862595419847329, 0.9906868451688009, 0.9929478138222849], "mean_accuracy": 0.9633126365834221}, 0.9633126365834221, "katanaml-org/invoices-donut-model-v1", "2023-05-15 16:16:59"], [480.58880615234375, {"accuracies": [0.9903846153846154, 0.6744186046511628, 0.9920948616600791, 0.981675392670157, 0.9728555917480999, 0.9886363636363636, 0.9846796657381616, 0.9858156028368794, 0.996031746031746, 0.9768115942028985, 0.9777227722772277, 0.9886363636363636, 0.9955555555555555, 0.9894847528916929, 0.9964028776978417, 0.9970238095238095, 0.975609756097561, 0.9941176470588236, 0.9921259842519685, 0.9533898305084746, 0.8410174880763116, 0.9781609195402299, 0.8535825545171339, 0.9862595419847329, 0.9906868451688009, 0.9929478138222849], "mean_accuracy": 0.9633126365834221}, 0.9633126365834221, "katanaml-org/invoices-donut-model-v1", "2023-05-15 16:29:24"], [496.27668499946594, {"accuracies": [0.9903846153846154, 0.6744186046511628, 0.9920948616600791, 0.981675392670157, 0.9728555917480999, 0.9886363636363636, 0.9846796657381616, 0.9858156028368794, 0.996031746031746, 0.9768115942028985, 0.9777227722772277, 0.9886363636363636, 0.9955555555555555, 0.9894847528916929, 0.9964028776978417, 0.9970238095238095, 0.975609756097561, 0.9941176470588236, 0.9921259842519685, 0.9533898305084746, 0.8410174880763116, 0.9781609195402299, 0.8535825545171339, 0.9862595419847329, 0.9906868451688009, 0.9929478138222849], "mean_accuracy": 0.9633126365834221}, 0.9633126365834221, "katanaml-org/invoices-donut-model-v1", "2023-05-17 11:26:54"], [496.5165719985962, {"accuracies": [0.9903846153846154, 0.6744186046511628, 0.9920948616600791, 0.981675392670157, 0.9728555917480999, 0.9886363636363636, 0.9846796657381616, 0.9858156028368794, 0.996031746031746, 0.9768115942028985, 0.9777227722772277, 0.9886363636363636, 0.9955555555555555, 0.9894847528916929, 0.9964028776978417, 0.9970238095238095, 0.975609756097561, 0.9941176470588236, 0.9921259842519685, 0.9533898305084746, 0.8410174880763116, 0.9781609195402299, 0.8535825545171339, 0.9862595419847329, 0.9906868451688009, 0.9929478138222849], "mean_accuracy": 0.9633126365834221}, 0.9633126365834221, "katanaml-org/invoices-donut-model-v1", "2023-05-17 11:40:15"], [528.6264460086823, {"accuracies": [0.9903846153846154, 0.6744186046511628, 0.9920948616600791, 0.981675392670157, 0.9728555917480999, 0.9886363636363636, 0.9846796657381616, 0.9858156028368794, 0.996031746031746, 0.9768115942028985, 0.9777227722772277, 0.9886363636363636, 0.9955555555555555, 0.9894847528916929, 0.9964028776978417, 0.9970238095238095, 0.975609756097561, 0.9941176470588236, 0.9921259842519685, 0.9533898305084746, 0.8410174880763116, 0.9781609195402299, 0.8535825545171339, 0.9862595419847329, 0.9906868451688009, 0.9929478138222849], "mean_accuracy": 0.9633126365834221}, 0.9633126365834221, "katanaml-org/invoices-donut-model-v1", "2023-05-17 22:22:59"]]
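Each record in donut_evaluate_stats.json follows the field order logged by invoke_evaluate in routers/training.py: processing time in seconds, the per-document accuracy scores, the mean accuracy, the model id, and a timestamp (the timestamp is assumed to be appended by utils.log_stats). A small sketch (not part of the commit) for inspecting the file:

# Sketch only: summarize logged evaluation runs.
import json

with open("data/donut_evaluate_stats.json") as f:
    runs = json.load(f)

for processing_time, scores, mean_accuracy, model, timestamp in runs:
    print(f"{timestamp} {model}: mean accuracy {mean_accuracy:.4f} "
          f"({len(scores['accuracies'])} docs, {processing_time:.0f}s)")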
data/donut_inference_stats.json
CHANGED
@@ -1 +1 @@
-[[14.571558952331543, 21, "invoice_10.jpg", "katanaml-org/invoices-donut-model-v1", "2023-04-13 21:45:30"]]
+[[14.571558952331543, 21, "invoice_10.jpg", "katanaml-org/invoices-donut-model-v1", "2023-04-13 21:45:30"], [14.510485887527466, 16, "docs/inference/invoice_0_16823599391530209.jpg", "katanaml-org/invoices-donut-model-v1", "2023-04-24 21:12:37"]]
data/donut_training_stats.json
CHANGED
@@ -1,26 +1 @@
-[["2023-
-["2023-04-10 23:24:24", 0.2, 1360, "invoices-donut-model-v1"],
-["2023-04-11 23:24:24", 0.85, 1750, "invoices-donut-model-v1"],
-["2023-04-15 23:24:24", 0.24, 2547, "invoices-donut-model-v1"],
-["2023-04-16 23:24:24", 0.17, 2549, "invoices-donut-model-v1"],
-["2023-04-09 23:24:24", 0.18, 4756, "invoices-donut-model-v2"],
-["2023-04-10 23:24:24", 0.19, 4856, "invoices-donut-model-v2"],
-["2023-04-11 23:24:24", 0.48, 4956, "invoices-donut-model-v2"],
-["2023-04-15 23:24:24", 0.71, 5056, "invoices-donut-model-v2"],
-["2023-04-16 23:24:24", 0.22, 5156, "invoices-donut-model-v2"],
-["2023-04-09 23:24:24", 0.23, 5260, "invoices-donut-model-v3"],
-["2023-04-10 23:24:24", 0.44, 5360, "invoices-donut-model-v3"],
-["2023-04-11 23:24:24", 0.25, 5460, "invoices-donut-model-v3"],
-["2023-04-15 23:24:24", 0.56, 5560, "invoices-donut-model-v3"],
-["2023-04-16 23:24:24", 0.37, 5660, "invoices-donut-model-v3"],
-["2023-04-09 23:24:24", 0.88, 5760, "invoices-donut-model-v4"],
-["2023-04-10 23:24:24", 0.29, 5860, "invoices-donut-model-v4"],
-["2023-04-11 23:24:24", 0.3, 5960, "invoices-donut-model-v4"],
-["2023-04-15 23:24:24", 0.51, 6060, "invoices-donut-model-v4"],
-["2023-04-16 23:24:24", 0.32, 6160, "invoices-donut-model-v4"],
-["2023-04-09 23:24:24", 0.53, 6260, "invoices-donut-model-v5"],
-["2023-04-10 23:24:24", 0.34, 6360, "invoices-donut-model-v5"],
-["2023-04-11 23:24:24", 0.85, 6460, "invoices-donut-model-v5"],
-["2023-04-15 23:24:24", 0.36, 6560, "invoices-donut-model-v5"],
-["2023-04-16 23:24:24", 0.37, 6660, "invoices-donut-model-v5"]
-]
+[[112.83321595191956, "katanaml-org/invoices-donut-model-v1", "2023-05-17 22:05:20"], [47.31714415550232, "katanaml-org/invoices-donut-model-v1", "2023-05-17 22:06:31"]]
endpoints.py
CHANGED
@@ -1,6 +1,10 @@
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from routers import inference, training
+from huggingface_hub import login
+from config import settings
+
+login(settings.huggingface_key)
 
 app = FastAPI(openapi_url="/api/v1/sparrow-ml/openapi.json", docs_url="/api/v1/sparrow-ml/docs")
 
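Note that login(settings.huggingface_key) now runs at import time, so a valid Hugging Face token must be available before the app starts; huggingface_key is assumed to be an existing Settings field read from the environment (it is not added in this commit). A minimal launch sketch under that assumption:

# Sketch only: start the service with the required secrets in the environment.
# The env var names are assumptions based on how sparrow_key is read in config.py.
import os
import uvicorn

os.environ.setdefault("huggingface_key", "hf_...")  # placeholder token
os.environ.setdefault("sparrow_key", "...")         # placeholder service key

uvicorn.run("endpoints:app", host="0.0.0.0", port=8000)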
requirements-fastapi.txt
CHANGED
@@ -5,6 +5,6 @@ tensorboard
 pytorch-lightning
 Pillow
 donut-python
-fastapi==0.95.
+fastapi==0.95.2
 uvicorn[standard]
 python-multipart
routers/donut_evaluate.py
ADDED
@@ -0,0 +1,90 @@
from transformers import DonutProcessor, VisionEncoderDecoderModel
import locale

import re
import json
import torch
from tqdm.auto import tqdm
import numpy as np
from donut import JSONParseEvaluator
from datasets import load_dataset
from functools import lru_cache
import os
import time
from config import settings

locale.getpreferredencoding = lambda: "UTF-8"


@lru_cache(maxsize=1)
def prepare_model():
    processor = DonutProcessor.from_pretrained(settings.processor)
    model = VisionEncoderDecoderModel.from_pretrained(settings.model)

    device = "cuda" if torch.cuda.is_available() else "cpu"

    model.eval()
    model.to(device)

    dataset = load_dataset(settings.dataset, split="test")

    return processor, model, device, dataset


def run_evaluate_donut():
    worker_pid = os.getpid()
    print(f"Handling evaluation request with worker PID: {worker_pid}")

    start_time = time.time()

    output_list = []
    accs = []

    processor, model, device, dataset = prepare_model()

    for idx, sample in tqdm(enumerate(dataset), total=len(dataset)):
        # prepare encoder inputs
        pixel_values = processor(sample["image"].convert("RGB"), return_tensors="pt").pixel_values
        pixel_values = pixel_values.to(device)
        # prepare decoder inputs
        task_prompt = "<s_cord-v2>"
        decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids
        decoder_input_ids = decoder_input_ids.to(device)

        # autoregressively generate sequence
        outputs = model.generate(
            pixel_values,
            decoder_input_ids=decoder_input_ids,
            max_length=model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=processor.tokenizer.pad_token_id,
            eos_token_id=processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        )

        # turn into JSON
        seq = processor.batch_decode(outputs.sequences)[0]
        seq = seq.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "")
        seq = re.sub(r"<.*?>", "", seq, count=1).strip()  # remove first task start token
        seq = processor.token2json(seq)

        ground_truth = json.loads(sample["ground_truth"])
        ground_truth = ground_truth["gt_parse"]
        evaluator = JSONParseEvaluator()
        score = evaluator.cal_acc(seq, ground_truth)

        accs.append(score)
        output_list.append(seq)

    end_time = time.time()
    processing_time = end_time - start_time

    scores = {"accuracies": accs, "mean_accuracy": np.mean(accs)}
    print(scores, f"length : {len(accs)}")
    print("Mean accuracy:", np.mean(accs))
    print(f"Evaluation done, worker PID: {worker_pid}")

    return scores, np.mean(accs), processing_time
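For illustration (not part of the commit), run_evaluate_donut can also be called directly, e.g. from a one-off script; the tuple it returns is what invoke_evaluate in routers/training.py writes to the evaluate stats file:

# Sketch only: run the evaluation loop outside of FastAPI.
# Assumes it runs from the service root so config and routers are importable.
from routers.donut_evaluate import run_evaluate_donut

scores, mean_accuracy, processing_time = run_evaluate_donut()
print(f"Mean accuracy {mean_accuracy:.4f} over {len(scores['accuracies'])} test documents "
      f"in {processing_time:.0f}s")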
routers/donut_inference.py
CHANGED
@@ -3,20 +3,29 @@ import time
 import torch
 from transformers import DonutProcessor, VisionEncoderDecoderModel
 from config import settings
-from
-model
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model.to(device)
+from functools import lru_cache
+import os
+
+
+@lru_cache(maxsize=1)
+def load_model():
+    processor = DonutProcessor.from_pretrained(settings.processor)
+    model = VisionEncoderDecoderModel.from_pretrained(settings.model)
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    model.to(device)
+
+    return processor, model, device
 
 
 def process_document_donut(image):
+    worker_pid = os.getpid()
+    print(f"Handling inference request with worker PID: {worker_pid}")
+
     start_time = time.time()
 
+    processor, model, device = load_model()
+
     # prepare encoder inputs
     pixel_values = processor(image, return_tensors="pt").pixel_values
 
@@ -46,4 +55,6 @@ def process_document_donut(image):
     end_time = time.time()
     processing_time = end_time - start_time
 
+    print(f"Inference done, worker PID: {worker_pid}")
+
     return processor.token2json(sequence), processing_time
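The lru_cache(maxsize=1) wrapper replaces the module-level model setup, so the processor and model are loaded lazily, once per worker process, on the first request. A minimal usage sketch (hypothetical image path):

# Sketch only: call the inference helper directly with a PIL image.
from PIL import Image
from routers.donut_inference import process_document_donut

image = Image.open("docs/inference/invoice_10.jpg").convert("RGB")  # hypothetical path
result, processing_time = process_document_donut(image)
print(result)           # extracted invoice fields as JSON
print(processing_time)  # seconds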
routers/donut_training.py
ADDED
@@ -0,0 +1,393 @@
# !pip install -q git+https://github.com/huggingface/transformers.git datasets sentencepiece
# !pip install -q pytorch-lightning==1.9.5 wandb

from config import settings
from datasets import load_dataset
from transformers import VisionEncoderDecoderConfig
from transformers import DonutProcessor, VisionEncoderDecoderModel

import json
import random
from typing import Any, List, Tuple

import torch
from torch.utils.data import Dataset

from torch.utils.data import DataLoader

import re
from nltk import edit_distance
import numpy as np
import os
import time

import pytorch_lightning as pl
from functools import lru_cache

from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import Callback
from config import settings

added_tokens = []

dataset_name = settings.dataset
base_config_name = settings.base_config
base_processor_name = settings.base_processor
base_model_name = settings.base_model
model_name = settings.model

@lru_cache(maxsize=1)
def prepare_job():
    print("Preparing job...")

    dataset = load_dataset(dataset_name)

    max_length = 768
    image_size = [1280, 960]

    # update image_size of the encoder
    # during pre-training, a larger image size was used
    config = VisionEncoderDecoderConfig.from_pretrained(base_config_name)
    config.encoder.image_size = image_size  # (height, width)
    # update max_length of the decoder (for generation)
    config.decoder.max_length = max_length
    # TODO we should actually update max_position_embeddings and interpolate the pre-trained ones:
    # https://github.com/clovaai/donut/blob/0acc65a85d140852b8d9928565f0f6b2d98dc088/donut/model.py#L602

    processor = DonutProcessor.from_pretrained(base_processor_name)
    model = VisionEncoderDecoderModel.from_pretrained(base_model_name, config=config)

    return model, processor, dataset, config, image_size, max_length


class DonutDataset(Dataset):
    """
    DonutDataset which is saved in huggingface datasets format. (see details in https://huggingface.co/docs/datasets)
    Each row, consists of image path(png/jpg/jpeg) and gt data (json/jsonl/txt),
    and it will be converted into input_tensor(vectorized image) and input_ids(tokenized string).
    Args:
        dataset_name_or_path: name of dataset (available at huggingface.co/datasets) or the path containing image files and metadata.jsonl
        max_length: the max number of tokens for the target sequences
        split: whether to load "train", "validation" or "test" split
        ignore_id: ignore_index for torch.nn.CrossEntropyLoss
        task_start_token: the special token to be fed to the decoder to conduct the target task
        prompt_end_token: the special token at the end of the sequences
        sort_json_key: whether or not to sort the JSON keys
    """

    def __init__(
        self,
        dataset_name_or_path: str,
        max_length: int,
        split: str = "train",
        ignore_id: int = -100,
        task_start_token: str = "<s>",
        prompt_end_token: str = None,
        sort_json_key: bool = True,
    ):
        super().__init__()

        model, processor, dataset, config, image_size, p1 = prepare_job()

        self.max_length = max_length
        self.split = split
        self.ignore_id = ignore_id
        self.task_start_token = task_start_token
        self.prompt_end_token = prompt_end_token if prompt_end_token else task_start_token
        self.sort_json_key = sort_json_key

        self.dataset = load_dataset(dataset_name_or_path, split=self.split)
        self.dataset_length = len(self.dataset)

        self.gt_token_sequences = []
        for sample in self.dataset:
            ground_truth = json.loads(sample["ground_truth"])
            if "gt_parses" in ground_truth:  # when multiple ground truths are available, e.g., docvqa
                assert isinstance(ground_truth["gt_parses"], list)
                gt_jsons = ground_truth["gt_parses"]
            else:
                assert "gt_parse" in ground_truth and isinstance(ground_truth["gt_parse"], dict)
                gt_jsons = [ground_truth["gt_parse"]]

            self.gt_token_sequences.append(
                [
                    self.json2token(
                        gt_json,
                        update_special_tokens_for_json_key=self.split == "train",
                        sort_json_key=self.sort_json_key,
                    )
                    + processor.tokenizer.eos_token
                    for gt_json in gt_jsons  # load json from list of json
                ]
            )

        self.add_tokens([self.task_start_token, self.prompt_end_token])
        self.prompt_end_token_id = processor.tokenizer.convert_tokens_to_ids(self.prompt_end_token)

    def json2token(self, obj: Any, update_special_tokens_for_json_key: bool = True, sort_json_key: bool = True):
        """
        Convert an ordered JSON object into a token sequence
        """
        if type(obj) == dict:
            if len(obj) == 1 and "text_sequence" in obj:
                return obj["text_sequence"]
            else:
                output = ""
                if sort_json_key:
                    keys = sorted(obj.keys(), reverse=True)
                else:
                    keys = obj.keys()
                for k in keys:
                    if update_special_tokens_for_json_key:
                        self.add_tokens([fr"<s_{k}>", fr"</s_{k}>"])
                    output += (
                        fr"<s_{k}>"
                        + self.json2token(obj[k], update_special_tokens_for_json_key, sort_json_key)
                        + fr"</s_{k}>"
                    )
                return output
        elif type(obj) == list:
            return r"<sep/>".join(
                [self.json2token(item, update_special_tokens_for_json_key, sort_json_key) for item in obj]
            )
        else:
            obj = str(obj)
            if f"<{obj}/>" in added_tokens:
                obj = f"<{obj}/>"  # for categorical special tokens
            return obj

    def add_tokens(self, list_of_tokens: List[str]):
        """
        Add special tokens to tokenizer and resize the token embeddings of the decoder
        """
        model, processor, dataset, config, image_size, p1 = prepare_job()

        newly_added_num = processor.tokenizer.add_tokens(list_of_tokens)
        if newly_added_num > 0:
            model.decoder.resize_token_embeddings(len(processor.tokenizer))
            added_tokens.extend(list_of_tokens)

    def __len__(self) -> int:
        return self.dataset_length

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Load image from image_path of given dataset_path and convert into input_tensor and labels
        Convert gt data into input_ids (tokenized string)
        Returns:
            input_tensor : preprocessed image
            input_ids : tokenized gt_data
            labels : masked labels (model doesn't need to predict prompt and pad token)
        """

        model, processor, dataset, config, image_size, p1 = prepare_job()

        sample = self.dataset[idx]

        # inputs
        pixel_values = processor(sample["image"], random_padding=self.split == "train",
                                 return_tensors="pt").pixel_values
        pixel_values = pixel_values.squeeze()

        # targets
        target_sequence = random.choice(self.gt_token_sequences[idx])  # can be more than one, e.g., DocVQA Task 1
        input_ids = processor.tokenizer(
            target_sequence,
            add_special_tokens=False,
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )["input_ids"].squeeze(0)

        labels = input_ids.clone()
        labels[labels == processor.tokenizer.pad_token_id] = self.ignore_id  # model doesn't need to predict pad token
        # labels[: torch.nonzero(labels == self.prompt_end_token_id).sum() + 1] = self.ignore_id  # model doesn't need to predict prompt (for VQA)
        return pixel_values, labels, target_sequence


def build_data_loaders():
    print("Building data loaders...")

    model, processor, dataset, config, image_size, max_length = prepare_job()

    # we update some settings which differ from pretraining; namely the size of the images + no rotation required
    # source: https://github.com/clovaai/donut/blob/master/config/train_cord.yaml
    processor.feature_extractor.size = image_size[::-1]  # should be (width, height)
    processor.feature_extractor.do_align_long_axis = False

    train_dataset = DonutDataset(dataset_name, max_length=max_length,
                                 split="train", task_start_token="<s_cord-v2>", prompt_end_token="<s_cord-v2>",
                                 sort_json_key=False,  # cord dataset is preprocessed, so no need for this
                                 )

    val_dataset = DonutDataset(dataset_name, max_length=max_length,
                               split="validation", task_start_token="<s_cord-v2>", prompt_end_token="<s_cord-v2>",
                               sort_json_key=False,  # cord dataset is preprocessed, so no need for this
                               )

    model.config.pad_token_id = processor.tokenizer.pad_token_id
    model.config.decoder_start_token_id = processor.tokenizer.convert_tokens_to_ids(['<s_cord-v2>'])[0]

    # feel free to increase the batch size if you have a lot of memory
    # I'm fine-tuning on Colab and given the large image size, batch size > 1 is not feasible
    # Set num_workers=4
    train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=4)
    val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=4)

    return train_dataloader, val_dataloader, max_length


class DonutModelPLModule(pl.LightningModule):
    def __init__(self, config, processor, model):
        super().__init__()
        self.config = config
        self.processor = processor
        self.model = model

        self.train_dataloader, self.val_dataloader, self.max_length = build_data_loaders()

    def training_step(self, batch, batch_idx):
        pixel_values, labels, _ = batch

        outputs = self.model(pixel_values, labels=labels)
        loss = outputs.loss
        self.log_dict({"train_loss": loss}, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx, dataset_idx=0):
        pixel_values, labels, answers = batch
        batch_size = pixel_values.shape[0]
        # we feed the prompt to the model
        decoder_input_ids = torch.full((batch_size, 1), self.model.config.decoder_start_token_id, device=self.device)

        outputs = self.model.generate(pixel_values,
                                      decoder_input_ids=decoder_input_ids,
                                      max_length=self.max_length,
                                      early_stopping=True,
                                      pad_token_id=self.processor.tokenizer.pad_token_id,
                                      eos_token_id=self.processor.tokenizer.eos_token_id,
                                      use_cache=True,
                                      num_beams=1,
                                      bad_words_ids=[[self.processor.tokenizer.unk_token_id]],
                                      return_dict_in_generate=True, )

        predictions = []
        for seq in self.processor.tokenizer.batch_decode(outputs.sequences):
            seq = seq.replace(self.processor.tokenizer.eos_token, "").replace(self.processor.tokenizer.pad_token, "")
            seq = re.sub(r"<.*?>", "", seq, count=1).strip()  # remove first task start token
            predictions.append(seq)

        scores = list()
        for pred, answer in zip(predictions, answers):
            pred = re.sub(r"(?:(?<=>) | (?=</s_))", "", pred)
            # NOT NEEDED ANYMORE
            # answer = re.sub(r"<.*?>", "", answer, count=1)
            answer = answer.replace(self.processor.tokenizer.eos_token, "")
            scores.append(edit_distance(pred, answer) / max(len(pred), len(answer)))

            if self.config.get("verbose", False) and len(scores) == 1:
                print(f"Prediction: {pred}")
                print(f"    Answer: {answer}")
                print(f" Normed ED: {scores[0]}")

        return scores

    def validation_epoch_end(self, validation_step_outputs):
        # I set this to 1 manually
        # (previously set to len(self.config.dataset_name_or_paths))
        num_of_loaders = 1
        if num_of_loaders == 1:
            validation_step_outputs = [validation_step_outputs]
        assert len(validation_step_outputs) == num_of_loaders
        cnt = [0] * num_of_loaders
        total_metric = [0] * num_of_loaders
        val_metric = [0] * num_of_loaders
        for i, results in enumerate(validation_step_outputs):
            for scores in results:
                cnt[i] += len(scores)
                total_metric[i] += np.sum(scores)
            val_metric[i] = total_metric[i] / cnt[i]
            val_metric_name = f"val_metric_{i}th_dataset"
            self.log_dict({val_metric_name: val_metric[i]}, sync_dist=True)
        self.log_dict({"val_metric": np.sum(total_metric) / np.sum(cnt)}, sync_dist=True)

    def configure_optimizers(self):
        # TODO add scheduler
        optimizer = torch.optim.Adam(self.parameters(), lr=self.config.get("lr"))

        return optimizer

    def train_dataloader(self):
        return self.train_dataloader

    def val_dataloader(self):
        return self.val_dataloader


class PushToHubCallback(Callback):
    def on_train_epoch_end(self, trainer, pl_module):
        print(f"Pushing model to the hub, epoch {trainer.current_epoch}")
        pl_module.model.push_to_hub(model_name,
                                    commit_message=f"Training in progress, epoch {trainer.current_epoch}")

    def on_train_end(self, trainer, pl_module):
        print(f"Pushing model to the hub after training")
        pl_module.processor.push_to_hub(model_name,
                                        commit_message=f"Training done")
        pl_module.model.push_to_hub(model_name,
                                    commit_message=f"Training done")


def run_training_donut(max_epochs_param, val_check_interval_param, warmup_steps_param):
    worker_pid = os.getpid()
    print(f"Handling training request with worker PID: {worker_pid}")

    start_time = time.time()

    # Set epochs = 30
    # Set num_training_samples_per_epoch = training set size
    # Set val_check_interval = 0.4
    # Set warmup_steps: 425 / 8 = 54, 54 * 10 = 540, 540 * 0.15 = 81
    config_params = {"max_epochs": max_epochs_param,
                     "val_check_interval": val_check_interval_param,  # how many times we want to validate during an epoch
                     "check_val_every_n_epoch": 1,
                     "gradient_clip_val": 1.0,
                     "num_training_samples_per_epoch": 425,
                     "lr": 3e-5,
                     "train_batch_sizes": [8],
                     "val_batch_sizes": [1],
                     # "seed":2022,
                     "num_nodes": 1,
                     "warmup_steps": warmup_steps_param,  # 425 / 8 = 54, 54 * 10 = 540, 540 * 0.15 = 81
                     "result_path": "./result",
                     "verbose": False,
                     }

    model, processor, dataset, config, image_size, p1 = prepare_job()

    model_module = DonutModelPLModule(config, processor, model)

    # wandb_logger = WandbLogger(project="sparrow", name="invoices-donut-v5")

    # trainer = pl.Trainer(
    #     accelerator="gpu",
    #     devices=1,
    #     max_epochs=config_params.get("max_epochs"),
    #     val_check_interval=config_params.get("val_check_interval"),
    #     check_val_every_n_epoch=config_params.get("check_val_every_n_epoch"),
    #     gradient_clip_val=config_params.get("gradient_clip_val"),
    #     precision=16, # we'll use mixed precision
    #     num_sanity_val_steps=0,
    #     # logger=wandb_logger,
    #     callbacks=[PushToHubCallback()],
    # )

    # trainer.fit(model_module)

    end_time = time.time()
    processing_time = end_time - start_time

    print(f"Training done, worker PID: {worker_pid}")

    return processing_time
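For illustration (not part of the commit), run_training_donut can be invoked directly with the same defaults that the /training form exposes; note that in this revision the pl.Trainer block is commented out, so the call only prepares the model, processor and data loaders:

# Sketch only: trigger a training run outside of FastAPI.
from routers.donut_training import run_training_donut

processing_time = run_training_donut(max_epochs_param=30,
                                     val_check_interval_param=0.4,
                                     warmup_steps_param=81)
print(f"Training request handled in {processing_time:.1f} seconds")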
routers/inference.py
CHANGED
@@ -57,7 +57,7 @@ async def run_inference(file: Optional[UploadFile] = File(None), image_url: Opti
         # parse file name from url
         file_name = image_url.split("/")[-1]
         utils.log_stats(settings.inference_stats_file, [processing_time, count_values(result), file_name, settings.model])
-        print(f"Processing time: {processing_time:.2f} seconds")
+        print(f"Processing time inference: {processing_time:.2f} seconds")
     else:
         result = {"info": "No input provided"}
 
@@ -78,4 +78,4 @@ async def get_statistics():
     else:
         content = []
 
-    return content
+    return content
routers/training.py
CHANGED
@@ -1,21 +1,79 @@
-from fastapi import APIRouter
+from fastapi import APIRouter, Form, BackgroundTasks
 from config import settings
 import os
 import json
+from routers.donut_evaluate import run_evaluate_donut
+from routers.donut_training import run_training_donut
+import utils
 
 
 router = APIRouter()
 
 
-
-
-
-
-
+def invoke_training(max_epochs, val_check_interval, warmup_steps, model_in_use, sparrow_key):
+    if sparrow_key != settings.sparrow_key:
+        return {"error": "Invalid Sparrow key."}
+
+    if model_in_use == 'donut':
+        processing_time = run_training_donut(max_epochs, val_check_interval, warmup_steps)
+        utils.log_stats(settings.training_stats_file, [processing_time, settings.model])
+        print(f"Processing time training: {processing_time:.2f} seconds")
+
+
+@router.post("/training")
+async def run_training(background_tasks: BackgroundTasks,
+                       max_epochs: int = Form(30),
+                       val_check_interval: float = Form(0.4),
+                       warmup_steps: int = Form(81),
+                       model_in_use: str = Form('donut'),
+                       sparrow_key: str = Form(None)):
+
+    background_tasks.add_task(invoke_training, max_epochs, val_check_interval, warmup_steps, model_in_use, sparrow_key)
+
+    return {"message": "Sparrow ML training started in the background"}
+
+
+def invoke_evaluate(model_in_use, sparrow_key):
+    if sparrow_key != settings.sparrow_key:
+        return {"error": "Invalid Sparrow key."}
+
+    if model_in_use == 'donut':
+        scores, accuracy, processing_time = run_evaluate_donut()
+        utils.log_stats(settings.evaluate_stats_file, [processing_time, scores, accuracy, settings.model])
+        print(f"Processing time evaluate: {processing_time:.2f} seconds")
+
+
+@router.post("/evaluate")
+async def run_evaluate(background_tasks: BackgroundTasks,
+                       model_in_use: str = Form('donut'),
+                       sparrow_key: str = Form(None)):
+
+    background_tasks.add_task(invoke_evaluate, model_in_use, sparrow_key)
+
+    return {"message": "Sparrow ML model evaluation started in the background"}
+
+
+@router.get("/statistics/training")
+async def get_statistics_training():
     file_path = settings.training_stats_file
 
+    # Check if the file exists, and read its content
+    if os.path.exists(file_path):
+        with open(file_path, 'r') as file:
+            try:
+                content = json.load(file)
+            except json.JSONDecodeError:
+                content = []
+    else:
+        content = []
+
+    return content
+
+
+@router.get("/statistics/evaluate")
+async def get_statistics_evaluate():
+    file_path = settings.evaluate_stats_file
+
     # Check if the file exists, and read its content
     if os.path.exists(file_path):
         with open(file_path, 'r') as file: