beelzeebuub committed
Commit 1ca266a
1 Parent(s): f6081d3

upload files manually

Files changed:
- .gitattributes +27 -8
- config.json +13 -0
- pipeline.py +65 -7
- requirements.txt +4 -0
- textfile3-2.pk1 +3 -0
.gitattributes
CHANGED
@@ -1,17 +1,36 @@
-*.
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tar.gz filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.pk1 filter=lfs diff=lfs merge=lfs -text
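The one pattern added beyond the stock Hub list is `*.pk1`, which is what routes the ~47 MB pickled learner below into Git LFS instead of plain git. A minimal sketch of which files in this commit the new patterns would catch; `fnmatch` is only a rough stand-in for gitattributes glob semantics and the pattern subset is illustrative:

import fnmatch

# Subset of the patterns above, chosen for illustration only.
patterns = ["*.pk1", "*.pkl", "*.pickle", "*.safetensors"]
files = ["textfile3-2.pk1", "config.json", "pipeline.py", "requirements.txt"]

for name in files:
    lfs = any(fnmatch.fnmatch(name, p) for p in patterns)
    print(f"{name}: {'LFS-tracked' if lfs else 'plain git'}")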
config.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "id2label": {
+    "0": "1",
+    "1": "1-2",
+    "2": "2",
+    "3": "2-3",
+    "4": "3",
+    "5": "3-4",
+    "6": "4",
+    "7": "4-5",
+    "8": "5"
+  }
+}
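The config holds only the `id2label` table: nine class indices mapped to ordinal rating bands from "1" to "5", including the half-step labels in between. A minimal sketch of how such a table is typically consumed (argmax over class scores, then a string-keyed lookup); the scores below are invented for illustration:

import json

# Illustrative only: the real scores come from the model in pipeline.py.
with open("config.json") as f:
    id2label = json.load(f)["id2label"]

scores = [0.01, 0.02, 0.05, 0.10, 0.50, 0.20, 0.07, 0.03, 0.02]  # made-up probabilities
best = max(range(len(scores)), key=scores.__getitem__)
print(id2label[str(best)])  # index 4 -> "3"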
pipeline.py
CHANGED
@@ -1,16 +1,62 @@
 from typing import Dict, List, Any
 from PIL import Image
 
+import os
+import json
+import numpy as np
+import torch
+from fastai.learner import load_learner, Metric
+
+class OrdinalRegressionMetric(Metric):
+    def __init__(self):
+        super().__init__()
+        self.total = 0
+        self.count = 0
+
+    def accumulate(self, learn):
+        # Get predictions and targets
+        preds, targs = learn.pred, learn.y
+
+        # Convert predictions and targets to numeric class indices
+        preds_numeric = torch.argmax(preds, dim=1)
+        targs_numeric = targs
+
+        # Distance between predicted and true ordinal positions
+        squared_diff = torch.sum(torch.sqrt((preds_numeric - targs_numeric) ** 2))
+
+        # Normalize by the maximum possible difference in this batch
+        max_diff = torch.sqrt((torch.max(targs_numeric) - torch.min(targs_numeric)) ** 2)
+
+        # Update the running totals
+        self.total += squared_diff
+        self.count += max_diff
+
+    @property
+    def value(self):
+        if self.count == 0:
+            return 0.0  # or handle this case appropriately
+        # Calculate the normalized metric value
+        metric_value = 1 / (self.total / self.count)
+        return metric_value
+
 class PreTrainedPipeline():
     def __init__(self, path=""):
         # IMPLEMENT_THIS
         # Preload all the elements you are going to need at inference.
         # For instance your model, processors, tokenizer that might be needed.
         # This function is only called once, so do all the heavy processing I/O here
-
-
-
-
+        self.model = load_learner(os.path.join(path, "textfile3-2.pk1"))
+        with open(os.path.join(path, "config.json")) as config:
+            config = json.load(config)
+        self.id2label = config["id2label"]
+
     def __call__(self, inputs: "Image.Image") -> List[Dict[str, Any]]:
         """
         Args:
@@ -22,6 +68,18 @@ class PreTrainedPipeline():
         It is preferred if the returned list is in decreasing `score` order
         """
         # IMPLEMENT_THIS
-
-
-)
+        # fastai expects a NumPy array, not a PIL Image
+        _, _, preds = self.model.predict(np.array(inputs))
+        preds = preds.tolist()
+        labels = [
+            {"label": str(self.id2label["0"]), "score": preds[0]},
+            {"label": str(self.id2label["1"]), "score": preds[1]},
+            {"label": str(self.id2label["2"]), "score": preds[2]},
+            {"label": str(self.id2label["3"]), "score": preds[3]},
+            {"label": str(self.id2label["4"]), "score": preds[4]},
+            {"label": str(self.id2label["5"]), "score": preds[5]},
+            {"label": str(self.id2label["6"]), "score": preds[6]},
+            {"label": str(self.id2label["7"]), "score": preds[7]},
+            {"label": str(self.id2label["8"]), "score": preds[8]},
+        ]
+        return labels
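For context on how these pieces fit together: `load_learner` unpickles the exported fastai learner, and since the learner appears to have been trained with `OrdinalRegressionMetric` attached, that class likely needs to be importable at load time, which is presumably why it is redefined in pipeline.py. A minimal local sketch of exercising the pipeline outside the Inference API; the image filename is a placeholder and the repository files are assumed to sit in the current directory:

# Minimal local smoke test (assumes textfile3-2.pk1, config.json and
# pipeline.py are in the working directory; "example.jpg" is a placeholder).
from PIL import Image
from pipeline import PreTrainedPipeline

pipe = PreTrainedPipeline(path=".")
img = Image.open("example.jpg")

results = pipe(img)
for item in sorted(results, key=lambda d: d["score"], reverse=True):
    print(f'{item["label"]}: {item["score"]:.4f}')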
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+fastai
+torch
+gradio
+numpy
textfile3-2.pk1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c86bf454f4bcf7dd0fba8c4628ac89872ce3431fb4d2997ccbdbab5ba70c3dbb
+size 46998504
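What git stores here is only the LFS pointer (spec version, SHA-256 oid, and the ~47 MB size); the learner itself lives in LFS storage. A hedged sketch of fetching the real file with `huggingface_hub`; the repo id below is a placeholder, since only the username beelzeebuub is visible in this commit:

# Downloads the LFS-backed learner file rather than the pointer text.
# "beelzeebuub/<repo-name>" is a placeholder repo id, not taken from this page.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="beelzeebuub/<repo-name>",  # hypothetical; fill in the actual repo
    filename="textfile3-2.pk1",
)
print(local_path)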