apsora committed on
Commit
7084745
·
verified ·
1 Parent(s): 1d06bc3

Upload folder using huggingface_hub

Browse files
.config/.last_opt_in_prompt.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
.config/.last_survey_prompt.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ last_prompt_time: 1760622052.5923831
.config/.last_update_check.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"last_update_check_time": 1760622059.1239753, "last_update_check_revision": 20251010143653, "notifications": [], "last_nag_times": {}}
.config/active_config ADDED
@@ -0,0 +1 @@
 
 
1
+ default
.config/config_sentinel ADDED
File without changes
.config/configurations/config_default ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ [component_manager]
2
+ disable_update_check = true
3
+
4
+ [compute]
5
+ gce_metadata_read_timeout_sec = 0
6
+
.config/default_configs.db ADDED
Binary file (12.3 kB). View file
 
.config/gce ADDED
@@ -0,0 +1 @@
 
 
1
+ False
.config/hidden_gcloud_config_universe_descriptor_data_cache_configs.db ADDED
Binary file (12.3 kB). View file
 
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ sample_data/mnist_test.csv filter=lfs diff=lfs merge=lfs -text
37
+ sample_data/mnist_train_small.csv filter=lfs diff=lfs merge=lfs -text
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
app.py CHANGED
@@ -1,11 +1,15 @@
1
-
2
- import pathlib, shutil, zipfile
3
  import pandas as pd
4
  import gradio as gr
 
5
  from huggingface_hub import hf_hub_download
6
  from autogluon.tabular import TabularPredictor
7
 
8
- # ====== Settings (edit if your repo/zip name changes) ======
 
 
 
 
9
  MODEL_REPO_ID = "Iris314/classical-automl-model"
10
  ZIP_FILENAME = "lego_predictor_dir.zip"
11
 
@@ -18,7 +22,7 @@ COLUMN_ALIAS = {
18
  }
19
  FEATURE_COLS_UI = ["Length", "Height", "Width", "Studs"]
20
 
21
- # ====== Load predictor from the model repo ======
22
  CACHE_DIR = pathlib.Path("hf_cache"); EXTRACT_DIR = CACHE_DIR / "predictor"
23
  CACHE_DIR.mkdir(exist_ok=True, parents=True)
24
 
@@ -32,20 +36,26 @@ def load_predictor():
32
  )
33
  if EXTRACT_DIR.exists():
34
  shutil.rmtree(EXTRACT_DIR)
35
- EXTRACT_DIR.mkdir(parents=True, exist_ok=True)
36
  with zipfile.ZipFile(local_zip, "r") as zf:
37
  zf.extractall(EXTRACT_DIR)
38
  kids = list(EXTRACT_DIR.iterdir())
39
  path = kids[0] if len(kids) == 1 and kids[0].is_dir() else EXTRACT_DIR
40
  return TabularPredictor.load(str(path), require_py_version_match=False)
41
 
42
- PREDICTOR = load_predictor()
 
 
 
 
43
 
 
44
  def _cast_and_rename(row_dict):
45
  row = dict(row_dict)
46
  row["Length"] = float(row["Length"])
47
  row["Height"] = float(row["Height"])
48
  row["Width"] = float(row["Width"])
 
49
  row["Studs"] = int(round(float(row["Studs"])))
50
  X_ui = pd.DataFrame([row], columns=FEATURE_COLS_UI)
51
  X_model = X_ui.rename(columns=COLUMN_ALIAS)
@@ -53,40 +63,45 @@ def _cast_and_rename(row_dict):
53
 
54
  def classify_brick(length, height, width, studs):
55
  try:
56
- X = _cast_and_rename({"Length": length, "Height": height, "Width": width, "Studs": studs})
57
- pred = PREDICTOR.predict(X)
58
- pred_val = pred.iloc[0] if hasattr(pred, "iloc") else pred
 
 
 
 
 
59
  try:
60
  proba = PREDICTOR.predict_proba(X)
61
  s = proba.iloc[0] if hasattr(proba, "iloc") else proba
62
  s = s.sort_values(ascending=False)
63
- s.index = [str(k) for k in s.index]
64
  return {k: float(v) for k, v in s.items()}
65
  except Exception:
 
 
66
  return {"prediction": str(pred_val)}
67
  except Exception as e:
68
- import traceback
69
- return {"error": f"{type(e).__name__}: {e}", "traceback": traceback.format_exc()}
 
 
70
 
71
- with gr.Blocks() as demo:
72
- gr.Markdown("# 🧱 LEGO Brick Classifier")
73
- gr.Markdown("Enter dimensions to predict **Standard / Flat / Sloped**. Uses a classmate's AutoGluon model trained on `aedupuga/lego-sizes`.")
74
- with gr.Row():
75
- length = gr.Slider(1, 10, step=0.1, value=4, label="Length")
76
- height = gr.Slider(0.2, 5, step=0.1, value=1.2, label="Height")
77
- with gr.Row():
78
- width = gr.Slider(1, 10, step=0.1, value=2, label="Width")
79
- studs = gr.Number(value=4, precision=0, label="Studs")
80
- out = gr.Label(num_top_classes=3, label="Predicted Class / Probabilities")
81
- gr.Button("Classify Brick", variant="primary").click(
82
- fn=classify_brick, inputs=[length, height, width, studs], outputs=out
83
- )
84
- gr.Examples(
85
- examples=[[4, 1.2, 2, 4],[2, 0.6, 2, 2],[3, 2.0, 2, 2]],
86
- inputs=[length, height, width, studs],
87
- label="Examples",
88
- cache_examples=False
89
- )
90
 
91
  if __name__ == "__main__":
 
92
  demo.launch()
 
1
+ import pathlib, shutil, zipfile, os, traceback
 
2
  import pandas as pd
3
  import gradio as gr
4
+
5
  from huggingface_hub import hf_hub_download
6
  from autogluon.tabular import TabularPredictor
7
 
8
+ # ---------------- UI copy ----------------
9
+ TITLE = "🧱 LEGO Brick Classifier"
10
+ DESC = "Predicts whether a LEGO piece is Standard, Flat, or Sloped from basic dimensions."
11
+
12
+ # ---------------- Settings ----------------
13
  MODEL_REPO_ID = "Iris314/classical-automl-model"
14
  ZIP_FILENAME = "lego_predictor_dir.zip"
15
 
 
22
  }
23
  FEATURE_COLS_UI = ["Length", "Height", "Width", "Studs"]
24
 
25
+ # ---------------- Load predictor ----------------
26
  CACHE_DIR = pathlib.Path("hf_cache"); EXTRACT_DIR = CACHE_DIR / "predictor"
27
  CACHE_DIR.mkdir(exist_ok=True, parents=True)
28
 
 
36
  )
37
  if EXTRACT_DIR.exists():
38
  shutil.rmtree(EXTRACT_DIR)
39
+ EXTRACT_DIR.mkdir(parents=True)
40
  with zipfile.ZipFile(local_zip, "r") as zf:
41
  zf.extractall(EXTRACT_DIR)
42
  kids = list(EXTRACT_DIR.iterdir())
43
  path = kids[0] if len(kids) == 1 and kids[0].is_dir() else EXTRACT_DIR
44
  return TabularPredictor.load(str(path), require_py_version_match=False)
45
 
46
+ try:
47
+ PREDICTOR = load_predictor()
48
+ except Exception as e:
49
+ PREDICTOR = None
50
+ print("Failed to load predictor:", e)
51
 
52
+ # ---------------- Helpers ----------------
53
  def _cast_and_rename(row_dict):
54
  row = dict(row_dict)
55
  row["Length"] = float(row["Length"])
56
  row["Height"] = float(row["Height"])
57
  row["Width"] = float(row["Width"])
58
+ # gr.Number returns float; round & cast for integer feature
59
  row["Studs"] = int(round(float(row["Studs"])))
60
  X_ui = pd.DataFrame([row], columns=FEATURE_COLS_UI)
61
  X_model = X_ui.rename(columns=COLUMN_ALIAS)
 
63
 
64
def classify_brick(length, height, width, studs):
    """Predict the brick class for the given dimensions.

    Returns a dict suitable for gr.Label: class -> probability when the
    predictor exposes probabilities, a single {"prediction": label} fallback
    otherwise, or an error payload if anything goes wrong.
    """
    try:
        if PREDICTOR is None:
            raise RuntimeError("Model failed to load on startup. Check model artifact path & runtime deps.")

        features = {"Length": length, "Height": height, "Width": width, "Studs": studs}
        X = _cast_and_rename(features)

        # Prefer class probabilities; fall back to a bare label if the
        # predictor does not support predict_proba on this input.
        try:
            proba = PREDICTOR.predict_proba(X)
            row = proba.iloc[0] if hasattr(proba, "iloc") else proba
            ranked = row.sort_values(ascending=False)
            ranked.index = [str(k) for k in ranked.index]  # ensure JSON-serializable keys
            return {label: float(score) for label, score in ranked.items()}
        except Exception:
            pred = PREDICTOR.predict(X)
            pred_val = pred.iloc[0] if hasattr(pred, "iloc") else pred
            return {"prediction": str(pred_val)}
    except Exception as e:
        # Surface the failure in the UI rather than raising into Gradio.
        return {
            "error": f"{type(e).__name__}: {e}",
            "traceback": traceback.format_exc(limit=1)
        }
89
 
90
# ---------------- Gradio ----------------
# Simple form UI: three sliders plus a stud count, labelled probabilities out.
_brick_inputs = [
    gr.Slider(1, 10, step=0.1, value=4, label="Length"),
    gr.Slider(0.2, 5, step=0.1, value=1.2, label="Height"),
    gr.Slider(1, 10, step=0.1, value=2, label="Width"),
    gr.Number(value=4, precision=0, label="Studs"),
]

demo = gr.Interface(
    fn=classify_brick,
    inputs=_brick_inputs,
    outputs=gr.Label(num_top_classes=3, label="Predicted Class / Probabilities"),
    examples=[[4, 1.2, 2, 4], [2, 0.6, 2, 2], [3, 2.0, 2, 2]],
    title=TITLE,
    description=DESC
)

if __name__ == "__main__":
    # In Spaces, no share=True needed
    demo.launch()
sample_data/README.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This directory includes a few sample datasets to get you started.
2
+
3
+ * `california_housing_data*.csv` is California housing data from the 1990 US
4
+ Census; more information is available at:
5
+ https://docs.google.com/document/d/e/2PACX-1vRhYtsvc5eOR2FWNCwaBiKL6suIOrxJig8LcSBbmCbyYsayia_DvPOOBlXZ4CAlQ5nlDD8kTaIDRwrN/pub
6
+
7
+ * `mnist_*.csv` is a small sample of the
8
+ [MNIST database](https://en.wikipedia.org/wiki/MNIST_database), which is
9
+ described at: http://yann.lecun.com/exdb/mnist/
10
+
11
+ * `anscombe.json` contains a copy of
12
+ [Anscombe's quartet](https://en.wikipedia.org/wiki/Anscombe%27s_quartet); it
13
+ was originally described in
14
+
15
+ Anscombe, F. J. (1973). 'Graphs in Statistical Analysis'. American
16
+ Statistician. 27 (1): 17-21. JSTOR 2682899.
17
+
18
+ and our copy was prepared by the
19
+ [vega_datasets library](https://github.com/altair-viz/vega_datasets/blob/4f67bdaad10f45e3549984e17e1b3088c731503d/vega_datasets/_data/anscombe.json).
sample_data/anscombe.json ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {"Series":"I", "X":10.0, "Y":8.04},
3
+ {"Series":"I", "X":8.0, "Y":6.95},
4
+ {"Series":"I", "X":13.0, "Y":7.58},
5
+ {"Series":"I", "X":9.0, "Y":8.81},
6
+ {"Series":"I", "X":11.0, "Y":8.33},
7
+ {"Series":"I", "X":14.0, "Y":9.96},
8
+ {"Series":"I", "X":6.0, "Y":7.24},
9
+ {"Series":"I", "X":4.0, "Y":4.26},
10
+ {"Series":"I", "X":12.0, "Y":10.84},
11
+ {"Series":"I", "X":7.0, "Y":4.81},
12
+ {"Series":"I", "X":5.0, "Y":5.68},
13
+
14
+ {"Series":"II", "X":10.0, "Y":9.14},
15
+ {"Series":"II", "X":8.0, "Y":8.14},
16
+ {"Series":"II", "X":13.0, "Y":8.74},
17
+ {"Series":"II", "X":9.0, "Y":8.77},
18
+ {"Series":"II", "X":11.0, "Y":9.26},
19
+ {"Series":"II", "X":14.0, "Y":8.10},
20
+ {"Series":"II", "X":6.0, "Y":6.13},
21
+ {"Series":"II", "X":4.0, "Y":3.10},
22
+ {"Series":"II", "X":12.0, "Y":9.13},
23
+ {"Series":"II", "X":7.0, "Y":7.26},
24
+ {"Series":"II", "X":5.0, "Y":4.74},
25
+
26
+ {"Series":"III", "X":10.0, "Y":7.46},
27
+ {"Series":"III", "X":8.0, "Y":6.77},
28
+ {"Series":"III", "X":13.0, "Y":12.74},
29
+ {"Series":"III", "X":9.0, "Y":7.11},
30
+ {"Series":"III", "X":11.0, "Y":7.81},
31
+ {"Series":"III", "X":14.0, "Y":8.84},
32
+ {"Series":"III", "X":6.0, "Y":6.08},
33
+ {"Series":"III", "X":4.0, "Y":5.39},
34
+ {"Series":"III", "X":12.0, "Y":8.15},
35
+ {"Series":"III", "X":7.0, "Y":6.42},
36
+ {"Series":"III", "X":5.0, "Y":5.73},
37
+
38
+ {"Series":"IV", "X":8.0, "Y":6.58},
39
+ {"Series":"IV", "X":8.0, "Y":5.76},
40
+ {"Series":"IV", "X":8.0, "Y":7.71},
41
+ {"Series":"IV", "X":8.0, "Y":8.84},
42
+ {"Series":"IV", "X":8.0, "Y":8.47},
43
+ {"Series":"IV", "X":8.0, "Y":7.04},
44
+ {"Series":"IV", "X":8.0, "Y":5.25},
45
+ {"Series":"IV", "X":19.0, "Y":12.50},
46
+ {"Series":"IV", "X":8.0, "Y":5.56},
47
+ {"Series":"IV", "X":8.0, "Y":7.91},
48
+ {"Series":"IV", "X":8.0, "Y":6.89}
49
+ ]
sample_data/california_housing_test.csv ADDED
The diff for this file is too large to render. See raw diff
 
sample_data/california_housing_train.csv ADDED
The diff for this file is too large to render. See raw diff
 
sample_data/mnist_test.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51c292478d94ec3a01461bdfa82eb0885d262eb09e615679b2d69dedb6ad09e7
3
+ size 18289443
sample_data/mnist_train_small.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ef64781aa03180f4f5ce504314f058f5d0227277df86060473d973cf43b033e
3
+ size 36523880