Keeby-smilyai committed on
Commit
5abbf47
Β·
verified Β·
1 Parent(s): ff5d079

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -33
app.py CHANGED
@@ -1,7 +1,9 @@
1
- # app.py β€” FULL WORKING VERSION β€” WITH USER DESCRIPTION FOR PUBLISH
2
  import gradio as gr
 
3
  from backend import (
4
- verify_hf_token,
 
5
  get_user_runs,
6
  get_run_logs,
7
  queue_training_run,
@@ -11,28 +13,37 @@ from backend import (
11
  )
12
  from utils import ARCH_ANALOGIES, get_auto_hyperparams
13
 
14
- # ------------------------------ STATE ------------------------------
15
- user_state = {"user_id": None, "hf_token": "", "arch_config": {}}
 
16
 
17
  # ------------------------------ BACKEND WRAPPERS (UI LOGIC) ------------------------------
18
 
19
- def page_login(hf_token):
20
- user_id, msg = verify_hf_token(hf_token)
 
21
  if user_id:
22
  user_state["user_id"] = user_id
23
- user_state["hf_token"] = hf_token
24
  runs_list = page_processes()
25
  return gr.update(visible=False), gr.update(visible=True), msg, runs_list
26
  else:
27
  return gr.update(), gr.update(), msg, ""
28
 
 
 
 
 
 
 
 
 
 
 
29
  def page_processes():
30
  if not user_state.get("user_id"): return "Login required."
31
  runs = get_user_runs(user_state["user_id"])
32
- run_list = "\n".join([
33
- f"🍳 Run #{r[0]} | {r[1].upper()} x{r[2]} layers | Status: {r[3]}"
34
- for r in runs
35
- ]) or "No runs yet. Start cooking!"
36
  return run_list
37
 
38
  def load_run_logs(run_id_str):
@@ -60,7 +71,7 @@ def page_hyperparams_next(lr, epochs, batch_size):
60
  "batch_size": int(batch_size) if batch_size else config["auto_config"]["batch_size"],
61
  }
62
  queue_training_run(user_state["user_id"], final_config)
63
- start_training_if_free()
64
  refreshed_runs = page_processes()
65
  return gr.update(visible=False), gr.update(visible=True), refreshed_runs
66
 
@@ -89,11 +100,11 @@ def inference_action(run_id, prompt):
89
  try: return run_inference(run_id, prompt)
90
  except Exception as e: return f"Error: {str(e)}"
91
 
92
- def publish_action(run_id, description):
 
93
  if not run_id: return "Error: Please select a model to publish."
 
94
  try:
95
- hf_token = user_state["hf_token"]
96
- # Find run details to create a nice repo name
97
  run_details = next((r for r in get_completed_runs() if r[0] == run_id), None)
98
  repo_name = f"llm-kitchen-{run_details[1]}-{run_details[2]}L-run{run_id}" if run_details else f"llm-kitchen-run-{run_id}"
99
  url = publish_run_to_hub(run_id, hf_token, repo_name, description.strip())
@@ -107,20 +118,31 @@ with gr.Blocks(title="LLM Kitchen 🍳", theme=gr.themes.Soft()) as demo:
107
  gr.Markdown("# 🍳 Welcome to LLM Kitchen")
108
  gr.Markdown("### Cook your own language model β€” from scratch!")
109
 
110
- # ---- PAGE 1: LOGIN ----
 
 
 
 
 
 
 
 
 
111
  with gr.Group() as page_login_ui:
112
- gr.Markdown("### πŸ” Step 1: Login with Hugging Face Token")
113
- token_input = gr.Textbox(label="HF Token (with write permissions)", type="password")
114
- login_btn = gr.Button("Login to Kitchen", variant="primary")
 
115
  login_msg = gr.Markdown()
 
116
 
117
- # ---- PAGE 2: PROCESSES ----
118
  with gr.Group(visible=False) as page_processes_ui:
119
  gr.Markdown("### πŸ§‘β€πŸ³ Your Processes")
120
  with gr.Row():
121
  refresh_btn = gr.Button("πŸ”„ Refresh List")
122
  inference_btn = gr.Button("πŸ§ͺ Go to Inference Kitchen")
123
- publish_btn = gr.Button("πŸš€ Go to Publishing Bay") # NEW: Always visible publish button
124
  runs_display = gr.Textbox(label="Your Training Runs", lines=8, interactive=False)
125
  with gr.Accordion("View Raw Logs", open=False):
126
  run_id_input = gr.Textbox(label="Enter a Run ID to view its logs")
@@ -128,7 +150,7 @@ with gr.Blocks(title="LLM Kitchen 🍳", theme=gr.themes.Soft()) as demo:
128
  logs_display = gr.Textbox(label="Training Logs", lines=10, interactive=False)
129
  new_run_btn = gr.Button("βž• Start New Process", variant="primary")
130
 
131
- # ---- PAGE 3: INFERENCE KITCHEN ----
132
  with gr.Group(visible=False) as page_inference_ui:
133
  gr.Markdown("### πŸ§ͺ Inference Kitchen")
134
  inf_run_id_dropdown = gr.Dropdown(label="Select a Completed Model to Use")
@@ -137,16 +159,18 @@ with gr.Blocks(title="LLM Kitchen 🍳", theme=gr.themes.Soft()) as demo:
137
  output_text = gr.Textbox(label="Model's Answer", lines=5, interactive=False)
138
  back_from_inf = gr.Button("⬅️ Back to Processes")
139
 
140
- # ---- NEW PAGE 4: PUBLISHING BAY ----
141
  with gr.Group(visible=False) as page_publish_ui:
142
  gr.Markdown("### πŸš€ Publishing Bay")
143
  pub_run_id_dropdown = gr.Dropdown(label="Select a Completed Model to Publish")
 
 
144
  pub_description_input = gr.Textbox(label="Model Card Description", lines=4, placeholder="Write a short description for your model card...")
145
  publish_now_btn = gr.Button("Publish to Hugging Face Hub", variant="primary")
146
  publish_status = gr.Markdown()
147
  back_from_pub = gr.Button("⬅️ Back to Processes")
148
 
149
- # ---- PAGE 5: ARCHITECTURE ----
150
  with gr.Group(visible=False) as page_arch_ui:
151
  gr.Markdown("### πŸ—οΈ Step 2: Choose Your Architecture")
152
  arch_dropdown = gr.Dropdown(["cnn", "rnn", "transformer"], label="Architecture Type")
@@ -154,8 +178,6 @@ with gr.Blocks(title="LLM Kitchen 🍳", theme=gr.themes.Soft()) as demo:
154
  arch_next_btn = gr.Button("Next β†’ Hyperparameters")
155
  arch_analogy = gr.Markdown()
156
  auto_suggestion = gr.Markdown()
157
-
158
- # ---- PAGE 6: HYPERPARAMETERS ----
159
  with gr.Group(visible=False) as page_hyper_ui:
160
  gr.Markdown("### πŸ§‚ Step 3: Season Your Model (Hyperparameters)")
161
  gr.Markdown("Use Auto-Seasoningβ„’ (pre-filled) or customize manually")
@@ -164,23 +186,27 @@ with gr.Blocks(title="LLM Kitchen 🍳", theme=gr.themes.Soft()) as demo:
164
  batch_input = gr.Number(label="Batch Size", precision=0)
165
  hyper_next_btn = gr.Button("Start Cooking! 🍲")
166
 
167
- # ------------------------------ EVENTS ------------------------------
 
 
 
 
 
 
168
 
169
- login_btn.click(page_login, inputs=token_input, outputs=[page_login_ui, page_processes_ui, login_msg, runs_display])
170
  refresh_btn.click(page_processes, outputs=runs_display)
171
  view_logs_btn.click(load_run_logs, inputs=run_id_input, outputs=logs_display)
172
  new_run_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True), "", ""), outputs=[page_processes_ui, page_arch_ui, arch_analogy, auto_suggestion])
173
  arch_next_btn.click(page_architecture_next, inputs=[arch_dropdown, layers_slider], outputs=[page_arch_ui, page_hyper_ui, arch_analogy, auto_suggestion, lr_input, epochs_input, batch_input])
174
  hyper_next_btn.click(page_hyperparams_next, inputs=[lr_input, epochs_input, batch_input], outputs=[page_hyper_ui, page_processes_ui, runs_display])
175
-
176
- # Inference Flow
177
  inference_btn.click(go_to_inference_page, outputs=[page_processes_ui, page_inference_ui, inf_run_id_dropdown])
178
  infer_btn.click(inference_action, inputs=[inf_run_id_dropdown, prompt_input], outputs=output_text)
179
  back_from_inf.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_inference_ui, page_processes_ui])
180
-
181
- # NEW: Publish Flow
182
  publish_btn.click(go_to_publish_page, outputs=[page_processes_ui, page_publish_ui, pub_run_id_dropdown, publish_status])
183
- publish_now_btn.click(publish_action, inputs=[pub_run_id_dropdown, pub_description_input], outputs=publish_status)
184
  back_from_pub.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_publish_ui, page_processes_ui])
185
 
186
  demo.queue().launch()
 
1
+ # app.py β€” STABLE VERSION UPDATED WITH USERNAME/PASSWORD
2
  import gradio as gr
3
+ # --- MODIFIED: Updated backend imports ---
4
  from backend import (
5
+ login_user,
6
+ signup_user,
7
  get_user_runs,
8
  get_run_logs,
9
  queue_training_run,
 
13
  )
14
  from utils import ARCH_ANALOGIES, get_auto_hyperparams
15
 
16
+ # ------------------------------ STATE (MODIFIED) ------------------------------
17
+ # HF Token is no longer stored globally. Username is stored instead.
18
+ user_state = {"user_id": None, "username": "", "arch_config": {}}
19
 
20
  # ------------------------------ BACKEND WRAPPERS (UI LOGIC) ------------------------------
21
 
22
+ # --- NEW: Login and Signup Actions ---
23
+ def login_action(username, password):
24
+ user_id, msg = login_user(username, password)
25
  if user_id:
26
  user_state["user_id"] = user_id
27
+ user_state["username"] = username
28
  runs_list = page_processes()
29
  return gr.update(visible=False), gr.update(visible=True), msg, runs_list
30
  else:
31
  return gr.update(), gr.update(), msg, ""
32
 
33
+ def signup_action(username, password):
34
+ user_id, msg = signup_user(username, password)
35
+ if user_id:
36
+ # On success, go back to the login page with a success message
37
+ return gr.update(visible=False), gr.update(visible=True), msg
38
+ else:
39
+ # On failure, show an error message on the signup page
40
+ return gr.update(), gr.update(), msg
41
+
42
+ # --- (The following functions are from the old stable version, unchanged) ---
43
  def page_processes():
44
  if not user_state.get("user_id"): return "Login required."
45
  runs = get_user_runs(user_state["user_id"])
46
+ run_list = "\n".join([f"🍳 Run #{r[0]} | {r[1].upper()} x{r[2]} layers | Status: {r[3]}" for r in runs]) or "No runs yet. Start cooking!"
 
 
 
47
  return run_list
48
 
49
  def load_run_logs(run_id_str):
 
71
  "batch_size": int(batch_size) if batch_size else config["auto_config"]["batch_size"],
72
  }
73
  queue_training_run(user_state["user_id"], final_config)
74
+ # The new parallel backend handles calling start_training_if_free() automatically
75
  refreshed_runs = page_processes()
76
  return gr.update(visible=False), gr.update(visible=True), refreshed_runs
77
 
 
100
  try: return run_inference(run_id, prompt)
101
  except Exception as e: return f"Error: {str(e)}"
102
 
103
+ # --- MODIFIED: Publish action now takes the HF token from the UI ---
104
+ def publish_action(run_id, hf_token, description):
105
  if not run_id: return "Error: Please select a model to publish."
106
+ if not hf_token.strip(): return "Error: Hugging Face Token is required to publish."
107
  try:
 
 
108
  run_details = next((r for r in get_completed_runs() if r[0] == run_id), None)
109
  repo_name = f"llm-kitchen-{run_details[1]}-{run_details[2]}L-run{run_id}" if run_details else f"llm-kitchen-run-{run_id}"
110
  url = publish_run_to_hub(run_id, hf_token, repo_name, description.strip())
 
118
  gr.Markdown("# 🍳 Welcome to LLM Kitchen")
119
  gr.Markdown("### Cook your own language model β€” from scratch!")
120
 
121
+ # --- NEW: SIGNUP PAGE ---
122
+ with gr.Group(visible=False) as page_signup_ui:
123
+ gr.Markdown("### πŸ“ Create a New Account")
124
+ signup_user_input = gr.Textbox(label="Username")
125
+ signup_pass_input = gr.Textbox(label="Password", type="password")
126
+ signup_btn = gr.Button("Sign Up", variant="primary")
127
+ signup_msg = gr.Markdown()
128
+ go_to_login_btn = gr.Button("Already have an account? Log In")
129
+
130
+ # --- NEW: LOGIN PAGE (Replaces old HF Token login) ---
131
  with gr.Group() as page_login_ui:
132
+ gr.Markdown("### πŸ” Login to the Kitchen")
133
+ login_user_input = gr.Textbox(label="Username")
134
+ login_pass_input = gr.Textbox(label="Password", type="password")
135
+ login_btn = gr.Button("Login", variant="primary")
136
  login_msg = gr.Markdown()
137
+ go_to_signup_btn = gr.Button("Don't have an account? Sign Up")
138
 
139
+ # ---- PAGE 2: PROCESSES (Unchanged) ----
140
  with gr.Group(visible=False) as page_processes_ui:
141
  gr.Markdown("### πŸ§‘β€πŸ³ Your Processes")
142
  with gr.Row():
143
  refresh_btn = gr.Button("πŸ”„ Refresh List")
144
  inference_btn = gr.Button("πŸ§ͺ Go to Inference Kitchen")
145
+ publish_btn = gr.Button("πŸš€ Go to Publishing Bay")
146
  runs_display = gr.Textbox(label="Your Training Runs", lines=8, interactive=False)
147
  with gr.Accordion("View Raw Logs", open=False):
148
  run_id_input = gr.Textbox(label="Enter a Run ID to view its logs")
 
150
  logs_display = gr.Textbox(label="Training Logs", lines=10, interactive=False)
151
  new_run_btn = gr.Button("βž• Start New Process", variant="primary")
152
 
153
+ # ---- PAGE 3: INFERENCE KITCHEN (Unchanged) ----
154
  with gr.Group(visible=False) as page_inference_ui:
155
  gr.Markdown("### πŸ§ͺ Inference Kitchen")
156
  inf_run_id_dropdown = gr.Dropdown(label="Select a Completed Model to Use")
 
159
  output_text = gr.Textbox(label="Model's Answer", lines=5, interactive=False)
160
  back_from_inf = gr.Button("⬅️ Back to Processes")
161
 
162
+ # ---- PAGE 4: PUBLISHING BAY (MODIFIED) ----
163
  with gr.Group(visible=False) as page_publish_ui:
164
  gr.Markdown("### πŸš€ Publishing Bay")
165
  pub_run_id_dropdown = gr.Dropdown(label="Select a Completed Model to Publish")
166
+ # --- NEW: HF Token input field ---
167
+ pub_hf_token_input = gr.Textbox(label="Your Hugging Face Token (with write permissions)", type="password", placeholder="hf_...")
168
  pub_description_input = gr.Textbox(label="Model Card Description", lines=4, placeholder="Write a short description for your model card...")
169
  publish_now_btn = gr.Button("Publish to Hugging Face Hub", variant="primary")
170
  publish_status = gr.Markdown()
171
  back_from_pub = gr.Button("⬅️ Back to Processes")
172
 
173
+ # ---- PAGE 5 & 6: ARCHITECTURE & HYPERPARAMETERS (Unchanged) ----
174
  with gr.Group(visible=False) as page_arch_ui:
175
  gr.Markdown("### πŸ—οΈ Step 2: Choose Your Architecture")
176
  arch_dropdown = gr.Dropdown(["cnn", "rnn", "transformer"], label="Architecture Type")
 
178
  arch_next_btn = gr.Button("Next β†’ Hyperparameters")
179
  arch_analogy = gr.Markdown()
180
  auto_suggestion = gr.Markdown()
 
 
181
  with gr.Group(visible=False) as page_hyper_ui:
182
  gr.Markdown("### πŸ§‚ Step 3: Season Your Model (Hyperparameters)")
183
  gr.Markdown("Use Auto-Seasoningβ„’ (pre-filled) or customize manually")
 
186
  batch_input = gr.Number(label="Batch Size", precision=0)
187
  hyper_next_btn = gr.Button("Start Cooking! 🍲")
188
 
189
+ # ------------------------------ EVENTS (MODIFIED) ------------------------------
190
+
191
+ # --- NEW: Auth flow events ---
192
+ go_to_signup_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_login_ui, page_signup_ui])
193
+ go_to_login_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_signup_ui, page_login_ui])
194
+ login_btn.click(login_action, inputs=[login_user_input, login_pass_input], outputs=[page_login_ui, page_processes_ui, login_msg, runs_display])
195
+ signup_btn.click(signup_action, inputs=[signup_user_input, signup_pass_input], outputs=[page_signup_ui, page_login_ui, signup_msg])
196
 
197
+ # --- Old events, mostly unchanged ---
198
  refresh_btn.click(page_processes, outputs=runs_display)
199
  view_logs_btn.click(load_run_logs, inputs=run_id_input, outputs=logs_display)
200
  new_run_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True), "", ""), outputs=[page_processes_ui, page_arch_ui, arch_analogy, auto_suggestion])
201
  arch_next_btn.click(page_architecture_next, inputs=[arch_dropdown, layers_slider], outputs=[page_arch_ui, page_hyper_ui, arch_analogy, auto_suggestion, lr_input, epochs_input, batch_input])
202
  hyper_next_btn.click(page_hyperparams_next, inputs=[lr_input, epochs_input, batch_input], outputs=[page_hyper_ui, page_processes_ui, runs_display])
 
 
203
  inference_btn.click(go_to_inference_page, outputs=[page_processes_ui, page_inference_ui, inf_run_id_dropdown])
204
  infer_btn.click(inference_action, inputs=[inf_run_id_dropdown, prompt_input], outputs=output_text)
205
  back_from_inf.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_inference_ui, page_processes_ui])
206
+
207
+ # --- MODIFIED: Publish event now includes the HF token input ---
208
  publish_btn.click(go_to_publish_page, outputs=[page_processes_ui, page_publish_ui, pub_run_id_dropdown, publish_status])
209
+ publish_now_btn.click(publish_action, inputs=[pub_run_id_dropdown, pub_hf_token_input, pub_description_input], outputs=publish_status)
210
  back_from_pub.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_publish_ui, page_processes_ui])
211
 
212
  demo.queue().launch()