Mel Seto committed on
Commit
4fa4f0a
·
1 Parent(s): fe6d194

revert to 2-column UI

Browse files
Files changed (1) hide show
  1. app.py +58 -13
app.py CHANGED
@@ -1,4 +1,3 @@
1
- # app.py
2
  import os
3
  from dotenv import load_dotenv
4
  import gradio as gr
@@ -21,17 +20,15 @@ Explanation
21
  Situation: {situation}
22
  Answer:
23
  """
24
- # Use Cerebras chat completions API
25
  response = client.chat.completions.create(
26
- model="meta-llama/Llama-3.3-70B-Instruct",
27
  messages=[{"role": "user", "content": prompt}],
28
  max_tokens=150
29
  )
30
 
31
- # Extract generated text
32
  generated_text = response.choices[0].message.content.strip()
33
 
34
- # Split lines for UI
35
  lines = [line.strip() for line in generated_text.split("\n") if line.strip()]
36
  if len(lines) >= 3:
37
  idiom = lines[0]
@@ -44,21 +41,69 @@ Answer:
44
 
45
  return idiom, explanation
46
 
 
47
  def launch_app():
 
48
  client = InferenceClient(
49
  provider="cerebras",
 
50
  api_key=os.environ["HF_TOKEN"]
51
  )
52
 
53
- with gr.Blocks() as demo:
54
- txt = gr.Textbox(label="Situation", lines=3)
55
- idiom_out = gr.HTML()
56
- expl_out = gr.HTML()
57
- btn = gr.Button("✨ Find Idiom")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
 
59
- def update_ui(s):
60
- return generate_idiom(s, client)
 
 
 
 
 
 
61
 
62
- btn.click(update_ui, inputs=[txt], outputs=[idiom_out, expl_out])
 
 
 
 
 
 
 
 
 
 
63
 
64
  demo.launch(debug=True)
 
 
 
 
 
 
1
  import os
2
  from dotenv import load_dotenv
3
  import gradio as gr
 
20
  Situation: {situation}
21
  Answer:
22
  """
23
+ # Use Cerebras chat completion API
24
  response = client.chat.completions.create(
 
25
  messages=[{"role": "user", "content": prompt}],
26
  max_tokens=150
27
  )
28
 
 
29
  generated_text = response.choices[0].message.content.strip()
30
 
31
+ # Split lines for clean UI
32
  lines = [line.strip() for line in generated_text.split("\n") if line.strip()]
33
  if len(lines) >= 3:
34
  idiom = lines[0]
 
41
 
42
  return idiom, explanation
43
 
44
+
45
def launch_app():
    """Build and launch the two-column Gradio UI for the idiom generator.

    Left column: situation textbox, example prompts, and a submit button.
    Right column: styled HTML panes for the idiom and its explanation.

    Raises:
        KeyError: if the HF_TOKEN environment variable is not set.
    """
    import html  # stdlib; used to escape model output before HTML embedding

    # Instantiate the Cerebras-backed client inside the function so HF_TOKEN
    # is read at launch time rather than at module import time.
    client = InferenceClient(
        provider="cerebras",
        model="meta-llama/Llama-3.3-70B-Instruct",
        api_key=os.environ["HF_TOKEN"]
    )

    with gr.Blocks(css="""
    .idiom-output {
        font-size: 2rem;
        font-weight: bold;
        text-align: center;
        color: #8B0000;
        margin-bottom: 0.5em;
    }
    .explanation-output {
        font-size: 1rem;
        line-height: 1.5;
        color: #333333;
        text-align: center;
    }
    .gradio-container {
        background-color: #fdfcf7;
    }
    """) as demo:

        gr.Markdown("## 🀄 Chinese Wisdom Generator\nEnter a situation, get a Chinese idiom with explanation.")

        with gr.Row():
            # Left column: input + examples + button
            with gr.Column(scale=1):
                situation_input = gr.Textbox(
                    label="Describe your situation...",
                    placeholder="e.g. I procrastinated on my homework again...",
                    lines=3
                )
                submit_btn = gr.Button("✨ Find Idiom")

                gr.Examples(
                    examples=[
                        ["I studied hard but still failed my exam."],
                        ["I missed my bus because I woke up late."],
                        ["I finally finished a long project after months."],
                    ],
                    inputs=[situation_input]
                )

            # Right column: outputs
            with gr.Column(scale=1):
                idiom_output = gr.HTML("<div class='idiom-output'>—</div>")
                explanation_output = gr.HTML("<div class='explanation-output'>—</div>")

        # Button callback: delegate to generate_idiom, then wrap in styled divs.
        def update_ui(situation):
            idiom, explanation = generate_idiom(situation, client)
            # FIX: escape the model-generated text before interpolating it into
            # raw HTML — LLM output is untrusted and could otherwise inject
            # markup/scripts into the page.
            safe_idiom = html.escape(idiom)
            safe_explanation = html.escape(explanation)
            return (
                f"<div class='idiom-output'>{safe_idiom}</div>",
                f"<div class='explanation-output'>{safe_explanation}</div>",
            )

        submit_btn.click(update_ui, inputs=[situation_input], outputs=[idiom_output, explanation_output])

    demo.launch(debug=True)
# Entry point: launch the Gradio app only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    launch_app()