Rustamshry committed on
Commit
3079076
·
verified ·
1 Parent(s): 765e67a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -65
app.py CHANGED
@@ -1,56 +1,48 @@
1
import gradio as gr
import requests


def generate_response(user_input, chat_history, hf_token):
    """Send the running conversation to the HF Inference API and return updated state.

    Args:
        user_input: The user's latest message.
        chat_history: List of {"role": ..., "content": ...} dicts (mutated in place
            on success).
        hf_token: Hugging Face API token, sent as a Bearer credential.

    Returns:
        (gr_chat_history, chat_history, status): (user, assistant) tuples for the
        gr.Chatbot widget, the raw role/content history for gr.State, and a
        status string ("" on success, an error message otherwise).
    """
    if not hf_token:
        return chat_history, chat_history, "❌ Please enter your Hugging Face API token first."

    # Ignore empty / whitespace-only submissions.
    if not user_input.strip():
        return chat_history, chat_history, ""

    model_id = "khazarai/BioGenesis-ToT"  # Your hosted model

    headers = {
        "Authorization": f"Bearer {hf_token}"
    }

    # Combine chat history into a conversation string (join instead of
    # repeated += to avoid quadratic string building).
    transcript = [
        f"{'User' if msg['role'] == 'user' else 'Assistant'}: {msg['content']}"
        for msg in chat_history
    ]
    conversation = "\n".join(transcript + [f"User: {user_input}\nAssistant:"])

    # Send the request to HF Inference API
    payload = {
        "inputs": conversation,
        "parameters": {
            "max_new_tokens": 2200,
            "temperature": 0.6,
            "top_p": 0.95,
            "top_k": 20,
        },
    }

    # Fix: the original call had no timeout, so a stalled API request would
    # hang the UI indefinitely; network failures now surface as status text.
    try:
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{model_id}",
            headers=headers,
            json=payload,
            timeout=120,
        )
    except requests.RequestException as exc:
        return chat_history, chat_history, f"⚠️ API Error: {exc}"

    if response.status_code != 200:
        return chat_history, chat_history, f"⚠️ API Error: {response.text}"

    # Fix: guard against a non-JSON body (e.g. an HTML error page) instead of
    # raising an unhandled exception into Gradio.
    try:
        result = response.json()
    except ValueError:
        return chat_history, chat_history, f"⚠️ API Error: {response.text}"

    # Extract model output
    if isinstance(result, list) and len(result) > 0 and "generated_text" in result[0]:
        reply = result[0]["generated_text"].split("Assistant:")[-1].strip()
    else:
        reply = "🤔 Sorry, I couldn’t generate a response."

    chat_history.append({"role": "user", "content": user_input})
    chat_history.append({"role": "assistant", "content": reply})

    # Re-shape into (user, assistant) tuples for gr.Chatbot.
    gr_chat_history = [
        (m["content"], chat_history[i + 1]["content"])
        for i, m in enumerate(chat_history)
        if m["role"] == "user"
    ]

    return gr_chat_history, chat_history, ""
62
 
63
 
64
  # --- UI Design ---
@@ -66,55 +58,45 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="emerald", secondary_hue="slate"
66
  gr.HTML("""
67
  <div style="text-align: center; margin-bottom: 20px;">
68
  <h1 style="font-family: 'Inter', sans-serif; font-weight: 800; color: #047857; font-size: 2.2em;">
69
- 🧬 BioGenesis-ToT Chatbot (Hosted on Hugging Face)
70
  </h1>
71
  <p style="color: #4B5563; font-size: 1.05em; margin-top: -10px;">
72
- Talk to your biology-trained LLM — no GPU needed, just your Hugging Face token ⚡
73
  </p>
74
  </div>
75
  """)
76
 
77
  with gr.Row():
78
  with gr.Column(scale=6):
79
- hf_token = gr.Textbox(
80
- placeholder="Enter your Hugging Face API Token here...",
81
- label="🔑 Hugging Face Token",
82
- type="password",
83
- )
84
-
85
  chatbot = gr.Chatbot(
86
  label="BioGenesis Chat",
87
- height=550,
88
- bubble_full_width=False,
89
  show_copy_button=True,
90
  avatar_images=(
91
- "https://cdn-icons-png.flaticon.com/512/1077/1077012.png",
92
- "https://cdn-icons-png.flaticon.com/512/4140/4140048.png",
93
  ),
94
  )
95
-
96
  user_input = gr.Textbox(
97
  placeholder="Ask me about cell biology, molecular structure, or biochemistry...",
98
  label="💬 Your question",
99
  lines=3,
100
  autofocus=True,
101
  )
102
-
103
- status_box = gr.Textbox(label="Status", interactive=False)
104
-
105
  with gr.Row():
106
  send_btn = gr.Button("🚀 Send", variant="primary")
107
  clear_btn = gr.Button("🧹 Clear Chat")
108
 
109
  state = gr.State([])
110
 
111
- send_btn.click(generate_response, [user_input, state, hf_token], [chatbot, state, status_box])
112
- user_input.submit(generate_response, [user_input, state, hf_token], [chatbot, state, status_box])
113
- clear_btn.click(lambda: ([], [], ""), None, [chatbot, state, status_box])
114
 
115
  gr.HTML("""
116
  <div style="text-align: center; margin-top: 25px; color: #6B7280; font-size: 0.9em;">
117
- Powered by <b>Hugging Face Inference API</b> | Built with ❤️ using Gradio
118
  </div>
119
  """)
120
 
 
1
import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM

# --- CPU-only model setup ---
# Base checkpoint plus the BioGenesis-ToT LoRA adapter. Everything is pinned
# to the CPU in float32 because this Space runs without a GPU.
_BASE_MODEL_ID = "unsloth/Qwen3-1.7B"
_ADAPTER_ID = "khazarai/BioGenesis-ToT"

tokenizer = AutoTokenizer.from_pretrained(_BASE_MODEL_ID)

base_model = AutoModelForCausalLM.from_pretrained(
    _BASE_MODEL_ID,
    torch_dtype=torch.float32,
    device_map={"": "cpu"},
)

# Attach the fine-tuned adapter on top of the base model.
model = PeftModel.from_pretrained(base_model, _ADAPTER_ID).to("cpu")
16
+
17
+
18
# --- Chatbot logic ---
def generate_response(user_input, chat_history):
    """Generate one assistant reply with the local model and return updated state.

    Args:
        user_input: The user's latest message.
        chat_history: List of {"role": ..., "content": ...} dicts (mutated in place).

    Returns:
        (gr_chat_history, chat_history): (user, assistant) tuples for the
        gr.Chatbot widget, and the raw role/content history for gr.State.
    """
    # Ignore empty / whitespace-only submissions.
    if not user_input.strip():
        return chat_history, chat_history

    chat_history.append({"role": "user", "content": user_input})

    text = tokenizer.apply_chat_template(
        chat_history,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=True,
    )

    inputs = tokenizer(text, return_tensors="pt").to("cpu")

    output_tokens = model.generate(
        **inputs,
        max_new_tokens=2200,
        do_sample=True,  # required for temperature/top_p/top_k to take effect
        temperature=0.6,
        top_p=0.95,
        top_k=20,
    )

    # Fix: decode only the newly generated tokens. The original split the full
    # decoded text on `user_input`, which breaks whenever the user's phrase
    # reappears inside the reply or inside the chat-template text.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(
        output_tokens[0][prompt_len:], skip_special_tokens=True
    ).strip()

    chat_history.append({"role": "assistant", "content": response})

    # Re-shape into (user, assistant) tuples for gr.Chatbot.
    gr_chat_history = [
        (m["content"], chat_history[i + 1]["content"])
        for i, m in enumerate(chat_history)
        if m["role"] == "user"
    ]

    return gr_chat_history, chat_history
54
 
55
 
56
  # --- UI Design ---
 
58
  gr.HTML("""
59
  <div style="text-align: center; margin-bottom: 20px;">
60
  <h1 style="font-family: 'Inter', sans-serif; font-weight: 800; color: #047857; font-size: 2.2em;">
61
+ 🧬 BioGenesis-ToT Chatbot
62
  </h1>
63
  <p style="color: #4B5563; font-size: 1.05em; margin-top: -10px;">
64
+ Your AI companion for biology, biochemistry, and life sciences.
65
  </p>
66
  </div>
67
  """)
68
 
69
  with gr.Row():
70
  with gr.Column(scale=6):
 
 
 
 
 
 
71
  chatbot = gr.Chatbot(
72
  label="BioGenesis Chat",
73
+ height=600,
74
+ bubble_full_width=True,
75
  show_copy_button=True,
76
  avatar_images=(
77
+ "https://cdn-icons-png.flaticon.com/512/1077/1077012.png", # user icon
78
+ "https://cdn-icons-png.flaticon.com/512/4140/4140048.png", # bot icon
79
  ),
80
  )
 
81
  user_input = gr.Textbox(
82
  placeholder="Ask me about cell biology, molecular structure, or biochemistry...",
83
  label="💬 Your question",
84
  lines=3,
85
  autofocus=True,
86
  )
 
 
 
87
  with gr.Row():
88
  send_btn = gr.Button("🚀 Send", variant="primary")
89
  clear_btn = gr.Button("🧹 Clear Chat")
90
 
91
  state = gr.State([])
92
 
93
+ send_btn.click(generate_response, [user_input, state], [chatbot, state])
94
+ user_input.submit(generate_response, [user_input, state], [chatbot, state])
95
+ clear_btn.click(lambda: ([], []), None, [chatbot, state])
96
 
97
  gr.HTML("""
98
  <div style="text-align: center; margin-top: 25px; color: #6B7280; font-size: 0.9em;">
99
+ Powered by <b>Qwen3-1.7B + BioGenesis-ToT</b> | Built with ❤️ using Gradio
100
  </div>
101
  """)
102