Bhaiya Hari Narayan Singh committed on
Commit dfd7076 · verified · 1 Parent(s): 275d027

Update app.py

Files changed (1)
  app.py  +84 -56
app.py CHANGED
@@ -1,69 +1,97 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
          messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
  with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
-     demo.launch()
  import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from transformers.utils import get_json_schema
+ import torch
+
+ # -----------------------
+ # Load model
+ # -----------------------
+ model_name = "bhaiyahnsingh45/functiongemma-multiagent-router"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     device_map="auto",
+     torch_dtype="auto"
+ )
+
+ # -----------------------
+ # Agents
+ # -----------------------
+ def technical_support_agent(issue_type: str, priority: str) -> str:
+     return f"🛠️ Routing to Technical Support: {issue_type} ({priority})"
+
+ def billing_agent(request_type: str, urgency: str) -> str:
+     return f"💰 Routing to Billing: {request_type} ({urgency})"
+
+ def product_info_agent(query_type: str, category: str) -> str:
+     return f"📦 Routing to Product Info: {query_type} ({category})"
+
+ # Tool schemas
+ AGENT_TOOLS = [
+     get_json_schema(technical_support_agent),
+     get_json_schema(billing_agent),
+     get_json_schema(product_info_agent)
+ ]
+
+ SYSTEM_MSG = "You are an intelligent routing agent that directs customer queries to the appropriate specialized agent."
+
+ # -----------------------
+ # Core inference
+ # -----------------------
+ def route_query(user_query: str):
+
+     messages = [
+         {"role": "developer", "content": SYSTEM_MSG},
+         {"role": "user", "content": user_query}
+     ]
+
+     inputs = tokenizer.apply_chat_template(
          messages,
+         tools=AGENT_TOOLS,
+         add_generation_prompt=True,
+         return_dict=True,
+         return_tensors="pt"
+     )
+
+     inputs = {k: v.to(model.device) for k, v in inputs.items()}
+
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=128,
+         pad_token_id=tokenizer.eos_token_id
+     )
+
+     result = tokenizer.decode(
+         outputs[0][len(inputs["input_ids"][0]):],
+         skip_special_tokens=True
+     )
+
+     return result
+
+
+ # -----------------------
+ # Chatbot logic
+ # -----------------------
+ def chat_fn(message, history):
+     response = route_query(message)
+     history.append((message, response))
+     return history, history
+
+
+ # -----------------------
+ # UI
+ # -----------------------
  with gr.Blocks() as demo:
+     gr.Markdown("## 🤖 Multi-Agent Router Chatbot")
+     gr.Markdown("Ask anything about billing, product, or technical issues.")
+
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox(placeholder="Type your query here...")
+     clear = gr.Button("Clear")
+
+     msg.submit(chat_fn, [msg, chatbot], [chatbot, chatbot])
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+ # Launch
+ demo.launch()
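
Note: in this commit route_query returns the raw decoded generation, and the three agent functions are never actually called. A minimal sketch of closing that loop follows, assuming the fine-tuned model emits a JSON tool call of the form {"name": ..., "arguments": {...}} (that output format is an assumption, not something this commit specifies); the dispatch_tool_call helper is hypothetical and reuses the agent functions and route_query defined in app.py.

import json

# Hypothetical helper, not part of this commit. Assumes the model's output
# contains a JSON tool call such as {"name": "billing_agent", "arguments": {...}}.
AGENT_FUNCTIONS = {
    "technical_support_agent": technical_support_agent,
    "billing_agent": billing_agent,
    "product_info_agent": product_info_agent,
}

def dispatch_tool_call(raw_output: str) -> str:
    # Pull the first {...} span out of the generation and try to parse it.
    start, end = raw_output.find("{"), raw_output.rfind("}")
    if start == -1 or end == -1:
        return raw_output  # no structured call found; fall back to the raw text
    try:
        call = json.loads(raw_output[start:end + 1])
        agent = AGENT_FUNCTIONS[call["name"]]
        return agent(**call.get("arguments", {}))
    except (json.JSONDecodeError, KeyError, TypeError):
        return raw_output  # malformed or unknown call; fall back to the raw text

With that helper in place, chat_fn could return dispatch_tool_call(route_query(message)) instead of the raw string.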
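
The new chat_fn also stores history as (user, assistant) tuples, which matches gr.Chatbot's default tuple format; newer Gradio releases prefer the OpenAI-style messages format. A sketch of the same wiring under that format is below, assuming the Space runs a Gradio version that supports gr.Chatbot(type="messages") (an assumption about the environment, not stated in this commit) and reusing route_query from app.py.

# Hypothetical variant, not part of this commit: messages-format chat history.
with gr.Blocks() as demo_messages:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(placeholder="Type your query here...")

    def chat_fn_messages(message, history):
        history = history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": route_query(message)},
        ]
        return history, ""  # the empty string clears the textbox after submit

    msg.submit(chat_fn_messages, [msg, chatbot], [chatbot, msg])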