sanjay920 committed
Commit 40602a2 · 1 Parent(s): 0b7bdc7
Files changed (1): app.py (+48 −12)
app.py CHANGED
@@ -30,16 +30,48 @@ model_choices = [
     "rubra-ai/Qwen2-7B-Instruct",
     "rubra-ai/Phi-3-mini-128k-instruct",
     "rubra-ai/Mistral-7B-Instruct-v0.3",
-    "rubra-ai/Mistral-7B-Instruct-v0.2",
-    "rubra-ai/gemma-1.1-2b-it"
+    # "rubra-ai/Mistral-7B-Instruct-v0.2",
+    # "rubra-ai/gemma-1.1-2b-it"
 ]
 
 DESCRIPTION = """\
-# Rubra v0.1 - Top LLMs enhanced with function (tool) calling
+# Rubra v0.1 - A Collection of Tool (Function) Calling LLMs
 
 This is a demo of the Rubra collection of models. You can use the models for general conversation,
 task completion, and function calling with the provided tools input.
 
+See more at https://docs.rubra.ai/ & https://github.com/rubra-ai/rubra
+"""
+
+model_table = """
+<p/>
+
+---
+
+## Rubra Benchmarks
+
+| Model | Params (in billions) | Function Calling | MMLU (5-shot) | GPQA (0-shot) | GSM-8K (8-shot, CoT) | MATH (4-shot, CoT) | MT-bench |
+|---|---|---|---|---|---|---|---|
+| GPT-4o | - | 98.57% | - | 53.6 | - | - | - |
+| Claude-3.5 Sonnet | - | 98.57% | 88.7 | 59.4 | - | - | - |
+| [**Rubra Llama-3 70B Instruct**](https://huggingface.co/rubra-ai/Meta-Llama-3-70B-Instruct) | 70.6 | 97.85% | 75.90 | 33.93 | 82.26 | 34.24 | 8.36 |
+| [**Rubra Llama-3 8B Instruct**](https://huggingface.co/rubra-ai/Meta-Llama-3-8B-Instruct) | 8.9 | 89.28% | 64.39 | 31.70 | 68.99 | 23.76 | 8.03 |
+| [**Rubra Qwen2 7B Instruct**](https://huggingface.co/rubra-ai/Qwen2-7B-Instruct) | 8.55 | 85.71% | 68.88 | 30.36 | 75.82 | 28.72 | 8.08 |
+| Qwen2-7B-Instruct | 7.62 | - | 70.78 | 32.14 | 78.54 | 30.10 | 8.29 |
+| NousResearch/Hermes-2-Pro-Llama-3-8B | 8.03 | 41.25% | 64.16 | 31.92 | 73.92 | 21.58 | 7.83 |
+| gorilla-llm/gorilla-openfunctions-v2 | 6.91 | 41.25% ∔ | 49.14 | 23.66 | 48.29 | 17.54 | 5.13 |
+| [**Rubra Mistral 7B Instruct v0.3**](https://huggingface.co/rubra-ai/Mistral-7B-Instruct-v0.3) | 8.12 | 73.57% | 59.12 | 29.91 | 43.29 | 11.14 | 7.69 |
+| [**Rubra Phi-3 Mini 128k Instruct**](https://huggingface.co/rubra-ai/Phi-3-mini-128k-instruct) | 4.27 | 65.71% | 66.66 | 29.24 | 74.09 | 26.84 | 7.45 |
+| Nexusflow/NexusRaven-V2-13B | 13.0 | 53.75% ∔ | 43.23 | 28.79 | 22.67 | 7.12 | 5.36 |
+| Mistral 7B Instruct v0.3 | 7.25 | 22.5% | 62.10 | 30.58 | 53.07 | 12.98 | 7.50 |
+| [**Rubra Gemma-1.1 2B Instruct**](https://huggingface.co/rubra-ai/gemma-1.1-2b-it) | 2.84 | 45.00% | 38.85 | 24.55 | 6.14 | 2.38 | 5.75 |
+| Llama-3 70B Instruct | 70.6 | - | 79.90 | 38.17 | 90.67 | 44.24 | 8.88 |
+| Llama-3 8B Instruct | 8.03 | - | 65.69 | 31.47 | 77.41 | 27.58 | 8.07 |
+| Mistral 7B Instruct v0.2 | 7.24 | - | 59.27 | 27.68 | 43.21 | 10.30 | 7.50 |
+| [**Rubra Mistral 7B Instruct v0.2**](https://huggingface.co/rubra-ai/Mistral-7B-Instruct-v0.2) | 8.11 | 69.28% | 58.90 | 29.91 | 34.12 | 8.36 | 7.36 |
+| Phi-3 Mini 128k Instruct | 3.82 | - | 68.17 | 30.58 | 80.44 | 28.12 | 7.92 |
+| Gemma-1.1 2B Instruct | 2.51 | - | 37.84 | 22.99 | 6.29 | 6.14 | 5.82 |
+
 """
 
 LICENSE = """
@@ -71,7 +103,6 @@ if torch.cuda.is_available():
     global model, tokenizer
     model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=False)
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    tokenizer.use_default_system_prompt = False
     model.generation_config.pad_token_id = tokenizer.pad_token_id
 
 load_model(model_id)  # Load the default model
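The hunk above only drops `tokenizer.use_default_system_prompt = False`; the rest of the loader is unchanged. For readers skimming the diff, here is a minimal, self-contained sketch of how this loader and the `model_choices` list plausibly fit together in the app. The `switch_model` callback and its dropdown wiring are illustrative assumptions, not code from this commit.

```python
# Sketch only: reconstructs the loading path implied by the diff. Anything not
# visible in the hunks (e.g. switch_model) is an assumption for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_choices = [
    "rubra-ai/Qwen2-7B-Instruct",
    "rubra-ai/Phi-3-mini-128k-instruct",
    "rubra-ai/Mistral-7B-Instruct-v0.3",
]

model, tokenizer = None, None

def load_model(model_name):
    """Load the selected checkpoint and its tokenizer onto the available device(s)."""
    global model, tokenizer
    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=False)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Avoid pad-token warnings during generation.
    model.generation_config.pad_token_id = tokenizer.pad_token_id

def switch_model(model_name):
    # Hypothetical dropdown callback: reload weights when the user picks another model.
    load_model(model_name)
    return f"Loaded {model_name}"
```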
@@ -199,22 +230,26 @@ bot_message = """Hello! How can I assist you today? If you have any questions or
 {
     "type": "function",
     "function": {
-        "name": "get_current_weather",
-        "description": "Get the current weather in a given location",
+        "name": "get_stock_information",
+        "description": "Get the current stock market information for a given company",
         "parameters": {
             "type": "object",
             "properties": {
-                "location": {
+                "ticker_symbol": {
+                    "type": "string",
+                    "description": "The stock ticker symbol of the company, e.g., 'AAPL' for Apple Inc."
+                },
+                "exchange": {
                     "type": "string",
-                    "description": "Must include the city AND state, e.g. 'San Francisco, CA'"
+                    "description": "The stock exchange where the company is listed, e.g., 'NASDAQ'. If not provided, default to the primary exchange for the ticker symbol."
                 },
-                "unit": {
+                "data_type": {
                     "type": "string",
-                    "enum":
-                    ["celsius", "fahrenheit"]
+                    "enum": ["price", "volume", "market_cap"],
+                    "description": "The type of stock data to retrieve: 'price' for current price, 'volume' for trading volume, 'market_cap' for market capitalization."
                 }
             },
-            "required": ["location"]
+            "required": ["ticker_symbol", "data_type"]
         }
     }
 }
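The JSON above is the sample tools payload the demo now displays. As a rough, hedged illustration of how such a schema could be exercised against one of the Rubra checkpoints outside the demo (the exact prompt format is defined by each model's chat template, so treat this as a sketch rather than the app's actual inference path):

```python
# Sketch, not the demo's code: assumes a recent transformers release whose
# apply_chat_template accepts a `tools` list and a template that consumes it.
from transformers import AutoModelForCausalLM, AutoTokenizer

tools = [{
    "type": "function",
    "function": {
        "name": "get_stock_information",
        "description": "Get the current stock market information for a given company",
        "parameters": {
            "type": "object",
            "properties": {
                "ticker_symbol": {"type": "string"},
                "exchange": {"type": "string"},
                "data_type": {"type": "string", "enum": ["price", "volume", "market_cap"]},
            },
            "required": ["ticker_symbol", "data_type"],
        },
    },
}]

model_name = "rubra-ai/Qwen2-7B-Instruct"  # any entry from model_choices
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

messages = [{"role": "user", "content": "What is AAPL trading at right now?"}]
inputs = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=256)
# Print only the newly generated tokens (the model's tool call or reply).
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```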
@@ -319,6 +354,7 @@ def create_chat_interface():
 
     clear_btn.click(lambda: ([], None), outputs=[chatbot, error_box])
 
+    gr.Markdown(model_table)
     gr.Markdown(LICENSE)
 
     return demo
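For completeness, `create_chat_interface` returns a Gradio Blocks object. A minimal, assumed launch snippet (standard Gradio usage, not shown in any hunk of this commit) would be:

```python
if __name__ == "__main__":
    demo = create_chat_interface()
    # Queue requests so long generations don't block other users; settings are illustrative.
    demo.queue(max_size=20).launch()
```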
 