legolasyiu committed
Commit cf8b99d · 1 Parent(s): 83c3fc6
Update README.md

README.md CHANGED
@@ -95,7 +95,7 @@ quantization_config = BitsAndBytesConfig(
     bnb_4bit_use_double_quant=True,
 )
 
-model_id =
+model_id = "EpistemeAI2/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.005-128K-code-COT"
 pipeline = transformers.pipeline(
     "text-generation",
     model=model_id,
@@ -104,12 +104,13 @@ pipeline = transformers.pipeline(
 )
 messages = [
     {"role": "system", "content": """
-
-
-
-
-
-
+Environment: ipython. Tools: brave_search, wolfram_alpha. Cutting Knowledge Date: December 2023. Today Date: 4 October 2024\n
+You are a coding assistant with expertise in everything\n
+Ensure any code you provide can be executed \n
+with all required imports and variables defined. List the imports. Structure your answer with a description of the code solution. \n
+write only the code. do not print anything else.\n
+debug code if error occurs. \n
+Here is the user question: {question}
     """},
     {"role": "user", "content": "Create a bar plot showing the market capitalization of the top 7 publicly listed companies using matplotlib"}
 ]
@@ -141,7 +142,7 @@ fourbit_models = [
 ] # More models at https://huggingface.co/unsloth
 
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "
+    model_name = "EpistemeAI2/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.005-128K-code-COT",
     max_seq_length = 128000,
     load_in_4bit = True,
     token = userdata.get('HF_TOKEN')
@@ -150,12 +151,14 @@ def chatbot(query):
     messages = [
         {"from": "system", "value":
         """
-
-
-
-
-
-
+Environment: ipython. Tools: brave_search, wolfram_alpha. Cutting Knowledge Date: December 2023. Today Date: 4 October 2024\n
+You are a coding assistant with expertise in everything\n
+Ensure any code you provide can be executed \n
+with all required imports and variables defined. List the imports. Structure your answer with a description of the code solution. \n
+write only the code. do not print anything else.\n
+use ipython for search tool. \n
+debug code if error occurs. \n
+Here is the user question: {question}
         """
         },
         {"from": "human", "value": query},
@@ -166,6 +169,7 @@ The explanation should follow these rules:
 _ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 2048, use_cache = True)
 ```
 
+
 # Response
 
 ```python
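The transformers-side hunks above only touch fragments of the README's example: the double-quant flag, the `model_id` assignment, and the new agentic system prompt. A minimal end-to-end sketch of how those pieces fit together is shown below. The extra `BitsAndBytesConfig` fields, `device_map`, `max_new_tokens`, the abbreviated system prompt, and the final print line are illustrative assumptions rather than text from the README, and a recent transformers release is assumed so that chat-style message lists can be passed straight to the pipeline.

```python
# Sketch: 4-bit load of the model named in the diff via the transformers pipeline.
# Quant-type, compute dtype, device_map and max_new_tokens are illustrative choices,
# not values taken from the README.
import torch
import transformers
from transformers import BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",                 # assumed; only double-quant appears in the hunk
    bnb_4bit_compute_dtype=torch.bfloat16,     # assumed
    bnb_4bit_use_double_quant=True,            # matches the config line shown in the hunk
)

model_id = "EpistemeAI2/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.005-128K-code-COT"
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"quantization_config": quantization_config},
    device_map="auto",
)

messages = [
    {"role": "system", "content": "Environment: ipython. Tools: brave_search, wolfram_alpha. "
                                  "You are a coding assistant. Write only the code."},
    {"role": "user", "content": "Create a bar plot showing the market capitalization of the "
                                "top 7 publicly listed companies using matplotlib"},
]

outputs = pipeline(messages, max_new_tokens=2048)
# With chat input, recent transformers returns the whole conversation;
# the last message is the assistant's reply.
print(outputs[0]["generated_text"][-1]["content"])
```

Passing `quantization_config` through `model_kwargs` keeps the 4-bit setup inside the single pipeline call, which is why the README only needs to name `model_id` once.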
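The Unsloth hunks likewise show only the `from_pretrained` call, the system prompt, and the final `generate` line. A rough sketch of the surrounding `chatbot` helper is below, assuming the model's built-in Llama 3.1 chat template with standard role/content keys; the README itself uses ShareGPT-style "from"/"value" messages, which are normally remapped through Unsloth's chat-template utilities. The `skip_prompt` streamer option and the omission of the `token=` argument are also assumptions.

```python
# Sketch: Unsloth 4-bit inference loop around the calls shown in the diff.
# The role/content keys and streaming details are illustrative, not the README's exact code.
from unsloth import FastLanguageModel
from transformers import TextStreamer

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="EpistemeAI2/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.005-128K-code-COT",
    max_seq_length=128000,   # from the diff
    load_in_4bit=True,       # from the diff
    # token=...              # e.g. a Colab userdata HF_TOKEN, as in the README, if required
)
FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference path

def chatbot(query: str) -> None:
    messages = [
        {"role": "system", "content": "Environment: ipython. You are a coding assistant. "
                                      "Write only the code."},
        {"role": "user", "content": query},
    ]
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    text_streamer = TextStreamer(tokenizer, skip_prompt=True)
    _ = model.generate(
        input_ids=inputs,
        streamer=text_streamer,
        max_new_tokens=2048,
        use_cache=True,
    )

chatbot("Create a bar plot showing the market capitalization of the top 7 "
        "publicly listed companies using matplotlib")
```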