Update README.md
Browse files
README.md
CHANGED
@@ -4,3 +4,26 @@ base_model: nvidia/Llama3-ChatQA-1.5-8B
---

[MLC](https://llm.mlc.ai/) version of [nvidia/Llama3-ChatQA-1.5-8B](https://huggingface.co/nvidia/Llama3-ChatQA-1.5-8B), using `q4f16_1` quantization.

## Usage

```typescript
import { CreateMLCEngine } from "@mlc-ai/web-llm";

async function main() {
  const appConfig = {
    "model_list": [
      {
        "model": "https://huggingface.co/Felladrin/mlc-q4f16-Llama3-ChatQA-1.5-8B",
        "model_id": "mlc-q4f16-Llama3-ChatQA-1.5-8B",
        "model_lib": "https://huggingface.co/Felladrin/mlc-q4f16-Llama3-ChatQA-1.5-8B/resolve/main/model.wasm",
      }
    ],
  };

  const engine = await CreateMLCEngine(
    "mlc-q4f16-Llama3-ChatQA-1.5-8B",
    { appConfig },
  );
}
```