andrijdavid committed
Commit 135337a
Parent: 2ce5af6

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -57,3 +57,18 @@ Q5_K_M/Q5_K_M-00001-of-00001.gguf filter=lfs diff=lfs merge=lfs -text
  Q5_K_S/Q5_K_S-00001-of-00001.gguf filter=lfs diff=lfs merge=lfs -text
  Q6_K/Q6_K-00001-of-00001.gguf filter=lfs diff=lfs merge=lfs -text
  Q8_0/Q8_0-00001-of-00001.gguf filter=lfs diff=lfs merge=lfs -text
+ Q3_K_L/Q3_K_L-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q3_K_L/Q3_K_L-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q4_K_M/Q4_K_M-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q4_K_M/Q4_K_M-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q4_K_S/Q4_K_S-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q4_K_S/Q4_K_S-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q5_K_M/Q5_K_M-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q5_K_M/Q5_K_M-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q5_K_S/Q5_K_S-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q5_K_S/Q5_K_S-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q6_K/Q6_K-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q6_K/Q6_K-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ Q8_0/Q8_0-00001-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
+ Q8_0/Q8_0-00002-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
+ Q8_0/Q8_0-00003-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
Q2_K/Q2_K-00001-of-00001.gguf CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1f22d1911fbc002e3439219b2a107c5a37a5c2127ea75ea027c026ebfe2fa9a6
- size 7818784
+ oid sha256:0bb0169d36973cf9d121ab4bd91bb3dff39ddc34d12aa1be057e902846061416
+ size 26375108224
Q3_K_L/Q3_K_L-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61030d4d888cc0831a8f6611e70c645b414cc1e2228f9b79bd1cb4460248d9fc
+ size 32087450240
Q3_K_L/Q3_K_L-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:215c10de1cdeaec095e68633c00b086ead54de29b55083415df27e0527c4cb69
+ size 5053142144
Q3_K_M/Q3_K_M-00001-of-00002.gguf CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:55bfd05e915ce6581847cfa87e805fcd84422574b0a53bb3b3f11602da25ab6f
- size 32214320224
+ oid sha256:d012fa470dab1ab5ace6febe9762962478af2e4b55c3fe5381551a1e16d6100e
+ size 32213807232
Q3_K_S/Q3_K_S-00001-of-00001.gguf CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f12aefde6bd226383a9016e94eae9a51ecd4756fcd95bb1f921a97a7a1f3d088
- size 7818784
+ oid sha256:34be5d435335cfd8eefbedf93369785a5fa941ceb61edb7eb541aeb4042eef40
+ size 30912050816
Q4_0/Q4_0-00001-of-00002.gguf CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8bcc513f5740f21f054cbedc8a0f5b909c1c6cfc951e3c766a936a0cb5c0cc40
- size 32104950688
+ oid sha256:b6f1e0ee276ec9c890aee07f642cc185706959087a0bcea77e393dd37c89d777
+ size 32104437696
Q4_1/Q4_1-00001-of-00002.gguf CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e12ebfafda1c929cdfdd1590212f495d671c2f39ae6969e65dc6028319511b9a
- size 32220552672
+ oid sha256:64b51ca36ff4e5c2f5664f2b6981ba6c50ce12cbd40e39144d253a1a19008d76
+ size 32220039680
Q4_K_M/Q4_K_M-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:504c10e8027e788d689efd8d5a1ee79bb64b5b284de882e3309f4253cc777f61
+ size 32215847104
Q4_K_M/Q4_K_M-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e7383a99f68ab332f47759701b6ffea8d5a5a92105a003f3a0acd1b374d511b
+ size 10304546368
Q4_K_S/Q4_K_S-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbc373913a6241365e9f961f676d27703a13f10c7da33380ba1e85ab802c1438
+ size 32116954592
Q4_K_S/Q4_K_S-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dfa7ac4877e2e302012e3c0f8a2db6516723ecfafe90559e58ee078d404e19f
+ size 8230265120
Q5_0/Q5_0-00001-of-00002.gguf CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b0b16a54bd1062d7077f54f5ebd52d8fb1927db7c523079aa2b75d22f7ba62df
- size 32176771712
+ oid sha256:b22f89814d50d8c89b04e3d4e4733b16ae7b211b61737a6405c377ec19323b2c
+ size 32176258688
Q5_1/Q5_1-00001-of-00002.gguf CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b6aa488def95e7b7a3639d5534e81204fbfff03827dc0a356cbf27cf09b6942b
- size 32068044832
+ oid sha256:5fcbad9756d0f09f2b5551fe18cafbb549c189c5af5ced0d6b3393cf6fa45f0f
+ size 32067531840
Q5_K_M/Q5_K_M-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0088ff77a02c532f0c87c7a694ed600f74cb8ec252cd0e2455a864fe9945c6c
+ size 32201882688
Q5_K_M/Q5_K_M-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f03529504b50d06f1aa0a30941299f69d775b7b2f6bbc7d0986ccc25fb4e346c
+ size 17747933888
Q5_K_S/Q5_K_S-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fe30b6c2d4836e2e9225ce7a63815786ce09d60ad439187278ffacfea2577c6
+ size 32176258688
Q5_K_S/Q5_K_S-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:443ef2819cd782c62b3d5c05e20e5f43af8a2b0e876de21e7a3d58f58a8e727e
+ size 16481187936
Q6_K/Q6_K-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc98635ece5434913ee4ea575972c8abe52bf30f689477ca2e7f60048b5f9882
+ size 32072330144
Q6_K/Q6_K-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:384b56e4e416711d16024f13cbe9275e6797b85f2b8ebd9bcb5bc577d3d43ed0
+ size 25815812960
Q8_0/Q8_0-00001-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f5962553b1464efabdc06ed945f1c8b224f1c13b8bd8529394f5eaf4680d631
+ size 32196761184
Q8_0/Q8_0-00002-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c4a3831debb17fdede320dfe3bfbe45c504fa7e61972978ef1610515ef1dfdc
+ size 32070912576
Q8_0/Q8_0-00003-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cedb9269d4024652849361594a76e544ec00ebbf2ef29b6d866bafb2254f833a
+ size 10707375808
README.md CHANGED
@@ -263,6 +263,202 @@ Here are guides on using llama-cpp-python and ctransformers with LangChain:

+ # Llama3-OpenBioLLM-70B-GGUF
+ - Original model: [Llama3-OpenBioLLM-70B](https://huggingface.co/aaditya/Llama3-OpenBioLLM-70B)
+
+ <!-- description start -->
+ ## Description
+
+ This repo contains GGUF format model files for [Llama3-OpenBioLLM-70B](https://huggingface.co/aaditya/Llama3-OpenBioLLM-70B).
+
+ <!-- description end -->
+ <!-- README_GGUF.md-about-gguf start -->
+ ### About GGUF
+ GGUF is a format introduced by the llama.cpp team on August 21st, 2023. It is a replacement for GGML, which is no longer supported by llama.cpp.
+ Here is an incomplete list of clients and libraries that are known to support GGUF:
+ * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF, providing both a Command Line Interface (CLI) and a server option.
+ * [text-generation-webui](https://github.com/oobabooga/text-generation-webui). The most widely used web UI, with numerous features, powerful extensions, and GPU acceleration support.
+ * [Ollama](https://github.com/jmorganca/ollama). A lightweight and extensible framework for building and running language models locally, featuring a simple API for creating, managing, and executing models, along with a library of pre-built models.
+ * [KoboldCpp](https://github.com/LostRuins/koboldcpp). A comprehensive web UI offering GPU acceleration across all platforms and architectures, particularly renowned for storytelling.
+ * [GPT4All](https://gpt4all.io). A free and open-source GUI that runs locally, supporting Windows, Linux, and macOS with full GPU acceleration.
+ * [LM Studio](https://lmstudio.ai/). An intuitive and powerful local GUI for Windows and macOS (Silicon), featuring GPU acceleration.
+ * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui). A notable web UI with a variety of unique features, including a comprehensive model library for easy model selection.
+ * [Faraday.dev](https://faraday.dev/). An attractive, user-friendly character-based chat GUI for Windows and macOS (both Silicon and Intel), also offering GPU acceleration.
+ * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python). A Python library with GPU acceleration, LangChain support, and an OpenAI-compatible API server.
+ * [candle](https://github.com/huggingface/candle). A Rust-based ML framework focused on performance, including GPU support, and designed for ease of use.
+ * [ctransformers](https://github.com/marella/ctransformers). A Python library with GPU acceleration, LangChain support, and an OpenAI-compatible API server.
+ * [localGPT](https://github.com/PromtEngineer/localGPT). An open-source initiative enabling private conversations with documents.
+ <!-- README_GGUF.md-about-gguf end -->
+
+ <!-- compatibility_gguf start -->
+ ## Explanation of quantisation methods
+ <details>
+ <summary>Click to see details</summary>
+ The new methods available are:
+
+ * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw).
+ * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
+ * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
+ * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K, resulting in 5.5 bpw.
+ * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw.
+
+ A back-of-the-envelope size check based on these figures follows after this section.
+ </details>
+ <!-- compatibility_gguf end -->
+
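+ On-disk size is roughly `parameter count × bpw / 8` bytes, plus metadata. A minimal sketch in plain Python (the ~70.6B parameter count is an assumption for a Llama-3-70B-class model; real files land a little higher or lower because the K-quant files mix quantisation types across tensors):
+
+ ```python
+ # Rough GGUF file-size estimate from bits-per-weight (bpw).
+ PARAMS = 70.6e9  # assumed parameter count for a Llama-3-70B-class model
+
+ for name, bpw in [("Q2_K", 2.5625), ("Q3_K", 3.4375), ("Q4_K", 4.5),
+                   ("Q5_K", 5.5), ("Q6_K", 6.5625)]:
+     size_gb = PARAMS * bpw / 8 / 1e9  # bits -> bytes -> decimal GB
+     print(f"{name}: ~{size_gb:.1f} GB")
+ ```
+
+ For example, Q4_K works out to roughly 70.6e9 × 4.5 / 8 ≈ 39.7 GB, close to the combined size of the Q4_K_S shards in this repo.
+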
+ <!-- README_GGUF.md-how-to-download start -->
+ ## How to download GGUF files
+
+ **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple quantisation formats are provided, and most users only want to pick and download a single folder.
+
+ The following clients/libraries will automatically download models for you, providing a list of available models to choose from:
+
+ * LM Studio
+ * LoLLMS Web UI
+ * Faraday.dev
+
+ ### In `text-generation-webui`
+
+ Under Download Model, you can enter the model repo: LiteLLMs/Llama3-OpenBioLLM-70B-GGUF and below it, a specific filename to download, such as: Q4_0/Q4_0-00001-of-00002.gguf.
+
+ Then click Download.
+
+ ### On the command line, including multiple files at once
+
+ I recommend using the `huggingface-hub` Python library:
+
+ ```shell
+ pip3 install huggingface-hub
+ ```
+
+ Then you can download any individual model file to the current directory, at high speed, with a command like this:
+
+ ```shell
+ huggingface-cli download LiteLLMs/Llama3-OpenBioLLM-70B-GGUF Q4_0/Q4_0-00001-of-00002.gguf --local-dir . --local-dir-use-symlinks False
+ ```
+
+ <details>
+ <summary>More advanced huggingface-cli download usage (click to read)</summary>
+
+ You can also download multiple files at once with a pattern:
+
+ ```shell
+ huggingface-cli download LiteLLMs/Llama3-OpenBioLLM-70B-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
+
+ For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).
+
+ To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:
+
+ ```shell
+ pip3 install huggingface_hub[hf_transfer]
+ ```
+
+ And set the environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:
+
+ ```shell
+ HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download LiteLLMs/Llama3-OpenBioLLM-70B-GGUF Q4_0/Q4_0-00001-of-00002.gguf --local-dir . --local-dir-use-symlinks False
+ ```
+
+ Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.
+ </details>
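+
+ If you prefer to stay in Python, the same `huggingface-hub` library exposes these download functions directly. A minimal sketch equivalent to the CLI commands above (repo and file names as used elsewhere in this README):
+
+ ```python
+ from huggingface_hub import hf_hub_download, snapshot_download
+
+ # Download a single shard to the current directory:
+ hf_hub_download(
+     repo_id="LiteLLMs/Llama3-OpenBioLLM-70B-GGUF",
+     filename="Q4_0/Q4_0-00001-of-00002.gguf",
+     local_dir=".",
+ )
+
+ # Or fetch every file of one quantisation with a pattern, like --include above:
+ snapshot_download(
+     repo_id="LiteLLMs/Llama3-OpenBioLLM-70B-GGUF",
+     allow_patterns=["*Q4_K*gguf"],
+     local_dir=".",
+ )
+ ```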
+ <!-- README_GGUF.md-how-to-download end -->
+ <!-- README_GGUF.md-how-to-run start -->
+ ## Example `llama.cpp` command
+
+ Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.
+
+ ```shell
+ ./main -ngl 35 -m Q4_0/Q4_0-00001-of-00002.gguf --color -c 8192 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "<PROMPT>"
+ ```
+
+ Change `-ngl 35` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.
+
+ Change `-c 8192` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value.
+
+ If you want to have a chat-style conversation, replace the `-p "<PROMPT>"` argument with `-i -ins`.
+
+ For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
+
+ ## How to run in `text-generation-webui`
+
+ Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20%E2%80%90%20Model%20Tab.md#llamacpp).
+
+ ## How to run from Python code
+
+ You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python.
+
+ ### How to load this model in Python code, using llama-cpp-python
+
+ For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/).
+
+ #### First install the package
+
+ Run one of the following commands, according to your system:
+
+ ```shell
+ # Base llama-cpp-python with no GPU acceleration
+ pip install llama-cpp-python
+ # With NVidia CUDA acceleration
+ CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
+ # Or with OpenBLAS acceleration
+ CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python
+ # Or with CLBLast acceleration
+ CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python
+ # Or with AMD ROCm GPU acceleration (Linux only)
+ CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python
+ # Or with Metal GPU acceleration for macOS systems only
+ CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python
+ # On Windows, to set the CMAKE_ARGS variable in PowerShell, follow this format; eg for NVidia CUDA:
+ $env:CMAKE_ARGS = "-DLLAMA_CUBLAS=on"
+ pip install llama-cpp-python
+ ```
+
+ #### Simple llama-cpp-python example code
+
+ ```python
+ from llama_cpp import Llama
+
+ # Set n_gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
+ llm = Llama(
+     model_path="./Q4_0/Q4_0-00001-of-00002.gguf",  # Download the model file first
+     n_ctx=8192,       # The max sequence length to use - note that longer sequence lengths require much more resources
+     n_threads=8,      # The number of CPU threads to use, tailor to your system and the resulting performance
+     n_gpu_layers=35   # The number of layers to offload to GPU, if you have GPU acceleration available
+ )
+
+ # Simple inference example
+ output = llm(
+     "<PROMPT>",       # Prompt
+     max_tokens=512,   # Generate up to 512 tokens
+     stop=["</s>"],    # Example stop token - not necessarily correct for this specific model! Please check before using.
+     echo=True         # Whether to echo the prompt
+ )
+
+ # Chat Completion API
+ llm = Llama(model_path="./Q4_0/Q4_0-00001-of-00002.gguf", chat_format="llama-3")  # Set chat_format according to the model you are using
+ llm.create_chat_completion(
+     messages = [
+         {"role": "system", "content": "You are a story writing assistant."},
+         {
+             "role": "user",
+             "content": "Write a story about llamas."
+         }
+     ]
+ )
+ ```
+
+ ## How to use with LangChain
+
+ Here are guides on using llama-cpp-python and ctransformers with LangChain:
+
+ * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
+ * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)
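+
+ As a quick illustration of the first guide, here is a minimal LangChain sketch. It assumes `pip install langchain-community llama-cpp-python` and a downloaded shard; the parameter values simply mirror the `llama.cpp` example above, and the prompt is only a placeholder:
+
+ ```python
+ from langchain_community.llms import LlamaCpp
+
+ llm = LlamaCpp(
+     model_path="./Q4_0/Q4_0-00001-of-00002.gguf",
+     n_gpu_layers=35,   # layers to offload to GPU; set to 0 for CPU-only
+     n_ctx=8192,        # context length, matching -c 8192 above
+     temperature=0.7,
+ )
+ print(llm.invoke("Briefly explain what GGUF quantisation is."))
+ ```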
+
+ <!-- README_GGUF.md-how-to-run end -->
+
+ <!-- footer end -->
+
+ <!-- original-model-card start -->
+ # Original model card: Llama3-OpenBioLLM-70B
+
+
  <div align="center">
  <img width="260px" src="https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/BrQCb95lmEIFz79QAmoNA.png"></div>

@@ -587,3 +783,5 @@ Result sources
  - [4] Gemini-1.0 [Gemini Goes to Med School](https://arxiv.org/abs/2402.07023)

  <!-- original-model-card end -->
+
+ <!-- original-model-card end -->