MaziyarPanahi committed
Commit ebfaae0
1 parent: a928cb7

Update README.md (#6)


- Update README.md (a4dccdc9bdee09975f838f834678bb77617f5d43)

Files changed (1):
  1. README.md: +6 -6
README.md CHANGED

@@ -17,7 +17,7 @@ inference: false
 model_creator: MaziyarPanahi
 quantized_by: MaziyarPanahi
 base_model: Qwen/Qwen2-7B
-model_name: Qwen2-7B-Instruct-v0.6
+model_name: calme-2.6-qwen2-7b
 datasets:
 - nvidia/HelpSteer2
 - teknium/OpenHermes-2.5
@@ -28,13 +28,13 @@ license: apache-2.0
 
 <img src="./qwen2-fine-tunes-maziyar-panahi.webp" alt="Qwen2 fine-tune" width="500" style="margin-left:'auto' margin-right:'auto' display:'block'"/>
 
-# MaziyarPanahi/Qwen2-7B-Instruct-v0.6
+# MaziyarPanahi/calme-2.6-qwen2-7b
 
 This is a fine-tuned version of the `Qwen/Qwen2-7B` model. It aims to improve the base model across all benchmarks.
 
 # ⚡ Quantized GGUF
 
-All GGUF models are available here: [MaziyarPanahi/Qwen2-7B-Instruct-v0.6-GGUF](https://huggingface.co/MaziyarPanahi/Qwen2-7B-Instruct-v0.6-GGUF)
+All GGUF models are available here: [MaziyarPanahi/calme-2.6-qwen2-7b-GGUF](https://huggingface.co/MaziyarPanahi/calme-2.6-qwen2-7b-GGUF)
 
 # 🏆 [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
 
@@ -68,7 +68,7 @@ from transformers import pipeline
 messages = [
     {"role": "user", "content": "Who are you?"},
 ]
-pipe = pipeline("text-generation", model="MaziyarPanahi/Qwen2-7B-Instruct-v0.6")
+pipe = pipeline("text-generation", model="MaziyarPanahi/calme-2.6-qwen2-7b")
 pipe(messages)
 
 
@@ -76,6 +76,6 @@ pipe(messages)
 
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/Qwen2-7B-Instruct-v0.6")
-model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/Qwen2-7B-Instruct-v0.6")
+tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-2.6-qwen2-7b")
+model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/calme-2.6-qwen2-7b")
 ```
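
The README snippet touched by the last hunk loads the tokenizer and model but stops before generation. A minimal end-to-end sketch of the same usage, assuming half precision on a single GPU and illustrative sampling settings that are not taken from the model card:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "MaziyarPanahi/calme-2.6-qwen2-7b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # assumption: fp16 so a 7B model fits on one GPU
    device_map="auto",
)

# Qwen2 chat fine-tunes ship a chat template; use it to build the prompt.
messages = [{"role": "user", "content": "Who are you?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Illustrative sampling settings; tune for your use case.
output_ids = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```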
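
The GGUF link updated in this diff points to quantized weights that run without transformers, for example via llama-cpp-python. A minimal sketch, assuming that library is installed and guessing a Q4_K_M quant filename (check the GGUF repo for the actual file names):

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Hypothetical quant filename; list the repo's files to pick a real one.
model_path = hf_hub_download(
    repo_id="MaziyarPanahi/calme-2.6-qwen2-7b-GGUF",
    filename="calme-2.6-qwen2-7b.Q4_K_M.gguf",
)

llm = Llama(model_path=model_path, n_ctx=4096)
result = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Who are you?"}],
)
print(result["choices"][0]["message"]["content"])
```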