oguzhandoganoglu committed on
Commit
e6462f9
1 Parent(s): ac03e55

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +40 -0
README.md CHANGED
@@ -25,3 +25,43 @@ TruthfulQA_tr
25
  Winogrande_tr
26
  GSM8k_tr
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  Winogrande_tr
26
  GSM8k_tr
27
 
28
+ ## Usage Examples
29
+
30
+ ```python
31
+
32
+ from transformers import AutoModelForCausalLM, AutoTokenizer
33
+ device = "cuda" # the device to load the model onto
34
+
35
+ model = AutoModelForCausalLM.from_pretrained(
36
+ "Cerebrum/cere-llama-3-8b-tr",
37
+ torch_dtype="auto",
38
+ device_map="auto"
39
+ )
40
+ tokenizer = AutoTokenizer.from_pretrained("Cerebrum/cere-llama-3-8b-tr")
41
+
42
+ prompt = "Python'da ekrana 'Merhaba Dünya' nasıl yazılır?"
43
+ messages = [
44
+ {"role": "system", "content": "Sen, Cerebrum Tech tarafından üretilen ve verilen talimatları takip ederek en iyi cevabı üretmeye çalışan yardımcı bir yapay zekasın."},
45
+ {"role": "user", "content": prompt}
46
+ ]
47
+ text = tokenizer.apply_chat_template(
48
+ messages,
49
+ tokenize=False,
50
+ add_generation_prompt=True
51
+ )
52
+ model_inputs = tokenizer([text], return_tensors="pt").to(device)
53
+
54
+ generated_ids = model.generate(
55
+ model_inputs.input_ids,
56
+ temperature=0.3,
57
+ top_k=50,
58
+ top_p=0.9,
59
+ max_new_tokens=512,
60
+ repetition_penalty=1,
61
+ )
62
+ generated_ids = [
63
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
64
+ ]
65
+
66
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
67
+ ```