NickyNicky committed on
Commit 3e0930b
1 Parent(s): b38cc65

Update README.md

Files changed (1)
  1. README.md +69 -0
README.md CHANGED
@@ -62,4 +62,73 @@ slices:
   parameters:
     density: 0.55
     weight: 0.56
 ```
+
+```python
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    GenerationConfig,
+)
+import torch
+
+new_model = "NickyNicky/TinyDolphin-2.8-1.1b_oasst2_chatML_all_Cluster_merge_v1"
+
+# Load the merged model in bfloat16 and let accelerate place it on the
+# available device(s).
+model = AutoModelForCausalLM.from_pretrained(
+    new_model,
+    device_map="auto",
+    trust_remote_code=True,
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+)
+
+tokenizer = AutoTokenizer.from_pretrained(
+    new_model,
+    max_length=2048,
+    trust_remote_code=True,
+    use_fast=True,
+)
+tokenizer.pad_token = tokenizer.eos_token
+tokenizer.padding_side = "right"
+
+# ChatML-formatted prompt.
+prompt = """<|im_start|>system
+You are a helpful AI assistant.<|im_end|>
+<|im_start|>user
+Write a love story.<|im_end|>
+<|im_start|>assistant
+"""
+
+inputs = tokenizer.encode(
+    prompt,
+    return_tensors="pt",
+    add_special_tokens=False,  # the ChatML control tokens are already in the prompt
+).to(model.device)
+
+generation_config = GenerationConfig(
+    max_new_tokens=700,
+    temperature=0.5,
+    top_p=0.9,
+    top_k=40,
+    repetition_penalty=1.1,  # 1.0 means no penalty; 1.2 suggested by the CTRL paper
+    do_sample=True,
+    pad_token_id=tokenizer.eos_token_id,
+    eos_token_id=tokenizer.eos_token_id,
+)
+
+outputs = model.generate(
+    input_ids=inputs,
+    generation_config=generation_config,
+)
+print(tokenizer.decode(outputs[0], skip_special_tokens=False))
+```
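
The upstream import list also pulled in `TextIteratorStreamer` without using it. For token-by-token output, a minimal streaming sketch (the standard `transformers` pattern: run `generate` on a background thread and iterate over decoded chunks; variable names reuse the snippet above):

```python
from threading import Thread

from transformers import TextIteratorStreamer

# The streamer yields decoded text fragments as generate() produces tokens.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

thread = Thread(
    target=model.generate,
    kwargs={
        "input_ids": inputs,
        "generation_config": generation_config,
        "streamer": streamer,
    },
)
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)  # print text as it arrives
thread.join()
```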
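
Alternatively, if the merged tokenizer defines a ChatML chat template in its `tokenizer_config.json` (an assumption; the base models are ChatML-tuned), the prompt can be built with `apply_chat_template` instead of hand-writing the `<|im_start|>` markers:

```python
# Sketch: build the same ChatML prompt via the tokenizer's chat template.
# Assumes tokenizer.chat_template is set; otherwise use the manual string above.
messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Write a love story."},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,  # appends the assistant header for completion
    return_tensors="pt",
).to(model.device)
outputs = model.generate(input_ids=inputs, generation_config=generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```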