Update README.md
README.md CHANGED
@@ -36,7 +36,7 @@ import torch
 tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-1.3b-base", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-1.3b-base", trust_remote_code=True).cuda()
 input_text = "#write a quick sort algorithm"
-inputs = tokenizer(input_text, return_tensors="pt").
+inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
 outputs = model.generate(**inputs, max_length=128)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 ```
@@ -59,7 +59,7 @@ input_text = """<|fim▁begin|>def quick_sort(arr):
         else:
             right.append(arr[i])
     return quick_sort(left) + [pivot] + quick_sort(right)<|fim▁end|>"""
-inputs = tokenizer(input_text, return_tensors="pt").
+inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
 outputs = model.generate(**inputs, max_length=128)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True)[len(input_text):])
 ```
@@ -144,7 +144,7 @@ from model import IrisClassifier as Classifier
 def main():
     # Model training and evaluation
 """
-inputs = tokenizer(input_text, return_tensors="pt").
+inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
 outputs = model.generate(**inputs, max_new_tokens=140)
 print(tokenizer.decode(outputs[0]))
 ```
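All three hunks apply the same one-line fix: the truncated `tokenizer(...).` expression now ends in `.to(model.device)`, which moves the tokenized input tensors onto whatever device the model occupies. A minimal sketch of the first (code completion) example as it reads after this commit; the `transformers` imports are assumed from the unchanged top of the README (only `import torch` is visible in the hunk header), and the `.cuda()` call on the model requires a GPU:

```python
# Sketch of the completion example after this change. Imports are assumed
# from the unchanged part of the README, not shown in the hunks.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-1.3b-base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-1.3b-base", trust_remote_code=True).cuda()

input_text = "#write a quick sort algorithm"
# The fixed line: tokenize, then move the input tensors to the model's device.
# .to(model.device) follows the model wherever it lives (CPU or GPU), instead
# of leaving the expression dangling or hard-coding a device.
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Without the `.to(...)`, `generate` would fail here with a device-mismatch `RuntimeError`, since the model weights sit on the GPU while the freshly tokenized ids stay on the CPU; `.to(model.device)` also keeps the snippet working if the model is left on CPU.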
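A side note visible across the hunks: the first two examples bound generation with `max_length=128`, which counts prompt plus generated tokens, while the repo-level example uses `max_new_tokens=140`, which counts only the continuation. A small illustration of the difference, reusing the `model` and `inputs` from the sketch above:

```python
# max_length caps prompt + continuation; max_new_tokens caps the continuation
# only. With a long prompt, max_length=128 may leave little room to generate.
prompt_len = inputs["input_ids"].shape[1]

out_total = model.generate(**inputs, max_length=128)      # <= 128 tokens in total
out_fresh = model.generate(**inputs, max_new_tokens=140)  # <= prompt_len + 140 tokens

assert out_total.shape[1] <= 128
assert out_fresh.shape[1] <= prompt_len + 140
```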