Update README.md
README.md CHANGED
@@ -7,6 +7,10 @@ language:
 metrics:
 - bleu
 pipeline_tag: text-generation
+base_model: huggyllama/llama-30b
+tags:
+- africa
+- languages
 ---
 
 # Kongo Llama Experiment
@@ -41,7 +45,32 @@ This is the model card of a 🤗 transformers model that has been pushed on the
 
 ## Uses
 
-
+```py
+from transformers import AutoTokenizer, LlamaForCausalLM
+
+# Load the model (the tokenizer is assumed to be saved in the same checkpoint)
+model = LlamaForCausalLM.from_pretrained('/content/kongo-llama/checkpoint-9000')
+wrapped_tokenizer = AutoTokenizer.from_pretrained('/content/kongo-llama/checkpoint-9000')
+
+# Prepare input text
+text = "Nzambi "
+inputs = wrapped_tokenizer(text, return_tensors="pt")
+
+# Generate text
+generated_ids = model.generate(
+    **inputs,          # Pass the tokenized prompt to the model
+    max_length=150,    # Increased length
+    num_beams=5,       # Use beam search
+    temperature=0.7,   # Adjust temperature for creativity
+    do_sample=True,
+    top_k=50,          # Limit vocabulary for next token
+    top_p=0.95         # Nucleus sampling
+)
+
+# Decode and print the generated text
+generated_text = wrapped_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
+print(generated_text)
+```
 
 ### Direct Use
 