Update README.md
README.md CHANGED
@@ -29,3 +29,23 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("asif00/bangla-llama-4bit")
 model = AutoModelForCausalLM.from_pretrained("asif00/bangla-llama-4bit")
 ```
+
+
+# To get a cleaned-up version of the response, you can use:
+
+```python
+def generate_response(question, context):
+    inputs = tokenizer([
+        prompt.format(
+            question,
+            context,
+            ""
+        )
+    ], return_tensors="pt").to("cuda")
+
+    outputs = model.generate(**inputs, max_new_tokens=1024, use_cache=True)
+    responses = tokenizer.batch_decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    response_start = responses.find("### Response:") + len("### Response:")
+    response = responses[response_start:].strip()
+    return response
+```
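
For reference, a minimal usage sketch (not part of the commit diff above): it assumes `prompt` is the alpaca-style template string defined earlier in the README, whose three slots take the question, the context, and an empty response that the model fills in after `### Response:`; it also assumes the tokenizer and model are loaded as shown and that a CUDA device is available.

```python
# Hypothetical usage; the `question`/`context` strings are illustrative only.
# `prompt` must be the template defined earlier in the README, ending in a
# "### Response:" section whose completion generate_response() extracts.
question = "What is the capital of Bangladesh?"
context = "Dhaka is the capital and largest city of Bangladesh."

answer = generate_response(question, context)
print(answer)
```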