## Usage

The example below loads the model and tokenizer, formats a chat prompt with the tokenizer's chat template, and generates a response.
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = 'datapaf/fvt_ift_rus'

# Load the tokenizer and the model (bfloat16 weights, automatic device placement)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map='auto'
)

# Build the chat prompt (Russian: "You are an AI assistant, answer the question" / "Hi! How are you?")
chat = [
    {"role": "system", "content": "Ты AI-помощник, ответь на вопрос"},
    {"role": "user", "content": "Привет! Как дела?"},
]

# Render the chat with the model's chat template, then tokenize it
templated = tokenizer.apply_chat_template(chat, tokenize=False)
encoded = tokenizer(templated, return_tensors="pt", add_special_tokens=True)
inputs = {key: tensor.to(model.device) for key, tensor in encoded.items()}

# Greedy decoding with a mild repetition penalty
output = model.generate(
    **inputs,
    max_new_tokens=1024,
    do_sample=False,
    repetition_penalty=1.2
)

# Decode only the newly generated tokens (skip the prompt plus two template tokens)
decoded_output = tokenizer.decode(
    output[0][inputs['input_ids'].size(1) + 2:],
    skip_special_tokens=True
)
print(decoded_output)
```
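If the tokenizer's chat template defines a generation prompt, the prompt can also be tokenized in a single step with `apply_chat_template`, which avoids the manual `+2` offset when slicing out the reply. This is a minimal sketch rather than part of the original recipe; it assumes the template supports `add_generation_prompt=True` and reuses the `chat`, `tokenizer`, and `model` objects defined above.

```python
# Alternative sketch (not from the original card): tokenize the chat directly
# and append the assistant generation prompt, so the reply can be sliced off
# cleanly at the prompt boundary.
input_ids = tokenizer.apply_chat_template(
    chat,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

output = model.generate(
    input_ids,
    max_new_tokens=1024,
    do_sample=False,
    repetition_penalty=1.2
)

# Decode only the tokens generated after the prompt
reply = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(reply)
```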