
## Model Generation

Load the model and tokenizer, then generate a response from a chat-formatted prompt:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "AidenU/Mistral-7b-ko-Y24-DPO_v0.1",
    device_map="auto",  # places the model on the available GPU(s)
)
tokenizer = AutoTokenizer.from_pretrained("AidenU/Mistral-7b-ko-Y24-DPO_v0.1")

messages = [
    {"role": "user", "content": "안녕하세요?"}  # "Hello?"
]

# apply_chat_template formats the messages into the model's prompt template
encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")
inputs = encodeds.to(model.device)

outputs = model.generate(
    inputs,
    max_new_tokens=256,
    do_sample=True,
)

decoded = tokenizer.batch_decode(outputs)
print(decoded[0])
```
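
Since `do_sample=True` is used, `generate` also accepts the standard sampling controls. A minimal variant is sketched below; the `temperature` and `top_p` values are illustrative, not settings recommended for this model, and `skip_special_tokens=True` simply strips the chat-template markers from the decoded text:

```python
outputs = model.generate(
    inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,  # illustrative value, not tuned for this model
    top_p=0.9,        # illustrative value, not tuned for this model
    pad_token_id=tokenizer.eos_token_id,
)

# skip_special_tokens removes template/control tokens from the output
decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(decoded[0])
```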