
Example usage: loading stabilityai/StableBeluga-13B with 🤗 Transformers and generating a completion. (The unused `pipeline` import from the original snippet has been dropped.)

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and the fp16 model, sharding it across available devices.
tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga-13B", use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    "stabilityai/StableBeluga-13B",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    device_map="auto",
)

# StableBeluga expects a "### System / ### User / ### Assistant" prompt format.
system_prompt = "### System:\nYou are Stable Beluga 13B, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"

message = "Write me a poem please"
prompt = f"{system_prompt}### User: {message}\n\n### Assistant:\n"

# Tokenize the prompt, move it to the GPU, and sample a completion.
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)

print(tokenizer.decode(output[0], skip_special_tokens=True))
```
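The original snippet also imported `pipeline` without using it. For reference, here is a minimal sketch of the same generation through the high-level `pipeline` API, reusing the `model`, `tokenizer`, and `prompt` defined above; the sampling arguments are standard `transformers` generation parameters:

```python
from transformers import pipeline

# Wrap the already-loaded model and tokenizer in a text-generation pipeline.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

result = generator(
    prompt,                  # same "### System / ### User / ### Assistant" prompt as above
    do_sample=True,
    top_p=0.95,
    top_k=0,
    max_new_tokens=256,
    return_full_text=False,  # return only the newly generated assistant reply
)
print(result[0]["generated_text"])
```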