Edit model card

Model Trained Using AutoTrain

This model was trained using AutoTrain. For more information, please visit AutoTrain.

Usage


from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    Trainer,
    BitsAndBytesConfig,
)

from peft import PeftModel, PeftConfig
# Load the PEFT (LoRA) adapter config, the base Llama-3 8B model, and the
# tokenizer, then attach the adapter weights on top of the base model.
# NOTE(review): requires a CUDA device and access to the gated
# meta-llama/Meta-Llama-3-8B weights.
config = PeftConfig.from_pretrained("trottdw/CE104FinalLlama3Orca")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
model = model.to('cuda:0')
lora_model = PeftModel.from_pretrained(model, "trottdw/CE104FinalLlama3Orca")
# Bug fix: the original line was missing the closing parenthesis, which made
# the whole snippet a syntax error.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B", use_fast=True)

def prompt(text, tmp):
    """Generate a completion for ``text`` at sampling temperature ``tmp``.

    Parameters
    ----------
    text : str
        The prompt to feed the model.
    tmp : float
        Temperature passed through to ``generate``.

    Returns
    -------
    str
        The decoded generation (special tokens stripped).
    """
    model_inputs = tokenizer(text, return_tensors="pt").to("cuda:0")
    output = lora_model.generate(**model_inputs, temperature=tmp)
    # Bug fix: the original did `return print(...)`, which returns None, so
    # the caller's print() emitted the text followed by a spurious "None".
    # Return the string and let the caller decide how to display it.
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(prompt('Describe the solar system', 0.7))
Downloads last month

-

Downloads are not tracked for this model. See "How to track" in the Hugging Face Hub documentation.