Crystalcareai committed
Commit 4cc235f
1 Parent(s): 4b0d6fb

Update README.md

Files changed (1)
  1. README.md +5 -3
README.md CHANGED
@@ -63,14 +63,16 @@ To run inference with the Llama-3-SEC model using the llama3 chat template, use
 
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
-device = "cuda"
+device = "cuda"
+
+model_name = "arcee-ai/Llama-3-SEC"
 
 model = AutoModelForCausalLM.from_pretrained(
-    "arcee-ai/Llama-3-SEC",
+    "model_name",
     torch_dtype="auto",
     device_map="auto"
 )
-tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-72B-Instruct")
+tokenizer = AutoTokenizer.from_pretrained("model_name")
 
 prompt = "What are the key regulatory considerations for a company planning to conduct an initial public offering (IPO) in the United States?"
 messages = [
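
Note that the lines added in this commit pass the quoted string `"model_name"` to `from_pretrained`, not the `model_name` variable defined two lines earlier, so the snippet as committed would try to load a model literally named "model_name". The hunk also cuts off at `messages = [`. Below is a minimal runnable sketch of the apparent intent; the system prompt, chat-template call, and generation settings after `messages = [` are not part of this hunk and are assumed from typical transformers usage with llama3-style chat templates.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # defined in the commit; device_map="auto" actually handles placement
model_name = "arcee-ai/Llama-3-SEC"

# Pass the variable itself, not the string "model_name".
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

prompt = "What are the key regulatory considerations for a company planning to conduct an initial public offering (IPO) in the United States?"
messages = [
    {"role": "user", "content": prompt},  # roles/content beyond the prompt are assumed
]

# Everything from here on is not shown in the hunk; it follows the usual
# transformers chat-template pattern for llama3-style instruct models.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=512)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```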