ekshat committed
Commit ea2108a
1 Parent(s): a395344

Update README.md

Files changed (1)
  1. README.md +5 -4
README.md CHANGED
@@ -15,13 +15,13 @@ Our Model is fine tuned on Llama-2 7B model on text-2-sql Dataset on alpaca form
 
 # Inference
 ```python
-!pip install transformers accelerate xformers
+!pip install transformers accelerate xformers bitsandbytes
 
-# Load model directly
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 tokenizer = AutoTokenizer.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")
-model = AutoModelForCausalLM.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")
+# Load model directly
+model = AutoModelForCausalLM.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql", load_in_4bit=True)
 
 context = "CREATE TABLE head (name VARCHAR, born_state VARCHAR, age VARCHAR)"
 question = "List the name, born state and age of the heads of departments ordered by age."
@@ -32,7 +32,8 @@ prompt = f"""Below is an context that describes a sql query, paired with an ques
 ### Question:
 {question}
 ### Answer:"""
+
 pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
 result = pipe(prompt)
-print(result[0]['generated_text'])
+print(result[0]['generated_text'])
 ```
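Note on the new `load_in_4bit=True` kwarg: recent transformers releases prefer passing 4-bit settings through a `BitsAndBytesConfig` object rather than as a bare argument to `from_pretrained`. A minimal sketch of the equivalent load under that newer API, assuming bitsandbytes is installed as in the updated pip line; `device_map="auto"` is an added assumption, not part of this README:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

# 4-bit quantization config; stands in for the bare load_in_4bit=True kwarg
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")
model = AutoModelForCausalLM.from_pretrained(
    "ekshat/Llama-2-7b-chat-finetune-for-text2sql",
    quantization_config=bnb_config,
    device_map="auto",  # assumption: place weights automatically; needs accelerate from the pip install
)

# Same generation setup as in the README
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
```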