Chandranshu Jain committed on
Commit
068748f
1 Parent(s): 6933749

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -2
app.py CHANGED
@@ -14,6 +14,8 @@ from langchain_community.document_loaders import PyPDFLoader
14
  from langchain_chroma import Chroma
15
  from langchain_community.vectorstores import Chroma
16
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
 
17
 
18
  #st.set_page_config(page_title="Document Genie", layout="wide")
19
 
@@ -81,8 +83,17 @@ def get_conversational_chain():
81
  #repo_id='meta-llama/Meta-Llama-3-70B'
82
  #llm = HuggingFaceEndpoint(
83
  #repo_id=repo_id, max_length=512, temperature=0.5, token=HUGGING_FACE_API_KEY)
84
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
85
- llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
 
 
 
 
 
 
 
 
 
86
 
87
  pt = ChatPromptTemplate.from_template(template)
88
  # Retrieve and generate using the relevant snippets of the blog.
 
14
  from langchain_chroma import Chroma
15
  from langchain_community.vectorstores import Chroma
16
  from transformers import AutoTokenizer, AutoModelForCausalLM
17
+ import transformers
18
+ import torch
19
 
20
  #st.set_page_config(page_title="Document Genie", layout="wide")
21
 
 
83
  #repo_id='meta-llama/Meta-Llama-3-70B'
84
  #llm = HuggingFaceEndpoint(
85
  #repo_id=repo_id, max_length=512, temperature=0.5, token=HUGGING_FACE_API_KEY)
86
+ #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
87
+ #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
88
+
89
+ model_id = "google/gemma-1.1-2b-it"
90
+ dtype = torch.bfloat16
91
+
92
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
93
+ llm= AutoModelForCausalLM.from_pretrained(
94
+ model_id,
95
+ torch_dtype=dtype,
96
+ )
97
 
98
  pt = ChatPromptTemplate.from_template(template)
99
  # Retrieve and generate using the relevant snippets of the blog.