Chandranshu Jain committed
Commit dad94f5
1 Parent(s): af47347

Update app.py

Files changed (1):
app.py +8 -4
app.py CHANGED
@@ -1,6 +1,5 @@
 import streamlit as st
 from langchain_community.llms import HuggingFaceEndpoint
-from langchain.embeddings import HuggingFaceEmbeddings
 from langchain_core.runnables import RunnablePassthrough
 from langchain_core.output_parsers import StrOutputParser
 from langchain.prompts import ChatPromptTemplate
@@ -13,8 +12,13 @@ from langchain.prompts import PromptTemplate
 from langchain_community.document_loaders import PyPDFLoader
 from langchain_chroma import Chroma
 from langchain_community.vectorstores import Chroma
+from langchain_community.embeddings import HuggingFaceEmbeddings
+# Load model directly
+from transformers import AutoModelForCausalLM


+access_token = os.getenv("HUGGINGFACE_API_KEY")
+


 #st.set_page_config(page_title="Document Genie", layout="wide")
@@ -63,7 +67,7 @@ def text_splitter(text):

 #GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
 #COHERE_API_KEY = os.getenv("COHERE_API_KEY")
-HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
+#HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

 def get_conversational_chain():
     prompt_template = """
@@ -82,10 +86,10 @@ def get_conversational_chain():
     #repo_id ='google/gemma-1.1-2b-it'
     #repo_id='meta-llama/Meta-Llama-3-70B'
     repo_id = 'microsoft/Phi-3-mini-4k-instruct'
-    llm = HuggingFaceEndpoint(repo_id=repo_id, max_length=512, temperature=0.3, token=HUGGINGFACE_API_KEY)
+    #llm = HuggingFaceEndpoint(repo_id=repo_id, max_length=512, temperature=0.3, token=HUGGINGFACE_API_KEY)
     #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
     #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
-
+    llm = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True, token=access_token)
     #llm = pipeline("text-generation", model="google/gemma-1.1-2b-it")

     pt = ChatPromptTemplate.from_template(prompt_template)
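
Note that after this change `llm` is a raw `transformers` model object rather than a LangChain-compatible LLM, so it cannot be composed into a chain like `pt | llm | StrOutputParser()` the way the old `HuggingFaceEndpoint` could. A minimal sketch of one way to bridge the gap, assuming the `HuggingFacePipeline` wrapper from `langchain_community` (not used in this commit) and generation settings carried over from the old endpoint call:

import os
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain_community.llms import HuggingFacePipeline

access_token = os.getenv("HUGGINGFACE_API_KEY")
repo_id = "microsoft/Phi-3-mini-128k-instruct"

# Load the model and its tokenizer locally, as the commit does for the model.
tokenizer = AutoTokenizer.from_pretrained(repo_id, token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, trust_remote_code=True, token=access_token
)

# Wrap the raw model in a text-generation pipeline so LangChain can call it;
# max_new_tokens/temperature here are assumptions mirroring the old
# HuggingFaceEndpoint settings (max_length=512, temperature=0.3).
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.3,
)
llm = HuggingFacePipeline(pipeline=pipe)

With such a wrapper in place, the prompt template and output parser already imported in app.py would compose as usual, e.g. chain = pt | llm | StrOutputParser().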