Chandranshu Jain committed on
Commit
6977def
1 Parent(s): d4c2024

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -80,14 +80,14 @@ def get_conversational_chain():
80
  Answer:
81
  """
82
  #model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3, google_api_key=GOOGLE_API_KEY)
83
- #repo_id ='google/gemma-1.1-2b-it'
84
  #repo_id='meta-llama/Meta-Llama-3-70B'
85
- #llm = HuggingFaceEndpoint(
86
- #repo_id=repo_id, max_length=512, temperature=0.5, token=HUGGING_FACE_API_KEY)
87
  #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
88
  #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
89
 
90
- llm = pipeline("text-generation", model="google/gemma-1.1-2b-it")
91
 
92
  pt = ChatPromptTemplate.from_template(template)
93
  # Retrieve and generate using the relevant snippets of the blog.
 
80
  Answer:
81
  """
82
  #model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3, google_api_key=GOOGLE_API_KEY)
83
+ repo_id ='google/gemma-1.1-2b-it'
84
  #repo_id='meta-llama/Meta-Llama-3-70B'
85
+ llm = HuggingFaceEndpoint(
86
+ repo_id=repo_id, max_length=512, temperature=0.3, token=HUGGING_FACE_API_KEY)
87
  #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
88
  #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
89
 
90
+ #llm = pipeline("text-generation", model="google/gemma-1.1-2b-it")
91
 
92
  pt = ChatPromptTemplate.from_template(template)
93
  # Retrieve and generate using the relevant snippets of the blog.