BharadhwajS committed on
Commit 0356b85
1 Parent(s): 290ba5f

Upload 2 files

Files changed (2)
  1. app.py +39 -0
  2. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,39 @@
+
+ import os
+ import gradio as gr
+ from langchain_community.llms import HuggingFaceEndpoint
+ from langchain.prompts import PromptTemplate
+
+ # Initialize the chatbot
+ HF_TOKEN = os.getenv("HF_TOKEN")
+ llm = HuggingFaceEndpoint(
+     repo_id="google/gemma-1.1-7b-it",
+     task="text-generation",
+     max_new_tokens=512,
+     top_k=5,
+     temperature=0.1,
+     repetition_penalty=1.03,
+     huggingfacehub_api_token=HF_TOKEN
+ )
+ template = """
+ You are a Mental Health Chatbot. Help the user with their mental health concerns.
+ Use the context below to answer the questions: {context}
+ Question: {question}
+ Helpful Answer:"""
+ QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
+
+ def predict(message, history):
+     input_prompt = QA_CHAIN_PROMPT.format(question=message, context=history)
+     result = llm.generate([input_prompt])
+     print(result)  # Print the result for inspection
+
+     # Access the generated text on the returned LLMResult
+     if result.generations:
+         ai_msg = result.generations[0][0].text
+     else:
+         ai_msg = "I'm sorry, I couldn't generate a response for that input."
+
+     return ai_msg
+
+
+ gr.ChatInterface(predict).launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ langchain
+ gradio
+ langchain-community
+ chromadb
+ wandb
+ transformers