tahirsher committed
Commit 1946379
1 Parent(s): c8056e9

Update app.py

Files changed (1)
  1. app.py +38 -29
app.py CHANGED
@@ -45,39 +45,51 @@ st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 
 #""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
 
-# Hugging Face access token
-HF_TOKEN = "HF_TOKEN"  # Replace with your actual token or set it as an environment variable
+# Groq API Configuration
+api_key = os.environ.get("LawersGuideAPIKey")  # Ensure the LawersGuideAPIKey environment variable is set
+base_url = "https://api.groq.com/openai/v1/models/google/gemma-2-9b-it/completions"
 
-# Load the text generation pipeline with model and tokenizer
-@st.cache_resource
-def load_text_generation_pipeline():
-    model_name = "google/gemma-2-9b-it"
-    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN)
-    model = AutoModelForCausalLM.from_pretrained(
-        model_name,
-        load_in_8bit=True,
-        device_map="auto",
-        use_auth_token=HF_TOKEN
-    )
-    return pipeline("text-generation", model=model, tokenizer=tokenizer)
-
-text_generator = load_text_generation_pipeline()
+headers = {
+    "Authorization": f"Bearer {api_key}",
+    "Content-Type": "application/json"
+}
 
-# Load the counseling dataset
+# Function to query Groq model
 @st.cache_resource
-def load_counseling_dataset():
-    return load_dataset("Amod/mental_health_counseling_conversations")
-
-dataset = load_counseling_dataset()
+def query_groq_model(prompt, max_tokens=100, temperature=0.7):
+    try:
+        payload = {
+            "prompt": prompt,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+            "top_p": 1.0,
+            "frequency_penalty": 0.0,
+            "presence_penalty": 0.0,
+            "n": 1
+        }
+        response = requests.post(base_url, headers=headers, json=payload)
+        response.raise_for_status()
+        result = response.json()
+        return result["choices"][0]["text"].strip()
+    except Exception as e:
+        return f"Error querying the model: {e}"
 
 # Streamlit App
 st.title("Mental Health Counseling Chat")
 st.markdown("""
 Welcome to the **Mental Health Counseling Chat Application**.
-This platform is designed to provide **supportive, positive, and encouraging responses** using a fast and efficient language model.
+This platform is designed to provide **supportive, positive, and encouraging responses** using the Groq `google/gemma-2-9b-it` model.
 """)
 
-# Display example dataset entries
+# Load example dataset for user exploration (optional)
+@st.cache_resource
+def load_counseling_dataset():
+    from datasets import load_dataset
+    return load_dataset("Amod/mental_health_counseling_conversations")
+
+dataset = load_counseling_dataset()
+
+# Display example questions and answers from dataset
 if st.checkbox("Show Example Questions and Answers from Dataset"):
     sample = dataset["train"].shuffle(seed=42).select(range(3))  # Display 3 random samples
     for example in sample:
@@ -91,16 +103,13 @@ user_input = st.text_area("Your question or concern:", placeholder="Type your qu
 if st.button("Get Supportive Response"):
     if user_input.strip():
        try:
-            # Generate response using the text generation pipeline
+            # Query Groq model
            prompt = f"User: {user_input}\nCounselor:"
-            response = text_generator(prompt, max_length=100, num_return_sequences=1)
-
-            # Extract and display the response
-            counselor_reply = response[0]["generated_text"].strip()
+            counselor_reply = query_groq_model(prompt, max_tokens=150, temperature=0.7)
            st.subheader("Counselor's Response:")
            st.write(counselor_reply)
        except Exception as e:
-            st.error(f"An error occurred while generating the response: {e}")
+            st.error(f"An error occurred while querying the model: {e}")
        else:
            st.error("Please enter a question or concern to receive a response.")
 
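For orientation, the new code builds a JSON payload, posts it with `requests`, and reads the reply from `choices[0].text`. The committed `base_url` addresses a per-model completions path; Groq's OpenAI-compatible API is more commonly reached through a single chat completions endpoint that takes the model name in the request body. The sketch below is a minimal, self-contained variant of that call and is not part of this commit: the endpoint URL, the `gemma2-9b-it` model id, and the `query_groq_chat` helper are assumptions.

```python
# Minimal sketch (not from app.py): same request via Groq's OpenAI-compatible
# chat completions endpoint. Endpoint path and model id are assumptions.
import os

import requests

GROQ_CHAT_URL = "https://api.groq.com/openai/v1/chat/completions"  # assumed endpoint
api_key = os.environ.get("LawersGuideAPIKey")  # same env var the commit reads


def query_groq_chat(prompt: str, max_tokens: int = 150, temperature: float = 0.7) -> str:
    """Send a single-turn chat completion request and return the reply text."""
    if not api_key:
        raise RuntimeError("LawersGuideAPIKey is not set in the environment")
    payload = {
        "model": "gemma2-9b-it",  # assumed Groq model id
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    response = requests.post(GROQ_CHAT_URL, headers=headers, json=payload, timeout=30)
    response.raise_for_status()
    # Chat completions return the text under message.content rather than text
    return response.json()["choices"][0]["message"]["content"].strip()


if __name__ == "__main__":
    print(query_groq_chat("User: I feel anxious before exams.\nCounselor:"))
```

If the per-model URL in `app.py` turns out to be unavailable, a helper along these lines could replace `query_groq_model` without touching the rest of the Streamlit flow. Note also that `@st.cache_resource` on `query_groq_model` caches the returned string per unique argument combination, so identical prompts will reuse the first reply.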