tahirsher committed (verified)
Commit 5878a82 · Parent(s): cca43a4

Update app.py

Files changed (1):
  1. app.py +46 -53
app.py CHANGED
@@ -1,9 +1,7 @@
 import streamlit as st
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 #from transformers import AutoTokenizer
 #from llama_cpp import Llama
-from transformers import pipeline
-#from peft import PeftModel, PeftConfig
-#from transformers import AutoModelForCausalLM
 from datasets import load_dataset
 
 # Replace with the direct image URL
@@ -48,22 +46,22 @@ st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 #""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
 
 # Hugging Face access token
-HF_TOKEN = "HF_TOKEN" # Replace with your actual token or use environment variables
+HF_TOKEN = "HF_TOKEN" # Replace with your actual token or set it as an environment variable
 
-# Load the text generation model pipeline with authentication
+# Load the text generation pipeline with model and tokenizer
 @st.cache_resource
-def load_text_generation_model():
-    try:
-        return pipeline(
-            "text-generation",
-            model="google/gemma-2-9b-it",
-            use_auth_token=HF_TOKEN
-        )
-    except Exception as e:
-        st.error(f"Error loading model: {e}")
-        return None
+def load_text_generation_pipeline():
+    model_name = "google/gemma-2-9b-it"
+    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_name,
+        load_in_8bit=True,
+        device_map="auto",
+        use_auth_token=HF_TOKEN
+    )
+    return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-text_generator = load_text_generation_model()
+text_generator = load_text_generation_pipeline()
 
 # Load the counseling dataset
 @st.cache_resource
@@ -79,43 +77,38 @@ Welcome to the **Mental Health Counseling Chat Application**.
 This platform is designed to provide **supportive, positive, and encouraging responses** using a fast and efficient language model.
 """)
 
-# Check if the model loaded correctly
-if text_generator is None:
-    st.error("The text generation model could not be loaded. Please check your Hugging Face configuration.")
-else:
-    # Explore dataset for additional context or resources (optional)
-    if st.checkbox("Show Example Questions and Answers from Dataset"):
-        sample = dataset["train"].shuffle(seed=42).select(range(3)) # Display 3 random samples
-        for example in sample:
-            st.markdown(f"**Question:** {example.get('context', 'N/A')}")
-            st.markdown(f"**Answer:** {example.get('response', 'N/A')}")
-            st.markdown("---")
+# Display example dataset entries
+if st.checkbox("Show Example Questions and Answers from Dataset"):
+    sample = dataset["train"].shuffle(seed=42).select(range(3)) # Display 3 random samples
+    for example in sample:
+        st.markdown(f"**Question:** {example.get('context', 'N/A')}")
+        st.markdown(f"**Answer:** {example.get('response', 'N/A')}")
+        st.markdown("---")
 
-    # User input for mental health concerns
-    user_input = st.text_area("Your question or concern:", placeholder="Type your question here...")
+# User input for mental health concerns
+user_input = st.text_area("Your question or concern:", placeholder="Type your question here...")
 
-    if st.button("Get Supportive Response"):
-        if user_input.strip():
-            try:
-                # Generate response using the text generation pipeline
-                prompt = f"User: {user_input}\nCounselor:"
-                response = text_generator(prompt, max_length=200, num_return_sequences=1)
-
-                # Extract and display the response
-                counselor_reply = response[0]["generated_text"].strip()
-                st.subheader("Counselor's Response:")
-                st.write(counselor_reply)
-            except Exception as e:
-                st.error(f"An error occurred while generating the response: {e}")
-        else:
-            st.error("Please enter a question or concern to receive a response.")
+if st.button("Get Supportive Response"):
+    if user_input.strip():
+        try:
+            # Generate response using the text generation pipeline
+            prompt = f"User: {user_input}\nCounselor:"
+            response = text_generator(prompt, max_length=100, num_return_sequences=1)
+
+            # Extract and display the response
+            counselor_reply = response[0]["generated_text"].strip()
+            st.subheader("Counselor's Response:")
+            st.write(counselor_reply)
+        except Exception as e:
+            st.error(f"An error occurred while generating the response: {e}")
+    else:
+        st.error("Please enter a question or concern to receive a response.")
 
-    # Sidebar resources
-    st.sidebar.header("Additional Mental Health Resources")
-    st.sidebar.markdown("""
-    - [Mental Health Foundation](https://www.mentalhealth.org)
-    - [Mind](https://www.mind.org.uk)
-    - [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
-    """)
-
-    st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
+# Sidebar resources
+st.sidebar.header("Additional Mental Health Resources")
+st.sidebar.markdown("""
+- [Mental Health Foundation](https://www.mentalhealth.org)
+- [Mind](https://www.mind.org.uk)
+- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
+""")
+st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
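Note on the access token: the commit keeps HF_TOKEN = "HF_TOKEN" as a hardcoded placeholder. Since the inline comment already points at environment variables, a minimal sketch of that approach follows; the variable name HF_TOKEN and the use of a Space secret are assumptions, not part of the commit.

import os

import streamlit as st

# Assumed setup: the token lives in an environment variable named HF_TOKEN.
# On Hugging Face Spaces, a repository secret with that name is exposed to
# the running app as an environment variable, so no token appears in app.py.
HF_TOKEN = os.environ.get("HF_TOKEN")

if not HF_TOKEN:
    st.error("HF_TOKEN is not set. Add it as a Space secret or export it before starting the app.")
    st.stop()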
 
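Note on the loader: load_in_8bit=True and device_map="auto" passed directly to from_pretrained rely on the bitsandbytes and accelerate packages plus a CUDA GPU, and recent transformers releases deprecate use_auth_token in favour of token and prefer an explicit BitsAndBytesConfig. The sketch below is a variant of the committed loader under those assumptions, not a drop-in requirement; exact keyword support depends on the installed transformers version.

import streamlit as st
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig, pipeline)

MODEL_NAME = "google/gemma-2-9b-it"  # same model as in the commit


@st.cache_resource
def load_text_generation_pipeline(hf_token: str):
    # Same structure as the committed loader, but with the newer `token`
    # keyword and an explicit 8-bit quantization config. Requires
    # bitsandbytes, accelerate, and a GPU with enough memory for a 9B model.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        device_map="auto",
        token=hf_token,
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


# HF_TOKEN as read in the previous sketch
text_generator = load_text_generation_pipeline(HF_TOKEN)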
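Note on generation: with max_length=100 the limit counts the prompt tokens as well, so a long question can leave little room for the reply, and the pipeline's "generated_text" field contains the prompt followed by the continuation, so the current code echoes the user's question back. The sketch below bounds only the new tokens and drops the prompt; generate_reply is a hypothetical helper name, while the prompt format and display calls come from the commit.

def generate_reply(text_generator, user_input: str) -> str:
    # Prompt format from the commit; generation settings are illustrative.
    prompt = f"User: {user_input}\nCounselor:"
    response = text_generator(
        prompt,
        max_new_tokens=200,      # bounds only the reply, not prompt + reply
        num_return_sequences=1,
        return_full_text=False,  # omit the prompt from "generated_text"
    )
    return response[0]["generated_text"].strip()


# Usage inside the button handler from the commit:
#     counselor_reply = generate_reply(text_generator, user_input)
#     st.subheader("Counselor's Response:")
#     st.write(counselor_reply)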