tahirsher committed
Commit 3a64fb1
1 Parent(s): be8b77d

Update app.py

Files changed (1):
  app.py +53 -12
app.py CHANGED
@@ -1,4 +1,8 @@
 import streamlit as st
+from transformers import AutoTokenizer
+from peft import PeftModel, PeftConfig
+from transformers import AutoModelForCausalLM
+from datasets import load_dataset
 
 # Replace with the direct image URL
 flower_image_url = "https://i.postimg.cc/hG2FG85D/2.png"
@@ -39,18 +43,55 @@ st.markdown(
 # Add the blurred background div
 st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 
-# Streamlit UI elements
-st.title("Word Cloud Application")
-st.markdown("Welcome to the Word Cloud Application with a blurred, centered flower background!")
-
-# Add some interactivity
-if st.button("Click Me"):
-    st.write("You clicked the button!")
-
-# Add a select box
-option = st.selectbox(
-    "Choose an option:",
-    ["Option 1", "Option 2", "Option 3"]
-)
-
-st.write(f"You selected: {option}")
+# Load the fine-tuned model and tokenizer
+@st.cache_resource
+def load_model_and_tokenizer():
+    config = PeftConfig.from_pretrained("zementalist/llama-3-8B-chat-psychotherapist")
+    base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+    model = PeftModel.from_pretrained(base_model, "zementalist/llama-3-8B-chat-psychotherapist")
+    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+    return model, tokenizer
+
+model, tokenizer = load_model_and_tokenizer()
+
+# Load dataset for reference (optional)
+@st.cache_resource
+def load_dataset_reference():
+    return load_dataset("Amod/mental_health_counseling_conversations")
+
+dataset = load_dataset_reference()
+
+# Streamlit App Configuration
+st.title("Mental Well-Being Support Application")
+st.markdown("""
+Welcome to the Mental Well-Being Support Application. This platform is designed to provide positive, supportive, and encouraging responses to your mental health concerns. Our responses are powered by a fine-tuned AI model based on expert psychologists' answers.
+""")
+
+# User Input Section
+st.header("Your Mental Health Journey")
+user_query = st.text_area("Please share your thoughts or questions:", placeholder="Write here...")
+
+# Generate AI Response
+if st.button("Get Supportive Response"):
+    if user_query.strip():
+        # Generate response
+        inputs = tokenizer(f"User: {user_query}\nAI:", return_tensors="pt")
+        outputs = model.generate(inputs.input_ids, max_length=200, temperature=0.7, num_return_sequences=1)
+        ai_response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("AI:")[-1].strip()
+
+        # Display the response
+        st.subheader("Your Supportive Response:")
+        st.write(ai_response)
+    else:
+        st.error("Please enter a question or concern to get a response.")
+
+# Additional Resources Section
+st.sidebar.header("Resources")
+st.sidebar.markdown("""
+- [Mental Health Foundation](https://www.mentalhealth.org)
+- [Mind](https://www.mind.org.uk)
+- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
+""")
+
+# Footer
+st.sidebar.info("This application is not a replacement for professional help. If you're in crisis, please contact a mental health professional.")