tahirsher committed
Commit adf24b1
1 Parent(s): e733189

Update app.py

Files changed (1)
  1. app.py +14 -16
app.py CHANGED
@@ -1,7 +1,7 @@
 import streamlit as st
 #from transformers import AutoTokenizer
 from llama_cpp import Llama
-#from transformers import pipeline
+from transformers import pipeline
 #from peft import PeftModel, PeftConfig
 #from transformers import AutoModelForCausalLM
 from datasets import load_dataset
@@ -45,24 +45,20 @@ st.markdown(
 # Add the blurred background div
 st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 
-""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
+#""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
 
-# Path to the GGUF model on Hugging Face (Hugging Face Spaces automatically downloads it)
-MODEL_PATH = "/root/.cache/huggingface/hub/models--QuantFactory--Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF/blobs/"
-
-# Load Llama model
+# Load the text generation model pipeline
 @st.cache_resource
-def load_llama_model():
+def load_text_generation_model():
     try:
-        # The GGUF model will be cached automatically by llama.cpp
-        return Llama(model_path=f"{MODEL_PATH}/model.gguf", n_threads=8)  # Adjust n_threads based on your environment
+        return pipeline("text-generation", model="QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF")
     except Exception as e:
         st.error(f"Error loading model: {e}")
         return None
 
-llama_model = load_llama_model()
+text_generator = load_text_generation_model()
 
-# Load dataset for context
+# Load the counseling dataset
 @st.cache_resource
 def load_counseling_dataset():
     return load_dataset("Amod/mental_health_counseling_conversations")
@@ -77,8 +73,8 @@ This platform is designed to provide **supportive, positive, and encouraging res
 """)
 
 # Check if the model loaded correctly
-if llama_model is None:
-    st.error("The text generation model could not be loaded. Please check your configuration.")
+if text_generator is None:
+    st.error("The text generation model could not be loaded. Please check your Hugging Face configuration.")
 else:
     # Explore dataset for additional context or resources (optional)
     if st.checkbox("Show Example Questions and Answers from Dataset"):
@@ -94,12 +90,14 @@ else:
     if st.button("Get Supportive Response"):
         if user_input.strip():
             try:
-                # Generate response using Llama
+                # Generate response using the text generation pipeline
                 prompt = f"User: {user_input}\nCounselor:"
-                response = llama_model(prompt, max_tokens=200, stop=["\n", "User:"])
+                response = text_generator(prompt, max_length=200, num_return_sequences=1)
 
+                # Extract and display the response
+                counselor_reply = response[0]["generated_text"].split("Counselor:")[-1].strip()
                 st.subheader("Counselor's Response:")
-                st.write(response["choices"][0]["text"].strip())
+                st.write(counselor_reply)
             except Exception as e:
                 st.error(f"An error occurred while generating the response: {e}")
             else:
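Note on the new loading path: this repo ships llama.cpp-format GGUF weights, and transformers' pipeline generally cannot load a GGUF-only repo by name alone (newer transformers releases can dequantize some GGUF checkpoints via a gguf_file argument, but that is not what this call does), so the commit may trade the broken hard-coded cache path for a load-time failure. If the goal was only to avoid hard-coding the path, llama-cpp-python can download the file from the Hub itself. A minimal sketch, assuming llama-cpp-python and huggingface-hub are installed; the quantization filename is an assumption about the repo's file list:

# Sketch: keep llama-cpp-python but let it fetch the GGUF from the Hub
# instead of hard-coding the local HF cache path (needs huggingface-hub).
import streamlit as st
from llama_cpp import Llama

@st.cache_resource
def load_llama_model():
    try:
        return Llama.from_pretrained(
            repo_id="QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF",
            filename="*Q4_K_M.gguf",  # glob pattern; assumed quantization level, check the repo
            n_threads=8,              # tune to the Space's CPU allocation
        )
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None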
 
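If the pipeline does load, two details of the generation call deserve attention: max_length=200 counts prompt tokens as well as new ones, so a long user message can leave almost no room for the reply, and generated_text echoes the prompt by default, which is why the code splits on "Counselor:". A sketch of the same call using max_new_tokens and return_full_text=False, both standard text-generation pipeline parameters, which would make the splitting step unnecessary:

# Assumes text_generator and prompt as defined in app.py above.
response = text_generator(
    prompt,
    max_new_tokens=200,      # budget for the reply alone, independent of prompt length
    num_return_sequences=1,
    return_full_text=False,  # return only the completion, without echoing the prompt
)
counselor_reply = response[0]["generated_text"].strip()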
 
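As committed, the extraction works because generated_text begins with the prompt, so taking everything after the last "Counselor:" marker isolates the reply. A pure-string illustration with invented text:

# Illustration only; the generated_text value is invented.
generated_text = "User: I feel overwhelmed lately.\nCounselor: That sounds heavy. Let's take it one step at a time."
counselor_reply = generated_text.split("Counselor:")[-1].strip()
print(counselor_reply)  # That sounds heavy. Let's take it one step at a time.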