AliArshad committed on
Commit b31f731
1 Parent(s): 6e40eda

Update app.py

Files changed (1): app.py (+2 -12)
app.py CHANGED
@@ -1,13 +1,3 @@
- # -*- coding: utf-8 -*-
- """app.ipynb
-
- Automatically generated by Colaboratory.
-
- Original file is located at
-     https://colab.research.google.com/#fileId=https%3A//huggingface.co/spaces/AliArshad/Psycho/blob/main/app.ipynb
- """
-
- pip install transformers gradio
 
  # Load model directly
  from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -32,14 +22,14 @@ def generate_response(system_input, user_input):
      return response.split("### Assistant:\n")[-1]
 
  '''# Example usage
- system_input = "Please act as a psychology counselor assistant. I will provide you counselling notes and you need to provide me exactly two things in return, assessment and plan for the notes. Please note that the assessment should begin with keword 'Assessment:', and plan should begin with keyword 'Plan:'. Please make it logical, simple, concise, and clear."
+ system_input = "Please act as a psychology counselor assistant. I will provide you counseling notes and you need to provide me with exactly two things in return, assessment and plan for the notes. Please note that the assessment should begin with keyword 'Assessment:', and plan should begin with keyword 'Plan:'. Please make it logical, simple, concise, and clear."
  user_input = "Can't sleep well?"
  response = generate_response(system_input, user_input)
  print(response)
  '''
 
  def generate_response1(user_input):
-     system_input = "Please act as a psychology counselor assistant. I will provide you counselling notes and you need to provide me exactly two things in return, assessment and plan for the notes. Please note that the assessment should begin with keword 'Assessment:', and plan should begin with keyword 'Plan:'. Please make it logical, simple, concise, and clear."
+     system_input = "Please act as a psychology counselor assistant. I will provide you counseling notes and you need to provide me with exactly two things in return, assessment and plan for the notes. Please note that the assessment should begin with keyword 'Assessment:', and plan should begin with keyword 'Plan:'. Please make it logical, simple, concise, and clear."
      prompt = f"### System:\n{system_input}\n### User:\n{user_input}\n### Assistant:\n"
      inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)
      outputs = model.generate(inputs, max_length=1000, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
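
For reference, a minimal sketch of what the full generation path in app.py presumably looks like after this commit. The diff is truncated after model.generate() and does not show the from_pretrained() calls or any Gradio wiring, so the MODEL_ID placeholder, the decode/split step, and the gr.Interface block below are assumptions, not the Space's actual code. The removed bare "pip install transformers gradio" line is not valid Python inside app.py; for a Space those dependencies would normally be listed in requirements.txt instead.

# Sketch only: MODEL_ID, the decode step, and the Gradio wiring are assumptions.
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

MODEL_ID = "your-org/your-model"  # hypothetical placeholder; the checkpoint is not shown in this diff

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

SYSTEM_INPUT = (
    "Please act as a psychology counselor assistant. I will provide you counseling notes "
    "and you need to provide me with exactly two things in return, assessment and plan for "
    "the notes. Please note that the assessment should begin with keyword 'Assessment:', and "
    "plan should begin with keyword 'Plan:'. Please make it logical, simple, concise, and clear."
)

def generate_response1(user_input):
    # Build the "### System / ### User / ### Assistant" prompt used in app.py.
    prompt = f"### System:\n{SYSTEM_INPUT}\n### User:\n{user_input}\n### Assistant:\n"
    inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)
    outputs = model.generate(
        inputs,
        max_length=1000,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode, then keep only the text after the assistant marker, mirroring generate_response().
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response.split("### Assistant:\n")[-1]

# Assumed wiring: the Space installs gradio, so app.py presumably exposes the
# function through a simple text-to-text Interface like this.
demo = gr.Interface(fn=generate_response1, inputs="text", outputs="text")
demo.launch()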