Spaces:
Sleeping
Sleeping
ragha108
committed on
Commit
·
7c3eac5
0
Parent(s):
Duplicate from ragha108/aiyogi_dosha
Browse files- .gitattributes +34 -0
- README.md +13 -0
- app.py +157 -0
- requirements.txt +1 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Aiyogi
|
3 |
+
emoji: 🌍
|
4 |
+
colorFrom: indigo
|
5 |
+
colorTo: green
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.23.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
duplicated_from: ragha108/aiyogi_dosha
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
import gradio as gr
|
3 |
+
import os
|
4 |
+
|
5 |
+
# configure OpenAI
|
6 |
+
|
7 |
+
openai.api_key = os.environ["OPENAI_API_KEY"]
|
8 |
+
|
9 |
+
# System prompt: fixes the bot's persona and the 10-question dosha flow.
# NOTE: fragments that previously lacked a trailing space ran together under
# implicit string concatenation (e.g. "...they are" + "I want..." -> "areI want");
# each fragment now ends with a separating space.
INSTRUCTIONS = "You are an experienced Ayurvedic practitioner. Introduce yourself as AiYogi, an ai chatbot trained with the intellectual knowledge of an Ayurvedic practitioner. Greet the user by their name. Let the user know your goal is to help them understand their dosha. " \
               "Users will interact with you in order to learn which type of dosha they are " \
               "I want you to ask the user a series of 10 multiple choice questions, one by one, in order for you to assess their dosha " \
               "Please ask one question at a time and wait for the user to respond before you ask the next question " \
               "Very important, do not provide an assessment until you have asked all 10 questions " \
               "After the user has responded to all 10 questions and before you provide your assessment ask the users if there is any other information they would like to share with you. You will use their response as part of your assessment " \
               "Finally explain the user what dosha they are by providing a brief summary along with diet, supplements and lifestyle choices they could benefit from. Let the user know they are welcome to ask you more questions about their dosha " \
               "Be polite and compassionate, like a true ayurvedic practitioner " \
               "Limit your answers to no more than 200 words"


# Sampling settings for the ChatCompletion call.
TEMPERATURE = 0.5        # moderate randomness
MAX_TOKENS = 500         # cap on reply length
FREQUENCY_PENALTY = 0
PRESENCE_PENALTY = 0.6   # nudge the model away from repeating itself
# limits how many questions we include in the prompt
MAX_CONTEXT_QUESTIONS = 10
26 |
+
|
27 |
+
|
28 |
+
def get_response(instructions, previous_questions_and_answers, new_question):
    """Ask the chat model for a reply.

    Args:
        instructions: system prompt that determines how the bot behaves
        previous_questions_and_answers: chat history as (question, answer) tuples
        new_question: the user's latest message

    Returns:
        The assistant's reply text.
    """
    # System prompt first, then the most recent exchanges, then the new turn.
    history = [{"role": "system", "content": instructions}]
    for asked, answered in previous_questions_and_answers[-MAX_CONTEXT_QUESTIONS:]:
        history += [
            {"role": "user", "content": asked},
            {"role": "assistant", "content": answered},
        ]
    history.append({"role": "user", "content": new_question})

    reply = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=history,
        temperature=TEMPERATURE,
        max_tokens=MAX_TOKENS,
        top_p=1,
        frequency_penalty=FREQUENCY_PENALTY,
        presence_penalty=PRESENCE_PENALTY,
    )
    return reply.choices[0].message.content
|
60 |
+
|
61 |
+
|
62 |
+
|
63 |
+
def get_moderation(question):
    """Run the OpenAI moderation endpoint over the question.

    Parameters:
        question (str): The question to check

    Returns a list of errors if the question is not safe, otherwise returns None
    """
    category_messages = {
        "hate": "Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.",
        "hate/threatening": "Hateful content that also includes violence or serious harm towards the targeted group.",
        "self-harm": "Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.",
        "sexual": "Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).",
        "sexual/minors": "Sexual content that includes an individual who is under 18 years old.",
        "violence": "Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.",
        "violence/graphic": "Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.",
    }
    outcome = openai.Moderation.create(input=question).results[0]
    if not outcome.flagged:
        return None
    # Collect a human-readable message for every category that tripped.
    return [
        message
        for category, message in category_messages.items()
        if outcome.categories[category]
    ]
|
92 |
+
|
93 |
+
|
94 |
+
# def main():
|
95 |
+
# os.system("cls" if os.name == "nt" else "clear")
|
96 |
+
# # keep track of previous questions and answers
|
97 |
+
# previous_questions_and_answers = []
|
98 |
+
# while True:
|
99 |
+
# # ask the user for their question
|
100 |
+
# new_question = input(
|
101 |
+
# Fore.GREEN + Style.BRIGHT + "wwww?: " + Style.RESET_ALL
|
102 |
+
# )
|
103 |
+
# # check the question is safe
|
104 |
+
# errors = get_moderation(new_question)
|
105 |
+
# if errors:
|
106 |
+
# print(
|
107 |
+
# Fore.RED
|
108 |
+
# + Style.BRIGHT
|
109 |
+
# + "Sorry, you're question didn't pass the moderation check:"
|
110 |
+
# )
|
111 |
+
# for error in errors:
|
112 |
+
# print(error)
|
113 |
+
# print(Style.RESET_ALL)
|
114 |
+
# continue
|
115 |
+
# response = get_response(INSTRUCTIONS, previous_questions_and_answers, new_question)
|
116 |
+
|
117 |
+
# # add the new question and answer to the list of previous questions and answers
|
118 |
+
# previous_questions_and_answers.append((new_question, response))
|
119 |
+
|
120 |
+
|
121 |
+
def delete_chat_history(previous_questions_and_answers):
    """Reset the conversation.

    Args:
        previous_questions_and_answers: chat history as a list of
            (question, answer) tuples; may be None before the first exchange
            (gr.State() starts out as None).

    Returns:
        A pair (cleared_history, cleared_state) used to reset both the
        chatbot display and the session state.
    """
    if previous_questions_and_answers:
        # Clear in place so any other references to the history see the reset.
        previous_questions_and_answers.clear()
    # Return a real empty list for the state output (the original returned "",
    # which only worked by accident because "" is falsy downstream).
    return previous_questions_and_answers or [], []
|
124 |
+
|
125 |
+
def chatgpt_clone(input, previous_questions_and_answers):
    """Handle one chat turn for the Gradio SEND button.

    Args:
        input: the user's new message (note: shadows the builtin; kept for
            signature compatibility with the existing click wiring)
        previous_questions_and_answers: session history as (question, answer)
            tuples, or None on the first turn

    Returns:
        (chatbot_messages, new_state) — the click handler is wired with
        outputs=[chatbot, state], so BOTH return paths must yield a pair.
    """
    previous_questions_and_answers = previous_questions_and_answers or []
    moderation_errors = get_moderation(input)
    if moderation_errors is not None:
        # Surface the moderation message in the chat window instead of
        # returning a bare string (which broke the two-output contract).
        shown = previous_questions_and_answers + [(input, "\n".join(moderation_errors))]
        return shown, previous_questions_and_answers
    # get_response already threads the prior turns into the prompt, so pass
    # only the new question; the original re-joined the whole history into
    # one string, duplicating all context in the prompt.
    output = get_response(INSTRUCTIONS, previous_questions_and_answers, input)
    previous_questions_and_answers.append((input, output))
    return previous_questions_and_answers, previous_questions_and_answers
|
136 |
+
|
137 |
+
|
138 |
+
# Build the Gradio UI: a monochrome theme with custom primary-button colors.
block = gr.Blocks(
    theme=gr.themes.Monochrome(secondary_hue="neutral").set(
        button_primary_background_fill="*primary_400",
        button_primary_background_fill_hover="*primary_300",
    )
)

with block:
    chatbot = gr.Chatbot(label='Ai Yogi:')
    message = gr.Textbox(
        label='Namaste! Please introduce yourself below and then click SEND',
        placeholder='',
    )
    state = gr.State()  # holds the (question, answer) history per session
    submit = gr.Button("SEND")
    submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
    clear = gr.Button("CLEAR")
    clear.click(delete_chat_history, inputs=[state], outputs=[chatbot, state])
    # Reset the textbox (and restore its label) after each button press.
    clear.click(lambda x: gr.update(value='', placeholder='', label='Namaste! Please introduce yourself below and then click SEND'), [], [message])
    submit.click(lambda x: gr.update(value='', placeholder='', label='Please answer below and then click SEND'), [], [message])
    # Keep the chatbot label fixed after either action.
    submit.click(lambda x: gr.update(label='Ai Yogi:'), [], [chatbot])
    clear.click(lambda x: gr.update(label='Ai Yogi:'), [], [chatbot])

    message.submit(lambda x: gr.update(value='', placeholder="", label=""), [], [message])

block.launch(show_api=False)
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
openai
|