Spaces: Runtime error

TenPoisk committed
Commit • 32d21dc
1 Parent(s): fe5a9fd

Upload 4 files

Browse files:
- README (3).md +11 -0
- dolphin.script.py +51 -0
- gitattributes (1).txt +35 -0
- requirements (1).txt +1 -0
README (3).md
ADDED
@@ -0,0 +1,11 @@
+---
+title: DolphinChat — ChatGPT
+emoji: 🐬
+colorFrom: gray
+colorTo: blue
+sdk: gradio
+sdk_version: 3.39.0
+app_file: dolphin.script.py
+pinned: true
+license: bigscience-bloom-rail-1.0
+---
dolphin.script.py
ADDED
@@ -0,0 +1,51 @@
+import random
+import gradio as gr
+import openai
+
+openai.api_type = "azure"
+openai.api_base = "https://hrangaopenaillm.openai.azure.com"
+openai.api_version = "2023-03-15-preview"
+openai.api_key = "e951b48da7c548e18af601a15cb6aefa"
+
+
+def gptresponse(message, history):
+    system_prompt = "You are a professional power BI assistant that knows a lot about DAX measures. You are going to help users write their own DAX measures, explain the code and provide any possible information. If you are not sure about the answer, just respond with I am not sure. It's your responsibility to remind users not to send sensitive or private data. Your answers should always be friendly, polite, helpful, secure and with fact support."
+
+    messages = [{"role": "system", "content": system_prompt}]
+    for human, assistant in history:
+        messages.append({"role": "user", "content": human})
+        messages.append({"role": "assistant", "content": assistant})
+
+    if message != '':
+        messages.append({"role": "user", "content": message})
+
+    response = openai.ChatCompletion.create(engine="NGA_AI_ASSISTANT",
+                                            messages=messages,
+                                            temperature=0.7,
+                                            max_tokens=800,
+                                            top_p=0.95,
+                                            frequency_penalty=0,
+                                            presence_penalty=0,
+                                            stop=None)
+
+    return response["choices"][0]["message"]["content"]
+
+title = "🐬 DolphinChat"
+description = \
+"""
+<p></p>
+<h1>ℹ️ I am DolphinChat and I was created to help people!</h1>
+<p></p>
+<h1>✅️ I have been trained on almost the entire Internet!</h1>
+<p></p>
+<h1>♻️ I can communicate in more than 60 languages of the world!</h1>
+<p></p>
+<h1>📂 I work on open source and keep your data safe, I am a non-commercial project!</h1>
+<p></p>
+<h1>▶️ I'm almost the perfect chat assistant, so try me!</h1>
+<p></p>
+"""
+
+gr.HTML(title)
+
+gr.ChatInterface(gptresponse, title=title, description=description).launch()
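The script above hard-codes the Azure endpoint and API key at module level. A minimal sketch of how the same setup could read those values from environment variables instead (OPENAI_API_BASE and OPENAI_API_KEY are hypothetical names, not part of this commit; on Spaces they could be provided as repository secrets):

import os
import openai

# Hypothetical environment variables holding the Azure endpoint and key.
openai.api_type = "azure"
openai.api_base = os.environ["OPENAI_API_BASE"]
openai.api_version = "2023-03-15-preview"
openai.api_key = os.environ["OPENAI_API_KEY"]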
gitattributes (1).txt
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
requirements (1).txt
ADDED
@@ -0,0 +1 @@
+openai
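Note that dolphin.script.py uses the legacy openai.ChatCompletion interface, which the openai package removed in its 1.0 release, so the unpinned entry above can resolve to an incompatible version on a later rebuild. A hedged sketch of a pinned requirements line that matches the legacy interface (exact version chosen for illustration only):

openai==0.28.1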