Duplicate from Yusin/Speech-ChatGPT-Speech
Co-authored-by: Chen, Yusin <Yusin@users.noreply.huggingface.co>
- .gitattributes +27 -0
- .gitignore +1 -0
- README.md +41 -0
- app.py +215 -0
- packages.txt +2 -0
- pygpt.py +112 -0
- requirements.txt +8 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1 @@
+.env
README.md
ADDED
@@ -0,0 +1,41 @@
+---
+title: Speech2ChatGPT2Speech
+emoji: 🗣️🙉
+colorFrom: indigo
+colorTo: yellow
+sdk: gradio
+python_version: 3.9
+sdk_version: 3.12.0
+app_file: app.py
+models:
+- neongeckocom/tts-vits-ljspeech-en
+- neongeckocom/tts-vits-css10-es
+- neongeckocom/tts-vits-css10-fr
+- neongeckocom/tts-vits-css10-de
+- neongeckocom/tts-vits-cv-it
+- neongeckocom/tts-vits-mai-pl
+- neongeckocom/tts-vits-mai-uk
+- neongeckocom/tts-vits-cv-ro
+- neongeckocom/tts-vits-css10-hu
+- neongeckocom/tts-vits-cv-el
+- neongeckocom/tts-vits-cv-cs
+- neongeckocom/tts-vits-cv-sv
+- neongeckocom/tts-vits-cv-pt
+- neongeckocom/tts-vits-cv-bg
+- neongeckocom/tts-vits-cv-hr
+- neongeckocom/tts-vits-cv-da
+- neongeckocom/tts-vits-cv-sk
+- neongeckocom/tts-vits-css10-nl
+- neongeckocom/tts-vits-css10-fi
+- neongeckocom/tts-vits-cv-lt
+- neongeckocom/tts-vits-cv-sl
+- neongeckocom/tts-vits-cv-lv
+- neongeckocom/tts-vits-cv-et
+- neongeckocom/tts-vits-cv-ga
+- neongeckocom/tts-vits-cv-mt
+pinned: false
+license: apache-2.0
+duplicated_from: Yusin/Speech-ChatGPT-Speech
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py
ADDED
@@ -0,0 +1,215 @@
+import tempfile
+import gradio as gr
+from neon_tts_plugin_coqui import CoquiTTS
+LANGUAGES = list(CoquiTTS.langs.keys())
+default_lang = "en"
+import telnetlib
+#import whisper
+#whisper_model = whisper.load_model("small")
+#whisper = gr.Interface.load(name="spaces/sanchit-gandhi/whisper-large-v2")
+chatgpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
+import os
+import json
+session_token = os.environ.get('SessionToken')
+bypass_node = "https://gpt.pawan.krd"
+#api_endpoint = os.environ.get('API_EndPoint')
+# ChatGPT
+#from revChatGPT.ChatGPT import Chatbot
+#chatbot = Chatbot({"session_token": session_token})  # You can start a custom conversation
+import asyncio
+from pygpt import PyGPT
+import argparse
+import sys
+import asyncio
+from ChatGPT_lite.ChatGPT import Chatbot
+
+title = "Speech to ChatGPT to Speech"
+#info = "more info at [Neon Coqui TTS Plugin](https://github.com/NeonGeckoCom/neon-tts-plugin-coqui), [Coqui TTS](https://github.com/coqui-ai/TTS)"
+#badge = "https://visitor-badge-reloaded.herokuapp.com/badge?page_id=neongeckocom.neon-tts-plugin-coqui"
+coquiTTS = CoquiTTS()
+chat_id = {'conversation_id': None, 'parent_id': None}
+headers = {'Authorization': 'yusin'}
+
+##############################################################
+async def async_main(prompt):
+    if session_token is None:
+        print("Please provide a session token")
+    print(session_token, bypass_node)
+    chat = Chatbot(session_token, bypass_node)
+    await asyncio.gather(chat.wait_for_ready())
+
+    while True:
+        response = await chat.ask(prompt)
+        print(f"\nBot: {response['answer']}\n")
+        # Close sockets
+        chat.close()
+        # exit
+        #sys.exit(0)
+        print(response)
+        return response['answer']
+
+
+
+def sync_main(prompt):
+    chat = Chatbot(session_token, bypass_node)
+    # Create loop
+    loop = asyncio.new_event_loop()
+    # Set
+    asyncio.set_event_loop(loop)
+    # Run
+    loop.run_until_complete(chat.wait_for_ready())
+    while True:
+        response = loop.run_until_complete(chat.ask(prompt))
+        print(f"\nBot: {response['answer']}\n")
+        # Close sockets
+        chat.close()
+        # stop asyncio event loop
+        loop.stop()
+        # exit
+        #sys.exit(0)
+        print(response)
+        return response['answer']
+
+###########################################
+parser = argparse.ArgumentParser()
+parser.add_argument('--session_token', type=str, default=None)
+parser.add_argument('--bypass_node', type=str, default="https://gpt.pawan.krd")
+parser.add_argument('--async_mode', action='store_true')
+args = parser.parse_args()
+args.session_token = session_token
+##########################################
+
+async def chat_gpt_ask(prompt):
+    print(session_token)
+    chat_gpt = PyGPT(session_token)
+    await chat_gpt.connect()
+    await chat_gpt.wait_for_ready()
+    print(prompt)
+    answer = await chat_gpt.ask(prompt)
+    print(answer)
+    await chat_gpt.disconnect()
+
+# ChatGPT
+def chat_hf(audio, custom_token, language):
+    output = chatgpt(audio, "transcribe", fn_index=0)
+    whisper_text, gpt_response = output[0], output[1]
+    #whisper_text = translate(audio)
+    #gpt_response = asyncio.run(async_main(whisper_text))
+    '''
+    try:
+        whisper_text = translate(audio)
+        if whisper_text == "ERROR: You have to either use the microphone or upload an audio file":
+            gpt_response = "MISSING AUDIO: Record your voice by clicking the microphone button, do not forget to stop recording before sending your message ;)"
+        else:
+            #gpt_response = chatbot.ask(whisper_text, conversation_id=conversation_id, parent_id=None)
+            #gpt_response = asyncio.run(chat_gpt_ask(whisper_text))
+            gpt_response = asyncio.run(async_main(whisper_text))
+            #gpt_response = async_main(whisper_text)
+            #if chat_id['conversation_id'] != None:
+            #    data = {"content": whisper_text, "conversation_id": chat_id['conversation_id'], "parent_id": chat_id['parent_id']}
+            #else:
+            #    data = {"content": whisper_text}
+            #print(data)
+            #res = requests.get('http://myip.ipip.net', timeout=5).text
+            #print(res)
+            #response = requests.post('api_endpoint', headers=headers, json=data, verify=False, timeout=5)
+            #print('this is my answear', response.text)
+            #chat_id['parent_id'] = response.json()["response_id"]
+            #chat_id['conversation_id'] = response.json()["conversation_id"]
+            #gpt_response = response.json()["content"]
+            #response = requests.get('https://api.pawan.krd/chat/gpt?text=' + whisper_text + '&cache=false', verify=False, timeout=5)
+            #print(response.text)
+
+    #whisper_text = translate(audio)
+    #api = ChatGPT(session_token)
+    #resp = api.send_message(whisper_text)
+
+    #api.refresh_auth()  # refresh the authorization token
+    #api.reset_conversation()  # reset the conversation
+    #gpt_response = resp['message']
+
+    except:
+        whisper_text = translate(audio)
+        gpt_response = """Sorry, I'm quite busy right now, but please try again later :)"""
+    #whisper_text = translate(audio)
+    #api = ChatGPT(custom_token)
+    #resp = api.send_message(whisper_text)
+
+    #api.refresh_auth()  # refresh the authorization token
+    #api.reset_conversation()  # reset the conversation
+    #gpt_response = resp['message']
+    '''
+    # to voice
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+        coquiTTS.get_tts(gpt_response, fp, speaker={"language": language})
+
+    return whisper_text, gpt_response, fp.name
+
+# whisper
+#def translate(audio):
+#    print("""
+#    —
+#    Sending audio to Whisper ...
+#    —
+#    """)
+#
+#    audio = whisper.load_audio(audio)
+#    audio = whisper.pad_or_trim(audio)
+#
+#    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
+#
+#    _, probs = whisper_model.detect_language(mel)
+#
+#    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
+#
+#    transcription = whisper.decode(whisper_model, mel, transcript_options)
+#
+#    print("language spoken: " + transcription.language)
+#    print("transcript: " + transcription.text)
+#    print("———————————————————————————————————————————")
+#
+#    return transcription.text
+
+def translate(audio):
+    print("""
+    —
+    Sending audio to Whisper ...
+    —
+    """)
+
+    text_result = whisper(audio, None, "transcribe", fn_index=0)
+    print(text_result)
+    return text_result
+
+
+with gr.Blocks() as blocks:
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>"
+                + title
+                + "</h1>")
+    #gr.Markdown(description)
+    radio = gr.Radio(label="Language", choices=LANGUAGES, value=default_lang)
+    with gr.Row(equal_height=True):  # equal_height=False
+        with gr.Column():  # variant="panel"
+            audio_file = gr.Audio(source="microphone", type="filepath")
+            custom_token = gr.Textbox(label='If it fails, use your own session token', placeholder="your own session token")
+            with gr.Row():  # mobile_collapse=False
+                submit = gr.Button("Submit", variant="primary")
+        with gr.Column():
+            text1 = gr.Textbox(label="Speech to Text")
+            text2 = gr.Textbox(label="ChatGPT Response")
+            audio = gr.Audio(label="Output", interactive=False)
+    #gr.Markdown(info)
+    #gr.Markdown("<center>"
+    #            + f'<img src={badge} alt="visitors badge"/>'
+    #            + "</center>")
+
+    # actions
+    submit.click(
+        chat_hf,
+        [audio_file, custom_token, radio],
+        [text1, text2, audio],
+    )
+    radio.change(lambda lang: CoquiTTS.langs[lang]["sentence"], radio, text2)
+
+
+blocks.launch(debug=True)
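The chat_hf function above is the whole pipeline: the loaded whisper-to-chatGPT Space returns the transcript and the ChatGPT reply, and CoquiTTS writes the reply to a temporary wav file. A minimal smoke test of that flow, as a sketch only: it assumes the snippet is placed in app.py just before blocks.launch(debug=True) so that chat_hf and its globals are in scope, and that a local recording named sample.wav exists. Both sample.wav and the "en" language key are illustrative placeholders, not part of the Space.

# Hypothetical smoke test; sample.wav is an assumed local recording and
# "en" is one of the CoquiTTS language keys offered by the Radio widget.
transcript, reply, wav_path = chat_hf("sample.wav", custom_token=None, language="en")
print("Transcript:", transcript)
print("ChatGPT reply:", reply)
print("TTS output written to:", wav_path)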
packages.txt
ADDED
@@ -0,0 +1,2 @@
+libsndfile1
+espeak-ng
pygpt.py
ADDED
@@ -0,0 +1,112 @@
+import uuid
+import asyncio
+import socketio
+import datetime
+import json
+import base64
+
+class PyGPT:
+    def __init__(self, session_token, bypass_node='https://gpt.pawan.krd'):
+        self.ready = False
+        self.socket = socketio.AsyncClient()
+        self.socket.on('connect', self.on_connect)
+        self.socket.on('disconnect', self.on_disconnect)
+        self.session_token = session_token
+        self.conversations = []
+        self.auth = None
+        self.expires = datetime.datetime.now()
+        self.pause_token_checks = False
+        self.bypass_node = bypass_node
+        asyncio.create_task(self.cleanup_conversations())
+
+    async def connect(self):
+        await self.socket.connect(self.bypass_node)
+
+    async def disconnect(self):
+        await self.socket.disconnect()
+        await self.socket.close()
+
+    def on_connect(self):
+        print('Connected to server')
+        asyncio.create_task(self.check_tokens())
+
+    def on_disconnect(self):
+        print('Disconnected from server')
+        self.ready = False
+
+    async def check_tokens(self):
+        while True:
+            if self.pause_token_checks:
+                await asyncio.sleep(0.5)
+                continue
+            self.pause_token_checks = True
+            now = datetime.datetime.now()
+            offset = datetime.timedelta(minutes=2)
+            if self.expires < (now - offset) or not self.auth:
+                await self.get_tokens()
+            self.pause_token_checks = False
+            await asyncio.sleep(0.5)
+
+    async def cleanup_conversations(self):
+        while True:
+            await asyncio.sleep(60)
+            now = datetime.datetime.now()
+            self.conversations = [c for c in self.conversations if now - c['last_active'] < datetime.timedelta(minutes=2)]
+
+    def add_conversation(self, id):
+        conversation = {
+            'id': id,
+            'conversation_id': None,
+            'parent_id': uuid.uuid4(),
+            'last_active': datetime.datetime.now()
+        }
+        self.conversations.append(conversation)
+        return conversation
+
+    def get_conversation_by_id(self, id):
+        conversation = next((c for c in self.conversations if c['id'] == id), None)
+        if conversation is None:
+            conversation = self.add_conversation(id)
+        else:
+            conversation['last_active'] = datetime.datetime.now()
+        return conversation
+
+    async def wait_for_ready(self):
+        while not self.ready:
+            await asyncio.sleep(0.025)
+        print('Ready!!')
+
+    async def ask(self, prompt, id='default'):
+        if not self.auth or not self.validate_token(self.auth):
+            await self.get_tokens()
+        conversation = self.get_conversation_by_id(id)
+        data = await self.socket.call('askQuestion', {
+            'prompt': prompt,
+            'parentId': str(conversation['parent_id']),
+            'conversationId': str(conversation['conversation_id']),
+            'auth': self.auth
+        })
+
+        if 'error' in data:
+            print(f'Error: {data["error"]}')
+        conversation['parent_id'] = data['messageId']
+        conversation['conversation_id'] = data['conversationId']
+        return data['answer']
+
+    def validate_token(self, token):
+        if not token:
+            return False
+        parsed = json.loads(base64.b64decode(f'{token.split(".")[1]}==').decode())
+        return datetime.datetime.now() <= datetime.datetime.fromtimestamp(parsed['exp'])
+
+    async def get_tokens(self):
+        await asyncio.sleep(1)
+        data = await self.socket.call('getSession', self.session_token)
+
+        if 'error' in data:
+            print(f'Error getting session: {data["error"]}')
+        else:
+            self.auth = data['auth']
+            self.expires = datetime.datetime.strptime(data['expires'], '%Y-%m-%dT%H:%M:%S.%fZ')
+            self.session_token = data['sessionToken']
+            self.ready = True
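For reference, a minimal sketch of how this client is driven; it mirrors the chat_gpt_ask helper in app.py. The session token string is a placeholder assumption, and the object has to be constructed inside a running event loop because __init__ calls asyncio.create_task.

# Hypothetical standalone usage of PyGPT; "YOUR_SESSION_TOKEN" is a placeholder.
import asyncio
from pygpt import PyGPT

async def demo():
    chat_gpt = PyGPT("YOUR_SESSION_TOKEN")  # needs a running loop for create_task
    await chat_gpt.connect()                # open the socket.io connection to the bypass node
    await chat_gpt.wait_for_ready()         # wait until getSession has returned auth tokens
    answer = await chat_gpt.ask("Hello!")   # emits askQuestion and returns data['answer']
    print(answer)
    await chat_gpt.disconnect()

asyncio.run(demo())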
requirements.txt
ADDED
@@ -0,0 +1,8 @@
+neon-tts-plugin-coqui==0.7.0
+python-socketio[asyncio_client]
+python-engineio
+python-socketio
+ChatGPT-lite
+#undetected-chromedriver
+#revChatGPT
+#git+https://github.com/openai/whisper.git