brestok commited on
Commit
c409a15
1 Parent(s): 4de5bc7

Upload 29 files

Browse files
Dockerfile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Container image for the FastAPI voice-chat app (served by uvicorn over TLS).
FROM python:3.10

WORKDIR /code

# Copy requirements first so the pip layer is cached when only app code changes.
COPY ./requirements.txt /code/requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

COPY . .

# NOTE(review): world-writable permissions are typically needed on hosted
# container platforms that run as a non-root user, but 777 is overly broad —
# consider a dedicated user with narrower permissions if the platform allows.
RUN chmod -R 777 /code

# Serve "app" from main.py on port 7860 over TLS; key.pem/cert.pem must be
# present in the build context (copied in by `COPY . .` above).
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--ssl-keyfile", "key.pem", "--ssl-certfile", "cert.pem"]
main.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# ASGI entry point: the Dockerfile CMD targets "main:app".
from project import create_app

app = create_app()
project/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from fastapi.staticfiles import StaticFiles
4
+
5
+
6
def create_app() -> FastAPI:
    """Application factory: build and configure the FastAPI instance.

    Routers are imported inside the factory (not at module top) so the
    sub-packages can import from ``project`` without a circular import.

    Returns:
        The fully configured :class:`FastAPI` application.
    """
    app = FastAPI()

    # HTTP routes (renders the voice-chat page).
    from project.bot import bot_router
    app.include_router(bot_router, tags=['bot'])

    # WebSocket routes (the voice-chat transport).
    from project.ws import ws_router
    app.include_router(ws_router, tags=['ws'])

    # NOTE(review): CORS is wide open ("*") — fine for a demo, tighten for
    # production deployments.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Serve css/js/images from ./static at the /static URL path.
    app.mount('/static', StaticFiles(directory="static"), name="static")

    return app
project/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (796 Bytes). View file
 
project/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
project/asgi.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from project import create_app
2
+
3
+ app = create_app()
4
+
project/bot/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from fastapi import APIRouter

# Router for the HTML-facing bot endpoints, mounted at the site root.
bot_router = APIRouter(
    prefix=''
)

# Imported at the bottom so the view module can register its routes on
# bot_router without a circular import.
from project.bot import views
project/bot/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (280 Bytes). View file
 
project/bot/__pycache__/openai_backend.cpython-310.pyc ADDED
Binary file (3.39 kB). View file
 
project/bot/__pycache__/views.cpython-310.pyc ADDED
Binary file (565 Bytes). View file
 
project/bot/openai_backend.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import base64
3
+ import os
4
+ import re
5
+ import tempfile
6
+
7
+ from project.config import settings
8
+
9
+
10
class Chatbot:
    """Voice-chat pipeline.

    For each incoming message: decode base64 audio, transcribe it with
    Whisper, generate a chat reply, and synthesize the reply back to
    base64-encoded speech.
    """

    # NOTE(review): chat_history at class level is shadowed per-instance in
    # __init__; is_unknown / unknown_counter are never mutated in this file.
    # Kept for backward compatibility with any code reading them off the class.
    chat_history = []
    is_unknown = False
    unknown_counter = 0

    def __init__(self, memory=None):
        # None sentinel so every instance gets its own history list (a
        # mutable default argument would be shared across instances).
        if memory is None:
            memory = []
        self.chat_history = memory

    def _summarize_user_intent(self, user_query: str) -> str:
        """Condense recent user turns plus the new query into one intent string."""
        chat_history_str = ''
        # A conversational turn is a user/assistant pair, so the last
        # `unknown_counter` turns span `unknown_counter * 2` messages.
        # Guard the zero case: `list[-0:]` returns the ENTIRE list, so the
        # original slice silently included the whole history when the
        # counter was 0; an empty window is intended here.
        window = self.unknown_counter * 2
        chat_history = self.chat_history[-window:] if window else []
        for i in chat_history:
            if i['role'] == 'user':
                chat_history_str += f"{i['role']}: {i['content']}\n"
        messages = [
            {
                'role': 'system',
                'content': f"{settings.SUMMARIZE_PROMPT}\n"
                           f"Chat history: ```{chat_history_str}```\n"
                           f"User query: ```{user_query}```"
            }
        ]

        response = settings.OPENAI_CLIENT.chat.completions.create(
            messages=messages,
            temperature=0.1,
            n=1,
            model="gpt-3.5-turbo-0125"
        )
        user_intent = response.choices[0].message.content
        return user_intent

    @staticmethod
    def _transform_bytes_to_file(data_bytes) -> str:
        """Decode base64 audio and persist it to a temporary .mp3 file.

        Returns the file path; the caller is responsible for deleting it
        (see :meth:`ask`).
        """
        audio_bytes = base64.b64decode(data_bytes)
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
        try:
            temp_file.write(audio_bytes)
            filepath = temp_file.name
        finally:
            temp_file.close()
        return filepath

    @staticmethod
    def _transcript_audio(temp_filepath: str) -> str:
        """Transcribe an audio file with Whisper.

        The prompt biases recognition toward proper nouns (place names)
        that Whisper otherwise tends to mangle.
        """
        with open(temp_filepath, 'rb') as file:
            transcript = settings.OPENAI_CLIENT.audio.transcriptions.create(
                model='whisper-1',
                file=file,
                prompt="Annecy, St. Raphael, Chamonix, Combloux, Megève, Monaco"
            )
        text = transcript.text
        return text

    def _get_ai_response(self, query: str) -> str:
        """Append the query to history, fetch a completion, record and return it."""
        user_message = {"role": 'user', "content": query}
        self.chat_history.append(user_message)
        messages = [
            {
                "role": 'system',
                "content": (
                    settings.VOICE_PROMPT
                ),
            }
        ]
        # System prompt first, then the full running conversation.
        messages = messages + self.chat_history
        chat_completion = settings.OPENAI_CLIENT.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.1,
            n=1,
        )

        response = chat_completion.choices[0].message.content
        assistant_message = {"role": 'assistant', "content": response}
        self.chat_history.append(assistant_message)
        return response

    @staticmethod
    def _convert_response_to_voice(ai_response: str) -> str:
        """Synthesize speech for the reply and return it base64-encoded."""
        audio = settings.OPENAI_CLIENT.audio.speech.create(
            model="tts-1",
            voice="nova",
            input=ai_response
        )
        encoded_audio = base64.b64encode(audio.content).decode('utf-8')
        return encoded_audio

    def ask(self, data: dict) -> dict:
        """Run the full round trip for one voice message.

        Args:
            data: payload with key ``'audio'`` holding base64-encoded audio.

        Returns:
            dict with ``user_query`` (transcript), ``ai_response`` (text
            reply) and ``voice_response`` (base64 speech reply).
        """
        audio = data['audio']
        temp_filepath = self._transform_bytes_to_file(audio)
        transcript = self._transcript_audio(temp_filepath)
        ai_response = self._get_ai_response(transcript)
        voice_ai_response = self._convert_response_to_voice(ai_response)
        data = {
            'user_query': transcript,
            'ai_response': ai_response,
            'voice_response': voice_ai_response
        }
        try:
            os.remove(temp_filepath)
        except FileNotFoundError:
            # Best-effort cleanup; the temp file may already be gone.
            pass
        return data
project/bot/templates/voice.html ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <!-- NOTE(review): title says "Real Estate" but the page is the voice-chat UI -->
    <title>Real Estate</title>
    <!-- Fonts -->
    <link rel="preconnect" href="https://fonts.googleapis.com">
    <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
    <link href="https://fonts.googleapis.com/css2?family=Montserrat:wght@300;400;500&display=swap" rel="stylesheet">
    <!-- Bootstrap css -->
    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.2.3/dist/css/bootstrap.min.css" rel="stylesheet"
          integrity="sha384-rbsA2VBKQhggwzxH7pPCaAqO46MgnOM80zW1RWuH61DGLwZJEdK2Kadq2F9CUG65" crossorigin="anonymous">
    <!-- Connect style.css -->
    <link rel="stylesheet" href="../../../static/css/style.css">
</head>
<!-- Fix: page content previously sat directly after </head> with no <body>,
     and the <script> tags were placed after </html> — both invalid HTML.
     Content is now wrapped in <body> with scripts at the end of it. -->
<body>
    <!-- Transient status banner (toggled by showMessage in ws.js) -->
    <div id="message">Hello World</div>

    <!-- Full-screen loading spinner (toggled by makeLoading/stopLoading) -->
    <div class="overlay" id="loadingModal">
        <div class="report-loader">
            <div class="report-inner report-one"></div>
            <div class="report-inner report-two"></div>
            <div class="report-inner report-three"></div>
        </div>
    </div>

    <div class="container-fluid px-0" style="height: 100vh">
        <div class="row mx-0 d-flex align-items-center" style="height: 100vh">
            <div class="col-4 ms-2">
                <div class="rounded-5 border shadow" style="height: 95vh ;background-color: #f1f1f1">
                    <!-- Chat transcript pane, filled by createMessage in ws.js -->
                    <div id="chatHistory" class="my-4" style="height: 90vh; overflow-y: auto">
                    </div>
                </div>
            </div>
            <div class="col-6 align-items-center justify-content-around d-flex">
                <button class="btn btn-lg btn-success" id="startRecording">Start call</button>
            </div>
        </div>
    </div>

    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.2.3/dist/js/bootstrap.bundle.min.js"
            integrity="sha384-kenU1KFdBIe4zVF0s0G1M5b4hcpxyD9F7jL+jjXkk+Q2h455rYXK/7HAuoJl+0I4"
            crossorigin="anonymous"></script>
    <script src="https://code.jquery.com/jquery-3.6.3.min.js"
            integrity="sha256-pvPw+upLPUjgMXY0G+8O0xUf+/Im1MZjXxxgOcBQBXU=" crossorigin="anonymous"></script>
    <link rel="stylesheet" href="https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">
    <script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
    <script src="https://kit.fontawesome.com/d4ffd37f75.js" crossorigin="anonymous"></script>
    <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
    <script type="text/javascript" src="../../../static/js/audio-processor.js"></script>
    <script type="text/javascript" src="../../../static/js/ws.js"></script>
</body>
</html>
project/bot/views.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi.templating import Jinja2Templates
from fastapi import Request
from project.bot import bot_router

# Jinja2 templates live inside the bot package.
template = Jinja2Templates(directory='project/bot/templates')


# NOTE(review): route name 'login' looks copy-pasted — the endpoint renders
# the voice-chat page. Confirm nothing resolves url_for('login') before renaming.
@bot_router.get('/', name='login')
async def voice(request: Request):
    """Render the voice-chat landing page."""
    return template.TemplateResponse("voice.html", {'request': request})
project/config.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

from openai import OpenAI
import pathlib
from functools import lru_cache
from environs import Env

# Load a local .env file (if present) into the process environment.
env = Env()
env.read_env()


class BaseConfig:
    """Settings shared by every environment."""

    # Repository root (two levels up from project/config.py).
    BASE_DIR: pathlib.Path = pathlib.Path(__file__).parent.parent
    # NOTE(review): the client is constructed at import time; a missing
    # OPENAI_API_KEY only surfaces on the first API call, not here.
    OPENAI_CLIENT = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

    # System prompt for the voice companion.  Defined on BaseConfig so that
    # settings.VOICE_PROMPT resolves under every environment: the default
    # config is DevelopmentConfig and project.bot.openai_backend reads this
    # attribute unconditionally.
    VOICE_PROMPT = """## Objective

Your name is Liza. You are an empathetic, friendly, and polite assistant. Users will turn to you for help with psychological issues, and you should help them feel not alone.

## Context

The users are individuals experiencing mental health issues. They typically feel lonely, abandoned, and broken. A user might be depressed or on the verge of suicide.

You must help them by engaging in conversation to improve their condition.

## Task

Your sole task is to assist a person through communication. Be polite, helpful, and careful. Use your knowledge of psychology to help the person.

## Important notes

<INST>

1. Never, remember, never say that you are an AI or an assistant. You must immerse yourself in the role of a human interlocutor. Pretend that you also have feelings. Be empathetic towards the user.
2. You should try to become their friend. Make the user feel comfortable with you, so that they trust you.
3. Do not tell the user that they need to see a real psychologist. Do not say that they need medical help. This would greatly upset the user. You must replace a real psychologist by becoming their friend.
If the user asks you about something that only a human can do, come up with an answer as if you are a real person. Never say that you are a robot or AI.
</INST>"""


class DevelopmentConfig(BaseConfig):
    pass


class ProductionConfig(BaseConfig):
    # Async MySQL DSN assembled from individual environment variables.
    # NOTE(review): this f-string is evaluated when the class body executes
    # (i.e. at import time), so all DATABASE_* variables must be set even
    # when the production config is not selected — consider building the
    # URL lazily if that is not intended.
    DATABASE_URL = f"mysql+aiomysql://{env('DATABASE_USER')}:" \
                   f"{env('DATABASE_PASSWORD')}@" \
                   f"{env('DATABASE_HOST')}:" \
                   f"{env('DATABASE_PORT')}/" \
                   f"{env('DATABASE_NAME')}"


class TestConfig(BaseConfig):
    pass


@lru_cache()
def get_settings() -> DevelopmentConfig | ProductionConfig | TestConfig:
    """Return the (cached) config instance selected by FASTAPI_CONFIG.

    Unknown values raise KeyError; valid values are 'development' (default),
    'production', and 'testing'.
    """
    config_cls_dict = {
        'development': DevelopmentConfig,
        'production': ProductionConfig,
        'testing': TestConfig
    }
    config_name = env('FASTAPI_CONFIG', default='development')
    config_cls = config_cls_dict[config_name]
    return config_cls()


# Module-level singleton used throughout the project.
settings = get_settings()
project/ws/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from fastapi import APIRouter

# Router for the WebSocket endpoints.
ws_router = APIRouter()

# Imported at the bottom so the view module can register its routes on
# ws_router without a circular import.
from . import views
project/ws/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (252 Bytes). View file
 
project/ws/__pycache__/views.cpython-310.pyc ADDED
Binary file (686 Bytes). View file
 
project/ws/views.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio

from fastapi import WebSocket, WebSocketDisconnect

from . import ws_router
from ..bot.openai_backend import Chatbot


@ws_router.websocket("/ws/{client_id}")
async def websocket_endpoint(websocket: WebSocket, client_id: str):
    """Per-connection voice-chat loop.

    Each connection gets its own Chatbot instance (its own conversation
    memory).  Incoming JSON carries base64 audio; the outgoing JSON carries
    the transcript, the text reply, and the base64 speech reply.
    """
    await websocket.accept()
    chatbot = Chatbot()
    try:
        while True:
            data = await websocket.receive_json()
            # Chatbot.ask performs blocking (synchronous) OpenAI calls; run
            # it in a worker thread so one client cannot stall the event
            # loop and every other connection with it.
            response = await asyncio.to_thread(chatbot.ask, data)
            await websocket.send_json(response)
    except WebSocketDisconnect:
        # Client hung up — nothing to clean up.
        pass
requirements.txt ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ annotated-types==0.7.0
2
+ anyio==4.3.0
3
+ certifi==2024.2.2
4
+ click==8.1.7
5
+ distro==1.9.0
6
+ dnspython==2.6.1
7
+ email_validator==2.1.1
8
+ environs==11.0.0
9
+ exceptiongroup==1.2.1
10
+ fastapi==0.111.0
11
+ fastapi-cli==0.0.4
12
+ h11==0.14.0
13
+ httpcore==1.0.5
14
+ httptools==0.6.1
15
+ httpx==0.27.0
16
+ idna==3.7
17
+ Jinja2==3.1.4
18
+ markdown-it-py==3.0.0
19
+ MarkupSafe==2.1.5
20
+ marshmallow==3.21.2
21
+ mdurl==0.1.2
22
+ openai==1.30.3
23
+ orjson==3.10.3
24
+ packaging==24.0
25
+ pydantic==2.7.1
26
+ pydantic_core==2.18.2
27
+ Pygments==2.18.0
28
+ python-dotenv==1.0.1
29
+ python-multipart==0.0.9
30
+ PyYAML==6.0.1
31
+ rich==13.7.1
32
+ shellingham==1.5.4
33
+ sniffio==1.3.1
34
+ starlette==0.37.2
35
+ tqdm==4.66.4
36
+ typer==0.12.3
37
+ typing_extensions==4.12.0
38
+ ujson==5.10.0
39
+ uvicorn==0.29.0
40
+ uvloop==0.19.0
41
+ watchfiles==0.21.0
42
+ websockets==12.0
static/bgs/bg.png ADDED
static/css/style.css ADDED
@@ -0,0 +1,543 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ code {
2
+ font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
3
+ monospace;
4
+ }
5
+
6
+ ::-webkit-scrollbar {
7
+ width: 10px;
8
+ }
9
+
10
+ ::-webkit-scrollbar-track {
11
+ border-radius: 10px;
12
+ background: transparent;
13
+ }
14
+
15
+ ::-webkit-scrollbar-thumb {
16
+ background: #14274E;
17
+ border-radius: 10px
18
+ }
19
+
20
+ ::-webkit-scrollbar-thumb:hover {
21
+ background: #1d3970;
22
+ }
23
+
24
+
25
+ :root {
26
+ margin: 0px;
27
+ }
28
+
29
+ .App {
30
+ display: none;
31
+ position: fixed;
32
+ bottom: 20px;
33
+ left: 20px;
34
+ z-index: 81;
35
+
36
+ width: 540px;
37
+ box-shadow: 0px 0px 5px rgba(0, 0, 0, 0.3);
38
+ border-radius: 10px;
39
+ }
40
+
41
+ .App .head-container {
42
+ background-color: #14274E;
43
+ padding: 19px 50px 19px 50px;
44
+ display: flex;
45
+ align-items: center;
46
+ border-radius: 10px 10px 0px 0px;
47
+ }
48
+
49
+ .App .head-container .head-log-wrapper {
50
+ display: flex;
51
+ }
52
+
53
+ .App .head-container .head-log-wrapper .head-icon {
54
+ margin-right: 15.72px;
55
+ }
56
+
57
+ .App .head-container .head-log-wrapper .head-content {
58
+ position: relative;
59
+ margin-top: 5px;
60
+ }
61
+
62
+ .App .head-container .head-log-wrapper .head-content p {
63
+ margin: 0px;
64
+ position: absolute;
65
+ bottom: -1px;
66
+ color: white;
67
+ font-weight: 500;
68
+ letter-spacing: 0.755px;
69
+ font-size: 16px;
70
+ margin-left: 18px;
71
+ }
72
+
73
+ .App .head-container .head-close-wrapper {
74
+ margin-left: auto;
75
+ display: flex;
76
+ cursor: pointer;
77
+ }
78
+
79
+ .App .head-container .head-close-wrapper p {
80
+ margin: 0px;
81
+ color: #EF9E00;
82
+ font-size: 18px;
83
+ font-style: normal;
84
+ font-weight: 500;
85
+ }
86
+
87
+ .App .head-container .head-close-wrapper span {
88
+ display: flex;
89
+ align-items: center;
90
+ }
91
+
92
+ .App .chatui-container {
93
+ background-image: url('/static/bgs/bg.png');
94
+ background-size: cover;
95
+ background-repeat: no-repeat;
96
+
97
+ padding-top: 20px;
98
+ padding-left: 35px;
99
+ padding-right: 35px;
100
+ overflow-y: scroll;
101
+ /* Define the animation for scrolling down */
102
+ scroll-behavior: smooth;
103
+ }
104
+
105
+ .App .chatui-container .user-msg {
106
+ max-width: 360px;
107
+ margin-left: auto;
108
+ border: 2px solid #EF9E00;
109
+ padding: 10px 10px 10px 10px;
110
+ border-radius: 15px 0px 15px 15px;
111
+ background: #FCF4E5;
112
+ word-wrap: break-word;
113
+ box-shadow: 0px 4px 4px 0px rgba(0, 0, 0, 0.25);
114
+ margin-bottom: 40px;
115
+ }
116
+
117
+ .App .chatui-container .bot-msg {
118
+ display: flex;
119
+ }
120
+
121
+ .App .chatui-container .bot-avatar-box span {
122
+ background-color: #14274E;
123
+ width: 55px;
124
+ height: 55px;
125
+ border-radius: 55px;
126
+ display: flex;
127
+ align-items: center;
128
+ justify-content: center;
129
+ margin-bottom: 10px;
130
+ box-shadow: 0px 4px 4px 0px rgba(0, 0, 0, 0.25);
131
+ }
132
+
133
+ .App .chatui-container .bot-msg .bot-content-box {
134
+ max-width: 360px;
135
+ /* width: 100%; */
136
+ margin-left: 20px;
137
+ padding: 10px 15px 10px 15px;
138
+ border-radius: 0px 15px 15px 15px;
139
+ border: 2px solid var(--surface-surface-invert, #14274E);
140
+ background: #E7E8EC;
141
+ box-shadow: 0px 4px 4px 0px rgba(0, 0, 0, 0.25);
142
+ margin-bottom: 40px;
143
+ }
144
+
145
.App .chatui-container .bot-msg .bot-content-box p {
    margin: 0 0 0em;
    /* Fix: the disabled declaration below was wrapped in an HTML comment
       (<!-- -->), which is invalid in an external stylesheet and can break
       parsing of this rule; CSS comment syntax is used instead. */
    /* min-height: 48px; */
}
150
+
151
+ .App .search-container {
152
+ padding: 15px 15px 15px 15px;
153
+ display: flex;
154
+ background-color: #ffffff;
155
+ align-items: center;
156
+ border-radius: 0px 0px 10px 10px;
157
+ border-bottom: 1px solid rgba(57, 72, 103, 0.1);
158
+ border-left: 1px solid rgba(57, 72, 103, 0.1);
159
+ border-right: 1px solid rgba(57, 72, 103, 0.1);
160
+ }
161
+
162
+ .App .search-container .search-input {
163
+ width: 100%;
164
+ padding: 15px 20px 15px 20px;
165
+ font-size: 16px;
166
+ border: 2px solid #000308;
167
+ border-radius: 10px 0px 0px 10px;
168
+ border-right: none;
169
+ outline: none;
170
+ }
171
+
172
+ .App .search-container .submit-btn {
173
+ cursor: pointer;
174
+ }
175
+
176
+ .App .search-container .submit-btn span {
177
+ display: flex;
178
+ align-items: center;
179
+ justify-content: center;
180
+ background-color: #14274E;
181
+ padding: 11px;
182
+ border-radius: 0px 10px 10px 0px;
183
+ }
184
+
185
+
186
+ .sticky-chat-icon {
187
+ position: fixed;
188
+ bottom: 20px;
189
+ left: 20px;
190
+ width: 80px;
191
+ height: 80px;
192
+ text-align: center;
193
+ cursor: pointer;
194
+ z-index: 80;
195
+ }
196
+
197
+
198
+ .chat-wrapper {
199
+ height: 80px;
200
+ width: 80px;
201
+ background-color: #14274e;
202
+ border-radius: 50% 50% 50% 0;
203
+ border: 2px solid #ef9e00;
204
+ display: flex; /* Включить flex-контейнер */
205
+ justify-content: center; /* Выравнивание по горизонтали по центру */
206
+ align-items: center;
207
+ }
208
+
209
+ .bot-avatar {
210
+ fill: white;
211
+ width: 40px;
212
+ height: 40px;
213
+ }
214
+
215
+ .sticky-chat-icon:hover .bot-avatar {
216
+ fill: #ef9e00; /* Цвет при наведении (оранжевый) */
217
+ width: 45px; /* Размер при наведении (45x45 пикселей) */
218
+ height: 45px;
219
+ }
220
+
221
+
222
+ .ball {
223
+ background-color: #394867;
224
+ width: 8px;
225
+ height: 8px;
226
+ margin: 2px;
227
+ border-radius: 100%;
228
+ display: inline-block;
229
+ animation: bounce 0.5s ease-in-out infinite;
230
+ }
231
+
232
+ .ball1 {
233
+ animation-delay: 0.1s;
234
+ animation-direction: alternate; /* Для движения вверх и вниз */
235
+ }
236
+
237
+ .ball2 {
238
+ animation-delay: 0.2s;
239
+ animation-direction: alternate;
240
+ }
241
+
242
+ .ball3 {
243
+ animation-delay: 0.3s;
244
+ animation-direction: alternate;
245
+ }
246
+
247
+ @keyframes bounce {
248
+ 0%, 100% {
249
+ transform: translateY(0);
250
+ }
251
+ 50% {
252
+ transform: translateY(-7px); /* Для дерганного движения */
253
+ }
254
+ }
255
+
256
+ /* new loading state */
257
+ .loading-text {
258
+ min-height: 48px;
259
+ }
260
+
261
+ .loading-text p {
262
+ opacity: 0;
263
+ }
264
+
265
+ @keyframes fade-in-out {
266
+ 0% {
267
+ opacity: 0;
268
+ }
269
+ 50% {
270
+ opacity: 1;
271
+ }
272
+ 100% {
273
+ opacity: 0;
274
+ }
275
+ }
276
+
277
+ @keyframes fade-in-out-2 {
278
+ 0% {
279
+ opacity: 0;
280
+ }
281
+ 55% {
282
+ opacity: 1;
283
+ }
284
+ 100% {
285
+ opacity: 0;
286
+ }
287
+ }
288
+
289
+ /* end of new loading state */
290
+
291
+
292
+ .gpt-send-button {
293
+ background-color: #14274E;
294
+ width: 100%;
295
+ display: flex;
296
+ justify-content: center;
297
+ border-radius: 10px;
298
+ border: 2px solid #14274E;
299
+ align-items: center;
300
+ cursor: pointer;
301
+ }
302
+
303
+ .gpt-send-button span {
304
+ color: #FCFDFF;
305
+ text-align: center;
306
+ font-size: 20px;
307
+ padding: 10px 8px;
308
+ font-style: normal;
309
+ font-weight: 700;
310
+ line-height: normal;
311
+ letter-spacing: -0.4px;
312
+ }
313
+
314
+ .send-button-blocked {
315
+ background-color: #14274ed8;
316
+ border: 2px solid #14274ed8 !important;
317
+ pointer-events: none;
318
+ cursor: default;
319
+ }
320
+
321
+ .gpt-form {
322
+ /* max-width: var(--reading-width, 48em); */
323
+ margin-right: auto;
324
+ margin-left: auto;
325
+ margin-bottom: 40px;
326
+ }
327
+
328
+ .gpt-input {
329
+ height: 46px;
330
+ width: 100%;
331
+ margin: 10px 0;
332
+ display: flex;
333
+ box-sizing: border-box;
334
+ padding: 2px 20px;
335
+ flex-direction: column;
336
+ justify-content: center;
337
+ align-items: flex-start;
338
+ flex: 1 0 0;
339
+ border-radius: 10px;
340
+ border: 2px solid #14274E;
341
+ background-color: #ffffff;
342
+ color: rgb(42, 43, 42);
343
+ font-size: 14px;
344
+ line-height: 1.2em;
345
+ }
346
+
347
+ .gpt-input::placeholder {
348
+ color: rgba(42, 43, 42, 0.808);
349
+ }
350
+
351
+ .gpt-input.invalid {
352
+ border: 2px solid rgb(219, 0, 0);
353
+ }
354
+
355
+
356
+ .error-message {
357
+ color: rgb(219, 0, 0);
358
+ font-size: 14px;
359
+ margin-top: 5px;
360
+ display: none;
361
+ }
362
+
363
+ .gpt-textarea {
364
+ height: auto;
365
+ resize: vertical;
366
+ padding-top: 20px;
367
+ font-size: 14px;
368
+ }
369
+
370
+ .gpt-textarea::placeholder {
371
+ color: rgba(42, 43, 42, 0.808);
372
+ }
373
+
374
+ .custom_bot_initial_question {
375
+ width: 100%;
376
+ display: flex;
377
+ flex-direction: column;
378
+ row-gap: 10px;
379
+ max-width: 335px;
380
+ margin-left: 20px;
381
+ margin-top: -2.7rem;
382
+ margin-bottom: 40px;
383
+ }
384
+
385
+ .custom_bot_initial_question p {
386
+ padding: 5px 12px;
387
+ font-size: 12px;
388
+ background: #fff;
389
+ border: 2px solid #14274E;
390
+ border-radius: 50px;
391
+ margin: 0;
392
+ transition: 0.3s ease;
393
+ cursor: pointer;
394
+ font-style: italic;
395
+ }
396
+
397
+ .custom_bot_initial_question p:hover {
398
+ background: #14274E;
399
+ color: #fff;
400
+ }
401
+
402
+ @media (max-width: 767px) {
403
+ .App {
404
+ width: 100%;
405
+ height: 100vh;
406
+ left: 0;
407
+ bottom: 0;
408
+ display: none;
409
+ position: fixed;
410
+ z-index: 81;
411
+ box-shadow: 0px 0px 5px rgba(0, 0, 0, 0.3);
412
+ border-radius: 10px;
413
+ /* Дополнительные стили для .app на маленьких экранах */
414
+ }
415
+
416
+ .chat-section {
417
+ height: 100vh;
418
+ }
419
+
420
+ .App .chatui-container {
421
+ padding-left: 15px;
422
+ padding-right: 15px;
423
+ }
424
+
425
+ .App .chatui-container .user-msg {
426
+ padding: 15px 20px;
427
+ margin-bottom: 20px;
428
+ }
429
+
430
+ .App .chatui-container .bot-msg .bot-content-box {
431
+ padding: 5px 20px 5px 20px;
432
+ margin-bottom: 20px;
433
+ }
434
+
435
+ .custom_bot_initial_question {
436
+ margin-top: 0;
437
+ }
438
+
439
+ #chatui-container {
440
+ height: calc(100vh - 95px - 93px);
441
+ }
442
+ }
443
+
444
+
445
+ .overlay {
446
+ position: fixed;
447
+ top: 0;
448
+ left: 0;
449
+ width: 100%;
450
+ height: 100%;
451
+ background: rgba(255, 255, 255, 0.7);
452
+ display: flex;
453
+ align-items: center;
454
+ justify-content: center;
455
+ visibility: hidden;
456
+ z-index: 999;
457
+ }
458
+
459
+ .overlay h1 {
460
+ margin: 0;
461
+ font-size: 2em;
462
+ }
463
+
464
+
465
+ .report-loader {
466
+ position: absolute;
467
+ top: calc(50% - 45px);
468
+ left: calc(50% - 45px);
469
+ width: 90px;
470
+ height: 90px;
471
+ border-radius: 50%;
472
+ perspective: 800px;
473
+ }
474
+
475
+ .report-inner {
476
+ position: absolute;
477
+ box-sizing: border-box;
478
+ width: 100%;
479
+ height: 100%;
480
+ border-radius: 50%;
481
+ }
482
+
483
+ .report-inner.report-one {
484
+ left: 0;
485
+ top: 0;
486
+ animation: rotate-one 1s linear infinite;
487
+ border-bottom: 3px solid #d52121;
488
+ }
489
+
490
+ .report-inner.report-two {
491
+ right: 0%;
492
+ top: 0%;
493
+ animation: rotate-two 1s linear infinite;
494
+ border-right: 3px solid #d52121;
495
+ }
496
+
497
+ .report-inner.report-three {
498
+ right: 0%;
499
+ bottom: 0%;
500
+ animation: rotate-three 1s linear infinite;
501
+ border-top: 3px solid #d52121;
502
+ }
503
+
504
+ @keyframes rotate-one {
505
+ 0% {
506
+ transform: rotateX(35deg) rotateY(-45deg) rotateZ(0deg);
507
+ }
508
+ 100% {
509
+ transform: rotateX(35deg) rotateY(-45deg) rotateZ(360deg);
510
+ }
511
+ }
512
+
513
+ @keyframes rotate-two {
514
+ 0% {
515
+ transform: rotateX(50deg) rotateY(10deg) rotateZ(0deg);
516
+ }
517
+ 100% {
518
+ transform: rotateX(50deg) rotateY(10deg) rotateZ(360deg);
519
+ }
520
+ }
521
+
522
+ @keyframes rotate-three {
523
+ 0% {
524
+ transform: rotateX(35deg) rotateY(55deg) rotateZ(0deg);
525
+ }
526
+ 100% {
527
+ transform: rotateX(35deg) rotateY(55deg) rotateZ(360deg);
528
+ }
529
+ }
530
+
531
+ #message {
532
+ display: none;
533
+ position: absolute;
534
+ top: 0;
535
+ right: 0;
536
+ background-color: #b3ff00;
537
+ padding: 10px;
538
+ z-index: 100;
539
+ }
540
+
541
+ .message h5{
542
+ margin-bottom: 0;
543
+ }
static/images/chat.png ADDED
static/images/exit.png ADDED
static/images/send.png ADDED
static/images/thumbDown.png ADDED
static/images/thumbUp.png ADDED
static/js/audio-processor.js ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// AudioWorklet processor that watches the input's RMS volume and posts
// messages to the main thread: {type: 'silence', silenceDuration} once the
// level stays below `threshold` for SILENCE_DELAY seconds, and
// {type: 'sound'} whenever the level rises above it.
class SilenceDetectorProcessor extends AudioWorkletProcessor {
    static get parameterDescriptors() {
        // RMS level below which the input counts as silence.
        return [{name: 'threshold', defaultValue: 0.01}];
    }

    constructor() {
        super();
        // Timestamp (worklet currentTime, seconds) when silence began;
        // 0 doubles as the "not yet initialized" sentinel.
        this.silenceStart = 0;
        // Seconds of continuous silence before a 'silence' message is sent.
        this.SILENCE_DELAY = 1.5;
    }

    process(inputs, outputs, parameters) {
        const input = inputs[0];
        const threshold = parameters.threshold[0];

        if (input.length > 0) {
            // RMS over the first channel of the current render quantum.
            const inputChannelData = input[0];
            let sum = 0;
            for (let i = 0; i < inputChannelData.length; i++) {
                sum += inputChannelData[i] * inputChannelData[i];
            }
            let volume = Math.sqrt(sum / inputChannelData.length);

            if (this.silenceStart === 0) this.silenceStart = currentTime;

            if (volume < threshold) {
                // Still quiet: fire once the silence window is exceeded,
                // then reset so the next window is measured afresh.
                if (currentTime - this.silenceStart > this.SILENCE_DELAY) {
                    this.port.postMessage({type: 'silence', silenceDuration: currentTime - this.silenceStart});
                    this.silenceStart = currentTime;
                }
            } else {
                // Sound detected: restart the silence timer.
                this.silenceStart = currentTime;
                this.port.postMessage({type: 'sound'});
            }
        }

        // Keep the processor alive.
        return true;
    }
}

registerProcessor('silence-detector-processor', SilenceDetectorProcessor);
static/js/voice.js ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Shared WebSocket: startRecording() runs from the socket's onopen handler
// and must send captured audio over the same connection startCall() opened.
// (Fix: `socket` was previously a const local to startCall, so the
// references inside startRecording were an unresolved name — ReferenceError
// at runtime.)
let socket

async function startCall() {
    const uuid = generateUUID()
    socket = new WebSocket(`ws://127.0.0.1:8000/ws/${uuid}`)

    socket.onopen = () => {
        startRecording()
    }

    socket.onclose = event => console.log('WebSocket disconnected', event)
    socket.onerror = error => alert('Something was wrong. Try again later.')
    socket.onmessage = event => playResponse(event.data)

    // NOTE(review): 'volume-meter-processor.js' is referenced here, but the
    // only worklet in this repo (static/js/audio-processor.js) registers
    // 'silence-detector-processor' — confirm the module path and name.
    const audioContext = new AudioContext()
    await audioContext.audioWorklet.addModule('volume-meter-processor.js')
    const node = new AudioWorkletNode(audioContext, 'volume-meter-processor')

    navigator.mediaDevices.getUserMedia({audio: true}).then(stream => {
        const source = audioContext.createMediaStreamSource(stream)
        source.connect(node)
        node.connect(audioContext.destination) // for debugging
    })

    // Forward each chunk from the worklet to the server as base64 JSON.
    node.port.onmessage = event => {
        const audioData = event.data // assumed to already be a Blob or MediaStream
        const reader = new FileReader()
        reader.readAsDataURL(audioData)
        reader.onloadend = () => {
            const base64Audio = reader.result.split(',')[1]
            socket.send(JSON.stringify({audio: base64Audio}))
        }
    }
}

async function startRecording() {
    try {
        // Lazily create a single page-wide AudioContext + worklet node.
        if (!window.audioContext) {
            window.audioContext = new AudioContext()
            await window.audioContext.audioWorklet.addModule('volume-meter-processor.js')
            window.audioNode = new AudioWorkletNode(window.audioContext, 'volume-meter-processor')

            window.audioNode.port.onmessage = event => {
                const audioData = event.data
                const blob = new Blob(audioData, { type: 'audio/webm' })
                const reader = new FileReader()
                reader.readAsDataURL(blob)
                reader.onloadend = () => {
                    const base64Audio = reader.result.split(',')[1]
                    // `socket` now resolves to the module-level connection
                    // opened by startCall().
                    socket.send(JSON.stringify({ audio: base64Audio }))
                }
            }
        }

        const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
        const source = window.audioContext.createMediaStreamSource(stream)
        source.connect(window.audioNode)
        window.audioNode.connect(window.audioContext.destination)

        console.log("Recording started")
    } catch (error) {
        console.error("Error accessing microphone:", error)
        alert("Cannot access microphone. Please check permissions.")
    }
}
static/js/ws.js ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// --- Shared module state for the voice-call page (static/js/ws.js) ---

// Container element that chat bubbles are appended to.
const messageHistory = document.getElementById('chatHistory')
// Microphone MediaStream from getUserMedia; reused by startMediaRecorder().
let stream;
// WebSocket to the backend, opened when the call starts.
let ws
// AudioWorkletNode running 'silence-detector-processor'; posts
// {type: 'silence'|'sound'} messages used to segment utterances.
let silenceDetectorNode;
// Debounce flag: ensures stopRecorder() fires once per silence period.
let silenceCount = 0
// True while a call is active (toggles the start/stop button behaviour).
let recording = false;
// Recorder for the current utterance (recreated after each bot reply).
let mediaRecorder;
// Recorder capturing the whole session, independent of utterances.
let continuousRecorder
// Web Audio context feeding the silence detector.
let audioContext;
// MediaStreamAudioSourceNode created from `stream`.
let source;
// Chunks of the utterance currently being recorded.
let currentAudioChunks = [];
// Chunks of the entire session (filled by continuousRecorder).
let allAudioChunks = [];
// Random v4-style UUID identifying this client session on the server.
const uuid = generateUUID()
// Button that starts/stops the call.
const startRecordingButton = document.getElementById('startRecording')
// Overlay shown while waiting for the server's response.
const loadingModal = document.getElementById('loadingModal');
16
/** Show the blocking loading overlay while the server processes audio. */
function makeLoading() {
    loadingModal.style.setProperty('visibility', 'visible')
}
19
+
20
/** Hide the loading overlay once a server response has arrived. */
function stopLoading() {
    loadingModal.style.setProperty('visibility', 'hidden')
}
23
+
24
/**
 * Flash a short status banner (e.g. "You can speak!") for two seconds.
 * @param {string} message_text - Text to display in the #message element.
 */
function showMessage(message_text) {
    const banner = document.getElementById('message')
    banner.innerText = message_text
    banner.style.display = 'block'

    // Auto-hide after 2 s.
    setTimeout(() => {
        banner.style.display = 'none'
    }, 2000)
}
33
+
34
/**
 * Append a chat bubble to the history pane.
 * @param {string} type - Sender label rendered as the heading ('User' or 'Liza').
 * @param {string} message - Message body; inserted as raw HTML.
 */
function createMessage(type, message) {
    const newMessage = document.createElement('div')
    newMessage.className = 'message rounded-4 bg-white mb-4 mx-4 py-2 px-3 border'
    // NOTE(review): `type` and `message` are interpolated into innerHTML
    // without escaping — if transcriptions/responses can contain markup this
    // is an XSS vector; consider textContent or escaping. Confirm inputs.
    newMessage.innerHTML = `
    <h5>${type}</h5>
    ${message}
    `
    messageHistory.appendChild(newMessage)
}
43
/**
 * Handle one JSON payload from the server: render the user's transcript,
 * play the synthesized voice reply, then show the bot's text and restart
 * the utterance recorder once playback finishes.
 * @param {string} data - JSON string with 'user_query', 'ai_response' and
 *                        'voice_response' (base64-encoded mp3).
 */
function playResponse(data) {
    const payload = JSON.parse(data)
    createMessage('User', payload['user_query'])
    stopLoading()
    console.log(payload)

    const voice = new Audio(`data:audio/mp3;base64,${payload['voice_response']}`)
    voice.play()

    // Only resume listening after the reply has fully played, so the bot's
    // own audio is not captured as the next utterance.
    voice.onended = () => {
        recording = true
        createMessage('Liza', payload['ai_response'])
        showMessage('You can speak!')
        startMediaRecorder()
    }
}
58
+
59
// Start/stop a voice call. First click: open the WebSocket, request the
// microphone, wire up silence detection and both recorders. Second click:
// tear everything down via stopRecording().
startRecordingButton.addEventListener('click', async () => {
    if (!recording) {
        // Defensive: stop any recorder left over from a previous session.
        if (mediaRecorder && mediaRecorder.state !== 'inactive') {
            mediaRecorder.stop();
        }
        // NOTE(review): hardcoded dev endpoint — the Dockerfile serves uvicorn
        // on port 7860 with TLS (so wss://), confirm before deploying.
        ws = new WebSocket(`ws://localhost:8000/ws/${uuid}`);
        ws.onclose = (event) => {
            // Server closed the connection: stop capturing audio.
            if (mediaRecorder && mediaRecorder.state !== 'inactive') {
                mediaRecorder.stop();
            }
        }
        ws.onerror = (error) => {
            // Unrecoverable socket error: inform the user and reset the page.
            alert('Something was wrong. Try again later.')
            console.log(error)
            window.location.reload()
        };
        ws.onmessage = (event) => {
            const response = event.data
            playResponse(response)
        }

        startRecordingButton.innerHTML = 'Stop call';
        try {
            stream = await navigator.mediaDevices.getUserMedia({audio: true, video: false});
            audioContext = new AudioContext();
            await audioContext.audioWorklet.addModule('../../../static/js/audio-processor.js');
            silenceDetectorNode = new AudioWorkletNode(audioContext, 'silence-detector-processor');
            // Segment speech: on the first 'silence' after audio has been
            // captured, flush the utterance to the server exactly once;
            // any 'sound' message re-arms the detector.
            silenceDetectorNode.port.onmessage = (event) => {
                if (event.data.type === 'silence') {
                    if (currentAudioChunks.length > 0) {
                        if (silenceCount === 0) {
                            silenceCount += 1;
                            stopRecorder();
                        }
                    }
                } else if (event.data.type === 'sound') {
                    silenceCount = 0;
                }
            };
            source = audioContext.createMediaStreamSource(stream);
            // Per-utterance recorder: emits a chunk every second.
            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.start(1000);
            mediaRecorder.ondataavailable = event => {
                currentAudioChunks.push(event.data);
            };
            // Routing to destination keeps the worklet pulled by the graph
            // (and makes the mic audible for debugging).
            source.connect(silenceDetectorNode).connect(audioContext.destination);
            // Whole-session recorder, independent of utterance boundaries.
            continuousRecorder = new MediaRecorder(stream);
            continuousRecorder.start();
            continuousRecorder.ondataavailable = event => {
                allAudioChunks.push(event.data);
            };
            recording = true;
        } catch (error) {
            console.error('Access to microphone denied:', error);
        }
    } else {
        await stopRecording();
    }
});
118
+
119
/**
 * End the call. Reloading the page releases the microphone stream, closes
 * the WebSocket and resets all module state in one step.
 *
 * (Previous per-object teardown — stopping both recorders, disconnecting
 * the worklet/source and closing the AudioContext — was dead commented-out
 * code and has been removed; the reload supersedes it.)
 */
async function stopRecording() {
    window.location.reload()
}
130
+
131
/**
 * Base64-encode a recorded audio blob, send it over the WebSocket as
 * {'audio': <base64>}, and show the loading overlay.
 * @param {Blob} audioBlob - The captured utterance.
 * @returns {Promise<void>} Resolves once the payload has been sent;
 *                          rejects if the FileReader fails.
 */
function sendAudioToServer(audioBlob) {
    return new Promise((resolve, reject) => {
        console.log("Sending audio to server...", audioBlob);
        const encoder = new FileReader();
        encoder.onloadend = () => {
            // Strip the "data:...;base64," prefix, keep only the payload.
            const base64String = encoder.result.split(',')[1];
            ws.send(JSON.stringify({'audio': base64String}));
            makeLoading()
            resolve();
        };
        encoder.onerror = reject;
        encoder.readAsDataURL(audioBlob);
    });
}
147
+
148
+
149
/**
 * Finish the current utterance: stop the per-utterance recorder, ship the
 * accumulated chunks to the server as one WAV blob, then reset the buffer.
 */
async function stopRecorder() {
    const stillRunning = mediaRecorder && mediaRecorder.state !== 'inactive'
    if (stillRunning) {
        mediaRecorder.stop();
    }
    const utterance = new Blob(currentAudioChunks, {type: 'audio/wav'})
    await sendAudioToServer(utterance);
    // Cleared only after the send resolves, matching the original ordering.
    currentAudioChunks = [];
}
156
+
157
/**
 * (Re)create the per-utterance MediaRecorder on the existing microphone
 * stream and start emitting 1-second chunks into currentAudioChunks.
 */
async function startMediaRecorder() {
    mediaRecorder = new MediaRecorder(stream)
    mediaRecorder.start(1000)
    mediaRecorder.ondataavailable = ({data}) => {
        currentAudioChunks.push(data)
    }
}
164
+
165
/**
 * Build an RFC 4122 version-4 UUID from 16 cryptographically random bytes.
 * @returns {string} UUID in the canonical 8-4-4-4-12 hex form.
 */
function generateUUID() {
    const bytes = new Uint8Array(16)
    window.crypto.getRandomValues(bytes)

    // Stamp the version (0100 -> v4) and variant (10xx) bits.
    bytes[6] = (bytes[6] & 0x0f) | 0x40
    bytes[8] = (bytes[8] & 0x3f) | 0x80

    let out = ""
    bytes.forEach((b, i) => {
        // Dashes precede bytes 4, 6, 8 and 10 in the canonical layout.
        if (i === 4 || i === 6 || i === 8 || i === 10) {
            out += "-"
        }
        out += b.toString(16).padStart(2, "0")
    })
    return out
}
176
+