Justin Haaheim committed on
Commit
42d9c16
1 Parent(s): a95bed7

[Streaming] Pre-launch cleanup of streaming-react-app and seamless_server (#170)

Browse files
seamless_server/app_pubsub.py CHANGED
@@ -622,7 +622,6 @@ async def configure_stream(sid, config):
622
  await emit_server_state_update()
623
 
624
  return {"status": "ok", "message": "server_ready"}
625
- # await sio.emit("server_ready", None, to=sid)
626
 
627
 
628
  # The config here is a partial config, meaning it may not contain all the config values -- only the ones the user
@@ -636,14 +635,10 @@ async def set_dynamic_config(
636
  ):
637
  session_data = await get_session_data(sid)
638
 
639
- # client_id = None
640
  member = None
641
- # room = None
642
 
643
  if session_data:
644
- # client_id = session_data.get("client_id")
645
  member = session_data.get("member_object")
646
- # room = session_data.get("room_object")
647
 
648
  if member:
649
  new_dynamic_config = {
@@ -661,8 +656,6 @@ async def set_dynamic_config(
661
  @sio.event
662
  @catch_and_log_exceptions_for_sio_event_handlers
663
  async def incoming_audio(sid, blob):
664
- # logger.info(f"[event: incoming_audio] {sid}")
665
-
666
  session_data = await get_session_data(sid)
667
 
668
  client_id = None
@@ -721,9 +714,11 @@ async def incoming_audio(sid, blob):
721
  blob, dynamic_config=member.transcoder_dynamic_config
722
  )
723
 
724
- # TODO: What we have below is NOT a good way to do this, because instead of sending output when its ready we're
725
- # sending it when new input comes in, which means it's just sitting around. This is a temporary hack until
726
- # we figure out a better architecture for awaiting transcoder output and sending it to the client.
 
 
727
  events = get_transcoder_output_events(member.transcoder)
728
  logger.debug(f"[incoming_audio] transcoder output events: {len(events)}")
729
 
 
622
  await emit_server_state_update()
623
 
624
  return {"status": "ok", "message": "server_ready"}
 
625
 
626
 
627
  # The config here is a partial config, meaning it may not contain all the config values -- only the ones the user
 
635
  ):
636
  session_data = await get_session_data(sid)
637
 
 
638
  member = None
 
639
 
640
  if session_data:
 
641
  member = session_data.get("member_object")
 
642
 
643
  if member:
644
  new_dynamic_config = {
 
656
  @sio.event
657
  @catch_and_log_exceptions_for_sio_event_handlers
658
  async def incoming_audio(sid, blob):
 
 
659
  session_data = await get_session_data(sid)
660
 
661
  client_id = None
 
714
  blob, dynamic_config=member.transcoder_dynamic_config
715
  )
716
 
717
+ # Send back any available model output
718
+ # NOTE: In theory it would make sense to remove this from the incoming_audio handler and
719
+ # handle this in a dedicated thread that checks for output and sends it right away,
720
+ # but in practice for our limited demo use cases this approach didn't add noticeable
721
+ # latency, so we're keeping it simple for now.
722
  events = get_transcoder_output_events(member.transcoder)
723
  logger.debug(f"[incoming_audio] transcoder output events: {len(events)}")
724
 
seamless_server/src/connection_tracker.py DELETED
@@ -1,64 +0,0 @@
1
- from logging import Logger
2
- import time
3
-
4
-
5
- class StreamingConnectionInfo:
6
- def __init__(self, address, active_connections, latest_message_received_timestamp):
7
- self.address = address
8
- self.active_connections = active_connections
9
- self.latest_message_received_timestamp = latest_message_received_timestamp
10
-
11
- def __repr__(self):
12
- return str(self)
13
-
14
- def __str__(self):
15
- return str(
16
- {
17
- "address": self.address,
18
- "active_connections": self.active_connections,
19
- "latest_message_received_timestamp": self.latest_message_received_timestamp,
20
- }
21
- )
22
-
23
-
24
- class ConnectionTracker:
25
- def __init__(self, logger: Logger):
26
- self.connections = dict()
27
- self.logger = logger
28
-
29
- def __str__(self):
30
- return str(self.connections)
31
-
32
- def add_connection(self, address):
33
- if address not in self.connections:
34
- self.connections[address] = StreamingConnectionInfo(address, 1, time.time())
35
- else:
36
- self.connections[address].active_connections += 1
37
- self.connections[address].latest_message_received_timestamp = time.time()
38
-
39
- def log_recent_message(self, address):
40
- if address in self.connections:
41
- self.connections[address].latest_message_received_timestamp = time.time()
42
- else:
43
- self.logger.warning(
44
- f"Address {address} not found in connection tracker when attempting to log recent message"
45
- )
46
-
47
- def remove_connection(self, address):
48
- if address in self.connections:
49
- self.connections[address].active_connections -= 1
50
- if self.connections[address].active_connections < 0:
51
- self.logger.warning(
52
- f"Address {address} has negative active connections ({self.connections[address].active_connections})"
53
- )
54
- if self.connections[address].active_connections <= 0:
55
- del self.connections[address]
56
- else:
57
- self.logger.warning(
58
- f"Address {address} not found in connection tracker when attempting to remove it"
59
- )
60
-
61
- def get_active_connection_count(self):
62
- return sum(
63
- [connection.active_connections for connection in self.connections.values()]
64
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
streaming-react-app/README.md CHANGED
@@ -2,64 +2,13 @@
2
 
3
  ## Getting Started
4
 
5
- - `yarn run dev` - Run the app with a development server that supports hot module reloading
6
 
7
- ## URL Parameters
8
-
9
- You can provide URL parameters in order to change the behavior of the app. Those are documented in [URLParams.ts](src/URLParams.ts).
10
-
11
- # Vite Information: React + TypeScript + Vite
12
-
13
- This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
14
-
15
- Currently, two official plugins are available:
16
-
17
- - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
18
- - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
19
-
20
- ## Expanding the ESLint configuration
21
-
22
- If you are developing a production application, we recommend updating the configuration to enable type aware lint rules:
23
-
24
- - Configure the top-level `parserOptions` property like this:
25
-
26
- ```js
27
- parserOptions: {
28
- ecmaVersion: 'latest',
29
- sourceType: 'module',
30
- project: ['./tsconfig.json', './tsconfig.node.json'],
31
- tsconfigRootDir: __dirname,
32
- },
33
- ```
34
 
35
- - Replace `plugin:@typescript-eslint/recommended` to `plugin:@typescript-eslint/recommended-type-checked` or `plugin:@typescript-eslint/strict-type-checked`
36
- - Optionally add `plugin:@typescript-eslint/stylistic-type-checked`
37
- - Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and add `plugin:react/recommended` & `plugin:react/jsx-runtime` to the `extends` list
38
 
39
- # To Deploy to AWS
40
-
41
- 1. Acquire AWS credentials (not needed if already on an EC2 instance with permissions)
42
-
43
- On your local mac use the following command.
44
-
45
- ```
46
- eval $(corp_cloud aws get-creds 790537050551)
47
- ```
48
-
49
- 2. Deploy to AWS
50
-
51
- Build the react and copy the contents of [dist](dist) folder to s3 bucket and then invalidate the cloudfront (CDN) cache. Note step 2 has been automated using `yarn deploy_dev`
52
-
53
- To deploy to the (old) seamless-vc s3 bucket:
54
-
55
- ```
56
- yarn build:dev_vc
57
- yarn deploy_dev_vc
58
- ```
59
-
60
- To deploy to the (new) seamless-vr terraform-based s3 bucket:
61
 
62
- ```
63
- yarn build:dev_vr
64
- yarn deploy_dev_vr
65
- ```
 
2
 
3
  ## Getting Started
4
 
5
+ This project uses the [Yarn Package Manager](https://yarnpkg.com/).
6
 
7
+ 1. `yarn` - Install project dependencies
8
+ 2. `yarn run dev` - Run the app with a development server that supports hot module reloading
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
+ NOTE: You will either need to provide the server URL via environment variable (you can use the `.env` file for this) or via a url param when you load the react app (example: `http://localhost:5173/?serverURL=localhost:8000`)
 
 
11
 
12
+ ## URL Parameters
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
+ You can provide URL parameters in order to change the behavior of the app. Those are documented in [URLParams.ts](src/URLParams.ts).
 
 
 
streaming-react-app/package.json CHANGED
@@ -6,16 +6,12 @@
6
  "scripts": {
7
  "dev": "vite --host --strictPort",
8
  "build": "vite build",
9
- "build:dev_vr": "yarn build --mode deploy_dev_vr",
10
- "build:dev_vc": "yarn build --mode deploy_dev_vc",
11
  "preview": "vite preview",
12
  "clean:node-modules": "rm -rf node_modules/",
13
  "ts-check": "tsc --noEmit",
14
  "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
15
  "prettier-check": "cd ../ && yarn run prettier-base --check streaming-react-app",
16
- "signal": "concurrently --names \"TS,LINT,PRETTIER\" -c \"bgBlack.bold,bgRed.bold,bgCyan.bold\" \"yarn run ts-check\" \"yarn run lint\" \"yarn run prettier-check\"",
17
- "deploy_dev_vr": "aws s3 sync dist/ s3://dev-seamless-vr-www && aws cloudfront create-invalidation --distribution-id E38ZTD4R79FGYF --paths \"/*\"",
18
- "deploy_dev_vc": "aws s3 sync dist/ s3://seamless-vc.dev.metademolab.com && aws cloudfront create-invalidation --distribution-id E29D1W2ORBP77G --paths \"/*\""
19
  },
20
  "dependencies": {
21
  "@emotion/react": "11.11.1",
 
6
  "scripts": {
7
  "dev": "vite --host --strictPort",
8
  "build": "vite build",
 
 
9
  "preview": "vite preview",
10
  "clean:node-modules": "rm -rf node_modules/",
11
  "ts-check": "tsc --noEmit",
12
  "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
13
  "prettier-check": "cd ../ && yarn run prettier-base --check streaming-react-app",
14
+ "signal": "concurrently --names \"TS,LINT,PRETTIER\" -c \"bgBlack.bold,bgRed.bold,bgCyan.bold\" \"yarn run ts-check\" \"yarn run lint\" \"yarn run prettier-check\""
 
 
15
  },
16
  "dependencies": {
17
  "@emotion/react": "11.11.1",
streaming-react-app/public/vite.svg DELETED
streaming-react-app/src/RoomConfig.tsx CHANGED
@@ -74,7 +74,6 @@ export default function RoomConfig({
74
  const lockServerValidated: string | null =
75
  lockServer && roles['speaker'] ? lockServerName : null;
76
 
77
- // TODO: Show error state if roomID isn't valid
78
  setJoinInProgress(true);
79
 
80
  const configObject: JoinRoomConfig = {
 
74
  const lockServerValidated: string | null =
75
  lockServer && roles['speaker'] ? lockServerName : null;
76
 
 
77
  setJoinInProgress(true);
78
 
79
  const configObject: JoinRoomConfig = {
streaming-react-app/src/SeamlessLogo.tsx DELETED
@@ -1,33 +0,0 @@
1
- function SeamlessLogo() {
2
- return (
3
- <svg
4
- width="24"
5
- height="24"
6
- viewBox="0 0 24 24"
7
- fill="none"
8
- xmlns="http://www.w3.org/2000/svg">
9
- <circle cx="12" cy="12" r="12" fill="#1C2B33" />
10
- <rect x="7" y="9" width="2" height="6" rx="1" fill="white" />
11
- <rect
12
- x="15"
13
- y="9"
14
- width="2"
15
- height="6"
16
- rx="1"
17
- fill="white"
18
- fill-opacity="0.5"
19
- />
20
- <rect
21
- x="11"
22
- y="6"
23
- width="2"
24
- height="12"
25
- rx="1"
26
- fill="white"
27
- fill-opacity="0.5"
28
- />
29
- </svg>
30
- );
31
- }
32
-
33
- export default SeamlessLogo;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
streaming-react-app/src/StreamingInterface.css CHANGED
@@ -47,16 +47,10 @@
47
 
48
  .translation-text-container-sra {
49
  background-color: #f8f8f8;
50
- /* flex-grow: 1; make it expand to fill the available space */
51
  padding-top: 12px;
52
  padding-bottom: 4px;
53
  }
54
 
55
- .translation-text-sra {
56
- /* overflow-y: scroll; */
57
- /* max-height: 500px; */
58
- }
59
-
60
  .text-chunk-sra {
61
  margin-bottom: 12px;
62
  }
 
47
 
48
  .translation-text-container-sra {
49
  background-color: #f8f8f8;
 
50
  padding-top: 12px;
51
  padding-bottom: 4px;
52
  }
53
 
 
 
 
 
 
54
  .text-chunk-sra {
55
  margin-bottom: 12px;
56
  }
streaming-react-app/src/StreamingInterface.tsx CHANGED
@@ -98,7 +98,6 @@ async function requestDisplayMediaAudioStream(
98
  ) {
99
  const stream = await navigator.mediaDevices.getDisplayMedia({
100
  audio: {...config, channelCount: 1},
101
- // selfBrowserSurface: false, // don't allow the user to select the current tab as the source
102
  });
103
  console.debug(
104
  '[requestDisplayMediaAudioStream] stream created with settings:',
@@ -183,8 +182,6 @@ export default function StreamingInterface() {
183
  );
184
 
185
  const [receivedData, setReceivedData] = useState<Array<ServerTextData>>([]);
186
- // const [translationSentencesAnimated, setTranslationSentencesAnimated] =
187
- // useState<TranslationSentences>([]);
188
  const [
189
  translationSentencesAnimatedIndex,
190
  setTranslationSentencesAnimatedIndex,
@@ -261,7 +258,6 @@ export default function StreamingInterface() {
261
  if (prevAgent?.name !== newAgent?.name) {
262
  setTargetLang(newAgent?.targetLangs[0] ?? null);
263
  setEnableExpressive(null);
264
- // setOutputMode(newAgent.modalities[0]);
265
  }
266
  return newAgent;
267
  });
@@ -310,7 +306,6 @@ export default function StreamingInterface() {
310
  event: 'config',
311
  rate: sampleRate,
312
  model_name: modelName,
313
- // source_language: inputLang,
314
  debug: serverDebugFlag,
315
  // synchronous processing isn't implemented on the v2 pubsub server, so hardcode this to true
316
  async_processing: true,
@@ -390,11 +385,12 @@ export default function StreamingInterface() {
390
  const mediaStreamSource = audioContext.createMediaStreamSource(stream);
391
  setInputStreamSource(mediaStreamSource);
392
  /**
393
- * NOTE: This currently uses a deprecated way of processing the audio (createScriptProcessor).
 
394
  *
395
  * Documentation for the deprecated way of doing it is here: https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createScriptProcessor
396
  *
397
- * This should be migrated to something like this SO answer: https://stackoverflow.com/a/65448287
398
  */
399
  const scriptProcessor = audioContext.createScriptProcessor(16384, 1, 1);
400
  setScriptNodeProcessor(scriptProcessor);
@@ -408,7 +404,6 @@ export default function StreamingInterface() {
408
  console.warn('[onaudioprocess] socket is null in onaudioprocess');
409
  return;
410
  }
411
- // console.debug('[onaudioprocess] event', event);
412
 
413
  if (mutedRef.current) {
414
  // We still want to send audio to the server when we're muted to ensure we
@@ -475,10 +470,6 @@ export default function StreamingInterface() {
475
  inputStreamSource.disconnect(scriptNodeProcessor);
476
  scriptNodeProcessor.disconnect(audioContext.destination);
477
 
478
- // From: https://stackoverflow.com/questions/65447236/scriptnode-onaudioprocess-is-deprecated-any-alternative
479
- // do we also need this??
480
- // recorder?.stop();
481
-
482
  // Release the mic input so we stop showing the red recording icon in the browser
483
  inputStream?.getTracks().forEach((track) => track.stop());
484
  }
@@ -518,7 +509,6 @@ export default function StreamingInterface() {
518
  }
519
 
520
  const onRoomStateUpdate = (roomState: RoomState) => {
521
- // console.log('[event: room_state_update]', roomState);
522
  setRoomState(roomState);
523
  };
524
 
@@ -632,11 +622,9 @@ export default function StreamingInterface() {
632
  useEffect(() => {
633
  const onScroll = () => {
634
  if (isScrolledToDocumentBottom(SCROLLED_TO_BOTTOM_THRESHOLD_PX)) {
635
- // console.debug('scrolled to bottom!');
636
  isScrolledToBottomRef.current = true;
637
  return;
638
  }
639
- // console.debug('NOT scrolled to bottom!');
640
  isScrolledToBottomRef.current = false;
641
  return;
642
  };
@@ -712,7 +700,6 @@ export default function StreamingInterface() {
712
  valueLabelDisplay="auto"
713
  value={gain}
714
  onChange={(_event: Event, newValue: number | number[]) => {
715
- // console.log({event, newValue});
716
  if (typeof newValue === 'number') {
717
  const scaledGain = getGainScaledValue(newValue);
718
  // We want the actual gain node to use the scaled value
@@ -1002,7 +989,7 @@ export default function StreamingInterface() {
1002
  }
1003
  label="Noise Suppression (Browser)"
1004
  />
1005
- <FormControlLabel
1006
  control={
1007
  <Checkbox
1008
  checked={
 
98
  ) {
99
  const stream = await navigator.mediaDevices.getDisplayMedia({
100
  audio: {...config, channelCount: 1},
 
101
  });
102
  console.debug(
103
  '[requestDisplayMediaAudioStream] stream created with settings:',
 
182
  );
183
 
184
  const [receivedData, setReceivedData] = useState<Array<ServerTextData>>([]);
 
 
185
  const [
186
  translationSentencesAnimatedIndex,
187
  setTranslationSentencesAnimatedIndex,
 
258
  if (prevAgent?.name !== newAgent?.name) {
259
  setTargetLang(newAgent?.targetLangs[0] ?? null);
260
  setEnableExpressive(null);
 
261
  }
262
  return newAgent;
263
  });
 
306
  event: 'config',
307
  rate: sampleRate,
308
  model_name: modelName,
 
309
  debug: serverDebugFlag,
310
  // synchronous processing isn't implemented on the v2 pubsub server, so hardcode this to true
311
  async_processing: true,
 
385
  const mediaStreamSource = audioContext.createMediaStreamSource(stream);
386
  setInputStreamSource(mediaStreamSource);
387
  /**
388
+ * NOTE: This currently uses a deprecated way of processing the audio (createScriptProcessor), but
389
+ * which is easy and convenient for our purposes.
390
  *
391
  * Documentation for the deprecated way of doing it is here: https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createScriptProcessor
392
  *
393
+ * In an ideal world this would be migrated to something like this SO answer: https://stackoverflow.com/a/65448287
394
  */
395
  const scriptProcessor = audioContext.createScriptProcessor(16384, 1, 1);
396
  setScriptNodeProcessor(scriptProcessor);
 
404
  console.warn('[onaudioprocess] socket is null in onaudioprocess');
405
  return;
406
  }
 
407
 
408
  if (mutedRef.current) {
409
  // We still want to send audio to the server when we're muted to ensure we
 
470
  inputStreamSource.disconnect(scriptNodeProcessor);
471
  scriptNodeProcessor.disconnect(audioContext.destination);
472
 
 
 
 
 
473
  // Release the mic input so we stop showing the red recording icon in the browser
474
  inputStream?.getTracks().forEach((track) => track.stop());
475
  }
 
509
  }
510
 
511
  const onRoomStateUpdate = (roomState: RoomState) => {
 
512
  setRoomState(roomState);
513
  };
514
 
 
622
  useEffect(() => {
623
  const onScroll = () => {
624
  if (isScrolledToDocumentBottom(SCROLLED_TO_BOTTOM_THRESHOLD_PX)) {
 
625
  isScrolledToBottomRef.current = true;
626
  return;
627
  }
 
628
  isScrolledToBottomRef.current = false;
629
  return;
630
  };
 
700
  valueLabelDisplay="auto"
701
  value={gain}
702
  onChange={(_event: Event, newValue: number | number[]) => {
 
703
  if (typeof newValue === 'number') {
704
  const scaledGain = getGainScaledValue(newValue);
705
  // We want the actual gain node to use the scaled value
 
989
  }
990
  label="Noise Suppression (Browser)"
991
  />
992
+ <FormControlLabel
993
  control={
994
  <Checkbox
995
  checked={
streaming-react-app/src/createBufferedSpeechPlayer.ts CHANGED
@@ -100,8 +100,7 @@ export default function createBufferedSpeechPlayer({
100
  );
101
 
102
  source.connect(gainNode);
103
- // the gain node is already connected to audioContext.destination
104
- // source.connect(audioContext.destination);
105
  const startTime = new Date().getTime();
106
  source.start();
107
  currentPlayingBufferSource = source;
@@ -118,9 +117,6 @@ export default function createBufferedSpeechPlayer({
118
  debug()?.playedAudio(startTime, endTime, buffer);
119
  currentPlayingBufferSource = null;
120
 
121
- // TODO: should we disconnect source from gain node here?
122
- // source.disconnect(gainNode);
123
-
124
  // We don't set isPlaying = false here because we are attempting to continue playing. It will get set to false if there are no more buffers to play
125
  playNextBuffer();
126
  };
 
100
  );
101
 
102
  source.connect(gainNode);
103
+
 
104
  const startTime = new Date().getTime();
105
  source.start();
106
  currentPlayingBufferSource = source;
 
117
  debug()?.playedAudio(startTime, endTime, buffer);
118
  currentPlayingBufferSource = null;
119
 
 
 
 
120
  // We don't set isPlaying = false here because we are attempting to continue playing. It will get set to false if there are no more buffers to play
121
  playNextBuffer();
122
  };
streaming-react-app/src/generateNewRoomID.ts CHANGED
@@ -54,7 +54,3 @@ export function getSequentialRoomIDForTestingGenerator(): () => string {
54
  return result;
55
  };
56
  }
57
-
58
- // const generator = getSequentialRoomIDForTestingGenerator();
59
-
60
- // Array.from({length: 200}, () => console.log(generator()));
 
54
  return result;
55
  };
56
  }
 
 
 
 
streaming-react-app/src/getTranslationSentencesFromReceivedData.ts CHANGED
@@ -6,7 +6,6 @@ export default function getTranslationSentencesFromReceivedData(
6
  return receivedData
7
  .reduce(
8
  (acc, data) => {
9
- // TODO: Add special handling if the payload starts/ends with an apostrophe?
10
  const newAcc = [
11
  ...acc.slice(0, -1),
12
  acc[acc.length - 1].trim() + ' ' + data.payload,
 
6
  return receivedData
7
  .reduce(
8
  (acc, data) => {
 
9
  const newAcc = [
10
  ...acc.slice(0, -1),
11
  acc[acc.length - 1].trim() + ' ' + data.payload,
streaming-react-app/src/index.css DELETED
File without changes
streaming-react-app/src/main.tsx CHANGED
@@ -1,7 +1,6 @@
1
  import React from 'react';
2
  import ReactDOM from 'react-dom/client';
3
  import App from './App.tsx';
4
- import './index.css';
5
 
6
  ReactDOM.createRoot(document.getElementById('root')!).render(
7
  <React.StrictMode>
 
1
  import React from 'react';
2
  import ReactDOM from 'react-dom/client';
3
  import App from './App.tsx';
 
4
 
5
  ReactDOM.createRoot(document.getElementById('root')!).render(
6
  <React.StrictMode>
streaming-react-app/vite.config.ts CHANGED
@@ -9,6 +9,11 @@ import react from '@vitejs/plugin-react';
9
  // https://vitejs.dev/config/
10
  export default defineConfig(({ command }) => {
11
  let define = {};
 
 
 
 
 
12
  return {
13
  plugins: [react()],
14
  define: define,
 
9
  // https://vitejs.dev/config/
10
  export default defineConfig(({ command }) => {
11
  let define = {};
12
+ if (command === 'serve') {
13
+ define = {
14
+ global: {},
15
+ };
16
+ }
17
  return {
18
  plugins: [react()],
19
  define: define,