jan-hq committed
Commit 7582b70
1 Parent(s): a20e0b6

Upload folder using huggingface_hub

Files changed (3):
  1. app.py +5 -11
  2. model.yml +0 -39
  3. models/audio.py +0 -4
app.py CHANGED
@@ -1,18 +1,18 @@
 import argparse, os,sys
 parser = argparse.ArgumentParser(description="WhisperVQ Application")
-parser.add_argument('--log_path', type=str,
+parser.add_argument('--log-path', type=str,
                     default='whisper.log', help='The log file path')
-parser.add_argument('--log_level', type=str, default='INFO',
+parser.add_argument('--log-level', type=str, default='INFO',
                     choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'TRACE'], help='The log level')
 parser.add_argument('--port', type=int, default=3348,
                     help='The port to run the WhisperVQ app on')
-parser.add_argument('--device_id', type=str, default="0",
+parser.add_argument('--device-id', type=str, default="0",
                     help='The port to run the WhisperVQ app on')
-parser.add_argument('--package_dir', type=str, default="",
+parser.add_argument('--package-dir', type=str, default="",
                     help='The package-dir to be extended to sys.path')
 args = parser.parse_args()
 sys.path.insert(0, args.package_dir)
-os.environ["CUDA_VISIBLE_DEVICES"] =args.device_id # Use the first Nvidia GPU
+os.environ["CUDA_VISIBLE_DEVICES"] = args.device_id # Use the first Nvidia GPU

 import logging
 import uvicorn
@@ -34,7 +34,6 @@ logger = logging.getLogger(__name__)

 from services.AudioTokenizerService import get_audio_tokenizer_service
 from routes.AudioTokenizerRoute import audio_tokenizer_router
-from routes.InferenceRoute import audio_inference_router

 @asynccontextmanager
 async def lifespan(app: FastAPI):
@@ -48,7 +47,6 @@ app = FastAPI(lifespan=lifespan)

 # include the routes
 app.include_router(audio_tokenizer_router)
-app.include_router(audio_inference_router)

 def self_terminate():
     time.sleep(1)
@@ -61,10 +59,6 @@ async def destroy():
     threading.Thread(target=self_terminate, daemon=True).start()
     return {"success": True}

-@app.get("/health")
-async def health():
-    return {"status": "OK"}
-
 if __name__ == "__main__":
     import uvicorn
     from uvicorn.config import LOGGING_CONFIG
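In sum, this change set renames every long option from snake_case to kebab-case (--log_path → --log-path, and so on) and drops the inference route and the /health endpoint. The rename needs no other edits because argparse converts hyphens in long option names to underscores when deriving attribute names, so args.log_path, args.device_id, and args.package_dir resolve exactly as before. A minimal self-contained check (hypothetical invocation, not from this repo):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--device-id', type=str, default="0")  # kebab-case flag
    args = parser.parse_args(['--device-id', '1'])
    print(args.device_id)  # argparse stores the value under device_id -> prints "1"

One side effect worth flagging: anything that polled GET /health (a supervisor, a load balancer) will now receive a 404.
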
model.yml CHANGED
@@ -1,39 +0,0 @@
-# BEGIN GENERAL GGUF METADATA
-id: whispervq:fp16 # Model ID unique between models
-model: whispervq:fp16 # Model ID which is used for request construct - should be unique between models (author / quantization)
-name: Ichigo WhisperVQ
-version: 1 # metadata.version
-
-# END GENERAL METADATA
-
-# BEGIN INFERENCE PARAMETERS
-# BEGIN REQUIRED
-
-
-# END REQUIRED
-
-# BEGIN OPTIONAL
-
-# END OPTIONAL
-# END INFERENCE PARAMETERS
-
-# BEGIN SERVER START PARAMETERS
-# BEGIN REQUIRED
-model_location: /home/thuan/cortexcpp/models/cortex.so/whispervq/fp16
-port: 3348
-log_path: whisper.log
-log_level: INFO
-environment: whispervq # python environment to run model
-script: app.py
-command: ["python"] # this is the base command, cortex will automatic find the correct location of python in env and add params when execute command
-
-engine: python-engine
-# END REQUIRED
-
-# BEGIN OPTIONAL
-extra_params:
-  device_id: "0"
-  package_dir: "" # the package directory to be searched
-
-# END OPTIONAL
-# END SERVER START PARAMETERS
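The deleted model.yml was the launch manifest for cortex's python-engine: it pinned the model location, port, and log settings, named the python environment, and exposed device_id and package_dir as extra_params. Its command comment says cortex resolves the interpreter and appends the parameters itself; a hedged sketch of that flag construction (hypothetical Python, the real logic lives in cortex, not in this diff):

    # Hypothetical reconstruction of how a launcher could turn the manifest's
    # server-start parameters into argv; shown only to make the mapping concrete.
    params = {"port": 3348, "log_path": "whisper.log",
              "log_level": "INFO", "device_id": "0"}
    argv = ["python", "app.py"]
    for key, value in params.items():
        argv += [f"--{key}", str(value)]
    print(" ".join(argv))
    # -> python app.py --port 3348 --log_path whisper.log --log_level INFO --device_id 0

Note the underscore spellings here no longer match the hyphenated flags this same commit introduces in app.py, so the manifest could not have been kept as-is.
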
models/audio.py CHANGED
@@ -20,7 +20,3 @@ FORMAT_BACKENDS = {
     AudioFormat.OPUS: ["ffmpeg"],
     AudioFormat.PCM: ["soundfile"]
 }
-
-class AudioRequest(BaseModel):
-    data: str
-    format: AudioFormat = "wav"
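The removed AudioRequest was a Pydantic request model, presumably the body schema for the inference route deleted from app.py above; any remaining import of it from models.audio will now raise ImportError. For reference, a self-contained sketch of what it accepted (the field names come from the diff; the base64 reading of data is an assumption):

    from enum import Enum
    from pydantic import BaseModel

    class AudioFormat(str, Enum):  # trimmed stand-in for the repo's enum
        WAV = "wav"
        OPUS = "opus"
        PCM = "pcm"

    class AudioRequest(BaseModel):
        data: str  # audio payload, presumably base64-encoded
        format: AudioFormat = AudioFormat.WAV

    req = AudioRequest(data="UklGRg==", format="opus")  # placeholder base64; "opus" coerces to the enum
    print(req.format.value)  # -> opus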