operablepattern committed on
Commit
d6af2ee
1 Parent(s): a7da833

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -0
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import torch
3
  from transformers import pipeline
 
4
 
5
  MODEL_NAME = "openai/whisper-tiny"
6
  BATCH_SIZE = 8
@@ -21,6 +22,11 @@ def transcribe(inputs, task = "transcribe"):
21
  text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
22
  return text
23
 
 
 
 
 
 
24
  iface = gr.Interface(
25
  fn=transcribe,
26
  inputs=[
 
1
  import gradio as gr
2
  import torch
3
  from transformers import pipeline
4
+ from ctransformers import AutoModelForCausalLM
5
 
6
  MODEL_NAME = "openai/whisper-tiny"
7
  BATCH_SIZE = 8
 
22
  text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
23
  return text
24
 
25
+ # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
26
+ llm = AutoModelForCausalLM.from_pretrained("TheBloke/Mistral-7B-v0.1-GGUF", model_file="mistral-7b-v0.1.Q4_K_M.gguf", model_type="mistral", gpu_layers=0)
27
+
28
+ print(llm("AI is going to"))
29
+
30
  iface = gr.Interface(
31
  fn=transcribe,
32
  inputs=[