yellowcandle committed on
Commit
bab5632
1 Parent(s): 5576fae

parse json output from prompt

Browse files
Files changed (2) hide show
  1. app.py +11 -1
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import spaces
2
  import gradio as gr
3
  import os
 
4
  import torch
5
  from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, AutoModelForCausalLM, AutoTokenizer
6
 
@@ -36,6 +37,7 @@ def transcribe_audio(audio, model_id):
36
  result = pipe(audio)
37
  return result["text"]
38
 
 
39
  def proofread(text):
40
  if text is None:
41
  return "Please provide the transcribed text for proofreading."
@@ -48,7 +50,15 @@ def proofread(text):
48
  {"role": "user", "content": text},
49
  ]
50
  pipe = pipeline("text-generation", model="hfl/llama-3-chinese-8b-instruct-v3")
51
- proofread_text = pipe(messages)
 
 
 
 
 
 
 
 
52
  return proofread_text
53
 
54
  with gr.Blocks() as demo:
 
1
  import spaces
2
  import gradio as gr
3
  import os
4
+ import orjson
5
  import torch
6
  from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, AutoModelForCausalLM, AutoTokenizer
7
 
 
37
  result = pipe(audio)
38
  return result["text"]
39
 
40
+ @spaces.GPU(duration=60)
41
  def proofread(text):
42
  if text is None:
43
  return "Please provide the transcribed text for proofreading."
 
50
  {"role": "user", "content": text},
51
  ]
52
  pipe = pipeline("text-generation", model="hfl/llama-3-chinese-8b-instruct-v3")
53
+ llm_output = pipe(messages)
54
+
55
+ # Extract the generated text
56
+ generated_text = llm_output[0]['generated_text']
57
+
58
+ # Extract the assistant's content
59
+ assistant_content = next(item['content'] for item in generated_text if item['role'] == 'assistant')
60
+
61
+ proofread_text = assistant_content
62
  return proofread_text
63
 
64
  with gr.Blocks() as demo:
requirements.txt CHANGED
@@ -4,4 +4,5 @@ torch
4
  torchvision
5
  torchaudio
6
  accelerate
7
- datasets
 
 
4
  torchvision
5
  torchaudio
6
  accelerate
7
+ datasets
8
+ orjson