# Chitti_ver1 / app.py
# Author: Pavankalyan — "Update app.py" (commit db5004d)
import gradio as gr
from output_beautify import *
import pandas as pd
from load_data import *
import os
from gingerit import *
# Whisper speech-to-text input is currently disabled; the Interface
# below takes plain text input instead.
#os.system("pip install git+https://github.com/openai/whisper.git")
#import whisper
#model = whisper.load_model("small")
#current_size = 'small'

# Persists manually-flagged examples to a Hugging Face dataset repo.
# SECURITY: the access token was committed here in plain text — it must be
# considered compromised.  Rotate it and supply the new one via the
# HF_TOKEN environment variable; the literal is kept only as a
# backward-compatible fallback.
hf_writer = gr.HuggingFaceDatasetSaver(
    os.environ.get("HF_TOKEN", "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"),
    "Pavankalyan/chitti_data",
)
# Dead code: a Whisper-based speech-to-text helper, deliberately wrapped in
# a string literal so it never executes (the matching model load above is
# also commented out).  Kept for when audio input is re-enabled.
'''
def inference(audio):
audio = whisper.load_audio(audio)
audio = whisper.pad_or_trim(audio)
mel = whisper.log_mel_spectrogram(audio).to(model.device)
_, probs = model.detect_language(mel)
options = whisper.DecodingOptions(fp16 = False)
result = whisper.decode(model, mel, options)
return result.text
'''
def chitti(query):
    """Answer *query* from the retrieval index.

    Returns a flat 4-element list matching the Interface outputs:
    [answer_1, link_1, answer_2, link_2].  Answers are passed through
    runGinger() for grammar correction before being returned.
    """
    # Top candidates from the index; by observed row layout, column 0 is
    # the answer text and column 3 its source link — TODO confirm against
    # load_data.search.
    re_table = search(query)
    answers = [row[0] for row in re_table[:5]]
    links = [row[3] for row in re_table[:5]]

    # Order candidate indices by answer length (shortest first) so a short
    # answer contained verbatim in the next-longer one counts as a duplicate.
    order = sorted(range(len(answers)), key=lambda k: len(answers[k]))
    dupes = [
        order[i]
        for i in range(len(order) - 1)
        if answers[order[i]] in answers[order[i + 1]]
    ]

    # Bug fix: pop from the highest index down, so earlier removals do not
    # shift the positions of indices still pending removal.
    for idx in sorted(dupes, reverse=True):
        answers.pop(idx)
        links.pop(idx)

    # Robustness: guarantee two slots even if retrieval returned few rows
    # or deduplication removed too many, avoiding an IndexError below.
    while len(answers) < 2:
        answers.append("")
        links.append("")

    return [runGinger(answers[0]), links[0], runGinger(answers[1]), links[1]]
# Gradio UI: one text query in, four text slots out
# (answer 1, link 1, answer 2, link 2).  Manual flagging with the labels
# below is persisted through hf_writer.
demo = gr.Interface(
    fn=chitti,
    inputs=["text"],
    # NOTE: microphone input disabled alongside the Whisper model above:
    #inputs=[gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")],
    outputs=["text", "text", "text", "text"],
    allow_flagging="manual",
    flagging_options=["0", "1", "None"],
    flagging_callback=hf_writer,
)

# Launch only when executed as a script, so the module can be imported
# (e.g. by tests or tooling) without starting a server.
if __name__ == "__main__":
    demo.launch()