Update app.py
app.py CHANGED
@@ -31,12 +31,9 @@ from helper import parse_transcription,hindi_to_english,translate_english_to_hin
 def extract_text_from_html(html):
     cleanr = re.compile('<.*?>')
     cleantext = re.sub(cleanr, '', html)
-def conversational_chat(
-
-
-    st.session_state['history_text'].append((query, result["answer"]))
-
-    return result["answer"]
+def conversational_chat(llm_chain,query):
+    output = llm_chain.predict(human_input=query)
+    return extract_text_from_html(output)
 
 def save_uploaded_file_as_mp3(uploaded_file, output_file_path):
     audio = AudioSegment.from_file(uploaded_file)
@@ -63,8 +60,15 @@ def ui():
     # template=template
     # )
 
-
-
+    prompt = PromptTemplate(
+        input_variables=["history", "human_input"],
+        template=template
+    )
+    llm_chain = LLMChain(
+        llm = ChatOpenAI(temperature=0.0,model_name='gpt-3.5-turbo'),
+        prompt=prompt,
+        verbose=True,
+        memory=ConversationBufferWindowMemory(k=2)
     )
     good_morining_audio,sample_rate1=librosa.load('./good-morning.mp3')
     hi_audio,sample_rate2=librosa.load('./good-morning-sir.mp3')
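Taken together, the two hunks replace the old session-state chat helper with a LangChain LLMChain that carries its own windowed memory. Below is a minimal, self-contained sketch of what the commit appears to wire up; the import paths (classic pre-0.1 langchain layout) and the contents of the template string are assumptions, since both live above the changed lines and are not shown in this diff.

# Minimal sketch of the pieces introduced by this commit.
# Assumptions (not shown in the diff): the langchain import paths below
# and the contents of `template`, which is defined earlier in app.py.
import re

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory

# Hypothetical prompt; it must expose {history} and {human_input} so the
# memory and predict() call below line up with the declared input_variables.
template = """You are a helpful assistant.

{history}
Human: {human_input}
Assistant:"""

def extract_text_from_html(html):
    # Strip HTML tags from the model output (regex as in the diff).
    cleanr = re.compile('<.*?>')
    return re.sub(cleanr, '', html)

def build_chain():
    # PromptTemplate + LLMChain construction added inside ui() by this commit.
    prompt = PromptTemplate(
        input_variables=["history", "human_input"],
        template=template,
    )
    return LLMChain(
        llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo'),
        prompt=prompt,
        verbose=True,
        # Window memory replays only the last k=2 exchanges into {history}.
        memory=ConversationBufferWindowMemory(k=2),
    )

def conversational_chat(llm_chain, query):
    # New signature: the chain is passed in instead of reading Streamlit
    # session state; the chain's own memory now tracks the conversation.
    output = llm_chain.predict(human_input=query)
    return extract_text_from_html(output)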
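For reference, a hypothetical call site (the surrounding Streamlit wiring is not part of this commit): the chain is built once and reused per query, so ConversationBufferWindowMemory(k=2) keeps only the two most recent exchanges in the prompt and anything said earlier in the session is dropped.

# Hypothetical usage; `build_chain` is the helper from the sketch above.
llm_chain = build_chain()
answer = conversational_chat(llm_chain, "Good morning!")
print(answer)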