Mykes committed
Commit
e2ef81d
1 Parent(s): d9fbeaa

Upload app_working.py

Files changed (1)
  1. app_working.py +33 -0
app_working.py ADDED
@@ -0,0 +1,33 @@
+ import streamlit as st
+ from llama_cpp import Llama
+
+ # Download the quantized GGUF model from the Hugging Face Hub (cached after the first run).
+ llm = Llama.from_pretrained(
+     repo_id="Mykes/med_gemma7b_gguf",
+     filename="*Q4_K_M.gguf",
+     verbose=False
+ )
+
+ # Prompt template; the embedded context is Russian for
+ # "Answer my health question." / "Of course! What is your question?"
+ basic_prompt = "Below is the context, which is your conversation history, and the last user question. Write a response according to the context and question. ### Context: user: Ответь мне на вопрос о моем здоровье. assistant: Конечно! Какой у Вас вопрос? ### Question: {question} ### Response:"
+
+ def generate_response(question):
+     model_input = basic_prompt.format(question=question)
+     if question:
+         output = llm(
+             model_input,             # Prompt
+             max_tokens=32,           # Generate up to 32 tokens; set to None to generate up to the end of the context window
+             stop=["<end_of_turn>"],  # Stop at Gemma's end-of-turn marker
+             echo=False               # Do not echo the prompt back in the output
+         )                            # Generate a completion; can also call create_completion
+         st.write(output["choices"][0]["text"])
+     else:
+         st.write("Please enter a question to get a response.")
+
+ # Text input; the placeholder is Russian for "Ask me a medical question..."
+ input_text = st.text_input('Задайте мне медицинский вопрос...')
+
+ # Button to trigger response generation
+ if st.button('Generate Response'):
+     generate_response(input_text)
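
To try the app locally (this assumes streamlit, llama-cpp-python, and huggingface-hub are installed), run: streamlit run app_working.py. On the first run, Llama.from_pretrained downloads the GGUF weights matching *Q4_K_M.gguf from the Hub and caches them locally.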