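# Streamlit app that answers medical questions in Russian with a
# GGUF-quantized Gemma 7B medical model run locally via llama-cpp-python.
# Launch with: streamlit run app.py  (the filename is assumed here).
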
import streamlit as st
from llama_cpp import Llama

# Download the model from the Hugging Face Hub on first run (cached locally
# afterwards); the glob pattern selects the Q4_K_M (4-bit) quantization file.
llm = Llama.from_pretrained(
    repo_id="Mykes/med_gemma7b_gguf",
    filename="*Q4_K_M.gguf",
    verbose=False,
)

# Prompt template with a fixed one-turn Russian context. The Russian reads:
# user: "Answer a question about my health." / assistant: "Of course! What is your question?"
basic_prompt = (
    "Below is the context which is your conversation history and the last user question. "
    "Write a response according to the context and question. "
    "### Context: user: Ответь мне на вопрос о моем здоровье. "
    "assistant: Конечно! Какой у Вас вопрос? "
    "### Question: {question} ### Response:"
)

def generate_response(question):
    if question:
        # Use the function's own argument, not the global input_text
        model_input = basic_prompt.format(question=question)
        # Calling the Llama object is shorthand for llm.create_completion()
        output = llm(
            model_input,
            max_tokens=32,  # generate up to 32 tokens; None generates to the end of the context window
            stop=["<end_of_turn>"],  # Gemma's end-of-turn marker
            echo=False,  # do not echo the prompt back in the output
        )
        st.write(output["choices"][0]["text"])
    else:
        st.write("Please enter a question to get a response.")

input_text = st.text_input('Задайте мне медицинский вопрос...')  # "Ask me a medical question..."

# Button to trigger response generation
if st.button('Generate Response'):
    generate_response(input_text)
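
# Note: Streamlit reruns this entire script on every interaction, so the model
# is reloaded on each button press; wrapping the Llama construction in a
# function decorated with @st.cache_resource would keep it in memory across reruns.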