import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the BioMistral 7B model and its tokenizer from the Hugging Face Hub.
# device_map="auto" places the weights on the available GPU(s), falling back to CPU.
model_name = "BioMistral/BioMistral-7B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
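
# Note: in full float32 precision a 7B-parameter model needs roughly 28 GB of memory;
# passing torch_dtype=torch.float16 to from_pretrained (together with `import torch`)
# roughly halves that, which is a common choice when GPU memory is tight.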


def generate_text(prompt, max_length=500, num_return_sequences=1, temperature=0.7):
    # Tokenize the prompt and move it to the same device as the model weights.
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    output = model.generate(
        input_ids,
        max_length=max_length,  # caps prompt + generated tokens combined
        num_return_sequences=num_return_sequences,
        do_sample=True,  # sampling must be enabled for temperature to take effect
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode each returned sequence back into plain text.
    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    return generated_text
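
# Note: with a decoder-only model, `output` holds the prompt tokens followed by the
# continuation, so each decoded string echoes the prompt back. To return only the
# newly generated text, one could slice off the prompt before decoding (a sketch):
#   new_tokens = output[:, input_ids.shape[1]:]
#   generated_text = tokenizer.batch_decode(new_tokens, skip_special_tokens=True)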


def main():
    st.title("Doctor Chatbot (Powered by BioMistral 7B)")
    st.write("Welcome to the Doctor Chatbot. Please describe your symptoms or ask a medical question, and I'll provide a response.")

    user_input = st.text_area("Enter your symptoms or question:")

    if user_input:
        # Show a spinner while the model generates; this can take a while on CPU.
        with st.spinner("Generating response..."):
            generated_text = generate_text(user_input)
        st.write(generated_text[0])


if __name__ == "__main__":
    main()
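
# To launch the chatbot locally (assuming this script is saved as app.py):
#   streamlit run app.py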