# app.py
import streamlit as st
from models import load_model
# Load the model once (Streamlit reruns this script on every interaction, so
# load_model should cache the pipeline; see the models.py sketch at the end)
generator = load_model()
# Page configuration
st.set_page_config(
page_title="DeepSeek Chatbot - ruslanmv.com",
page_icon="🤖",
layout="centered"
)
# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Sidebar for model parameters
with st.sidebar:
st.header("Model Configuration")
# System message
system_message = st.text_area(
"System Message",
value="You are a friendly Chatbot created by ruslanmv.com",
height=100
)
# Generation parameters
max_tokens = st.slider(
"Max Tokens",
min_value=1,
max_value=4000,
value=512,
step=10
)
temperature = st.slider(
"Temperature",
min_value=0.1,
max_value=4.0,
value=0.7,
step=0.1
)
top_p = st.slider(
"Top-p (nucleus sampling)",
min_value=0.1,
max_value=1.0,
value=0.9,
step=0.1
)
# Main chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by ruslanmv.com - Configure parameters in the sidebar")
# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Type your message..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)
    try:
        # Generate response using the model
        with st.spinner("Generating response..."):
            # Note: only the latest message is sent to the model; see the
            # history-aware prompt sketch at the end of this file
            full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
            response = generator(
                full_prompt,
                max_new_tokens=max_tokens,  # cap generated tokens; max_length would also count the prompt
                temperature=temperature,
                top_p=top_p,
                do_sample=True,
                num_return_sequences=1
            )[0]['generated_text']

        # Extract only the assistant's response (generated_text echoes the prompt)
        assistant_response = response.split("Assistant:")[-1].strip()

        # Display assistant response
        with st.chat_message("assistant"):
            st.markdown(assistant_response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})
    except Exception as e:
        st.error(f"An error occurred: {str(e)}")
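
# ---------------------------------------------------------------------------
# models.py (hedged sketch) -- app.py imports load_model from a models module
# that is not shown in this file view. A minimal version might look like the
# code below, assuming the Space wraps a Hugging Face transformers
# text-generation pipeline. The model id is a placeholder assumption, not
# taken from this repository; st.cache_resource makes the pipeline load once
# per process instead of on every Streamlit rerun.
# ---------------------------------------------------------------------------
import streamlit as st
from transformers import pipeline

@st.cache_resource
def load_model():
    # Build the text-generation pipeline once and reuse it across reruns
    return pipeline(
        "text-generation",
        model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",  # placeholder model id (assumption)
        device_map="auto",
    )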
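
# ---------------------------------------------------------------------------
# Optional refinement (sketch): full_prompt above contains only the latest
# user message, so earlier turns are invisible to the model even though they
# are stored in st.session_state.messages. A history-aware builder could
# reuse the same "User:/Assistant:" format; build_prompt is a hypothetical
# helper name, not part of the original app.
# ---------------------------------------------------------------------------
def build_prompt(system_message, messages):
    # Fold the system message and the stored chat history into one prompt string
    lines = [system_message, ""]
    for m in messages:
        speaker = "User" if m["role"] == "user" else "Assistant"
        lines.append(f"{speaker}: {m['content']}")
    lines.append("Assistant:")
    return "\n".join(lines)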