# main.py
import os
import tempfile
import streamlit as st
from files import file_uploader, url_uploader
from question import chat_with_doc
from brain import brain
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase import Client, create_client
from explorer import view_document
from stats import get_usage_today
from st_login_form import login_form
# --- Secrets & service clients -------------------------------------------
# All credentials come from Streamlit's secrets store (.streamlit/secrets.toml).
supabase_url = st.secrets.SUPABASE_URL
supabase_key = st.secrets.SUPABASE_KEY
openai_api_key = st.secrets.openai_api_key
anthropic_api_key = st.secrets.anthropic_api_key
hf_api_key = st.secrets.hf_api_key
# Supabase doubles as the app database (usage stats, documents) and the
# backend for the vector store below.
supabase: Client = create_client(supabase_url, supabase_key)
# String flag ("true"/"false"), compared literally against "false" throughout
# this file to gate the restricted public-demo behavior.
self_hosted = st.secrets.self_hosted
# Alternative embedding backend, kept for reference:
# embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
embeddings = HuggingFaceInferenceAPIEmbeddings(
    api_key=hf_api_key,
    model_name="BAAI/bge-large-en-v1.5"
)
# Vector search goes through the `match_documents` RPC over the `documents` table.
vector_store = SupabaseVectorStore(supabase, embeddings, query_name='match_documents', table_name="documents")
# Models reachable through the HF Inference API are always offered; OpenAI
# and Anthropic models are appended only when the matching key is configured.
models = [
    "meta-llama/Llama-2-70b-chat-hf",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
]
if openai_api_key:
    models.extend(["gpt-3.5-turbo", "gpt-4"])
if anthropic_api_key:
    models.extend([
        "claude-v1",
        "claude-v1.3",
        "claude-instant-v1-100k",
        "claude-instant-v1.1-100k",
    ])
# Set the theme
st.set_page_config(
    page_title="meraKB",
    layout="wide",
    initial_sidebar_state="expanded",
)
st.title("🧠 meraKB - Your digital brain 🧠")
st.markdown("Store your knowledge in a vector store and chat with it.")
# Public demo mode: tell the user up front which features are restricted.
if self_hosted == "false":
    st.markdown('**📢 Note: In the public demo, access to functionality is restricted. You can only use the GPT-3.5-turbo model and upload files up to 1Mb. To use more models and upload larger files, consider self-hosting meraKB.**')
st.markdown("---\n\n")
# Daily usage guard (public demo only): when today's token spend exceeds the
# configured limit, flag the session as "overused" so downstream actions can
# refuse to run. Self-hosted instances skip the check entirely.
st.session_state["overused"] = False
if self_hosted == "false":
    usage = get_usage_today(supabase)
    if usage > st.secrets.usage_limit:
        st.markdown(
            f"<span style='color:red'>You have used {usage} tokens today, which is more than your daily limit of {st.secrets.usage_limit} tokens. Please come back later or consider self-hosting.</span>", unsafe_allow_html=True)
        st.session_state["overused"] = True
    else:
        st.markdown(f"<span style='color:blue'>Usage today: {usage} tokens out of {st.secrets.usage_limit}</span>", unsafe_allow_html=True)
    # NOTE(review): separator assumed to belong to the demo branch (the
    # self-hosted path already drew one above) — confirm intended placement.
    st.write("---")
# Render the login form; everything below this gate runs only for
# authenticated sessions (st_login_form sets st.session_state["authenticated"]).
# NOTE(review): `client` looks unused in this file — confirm before removing.
client = login_form()
if st.session_state["authenticated"]:
    # Greet the user; guest logins carry no username, so backfill one.
    if st.session_state["username"]:
        st.success(f"Welcome {st.session_state['username']}")
    else:
        st.session_state["username"] = 'guest'
        st.success("Welcome guest")
# Initialize session state variables
if 'model' not in st.session_state:
st.session_state['model'] = "meta-llama/Llama-2-70b-chat-hf"
if 'temperature' not in st.session_state:
st.session_state['temperature'] = 0.1
if 'chunk_size' not in st.session_state:
st.session_state['chunk_size'] = 500
if 'chunk_overlap' not in st.session_state:
st.session_state['chunk_overlap'] = 0
if 'max_tokens' not in st.session_state:
st.session_state['max_tokens'] = 500
# Create a radio button for user to choose between adding knowledge or asking a question
user_choice = st.radio(
"Choose an action", ('Add Knowledge', 'Chat with your Brain', 'Forget', "Explore"))
st.markdown("---\n\n")
    if user_choice == 'Add Knowledge':
        # Display chunk size and overlap selection only when adding knowledge
        st.sidebar.title("Configuration")
        st.sidebar.markdown(
            "Choose your chunk size and overlap for adding knowledge.")
        # Sliders write straight back into session state so the values survive
        # reruns; args are (label, min, max, current value, step).
        st.session_state['chunk_size'] = st.sidebar.slider(
            "Select Chunk Size", 100, 1000, st.session_state['chunk_size'], 50)
        st.session_state['chunk_overlap'] = st.sidebar.slider(
            "Select Chunk Overlap", 0, 100, st.session_state['chunk_overlap'], 10)
        # Create two columns for the file uploader and URL uploader
        col1, col2 = st.columns(2)
        with col1:
            file_uploader(supabase, vector_store)
        with col2:
            url_uploader(supabase, vector_store)
elif user_choice == 'Chat with your Brain':
# Display model and temperature selection only when asking questions
st.sidebar.title("Configuration")
st.sidebar.markdown(
"Choose your model and temperature for asking questions.")
if self_hosted != "false":
st.session_state['model'] = st.sidebar.selectbox(
"Select Model", models, index=(models).index(st.session_state['model']))
else:
st.sidebar.write("**Model**: gpt-3.5-turbo")
st.sidebar.write("**Self Host to unlock more models such as claude-v1 and GPT4**")
st.session_state['model'] = "gpt-3.5-turbo"
st.session_state['temperature'] = st.sidebar.slider(
"Select Temperature", 0.1, 1.0, st.session_state['temperature'], 0.1)
if st.secrets.self_hosted != "false":
st.session_state['max_tokens'] = st.sidebar.slider(
"Select Max Tokens", 500, 4000, st.session_state['max_tokens'], 500)
else:
st.session_state['max_tokens'] = 500
chat_with_doc(st.session_state['model'], vector_store, stats_db=supabase)
    elif user_choice == 'Forget':
        # Deletion pane: `brain` lets the user remove stored documents.
        st.sidebar.title("Configuration")
        brain(supabase)
    elif user_choice == 'Explore':
        # Read-only browser over everything held in the vector store.
        st.sidebar.title("Configuration")
        view_document(supabase)
    # NOTE(review): trailing separator assumed to close the authenticated page
    # regardless of pane — confirm intended indentation level.
    st.markdown("---\n\n")
else:
    st.error("Not authenticated")