# file_research/app.py
import os

import streamlit as st
from dotenv import load_dotenv
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Replicate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from PyPDF2 import PdfReader
from streamlit_extras.add_vertical_space import add_vertical_space
# from langchain.embeddings.openai import OpenAIEmbeddings
# from langchain.vectorstores import FAISS

# Load the Replicate API token from a local .env file.
load_dotenv()
REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
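
# Minimal guard, assuming the app is unusable without a Replicate token:
# stop early with a visible message rather than failing later with a
# TypeError when the token is exported into os.environ inside main().
if not REPLICATE_API_TOKEN:
    st.error("REPLICATE_API_TOKEN is not set; add it to your .env file.")
    st.stop()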
with st.sidebar:
    st.title("File Research using LLM")
    st.markdown("Upload a PDF, then ask questions to research its contents.")
    add_vertical_space(5)
    pdf = st.file_uploader("Upload your file (PDF)", type="pdf")
    if pdf is not None:
        # Pull the raw text out of every page of the uploaded PDF.
        pdf_reader = PdfReader(pdf)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text()
        # Split the text into overlapping chunks sized for embedding.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
        )
        chunks = text_splitter.split_text(text)
    st.write("Made by ALOK")


def main():
    st.header("Talk to your file")
    os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN

    # The retrieval step sketched in the source (left disabled by the author):
    # embeddings = OpenAIEmbeddings()
    # vectorstore = FAISS.from_texts(chunks, embedding=embeddings)
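
    # A sketch of completing the retrieval step above, assuming
    # HuggingFaceEmbeddings as a stand-in for the commented-out
    # OpenAIEmbeddings; left disabled like the author's version. Caching the
    # index in session_state avoids rebuilding it on every Streamlit rerun.
    # from langchain.embeddings import HuggingFaceEmbeddings
    # if pdf is not None and "vectorstore" not in st.session_state:
    #     embeddings = HuggingFaceEmbeddings()  # local sentence-transformers model
    #     st.session_state.vectorstore = FAISS.from_texts(chunks, embedding=embeddings)
    # # Per question, relevant chunks could then come from:
    # # st.session_state.vectorstore.similarity_search(prompt, k=3)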

    # Keep the chat history in session_state across Streamlit reruns.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from the history on each app rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input.
    if prompt := st.chat_input("Type Here"):
        # Show the user's message and record it in the history.
        st.chat_message("user").markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        # The Replicate token comes from the environment (REPLICATE_API_TOKEN
        # above); it must never be hard-coded in source.
        # Lower-level alternative using the replicate client directly:
        # rep = replicate.Client(api_token=REPLICATE_API_TOKEN)
        # output = rep.run(
        #     "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
        #     input={"prompt": prompt},
        # )

        # The meta/llama-2-70b-chat model can stream output as it's running.
        model = "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
        llm = Replicate(
            streaming=True,
            callbacks=[StreamingStdOutCallbackHandler()],
            model=model,
            model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
            replicate_api_token=REPLICATE_API_TOKEN,
        )
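
        # StreamingStdOutCallbackHandler streams tokens to the server's stdout,
        # not the browser. A sketch of a handler that streams into a Streamlit
        # placeholder instead (an assumption, not part of the original app):
        # from langchain.callbacks.base import BaseCallbackHandler
        # class StreamlitTokenHandler(BaseCallbackHandler):
        #     def __init__(self, placeholder):
        #         self.placeholder, self.text = placeholder, ""
        #     def on_llm_new_token(self, token, **kwargs):
        #         self.text += token
        #         self.placeholder.markdown(self.text + "▌")
        # Constructing the LLM with callbacks=[StreamlitTokenHandler(...)] would
        # then render tokens live in the chat bubble.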
prompt = """
User: Answer the following yes/no question by reasoning step by step. Please don't provide incomplete answer. Can a dog drive a car?
Assistant:
"""

        # Display the assistant's response and add it to the chat history.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            response = llm(prompt)
            message_placeholder.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})

        # Alternative with the commented replicate client above: rep.run returns
        # an iterator, so the reply can be rendered as it streams in.
        # response_till_now = ""
        # for item in output:
        #     response_till_now += item
        #     message_placeholder.markdown(response_till_now + "▌")
        # message_placeholder.markdown(response_till_now)
        # Output schema reference:
        # https://replicate.com/meta/llama-2-70b-chat/versions/02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3/api#output-schema


if __name__ == "__main__":
    main()