# NOTE: the three lines below were Hugging Face Spaces status-page residue
# ("Spaces:" / "Runtime error" / "Runtime error") pasted into the file;
# converted to a comment so the module parses. The Space was crashing at runtime.
import os
import time
from dataclasses import dataclass
from typing import Literal

import pinecone
import streamlit as st
import streamlit.components.v1 as components
from langchain.chains import ConversationalRetrievalChain, RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Pinecone
from langchain_experimental.llms import ChatLlamaAPI
from langchain_groq import ChatGroq
from llamaapi import LlamaAPI
# Read the Hugging Face token from Streamlit secrets and export it to the
# process environment so the Hugging Face client libraries can pick it up.
# (Previously the value was assigned but never used, and `os` was imported
# for nothing — restoring the evident intent.)
HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
os.environ['HUGGINGFACEHUB_API_TOKEN'] = HUGGINGFACEHUB_API_TOKEN
@dataclass
class Message:
    """Class for keeping track of a chat message.

    Instantiated positionally as ``Message(origin, text)`` by the submit
    callback; the ``@dataclass`` decorator (whose import was already present)
    is required for that constructor to exist — without it the positional
    call raises ``TypeError`` at runtime.
    """
    # Speaker tag, rendered verbatim in the transcript. The odd characters
    # are mojibake of the original emoji labels; kept byte-for-byte because
    # the call sites use the same literals.
    origin: Literal["π€ Human", "π¨π»ββοΈ Ai"]
    # Raw message text as typed by the user / returned by the chain.
    message: str
def download_hugging_face_embeddings():
    """Return the sentence-transformer embedding model used for retrieval.

    all-MiniLM-L6-v2 is a small general-purpose embedder; the model is
    downloaded on first use by the HuggingFaceEmbeddings wrapper.
    """
    return HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2'
    )
def initialize_session_state():
    """Populate st.session_state with the chat history and retrieval chain.

    Streamlit re-runs the whole script on every interaction, so the
    expensive setup (LLM client, embeddings, Pinecone index, chain) is
    done only when the session-state keys are missing.
    """
    if "history" not in st.session_state:
        st.session_state.history = []
    if "conversation" not in st.session_state:
        chat = ChatGroq(
            temperature=0.5,
            groq_api_key=st.secrets["Groq_api"],
            model_name="mixtral-8x7b-32768",
        )
        embeddings = download_hugging_face_embeddings()
        # Initializing Pinecone
        pinecone.init(
            api_key=st.secrets["PINECONE_API_KEY"],      # find at app.pinecone.io
            environment=st.secrets["PINECONE_API_ENV"],  # next to api key in console
        )
        index_name = "book-recommendations"  # updated index name for books
        docsearch = Pinecone.from_existing_index(index_name, embeddings)
        prompt_template = """
You are an AI trained to recommend books. You will suggest books based on the user's preferences and previous likes.
Please provide insightful recommendations and explain why each book might be of interest to the user.
Context: {context}
User Preference: {question}
Suggested Books:
"""
        PROMPT = PromptTemplate(
            template=prompt_template,
            input_variables=["context", "question"],
        )
        message_history = ChatMessageHistory()
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            output_key="answer",
            chat_memory=message_history,
            return_messages=True,
        )
        # BUG FIX: chain_type was "recommendation", which is not a valid
        # LangChain chain type (only "stuff", "map_reduce", "refine",
        # "map_rerank" exist) and makes from_llm raise at startup — the
        # likely cause of the Space's "Runtime error". "stuff" is the type
        # that accepts a custom prompt via combine_docs_chain_kwargs.
        retrieval_chain = ConversationalRetrievalChain.from_llm(
            llm=chat,
            chain_type="stuff",
            retriever=docsearch.as_retriever(search_kwargs={'k': 5}),
            return_source_documents=True,
            combine_docs_chain_kwargs={"prompt": PROMPT},
            memory=memory,
        )
        st.session_state.conversation = retrieval_chain
def on_click_callback():
    """Form-submit handler: run the user's prompt through the chain.

    Reads the text-input widget via its session-state key, clears the box
    for the next rerun, then appends both sides of the exchange to
    ``st.session_state.history`` for the transcript renderer.
    """
    human_prompt = st.session_state.human_prompt
    st.session_state.human_prompt = ""
    # Robustness: ignore empty submissions instead of sending "" to the LLM
    # and appending blank messages to the transcript.
    if not human_prompt or not human_prompt.strip():
        return
    response = st.session_state.conversation(human_prompt)
    llm_response = response['answer']
    st.session_state.history.append(
        Message("π€ Human", human_prompt)
    )
    st.session_state.history.append(
        Message("π¨π»ββοΈ Ai", llm_response)
    )
# ---------------------------------------------------------------------------
# Page layout: build session state, then render transcript above the input
# form. Streamlit re-executes this whole section on every interaction.
# ---------------------------------------------------------------------------
initialize_session_state()

st.title("AI Book Recommender")
st.markdown(
    """
    π **Welcome to the AI Book Recommender!**
    Share your favorite genres or books, and I'll recommend your next reads!
    """
)

transcript_area = st.container()
input_form = st.form("chat-form")

# Replay the accumulated conversation each rerun.
with transcript_area:
    for entry in st.session_state.history:
        st.markdown(f"{entry.origin} : {entry.message}")

# Input row: wide text box + narrow submit button; the callback reads the
# text via the "human_prompt" session-state key.
with input_form:
    st.markdown("**Chat**")
    text_col, button_col = st.columns((6, 1))
    text_col.text_input(
        "Chat",
        label_visibility="collapsed",
        key="human_prompt",
    )
    button_col.form_submit_button(
        "Submit",
        type="primary",
        on_click=on_click_callback,
    )