# Import required libraries
import json
import os
import shutil
import langchain
import pinecone
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import PyMuPDFLoader, UnstructuredFileLoader, UnstructuredWordDocumentLoader
from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, Pinecone
# API keys and model names
OPENAI_API_KEY = ''
PINECONE_API_KEY = ''
PINECONE_API_ENV = ''
gpt3p5 = 'gpt-3.5-turbo-1106'
gpt4 = 'gpt-4-1106-preview'
local_model_tuples = [
    # Local model tuples as in the original code (elided here); the list
    # comprehension below implies each tuple's second element is the model name.
]
local_model_names = [t[1] for t in local_model_tuples]
langchain.verbose = False
# Initialization function
@st.cache_data()
def init():
    pinecone_index_name = ''
    chroma_collection_name = ''
    persist_directory = ''
    docsearch_ready = False
    directory_name = 'tmp_docs'
    return pinecone_index_name, chroma_collection_name, persist_directory, docsearch_ready, directory_name
# File saving function
@st.cache_data()
def save_file(files, directory_name):
    # Original save_file function logic (body elided upstream).
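    # Minimal sketch of the assumed behavior: rebuild directory_name and write
    # each uploaded Streamlit file into it so the loaders can read from disk.
    if os.path.exists(directory_name):
        shutil.rmtree(directory_name)  # drop files from previous uploads
    os.makedirs(directory_name)
    for file in files:
        with open(os.path.join(directory_name, file.name), 'wb') as f:
            f.write(file.getbuffer())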
# File loading function
def load_files(directory_name):
    # Original load_files function logic (body elided upstream).
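    # Minimal sketch of the assumed behavior: choose a loader per file
    # extension, load everything in directory_name, and split the documents
    # into chunks for embedding. The chunk sizes are illustrative, not
    # recovered from the original.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    all_texts = []
    for filename in os.listdir(directory_name):
        file_path = os.path.join(directory_name, filename)
        if filename.lower().endswith('.pdf'):
            loader = PyMuPDFLoader(file_path)
        elif filename.lower().endswith(('.doc', '.docx')):
            loader = UnstructuredWordDocumentLoader(file_path)
        else:
            loader = UnstructuredFileLoader(file_path)
        all_texts.extend(splitter.split_documents(loader.load()))
    return all_texts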
# Ingestion function
@st.cache_resource()
def ingest(_all_texts, use_pinecone, _embeddings, pinecone_index_name, chroma_collection_name, persist_directory):
    # Original ingest function logic (body elided upstream).
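    # Minimal sketch of the assumed behavior: embed the chunks into Pinecone
    # or into a persistent Chroma collection and return the vector store.
    if use_pinecone:
        pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
        docsearch = Pinecone.from_documents(
            _all_texts, _embeddings, index_name=pinecone_index_name)
    else:
        docsearch = Chroma.from_documents(
            _all_texts, _embeddings,
            collection_name=chroma_collection_name,
            persist_directory=persist_directory)
        docsearch.persist()
    return docsearch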
# Retriever setup function
def setup_retriever(docsearch, k):
    # Original setup_retriever function logic (body elided upstream).
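    # Assumed to be a thin wrapper: expose the vector store as a retriever
    # that returns the top-k chunks for each query.
    return docsearch.as_retriever(search_kwargs={'k': k})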
# Docsearch setup function
def setup_docsearch(use_pinecone, pinecone_index_name, embeddings, chroma_collection_name, persist_directory):
    # Original setup_docsearch function logic (body elided upstream).
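    # Minimal sketch of the assumed behavior: reconnect to an already
    # populated index/collection instead of re-ingesting documents.
    if use_pinecone:
        pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
        docsearch = Pinecone.from_existing_index(pinecone_index_name, embeddings)
    else:
        docsearch = Chroma(
            collection_name=chroma_collection_name,
            persist_directory=persist_directory,
            embedding_function=embeddings)
    return docsearch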
# Response generation function
def get_response(query, chat_history, CRqa):
    # Original get_response function logic (body elided upstream).
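    # Minimal sketch of the assumed behavior: run the retrieval chain on the
    # query plus prior turns. 'answer' and 'source_documents' are the keys a
    # ConversationalRetrievalChain returns when return_source_documents=True.
    result = CRqa({'question': query, 'chat_history': chat_history})
    return result['answer'], result.get('source_documents', [])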
# Local LLM usage function
@st.cache_resource()
def use_local_llm(r_llm, local_llm_path, temperature):
    # Original use_local_llm function logic (body elided upstream).
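    # Speculative sketch: local_model_tuples is elided, so the name lookup and
    # the LlamaCpp backend below are assumptions; the original may use a
    # different local backend. The model_path layout is illustrative.
    from langchain.llms import LlamaCpp
    model_path = os.path.join(local_llm_path, r_llm)
    return LlamaCpp(model_path=model_path, temperature=temperature)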
# Prompt setup function
def setup_prompt(r_llm, usage):
    # Original setup_prompt function logic (body elided upstream).
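    # Minimal sketch of the assumed behavior: build a PromptTemplate whose
    # wording may vary with the chosen model (r_llm) and usage mode. The
    # template text here is illustrative only.
    template = (
        'Use the following context to answer the question at the end. '
        "If you don't know the answer, say so rather than guessing.\n\n"
        '{context}\n\nQuestion: {question}\nHelpful answer:')
    return PromptTemplate(
        input_variables=['context', 'question'], template=template)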
# Embeddings and LLM setup function
def setup_em_llm(OPENAI_API_KEY, temperature, r_llm, local_llm_path, usage):
    # Original setup_em_llm function logic (body elided upstream).
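    # Minimal sketch of the assumed behavior: local model names get
    # HuggingFace embeddings plus a local LLM; anything else is treated as an
    # OpenAI chat model. The embedding model name is illustrative, and the
    # usage argument is unused in this sketch.
    if r_llm in local_model_names:
        embeddings = HuggingFaceEmbeddings(
            model_name='sentence-transformers/all-MiniLM-L6-v2')
        llm = use_local_llm(r_llm, local_llm_path, temperature)
    else:
        embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
        llm = ChatOpenAI(model_name=r_llm, temperature=temperature,
                         openai_api_key=OPENAI_API_KEY)
    return embeddings, llm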
# Chat history loading function
def load_chat_history(CHAT_HISTORY_FILENAME):
    # Original load_chat_history function logic (body elided upstream).
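    # Minimal sketch of the assumed behavior: restore prior turns from a JSON
    # file, tolerating a missing or corrupt file.
    try:
        with open(CHAT_HISTORY_FILENAME) as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return []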
# Chat history saving function
def save_chat_history(chat_history, CHAT_HISTORY_FILENAME):
    # Original save_chat_history function logic (body elided upstream).
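    # Minimal sketch of the assumed behavior: persist the accumulated turns
    # as JSON so they survive Streamlit reruns.
    with open(CHAT_HISTORY_FILENAME, 'w') as f:
        json.dump(chat_history, f)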
# Initialization
pinecone_index_name, chroma_collection_name, persist_directory, docsearch_ready, directory_name = init()
# Main function
def main(pinecone_index_name, chroma_collection_name, persist_directory, docsearch_ready, directory_name):
    # Original main function logic (body elided upstream).
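    # Speculative sketch of the assumed flow: collect settings in the sidebar,
    # ingest uploads (or reconnect to an existing index), build a
    # ConversationalRetrievalChain, and answer queries. Widget labels,
    # defaults, and the placeholder local_llm_path/usage values are
    # illustrative, not recovered from the original.
    with st.sidebar:
        r_llm = st.radio('Model', [gpt3p5, gpt4] + local_model_names)
        temperature = st.slider('Temperature', 0.0, 1.0, 0.0)
        use_pinecone = st.checkbox('Use Pinecone (otherwise Chroma)')
        k = st.slider('Number of chunks to retrieve', 1, 10, 3)
    embeddings, llm = setup_em_llm(OPENAI_API_KEY, temperature, r_llm,
                                   local_llm_path='', usage=None)
    files = st.file_uploader('Upload documents', accept_multiple_files=True)
    if files:
        save_file(files, directory_name)
        all_texts = load_files(directory_name)
        docsearch = ingest(all_texts, use_pinecone, embeddings,
                           pinecone_index_name, chroma_collection_name,
                           persist_directory)
        docsearch_ready = True
    elif st.checkbox('Use previously ingested documents'):
        docsearch = setup_docsearch(use_pinecone, pinecone_index_name,
                                    embeddings, chroma_collection_name,
                                    persist_directory)
        docsearch_ready = True
    if docsearch_ready:
        retriever = setup_retriever(docsearch, k)
        CRqa = ConversationalRetrievalChain.from_llm(
            llm, retriever=retriever, return_source_documents=True)
        query = st.text_input('Ask a question about the uploaded documents:')
        if query:
            # Prior turns are omitted in this sketch; the original presumably
            # threads the loaded chat history through here.
            answer, sources = get_response(query, [], CRqa)
            st.write(answer)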
# Entry point
if __name__ == '__main__':
    main(pinecone_index_name, chroma_collection_name, persist_directory, docsearch_ready, directory_name)