import os
import gradio as gr
from dotenv import load_dotenv
from langchain.callbacks.base import BaseCallbackHandler
from langchain.embeddings import CacheBackedEmbeddings
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain.storage import LocalFileStore
from langchain_anthropic import ChatAnthropic
from langchain_community.chat_models import ChatOllama
from langchain_community.document_loaders import NotebookLoader, TextLoader
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.language.language_parser import (
LanguageParser,
)
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.callbacks.manager import CallbackManager
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField, RunnablePassthrough
from langchain_google_genai import GoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import Language, RecursiveCharacterTextSplitter
# Load environment variables
load_dotenv()
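# The providers configured below read their credentials from the environment:
# OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, and GROQ_API_KEY.
# LLM_MODEL (read near the bottom of this file) selects which model serves answers.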
# Repository directories
repo_root_dir = "./docs/langchain"
repo_dirs = [
"libs/core/langchain_core",
"libs/community/langchain_community",
"libs/experimental/langchain_experimental",
"libs/partners",
"libs/cookbook",
]
repo_dirs = [os.path.join(repo_root_dir, repo) for repo in repo_dirs]
# Load Python documents
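# LanguageParser segments each source file into its top-level classes and
# functions; files shorter than parser_threshold (30) lines are kept whole.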
py_documents = []
for path in repo_dirs:
py_loader = GenericLoader.from_filesystem(
path,
glob="**/*",
suffixes=[".py"],
parser=LanguageParser(language=Language.PYTHON, parser_threshold=30),
)
py_documents.extend(py_loader.load())
print(f".py νμΌμ κ°μ: {len(py_documents)}")
# Load Markdown documents
mdx_documents = []
for dirpath, _, filenames in os.walk(repo_root_dir):
for file in filenames:
if file.endswith(".mdx") and "*venv/" not in dirpath:
try:
mdx_loader = TextLoader(os.path.join(dirpath, file), encoding="utf-8")
mdx_documents.extend(mdx_loader.load())
except Exception:
pass
print(f".mdx νμΌμ κ°μ: {len(mdx_documents)}")
# Load Jupyter Notebook documents
ipynb_documents = []
for dirpath, _, filenames in os.walk(repo_root_dir):
for file in filenames:
if file.endswith(".ipynb") and "*venv/" not in dirpath:
try:
ipynb_loader = NotebookLoader(
os.path.join(dirpath, file),
include_outputs=True,
max_output_length=20,
remove_newline=True,
)
ipynb_documents.extend(ipynb_loader.load())
except Exception:
pass
print(f".ipynb νμΌμ κ°μ: {len(ipynb_documents)}")
# Split documents into chunks
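# RecursiveCharacterTextSplitter.from_language uses language-aware separators
# (e.g. class/def boundaries for Python, headings for Markdown) before falling
# back to plain character splits.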
def split_documents(documents, language, chunk_size=2000, chunk_overlap=200):
splitter = RecursiveCharacterTextSplitter.from_language(
language=language, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
return splitter.split_documents(documents)
py_docs = split_documents(py_documents, Language.PYTHON)
mdx_docs = split_documents(mdx_documents, Language.MARKDOWN)
ipynb_docs = split_documents(ipynb_documents, Language.PYTHON)
print(f"λΆν λ .py νμΌμ κ°μ: {len(py_docs)}")
print(f"λΆν λ .mdx νμΌμ κ°μ: {len(mdx_docs)}")
print(f"λΆν λ .ipynb νμΌμ κ°μ: {len(ipynb_docs)}")
combined_documents = py_docs + mdx_docs + ipynb_docs
print(f"μ΄ λνλ¨ΌνΈ κ°μ: {len(combined_documents)}")
# Initialize embeddings and cache
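# CacheBackedEmbeddings memoizes document embeddings on disk (keyed by text and
# namespaced by model name), so rebuilding the index does not re-embed unchanged
# chunks. device="mps" assumes Apple Silicon; use "cuda" or "cpu" elsewhere.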
store = LocalFileStore(os.path.expanduser("~/.cache/embedding"))  # "~" must be expanded manually
embeddings = HuggingFaceBgeEmbeddings(
model_name="BAAI/bge-m3",
model_kwargs={"device": "mps"},
encode_kwargs={"normalize_embeddings": True},
)
cached_embeddings = CacheBackedEmbeddings.from_bytes_store(
embeddings, store, namespace=embeddings.model_name
)
# Create and save FAISS index
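# On the first run, uncomment the two lines below to build and persist the index.
# allow_dangerous_deserialization is required because the index is stored with
# pickle; only load index files you created yourself.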
FAISS_DB_INDEX = "./langchain_faiss"
# db = FAISS.from_documents(combined_documents, cached_embeddings)
# db.save_local(folder_path=FAISS_DB_INDEX)
db = FAISS.load_local(
FAISS_DB_INDEX, cached_embeddings, allow_dangerous_deserialization=True
)
# Create retrievers
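# EnsembleRetriever fuses the lexical (BM25) and semantic (FAISS) rankings with
# weighted Reciprocal Rank Fusion; MMR diversification is configured on the
# FAISS retriever itself via search_type="mmr".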
faiss_retriever = db.as_retriever(search_type="mmr", search_kwargs={"k": 10})
bm25_retriever = BM25Retriever.from_documents(combined_documents)
bm25_retriever.k = 10
ensemble_retriever = EnsembleRetriever(
    retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
# Create prompt template
prompt = PromptTemplate.from_template(
    """You are an AI developer with 20 years of experience. Your task is to answer the given question using the information in the documents as much as possible.
The documents contain information about Python code, so include detailed code snippets when writing your answer.
Answer in as much detail as possible, and answer in Korean. If the answer cannot be found in the given documents, reply "The documents do not contain the answer."
Always cite the source(s) of your answer.

#Reference documents:
{context}

#Question:
{question}

#Answer:

Sources:
- source1
- source2
- ...
"""
)
# Define callback handler for streaming
class StreamCallback(BaseCallbackHandler):
def on_llm_new_token(self, token: str, **kwargs):
print(token, end="", flush=True)
# Initialize LLMs with configuration
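# configurable_alternatives registers swappable models under a ConfigurableField,
# so the chain can select one at run time via
# .with_config(configurable={"llm": "<key>"}) without being rebuilt. Keys other
# than the default "gpt4" require the matching API key (or a running Ollama
# server for the "ollama" alternative).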
llm = ChatOpenAI(
model="gpt-4o",
temperature=0,
streaming=True,
callbacks=[StreamCallback()],
).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="gpt4",
claude=ChatAnthropic(
model="claude-3-opus-20240229",
temperature=0,
streaming=True,
callbacks=[StreamCallback()],
),
gpt3=ChatOpenAI(
model="gpt-3.5-turbo",
temperature=0,
streaming=True,
callbacks=[StreamCallback()],
),
gemini=GoogleGenerativeAI(
model="gemini-1.5-flash",
temperature=0,
streaming=True,
callbacks=[StreamCallback()],
),
llama3=ChatGroq(
model_name="llama3-70b-8192",
temperature=0,
streaming=True,
callbacks=[StreamCallback()],
),
ollama=ChatOllama(
model="EEVE-Korean-10.8B:long",
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
),
)
# Create retrieval-augmented generation chain
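# The dict at the head of the chain runs its values in parallel on the input
# question: the ensemble retriever fills {context} while RunnablePassthrough
# forwards the raw question into {question}.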
rag_chain = (
{"context": ensemble_retriever, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
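# Example usage (the question is hypothetical, shown for illustration):
# answer = rag_chain.with_config(configurable={"llm": "claude"}).invoke(
#     "How do I compose runnables with LCEL?"
# )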
model_key = os.getenv("LLM_MODEL", "gpt4")
print("model", model_key)
def respond(
message,
history: list[tuple[str, str]],
):
response = ""
for chunk in rag_chain.with_config(configurable={"llm": model_key}).stream(message):
response += chunk
yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
respond,
title="λ체μΈμ λν΄μ λ¬Όμ΄λ³΄μΈμ!",
description="μλ
νμΈμ!\nμ λ λ체μΈμ λν μΈκ³΅μ§λ₯ QAλ΄μ
λλ€. λ체μΈμ λν΄ κΉμ μ§μμ κ°μ§κ³ μμ΄μ. λμ²΄μΈ κ°λ°μ κ΄ν λμμ΄ νμνμλ©΄ μΈμ λ μ§ μ§λ¬Έν΄μ£ΌμΈμ!",
)
if __name__ == "__main__":
demo.launch()