from llama_index import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader(
"../../examples/data/paul_graham"
).load_data()
index = VectorStoreIndex.from_documents(documents)
import pinecone
from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores import PineconeVectorStore
pinecone.init(api_key="<api_key>", environment="<environment>")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
storage_context = StorageContext.from_defaults(
vector_store=PineconeVectorStore(pinecone.Index("quickstart"))
)
documents = SimpleDirectoryReader(
"../../examples/data/paul_graham"
).load_data()
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
vector_store = PineconeVectorStore(pinecone.Index("quickstart"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
query_engine = index.as_query_engine(
similarity_top_k=3,
vector_store_query_mode="default",
filters=MetadataFilters(
filters=[
ExactMatchFilter(key="name", value="paul graham"),
]
),
)
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('wget "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" -O paul_graham_essay.txt')
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
reader = SimpleDirectoryReader(input_files=["paul_graham_essay.txt"])
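# Hedged continuation sketch (not from the source row): one plausible way to use the
# reader and node parser imported above to produce nodes for indexing.
docs = reader.load_data()
node_parser = SimpleNodeParser.from_defaults()
nodes = node_parser.get_nodes_from_documents(docs)
print(f"Parsed {len(nodes)} nodes")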
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-kuzu')
import os
os.environ["OPENAI_API_KEY"] = "API_KEY_HERE"
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import shutil
shutil.rmtree("./test1", ignore_errors=True)
shutil.rmtree("./test2", ignore_errors=True)
shutil.rmtree("./test3", ignore_errors=True)
get_ipython().run_line_magic('pip', 'install kuzu')
import kuzu
db = kuzu.Database("test1")
from llama_index.graph_stores.kuzu import KuzuGraphStore
graph_store = KuzuGraphStore(db)
from llama_index.core import SimpleDirectoryReader, KnowledgeGraphIndex
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
from IPython.display import Markdown, display
import kuzu
documents = SimpleDirectoryReader(
"../../../../examples/paul_graham_essay/data"
).load_data()
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = llm
Settings.chunk_size = 512
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
documents,
max_triplets_per_chunk=2,
storage_context=storage_context,
)
query_engine = index.as_query_engine(
include_text=False, response_mode="tree_summarize"
)
response = query_engine.query(
"Tell me more about Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
query_engine = index.as_query_engine(
include_text=True, response_mode="tree_summarize"
)
response = query_engine.query(
"Tell me more about Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
db = kuzu.Database("test2")
graph_store = KuzuGraphStore(db)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
new_index = KnowledgeGraphIndex.from_documents(
documents,
max_triplets_per_chunk=2,
storage_context=storage_context,
include_embeddings=True,
)
rel_map = graph_store.get_rel_map()
query_engine = new_index.as_query_engine(
include_text=True,
response_mode="tree_summarize",
embedding_mode="hybrid",
similarity_top_k=5,
)
response = query_engine.query(
"Tell me more about what the author worked on at Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
get_ipython().run_line_magic('pip', 'install pyvis')
from pyvis.network import Network
g = index.get_networkx_graph()
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(g)
net.show("kuzugraph_draw.html")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter()
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('pip install llama-index')
get_ipython().system('pip install spacy')
wiki_titles = [
"Toronto",
"Seattle",
"Chicago",
"Boston",
"Houston",
"Tokyo",
"Berlin",
"Lisbon",
]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
from llama_index.core import SimpleDirectoryReader
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
city_descs_dict = {}
choices = []
choice_to_id_dict = {}
for idx, wiki_title in enumerate(wiki_titles):
vector_desc = (
"Useful for questions related to specific aspects of"
f" {wiki_title} (e.g. the history, arts and culture,"
" sports, demographics, or more)."
)
summary_desc = (
"Useful for any requests that require a holistic summary"
f" of EVERYTHING about {wiki_title}. For questions about"
" more specific sections, please use the vector_tool."
)
doc_id_vector = f"{wiki_title}_vector"
doc_id_summary = f"{wiki_title}_summary"
city_descs_dict[doc_id_vector] = vector_desc
city_descs_dict[doc_id_summary] = summary_desc
choices.extend([vector_desc, summary_desc])
choice_to_id_dict[idx * 2] = f"{wiki_title}_vector"
choice_to_id_dict[idx * 2 + 1] = f"{wiki_title}_summary"
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
llm = OpenAI(model_name="gpt-3.5-turbo")
summary_q_tmpl = """\
You are a summary question generator. Given an existing question which asks for a summary of a given topic, \
generate {num_vary} related queries that also ask for a summary of the topic.
For example, assuming we're generating 3 related questions:
Base Question: Can you tell me more about Boston?
Question Variations:
Give me an overview of Boston as a city.
Can you describe different aspects of Boston, from the history to the sports scene to the food?
Write a concise summary of Boston; I've never been.
Now let's give it a shot!
Base Question: {base_question}
Question Variations:
"""
summary_q_prompt = PromptTemplate(summary_q_tmpl)
from collections import defaultdict
from llama_index.core.evaluation import DatasetGenerator
from llama_index.core.evaluation import EmbeddingQAFinetuneDataset
from llama_index.core.node_parser import SimpleNodeParser
from tqdm.notebook import tqdm
def generate_dataset(
wiki_titles,
city_descs_dict,
llm,
summary_q_prompt,
num_vector_qs_per_node=2,
num_summary_qs=4,
):
queries = {}
corpus = {}
relevant_docs = defaultdict(list)
for idx, wiki_title in enumerate(tqdm(wiki_titles)):
doc_id_vector = f"{wiki_title}_vector"
doc_id_summary = f"{wiki_title}_summary"
corpus[doc_id_vector] = city_descs_dict[doc_id_vector]
corpus[doc_id_summary] = city_descs_dict[doc_id_summary]
node_parser = SimpleNodeParser.from_defaults()
nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title])
dataset_generator = DatasetGenerator(
nodes,
llm=llm,
num_questions_per_chunk=num_vector_qs_per_node,
)
doc_questions = dataset_generator.generate_questions_from_nodes(
num=len(nodes) * num_vector_qs_per_node
)
for query_idx, doc_question in enumerate(doc_questions):
query_id = f"{wiki_title}_{query_idx}"
relevant_docs[query_id] = [doc_id_vector]
queries[query_id] = doc_question
base_q = f"Give me a summary of {wiki_title}"
fmt_prompt = summary_q_prompt.format(
num_vary=num_summary_qs,
base_question=base_q,
)
raw_response = llm.complete(fmt_prompt)
raw_lines = str(raw_response).split("\n")
doc_summary_questions = [l for l in raw_lines if l != ""]
print(f"[{idx}] Original Question: {base_q}")
print(
f"[{idx}] Generated Question Variations: {doc_summary_questions}"
)
for query_idx, doc_summary_question in enumerate(
doc_summary_questions
):
query_id = f"{wiki_title}_{query_idx}"
relevant_docs[query_id] = [doc_id_summary]
queries[query_id] = doc_summary_question
return EmbeddingQAFinetuneDataset(
queries=queries, corpus=corpus, relevant_docs=relevant_docs
)
dataset = generate_dataset(
wiki_titles,
city_descs_dict,
llm,
summary_q_prompt,
num_vector_qs_per_node=4,
num_summary_qs=5,
)
dataset.save_json("dataset.json")
dataset = EmbeddingQAFinetuneDataset.from_json("dataset.json")
import random
def split_train_val_by_query(dataset, split=0.7):
"""Split dataset by queries."""
query_ids = list(dataset.queries.keys())
query_ids_shuffled = random.sample(query_ids, len(query_ids))
split_idx = int(len(query_ids) * split)
train_query_ids = query_ids_shuffled[:split_idx]
eval_query_ids = query_ids_shuffled[split_idx:]
train_queries = {qid: dataset.queries[qid] for qid in train_query_ids}
eval_queries = {qid: dataset.queries[qid] for qid in eval_query_ids}
train_rel_docs = {
qid: dataset.relevant_docs[qid] for qid in train_query_ids
}
eval_rel_docs = {qid: dataset.relevant_docs[qid] for qid in eval_query_ids}
train_dataset = EmbeddingQAFinetuneDataset(
queries=train_queries,
corpus=dataset.corpus,
relevant_docs=train_rel_docs,
)
eval_dataset = EmbeddingQAFinetuneDataset(
queries=eval_queries,
corpus=dataset.corpus,
relevant_docs=eval_rel_docs,
)
return train_dataset, eval_dataset
train_dataset, eval_dataset = split_train_val_by_query(dataset, split=0.7)
from llama_index.finetuning import SentenceTransformersFinetuneEngine
finetune_engine = SentenceTransformersFinetuneEngine(
train_dataset,
model_id="BAAI/bge-small-en",
model_output_path="test_model3",
val_dataset=eval_dataset,
epochs=30, # can set to higher (haven't tested)
)
finetune_engine.finetune()
ft_embed_model = finetune_engine.get_finetuned_model()
ft_embed_model
from llama_index.core.embeddings import resolve_embed_model
base_embed_model = resolve_embed_model("local:BAAI/bge-small-en")
from llama_index.core.selectors import (
EmbeddingSingleSelector,
LLMSingleSelector,
)
ft_selector = EmbeddingSingleSelector.from_defaults(embed_model=ft_embed_model)
base_selector = EmbeddingSingleSelector.from_defaults(
embed_model=base_embed_model
)
import numpy as np
def run_evals(eval_dataset, selector, choices, choice_to_id_dict):
eval_pairs = eval_dataset.query_docid_pairs
matches = []
for query, relevant_doc_ids in tqdm(eval_pairs):
result = selector.select(choices, query)
pred_doc_id = choice_to_id_dict[result.inds[0]]
gt_doc_id = relevant_doc_ids[0]
matches.append(gt_doc_id == pred_doc_id)
return np.array(matches)
ft_matches = run_evals(eval_dataset, ft_selector, choices, choice_to_id_dict)
np.mean(ft_matches)
base_matches = run_evals(
eval_dataset, base_selector, choices, choice_to_id_dict
)
np.mean(base_matches)
from llama_index.llms.openai import OpenAI
eval_llm = OpenAI(model="gpt-3.5-turbo")
llm_selector = LLMSingleSelector.from_defaults(
llm=eval_llm,
)
llm_matches = run_evals(eval_dataset, llm_selector, choices, choice_to_id_dict)
np.mean(llm_matches)
import pandas as pd
eval_df = pd.DataFrame(
{
"Base embedding model": np.mean(base_matches),
"GPT-3.5": np.mean(llm_matches),
"Fine-tuned embedding model": np.mean(ft_matches),
},
index=["Match Rate"],
)
display(eval_df)
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core import SummaryIndex
from llama_index.core import VectorStoreIndex
from llama_index.core.tools import QueryEngineTool
tools = []
for idx, wiki_title in enumerate(tqdm(wiki_titles)):
doc_id_vector = f"{wiki_title}_vector"
doc_id_summary = f"{wiki_title}_summary"
vector_index = VectorStoreIndex.from_documents(city_docs[wiki_title])
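    # Hedged continuation sketch: one plausible way to finish building the per-city tools
    # started in the loop above (the summary index and tool wiring are assumptions, not from the source row).
    summary_index = SummaryIndex.from_documents(city_docs[wiki_title])
    vector_tool = QueryEngineTool.from_defaults(
        query_engine=vector_index.as_query_engine(),
        description=city_descs_dict[doc_id_vector],
    )
    summary_tool = QueryEngineTool.from_defaults(
        query_engine=summary_index.as_query_engine(response_mode="tree_summarize"),
        description=city_descs_dict[doc_id_summary],
    )
    tools.extend([vector_tool, summary_tool])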
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
from pathlib import Path
data_dir = Path("./WikiTableQuestions/csv/200-csv")
csv_files = sorted([f for f in data_dir.glob("*.csv")])
dfs = []
for csv_file in csv_files:
print(f"processing file: {csv_file}")
try:
df = pd.read_csv(csv_file)
dfs.append(df)
except Exception as e:
print(f"Error parsing {csv_file}: {str(e)}")
tableinfo_dir = "WikiTableQuestions_TableInfo"
get_ipython().system('mkdir {tableinfo_dir}')
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
class TableInfo(BaseModel):
"""Information regarding a structured table."""
table_name: str = Field(
..., description="table name (must be underscores and NO spaces)"
)
table_summary: str = Field(
..., description="short, concise summary/caption of the table"
)
prompt_str = """\
Give me a summary of the table with the following JSON format.
- The table name must be unique to the table and describe it while being concise.
- Do NOT output a generic table name (e.g. table, my_table).
Do NOT make the table name one of the following: {exclude_table_name_list}
Table:
{table_str}
Summary: """
program = LLMTextCompletionProgram.from_defaults(
output_cls=TableInfo,
llm=OpenAI(model="gpt-3.5-turbo"),
prompt_template_str=prompt_str,
)
import json
def _get_tableinfo_with_index(idx: int) -> str:
results_gen = Path(tableinfo_dir).glob(f"{idx}_*")
results_list = list(results_gen)
if len(results_list) == 0:
return None
elif len(results_list) == 1:
path = results_list[0]
return TableInfo.parse_file(path)
else:
raise ValueError(
f"More than one file matching index: {list(results_gen)}"
)
table_names = set()
table_infos = []
for idx, df in enumerate(dfs):
table_info = _get_tableinfo_with_index(idx)
if table_info:
table_infos.append(table_info)
else:
while True:
df_str = df.head(10).to_csv()
table_info = program(
table_str=df_str,
exclude_table_name_list=str(list(table_names)),
)
table_name = table_info.table_name
print(f"Processed table: {table_name}")
if table_name not in table_names:
table_names.add(table_name)
break
else:
print(f"Table name {table_name} already exists, trying again.")
pass
out_file = f"{tableinfo_dir}/{idx}_{table_name}.json"
json.dump(table_info.dict(), open(out_file, "w"))
table_infos.append(table_info)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
)
import re
def sanitize_column_name(col_name):
return re.sub(r"\W+", "_", col_name)
def create_table_from_dataframe(
df: pd.DataFrame, table_name: str, engine, metadata_obj
):
sanitized_columns = {col: sanitize_column_name(col) for col in df.columns}
df = df.rename(columns=sanitized_columns)
columns = [
Column(col, String if dtype == "object" else Integer)
for col, dtype in zip(df.columns, df.dtypes)
]
table = Table(table_name, metadata_obj, *columns)
metadata_obj.create_all(engine)
with engine.connect() as conn:
for _, row in df.iterrows():
insert_stmt = table.insert().values(**row.to_dict())
conn.execute(insert_stmt)
conn.commit()
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
for idx, df in enumerate(dfs):
tableinfo = _get_tableinfo_with_index(idx)
print(f"Creating table: {tableinfo.table_name}")
create_table_from_dataframe(df, tableinfo.table_name, engine, metadata_obj)
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.objects import (
SQLTableNodeMapping,
ObjectIndex,
SQLTableSchema,
)
from llama_index.core import SQLDatabase, VectorStoreIndex
sql_database = SQLDatabase(engine)
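# Hedged continuation sketch: index the table schemas with the object-index machinery
# imported above (table_infos comes from the earlier TableInfo extraction loop).
table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
    SQLTableSchema(table_name=t.table_name, context_str=t.table_summary)
    for t in table_infos
]
obj_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)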
get_ipython().run_line_magic('pip', 'install llama-index-packs-node-parser-semantic-chunking')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-node-parser-semantic-chunking-base')
from llama_index.core import SimpleDirectoryReader
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'pg_essay.txt'")
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
from llama_index.packs.node_parser_semantic_chunking.base import SemanticChunker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"SemanticChunkingQueryEnginePack",
"./semantic_chunking_pack",
skip_load=True,
)
from semantic_chunking_pack.base import SemanticChunker
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
splitter = SemanticChunker(
buffer_size=1, breakpoint_percentile_threshold=95, embed_model=embed_model
)
base_splitter = SentenceSplitter(chunk_size=512)
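# Hedged sketch: chunk the essay with both splitters so the semantic chunks can be
# compared against fixed-size sentence chunks.
nodes = splitter.get_nodes_from_documents(documents)
base_nodes = base_splitter.get_nodes_from_documents(documents)
print(len(nodes), len(base_nodes))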
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
import json
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
TRAIN_FILES = ["./data/10k/lyft_2021.pdf"]
VAL_FILES = ["./data/10k/uber_2021.pdf"]
TRAIN_CORPUS_FPATH = "./data/train_corpus.json"
VAL_CORPUS_FPATH = "./data/val_corpus.json"
def load_corpus(files, verbose=False):
if verbose:
print(f"Loading files {files}")
reader = SimpleDirectoryReader(input_files=files)
docs = reader.load_data()
if verbose:
print(f"Loaded {len(docs)} docs")
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(docs, show_progress=verbose)
if verbose:
print(f"Parsed {len(nodes)} nodes")
return nodes
train_nodes = load_corpus(TRAIN_FILES, verbose=True)
val_nodes = load_corpus(VAL_FILES, verbose=True)
from llama_index.finetuning import generate_qa_embedding_pairs
from llama_index.core.evaluation import EmbeddingQAFinetuneDataset
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from llama_index.llms.openai import OpenAI
train_dataset = generate_qa_embedding_pairs(
llm=OpenAI(model="gpt-3.5-turbo"), nodes=train_nodes
)
val_dataset = generate_qa_embedding_pairs(
llm=OpenAI(model="gpt-3.5-turbo"), nodes=val_nodes
)
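# Hedged sketch (file names are assumptions): persist both QA datasets for later finetuning runs.
train_dataset.save_json("train_dataset.json")
val_dataset.save_json("val_dataset.json")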
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-packs-arize-phoenix-query-engine')
import os
from llama_index.packs.arize_phoenix_query_engine import ArizePhoenixQueryEnginePack
from llama_index.core.node_parser import SentenceSplitter
from llama_index.readers.web import SimpleWebPageReader
from tqdm.auto import tqdm
os.environ["OPENAI_API_KEY"] = "copy-your-openai-api-key-here"
documents = SimpleWebPageReader().load_data(
[
"https://raw.githubusercontent.com/jerryjliu/llama_index/adb054429f642cc7bbfcb66d4c232e072325eeab/examples/paul_graham_essay/data/paul_graham_essay.txt"
]
)
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(documents)
phoenix_pack = ArizePhoenixQueryEnginePack(nodes=nodes)
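# Hedged usage sketch, assuming the pack exposes the standard LlamaPack `run` method.
response = phoenix_pack.run("What did the author do growing up?")
print(response)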
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import nltk
nltk.download("stopwords")
import llama_index.core
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
storage_context = StorageContext.from_defaults(persist_dir="storage")
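# Hedged sketch: reload the persisted index from the storage context above and query it.
index = load_index_from_storage(storage_context, index_id="vector_index")
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))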
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-typesense')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.vector_stores.typesense import TypesenseVectorStore
from typesense import Client
typesense_client = Client(
{
"api_key": "xyz",
"nodes": [{"host": "localhost", "port": "8108", "protocol": "http"}],
"connection_timeout_seconds": 2,
}
)
typesense_vector_store = TypesenseVectorStore(typesense_client)
storage_context = StorageContext.from_defaults(
vector_store=typesense_vector_store
)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
from llama_index.core import QueryBundle
from llama_index.embeddings.openai import OpenAIEmbedding
query_str = "What did the author do growing up?"
embed_model = OpenAIEmbedding()
from llama_index.core import Settings
query_embedding = embed_model.get_agg_embedding_from_queries([query_str])
query_bundle = QueryBundle(query_str, embedding=query_embedding)
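# Hedged sketch: run the Typesense-backed query engine with the prepared QueryBundle.
query_engine = index.as_query_engine()
response = query_engine.query(query_bundle)
display(Markdown(f"<b>{response}</b>"))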
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index ipywidgets')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from IPython.display import Markdown, display
import torch
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core import PromptTemplate
LLAMA2_7B = "meta-llama/Llama-2-7b-hf"
LLAMA2_7B_CHAT = "meta-llama/Llama-2-7b-chat-hf"
LLAMA2_13B = "meta-llama/Llama-2-13b-hf"
LLAMA2_13B_CHAT = "meta-llama/Llama-2-13b-chat-hf"
LLAMA2_70B = "meta-llama/Llama-2-70b-hf"
LLAMA2_70B_CHAT = "meta-llama/Llama-2-70b-chat-hf"
selected_model = LLAMA2_13B_CHAT
SYSTEM_PROMPT = """You are an AI assistant that answers questions in a friendly manner, based on the given source documents. Here are some rules you always follow:
- Generate human readable output, avoid creating output with gibberish text.
- Generate only the requested output, don't include any other language before or after the requested output.
- Never say thank you, that you are happy to help, that you are an AI agent, etc. Just answer directly.
- Generate professional language typically used in business documents in North America.
- Never generate offensive or foul language.
"""
query_wrapper_prompt = PromptTemplate(
"[INST]<<SYS>>\n" + SYSTEM_PROMPT + "<</SYS>>\n\n{query_str}[/INST] "
)
llm = HuggingFaceLLM(
context_window=4096,
max_new_tokens=2048,
generate_kwargs={"temperature": 0.0, "do_sample": False},
query_wrapper_prompt=query_wrapper_prompt,
tokenizer_name=selected_model,
model_name=selected_model,
device_map="auto",
model_kwargs={"torch_dtype": torch.float16, "load_in_8bit": True},
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
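# Hedged usage sketch: query the index with the local Llama 2 LLM configured above.
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))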
get_ipython().run_line_magic('pip', 'install llama-index-readers-database')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from __future__ import absolute_import
import os
os.environ["OPENAI_API_KEY"] = ""
from llama_index.readers.database import DatabaseReader
from llama_index.core import VectorStoreIndex
db = DatabaseReader(
scheme="postgresql", # Database Scheme
host="localhost", # Database Host
port="5432", # Database Port
user="postgres", # Database User
password="FakeExamplePassword", # Database Password
dbname="postgres", # Database Name
)
print(type(db))
print(type(db.load_data))
print(type(db.sql_database))
print(type(db.sql_database.from_uri))
print(type(db.sql_database.get_single_table_info))
print(type(db.sql_database.get_table_columns))
print(type(db.sql_database.get_usable_table_names))
print(type(db.sql_database.insert_into_table))
print(type(db.sql_database.run_sql))
print(type(db.sql_database.dialect))
print(type(db.sql_database.engine))
print(type(db.sql_database))
db_from_sql_database = DatabaseReader(sql_database=db.sql_database)
print(type(db_from_sql_database))
print(type(db.sql_database.engine))
db_from_engine = DatabaseReader(engine=db.sql_database.engine)
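# Hedged sketch (the SQL text and table are assumptions): load rows as documents and index them.
query = "SELECT CONCAT(name, ' is ', age, ' years old.') AS text FROM public.users"
documents = db.load_data(query=query)
index = VectorStoreIndex.from_documents(documents)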
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
)
from llama_index.core.query_engine.pandas import PandasInstructionParser
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'")
import pandas as pd
df = pd.read_csv("./titanic_train.csv")
instruction_str = (
"1. Convert the query to executable Python code using Pandas.\n"
"2. The final line of code should be a Python expression that can be called with the `eval()` function.\n"
"3. The code should represent a solution to the query.\n"
"4. PRINT ONLY THE EXPRESSION.\n"
"5. Do not quote the expression.\n"
)
pandas_prompt_str = (
"You are working with a pandas dataframe in Python.\n"
"The name of the dataframe is `df`.\n"
"This is the result of `print(df.head())`:\n"
"{df_str}\n\n"
"Follow these instructions:\n"
"{instruction_str}\n"
"Query: {query_str}\n\n"
"Expression:"
)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n\n"
"Pandas Instructions (optional):\n{pandas_instructions}\n\n"
"Pandas Output: {pandas_output}\n\n"
"Response: "
)
pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format(
instruction_str=instruction_str, df_str=df.head(5)
)
pandas_output_parser = PandasInstructionParser(df)
response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
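# Hedged sketch of wiring the components above into a query pipeline
# (module keys and link wiring are assumptions, not from the source row).
qp = QP(
    modules={
        "input": InputComponent(),
        "pandas_prompt": pandas_prompt,
        "llm1": llm,
        "pandas_output_parser": pandas_output_parser,
        "response_synthesis_prompt": response_synthesis_prompt,
        "llm2": llm,
    },
    verbose=True,
)
qp.add_chain(["input", "pandas_prompt", "llm1", "pandas_output_parser"])
qp.add_links(
    [
        Link("input", "response_synthesis_prompt", dest_key="query_str"),
        Link("llm1", "response_synthesis_prompt", dest_key="pandas_instructions"),
        Link("pandas_output_parser", "response_synthesis_prompt", dest_key="pandas_output"),
    ]
)
qp.add_link("response_synthesis_prompt", "llm2")
response = qp.run(query_str="What is the correlation between survival and age?")
print(response)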
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.readers.file import PDFReader
reader = PDFReader()
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
docs = reader.load_data("./data/10k/lyft_2021.pdf")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter()
nodes = node_parser.get_nodes_from_documents(docs)
print(nodes[8].get_content(metadata_mode="all"))
get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet')
from pgvector.sqlalchemy import Vector
from sqlalchemy import insert, create_engine, String, text, Integer
from sqlalchemy.orm import declarative_base, mapped_column
engine = create_engine("postgresql+psycopg2://localhost/postgres")
with engine.connect() as conn:
conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
conn.commit()
Base = declarative_base()
class SECTextChunk(Base):
__tablename__ = "sec_text_chunk"
id = mapped_column(Integer, primary_key=True)
page_label = mapped_column(Integer)
file_name = mapped_column(String)
text = mapped_column(String)
embedding = mapped_column(Vector(384))
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
for node in nodes:
text_embedding = embed_model.get_text_embedding(node.get_content())
node.embedding = text_embedding
for node in nodes:
row_dict = {
"text": node.get_content(),
"embedding": node.embedding,
**node.metadata,
}
stmt = insert(SECTextChunk).values(**row_dict)
with engine.connect() as connection:
cursor = connection.execute(stmt)
connection.commit()
from llama_index.core import PromptTemplate
text_to_sql_tmpl = """\
Given an input question, first create a syntactically correct {dialect} \
query to run, then look at the results of the query and return the answer. \
You can order the results by a relevant column to return the most \
interesting examples in the database.
Pay attention to use only the column names that you can see in the schema \
description. Be careful to not query for columns that do not exist. \
Pay attention to which column is in which table. Also, qualify column names \
with the table name when needed.
IMPORTANT NOTE: you can use specialized pgvector syntax (`<->`) to do nearest \
neighbors/semantic search to a given vector from an embeddings column in the table. \
The embeddings value for a given row typically represents the semantic meaning of that row. \
The vector represents an embedding representation \
of the question, given below. Do NOT fill in the vector values directly, but rather specify a \
`[query_vector]` placeholder. For instance, some select statement examples below \
(the name of the embeddings column is `embedding`):
SELECT * FROM items ORDER BY embedding <-> '[query_vector]' LIMIT 5;
SELECT * FROM items WHERE id != 1 ORDER BY embedding <-> (SELECT embedding FROM items WHERE id = 1) LIMIT 5;
SELECT * FROM items WHERE embedding <-> '[query_vector]' < 5;
You are required to use the following format, \
each taking one line:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
Only use tables listed below.
{schema}
Question: {query_str}
SQLQuery: \
"""
text_to_sql_prompt = PromptTemplate(text_to_sql_tmpl)
from llama_index.core import SQLDatabase
from llama_index.llms.openai import OpenAI
from llama_index.core.query_engine import PGVectorSQLQueryEngine
from llama_index.core import Settings
sql_database = SQLDatabase(engine, include_tables=["sec_text_chunk"])
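# Hedged sketch: configure the LLM and embedding model, then build the PGVector-aware
# text-to-SQL engine with the prompt defined above (model choice is an assumption).
Settings.llm = OpenAI(model="gpt-4")
Settings.embed_model = embed_model
query_engine = PGVectorSQLQueryEngine(
    sql_database=sql_database,
    text_to_sql_prompt=text_to_sql_prompt,
)
response = query_engine.query(
    "Can you tell me about the risk factors described in the Lyft 10-K?"
)
print(str(response))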
get_ipython().run_line_magic('pip', 'install llama-index-llms-anyscale')
get_ipython().system('pip install llama-index')
from llama_index.llms.anyscale import Anyscale
from llama_index.core.llms import ChatMessage
llm = Anyscale(api_key="<your-api-key>")
message = ChatMessage(role="user", content="Tell me a joke")
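# Hedged usage sketch: send the message through the Anyscale chat endpoint.
resp = llm.chat([message])
print(resp)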
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from IPython.display import Markdown, display
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
selector = LLMMultiSelector.from_defaults()
from llama_index.core.tools import ToolMetadata
tool_choices = [
ToolMetadata(
name="covid_nyt",
description=("This tool contains a NYT news article about COVID-19"),
),
ToolMetadata(
name="covid_wiki",
description=("This tool contains the Wikipedia page about COVID-19"),
),
ToolMetadata(
name="covid_tesla",
description=("This tool contains the Wikipedia page about apples"),
),
]
display_prompt_dict(selector.get_prompts())
selector_result = selector.select(
tool_choices, query="Tell me more about COVID-19"
)
selector_result.selections
from llama_index.core import PromptTemplate
from llama_index.llms.openai import OpenAI
query_gen_str = """\
You are a helpful assistant that generates multiple search queries based on a \
single input query. Generate {num_queries} search queries, one on each line, \
related to the following input query:
Query: {query}
Queries:
"""
query_gen_prompt = PromptTemplate(query_gen_str)
llm = OpenAI(model="gpt-3.5-turbo")
def generate_queries(query: str, llm, num_queries: int = 4):
response = llm.predict(
query_gen_prompt, num_queries=num_queries, query=query
)
queries = response.split("\n")
queries_str = "\n".join(queries)
print(f"Generated queries:\n{queries_str}")
return queries
queries = generate_queries("What happened at Interleaf and Viaweb?", llm)
queries
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.llms.openai import OpenAI
hyde = HyDEQueryTransform(include_original=True)
llm = OpenAI(model="gpt-3.5-turbo")
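# Hedged sketch: apply the HyDE transform to a query and inspect the hypothetical document
# it generates for embedding (the query string is an assumption).
query_bundle = hyde.run("What did Paul Graham do after RISD?")
print(query_bundle.custom_embedding_strs)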
get_ipython().run_line_magic('pip', 'install llama-index-llms-together')
get_ipython().system('pip install llama-index')
from llama_index.llms.together import TogetherLLM
llm = TogetherLLM(
model="mistralai/Mixtral-8x7B-Instruct-v0.1", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
from llama_index.core.llms import ChatMessage
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
| ChatMessage(role="user", content="What is your name") | llama_index.core.llms.ChatMessage |
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb')
get_ipython().system('pip install llama-index-vector-stores-duckdb')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.duckdb import DuckDBVectorStore
from llama_index.core import StorageContext
from IPython.display import Markdown, display
import os
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
vector_store = DuckDBVectorStore.from_local("./persist/pg.duckdb")
index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
**{
"text": "The Shawshank Redemption",
"metadata": {
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
"ref_doc_id": "doc_1",
},
}
),
TextNode(
**{
"text": "The Godfather",
"metadata": {
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
"ref_doc_id": "doc_1",
},
}
),
TextNode(
**{
"text": "Inception",
"metadata": {
"director": "Christopher Nolan",
"theme": "Sci-fi",
"year": 2010,
"ref_doc_id": "doc_2",
},
}
),
]
vector_store = DuckDBVectorStore()
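# Hedged sketch: index the TextNodes above in the fresh DuckDB store and retrieve with a
# metadata filter (filter key/value taken from the node metadata defined earlier).
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
filters = MetadataFilters(filters=[ExactMatchFilter(key="theme", value="Mafia")])
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What is this story about?"))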
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.schema import TextNode
from llama_index.core.indices.managed.types import ManagedIndexQueryMode
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.indices.managed.vectara import VectaraAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.llms.openai import OpenAI
nodes = [
TextNode(
text=(
"A pragmatic paleontologist touring an almost complete theme park on an island "
+ "in Central America is tasked with protecting a couple of kids after a power "
+ "failure causes the park's cloned dinosaurs to run loose."
),
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
TextNode(
text=(
"A thief who steals corporate secrets through the use of dream-sharing technology "
+ "is given the inverse task of planting an idea into the mind of a C.E.O., "
+ "but his tragic past may doom the project and his team to disaster."
),
metadata={
"year": 2010,
"director": "Christopher Nolan",
"rating": 8.2,
},
),
TextNode(
text="Barbie suffers a crisis that leads her to question her world and her existence.",
metadata={
"year": 2023,
"director": "Greta Gerwig",
"genre": "fantasy",
"rating": 9.5,
},
),
TextNode(
text=(
"A cowboy doll is profoundly threatened and jealous when a new spaceman action "
+ "figure supplants him as top toy in a boy's bedroom."
),
metadata={"year": 1995, "genre": "animated", "rating": 8.3},
),
TextNode(
text=(
"When Woody is stolen by a toy collector, Buzz and his friends set out on a "
+ "rescue mission to save Woody before he becomes a museum toy property with his "
+ "roundup gang Jessie, Prospector, and Bullseye. "
),
metadata={"year": 1999, "genre": "animated", "rating": 7.9},
),
TextNode(
text=(
"The toys are mistakenly delivered to a day-care center instead of the attic "
+ "right before Andy leaves for college, and it's up to Woody to convince the "
+ "other toys that they weren't abandoned and to return home."
),
metadata={"year": 2010, "genre": "animated", "rating": 8.3},
),
]
index = VectaraIndex(nodes=nodes)
import getpass
import os
import openai
if not os.environ.get("OPENAI_API_KEY", None):
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
openai.api_key = os.environ["OPENAI_API_KEY"]
vector_store_info = VectorStoreInfo(
content_info="information about a movie",
metadata_info=[
MetadataInfo(
name="genre",
description="The genre of the movie. One of ['science fiction', 'comedy', 'drama', 'thriller', 'romance', 'action', 'animated']",
type="string",
),
MetadataInfo(
name="year",
description="The year the movie was released",
type="integer",
),
MetadataInfo(
name="director",
description="The name of the movie director",
type="string",
),
MetadataInfo(
name="rating",
description="A 1-10 rating for the movie",
type="float",
),
],
)
llm = | OpenAI(model="gpt-4-1106-preview", temperature=0) | llama_index.llms.openai.OpenAI |
get_ipython().system('pip install llama-index')
from llama_index.core.chat_engine import SimpleChatEngine
chat_engine = SimpleChatEngine.from_defaults()
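# Hedged usage sketch: chat with the default engine.
response = chat_engine.chat("Tell me a joke.")
print(response)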
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core.ingestion.cache import RedisCache
from llama_index.core.ingestion import IngestionCache
ingest_cache = IngestionCache(
cache=RedisCache.from_host_and_port(host="127.0.0.1", port=6379),
collection="my_test_cache",
)
get_ipython().system('pip install weaviate-client')
import weaviate
auth_config = weaviate.AuthApiKey(api_key="...")
client = weaviate.Client(url="https://...", auth_client_secret=auth_config)
from llama_index.vector_stores.weaviate import WeaviateVectorStore
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="CachingTest"
)
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
text_splitter = TokenTextSplitter(chunk_size=512)
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
import re
from llama_index.core.schema import TransformComponent
class TextCleaner(TransformComponent):
def __call__(self, nodes, **kwargs):
for node in nodes:
node.text = re.sub(r"[^0-9A-Za-z ]", "", node.text)
return nodes
from llama_index.core.extractors import TitleExtractor
from llama_index.core.ingestion import IngestionPipeline
pipeline = IngestionPipeline(
transformations=[
TextCleaner(),
text_splitter,
embed_model,
TitleExtractor(),
],
vector_store=vector_store,
cache=ingest_cache,
)
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("../data/paul_graham/").load_data()
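# Hedged sketch: run the cached ingestion pipeline over the loaded documents; nodes are
# written to the Weaviate store and intermediate results are cached in Redis.
nodes = pipeline.run(documents=documents)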
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
get_ipython().system('pip install llama-index')
from llama_index.program.openai import OpenAIPydanticProgram
from llama_index.core.program import (
DFFullProgram,
DataFrame,
DataFrameRowsOnly,
)
from llama_index.llms.openai import OpenAI
program = OpenAIPydanticProgram.from_defaults(
output_cls=DataFrame,
llm=OpenAI(temperature=0, model="gpt-4-0613"),
)
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import nltk
nltk.download("stopwords")
import llama_index.core
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
response = query_engine.query(
"Tell me about the main aspects of safety fine-tuning"
)
display_response(
response, show_source=True, source_length=500, show_source_metadata=True
)
from collections import defaultdict
import pandas as pd
def mrr_all(metadata_values, metadata_key, source_nodes):
value_to_mrr_dict = {}
for metadata_value in metadata_values:
mrr = 0
for idx, source_node in enumerate(source_nodes):
if source_node.node.metadata[metadata_key] == metadata_value:
mrr = 1 / (idx + 1)
break
else:
continue
value_to_mrr_dict[metadata_value] = mrr
df = pd.DataFrame(value_to_mrr_dict, index=["MRR"])
df.style.set_caption("Mean Reciprocal Rank")
return df
print("Mean Reciprocal Rank for each Chunk Size")
mrr_all(chunk_sizes, "chunk_size", response.source_nodes)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_llm = OpenAI(model="gpt-4")
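# Hedged sketch: generate an evaluation dataset over one set of parsed nodes with the eval LLM
# (the node subset and question count are assumptions).
dataset_generator = DatasetGenerator(
    nodes_list[-1][:20], llm=eval_llm, show_progress=True, num_questions_per_chunk=2
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=40)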
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(
input_files=["./data/paul_graham/paul_graham_essay.txt"]
)
docs = reader.load_data()
text = docs[0].text
from llama_index.core import PromptTemplate
qa_prompt_tmpl = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Please also write the answer in the style of {tone_name}.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt = PromptTemplate(qa_prompt_tmpl)
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-guidance')
get_ipython().system('pip install llama-index')
from llama_index.question_gen.guidance import GuidanceQuestionGenerator
from guidance.llms import OpenAI as GuidanceOpenAI
question_gen = GuidanceQuestionGenerator.from_defaults(
guidance_llm=GuidanceOpenAI("text-davinci-003"), verbose=False
)
from llama_index.core.tools import ToolMetadata
from llama_index.core import QueryBundle
tools = [
ToolMetadata(
name="lyft_10k",
description="Provides information about Lyft financials for year 2021",
),
ToolMetadata(
name="uber_10k",
description="Provides information about Uber financials for year 2021",
),
]
sub_questions = question_gen.generate(
tools=tools,
query=QueryBundle("Compare and contrast Uber and Lyft financial in 2021"),
)
sub_questions
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.response.pprint_utils import pprint_response
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.query_engine import SubQuestionQueryEngine
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
lyft_docs = SimpleDirectoryReader(
input_files=["./data/10k/lyft_2021.pdf"]
).load_data()
uber_docs = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
lyft_index = VectorStoreIndex.from_documents(lyft_docs)
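# Hedged continuation sketch: index Uber's filing as well and combine both engines under a
# sub-question query engine driven by the guidance question generator defined above.
uber_index = VectorStoreIndex.from_documents(uber_docs)
lyft_engine = lyft_index.as_query_engine(similarity_top_k=3)
uber_engine = uber_index.as_query_engine(similarity_top_k=3)
query_engine_tools = [
    QueryEngineTool(
        query_engine=lyft_engine,
        metadata=ToolMetadata(
            name="lyft_10k",
            description="Provides information about Lyft financials for year 2021",
        ),
    ),
    QueryEngineTool(
        query_engine=uber_engine,
        metadata=ToolMetadata(
            name="uber_10k",
            description="Provides information about Uber financials for year 2021",
        ),
    ),
]
sub_question_engine = SubQuestionQueryEngine.from_defaults(
    query_engine_tools=query_engine_tools, question_gen=question_gen
)
response = sub_question_engine.query(
    "Compare and contrast Uber and Lyft financials in 2021"
)
pprint_response(response)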
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.readers.file import UnstructuredReader
from llama_index.readers.file import PyMuPDFReader
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
index = VectorStoreIndex(base_nodes)
query_engine = index.as_query_engine(similarity_top_k=2)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.core.node_parser import SimpleNodeParser
dataset_generator = DatasetGenerator(
base_nodes[:20],
llm=OpenAI(model="gpt-4"),
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import random
full_qr_pairs = eval_dataset.qr_pairs
num_exemplars = 2
num_eval = 40
exemplar_qr_pairs = random.sample(full_qr_pairs, num_exemplars)
eval_qr_pairs = random.sample(full_qr_pairs, num_eval)
len(exemplar_qr_pairs)
from llama_index.core.evaluation.eval_utils import get_responses
from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=OpenAI(model="gpt-3.5-turbo"))
evaluator_dict = {
"correctness": evaluator_c,
}
batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
async def get_correctness(query_engine, eval_qa_pairs, batch_runner):
eval_qs = [q for q, _ in eval_qa_pairs]
eval_answers = [a for _, a in eval_qa_pairs]
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
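    # Hedged continuation sketch: score the predictions against the reference answers
    # and return the mean correctness score.
    eval_results = await batch_runner.aevaluate_responses(
        eval_qs, responses=pred_responses, reference=eval_answers
    )
    scores = [r.score for r in eval_results["correctness"]]
    return sum(scores) / len(scores)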
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore=FirestoreDocumentStore(kvstore),
index_store=FirestoreIndexStore(kvstore),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore=FirestoreDocumentStore(kvstore),
index_store=FirestoreIndexStore(kvstore),
)
summary_index = load_index_from_storage(
storage_context=storage_context, index_id=list_id
)
vector_index = load_index_from_storage(
storage_context=storage_context, vector_id=vector_id
)
keyword_table_index = load_index_from_storage(
storage_context=storage_context, keyword_id=keyword_id
)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = chatgpt
Settings.chunk_size = 1024
query_engine = summary_index.as_query_engine()
list_response = query_engine.query("What is a summary of this document?")
| display_response(list_response) | llama_index.core.response.notebook_utils.display_response |
get_ipython().run_line_magic('pip', 'install llama-index-llms-monsterapi')
get_ipython().system('python3 -m pip install llama-index --quiet -y')
get_ipython().system('python3 -m pip install monsterapi --quiet')
get_ipython().system('python3 -m pip install sentence_transformers --quiet')
import os
from llama_index.llms.monsterapi import MonsterLLM
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
os.environ["MONSTER_API_KEY"] = ""
model = "llama2-7b-chat"
llm = MonsterLLM(model=model, temperature=0.75)
result = llm.complete("Who are you?")
print(result)
from llama_index.core.llms import ChatMessage
history_message = ChatMessage(
    **{
        "role": "user",
        "content": (
            "When asked 'who are you?' respond as 'I am qblocks llm model'"
            " every time."
        ),
    }
)
current_message = ChatMessage(**{"role": "user", "content": "Who are you?"})
response = llm.chat([history_message, current_message])
print(response)
get_ipython().system('python3 -m pip install pypdf --quiet')
get_ipython().system('rm -r ./data')
get_ipython().system('mkdir -p data && cd data && curl \'https://arxiv.org/pdf/2005.11401.pdf\' -o "RAG.pdf"')
documents = SimpleDirectoryReader("./data").load_data()
llm = | MonsterLLM(model=model, temperature=0.75, context_window=1024) | llama_index.llms.monsterapi.MonsterLLM |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.lancedb import LanceDBVectorStore
import textwrap
import openai
openai.api_key = ""
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id, "Document Hash:", documents[0].hash)
vector_store = LanceDBVectorStore(uri="/tmp/lancedb")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("How much did Viaweb charge per month?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))
del index
index = VectorStoreIndex.from_documents(
[ | Document(text="The sky is purple in Portland, Maine") | llama_index.core.Document |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import generate_question_context_pairs
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
node_parser = | SentenceSplitter(chunk_size=512) | llama_index.core.node_parser.SentenceSplitter |
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm')
get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference')
import hashlib
import json
from pathlib import Path
import os
import textwrap
from typing import List, Union
import llama_index.core
from llama_index.readers.web import SimpleWebPageReader
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.openinference import OpenInferenceCallbackHandler
from llama_index.callbacks.openinference.base import (
as_dataframe,
QueryData,
NodeData,
)
from llama_index.core.node_parser import SimpleNodeParser
import pandas as pd
from tqdm import tqdm
documents = SimpleWebPageReader().load_data(
[
"https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt"
]
)
print(documents[0].text)
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(documents)
print(nodes[0].text)
callback_handler = OpenInferenceCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llama_index.core.Settings.callback_manager = callback_manager
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
max_characters_per_line = 80
queries = [
"What did Paul Graham do growing up?",
"When and how did Paul Graham's mother die?",
"What, in Paul Graham's opinion, is the most distinctive thing about YC?",
"When and how did Paul Graham meet Jessica Livingston?",
"What is Bel, and when and where was it written?",
]
for query in queries:
response = query_engine.query(query)
print("Query")
print("=====")
print(textwrap.fill(query, max_characters_per_line))
print()
print("Response")
print("========")
print(textwrap.fill(str(response), max_characters_per_line))
print()
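# Flush the query data recorded by the OpenInference callback handler and render it as a DataFrame.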
query_data_buffer = callback_handler.flush_query_data_buffer()
query_dataframe = as_dataframe(query_data_buffer)
query_dataframe
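# Callback that writes the buffered OpenInference query data out to Parquet once the buffer reaches max_buffer_length.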
class ParquetCallback:
def __init__(
self, data_path: Union[str, Path], max_buffer_length: int = 1000
):
self._data_path = Path(data_path)
self._data_path.mkdir(parents=True, exist_ok=False)
self._max_buffer_length = max_buffer_length
self._batch_index = 0
def __call__(
self,
query_data_buffer: List[QueryData],
node_data_buffer: List[NodeData],
) -> None:
if len(query_data_buffer) >= self._max_buffer_length:
query_dataframe = | as_dataframe(query_data_buffer) | llama_index.callbacks.openinference.base.as_dataframe |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
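# Enable MMR (maximal marginal relevance) at query time so retrieved nodes balance relevance with diversity.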
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("What did the author do growing up?")
print(response)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
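# mmr_threshold tunes the trade-off: lower values weight diversity more heavily, values closer to 1 favor pure relevance.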
query_engine_with_threshold = index.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response = query_engine_with_threshold.query(
"What did the author do growing up?"
)
print(response)
index1 = VectorStoreIndex.from_documents(documents)
query_engine_no_mmr = index1.as_query_engine()
response_no_mmr = query_engine_no_mmr.query(
    "What did the author do growing up?"
)
index2 = | VectorStoreIndex.from_documents(documents) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"')
from llama_index.core import download_loader
from llama_index.readers.file import PyMuPDFReader
llama2_docs = | PyMuPDFReader() | llama_index.readers.file.PyMuPDFReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-portkey')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install -U llama_index')
get_ipython().system('pip install -U portkey-ai')
from llama_index.llms.portkey import Portkey
from llama_index.core.llms import ChatMessage
import portkey as pk
import os
os.environ["PORTKEY_API_KEY"] = "PORTKEY_API_KEY"
openai_virtual_key_a = ""
openai_virtual_key_b = ""
anthropic_virtual_key_a = ""
anthropic_virtual_key_b = ""
cohere_virtual_key_a = ""
cohere_virtual_key_b = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["ANTHROPIC_API_KEY"] = ""
portkey_client = Portkey(
mode="single",
)
openai_llm = pk.LLMOptions(
provider="openai",
model="gpt-4",
virtual_key=openai_virtual_key_a,
)
portkey_client.add_llms(openai_llm)
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
| ChatMessage(role="user", content="What can you do?") | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import nest_asyncio
nest_asyncio.apply()
from IPython.display import HTML, display
def set_css():
display(
HTML(
"""
<style>
pre {
white-space: pre-wrap;
}
</style>
"""
)
)
get_ipython().events.register("pre_run_cell", set_css)
get_ipython().system('mkdir data')
get_ipython().system('wget "https://www.dropbox.com/s/948jr9cfs7fgj99/UBER.zip?dl=1" -O data/UBER.zip')
get_ipython().system('unzip data/UBER.zip -d data')
from llama_index.readers.file import UnstructuredReader
from pathlib import Path
years = [2022, 2021, 2020, 2019]
loader = UnstructuredReader()
doc_set = {}
all_docs = []
for year in years:
year_docs = loader.load_data(
file=Path(f"./data/UBER/UBER_{year}.html"), split_documents=False
)
for d in year_docs:
d.metadata = {"year": year}
doc_set[year] = year_docs
all_docs.extend(year_docs)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.chunk_size = 512
Settings.chunk_overlap = 64
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
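# Build one vector index per year of Uber 10-K filings and persist each to its own storage directory.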
index_set = {}
for year in years:
storage_context = StorageContext.from_defaults()
cur_index = VectorStoreIndex.from_documents(
doc_set[year],
storage_context=storage_context,
)
index_set[year] = cur_index
storage_context.persist(persist_dir=f"./storage/{year}")
from llama_index.core import load_index_from_storage
index_set = {}
for year in years:
storage_context = StorageContext.from_defaults(
persist_dir=f"./storage/{year}"
)
cur_index = load_index_from_storage(
storage_context,
)
index_set[year] = cur_index
from llama_index.core.tools import QueryEngineTool, ToolMetadata
individual_query_engine_tools = [
QueryEngineTool(
query_engine=index_set[year].as_query_engine(),
metadata=ToolMetadata(
name=f"vector_index_{year}",
description=(
"useful for when you want to answer queries about the"
f" {year} SEC 10-K for Uber"
),
),
)
for year in years
]
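# Wrap the per-year tools in a sub-question engine that decomposes cross-year questions into per-year sub-queries.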
from llama_index.core.query_engine import SubQuestionQueryEngine
query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=individual_query_engine_tools,
)
query_engine_tool = QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name="sub_question_query_engine",
description=(
"useful for when you want to answer queries that require analyzing"
" multiple SEC 10-K documents for Uber"
),
),
)
tools = individual_query_engine_tools + [query_engine_tool]
from llama_index.agent.openai import OpenAIAgent
agent = | OpenAIAgent.from_tools(tools, verbose=True) | llama_index.agent.openai.OpenAIAgent.from_tools |
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm')
get_ipython().system('pip install llama-index')
import os
from llama_index.llms.litellm import LiteLLM
from llama_index.core.llms import ChatMessage
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["COHERE_API_KEY"] = "your-api-key"
message = ChatMessage(role="user", content="Hey! how's it going?")
llm = LiteLLM("gpt-3.5-turbo")
chat_response = llm.chat([message])
llm = LiteLLM("command-nightly")
chat_response = llm.chat([message])
from llama_index.core.llms import ChatMessage
from llama_index.llms.litellm import LiteLLM
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = LiteLLM("gpt-3.5-turbo").chat(messages)
print(resp)
from llama_index.llms.litellm import LiteLLM
llm = | LiteLLM("gpt-3.5-turbo") | llama_index.llms.litellm.LiteLLM |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
import logging
import sys
import os
import qdrant_client
from IPython.display import Markdown, display
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
client = qdrant_client.QdrantClient(
location=":memory:"
)
vector_store = | QdrantVectorStore(client=client, collection_name="paul_graham") | llama_index.vector_stores.qdrant.QdrantVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-ragatouille-retriever')
from llama_index.packs.ragatouille_retriever import RAGatouilleRetrieverPack
from llama_index.core.llama_pack import download_llama_pack
get_ipython().system('wget "https://arxiv.org/pdf/2004.12832.pdf" -O colbertv1.pdf')
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
reader = SimpleDirectoryReader(input_files=["colbertv1.pdf"])
docs = reader.load_data()
index_name = "my_index"
ragatouille_pack = RAGatouilleRetrieverPack(
docs, llm=OpenAI(model="gpt-3.5-turbo"), index_name=index_name, top_k=5
)
from llama_index.core.response.notebook_utils import display_source_node
retriever = ragatouille_pack.get_modules()["retriever"]
nodes = retriever.retrieve("How does ColBERTv2 compare with other BERT models?")
for node in nodes:
| display_source_node(node) | llama_index.core.response.notebook_utils.display_source_node |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import weaviate
resource_owner_config = weaviate.AuthClientPassword(
username="<username>",
password="<password>",
)
client = weaviate.Client("http://localhost:8080")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core.response.notebook_utils import display_response
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import StorageContext
vector_store = WeaviateVectorStore(weaviate_client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine(similarity_top_k=2)
response = query_engine.query("What did the author do growing up?")
| display_response(response) | llama_index.core.response.notebook_utils.display_response |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-tencentvectordb')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install tcvectordb')
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.vector_stores.tencentvectordb import TencentVectorDB
from llama_index.core.vector_stores.tencentvectordb import (
CollectionParams,
FilterField,
)
import tcvectordb
tcvectordb.debug.DebugEnable = False
import getpass
import openai
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
print(
f"First document, text ({len(documents[0].text)} characters):\n{'='*20}\n{documents[0].text[:360]} ..."
)
vector_store = TencentVectorDB(
url="http://10.0.X.X",
key="eC4bLRy2va******************************",
collection_params=CollectionParams(dimension=1536, drop_exists=True),
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Why did the author choose to work on AI?")
print(response)
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("Why did the author choose to work on AI?")
print(response)
new_vector_store = TencentVectorDB(
url="http://10.0.X.X",
key="eC4bLRy2va******************************",
collection_params=CollectionParams(dimension=1536, drop_exists=False),
)
new_index_instance = VectorStoreIndex.from_vector_store(
vector_store=new_vector_store
)
query_engine = new_index_instance.as_query_engine(similarity_top_k=5)
response = query_engine.query(
"What did the author study prior to working on AI?"
)
print(response)
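# Retrieve with MMR, prefetching a larger candidate pool (mmr_prefetch_factor) before diversity re-ranking.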
retriever = new_index_instance.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_prefetch_factor": 4},
)
nodes_with_scores = retriever.retrieve(
"What did the author study prior to working on AI?"
)
print(f"Found {len(nodes_with_scores)} nodes.")
for idx, node_with_score in enumerate(nodes_with_scores):
print(f" [{idx}] score = {node_with_score.score}")
print(f" id = {node_with_score.node.node_id}")
print(f" text = {node_with_score.node.text[:90]} ...")
print("Nodes' ref_doc_id:")
print("\n".join([nws.node.ref_doc_id for nws in nodes_with_scores]))
new_vector_store.delete(nodes_with_scores[0].node.ref_doc_id)
nodes_with_scores = retriever.retrieve(
"What did the author study prior to working on AI?"
)
print(f"Found {len(nodes_with_scores)} nodes.")
filter_fields = [
| FilterField(name="source_type") | llama_index.core.vector_stores.tencentvectordb.FilterField |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray')
get_ipython().system('pip install llama-index')
import os
import sys
import logging
import textwrap
import warnings
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from llama_index.core import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
Document,
)
from llama_index.vector_stores.docarray import DocArrayHnswVectorStore
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "<your openai key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = | SimpleDirectoryReader("./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('pip install "google-generativeai" -q')
from llama_index.core.llama_dataset import download_llama_dataset
evaluator_dataset, _ = download_llama_dataset(
"MiniMtBenchSingleGradingDataset", "./mini_mt_bench_data"
)
evaluator_dataset.to_pandas()[:5]
from llama_index.core.evaluation import CorrectnessEvaluator
from llama_index.llms.openai import OpenAI
from llama_index.llms.gemini import Gemini
from llama_index.llms.cohere import Cohere
llm_gpt4 = OpenAI(temperature=0, model="gpt-4")
llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo")
llm_gemini = Gemini(model="models/gemini-pro", temperature=0)
evaluators = {
"gpt-4": CorrectnessEvaluator(llm=llm_gpt4),
"gpt-3.5": CorrectnessEvaluator(llm=llm_gpt35),
"gemini-pro": | CorrectnessEvaluator(llm=llm_gemini) | llama_index.core.evaluation.CorrectnessEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-llms-xinference')
port = 9997 # replace with your endpoint port number
get_ipython().system('pip install llama-index')
from llama_index.core import SummaryIndex
from llama_index.core import (
TreeIndex,
VectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
SimpleDirectoryReader,
)
from llama_index.llms.xinference import Xinference
from xinference.client import RESTfulClient
from IPython.display import Markdown, display
client = RESTfulClient(f"http://localhost:{port}")
model_uid = client.launch_model(
model_name="llama-2-chat",
model_size_in_billions=7,
model_format="ggmlv3",
quantization="q2_K",
)
llm = Xinference(
endpoint=f"http://localhost:{port}",
model_uid=model_uid,
temperature=0.0,
max_tokens=512,
)
documents = SimpleDirectoryReader("../data/paul_graham").load_data()
index = | VectorStoreIndex.from_documents(documents=documents) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-llms-watsonx')
from llama_index.llms.watsonx import WatsonX
credentials = {
"url": "https://enter.your-ibm.url",
"apikey": "insert_your_api_key",
}
project_id = "insert_your_project_id"
resp = | WatsonX(credentials=credentials, project_id=project_id) | llama_index.llms.watsonx.WatsonX |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = | SimpleDirectoryReader("./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
import requests
import yaml
f = requests.get(
"https://raw.githubusercontent.com/sisbell/chatgpt-plugin-store/main/manifests/today-currency-converter.oiconma.repl.co.json"
).text
manifest = yaml.safe_load(f)
from llama_index.tools.chatgpt_plugin.base import ChatGPTPluginToolSpec
from llama_index.tools.requests.base import RequestsToolSpec
requests_spec = | RequestsToolSpec() | llama_index.tools.requests.base.RequestsToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().system('pip install llama-index')
from llama_index.llms.anthropic import Anthropic
from llama_index.core import Settings
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer
import os
os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"
from llama_index.llms.anthropic import Anthropic
llm = | Anthropic(model="claude-3-opus-20240229") | llama_index.llms.anthropic.Anthropic |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().system('pip install llama-index gradientai -q')
import os
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.finetuning import GradientFinetuneEngine
os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY")
os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>"
from pydantic import BaseModel
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.llms.openai import OpenAI
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
openai_handler = LlamaDebugHandler()
openai_callback = CallbackManager([openai_handler])
openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback)
gradient_handler = LlamaDebugHandler()
gradient_callback = CallbackManager([gradient_handler])
base_model_slug = "llama2-7b-chat"
gradient_llm = GradientBaseModelLLM(
base_model_slug=base_model_slug,
max_tokens=300,
callback_manager=gradient_callback,
is_chat_model=True,
)
from llama_index.core.llms import LLMMetadata
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
openai_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=openai_llm,
verbose=True,
)
gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=gradient_llm,
verbose=True,
)
response = openai_program(movie_name="The Shining")
print(str(response))
tmp = openai_handler.get_llm_inputs_outputs()
print(tmp[0][0].payload["messages"][0])
response = gradient_program(movie_name="The Shining")
print(str(response))
tmp = gradient_handler.get_llm_inputs_outputs()
print(tmp[0][0].payload["messages"][0])
from llama_index.core.program import LLMTextCompletionProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import GradientAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.output_parsers import PydanticOutputParser
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = GradientAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm_gpt4 = OpenAI(model="gpt-4", callback_manager=callback_manager)
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
openai_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=llm_gpt4,
verbose=True,
)
movie_names = [
"The Shining",
"The Departed",
"Titanic",
"Goodfellas",
"Pretty Woman",
"Home Alone",
"Caged Fury",
"Edward Scissorhands",
"Total Recall",
"Ghost",
"Tremors",
"RoboCop",
"Rocky V",
]
from tqdm.notebook import tqdm
for movie_name in tqdm(movie_names):
output = openai_program(movie_name=movie_name)
print(output.json())
events = finetuning_handler.get_finetuning_events()
events
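# Persist the captured GPT-4 prompt/completion pairs as a JSONL dataset for fine-tuning the Gradient model.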
finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl")
get_ipython().system('cat mock_finetune_songs.jsonl')
base_model_slug = "llama2-7b-chat"
base_llm = GradientBaseModelLLM(
base_model_slug=base_model_slug, max_tokens=500, is_chat_model=True
)
from llama_index.finetuning import GradientFinetuneEngine
finetune_engine = GradientFinetuneEngine(
base_model_slug=base_model_slug,
name="movies_structured",
data_path="mock_finetune_songs.jsonl",
verbose=True,
max_steps=200,
batch_size=1,
)
finetune_engine.model_adapter_id
epochs = 2
for i in range(epochs):
print(f"** EPOCH {i} **")
finetune_engine.finetune()
ft_llm = finetune_engine.get_finetuned_model(
max_tokens=500, is_chat_model=True
)
from llama_index.llms.gradient import GradientModelAdapterLLM
new_prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
Please only generate one album.
"""
gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=new_prompt_template_str,
llm=ft_llm,
verbose=True,
)
gradient_program(movie_name="Goodfellas")
gradient_program(movie_name="Chucky")
base_gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser= | PydanticOutputParser(Album) | llama_index.core.output_parsers.PydanticOutputParser |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.agent import (
CustomSimpleAgentWorker,
Task,
AgentChatResponse,
)
from typing import Dict, Any, List, Tuple, Optional
from llama_index.core.tools import BaseTool, QueryEngineTool
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core import ChatPromptTemplate, PromptTemplate
from llama_index.core.selectors import PydanticSingleSelector
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.llms import ChatMessage, MessageRole
DEFAULT_PROMPT_STR = """
Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \
a modified question that will not trigger the error.
Examples of modified questions:
- The question itself is modified to elicit a non-erroneous response
- The question is augmented with context that will help the downstream system better answer the question.
- The question is augmented with examples of negative responses, or other negative questions.
An error means that either an exception has triggered, or the response is completely irrelevant to the question.
Please return the evaluation of the response in the following JSON format.
"""
def get_chat_prompt_template(
system_prompt: str, current_reasoning: Tuple[str, str]
) -> ChatPromptTemplate:
system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt)
messages = [system_msg]
for raw_msg in current_reasoning:
if raw_msg[0] == "user":
messages.append(
ChatMessage(role=MessageRole.USER, content=raw_msg[1])
)
else:
messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1])
)
return ChatPromptTemplate(message_templates=messages)
class ResponseEval(BaseModel):
"""Evaluation of whether the response has an error."""
has_error: bool = Field(
..., description="Whether the response has an error."
)
new_question: str = Field(..., description="The suggested new question.")
explanation: str = Field(
...,
description=(
"The explanation for the error as well as for the new question."
"Can include the direct stack trace as well."
),
)
from llama_index.core.bridge.pydantic import PrivateAttr
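# Agent worker that wraps a router query engine with a retry loop: after each response an LLM judges whether an error occurred and, if so, proposes a rewritten question for the next step.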
class RetryAgentWorker(CustomSimpleAgentWorker):
"""Agent worker that adds a retry layer on top of a router.
Continues iterating until there's no errors / task is done.
"""
prompt_str: str = Field(default=DEFAULT_PROMPT_STR)
max_iterations: int = Field(default=10)
_router_query_engine: RouterQueryEngine = PrivateAttr()
def __init__(self, tools: List[BaseTool], **kwargs: Any) -> None:
"""Init params."""
for tool in tools:
if not isinstance(tool, QueryEngineTool):
raise ValueError(
f"Tool {tool.metadata.name} is not a query engine tool."
)
self._router_query_engine = RouterQueryEngine(
selector=PydanticSingleSelector.from_defaults(),
query_engine_tools=tools,
verbose=kwargs.get("verbose", False),
)
super().__init__(
tools=tools,
**kwargs,
)
def _initialize_state(self, task: Task, **kwargs: Any) -> Dict[str, Any]:
"""Initialize state."""
return {"count": 0, "current_reasoning": []}
def _run_step(
self, state: Dict[str, Any], task: Task, input: Optional[str] = None
) -> Tuple[AgentChatResponse, bool]:
"""Run step.
Returns:
Tuple of (agent_response, is_done)
"""
if "new_input" not in state:
new_input = task.input
else:
new_input = state["new_input"]
response = self._router_query_engine.query(new_input)
state["current_reasoning"].extend(
[("user", new_input), ("assistant", str(response))]
)
chat_prompt_tmpl = get_chat_prompt_template(
self.prompt_str, state["current_reasoning"]
)
llm_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(output_cls=ResponseEval),
prompt=chat_prompt_tmpl,
llm=self.llm,
)
response_eval = llm_program(
query_str=new_input, response_str=str(response)
)
if not response_eval.has_error:
is_done = True
else:
is_done = False
state["new_input"] = response_eval.new_question
if self.verbose:
print(f"> Question: {new_input}")
print(f"> Response: {response}")
print(f"> Response eval: {response_eval.dict()}")
return AgentChatResponse(response=str(response)), is_done
def _finalize_task(self, state: Dict[str, Any], **kwargs) -> None:
"""Finalize task."""
pass
from llama_index.core.tools import QueryEngineTool
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
from llama_index.core import SQLDatabase
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
from llama_index.core.query_engine import NLSQLTableQueryEngine
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database, tables=["city_stats"], verbose=True
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
description=(
"Useful for translating a natural language query into a SQL query over"
" a table containing: city_stats, containing the population/country of"
" each city"
),
)
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core import VectorStoreIndex
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = | WikipediaReader() | llama_index.readers.wikipedia.WikipediaReader |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/lyft"
)
lyft_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/uber"
)
uber_index = | load_index_from_storage(storage_context) | llama_index.core.load_index_from_storage |
get_ipython().system('pip install llama-index llama-hub rank-bm25')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('wget "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" -O paul_graham_essay.txt')
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
reader = | SimpleDirectoryReader(input_files=["paul_graham_essay.txt"]) | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
| HTMLNodeParser.from_defaults() | llama_index.core.node_parser.HTMLNodeParser.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.agent import (
CustomSimpleAgentWorker,
Task,
AgentChatResponse,
)
from typing import Dict, Any, List, Tuple, Optional
from llama_index.core.tools import BaseTool, QueryEngineTool
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core import ChatPromptTemplate, PromptTemplate
from llama_index.core.selectors import PydanticSingleSelector
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.llms import ChatMessage, MessageRole
DEFAULT_PROMPT_STR = """
Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \
a modified question that will not trigger the error.
Examples of modified questions:
- The question itself is modified to elicit a non-erroneous response
- The question is augmented with context that will help the downstream system better answer the question.
- The question is augmented with examples of negative responses, or other negative questions.
An error means that either an exception has triggered, or the response is completely irrelevant to the question.
Please return the evaluation of the response in the following JSON format.
"""
def get_chat_prompt_template(
system_prompt: str, current_reasoning: Tuple[str, str]
) -> ChatPromptTemplate:
system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt)
messages = [system_msg]
for raw_msg in current_reasoning:
if raw_msg[0] == "user":
messages.append(
ChatMessage(role=MessageRole.USER, content=raw_msg[1])
)
else:
messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1])
)
return ChatPromptTemplate(message_templates=messages)
class ResponseEval(BaseModel):
"""Evaluation of whether the response has an error."""
has_error: bool = Field(
..., description="Whether the response has an error."
)
new_question: str = | Field(..., description="The suggested new question.") | llama_index.core.bridge.pydantic.Field |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.vector_stores.pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import StorageContext
splitter = SentenceSplitter(chunk_size=1024)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, transformations=[splitter], storage_context=storage_context
)
query_str = "Can you tell me about the key concepts for safety finetuning"
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
query_embedding = embed_model.get_query_embedding(query_str)
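# Query the vector store directly with the raw query embedding, bypassing the higher-level query engine.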
from llama_index.core.vector_stores import VectorStoreQuery
query_mode = "default"
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
query_result = vector_store.query(vector_store_query)
query_result
from llama_index.core.schema import NodeWithScore
from typing import Optional
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
from llama_index.core.response.notebook_utils import display_source_node
for node in nodes_with_scores:
display_source_node(node, source_length=1000)
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List
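# A minimal retriever over the Pinecone vector store: embed the query, issue a VectorStoreQuery, and wrap the results as NodeWithScore objects.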
class PineconeRetriever(BaseRetriever):
"""Retriever over a pinecone vector store."""
def __init__(
self,
vector_store: PineconeVectorStore,
embed_model: Any,
query_mode: str = "default",
similarity_top_k: int = 2,
) -> None:
"""Init params."""
self._vector_store = vector_store
self._embed_model = embed_model
self._query_mode = query_mode
self._similarity_top_k = similarity_top_k
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
        query_embedding = self._embed_model.get_query_embedding(
            query_bundle.query_str
        )
        vector_store_query = VectorStoreQuery(
            query_embedding=query_embedding,
            similarity_top_k=self._similarity_top_k,
            mode=self._query_mode,
        )
        query_result = self._vector_store.query(vector_store_query)
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
return nodes_with_scores
retriever = PineconeRetriever(
vector_store, embed_model, query_mode="default", similarity_top_k=2
)
retrieved_nodes = retriever.retrieve(query_str)
for node in retrieved_nodes:
display_source_node(node, source_length=1000)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = | RetrieverQueryEngine.from_args(retriever) | llama_index.core.query_engine.RetrieverQueryEngine.from_args |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
from pathlib import Path
import requests
wiki_titles = [
"batman",
"Vincent van Gogh",
"San Francisco",
"iPhone",
"Tesla Model S",
"BTS",
]
data_path = Path("data_wiki")
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
import wikipedia
import urllib.request
image_path = Path("data_wiki")
image_uuid = 0
image_metadata_dict = {}
MAX_IMAGES_PER_WIKI = 30
wiki_titles = [
"San Francisco",
"Batman",
"Vincent van Gogh",
"iPhone",
"Tesla Model S",
"BTS band",
]
if not image_path.exists():
Path.mkdir(image_path)
for title in wiki_titles:
images_per_wiki = 0
print(title)
try:
page_py = wikipedia.page(title)
list_img_urls = page_py.images
for url in list_img_urls:
if url.endswith(".jpg") or url.endswith(".png"):
image_uuid += 1
image_file_name = title + "_" + url.split("/")[-1]
image_metadata_dict[image_uuid] = {
"filename": image_file_name,
"img_path": "./" + str(image_path / f"{image_uuid}.jpg"),
}
urllib.request.urlretrieve(
url, image_path / f"{image_uuid}.jpg"
)
images_per_wiki += 1
if images_per_wiki > MAX_IMAGES_PER_WIKI:
break
    except Exception:
        print(f"No images found for Wikipedia page: {title}")
        continue
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
import qdrant_client
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core.indices import MultiModalVectorStoreIndex
client = qdrant_client.QdrantClient(path="qdrant_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
documents = | SimpleDirectoryReader("./data_wiki/") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import QueryEngineTool, ToolMetadata
llm_35 = OpenAI(model="gpt-3.5-turbo-0613", temperature=0.3)
llm_4 = OpenAI(model="gpt-4-0613", temperature=0.3)
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/march"
)
march_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/june"
)
june_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/sept"
)
sept_index = load_index_from_storage(storage_context)
index_loaded = True
except:
index_loaded = False
if not index_loaded:
march_docs = SimpleDirectoryReader(
input_files=["../../data/10q/uber_10q_march_2022.pdf"]
).load_data()
june_docs = SimpleDirectoryReader(
input_files=["../../data/10q/uber_10q_june_2022.pdf"]
).load_data()
sept_docs = SimpleDirectoryReader(
input_files=["../../data/10q/uber_10q_sept_2022.pdf"]
).load_data()
march_index = VectorStoreIndex.from_documents(
march_docs,
)
june_index = VectorStoreIndex.from_documents(
june_docs,
)
sept_index = VectorStoreIndex.from_documents(
sept_docs,
)
march_index.storage_context.persist(persist_dir="./storage/march")
june_index.storage_context.persist(persist_dir="./storage/june")
sept_index.storage_context.persist(persist_dir="./storage/sept")
march_engine = march_index.as_query_engine(similarity_top_k=3, llm=llm_35)
june_engine = june_index.as_query_engine(similarity_top_k=3, llm=llm_35)
sept_engine = sept_index.as_query_engine(similarity_top_k=3, llm=llm_35)
from llama_index.core.tools import QueryEngineTool
query_tool_sept = QueryEngineTool.from_defaults(
query_engine=sept_engine,
name="sept_2022",
description=(
f"Provides information about Uber quarterly financials ending"
f" September 2022"
),
)
query_tool_june = QueryEngineTool.from_defaults(
query_engine=june_engine,
name="june_2022",
description=(
f"Provides information about Uber quarterly financials ending June"
f" 2022"
),
)
query_tool_march = QueryEngineTool.from_defaults(
query_engine=march_engine,
name="march_2022",
description=(
f"Provides information about Uber quarterly financials ending March"
f" 2022"
),
)
query_engine_tools = [query_tool_march, query_tool_june, query_tool_sept]
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-0613")
base_agent = ReActAgent.from_tools(query_engine_tools, llm=llm, verbose=True)
response = base_agent.chat(
"Analyze Uber revenue growth over the last few quarters"
)
print(str(response))
print(str(response))
response = base_agent.chat(
"Can you tell me about the risk factors in the quarter with the highest"
" revenue growth?"
)
print(str(response))
from llama_index.core.evaluation import DatasetGenerator
base_question_gen_query = (
    "You are a Teacher/Professor. Your task is to set up a quiz/examination."
    " Using the provided context from the Uber March 10Q filing, formulate a"
    " single question that captures an important fact from the"
    " context. Restrict the question to the context information provided."
)
dataset_generator = DatasetGenerator.from_documents(
march_docs,
question_gen_query=base_question_gen_query,
llm=llm_35,
)
questions = dataset_generator.generate_questions_from_nodes(num=20)
questions
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
vary_question_tmpl = """\
You are a financial assistant. Given a question over a 2023 Uber 10Q filing, your goal
is to generate up to {num_vary} variations of that question that might span multiple 10Q's.
This can include compare/contrasting different 10Qs, replacing the current quarter with
another quarter, or generating questions that can only be answered over multiple quarters (be creative!)
You are given a valid set of 10Q filings. Please only generate question variations that can be
answered in that set.
For example:
Base Question: What was the free cash flow of Uber in March 2023?
Valid 10Qs: [March 2023, June 2023, September 2023]
Question Variations:
What was the free cash flow of Uber in June 2023?
Can you compare/contrast the free cash flow of Uber in June/September 2023 and offer explanations for the change?
Did the free cash flow of Uber increase or decrease in 2023?
Now let's give it a shot!
Base Question: {base_question}
Valid 10Qs: {valid_10qs}
Question Variations:
"""
def gen_question_variations(base_questions, num_vary=3):
"""Generate question variations."""
VALID_10Q_STR = "[March 2022, June 2022, September 2022]"
llm = OpenAI(model="gpt-4")
prompt_tmpl = PromptTemplate(vary_question_tmpl)
new_questions = []
for idx, question in enumerate(base_questions):
new_questions.append(question)
response = llm.complete(
prompt_tmpl.format(
num_vary=num_vary,
base_question=question,
valid_10qs=VALID_10Q_STR,
)
)
raw_lines = str(response).split("\n")
cur_new_questions = [l for l in raw_lines if l != ""]
print(f"[{idx}] Original Question: {question}")
print(f"[{idx}] Generated Question Variations: {cur_new_questions}")
new_questions.extend(cur_new_questions)
return new_questions
def save_questions(questions, path):
with open(path, "w") as f:
for question in questions:
f.write(question + "\n")
def load_questions(path):
questions = []
with open(path, "r") as f:
for line in f:
questions.append(line.strip())
return questions
new_questions = gen_question_variations(questions)
len(new_questions)
train_questions, eval_questions = new_questions[:60], new_questions[60:]
save_questions(train_questions, "train_questions_10q.txt")
save_questions(eval_questions, "eval_questions_10q.txt")
train_questions = load_questions("train_questions_10q.txt")
eval_questions = load_questions("eval_questions_10q.txt")
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.agent import ReActAgent
finetuning_handler = | OpenAIFineTuningHandler() | llama_index.finetuning.callbacks.OpenAIFineTuningHandler |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))
from typing import List
from pydantic import BaseModel, Field
from llama_index.core.output_parsers import PydanticOutputParser
class Movie(BaseModel):
"""Object representing a single movie."""
name: str = Field(..., description="Name of the movie.")
year: int = Field(..., description="Year of the movie.")
class Movies(BaseModel):
"""Object representing a list of movies."""
movies: List[Movie] = Field(..., description="List of movies.")
llm = OpenAI(model="gpt-3.5-turbo")
output_parser = PydanticOutputParser(Movies)
json_prompt_str = """\
Please generate related movies to {movie_name}. Output with the following JSON format:
"""
json_prompt_str = output_parser.format(json_prompt_str)
json_prompt_tmpl = | PromptTemplate(json_prompt_str) | llama_index.core.PromptTemplate |
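# A minimal sketch of chaining the pieces above, assuming the template assignment is
# completed as shown: prompt -> LLM -> Pydantic output parser in one QueryPipeline.
p_structured = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)
output = p_structured.run(movie_name="Toy Story")
print(output)  # a Movies object parsed from the model's JSON answer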
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.schema import TextNode
from llama_index.core.indices.managed.types import ManagedIndexQueryMode
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.indices.managed.vectara import VectaraAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.llms.openai import OpenAI
nodes = [
TextNode(
text=(
"A pragmatic paleontologist touring an almost complete theme park on an island "
+ "in Central America is tasked with protecting a couple of kids after a power "
+ "failure causes the park's cloned dinosaurs to run loose."
),
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
TextNode(
text=(
"A thief who steals corporate secrets through the use of dream-sharing technology "
+ "is given the inverse task of planting an idea into the mind of a C.E.O., "
+ "but his tragic past may doom the project and his team to disaster."
),
metadata={
"year": 2010,
"director": "Christopher Nolan",
"rating": 8.2,
},
),
TextNode(
text="Barbie suffers a crisis that leads her to question her world and her existence.",
metadata={
"year": 2023,
"director": "Greta Gerwig",
"genre": "fantasy",
"rating": 9.5,
},
),
TextNode(
text=(
"A cowboy doll is profoundly threatened and jealous when a new spaceman action "
+ "figure supplants him as top toy in a boy's bedroom."
),
metadata={"year": 1995, "genre": "animated", "rating": 8.3},
),
TextNode(
text=(
"When Woody is stolen by a toy collector, Buzz and his friends set out on a "
+ "rescue mission to save Woody before he becomes a museum toy property with his "
+ "roundup gang Jessie, Prospector, and Bullseye. "
),
metadata={"year": 1999, "genre": "animated", "rating": 7.9},
),
TextNode(
text=(
"The toys are mistakenly delivered to a day-care center instead of the attic "
+ "right before Andy leaves for college, and it's up to Woody to convince the "
+ "other toys that they weren't abandoned and to return home."
),
metadata={"year": 2010, "genre": "animated", "rating": 8.3},
),
]
index = | VectaraIndex(nodes=nodes) | llama_index.indices.managed.vectara.VectaraIndex |
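# A hedged sketch of the auto-retrieval setup the imports above point to: describe
# the metadata schema with VectorStoreInfo, then let VectaraAutoRetriever infer
# filters from the query. The constructor arguments are assumptions, not from this row.
vector_store_info = VectorStoreInfo(
    content_info="information about movies",
    metadata_info=[
        MetadataInfo(name="year", type="int", description="Release year"),
        MetadataInfo(name="director", type="str", description="Name of the director"),
        MetadataInfo(name="rating", type="float", description="A 1-10 rating"),
        MetadataInfo(name="genre", type="str", description="Movie genre"),
    ],
)
auto_retriever = VectaraAutoRetriever(
    index,
    vector_store_info=vector_store_info,
    llm=OpenAI(model="gpt-4"),
    verbose=True,
)
auto_retriever.retrieve("movies directed by Christopher Nolan rated above 8")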
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
import os
OPENAI_API_TOKEN = ""
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from pathlib import Path
input_image_path = Path("input_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1nUhsBRiSWxcVQv8t8Cvvro8HJZ88LCzj" -O ./input_images/long_range_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=19pLwx0nVqsop7lo0ubUSYTzQfMtKJJtJ" -O ./input_images/model_y.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1utu3iD9XEgR5Sb7PrbtMf1qw8T1WdNmF" -O ./input_images/performance_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1dpUakWMqaXR4Jjn1kHuZfB0pAXvjn2-i" -O ./input_images/price.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1qNeT201QAesnAP5va1ty0Ky5Q_jKkguV" -O ./input_images/real_wheel_spec.png')
from PIL import Image
import matplotlib.pyplot as plt
import os
image_paths = []
for img_path in os.listdir("./input_images"):
image_paths.append(str(os.path.join("./input_images", img_path)))
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(2, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= 9:
break
plot_images(image_paths)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader("./input_images").load_data()
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500
)
response_1 = openai_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(response_1)
response_2 = openai_mm_llm.complete(
prompt="Can you tell me what is the price with each spec?",
image_documents=image_documents,
)
print(response_2)
import requests
def get_wikipedia_images(title):
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "imageinfo",
"iiprop": "url|dimensions|mime",
"generator": "images",
"gimlimit": "50",
},
).json()
image_urls = []
for page in response["query"]["pages"].values():
if page["imageinfo"][0]["url"].endswith(".jpg") or page["imageinfo"][
0
]["url"].endswith(".png"):
image_urls.append(page["imageinfo"][0]["url"])
return image_urls
from pathlib import Path
import requests
import urllib.request
image_uuid = 0
image_metadata_dict = {}
MAX_IMAGES_PER_WIKI = 20
wiki_titles = {
"Tesla Model Y",
"Tesla Model X",
"Tesla Model 3",
"Tesla Model S",
"Kia EV6",
"BMW i3",
"Audi e-tron",
"Ford Mustang",
"Porsche Taycan",
"Rivian",
"Polestar",
}
data_path = Path("mixed_wiki")
if not data_path.exists():
Path.mkdir(data_path)
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
images_per_wiki = 0
try:
list_img_urls = get_wikipedia_images(title)
for url in list_img_urls:
if (
url.endswith(".jpg")
or url.endswith(".png")
or url.endswith(".svg")
):
image_uuid += 1
urllib.request.urlretrieve(
url, data_path / f"{image_uuid}.jpg"
)
images_per_wiki += 1
if images_per_wiki > MAX_IMAGES_PER_WIKI:
break
except Exception:
print(f"No images found for Wikipedia page: {title}")
continue
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O ./mixed_wiki/tesla_2021_10k.htm')
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext
import qdrant_client
from llama_index.core import SimpleDirectoryReader
client = qdrant_client.QdrantClient(path="qdrant_mm_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
documents = | SimpleDirectoryReader("./mixed_wiki/") | llama_index.core.SimpleDirectoryReader |
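# A sketch of the multi-modal index build these stores feed into (assumes
# `documents` holds the loaded ./mixed_wiki files; top-k values are illustrative):
index = MultiModalVectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)
retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=3)
retrieval_results = retriever.retrieve("Tell me more about the Tesla Model S")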
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.packs.koda_retriever import KodaRetriever
from llama_index.core.evaluation import RetrieverEvaluator
from llama_index.core import SimpleDirectoryReader
import os
from pinecone import Pinecone
from llama_index.core.node_parser import SemanticSplitterNodeParser
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.evaluation import generate_qa_embedding_pairs
import pandas as pd
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("llama2-paper") # this was previously created in my pinecone account
Settings.llm = | OpenAI() | llama_index.llms.openai.OpenAI |
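# A heavily hedged sketch of the KodaRetriever wiring (argument names are taken
# from the pack's documentation and are assumptions here): wrap the existing
# Pinecone index, then let the retriever pick a hybrid-search alpha per query.
vector_store = PineconeVectorStore(pinecone_index=index)
vector_index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
retriever = KodaRetriever(index=vector_index, llm=Settings.llm, verbose=True)
results = retriever.retrieve("How was Llama 2 pretrained?")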
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
get_ipython().system('mkdir -p data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
from llama_index.core.node_parser import SentenceSplitter
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = | CohereRerank(top_n=10) | llama_index.postprocessor.cohere_rerank.CohereRerank |
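# A sketch of attaching the reranker to the recursive retriever; a
# RetrieverQueryEngine with node_postprocessors is the standard way to do this.
from llama_index.core.query_engine import RetrieverQueryEngine

query_engine = RetrieverQueryEngine.from_args(
    retriever, node_postprocessors=[reranker], llm=llm
)
response = query_engine.query(
    "Tell me about the main aspects of safety fine-tuning"
)
print(str(response))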
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import openai
import os
os.environ["OPENAI_API_KEY"] = "[You API key]"
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp-free")
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.core import StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import VectorStoreIndex
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="wiki_cities"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
vector_index = VectorStoreIndex([], storage_context=storage_context)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
from llama_index.core import SQLDatabase
sql_database = | SQLDatabase(engine, include_tables=["city_stats"]) | llama_index.core.SQLDatabase |
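# A minimal sketch of the text-to-SQL side this SQLDatabase feeds into; pairing it
# with the vector index above into a combined SQL + vector engine would follow.
from llama_index.core.query_engine import NLSQLTableQueryEngine

sql_query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database, tables=["city_stats"]
)
print(sql_query_engine.query("Which city has the highest population?"))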
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=500)
html_parser = HTMLNodeParser.from_defaults()
parser_dict = {
"sent_parser_o0": sent_parser_o0,
"sent_parser_o200": sent_parser_o200,
"sent_parser_o500": sent_parser_o500,
}
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
pipeline_dict = {}
for k, parser in parser_dict.items():
pipeline = IngestionPipeline(
documents=docs,
transformations=[
html_parser,
parser,
OpenAIEmbedding(),
],
)
pipeline_dict[k] = pipeline
eval_results_dict = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict[k] = eval_results
import pickle
pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
eval_results_list = list(eval_results_dict.items())
results_df = get_results_df(
[v for _, v in eval_results_list],
[k for k, _ in eval_results_list],
["correctness", "semantic_similarity"],
)
display(results_df)
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
from llama_index.core.extractors import (
TitleExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
)
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
extractor_dict = {
"summary": SummaryExtractor(in_place=False),
"qa": QuestionsAnsweredExtractor(in_place=False),
"default": None,
}
html_parser = HTMLNodeParser.from_defaults()
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
pipeline_dict = {}
html_parser = HTMLNodeParser.from_defaults()
for k, extractor in extractor_dict.items():
if k == "default":
transformations = [
html_parser,
sent_parser_o200,
OpenAIEmbedding(),
]
else:
transformations = [
html_parser,
sent_parser_o200,
extractor,
OpenAIEmbedding(),
]
pipeline = | IngestionPipeline(transformations=transformations) | llama_index.core.ingestion.IngestionPipeline |
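# A sketch of the evaluation sweep these extractor pipelines feed into, mirroring
# the parser comparison earlier in this row (assumes `pipeline_dict[k] = pipeline`
# is set inside the loop above).
extractor_eval_results = {}
for k, pipeline in pipeline_dict.items():
    eval_results = await run_evals(
        pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
    )
    extractor_eval_results[k] = eval_results
extractor_results_df = get_results_df(
    list(extractor_eval_results.values()),
    list(extractor_eval_results.keys()),
    ["correctness", "semantic_similarity"],
)
display(extractor_results_df)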
get_ipython().run_line_magic('pip', 'install llama-index-llms-ai21')
get_ipython().system('pip install llama-index')
from llama_index.llms.ai21 import AI21
api_key = "Your api key"
resp = AI21(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.ai21 import AI21
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
| ChatMessage(role="user", content="What is your name") | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-packs-node-parser-semantic-chunking')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-node-parser-semantic-chunking-base')
from llama_index.core import SimpleDirectoryReader
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'pg_essay.txt'")
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
from llama_index.packs.node_parser_semantic_chunking.base import SemanticChunker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"SemanticChunkingQueryEnginePack",
"./semantic_chunking_pack",
skip_load=True,
)
from semantic_chunking_pack.base import SemanticChunker
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
splitter = SemanticChunker(
buffer_size=1, breakpoint_percentile_threshold=95, embed_model=embed_model
)
base_splitter = SentenceSplitter(chunk_size=512)
nodes = splitter.get_nodes_from_documents(documents)
print(nodes[1].get_content())
print(nodes[2].get_content())
print(nodes[3].get_content())
base_nodes = base_splitter.get_nodes_from_documents(documents)
print(base_nodes[2].get_content())
from llama_index.core import VectorStoreIndex
from llama_index.core.response.notebook_utils import display_source_node
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
base_vector_index = | VectorStoreIndex(base_nodes) | llama_index.core.VectorStoreIndex |
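# A short sketch comparing the semantic-chunk index against the fixed-size
# baseline on the same question (the question is illustrative):
base_query_engine = base_vector_index.as_query_engine()
question = "Tell me about the author's programming journey through childhood to college"
print(query_engine.query(question))
print(base_query_engine.query(question))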
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks')
get_ipython().run_line_magic('pip', 'install llama-index')
from llama_index.llms.fireworks import Fireworks
resp = Fireworks().complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.fireworks import Fireworks
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Fireworks().chat(messages)
print(resp)
from llama_index.llms.fireworks import Fireworks
llm = Fireworks()
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.fireworks import Fireworks
from llama_index.core.llms import ChatMessage
llm = Fireworks()
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
| ChatMessage(role="user", content="What is your name") | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.evaluation.benchmarks import HotpotQAEvaluator
from llama_index.core import VectorStoreIndex
from llama_index.core import Document
from llama_index.llms.openai import OpenAI
from llama_index.core.embeddings import resolve_embed_model
llm = | OpenAI(model="gpt-3.5-turbo") | llama_index.llms.openai.OpenAI |
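# A hedged sketch of running the benchmark: build a deliberately trivial engine
# over a placeholder document and let HotpotQAEvaluator score it on a handful of
# questions (the argument names follow the benchmark API as I understand it).
index = VectorStoreIndex.from_documents([Document.example()])
engine = index.as_query_engine(llm=llm)
HotpotQAEvaluator().run(engine, queries=5, show_result=True)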
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.postprocessor import (
PIINodePostprocessor,
NERPIINodePostprocessor,
)
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.schema import TextNode
text = """
Hello Paulo Santos. The latest statement for your credit card account \
1111-0000-1111-0000 was mailed to 123 Any Street, Seattle, WA 98109.
"""
node = TextNode(text=text)
processor = NERPIINodePostprocessor()
from llama_index.core.schema import NodeWithScore
new_nodes = processor.postprocess_nodes([NodeWithScore(node=node)])
new_nodes[0].node.get_text()
new_nodes[0].node.metadata["__pii_node_info__"]
from llama_index.llms.openai import OpenAI
processor = PIINodePostprocessor(llm=OpenAI())
from llama_index.core.schema import NodeWithScore
new_nodes = processor.postprocess_nodes([ | NodeWithScore(node=node) | llama_index.core.schema.NodeWithScore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install "google-generativeai" -q')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.llama_dataset import download_llama_dataset
pairwise_evaluator_dataset, _ = download_llama_dataset(
"MtBenchHumanJudgementDataset", "./mt_bench_data"
)
pairwise_evaluator_dataset.to_pandas()[:5]
from llama_index.core.evaluation import PairwiseComparisonEvaluator
from llama_index.llms.openai import OpenAI
from llama_index.llms.gemini import Gemini
from llama_index.llms.cohere import Cohere
llm_gpt4 = OpenAI(temperature=0, model="gpt-4")
llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo")
llm_gemini = Gemini(model="models/gemini-pro", temperature=0)
evaluators = {
"gpt-4": | PairwiseComparisonEvaluator(llm=llm_gpt4) | llama_index.core.evaluation.PairwiseComparisonEvaluator |
from llama_index.tools.waii import WaiiToolSpec
waii_tool = WaiiToolSpec(
url="https://tweakit.waii.ai/api/",
api_key="3........",
database_key="snowflake://....",
verbose=True,
)
from llama_index import VectorStoreIndex
documents = waii_tool.load_data("Get all tables with their number of columns")
index = VectorStoreIndex.from_documents(documents).as_query_engine()
index.query(
"Which table contains most columns, tell me top 5 tables with number of columns?"
).response
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
agent = OpenAIAgent.from_tools(
waii_tool.to_tool_list(), llm= | OpenAI(model="gpt-4-1106-preview") | llama_index.llms.OpenAI |
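# A short usage sketch, assuming the agent construction above is completed:
print(agent.chat("Which table has the most columns, and how many?"))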
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.azure_speech.base import AzureSpeechToolSpec
from llama_index.tools.azure_translate.base import AzureTranslateToolSpec
speech_tool = | AzureSpeechToolSpec(speech_key="your-key", region="eastus") | llama_index.tools.azure_speech.base.AzureSpeechToolSpec |
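# A loosely sketched continuation (the constructor arguments for the translate
# tool are assumptions): combine the speech and translate tool specs in one agent.
translate_tool = AzureTranslateToolSpec(api_key="your-key", region="eastus")
agent = OpenAIAgent.from_tools(
    [*speech_tool.to_tool_list(), *translate_tool.to_tool_list()],
    verbose=True,
)
print(agent.chat('Say "hello world" in five different languages'))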
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import DatasetGenerator
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
import random
random.seed(42)
random.shuffle(documents)
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
question_gen_query = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context from a "
"report on climate change and the oceans, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
dataset_generator = DatasetGenerator.from_documents(
documents[:50],
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("train_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
dataset_generator = DatasetGenerator.from_documents(
documents[
50:
], # the first 50 documents were used to generate the training questions, so skip them here
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("eval_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex, Settings
Settings.context_window = 2048
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt_35_llm)
contexts = []
answers = []
for question in questions:
response = query_engine.query(question)
contexts.append([x.node.get_content() for x in response.source_nodes])
answers.append(str(response))
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
ds = Dataset.from_dict(
{
"question": questions,
"answer": answers,
"contexts": contexts,
}
)
result = evaluate(ds, [answer_relevancy, faithfulness])
print(result)
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
finetuning_handler = | OpenAIFineTuningHandler() | llama_index.core.callbacks.OpenAIFineTuningHandler |
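# A sketch of the data-collection step the handler above enables: answer the
# training questions with GPT-4 while the handler records every call, then dump
# the log to a fine-tuning file (the file name is illustrative).
callback_manager = CallbackManager([finetuning_handler])
llm_gpt4 = OpenAI(model="gpt-4", callback_manager=callback_manager)
gpt4_query_engine = index.as_query_engine(similarity_top_k=2, llm=llm_gpt4)
train_questions = []
with open("train_questions.txt", "r") as f:
    for line in f:
        train_questions.append(line.strip())
for question in train_questions:
    gpt4_query_engine.query(question)
finetuning_handler.save_finetuning_events("finetuning_events.jsonl")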
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, Document
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
from llama_index.core.evaluation import CorrectnessEvaluator
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
uber_docs0 = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
uber_doc = Document(text="\n\n".join([d.get_content() for d in uber_docs0]))
from llama_index.core.utils import globals_helper
num_tokens = len(globals_helper.tokenizer(uber_doc.get_content()))
print(f"NUM TOKENS: {num_tokens}")
context_str = "Jerry's favorite snack is Hot Cheetos."
query_str = "What is Jerry's favorite snack?"
def augment_doc(doc_str, context, position):
"""Augment doc with additional context at a given position."""
doc_str1 = doc_str[:position]
doc_str2 = doc_str[position:]
return f"{doc_str1}...\n\n{context}\n\n...{doc_str2}"
test_str = augment_doc(
uber_doc.get_content(), context_str, int(0.5 * len(uber_doc.get_content()))
)
async def run_experiments(
doc, position_percentiles, context_str, query, llm, response_mode="compact"
):
eval_llm = OpenAI(model="gpt-4-1106-preview")
correctness_evaluator = CorrectnessEvaluator(llm=eval_llm)
eval_scores = {}
for idx, position_percentile in enumerate(position_percentiles):
print(f"Position percentile: {position_percentile}")
position_idx = int(position_percentile * len(doc.get_content()))
new_doc_str = augment_doc(
doc.get_content(), context_str, position_idx
)
new_doc = Document(text=new_doc_str)
index = SummaryIndex.from_documents(
[new_doc],
)
query_engine = index.as_query_engine(
response_mode=response_mode, llm=llm
)
print(f"Query: {query}")
response = query_engine.query(query)
print(f"Response: {str(response)}")
eval_result = correctness_evaluator.evaluate(
query=query, response=str(response), reference=context_str
)
eval_score = eval_result.score
print(f"Eval score: {eval_score}")
eval_scores[position_percentile] = eval_score
return eval_scores
position_percentiles = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
llm = OpenAI(model="gpt-4-1106-preview")
eval_scores_gpt4 = await run_experiments(
uber_doc,
position_percentiles,
context_str,
query_str,
llm,
response_mode="compact",
)
llm = | OpenAI(model="gpt-4-1106-preview") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-predibase')
get_ipython().system('pip install llama-index --quiet')
get_ipython().system('pip install predibase --quiet')
get_ipython().system('pip install sentence-transformers --quiet')
import os
os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
from llama_index.llms.predibase import PredibaseLLM
llm = PredibaseLLM(
model_name="llama-2-13b", temperature=0.3, max_new_tokens=512
)
result = llm.complete("Can you recommend me a nice dry white wine?")
print(result)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SentenceSplitter
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = | SimpleDirectoryReader("./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
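# A sketch of wiring the Predibase LLM into a standard RAG stack over the essay
# (assumes `documents` holds the loaded files; the local BGE embedding model is
# an assumption, any embedding model works here):
embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")
splitter = SentenceSplitter(chunk_size=1024)
index = VectorStoreIndex.from_documents(
    documents, transformations=[splitter], embed_model=embed_model
)
query_engine = index.as_query_engine(llm=llm)
print(query_engine.query("What did the author do growing up?"))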
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import os
os.environ[
"PINECONE_API_KEY"
] = "<Your Pinecone API key, from app.pinecone.io>"
from pinecone import Pinecone
from pinecone import ServerlessSpec
api_key = os.environ["PINECONE_API_KEY"]
pc = Pinecone(api_key=api_key)
try:
pc.create_index(
"quickstart-index",
dimension=1536,
metric="euclidean",
spec=ServerlessSpec(cloud="aws", region="us-west-2"),
)
except Exception as e:
print(e)
pinecone_index = pc.Index("quickstart-index")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Fiction",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index,
namespace="test",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.retrievers import VectorIndexAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="famous books and movies",
metadata_info=[
MetadataInfo(
name="director",
type="str",
description=("Name of the director"),
),
MetadataInfo(
name="theme",
type="str",
description=("Theme of the book/movie"),
),
MetadataInfo(
name="year",
type="int",
description=("Year of the book/movie"),
),
],
)
retriever = VectorIndexAutoRetriever(
index,
vector_store_info=vector_store_info,
empty_query_top_k=10,
default_empty_query_vector=[0] * 1536,
verbose=True,
)
nodes = retriever.retrieve(
"Tell me about some books/movies after the year 2000"
)
for node in nodes:
print(node.text)
print(node.metadata)
nodes = retriever.retrieve("Tell me about some books that are Fiction")
for node in nodes:
print(node.text)
print(node.metadata)
from llama_index.core.vector_stores import MetadataFilters
filter_dicts = [{"key": "year", "operator": "==", "value": 1997}]
filters = MetadataFilters.from_dicts(filter_dicts)
retriever2 = VectorIndexAutoRetriever(
index,
vector_store_info=vector_store_info,
empty_query_top_k=10,
default_empty_query_vector=[0] * 1536,
extra_filters=filters,
)
nodes = retriever2.retrieve("Tell me about some books that are Fiction")
for node in nodes:
print(node.text)
print(node.metadata)
nodes = retriever.retrieve("Tell me about some books that are mafia-themed")
for node in nodes:
print(node.text)
print(node.metadata)
from llama_index.core.prompts import display_prompt_dict
from llama_index.core import PromptTemplate
prompts_dict = retriever.get_prompts()
| display_prompt_dict(prompts_dict) | llama_index.core.prompts.display_prompt_dict |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().system('pip install llama-index')
from llama_index.llms.cohere import Cohere
api_key = "Your api key"
resp = Cohere(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.cohere import Cohere
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Cohere(api_key=api_key).chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
print(resp)
from llama_index.llms.openai import OpenAI
llm = Cohere(api_key=api_key)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.openai import OpenAI
llm = Cohere(api_key=api_key)
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
for r in resp:
print(r.delta, end="")
from llama_index.llms.cohere import Cohere
llm = | Cohere(model="command", api_key=api_key) | llama_index.llms.cohere.Cohere |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().system('pip install llama-index gradientai -q')
import os
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.finetuning import GradientFinetuneEngine
os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY")
os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>"
from pydantic import BaseModel
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.llms.openai import OpenAI
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
openai_handler = LlamaDebugHandler()
openai_callback = CallbackManager([openai_handler])
openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback)
gradient_handler = LlamaDebugHandler()
gradient_callback = CallbackManager([gradient_handler])
base_model_slug = "llama2-7b-chat"
gradient_llm = GradientBaseModelLLM(
base_model_slug=base_model_slug,
max_tokens=300,
callback_manager=gradient_callback,
is_chat_model=True,
)
from llama_index.core.llms import LLMMetadata
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
openai_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=openai_llm,
verbose=True,
)
gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=gradient_llm,
verbose=True,
)
response = openai_program(movie_name="The Shining")
print(str(response))
tmp = openai_handler.get_llm_inputs_outputs()
print(tmp[0][0].payload["messages"][0])
response = gradient_program(movie_name="The Shining")
print(str(response))
tmp = gradient_handler.get_llm_inputs_outputs()
print(tmp[0][0].payload["messages"][0])
from llama_index.core.program import LLMTextCompletionProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import GradientAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.output_parsers import PydanticOutputParser
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = GradientAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm_gpt4 = OpenAI(model="gpt-4", callback_manager=callback_manager)
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
openai_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=llm_gpt4,
verbose=True,
)
movie_names = [
"The Shining",
"The Departed",
"Titanic",
"Goodfellas",
"Pretty Woman",
"Home Alone",
"Caged Fury",
"Edward Scissorhands",
"Total Recall",
"Ghost",
"Tremors",
"RoboCop",
"Rocky V",
]
from tqdm.notebook import tqdm
for movie_name in tqdm(movie_names):
output = openai_program(movie_name=movie_name)
print(output.json())
events = finetuning_handler.get_finetuning_events()
events
finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl")
get_ipython().system('cat mock_finetune_songs.jsonl')
base_model_slug = "llama2-7b-chat"
base_llm = GradientBaseModelLLM(
base_model_slug=base_model_slug, max_tokens=500, is_chat_model=True
)
from llama_index.finetuning import GradientFinetuneEngine
finetune_engine = GradientFinetuneEngine(
base_model_slug=base_model_slug,
name="movies_structured",
data_path="mock_finetune_songs.jsonl",
verbose=True,
max_steps=200,
batch_size=1,
)
finetune_engine.model_adapter_id
epochs = 2
for i in range(epochs):
print(f"** EPOCH {i} **")
finetune_engine.finetune()
ft_llm = finetune_engine.get_finetuned_model(
max_tokens=500, is_chat_model=True
)
from llama_index.llms.gradient import GradientModelAdapterLLM
new_prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
Please only generate one album.
"""
gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=new_prompt_template_str,
llm=ft_llm,
verbose=True,
)
gradient_program(movie_name="Goodfellas")
gradient_program(movie_name="Chucky")
base_gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=base_llm,
verbose=True,
)
base_gradient_program(movie_name="Goodfellas")
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pydantic import Field
from typing import List
class Citation(BaseModel):
"""Citation class."""
author: str = Field(
..., description="Inferred first author (usually last name"
)
year: int = Field(..., description="Inferred year")
desc: str = Field(
...,
description=(
"Inferred description from the text of the work that the author is"
" cited for"
),
)
class Response(BaseModel):
"""List of author citations.
Extracted over unstructured text.
"""
citations: List[Citation] = Field(
...,
description=(
"List of author citations (organized by author, year, and"
" description)."
),
)
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SimpleNodeParser
from pathlib import Path
from llama_index.core.callbacks import GradientAIFineTuningHandler
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
metadata = {
"paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models"
}
docs = [Document(text=doc_text, metadata=metadata)]
chunk_size = 1024
node_parser = SimpleNodeParser.from_defaults(chunk_size=chunk_size)
nodes = node_parser.get_nodes_from_documents(docs)
len(nodes)
finetuning_handler = | GradientAIFineTuningHandler() | llama_index.core.callbacks.GradientAIFineTuningHandler |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-typesense')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.vector_stores.typesense import TypesenseVectorStore
from typesense import Client
typesense_client = Client(
{
"api_key": "xyz",
"nodes": [{"host": "localhost", "port": "8108", "protocol": "http"}],
"connection_timeout_seconds": 2,
}
)
typesense_vector_store = TypesenseVectorStore(typesense_client)
storage_context = StorageContext.from_defaults(
vector_store=typesense_vector_store
)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
from llama_index.core import QueryBundle
from llama_index.embeddings.openai import OpenAIEmbedding
query_str = "What did the author do growing up?"
embed_model = | OpenAIEmbedding() | llama_index.embeddings.openai.OpenAIEmbedding |
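# A sketch of querying the Typesense-backed index with a precomputed embedding,
# which is what the QueryBundle import above is for:
query_embedding = embed_model.get_query_embedding(query_str)
query_bundle = QueryBundle(query_str, embedding=query_embedding)
response = index.as_query_engine().query(query_bundle)
display(Markdown(f"<b>{response}</b>"))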
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = | SentenceSplitter() | llama_index.core.node_parser.SentenceSplitter |
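# A hedged sketch of the MongoDB-backed storage this row is heading toward
# (MONGO_URI is a placeholder; assumes `nodes` ends up holding the parsed nodes):
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.index_store.mongodb import MongoIndexStore

MONGO_URI = os.environ["MONGO_URI"]
storage_context = StorageContext.from_defaults(
    docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
    index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)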
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().system('pip install openai matplotlib')
import os
OPENAI_API_TOKEN = "sk-" # Your OpenAI API token here
os.environ["OPENAI_API_TOKEN"] = OPENAI_API_TOKEN
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
image_urls = [
"https://res.cloudinary.com/hello-tickets/image/upload/c_limit,f_auto,q_auto,w_1920/v1640835927/o3pfl41q7m5bj8jardk0.jpg",
]
image_documents = load_image_urls(image_urls)
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=300
)
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
img_response = requests.get(image_urls[0])
print(image_urls[0])
img = Image.open(BytesIO(img_response.content))
plt.imshow(img)
complete_response = openai_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(complete_response)
stream_complete_response = openai_mm_llm.stream_complete(
prompt="give me more context for this image",
image_documents=image_documents,
)
for r in stream_complete_response:
print(r.delta, end="")
from llama_index.core.multi_modal_llms.openai_utils import (
generate_openai_multi_modal_chat_message,
)
chat_msg_1 = generate_openai_multi_modal_chat_message(
prompt="Describe the images as an alternative text",
role="user",
image_documents=image_documents,
)
chat_msg_2 = generate_openai_multi_modal_chat_message(
prompt="The image is a graph showing the surge in US mortgage rates. It is a visual representation of data, with a title at the top and labels for the x and y-axes. Unfortunately, without seeing the image, I cannot provide specific details about the data or the exact design of the graph.",
role="assistant",
)
chat_msg_3 = generate_openai_multi_modal_chat_message(
prompt="can I know more?",
role="user",
)
chat_messages = [chat_msg_1, chat_msg_2, chat_msg_3]
chat_response = openai_mm_llm.chat(
messages=chat_messages,
)
for msg in chat_messages:
print(msg.role, msg.content)
print(chat_response)
stream_chat_response = openai_mm_llm.stream_chat(
messages=chat_messages,
)
for r in stream_chat_response:
print(r.delta, end="")
response_acomplete = await openai_mm_llm.acomplete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(response_acomplete)
response_astream_complete = await openai_mm_llm.astream_complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
async for delta in response_astream_complete:
print(delta.delta, end="")
achat_response = await openai_mm_llm.achat(
messages=chat_messages,
)
print(achat_response)
astream_chat_response = await openai_mm_llm.astream_chat(
messages=chat_messages,
)
async for delta in astream_chat_response:
print(delta.delta, end="")
image_urls = [
"https://www.visualcapitalist.com/wp-content/uploads/2023/10/US_Mortgage_Rate_Surge-Sept-11-1.jpg",
"https://www.sportsnet.ca/wp-content/uploads/2023/11/CP1688996471-1040x572.jpg",
]
image_documents_1 = | load_image_urls(image_urls) | llama_index.core.multi_modal_llms.generic_utils.load_image_urls |
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system("pip install llama-index 'google-generativeai>=0.3.0' matplotlib qdrant_client")
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from pathlib import Path
import random
from typing import Optional
def get_image_files(
dir_path, sample: Optional[int] = 10, shuffle: bool = False
):
dir_path = Path(dir_path)
image_paths = []
for image_path in dir_path.glob("*.jpg"):
image_paths.append(image_path)
if shuffle: random.shuffle(image_paths)
if sample:
return image_paths[:sample]
else:
return image_paths
image_files = get_image_files("SROIE2019/test/img", sample=100)
from pydantic import BaseModel, Field
class ReceiptInfo(BaseModel):
company: str = Field(..., description="Company name")
date: str = Field(..., description="Date field in DD/MM/YYYY format")
address: str = Field(..., description="Address")
total: float = Field(..., description="total amount")
currency: str = Field(
..., description="Currency of the country (in abbreviations)"
)
summary: str = Field(
...,
description="Extracted text summary of the receipt, including items purchased, the type of store, the location, and any other notable salient features (what does the purchase seem to be for?).",
)
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
Can you summarize the image and return a response \
with the following JSON format: \
"""
async def pydantic_gemini(output_class, image_documents, prompt_template_str):
gemini_llm = GeminiMultiModal(
api_key=GOOGLE_API_KEY, model_name="models/gemini-pro-vision"
)
llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(output_class),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=gemini_llm,
verbose=True,
)
response = await llm_program.acall()
return response
from llama_index.core import SimpleDirectoryReader
from llama_index.core.async_utils import run_jobs
async def aprocess_image_file(image_file):
print(f"Image file: {image_file}")
img_docs = SimpleDirectoryReader(input_files=[image_file]).load_data()
output = await pydantic_gemini(ReceiptInfo, img_docs, prompt_template_str)
return output
async def aprocess_image_files(image_files):
"""Process metadata on image files."""
new_docs = []
tasks = []
for image_file in image_files:
task = aprocess_image_file(image_file)
tasks.append(task)
outputs = await run_jobs(tasks, show_progress=True, workers=5)
return outputs
outputs = await aprocess_image_files(image_files)
outputs[4]
from llama_index.core.schema import TextNode
from typing import List
def get_nodes_from_objs(
objs: List[ReceiptInfo], image_files: List[str]
) -> TextNode:
"""Get nodes from objects."""
nodes = []
for image_file, obj in zip(image_files, objs):
node = TextNode(
text=obj.summary,
metadata={
"company": obj.company,
"date": obj.date,
"address": obj.address,
"total": obj.total,
"currency": obj.currency,
"image_file": str(image_file),
},
excluded_embed_metadata_keys=["image_file"],
excluded_llm_metadata_keys=["image_file"],
)
nodes.append(node)
return nodes
nodes = get_nodes_from_objs(outputs, image_files)
print(nodes[0].get_content(metadata_mode="all"))
import qdrant_client
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.gemini import GeminiEmbedding
from llama_index.llms.gemini import Gemini
from llama_index.core import Settings
client = qdrant_client.QdrantClient(path="qdrant_gemini")
vector_store = QdrantVectorStore(client=client, collection_name="collection")
Settings.embed_model = GeminiEmbedding(
model_name="models/embedding-001", api_key=GOOGLE_API_KEY
)
Settings.llm = ( | Gemini(api_key=GOOGLE_API_KEY) | llama_index.llms.gemini.Gemini |
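# A sketch of finishing the build over the receipt nodes (the storage context is
# created here because it is not visible above; the query text is illustrative):
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)
query_engine = index.as_query_engine(similarity_top_k=2)
print(query_engine.query("Tell me about restaurant receipts and what was purchased"))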
from utils import get_train_str, get_train_and_eval_data, get_eval_preds, train_prompt
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter("ignore")
train_df, train_labels, eval_df, eval_labels = get_train_and_eval_data("data/train.csv")
print(train_prompt.template)
train_n = 10
eval_n = 40
train_str = get_train_str(train_df, train_labels, train_n=train_n)
print(f"Example datapoints in `train_str`: \n{train_str}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds = get_eval_preds(train_prompt, train_str, eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc = accuracy_score(eval_label_chunk, np.array(eval_preds).round())
print(f"ACCURACY: {acc}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds_null = get_eval_preds(train_prompt, "", eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc_null = accuracy_score(eval_label_chunk, np.array(eval_preds_null).round())
print(f"ACCURACY: {acc_null}")
from llama_index import SummaryIndex
from llama_index.schema import Document
index = | SummaryIndex([]) | llama_index.SummaryIndex |
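# Not in the original: a minimal sketch that inserts the formatted training
# examples into the (initially empty) summary index as a single Document; a
# fuller version would presumably batch the training rows before inserting.
index.insert(Document(text=train_str))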
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter(chunk_size=256)
nodes = node_parser.get_nodes_from_documents(documents)
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = | OpenAIEmbedding() | llama_index.embeddings.openai.OpenAIEmbedding |
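# Sketch (assumption, not in the original row): embed each node's content up
# front so the nodes can be inserted into a vector store that expects
# pre-computed embeddings.
from llama_index.core.schema import MetadataMode

for node in nodes:
    node.embedding = embed_model.get_text_embedding(
        node.get_content(metadata_mode=MetadataMode.ALL)
    )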
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, get_response_synthesizer
from llama_index.core import DocumentSummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.node_parser import SentenceSplitter
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = []
for wiki_title in wiki_titles:
docs = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
docs[0].doc_id = wiki_title
city_docs.extend(docs)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=1024)
response_synthesizer = get_response_synthesizer(
response_mode="tree_summarize", use_async=True
)
doc_summary_index = DocumentSummaryIndex.from_documents(
city_docs,
llm=chatgpt,
transformations=[splitter],
response_synthesizer=response_synthesizer,
show_progress=True,
)
doc_summary_index.get_document_summary("Boston")
doc_summary_index.storage_context.persist("index")
from llama_index.core import load_index_from_storage
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(persist_dir="index")
doc_summary_index = load_index_from_storage(storage_context)
query_engine = doc_summary_index.as_query_engine(
response_mode="tree_summarize", use_async=True
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)
from llama_index.core.indices.document_summary import (
DocumentSummaryIndexLLMRetriever,
)
retriever = DocumentSummaryIndexLLMRetriever(
doc_summary_index,
)
retrieved_nodes = retriever.retrieve("What are the sports teams in Toronto?")
print(len(retrieved_nodes))
print(retrieved_nodes[0].score)
print(retrieved_nodes[0].node.get_text())
from llama_index.core.query_engine import RetrieverQueryEngine
response_synthesizer = get_response_synthesizer(response_mode="tree_summarize")
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)
from llama_index.core.indices.document_summary import (
DocumentSummaryIndexEmbeddingRetriever,
)
retriever = DocumentSummaryIndexEmbeddingRetriever(
doc_summary_index,
)
retrieved_nodes = retriever.retrieve("What are the sports teams in Toronto?")
len(retrieved_nodes)
print(retrieved_nodes[0].node.get_text())
from llama_index.core.query_engine import RetrieverQueryEngine
response_synthesizer = | get_response_synthesizer(response_mode="tree_summarize") | llama_index.core.get_response_synthesizer |
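# Sketch mirroring the LLM-retriever example above: wrap the embedding-based
# retriever in a query engine and ask the same question.
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)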
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-extractors-marvin')
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.extractors.marvin import MarvinMetadataExtractor
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
documents = SimpleDirectoryReader("data").load_data()
documents[0].text = documents[0].text[:10000]
import marvin
from marvin import ai_model
from llama_index.core.bridge.pydantic import BaseModel, Field
marvin.settings.openai.api_key = os.environ["OPENAI_API_KEY"]
@ai_model
class SportsSupplement(BaseModel):
name: str = Field(..., description="The name of the sports supplement")
description: str = Field(
..., description="A description of the sports supplement"
)
pros_cons: str = Field(
..., description="The pros and cons of the sports supplement"
)
llm_model = "gpt-3.5-turbo"
node_parser = TokenTextSplitter(
separator=" ", chunk_size=512, chunk_overlap=128
)
metadata_extractor = MarvinMetadataExtractor(
marvin_model=SportsSupplement, llm_model_string=llm_model
) # let's extract custom entities for each node.
from llama_index.core.ingestion import IngestionPipeline
pipeline = | IngestionPipeline(transformations=[node_parser, metadata_extractor]) | llama_index.core.ingestion.IngestionPipeline |
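# Sketch (assumption): run the ingestion pipeline over the loaded documents and
# inspect the metadata the Marvin extractor attached to each node.
from pprint import pprint

nodes = pipeline.run(documents=documents, show_progress=True)
for node in nodes[:3]:
    pprint(node.metadata)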
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import StorageContext
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
splitter = SentenceSplitter(chunk_size=1024)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, transformations=[splitter], storage_context=storage_context
)
retriever = index.as_retriever()
query_str = (
"Can you tell me about results from RLHF using both model-based and"
" human-based evaluation?"
)
retrieved_nodes = retriever.retrieve(query_str)
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
llm = OpenAI(model="text-davinci-003")
qa_prompt = PromptTemplate(
"""\
Context information is below.
---------------------
{context_str}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query_str}
Answer: \
"""
)
query_str = (
"Can you tell me about results from RLHF using both model-based and"
" human-based evaluation?"
)
retrieved_nodes = retriever.retrieve(query_str)
def generate_response(retrieved_nodes, query_str, qa_prompt, llm):
context_str = "\n\n".join([r.get_content() for r in retrieved_nodes])
fmt_qa_prompt = qa_prompt.format(
context_str=context_str, query_str=query_str
)
response = llm.complete(fmt_qa_prompt)
return str(response), fmt_qa_prompt
response, fmt_qa_prompt = generate_response(
retrieved_nodes, query_str, qa_prompt, llm
)
print(f"*****Response******:\n{response}\n\n")
print(f"*****Formatted Prompt*****:\n{fmt_qa_prompt}\n\n")
retriever = index.as_retriever(similarity_top_k=6)
retrieved_nodes = retriever.retrieve(query_str)
response, fmt_qa_prompt = generate_response(
retrieved_nodes, query_str, qa_prompt, llm
)
print(f"Response (k=5): {response}")
refine_prompt = PromptTemplate(
"""\
The original query is as follows: {query_str}
We have provided an existing answer: {existing_answer}
We have the opportunity to refine the existing answer \
(only if needed) with some more context below.
------------
{context_str}
------------
Given the new context, refine the original answer to better answer the query. \
If the context isn't useful, return the original answer.
Refined Answer: \
"""
)
from llama_index.core.response.notebook_utils import display_source_node
def generate_response_cr(
retrieved_nodes, query_str, qa_prompt, refine_prompt, llm
):
"""Generate a response using create and refine strategy.
The first node uses the 'QA' prompt.
All subsequent nodes use the 'refine' prompt.
"""
cur_response = None
fmt_prompts = []
for idx, node in enumerate(retrieved_nodes):
print(f"[Node {idx}]")
| display_source_node(node, source_length=2000) | llama_index.core.response.notebook_utils.display_source_node |
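        # Sketch of how the loop might continue (not part of the original
        # snippet): use the QA prompt for the first node and the refine prompt
        # for every subsequent node.
        context_str = node.get_content()
        if cur_response is None:
            fmt_prompt = qa_prompt.format(
                context_str=context_str, query_str=query_str
            )
        else:
            fmt_prompt = refine_prompt.format(
                context_str=context_str,
                query_str=query_str,
                existing_answer=str(cur_response),
            )
        cur_response = llm.complete(fmt_prompt)
        fmt_prompts.append(fmt_prompt)
    return str(cur_response), fmt_prompts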
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.packs.koda_retriever import KodaRetriever
from llama_index.core.evaluation import RetrieverEvaluator
from llama_index.core import SimpleDirectoryReader
import os
from pinecone import Pinecone
from llama_index.core.node_parser import SemanticSplitterNodeParser
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.evaluation import generate_qa_embedding_pairs
import pandas as pd
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("llama2-paper") # this was previously created in my pinecone account
Settings.llm = OpenAI()
Settings.embed_model = OpenAIEmbedding()
vector_store = PineconeVectorStore(pinecone_index=index)
vector_index = VectorStoreIndex.from_vector_store(
vector_store=vector_store, embed_model=Settings.embed_model
)
reranker = | LLMRerank(llm=Settings.llm) | llama_index.core.postprocessor.LLMRerank |
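# Sketch (assumption): KodaRetriever is constructed with the index, LLM, and
# reranker as in the pack's README; the exact signature may differ.
retriever = KodaRetriever(
    index=vector_index,
    llm=Settings.llm,
    reranker=reranker,
    verbose=True,
)
results = retriever.retrieve("What can you tell me about RLHF in Llama 2?")
print(len(results))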
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-redis')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-readers-google')
get_ipython().system('docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import (
DocstoreStrategy,
IngestionPipeline,
IngestionCache,
)
from llama_index.core.ingestion.cache import RedisCache
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.core.node_parser import SentenceSplitter
from llama_index.vector_stores.redis import RedisVectorStore
vector_store = RedisVectorStore(
index_name="redis_vector_store",
index_prefix="vectore_store",
redis_url="redis://localhost:6379",
)
cache = IngestionCache(
cache=RedisCache.from_host_and_port("localhost", 6379),
collection="redis_cache",
)
if vector_store._index_exists():
vector_store.delete_index()
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
embed_model,
],
docstore=RedisDocumentStore.from_host_and_port(
"localhost", 6379, namespace="document_store"
),
vector_store=vector_store,
cache=cache,
docstore_strategy=DocstoreStrategy.UPSERTS,
)
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_vector_store(
pipeline.vector_store, embed_model=embed_model
)
from llama_index.readers.google import GoogleDriveReader
loader = | GoogleDriveReader() | llama_index.readers.google.GoogleDriveReader |
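# Sketch (assumption): "FOLDER_ID_HERE" is a placeholder Google Drive folder id;
# load its documents and push them through the Redis-backed pipeline.
docs = loader.load_data(folder_id="FOLDER_ID_HERE")
nodes = pipeline.run(documents=docs)
print(f"Ingested {len(nodes)} nodes")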
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex
from llama_index.core import SimpleDirectoryReader
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query = "What did the author do growing up?"
base_query_engine = index.as_query_engine()
response = base_query_engine.query(query)
print(response)
from llama_index.core.query_engine import RetryQueryEngine
from llama_index.core.evaluation import RelevancyEvaluator
query_response_evaluator = RelevancyEvaluator()
retry_query_engine = RetryQueryEngine(
base_query_engine, query_response_evaluator
)
retry_response = retry_query_engine.query(query)
print(retry_response)
from llama_index.core.query_engine import RetrySourceQueryEngine
retry_source_query_engine = RetrySourceQueryEngine(
base_query_engine, query_response_evaluator
)
retry_source_response = retry_source_query_engine.query(query)
print(retry_source_response)
from llama_index.core.evaluation import GuidelineEvaluator
from llama_index.core.evaluation.guideline import DEFAULT_GUIDELINES
from llama_index.core import Response
from llama_index.core.indices.query.query_transform.feedback_transform import (
FeedbackQueryTransformation,
)
from llama_index.core.query_engine import RetryGuidelineQueryEngine
guideline_eval = GuidelineEvaluator(
guidelines=DEFAULT_GUIDELINES
+ "\nThe response should not be overly long.\n"
"The response should try to summarize where possible.\n"
) # just for example
typed_response = (
response if isinstance(response, Response) else response.get_response()
)
eval = guideline_eval.evaluate_response(query, typed_response)
print(f"Guideline eval evaluation result: {eval.feedback}")
feedback_query_transform = | FeedbackQueryTransformation(resynthesize_query=True) | llama_index.core.indices.query.query_transform.feedback_transform.FeedbackQueryTransformation |
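# Sketch (assumption): apply the feedback transform to the original query and
# run the guideline-based retry engine end to end.
transformed_query = feedback_query_transform.run(query, {"evaluation": eval})
print(f"Transformed query: {transformed_query.query_str}")
retry_guideline_query_engine = RetryGuidelineQueryEngine(
    base_query_engine, guideline_eval, resynthesize_query=True
)
print(retry_guideline_query_engine.query(query))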
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-extractors-entity')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE"
from llama_index.llms.openai import OpenAI
from llama_index.core.schema import MetadataMode
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512)
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
TitleExtractor,
KeywordExtractor,
BaseExtractor,
)
from llama_index.extractors.entity import EntityExtractor
from llama_index.core.node_parser import TokenTextSplitter
text_splitter = TokenTextSplitter(
separator=" ", chunk_size=512, chunk_overlap=128
)
class CustomExtractor(BaseExtractor):
def extract(self, nodes):
metadata_list = [
{
"custom": (
node.metadata["document_title"]
+ "\n"
+ node.metadata["excerpt_keywords"]
)
}
for node in nodes
]
return metadata_list
extractors = [
TitleExtractor(nodes=5, llm=llm),
QuestionsAnsweredExtractor(questions=3, llm=llm),
]
transformations = [text_splitter] + extractors
from llama_index.core import SimpleDirectoryReader
get_ipython().system('mkdir -p data')
get_ipython().system('wget -O "data/10k-132.pdf" "https://www.dropbox.com/scl/fi/6dlqdk6e2k1mjhi8dee5j/uber.pdf?rlkey=2jyoe49bg2vwdlz30l76czq6g&dl=1"')
get_ipython().system('wget -O "data/10k-vFinal.pdf" "https://www.dropbox.com/scl/fi/qn7g3vrk5mqb18ko4e5in/lyft.pdf?rlkey=j6jxtjwo8zbstdo4wz3ns8zoj&dl=1"')
uber_docs = SimpleDirectoryReader(input_files=["data/10k-132.pdf"]).load_data()
uber_front_pages = uber_docs[0:3]
uber_content = uber_docs[63:69]
uber_docs = uber_front_pages + uber_content
from llama_index.core.ingestion import IngestionPipeline
pipeline = | IngestionPipeline(transformations=transformations) | llama_index.core.ingestion.IngestionPipeline |
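# Sketch: run the metadata-extraction pipeline over the Uber pages and peek at
# the extracted document-title / questions-answered metadata on a node.
uber_nodes = pipeline.run(documents=uber_docs)
print(uber_nodes[1].metadata)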
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt")
from llama_index.core import SimpleDirectoryReader
reader = | SimpleDirectoryReader(input_files=["pg_essay.txt"]) | llama_index.core.SimpleDirectoryReader |
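# Sketch (assumption): load the essay and build a simple vector index over it
# so it can be queried; the question is illustrative.
from llama_index.core import VectorStoreIndex

docs = reader.load_data()
index = VectorStoreIndex.from_documents(docs)
print(index.as_query_engine().query("What did the author work on before college?"))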
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("What did the author do growing up?")
print(response)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine_with_threshold = index.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response = query_engine_with_threshold.query(
"What did the author do growing up?"
)
print(response)
index1 = | VectorStoreIndex.from_documents(documents) | llama_index.core.VectorStoreIndex.from_documents |
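# Sketch (assumption): compare retrieval scores directly by building MMR
# retrievers with different mmr_threshold values on the fresh index.
retriever_low_mmr = index1.as_retriever(
    vector_store_query_mode="mmr",
    similarity_top_k=3,
    vector_store_kwargs={"mmr_threshold": 0.2},
)
retriever_high_mmr = index1.as_retriever(
    vector_store_query_mode="mmr",
    similarity_top_k=3,
    vector_store_kwargs={"mmr_threshold": 0.8},
)
for retriever in (retriever_low_mmr, retriever_high_mmr):
    nodes = retriever.retrieve("What did the author do growing up?")
    print([n.score for n in nodes])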
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from IPython.display import Markdown, display
import chromadb
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = | StorageContext.from_defaults(vector_store=vector_store) | llama_index.core.StorageContext.from_defaults |
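# Straightforward continuation (not in the original row): build the index
# against the Chroma-backed storage context with the HuggingFace embedding
# model, then query it.
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"{response}"))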
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
get_ipython().system('pip install "transformers[torch]" "huggingface_hub[inference]"')
get_ipython().system('pip install llama-index')
import os
from typing import List, Optional
from llama_index.llms.huggingface import (
HuggingFaceInferenceAPI,
HuggingFaceLLM,
)
HF_TOKEN: Optional[str] = os.getenv("HUGGING_FACE_TOKEN")
locally_run = | HuggingFaceLLM(model_name="HuggingFaceH4/zephyr-7b-alpha") | llama_index.llms.huggingface.HuggingFaceLLM |
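# Sketch: the remote counterpart of the local model above, served through the
# Hugging Face Inference API (HF_TOKEN may be None for anonymous access).
remotely_run = HuggingFaceInferenceAPI(
    model_name="HuggingFaceH4/zephyr-7b-alpha", token=HF_TOKEN
)
completion = remotely_run.complete("To infinity, and")
print(completion)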
import openai
from llama_index.agent import OpenAIAgent
openai.api_key = "sk-your-key"
from llama_index.tools.multion.base import MultionToolSpec
multion_tool = | MultionToolSpec() | llama_index.tools.multion.base.MultionToolSpec |
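# Sketch (assumption): expose the MultiOn browsing tool to an OpenAI agent and
# issue a natural-language browsing request.
agent = OpenAIAgent.from_tools(multion_tool.to_tool_list(), verbose=True)
print(agent.chat("Browse to the LlamaIndex documentation and summarize the landing page"))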
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.agent import (
CustomSimpleAgentWorker,
Task,
AgentChatResponse,
)
from typing import Dict, Any, List, Tuple, Optional
from llama_index.core.tools import BaseTool, QueryEngineTool
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core import ChatPromptTemplate, PromptTemplate
from llama_index.core.selectors import PydanticSingleSelector
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.llms import ChatMessage, MessageRole
DEFAULT_PROMPT_STR = """
Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \
a modified question that will not trigger the error.
Examples of modified questions:
- The question itself is modified to elicit a non-erroneous response
- The question is augmented with context that will help the downstream system better answer the question.
- The question is augmented with examples of negative responses, or other negative questions.
An error means that either an exception has triggered, or the response is completely irrelevant to the question.
Please return the evaluation of the response in the following JSON format.
"""
def get_chat_prompt_template(
system_prompt: str, current_reasoning: Tuple[str, str]
) -> ChatPromptTemplate:
system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt)
messages = [system_msg]
for raw_msg in current_reasoning:
if raw_msg[0] == "user":
messages.append(
ChatMessage(role=MessageRole.USER, content=raw_msg[1])
)
else:
messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1])
)
return ChatPromptTemplate(message_templates=messages)
class ResponseEval(BaseModel):
"""Evaluation of whether the response has an error."""
has_error: bool = Field(
..., description="Whether the response has an error."
)
new_question: str = Field(..., description="The suggested new question.")
explanation: str = Field(
...,
description=(
"The explanation for the error as well as for the new question."
"Can include the direct stack trace as well."
),
)
from llama_index.core.bridge.pydantic import PrivateAttr
class RetryAgentWorker(CustomSimpleAgentWorker):
"""Agent worker that adds a retry layer on top of a router.
    Continues iterating until there are no errors or the task is done.
"""
prompt_str: str = Field(default=DEFAULT_PROMPT_STR)
max_iterations: int = Field(default=10)
_router_query_engine: RouterQueryEngine = PrivateAttr()
def __init__(self, tools: List[BaseTool], **kwargs: Any) -> None:
"""Init params."""
for tool in tools:
if not isinstance(tool, QueryEngineTool):
raise ValueError(
f"Tool {tool.metadata.name} is not a query engine tool."
)
self._router_query_engine = RouterQueryEngine(
selector=PydanticSingleSelector.from_defaults(),
query_engine_tools=tools,
verbose=kwargs.get("verbose", False),
)
super().__init__(
tools=tools,
**kwargs,
)
def _initialize_state(self, task: Task, **kwargs: Any) -> Dict[str, Any]:
"""Initialize state."""
return {"count": 0, "current_reasoning": []}
def _run_step(
self, state: Dict[str, Any], task: Task, input: Optional[str] = None
) -> Tuple[AgentChatResponse, bool]:
"""Run step.
Returns:
Tuple of (agent_response, is_done)
"""
if "new_input" not in state:
new_input = task.input
else:
new_input = state["new_input"]
response = self._router_query_engine.query(new_input)
state["current_reasoning"].extend(
[("user", new_input), ("assistant", str(response))]
)
chat_prompt_tmpl = get_chat_prompt_template(
self.prompt_str, state["current_reasoning"]
)
llm_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(output_cls=ResponseEval),
prompt=chat_prompt_tmpl,
llm=self.llm,
)
response_eval = llm_program(
query_str=new_input, response_str=str(response)
)
if not response_eval.has_error:
is_done = True
else:
is_done = False
state["new_input"] = response_eval.new_question
if self.verbose:
print(f"> Question: {new_input}")
print(f"> Response: {response}")
print(f"> Response eval: {response_eval.dict()}")
return AgentChatResponse(response=str(response)), is_done
def _finalize_task(self, state: Dict[str, Any], **kwargs) -> None:
"""Finalize task."""
pass
from llama_index.core.tools import QueryEngineTool
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
from llama_index.core import SQLDatabase
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
from llama_index.core.query_engine import NLSQLTableQueryEngine
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database, tables=["city_stats"], verbose=True
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
description=(
"Useful for translating a natural language query into a SQL query over"
" a table containing: city_stats, containing the population/country of"
" each city"
),
)
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core import VectorStoreIndex
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
vector_tools = []
for city, wiki_doc in zip(cities, wiki_docs):
vector_index = VectorStoreIndex.from_documents([wiki_doc])
vector_query_engine = vector_index.as_query_engine()
vector_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
description=f"Useful for answering semantic questions about {city}",
)
vector_tools.append(vector_tool)
from llama_index.core.agent import AgentRunner
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4")
callback_manager = llm.callback_manager
query_engine_tools = [sql_tool] + vector_tools
agent_worker = RetryAgentWorker.from_tools(
query_engine_tools,
llm=llm,
verbose=True,
callback_manager=callback_manager,
)
agent = | AgentRunner(agent_worker, callback_manager=callback_manager) | llama_index.core.agent.AgentRunner |
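# Sketch: exercise the retry agent end to end; it should route between the SQL
# tool and the per-city vector tools, retrying when the evaluator flags an error.
response = agent.chat("Which countries are the cities Toronto, Berlin, and Tokyo in?")
print(str(response))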
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = | AgentFnComponent(fn=parse_react_output_fn) | llama_index.core.query_pipeline.AgentFnComponent |
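# Sketch (assumption): the remaining agent components (tool execution and final
# response processing) are omitted here; wiring the pieces defined so far into
# the query pipeline might look like this.
from llama_index.llms.openai import OpenAI

qp.add_modules(
    {
        "agent_input": agent_input_component,
        "react_prompt": react_prompt_component,
        "llm": OpenAI(model="gpt-4-1106-preview"),
        "react_output_parser": parse_react_output,
    }
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])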
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-zilliz')
from getpass import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass("Enter your OpenAI API Key:")
ZILLIZ_PROJECT_ID = getpass("Enter your Zilliz Project ID:")
ZILLIZ_CLUSTER_ID = getpass("Enter your Zilliz Cluster ID:")
ZILLIZ_TOKEN = getpass("Enter your Zilliz API Key:")
from llama_index.indices.managed.zilliz import ZillizCloudPipelineIndex
zcp_index = ZillizCloudPipelineIndex.from_document_url(
url="https://publicdataset.zillizcloud.com/milvus_doc.md",
project_id=ZILLIZ_PROJECT_ID,
cluster_id=ZILLIZ_CLUSTER_ID,
token=ZILLIZ_TOKEN,
metadata={"version": "2.3"}, # used for filtering
collection_name="zcp_llamalection", # change this value will specify customized collection name
)
zcp_index.insert_doc_url(
url="https://publicdataset.zillizcloud.com/milvus_doc_22.md",
metadata={"version": "2.2"},
)
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
query_engine_milvus23 = zcp_index.as_query_engine(
search_top_k=3,
filters=MetadataFilters(
filters=[
| ExactMatchFilter(key="version", value="2.3") | llama_index.core.vector_stores.ExactMatchFilter |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install torch sentence-transformers')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.indices.managed.google import GoogleIndex
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
project_name = "TODO-your-project-name" # @param {type:"string"}
email = "ht@runllama.ai" # @param {type:"string"}
client_file_name = "client_secret.json"
get_ipython().system('gcloud config set project $project_name')
get_ipython().system('gcloud config set account $email')
get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
google_index = GoogleIndex.create_corpus(display_name="My first corpus!")
print(f"Newly created corpus ID is {google_index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
google_index.insert_documents(documents)
google_index = GoogleIndex.from_corpus(corpus_id="")
query_engine = google_index.as_query_engine()
response = query_engine.query("which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.VERBOSE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.EXTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.llms.gemini import Gemini
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.gemini import GeminiEmbedding
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.7, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
reranker = LLMRerank(
top_n=5,
llm=Gemini(api_key=GOOGLE_API_KEY),
)
retriever = google_index.as_retriever(similarity_top_k=5)
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[reranker],
)
response = query_engine.query("Which program did this author attend?")
print(response.response)
from llama_index.core.postprocessor import SentenceTransformerRerank
sbert_rerank = SentenceTransformerRerank(
model="cross-encoder/ms-marco-MiniLM-L-2-v2", top_n=5
)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.llms.gemini import Gemini
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.gemini import GeminiEmbedding
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.1, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
retriever = google_index.as_retriever(similarity_top_k=5)
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[sbert_rerank],
)
response = query_engine.query("Which program did this author attend?")
print(response.response)
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import Settings
import qdrant_client
Settings.chunk_size = 256
client = qdrant_client.QdrantClient(path="qdrant_retrieval_2")
vector_store = QdrantVectorStore(client=client, collection_name="collection")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
qdrant_index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = qdrant_index.as_query_engine()
response = query_engine.query("Which program did this author attend?")
print(response)
for r in response.source_nodes:
| display_source_node(r, source_length=1000) | llama_index.core.response.notebook_utils.display_source_node |