Dataset columns:
prompt (string, 70 to 19.8k characters)
completion (string, 8 to 1.03k characters)
api (string, 23 to 93 characters)
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes =
SentenceSplitter()
llama_index.core.node_parser.SentenceSplitter
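For context, a minimal sketch of the API this row completes (the chunking parameters below are illustrative defaults, not values from the row):

from llama_index.core.node_parser import SentenceSplitter
# Split the loaded documents into sentence-aware chunks
splitter = SentenceSplitter(chunk_size=1024, chunk_overlap=20)
nodes = splitter.get_nodes_from_documents(documents)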
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, ) from llama_index.core.query_engine.pandas import PandasInstructionParser from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'") import pandas as pd df = pd.read_csv("./titanic_train.csv") instruction_str = ( "1. Convert the query to executable Python code using Pandas.\n" "2. The final line of code should be a Python expression that can be called with the `eval()` function.\n" "3. The code should represent a solution to the query.\n" "4. PRINT ONLY THE EXPRESSION.\n" "5. Do not quote the expression.\n" ) pandas_prompt_str = ( "You are working with a pandas dataframe in Python.\n" "The name of the dataframe is `df`.\n" "This is the result of `print(df.head())`:\n" "{df_str}\n\n" "Follow these instructions:\n" "{instruction_str}\n" "Query: {query_str}\n\n" "Expression:" ) response_synthesis_prompt_str = ( "Given an input question, synthesize a response from the query results.\n" "Query: {query_str}\n\n" "Pandas Instructions (optional):\n{pandas_instructions}\n\n" "Pandas Output: {pandas_output}\n\n" "Response: " ) pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format( instruction_str=instruction_str, df_str=df.head(5) ) pandas_output_parser = PandasInstructionParser(df) response_synthesis_prompt =
PromptTemplate(response_synthesis_prompt_str)
llama_index.core.PromptTemplate
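A sketch of how the completed template is typically consumed; the variable names come from the response_synthesis_prompt_str defined in the prompt above:

from llama_index.core import PromptTemplate
response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str)
# Fill the remaining template variables at call time
prompt = response_synthesis_prompt.format(
    query_str="...", pandas_instructions="...", pandas_output="..."
)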
get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-callbacks-uptrain') get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas tqdm uptrain torch sentence-transformers') from llama_index.core import Settings, VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.readers.web import SimpleWebPageReader from llama_index.core.callbacks import CallbackManager from llama_index.callbacks.uptrain.base import UpTrainCallbackHandler from llama_index.core.query_engine import SubQuestionQueryEngine from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.llms.openai import OpenAI import os os.environ[ "OPENAI_API_KEY" ] = "sk-************" # Replace with your OpenAI API key callback_handler = UpTrainCallbackHandler( key_type="openai", api_key=os.environ["OPENAI_API_KEY"], project_name_prefix="llama", ) Settings.callback_manager =
CallbackManager([callback_handler])
llama_index.core.callbacks.CallbackManager
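For reference, the completed assignment wires the UpTrain handler into global settings; a sketch:

from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager
Settings.callback_manager = CallbackManager([callback_handler])
# Subsequent index builds and queries now emit events to the handler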
get_ipython().system('pip install llama-index llama-index-packs-raptor llama-index-vector-stores-qdrant') from llama_index.packs.raptor import RaptorPack get_ipython().system('wget https://arxiv.org/pdf/2401.18059.pdf -O ./raptor_paper.pdf') import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader(input_files=["./raptor_paper.pdf"]).load_data() from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.vector_stores.chroma import ChromaVectorStore import chromadb client = chromadb.PersistentClient(path="./raptor_paper_db") collection = client.get_or_create_collection("raptor") vector_store = ChromaVectorStore(chroma_collection=collection) raptor_pack = RaptorPack( documents, embed_model=OpenAIEmbedding( model="text-embedding-3-small" ), # used for embedding clusters llm=OpenAI(model="gpt-3.5-turbo", temperature=0.1), # used for generating summaries vector_store=vector_store, # used for storage similarity_top_k=2, # top k for each layer, or overall top-k for collapsed mode="collapsed", # sets default mode transformations=[ SentenceSplitter(chunk_size=400, chunk_overlap=50) ], # transformations applied for ingestion ) nodes = raptor_pack.run("What baselines is raptor compared against?", mode="collapsed") print(len(nodes)) print(nodes[0].text) nodes = raptor_pack.run( "What baselines is raptor compared against?", mode="tree_traversal" ) print(len(nodes)) print(nodes[0].text) from llama_index.packs.raptor import RaptorRetriever retriever = RaptorRetriever( [], embed_model=OpenAIEmbedding( model="text-embedding-3-small" ), # used for embedding clusters llm=OpenAI(model="gpt-3.5-turbo", temperature=0.1), # used for generating summaries vector_store=vector_store, # used for storage similarity_top_k=2, # top k for each layer, or overall top-k for collapsed mode="tree_traversal", # sets default mode ) from llama_index.core.query_engine import RetrieverQueryEngine query_engine = RetrieverQueryEngine.from_args( raptor_pack.retriever, llm=
OpenAI(model="gpt-3.5-turbo", temperature=0.1)
llama_index.llms.openai.OpenAI
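A minimal sketch completing the query-engine construction shown at the end of the prompt, reusing the query string that appears earlier in the row:

from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(
    raptor_pack.retriever,
    llm=OpenAI(model="gpt-3.5-turbo", temperature=0.1),
)
response = query_engine.query("What baselines is raptor compared against?")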
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb duckdb-engine') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SQLDatabase, SimpleDirectoryReader, Document from llama_index.readers.wikipedia import WikipediaReader from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.indices.struct_store import SQLTableRetrieverQueryEngine from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("duckdb:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) from llama_index.core import SQLDatabase sql_database = SQLDatabase(engine, include_tables=["city_stats"]) query_engine =
NLSQLTableQueryEngine(sql_database)
llama_index.core.query_engine.NLSQLTableQueryEngine
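A sketch of querying the completed text-to-SQL engine over the city_stats table built above (the natural-language query is illustrative):

query_engine = NLSQLTableQueryEngine(sql_database)
response = query_engine.query("Which city has the highest population?")
print(response)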
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings from llama_index.core import StorageContext, VectorStoreIndex from llama_index.core import SummaryIndex Settings.llm = OpenAI() Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
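For context, both indices share the same docstore through storage_context; a sketch of using the completed vector index:

vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
# Query either index; both read nodes from the shared docstore
vector_query_engine = vector_index.as_query_engine()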
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai') get_ipython().system('pip install llama-index') from llama_index.llms.mistralai import MistralAI llm = MistralAI() resp = llm.complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.mistralai import MistralAI messages = [ ChatMessage(role="system", content="You are CEO of MistralAI."), ChatMessage(role="user", content="Tell me the story about La plateforme"), ] resp =
MistralAI()
llama_index.llms.mistralai.MistralAI
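Joining the prompt's truncated assignment with this completion, the statement plausibly continues into a chat call; a sketch (MistralAI() assumes an API key is available, e.g. via the MISTRAL_API_KEY environment variable):

resp = MistralAI().chat(messages)
print(resp)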
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader(input_files=["pg_essay.txt"]) documents = reader.load_data() from llama_index.core.query_pipeline import ( QueryPipeline, InputComponent, ArgPackComponent, ) from typing import Dict, Any, List, Optional from llama_index.core.llama_pack import BaseLlamaPack from llama_index.core.llms import LLM from llama_index.llms.openai import OpenAI from llama_index.core import Document, VectorStoreIndex from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core.node_parser import SentenceSplitter llm = OpenAI(model="gpt-3.5-turbo") chunk_sizes = [128, 256, 512, 1024] query_engines = {} for chunk_size in chunk_sizes: splitter = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=0) nodes = splitter.get_nodes_from_documents(documents) vector_index =
VectorStoreIndex(nodes)
llama_index.core.VectorStoreIndex
get_ipython().system('pip install llama-index llama-index-packs-raptor llama-index-vector-stores-qdrant') from llama_index.packs.raptor import RaptorPack get_ipython().system('wget https://arxiv.org/pdf/2401.18059.pdf -O ./raptor_paper.pdf') import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader(input_files=["./raptor_paper.pdf"]).load_data() from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.vector_stores.chroma import ChromaVectorStore import chromadb client = chromadb.PersistentClient(path="./raptor_paper_db") collection = client.get_or_create_collection("raptor") vector_store = ChromaVectorStore(chroma_collection=collection) raptor_pack = RaptorPack( documents, embed_model=OpenAIEmbedding( model="text-embedding-3-small" ), # used for embedding clusters llm=
OpenAI(model="gpt-3.5-turbo", temperature=0.1)
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko') get_ipython().system('pip install llama-index') import os os.environ["KONKO_API_KEY"] = "<your-api-key>" from llama_index.llms.konko import Konko from llama_index.core.llms import ChatMessage llm = Konko(model="meta-llama/llama-2-13b-chat") messages = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([messages]) print(resp) import os os.environ["OPENAI_API_KEY"] = "<your-api-key>" llm = Konko(model="gpt-3.5-turbo") message = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([message]) print(resp) message =
ChatMessage(role="user", content="Tell me a story in 250 words")
llama_index.core.llms.ChatMessage
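A sketch of sending the completed message through the Konko LLM defined above, mirroring the chat calls earlier in the row:

message = ChatMessage(role="user", content="Tell me a story in 250 words")
resp = llm.chat([message])
print(resp)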
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-postgres') get_ipython().system('pip install llama-index') from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex from llama_index.vector_stores.postgres import PGVectorStore import textwrap import openai import os os.environ["OPENAI_API_KEY"] = "<your key>" openai.api_key = "<your key>" get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() print("Document ID:", documents[0].doc_id) import psycopg2 connection_string = "postgresql://postgres:password@localhost:5432" db_name = "vector_db" conn = psycopg2.connect(connection_string) conn.autocommit = True with conn.cursor() as c: c.execute(f"DROP DATABASE IF EXISTS {db_name}") c.execute(f"CREATE DATABASE {db_name}") from sqlalchemy import make_url url = make_url(connection_string) vector_store = PGVectorStore.from_params( database=db_name, host=url.host, password=url.password, port=url.port, user=url.username, table_name="paul_graham_essay", embed_dim=1536, # openai embedding dimension ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context, show_progress=True ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do?") print(textwrap.fill(str(response), 100)) response = query_engine.query("What happened in the mid 1980s?") print(textwrap.fill(str(response), 100)) vector_store = PGVectorStore.from_params( database="vector_db", host="localhost", password="password", port=5432, user="postgres", table_name="paul_graham_essay", embed_dim=1536, # openai embedding dimension ) index =
VectorStoreIndex.from_vector_store(vector_store=vector_store)
llama_index.core.VectorStoreIndex.from_vector_store
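For reference, the completed call reconnects to the existing pgvector table without re-ingesting; a sketch (the query is illustrative):

index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do?")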
import openai openai.api_key = "sk-xxx" from llama_index.agent.openai import OpenAIAgent from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec agent = OpenAIAgent.from_tools(
DuckDuckGoSearchToolSpec()
llama_index.tools.duckduckgo.DuckDuckGoSearchToolSpec
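A minimal sketch of the completed agent setup; to_tool_list() converts the spec into callable tools, and the user prompt is illustrative:

tool_spec = DuckDuckGoSearchToolSpec()
agent = OpenAIAgent.from_tools(tool_spec.to_tool_list(), verbose=True)
print(agent.chat("What is the latest news about LlamaIndex?"))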
import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import ( FixedRecencyPostprocessor, EmbeddingRecencyPostprocessor, ) from llama_index.core.node_parser import SentenceSplitter from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.core.response.notebook_utils import display_response from llama_index.core import StorageContext def get_file_metadata(file_name: str): """Get file metadata.""" if "v1" in file_name: return {"date": "2020-01-01"} elif "v2" in file_name: return {"date": "2020-02-03"} elif "v3" in file_name: return {"date": "2022-04-12"} else: raise ValueError("invalid file") documents = SimpleDirectoryReader( input_files=[ "test_versioned_data/paul_graham_essay_v1.txt", "test_versioned_data/paul_graham_essay_v2.txt", "test_versioned_data/paul_graham_essay_v3.txt", ], file_metadata=get_file_metadata, ).load_data() from llama_index.core import Settings Settings.text_splitter = SentenceSplitter(chunk_size=512) nodes =
Settings.text_splitter.get_nodes_from_documents(documents)
llama_index.core.Settings.text_splitter.get_nodes_from_documents
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().system('pip install llama-index') from llama_index.llms.cohere import Cohere api_key = "Your api key" resp = Cohere(api_key=api_key).complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.cohere import Cohere messages = [ ChatMessage(role="user", content="hello there"), ChatMessage( role="assistant", content="Arrrr, matey! How can I help ye today?" ), ChatMessage(role="user", content="What is your name"), ] resp = Cohere(api_key=api_key).chat( messages, preamble_override="You are a pirate with a colorful personality" ) print(resp) from llama_index.llms.openai import OpenAI llm = Cohere(api_key=api_key) resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.openai import OpenAI llm = Cohere(api_key=api_key) messages = [ ChatMessage(role="user", content="hello there"), ChatMessage( role="assistant", content="Arrrr, matey! How can I help ye today?" ), ChatMessage(role="user", content="What is your name"), ] resp = llm.stream_chat( messages, preamble_override="You are a pirate with a colorful personality" ) for r in resp: print(r.delta, end="") from llama_index.llms.cohere import Cohere llm = Cohere(model="command", api_key=api_key) resp = llm.complete("Paul Graham is ") print(resp) from llama_index.llms.cohere import Cohere llm = Cohere(model="command", api_key=api_key) resp = await llm.acomplete("Paul Graham is ") print(resp) resp = await llm.astream_complete("Paul Graham is ") async for delta in resp: print(delta.delta, end="") from llama_index.llms.cohere import Cohere llm_good = Cohere(api_key=api_key) llm_bad =
Cohere(model="command", api_key="BAD_KEY")
llama_index.llms.cohere.Cohere
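For context, the llm_good/llm_bad pairing presumably demonstrates an authentication failure; a sketch under that assumption:

llm_bad = Cohere(model="command", api_key="BAD_KEY")
try:
    resp = llm_bad.complete("Paul Graham is ")
except Exception as e:
    print(f"Call failed as expected: {e}")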
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY') get_ipython().system('pip install llama-index pypdf') get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-small-en") llm = OpenAI(model="gpt-3.5-turbo") base_index = VectorStoreIndex(base_nodes, embed_model=embed_model) base_retriever = base_index.as_retriever(similarity_top_k=2) retrievals = base_retriever.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for n in retrievals: display_source_node(n, source_length=1500) query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm) response = query_engine_base.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) sub_chunk_sizes = [128, 256, 512] sub_node_parsers = [ SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes ] all_nodes = [] for base_node in base_nodes: for n in sub_node_parsers: sub_nodes = n.get_nodes_from_documents([base_node]) sub_inodes = [ IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes ] all_nodes.extend(sub_inodes) original_node = IndexNode.from_text_node(base_node, base_node.node_id) all_nodes.append(original_node) all_nodes_dict = {n.node_id: n for n in all_nodes} vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model) vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2) retriever_chunk = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever_chunk}, node_dict=all_nodes_dict, verbose=True, ) nodes = retriever_chunk.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for node in nodes: display_source_node(node, source_length=2000) query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm) response = query_engine_chunk.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) import nest_asyncio nest_asyncio.apply() from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode from llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, ) extractors = [ SummaryExtractor(summaries=["self"], show_progress=True), QuestionsAnsweredExtractor(questions=5, 
show_progress=True), ] node_to_metadata = {} for extractor in extractors: metadata_dicts = extractor.extract(base_nodes) for node, metadata in zip(base_nodes, metadata_dicts): if node.node_id not in node_to_metadata: node_to_metadata[node.node_id] = metadata else: node_to_metadata[node.node_id].update(metadata) def save_metadata_dicts(path, data): with open(path, "w") as fp: json.dump(data, fp) def load_metadata_dicts(path): with open(path, "r") as fp: data = json.load(fp) return data save_metadata_dicts("data/llama2_metadata_dicts.json", node_to_metadata) metadata_dicts = load_metadata_dicts("data/llama2_metadata_dicts.json") import copy all_nodes = copy.deepcopy(base_nodes) for node_id, metadata in node_to_metadata.items(): for val in metadata.values(): all_nodes.append(IndexNode(text=val, index_id=node_id)) all_nodes_dict = {n.node_id: n for n in all_nodes} from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf') from llama_index.core import SimpleDirectoryReader from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import DatasetGenerator documents = SimpleDirectoryReader( input_files=["IPCC_AR6_WGII_Chapter03.pdf"] ).load_data() import random random.seed(42) random.shuffle(documents) gpt_35_llm =
OpenAI(model="gpt-3.5-turbo", temperature=0.3)
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-cassandra') get_ipython().system('pip install --quiet "astrapy>=0.5.8"') import os from getpass import getpass from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, Document, StorageContext, ) from llama_index.vector_stores.cassandra import CassandraVectorStore from cassandra.cluster import Cluster cluster = Cluster(["127.0.0.1"]) session = cluster.connect() import cassio CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ") cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE) ASTRA_DB_ID = input("ASTRA_DB_ID = ") ASTRA_DB_TOKEN = getpass("ASTRA_DB_TOKEN = ") desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ") if desired_keyspace: ASTRA_DB_KEYSPACE = desired_keyspace else: ASTRA_DB_KEYSPACE = None import cassio cassio.init( database_id=ASTRA_DB_ID, token=ASTRA_DB_TOKEN, keyspace=ASTRA_DB_KEYSPACE, ) os.environ["OPENAI_API_KEY"] = getpass("OpenAI API Key:") get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
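The truncated assignment plus this completion almost certainly chains into load_data(), matching the reader usage elsewhere in this set; a sketch:

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Loaded {len(documents)} document(s)")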
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system('pip install -q llama-index google-generativeai') get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...') import os GOOGLE_API_KEY = "" # add your GOOGLE API key here os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY from llama_index.llms.gemini import Gemini resp =
Gemini()
llama_index.llms.gemini.Gemini
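The completion constructs the model inline, so the statement plausibly continues into a completion call; a sketch (the prompt text is illustrative):

resp = Gemini().complete("Write a poem about a magic backpack")
print(resp)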
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core.postprocessor import ( PIINodePostprocessor, NERPIINodePostprocessor, ) from llama_index.llms.huggingface import HuggingFaceLLM from llama_index.core import Document, VectorStoreIndex from llama_index.core.schema import TextNode text = """ Hello Paulo Santos. The latest statement for your credit card account \ 1111-0000-1111-0000 was mailed to 123 Any Street, Seattle, WA 98109. """ node = TextNode(text=text) processor = NERPIINodePostprocessor() from llama_index.core.schema import NodeWithScore new_nodes = processor.postprocess_nodes([NodeWithScore(node=node)]) new_nodes[0].node.get_text() new_nodes[0].node.metadata["__pii_node_info__"] from llama_index.llms.openai import OpenAI processor = PIINodePostprocessor(llm=OpenAI()) from llama_index.core.schema import NodeWithScore new_nodes = processor.postprocess_nodes([NodeWithScore(node=node)]) new_nodes[0].node.get_text() new_nodes[0].node.metadata["__pii_node_info__"] text = """ Hello Paulo Santos. The latest statement for your credit card account \ 4095-2609-9393-4932 was mailed to Seattle, WA 98109. \ IBAN GB90YNTU67299444055881 and social security number is 474-49-7577 were verified on the system. \ Further communications will be sent to paulo@presidio.site """ presidio_node = TextNode(text=text) from llama_index.postprocessor.presidio import PresidioPIINodePostprocessor processor =
PresidioPIINodePostprocessor()
llama_index.postprocessor.presidio.PresidioPIINodePostprocessor
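A sketch applying the completed Presidio postprocessor, mirroring the NER and LLM variants used earlier in the row:

processor = PresidioPIINodePostprocessor()
new_nodes = processor.postprocess_nodes([NodeWithScore(node=presidio_node)])
print(new_nodes[0].node.get_text())  # PII masked with placeholders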
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.llms.openai import OpenAI from llama_index.core.tools import QueryEngineTool, ToolMetadata llm_35 = OpenAI(model="gpt-3.5-turbo-0613", temperature=0.3) llm_4 = OpenAI(model="gpt-4-0613", temperature=0.3) try: storage_context = StorageContext.from_defaults( persist_dir="./storage/march" ) march_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/june" ) june_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/sept" ) sept_index =
load_index_from_storage(storage_context)
llama_index.core.load_index_from_storage
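For reference, the completed try-block loads the September index the same way as the March and June indices; a sketch:

storage_context = StorageContext.from_defaults(persist_dir="./storage/sept")
sept_index = load_index_from_storage(storage_context)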
import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm') get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web') get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference') import hashlib import json from pathlib import Path import os import textwrap from typing import List, Union import llama_index.core from llama_index.readers.web import SimpleWebPageReader from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core.callbacks import CallbackManager from llama_index.callbacks.openinference import OpenInferenceCallbackHandler from llama_index.callbacks.openinference.base import ( as_dataframe, QueryData, NodeData, ) from llama_index.core.node_parser import SimpleNodeParser import pandas as pd from tqdm import tqdm documents = SimpleWebPageReader().load_data( [ "https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt" ] ) print(documents[0].text) parser = SentenceSplitter() nodes = parser.get_nodes_from_documents(documents) print(nodes[0].text) callback_handler = OpenInferenceCallbackHandler() callback_manager = CallbackManager([callback_handler]) llama_index.core.Settings.callback_manager = callback_manager index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine() max_characters_per_line = 80 queries = [ "What did Paul Graham do growing up?", "When and how did Paul Graham's mother die?", "What, in Paul Graham's opinion, is the most distinctive thing about YC?", "When and how did Paul Graham meet Jessica Livingston?", "What is Bel, and when and where was it written?", ] for query in queries: response = query_engine.query(query) print("Query") print("=====") print(textwrap.fill(query, max_characters_per_line)) print() print("Response") print("========") print(textwrap.fill(str(response), max_characters_per_line)) print() query_data_buffer = callback_handler.flush_query_data_buffer() query_dataframe = as_dataframe(query_data_buffer) query_dataframe class ParquetCallback: def __init__( self, data_path: Union[str, Path], max_buffer_length: int = 1000 ): self._data_path = Path(data_path) self._data_path.mkdir(parents=True, exist_ok=False) self._max_buffer_length = max_buffer_length self._batch_index = 0 def __call__( self, query_data_buffer: List[QueryData], node_data_buffer: List[NodeData], ) -> None: if len(query_data_buffer) >= self._max_buffer_length: query_dataframe = as_dataframe(query_data_buffer) file_path = self._data_path / f"log-{self._batch_index}.parquet" query_dataframe.to_parquet(file_path) self._batch_index += 1 query_data_buffer.clear() # ⚠️ clear the buffer or it will keep growing forever! node_data_buffer.clear() # didn't log node_data_buffer, but still need to clear it data_path = "data" parquet_writer = ParquetCallback( data_path=data_path, max_buffer_length=1, ) callback_handler = OpenInferenceCallbackHandler(callback=parquet_writer) callback_manager = CallbackManager([callback_handler]) llama_index.core.Settings.callback_manager = callback_manager index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
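A sketch of the completed index plus a query whose trace flows through the Parquet-logging OpenInference callback configured above (the query string is taken from the row's earlier list):

index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did Paul Graham do growing up?")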
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex from llama_index.core import PromptTemplate from IPython.display import Markdown, display get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI gpt35_llm = OpenAI(model="gpt-3.5-turbo") gpt4_llm = OpenAI(model="gpt-4") index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass import openai openai.api_key = "sk-" import chromadb chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") from llama_index.core import VectorStoreIndex from llama_index.vector_stores.chroma import ChromaVectorStore from IPython.display import Markdown, display from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", "year": 1994, }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", "theme": "Fiction", "year": 2010, }, ), TextNode( text="To Kill a Mockingbird", metadata={ "author": "Harper Lee", "theme": "Mafia", "year": 1960, }, ), TextNode( text="1984", metadata={ "author": "George Orwell", "theme": "Totalitarianism", "year": 1949, }, ), TextNode( text="The Great Gatsby", metadata={ "author": "F. Scott Fitzgerald", "theme": "The American Dream", "year": 1925, }, ), TextNode( text="Harry Potter and the Sorcerer's Stone", metadata={ "author": "J.K. Rowling", "theme": "Fiction", "year": 1997, }, ), ] from llama_index.core import StorageContext vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.vector_stores import ( MetadataFilter, MetadataFilters, FilterOperator, ) filters = MetadataFilters( filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia")
llama_index.core.vector_stores.MetadataFilter
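For context, the completed filters object is then passed to a retriever so Chroma only returns matching nodes; a sketch grounded in the row's imports (the retrieval query is illustrative):

filters = MetadataFilters(
    filters=[MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia")]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is this story about?")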
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate') get_ipython().run_line_magic('pip', 'install unstructured replicate') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install -U qdrant_client') import os REPLICATE_API_TOKEN = "..." # Your Relicate API token here os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1UU0xc3uLXs-WG0aDQSXjGacUkp142rLS" -O texas.jpg') from llama_index.readers.file import FlatReader from pathlib import Path from llama_index.core.node_parser import UnstructuredElementNodeParser reader = FlatReader() docs_2021 = reader.load_data(Path("tesla_2021_10k.htm")) node_parser = UnstructuredElementNodeParser() import openai OPENAI_API_TOKEN = "..." openai.api_key = OPENAI_API_TOKEN # add your openai api key here os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN import os import pickle if not os.path.exists("2021_nodes.pkl"): raw_nodes_2021 = node_parser.get_nodes_from_documents(docs_2021) pickle.dump(raw_nodes_2021, open("2021_nodes.pkl", "wb")) else: raw_nodes_2021 = pickle.load(open("2021_nodes.pkl", "rb")) nodes_2021, objects_2021 = node_parser.get_nodes_and_objects(raw_nodes_2021) from llama_index.core import VectorStoreIndex vector_index = VectorStoreIndex(nodes=nodes_2021, objects=objects_2021) query_engine = vector_index.as_query_engine(similarity_top_k=5, verbose=True) from PIL import Image import matplotlib.pyplot as plt imageUrl = "./texas.jpg" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.core.schema import ImageDocument from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) print(imageUrl) llava_multi_modal_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS["llava-13b"], max_new_tokens=200, temperature=0.1, ) prompt = "which Tesla factory is shown in the image? Please answer just the name of the factory." 
llava_response = llava_multi_modal_llm.complete( prompt=prompt, image_documents=[ImageDocument(image_path=imageUrl)], ) print(llava_response.text) rag_response = query_engine.query(llava_response.text) print(rag_response) input_image_path = Path("instagram_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=12ZpBBFkYu-jzz1iz356U5kMikn4uN9ww" -O ./instagram_images/jordan.png') from pydantic import BaseModel class InsAds(BaseModel): """Data model for a Ins Ads.""" account: str brand: str product: str category: str discount: str price: str comments: str review: str description: str from PIL import Image import matplotlib.pyplot as plt ins_imageUrl = "./instagram_images/jordan.png" image = Image.open(ins_imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) prompt_template_str = """\ can you summarize what is in the image\ and return the answer with json format \ """ def pydantic_llava( model_name, output_class, image_documents, prompt_template_str ): mm_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS["llava-13b"], max_new_tokens=1000, ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_class), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_llm, verbose=True, ) response = llm_program() print(f"Model: {model_name}") for res in response: print(res) return response from llama_index.core import SimpleDirectoryReader ins_image_documents = SimpleDirectoryReader("./instagram_images").load_data() pydantic_response = pydantic_llava( "llava-13b", InsAds, ins_image_documents, prompt_template_str ) print(pydantic_response.brand) from pathlib import Path import requests wiki_titles = [ "batman", "Vincent van Gogh", "San Francisco", "iPhone", "Tesla Model S", "BTS", "Air Jordan", ] data_path = Path("data_wiki") for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) import wikipedia import urllib.request image_path = Path("data_wiki") image_uuid = 0 image_metadata_dict = {} MAX_IMAGES_PER_WIKI = 30 wiki_titles = [ "Air Jordan", "San Francisco", "Batman", "Vincent van Gogh", "iPhone", "Tesla Model S", "BTS band", ] if not image_path.exists(): Path.mkdir(image_path) for title in wiki_titles: images_per_wiki = 0 print(title) try: page_py = wikipedia.page(title) list_img_urls = page_py.images for url in list_img_urls: if url.endswith(".jpg") or url.endswith(".png"): image_uuid += 1 image_file_name = title + "_" + url.split("/")[-1] image_metadata_dict[image_uuid] = { "filename": image_file_name, "img_path": "./" + str(image_path / f"{image_uuid}.jpg"), } urllib.request.urlretrieve( url, image_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki > MAX_IMAGES_PER_WIKI: break except: print(str(Exception("No images found for Wikipedia page: 
")) + title) continue import qdrant_client from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import VectorStoreIndex, StorageContext from llama_index.core.indices import MultiModalVectorStoreIndex client = qdrant_client.QdrantClient(path="qdrant_mm_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) documents =
SimpleDirectoryReader("./data_wiki/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader("../data/paul_graham") docs = reader.load_data() import os from llama_index.core import ( StorageContext, VectorStoreIndex, load_index_from_storage, ) if not os.path.exists("storage"): index = VectorStoreIndex.from_documents(docs) index.set_index_id("vector_index") index.storage_context.persist("./storage") else: storage_context = StorageContext.from_defaults(persist_dir="storage") index = load_index_from_storage(storage_context, index_id="vector_index") from llama_index.core.query_pipeline import QueryPipeline from llama_index.core import PromptTemplate prompt_str = "Please generate related movies to {movie_name}" prompt_tmpl = PromptTemplate(prompt_str) llm = OpenAI(model="gpt-3.5-turbo") p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True) output = p.run(movie_name="The Departed") print(str(output)) from typing import List from pydantic import BaseModel, Field from llama_index.core.output_parsers import PydanticOutputParser class Movie(BaseModel): """Object representing a single movie.""" name: str = Field(..., description="Name of the movie.") year: int = Field(..., description="Year of the movie.") class Movies(BaseModel): """Object representing a list of movies.""" movies: List[Movie] = Field(..., description="List of movies.") llm = OpenAI(model="gpt-3.5-turbo") output_parser = PydanticOutputParser(Movies) json_prompt_str = """\ Please generate related movies to {movie_name}. Output with the following JSON format: """ json_prompt_str = output_parser.format(json_prompt_str) json_prompt_tmpl = PromptTemplate(json_prompt_str) p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True) output = p.run(movie_name="Toy Story") output prompt_str = "Please generate related movies to {movie_name}" prompt_tmpl = PromptTemplate(prompt_str) prompt_str2 = """\ Here's some text: {text} Can you rewrite this with a summary of each movie? """ prompt_tmpl2 = PromptTemplate(prompt_str2) llm = OpenAI(model="gpt-3.5-turbo") llm_c = llm.as_query_component(streaming=True) p = QueryPipeline( chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True ) output = p.run(movie_name="The Dark Knight") for o in output: print(o.delta, end="") p = QueryPipeline( chain=[ json_prompt_tmpl, llm.as_query_component(streaming=True), output_parser, ], verbose=True, ) output = p.run(movie_name="Toy Story") print(output) from llama_index.postprocessor.cohere_rerank import CohereRerank prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}" prompt_tmpl1 =
PromptTemplate(prompt_str1)
llama_index.core.PromptTemplate
import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf') from llama_index.core import SimpleDirectoryReader from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import DatasetGenerator documents = SimpleDirectoryReader( input_files=["IPCC_AR6_WGII_Chapter03.pdf"] ).load_data() import random random.seed(42) random.shuffle(documents) gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) question_gen_query = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context from a " "report on climate change and the oceans, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." ) dataset_generator = DatasetGenerator.from_documents( documents[:50], question_gen_query=question_gen_query, llm=gpt_35_llm, ) questions = dataset_generator.generate_questions_from_nodes(num=40) print("Generated ", len(questions), " questions") with open("train_questions.txt", "w") as f: for question in questions: f.write(question + "\n") dataset_generator = DatasetGenerator.from_documents( documents[ 50: ], # since we generated ~1 question for 40 documents, we can skip the first 40 question_gen_query=question_gen_query, llm=gpt_35_llm, ) questions = dataset_generator.generate_questions_from_nodes(num=40) print("Generated ", len(questions), " questions") with open("eval_questions.txt", "w") as f: for question in questions: f.write(question + "\n") questions = [] with open("eval_questions.txt", "r") as f: for line in f: questions.append(line.strip()) from llama_index.core import VectorStoreIndex, Settings Settings.context_window = 2048 gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks import LlamaDebugHandler from llama_index.callbacks.wandb import WandbCallbackHandler from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext, ) from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-4", temperature=0) import llama_index.core from llama_index.core import set_global_handler set_global_handler("wandb", run_args={"project": "llamaindex"}) wandb_callback = llama_index.core.global_handler llama_debug = LlamaDebugHandler(print_trace_on_end=True) run_args = dict( project="llamaindex", ) wandb_callback = WandbCallbackHandler(run_args=run_args) Settings.callback_manager = CallbackManager([llama_debug, wandb_callback]) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") docs =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install -q llama-index-vector-stores-chroma llama-index-llms-fireworks llama-index-embeddings-fireworks==0.1.2') get_ipython().run_line_magic('pip', 'install -q llama-index') get_ipython().system('pip install llama-index chromadb --quiet') get_ipython().system('pip install -q chromadb') get_ipython().system('pip install -q pydantic==1.10.11') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.chroma import ChromaVectorStore from llama_index.core import StorageContext from llama_index.embeddings.fireworks import FireworksEmbedding from llama_index.llms.fireworks import Fireworks from IPython.display import Markdown, display import chromadb import getpass fw_api_key = getpass.getpass("Fireworks API Key:") get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.llms.fireworks import Fireworks from llama_index.embeddings.fireworks import FireworksEmbedding llm = Fireworks( temperature=0, model="accounts/fireworks/models/mixtral-8x7b-instruct" ) chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") embed_model = FireworksEmbedding( model_name="nomic-ai/nomic-embed-text-v1.5", ) documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context, embed_model=embed_model ) query_engine = index.as_query_engine(llm=llm) response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) db = chromadb.PersistentClient(path="./chroma_db") chroma_collection = db.get_or_create_collection("quickstart") vector_store =
ChromaVectorStore(chroma_collection=chroma_collection)
llama_index.vector_stores.chroma.ChromaVectorStore
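A sketch of reloading from the persistent Chroma collection, reusing the same embed_model as the earlier in-memory flow (reconstruction via from_vector_store is an assumption consistent with the other rows):

vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(
    vector_store, embed_model=embed_model
)
response = index.as_query_engine(llm=llm).query(
    "What did the author do growing up?"
)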
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(data) chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True) response = chat_engine.chat("What did Paul Graham do after YC?") print(response) response = chat_engine.chat("What about after that?") print(response) response = chat_engine.chat("Can you tell me more?") print(response) chat_engine.reset() response = chat_engine.chat("What about after that?") print(response) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo", temperature=0) data = SimpleDirectoryReader(input_dir="../data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(data)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-cross-encoders') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system('pip install datasets --quiet') get_ipython().system('pip install sentence-transformers --quiet') get_ipython().system('pip install openai --quiet') from datasets import load_dataset import random dataset = load_dataset("allenai/qasper") train_dataset = dataset["train"] validation_dataset = dataset["validation"] test_dataset = dataset["test"] random.seed(42) # Set a random seed for reproducibility train_sampled_indices = random.sample(range(len(train_dataset)), 800) train_samples = [train_dataset[i] for i in train_sampled_indices] test_sampled_indices = random.sample(range(len(test_dataset)), 80) test_samples = [test_dataset[i] for i in test_sampled_indices] from typing import List def get_full_text(sample: dict) -> str: """ :param dict sample: the row sample from QASPER """ title = sample["title"] abstract = sample["abstract"] sections_list = sample["full_text"]["section_name"] paragraph_list = sample["full_text"]["paragraphs"] combined_sections_with_paras = "" if len(sections_list) == len(paragraph_list): combined_sections_with_paras += title + "\t" combined_sections_with_paras += abstract + "\t" for index in range(0, len(sections_list)): combined_sections_with_paras += str(sections_list[index]) + "\t" combined_sections_with_paras += "".join(paragraph_list[index]) return combined_sections_with_paras else: print("Not the same number of sections as paragraphs list") def get_questions(sample: dict) -> List[str]: """ :param dict sample: the row sample from QASPER """ questions_list = sample["qas"]["question"] return questions_list doc_qa_dict_list = [] for train_sample in train_samples: full_text = get_full_text(train_sample) questions_list = get_questions(train_sample) local_dict = {"paper": full_text, "questions": questions_list} doc_qa_dict_list.append(local_dict) len(doc_qa_dict_list) import pandas as pd df_train = pd.DataFrame(doc_qa_dict_list) df_train.to_csv("train.csv") """ The Answers field in the dataset follow the below format:- Unanswerable answers have "unanswerable" set to true. The remaining answers have exactly one of the following fields being non-empty. "extractive_spans" are spans in the paper which serve as the answer. "free_form_answer" is a written out answer. "yes_no" is true iff the answer is Yes, and false iff the answer is No. We accept only free-form answers and for all the other kind of answers we set their value to 'Unacceptable', to better evaluate the performance of the query engine using pairwise comparision evaluator as it uses GPT-4 which is biased towards preferring long answers more. https://www.anyscale.com/blog/a-comprehensive-guide-for-building-rag-based-llm-applications-part-1 So in the case of 'yes_no' answers it can favour Query Engine answers more than reference answers. Also in the case of extracted spans it can favour reference answers more than Query engine generated answers. 
""" eval_doc_qa_answer_list = [] def get_answers(sample: dict) -> List[str]: """ :param dict sample: the row sample from the train split of QASPER """ final_answers_list = [] answers = sample["qas"]["answers"] for answer in answers: local_answer = "" types_of_answers = answer["answer"][0] if types_of_answers["unanswerable"] == False: if types_of_answers["free_form_answer"] != "": local_answer = types_of_answers["free_form_answer"] else: local_answer = "Unacceptable" else: local_answer = "Unacceptable" final_answers_list.append(local_answer) return final_answers_list for test_sample in test_samples: full_text = get_full_text(test_sample) questions_list = get_questions(test_sample) answers_list = get_answers(test_sample) local_dict = { "paper": full_text, "questions": questions_list, "answers": answers_list, } eval_doc_qa_answer_list.append(local_dict) len(eval_doc_qa_answer_list) import pandas as pd df_test = pd.DataFrame(eval_doc_qa_answer_list) df_test.to_csv("test.csv") get_ipython().system('pip install llama-index --quiet') import os from llama_index.core import SimpleDirectoryReader import openai from llama_index.finetuning.cross_encoders.dataset_gen import ( generate_ce_fine_tuning_dataset, generate_synthetic_queries_over_documents, ) from llama_index.finetuning.cross_encoders import CrossEncoderFinetuneEngine os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import Document final_finetuning_data_list = [] for paper in doc_qa_dict_list: questions_list = paper["questions"] documents = [Document(text=paper["paper"])] local_finetuning_dataset = generate_ce_fine_tuning_dataset( documents=documents, questions_list=questions_list, max_chunk_length=256, top_k=5, ) final_finetuning_data_list.extend(local_finetuning_dataset) len(final_finetuning_data_list) import pandas as pd df_finetuning_dataset = pd.DataFrame(final_finetuning_data_list) df_finetuning_dataset.to_csv("fine_tuning.csv") finetuning_dataset = final_finetuning_data_list finetuning_dataset[0] get_ipython().system('wget -O test.csv https://www.dropbox.com/scl/fi/3lmzn6714oy358mq0vawm/test.csv?rlkey=yz16080te4van7fvnksi9kaed&dl=0') import pandas as pd import ast # Used to safely evaluate the string as a list df_test = pd.read_csv("/content/test.csv", index_col=0) df_test["questions"] = df_test["questions"].apply(ast.literal_eval) df_test["answers"] = df_test["answers"].apply(ast.literal_eval) print(f"Number of papers in the test sample:- {len(df_test)}") from llama_index.core import Document final_eval_data_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] local_eval_dataset = generate_ce_fine_tuning_dataset( documents=documents, questions_list=query_list, max_chunk_length=256, top_k=5, ) relevant_query_list = [] relevant_context_list = [] for item in local_eval_dataset: if item.score == 1: relevant_query_list.append(item.query) relevant_context_list.append(item.context) if len(relevant_query_list) > 0: final_eval_data_list.append( { "paper": row["paper"], "questions": relevant_query_list, "context": relevant_context_list, } ) len(final_eval_data_list) import pandas as pd df_finetuning_dataset = pd.DataFrame(final_eval_data_list) df_finetuning_dataset.to_csv("reranking_test.csv") get_ipython().system('pip install huggingface_hub --quiet') from huggingface_hub import notebook_login notebook_login() from sentence_transformers import SentenceTransformer finetuning_engine = CrossEncoderFinetuneEngine( 
dataset=finetuning_dataset, epochs=2, batch_size=8 ) finetuning_engine.finetune() finetuning_engine.push_to_hub( repo_id="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2" ) get_ipython().system('pip install nest-asyncio --quiet') import nest_asyncio nest_asyncio.apply() get_ipython().system('wget -O reranking_test.csv "https://www.dropbox.com/scl/fi/mruo5rm46k1acm1xnecev/reranking_test.csv?rlkey=hkniwowq0xrc3m0ywjhb2gf26&dl=0"') import pandas as pd import ast df_reranking = pd.read_csv("/content/reranking_test.csv", index_col=0) df_reranking["questions"] = df_reranking["questions"].apply(ast.literal_eval) df_reranking["context"] = df_reranking["context"].apply(ast.literal_eval) print(f"Number of papers in the reranking eval dataset: {len(df_reranking)}") df_reranking.head(1) from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.core.retrievers import VectorIndexRetriever from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core import Settings import os import openai import pandas as pd os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] Settings.chunk_size = 256 rerank_base = SentenceTransformerRerank( model="cross-encoder/ms-marco-MiniLM-L-12-v2", top_n=3 ) rerank_finetuned = SentenceTransformerRerank( model="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2", top_n=3 ) without_reranker_hits = 0 base_reranker_hits = 0 finetuned_reranker_hits = 0 total_number_of_context = 0 for index, row in df_reranking.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] context_list = row["context"] assert len(query_list) == len(context_list) vector_index = VectorStoreIndex.from_documents(documents) retriever_without_reranker = vector_index.as_query_engine( similarity_top_k=3, response_mode="no_text" ) retriever_with_base_reranker = vector_index.as_query_engine( similarity_top_k=8, response_mode="no_text", node_postprocessors=[rerank_base], ) retriever_with_finetuned_reranker = vector_index.as_query_engine( similarity_top_k=8, response_mode="no_text", node_postprocessors=[rerank_finetuned], ) for index in range(0, len(query_list)): query = query_list[index] context = context_list[index] total_number_of_context += 1 response_without_reranker = retriever_without_reranker.query(query) without_reranker_nodes = response_without_reranker.source_nodes for node in without_reranker_nodes: if context in node.node.text or node.node.text in context: without_reranker_hits += 1 response_with_base_reranker = retriever_with_base_reranker.query(query) with_base_reranker_nodes = response_with_base_reranker.source_nodes for node in with_base_reranker_nodes: if context in node.node.text or node.node.text in context: base_reranker_hits += 1 response_with_finetuned_reranker = ( retriever_with_finetuned_reranker.query(query) ) with_finetuned_reranker_nodes = ( response_with_finetuned_reranker.source_nodes ) for node in with_finetuned_reranker_nodes: if context in node.node.text or node.node.text in context: finetuned_reranker_hits += 1 assert ( len(with_finetuned_reranker_nodes) == len(with_base_reranker_nodes) == len(without_reranker_nodes) == 3 ) without_reranker_scores = [without_reranker_hits] base_reranker_scores = [base_reranker_hits] finetuned_reranker_scores = [finetuned_reranker_hits] reranker_eval_dict = { "Metric": "Hits", "OpenAI_Embeddings": without_reranker_scores, "Base_cross_encoder": base_reranker_scores,
"Finetuned_cross_encoder": finetuned_reranker_hits, "Total Relevant Context": total_number_of_context, } df_reranker_eval_results = pd.DataFrame(reranker_eval_dict) display(df_reranker_eval_results) get_ipython().system('wget -O test.csv https://www.dropbox.com/scl/fi/3lmzn6714oy358mq0vawm/test.csv?rlkey=yz16080te4van7fvnksi9kaed&dl=0') import pandas as pd import ast # Used to safely evaluate the string as a list df_test = pd.read_csv("/content/test.csv", index_col=0) df_test["questions"] = df_test["questions"].apply(ast.literal_eval) df_test["answers"] = df_test["answers"].apply(ast.literal_eval) print(f"Number of papers in the test sample:- {len(df_test)}") df_test.head(1) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) import os import openai import pandas as pd os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] gpt4 = OpenAI(temperature=0, model="gpt-4") evaluator_gpt4_pairwise = PairwiseComparisonEvaluator(llm=gpt4) pairwise_scores_list = [] no_reranker_dict_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] reference_answers_list = row["answers"] number_of_accepted_queries = 0 vector_index = VectorStoreIndex.from_documents(documents) query_engine = vector_index.as_query_engine(similarity_top_k=3) assert len(query_list) == len(reference_answers_list) pairwise_local_score = 0 for index in range(0, len(query_list)): query = query_list[index] reference = reference_answers_list[index] if reference != "Unacceptable": number_of_accepted_queries += 1 response = str(query_engine.query(query)) no_reranker_dict = { "query": query, "response": response, "reference": reference, } no_reranker_dict_list.append(no_reranker_dict) pairwise_eval_result = await evaluator_gpt4_pairwise.aevaluate( query, response=response, reference=reference ) pairwise_score = pairwise_eval_result.score pairwise_local_score += pairwise_score else: pass if number_of_accepted_queries > 0: avg_pairwise_local_score = ( pairwise_local_score / number_of_accepted_queries ) pairwise_scores_list.append(avg_pairwise_local_score) overal_pairwise_average_score = sum(pairwise_scores_list) / len( pairwise_scores_list ) df_responses = pd.DataFrame(no_reranker_dict_list) df_responses.to_csv("No_Reranker_Responses.csv") results_dict = { "name": ["Without Reranker"], "pairwise score": [overal_pairwise_average_score], } results_df = pd.DataFrame(results_dict) display(results_df) from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core.evaluation import PairwiseComparisonEvaluator import os import openai os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] rerank = SentenceTransformerRerank( model="cross-encoder/ms-marco-MiniLM-L-12-v2", top_n=3 ) gpt4 = OpenAI(temperature=0, model="gpt-4") evaluator_gpt4_pairwise = PairwiseComparisonEvaluator(llm=gpt4) pairwise_scores_list = [] base_reranker_dict_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] reference_answers_list = row["answers"] 
number_of_accepted_queries = 0 vector_index = VectorStoreIndex.from_documents(documents) query_engine = vector_index.as_query_engine( similarity_top_k=8, node_postprocessors=[rerank] ) assert len(query_list) == len(reference_answers_list) pairwise_local_score = 0 for index in range(0, len(query_list)): query = query_list[index] reference = reference_answers_list[index] if reference != "Unacceptable": number_of_accepted_queries += 1 response = str(query_engine.query(query)) base_reranker_dict = { "query": query, "response": response, "reference": reference, } base_reranker_dict_list.append(base_reranker_dict) pairwise_eval_result = await evaluator_gpt4_pairwise.aevaluate( query=query, response=response, reference=reference ) pairwise_score = pairwise_eval_result.score pairwise_local_score += pairwise_score else: pass if number_of_accepted_queries > 0: avg_pairwise_local_score = ( pairwise_local_score / number_of_accepted_queries ) pairwise_scores_list.append(avg_pairwise_local_score) overall_pairwise_average_score = sum(pairwise_scores_list) / len( pairwise_scores_list ) df_responses = pd.DataFrame(base_reranker_dict_list) df_responses.to_csv("Base_Reranker_Responses.csv") results_dict = { "name": ["With base cross-encoder/ms-marco-MiniLM-L-12-v2 as Reranker"], "pairwise score": [overall_pairwise_average_score], } results_df = pd.DataFrame(results_dict) display(results_df) from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core.evaluation import PairwiseComparisonEvaluator import os import openai os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] rerank = SentenceTransformerRerank( model="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2", top_n=3 ) gpt4 = OpenAI(temperature=0, model="gpt-4") evaluator_gpt4_pairwise = PairwiseComparisonEvaluator(llm=gpt4) pairwise_scores_list = [] finetuned_reranker_dict_list = [] for index, row in df_test.iterrows(): documents = [
Document(text=row["paper"])
llama_index.core.Document
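For reference, the evaluation loops in the rows above count a "hit" whenever a retrieved chunk and the gold context contain one another. A minimal, self-contained sketch of that containment check follows; the strings are toy stand-ins, not QASPER data.
def count_hits(retrieved_texts, gold_context):
    # A hit is any retrieved chunk that contains, or is contained by, the gold context.
    return sum(
        1
        for text in retrieved_texts
        if gold_context in text or text in gold_context
    )

retrieved = ["...models are aligned with RLHF and safety tuning...", "an unrelated chunk"]
print(count_hits(retrieved, "RLHF"))  # 1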
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().system('pip install llama-index gradientai -q') import os from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.finetuning import GradientFinetuneEngine os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY") os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>" from pydantic import BaseModel class Album(BaseModel): """Data model for an album.""" name: str artist: str from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler from llama_index.llms.openai import OpenAI from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser openai_handler = LlamaDebugHandler() openai_callback = CallbackManager([openai_handler]) openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback) gradient_handler = LlamaDebugHandler() gradient_callback = CallbackManager([gradient_handler]) base_model_slug = "llama2-7b-chat" gradient_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=300, callback_manager=gradient_callback, is_chat_model=True, ) from llama_index.core.llms import LLMMetadata prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ """ openai_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=openai_llm, verbose=True, ) gradient_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=gradient_llm, verbose=True, ) response = openai_program(movie_name="The Shining") print(str(response)) tmp = openai_handler.get_llm_inputs_outputs() print(tmp[0][0].payload["messages"][0]) response = gradient_program(movie_name="The Shining") print(str(response)) tmp = gradient_handler.get_llm_inputs_outputs() print(tmp[0][0].payload["messages"][0]) from llama_index.core.program import LLMTextCompletionProgram from pydantic import BaseModel from llama_index.llms.openai import OpenAI from llama_index.core.callbacks import GradientAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.output_parsers import PydanticOutputParser from typing import List class Song(BaseModel): """Data model for a song.""" title: str length_seconds: int class Album(BaseModel): """Data model for an album.""" name: str artist: str songs: List[Song] finetuning_handler = GradientAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) llm_gpt4 = OpenAI(model="gpt-4", callback_manager=callback_manager) prompt_template_str = """\ Generate an example album, with an artist and a list of songs. 
\ Using the movie {movie_name} as inspiration.\ """ openai_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=llm_gpt4, verbose=True, ) movie_names = [ "The Shining", "The Departed", "Titanic", "Goodfellas", "Pretty Woman", "Home Alone", "Caged Fury", "Edward Scissorhands", "Total Recall", "Ghost", "Tremors", "RoboCop", "Rocky V", ] from tqdm.notebook import tqdm for movie_name in tqdm(movie_names): output = openai_program(movie_name=movie_name) print(output.json()) events = finetuning_handler.get_finetuning_events() events finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl") get_ipython().system('cat mock_finetune_songs.jsonl') base_model_slug = "llama2-7b-chat" base_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=500, is_chat_model=True ) from llama_index.finetuning import GradientFinetuneEngine finetune_engine = GradientFinetuneEngine( base_model_slug=base_model_slug, name="movies_structured", data_path="mock_finetune_songs.jsonl", verbose=True, max_steps=200, batch_size=1, ) finetune_engine.model_adapter_id epochs = 2 for i in range(epochs): print(f"** EPOCH {i} **") finetune_engine.finetune() ft_llm = finetune_engine.get_finetuned_model( max_tokens=500, is_chat_model=True ) from llama_index.llms.gradient import GradientModelAdapterLLM new_prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ Please only generate one album. """ gradient_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=new_prompt_template_str, llm=ft_llm, verbose=True, ) gradient_program(movie_name="Goodfellas") gradient_program(movie_name="Chucky") base_gradient_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=base_llm, verbose=True, ) base_gradient_program(movie_name="Goodfellas") get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pydantic import Field from typing import List class Citation(BaseModel): """Citation class.""" author: str = Field( ..., description="Inferred first author (usually last name" ) year: int = Field(..., description="Inferred year") desc: str = Field( ..., description=( "Inferred description from the text of the work that the author is" " cited for" ), ) class Response(BaseModel): """List of author citations. Extracted over unstructured text. """ citations: List[Citation] = Field( ..., description=( "List of author citations (organized by author, year, and" " description)." 
), ) from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SimpleNodeParser from pathlib import Path from llama_index.core.callbacks import GradientAIFineTuningHandler loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) metadata = { "paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models" } docs = [Document(text=doc_text, metadata=metadata)] chunk_size = 1024 node_parser = SimpleNodeParser.from_defaults(chunk_size=chunk_size) nodes = node_parser.get_nodes_from_documents(docs) len(nodes) finetuning_handler = GradientAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) llm_gpt4 = OpenAI(model="gpt-4-0613", temperature=0.3) llm_gpt4.pydantic_program_mode = "llm" base_model_slug = "llama2-7b-chat" base_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=500, is_chat_model=True ) base_llm.pydantic_program_mode = "llm" eval_llm = OpenAI(model="gpt-4-0613", temperature=0) from llama_index.core.evaluation import DatasetGenerator from llama_index.core import SummaryIndex from llama_index.core import PromptTemplate from tqdm.notebook import tqdm from tqdm.asyncio import tqdm_asyncio fp = open("data/qa_pairs.jsonl", "w") question_gen_prompt = PromptTemplate( """ {query_str} Context: {context_str} Questions: """ ) question_gen_query = """\ Snippets from a research paper is given below. It contains citations. Please generate questions from the text asking about these citations. For instance, here are some sample questions: Which citations correspond to related works on transformer models? Tell me about authors that worked on advancing RLHF. Can you tell me citations corresponding to all computer vision works? 
\ """ qr_pairs = [] node_questions_tasks = [] for idx, node in enumerate(nodes[:39]): num_questions = 1 # change this number to increase number of nodes dataset_generator = DatasetGenerator( [node], question_gen_query=question_gen_query, text_question_template=question_gen_prompt, llm=eval_llm, metadata_mode="all", num_questions_per_chunk=num_questions, ) task = dataset_generator.agenerate_questions_from_nodes(num=num_questions) node_questions_tasks.append(task) node_questions_lists = await tqdm_asyncio.gather(*node_questions_tasks) len(node_questions_lists) node_questions_lists[1] import pickle pickle.dump(node_questions_lists, open("llama2_questions.pkl", "wb")) node_questions_lists = pickle.load(open("llama2_questions.pkl", "rb")) from llama_index.core import VectorStoreIndex gpt4_index = VectorStoreIndex(nodes[:39], callback_manager=callback_manager) gpt4_query_engine = gpt4_index.as_query_engine( output_cls=Response, llm=llm_gpt4, similarity_top_k=1 ) from json import JSONDecodeError for idx, node in enumerate(tqdm(nodes[:39])): node_questions_0 = node_questions_lists[idx] for question in node_questions_0: try: gpt4_query_engine.query(question) except Exception as e: print(f"Error for question {question}, {repr(e)}") pass finetuning_handler.save_finetuning_events("llama2_citation_events.jsonl") from llama_index.finetuning import GradientFinetuneEngine finetune_engine = GradientFinetuneEngine( base_model_slug=base_model_slug, name="llama2_structured", data_path="llama2_citation_events.jsonl", verbose=True, max_steps=200, batch_size=1, ) finetune_engine.model_adapter_id epochs = 2 for i in range(epochs): print(f"** EPOCH {i} **") finetune_engine.finetune() ft_llm = finetune_engine.get_finetuned_model(max_tokens=500) from llama_index.core import VectorStoreIndex vector_index =
VectorStoreIndex(nodes)
llama_index.core.VectorStoreIndex
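The structured-output contract that the completion programs above enforce is ordinary Pydantic validation. A small sketch with an invented payload (not real model output) showing what a conforming response must parse into:
from typing import List
from pydantic import BaseModel

class Song(BaseModel):
    """Data model for a song."""
    title: str
    length_seconds: int

class Album(BaseModel):
    """Data model for an album."""
    name: str
    artist: str
    songs: List[Song]

# Hypothetical values for illustration only.
raw = {
    "name": "Overlook",
    "artist": "The Caretakers",
    "songs": [{"title": "Room 237", "length_seconds": 237}],
}
print(Album(**raw))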
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/'") get_ipython().system("curl 'https://arxiv.org/pdf/2307.09288.pdf' -o 'data/llama2.pdf'") from llama_index.readers.file import UnstructuredReader documents =
UnstructuredReader()
llama_index.readers.file.UnstructuredReader
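A plausible continuation of the truncated snippet above, assuming the same load_data(file=...) pattern used elsewhere in these rows; note that parsing PDFs with Unstructured may require extra system dependencies.
from pathlib import Path
from llama_index.readers.file import UnstructuredReader

loader = UnstructuredReader()
# "data/llama2.pdf" is the file downloaded earlier in this snippet.
documents = loader.load_data(file=Path("data/llama2.pdf"))
print(f"Loaded {len(documents)} document(s)")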
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-packs-rag-fusion-query-pipeline') get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt") from llama_index.core import SimpleDirectoryReader reader =
SimpleDirectoryReader(input_files=["pg_essay.txt"])
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) from llama_index.storage.kvstore.firestore import FirestoreKVStore from llama_index.storage.docstore.firestore import FirestoreDocumentStore from llama_index.storage.index_store.firestore import FirestoreIndexStore kvstore = FirestoreKVStore() storage_context = StorageContext.from_defaults( docstore=FirestoreDocumentStore(kvstore), index_store=FirestoreIndexStore(kvstore), ) storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_table_index = SimpleKeywordTableIndex( nodes, storage_context=storage_context ) len(storage_context.docstore.docs) storage_context.persist() list_id = summary_index.index_id vector_id = vector_index.index_id keyword_id = keyword_table_index.index_id from llama_index.core import load_index_from_storage kvstore = FirestoreKVStore() storage_context = StorageContext.from_defaults( docstore=
FirestoreDocumentStore(kvstore)
llama_index.storage.docstore.firestore.FirestoreDocumentStore
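A hedged sketch of the reload step this row is building toward: assuming the StorageContext above is completed with both the Firestore docstore and index store, the saved index IDs recover each index without re-parsing the source documents.
from llama_index.core import load_index_from_storage

summary_index = load_index_from_storage(storage_context, index_id=list_id)
vector_index = load_index_from_storage(storage_context, index_id=vector_id)
keyword_table_index = load_index_from_storage(storage_context, index_id=keyword_id)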
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from IPython.display import Markdown, display def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) selector = LLMMultiSelector.from_defaults() from llama_index.core.tools import ToolMetadata tool_choices = [ ToolMetadata( name="covid_nyt", description=("This tool contains a NYT news article about COVID-19"), ), ToolMetadata( name="covid_wiki", description=("This tool contains the Wikipedia page about COVID-19"), ), ToolMetadata( name="covid_tesla", description=("This tool contains the Wikipedia page about apples"), ), ] display_prompt_dict(selector.get_prompts()) selector_result = selector.select( tool_choices, query="Tell me more about COVID-19" ) selector_result.selections from llama_index.core import PromptTemplate from llama_index.llms.openai import OpenAI query_gen_str = """\ You are a helpful assistant that generates multiple search queries based on a \ single input query. Generate {num_queries} search queries, one on each line, \ related to the following input query: Query: {query} Queries: """ query_gen_prompt = PromptTemplate(query_gen_str) llm = OpenAI(model="gpt-3.5-turbo") def generate_queries(query: str, llm, num_queries: int = 4): response = llm.predict( query_gen_prompt, num_queries=num_queries, query=query ) queries = response.split("\n") queries_str = "\n".join(queries) print(f"Generated queries:\n{queries_str}") return queries queries = generate_queries("What happened at Interleaf and Viaweb?", llm) queries from llama_index.core.indices.query.query_transform import HyDEQueryTransform from llama_index.llms.openai import OpenAI hyde =
HyDEQueryTransform(include_original=True)
llama_index.core.indices.query.query_transform.HyDEQueryTransform
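To make the HyDE transform concrete, a minimal sketch of wiring it in front of a query engine with TransformQueryEngine; the data path is borrowed from the other snippets here and is an assumption, not part of this row.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
hyde = HyDEQueryTransform(include_original=True)
# The query is first expanded into a hypothetical answer document, and that
# document (plus the original query) is what gets embedded for retrieval.
hyde_query_engine = TransformQueryEngine(index.as_query_engine(), query_transform=hyde)
print(hyde_query_engine.query("What happened at Interleaf and Viaweb?"))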
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core.chat_engine import SimpleChatEngine chat_engine = SimpleChatEngine.from_defaults() chat_engine.chat_repl() from llama_index.llms.openai import OpenAI llm =
OpenAI(temperature=0.0, model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
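Presumably the configured LLM is then handed to the chat engine; a short sketch of that step:
from llama_index.core.chat_engine import SimpleChatEngine

# Reuse the OpenAI LLM constructed above instead of the default.
chat_engine = SimpleChatEngine.from_defaults(llm=llm)
response = chat_engine.chat("Summarize yourself in one sentence.")
print(response)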
get_ipython().system('pip install llama-index-multi-modal-llms-ollama') get_ipython().system('pip install llama-index-readers-file') get_ipython().system('pip install unstructured') get_ipython().system('pip install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index-embeddings-clip') from llama_index.multi_modal_llms.ollama import OllamaMultiModal mm_model = OllamaMultiModal(model="llava:13b") from pathlib import Path from llama_index.core import SimpleDirectoryReader from PIL import Image import matplotlib.pyplot as plt input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') image_documents = SimpleDirectoryReader("./restaurant_images").load_data() imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ {query_str} Return the answer as a Pydantic object. The Pydantic schema is given below: """ mm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=
PydanticOutputParser(Restaurant)
llama_index.core.output_parsers.PydanticOutputParser
get_ipython().system('pip install llama-index-llms-dashscope') get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY') import os os.environ["DASHSCOPE_API_KEY"] = "YOUR_DASHSCOPE_API_KEY" from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels dashscope_llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX) resp = dashscope_llm.complete("How to make cake?") print(resp) responses = dashscope_llm.stream_complete("How to make cake?") for response in responses: print(response.delta, end="") from llama_index.core.base.llms.types import MessageRole, ChatMessage messages = [ ChatMessage( role=MessageRole.SYSTEM, content="You are a helpful assistant." ), ChatMessage(role=MessageRole.USER, content="How to make cake?"), ] resp = dashscope_llm.chat(messages) print(resp) responses = dashscope_llm.stream_chat(messages) for response in responses: print(response.delta, end="") messages = [ ChatMessage( role=MessageRole.SYSTEM, content="You are a helpful assistant." ),
ChatMessage(role=MessageRole.USER, content="How to make cake?")
llama_index.core.base.llms.types.ChatMessage
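A natural continuation of the multi-turn exchange above (a sketch; dashscope_llm is the client created in that row): append the assistant's reply to the history before sending the next user turn.
from llama_index.core.base.llms.types import ChatMessage, MessageRole

messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
    ChatMessage(role=MessageRole.USER, content="How to make cake?"),
]
resp = dashscope_llm.chat(messages)
# Carry the conversation forward: keep the assistant turn in the history.
messages.append(ChatMessage(role=MessageRole.ASSISTANT, content=resp.message.content))
messages.append(ChatMessage(role=MessageRole.USER, content="How to make it without sugar?"))
print(dashscope_llm.chat(messages))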
get_ipython().system('pip install llama-index-multi-modal-llms-ollama') get_ipython().system('pip install llama-index-readers-file') get_ipython().system('pip install unstructured') get_ipython().system('pip install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index-embeddings-clip') from llama_index.multi_modal_llms.ollama import OllamaMultiModal mm_model = OllamaMultiModal(model="llava:13b") from pathlib import Path from llama_index.core import SimpleDirectoryReader from PIL import Image import matplotlib.pyplot as plt input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') image_documents = SimpleDirectoryReader("./restaurant_images").load_data() imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ {query_str} Return the answer as a Pydantic object. The Pydantic schema is given below: """ mm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_model, verbose=True, ) response = mm_program(query_str="Can you summarize what is in the image?") for res in response: print(res) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg') from pathlib import Path from llama_index.readers.file import UnstructuredReader from llama_index.core.schema import ImageDocument loader = UnstructuredReader() documents = loader.load_data(file=Path("tesla_2021_10k.htm")) image_doc = ImageDocument(image_path="./shanghai.jpg") from llama_index.core import VectorStoreIndex from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-m3") vector_index = VectorStoreIndex.from_documents( documents, embed_model=embed_model ) query_engine = vector_index.as_query_engine() from llama_index.core.prompts import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline, FnComponent query_prompt_str = """\ Please expand the initial statement using the provided context from the Tesla 10K report.
{initial_statement} """ query_prompt_tmpl = PromptTemplate(query_prompt_str) qp = QueryPipeline( modules={ "mm_model": mm_model.as_query_component( partial={"image_documents": [image_doc]} ), "query_prompt": query_prompt_tmpl, "query_engine": query_engine, }, verbose=True, ) qp.add_chain(["mm_model", "query_prompt", "query_engine"]) rag_response = qp.run("Which Tesla Factory is shown in the image?") print(f"> Retrieval Augmented Response: {rag_response}") rag_response.source_nodes[1].get_content() get_ipython().system('wget "https://drive.usercontent.google.com/download?id=1qQDcaKuzgRGuEC1kxgYL_4mx7vG-v4gC&export=download&authuser=1&confirm=t&uuid=f944e95f-a31f-4b55-b68f-8ea67a6e90e5&at=APZUnTVZ6n1aOg7rtkcjBjw7Pt1D:1707010667927" -O mixed_wiki.zip') get_ipython().system('unzip mixed_wiki.zip') get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O ./mixed_wiki/tesla_2021_10k.htm') from llama_index.core.indices.multi_modal.base import ( MultiModalVectorStoreIndex, ) from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.embeddings.clip import ClipEmbedding import qdrant_client client = qdrant_client.QdrantClient(path="qdrant_mm_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) image_embed_model = ClipEmbedding() documents = SimpleDirectoryReader("./mixed_wiki/").load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, image_embed_model=image_embed_model, ) from llama_index.core.prompts import PromptTemplate from llama_index.core.query_engine import SimpleMultiModalQueryEngine qa_tmpl_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query.\n" "Query: {query_str}\n" "Answer: " ) qa_tmpl =
PromptTemplate(qa_tmpl_str)
llama_index.core.prompts.PromptTemplate
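PromptTemplate fills its placeholders with ordinary string formatting; a tiny sketch with an invented context snippet:
from llama_index.core.prompts import PromptTemplate

qa_tmpl = PromptTemplate(
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer the query.\n"
    "Query: {query_str}\n"
    "Answer: "
)
# Invented context for illustration only.
print(qa_tmpl.format(
    context_str="Gigafactory Shanghai began production in late 2019.",
    query_str="When did Gigafactory Shanghai begin production?",
))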
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.embeddings.huggingface import HuggingFaceEmbedding from llama_index.core.node_parser import SentenceWindowNodeParser from llama_index.core.node_parser import SentenceSplitter node_parser = SentenceWindowNodeParser.from_defaults( window_size=3, window_metadata_key="window", original_text_metadata_key="original_text", ) text_splitter = SentenceSplitter() llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1) embed_model = HuggingFaceEmbedding( model_name="sentence-transformers/all-mpnet-base-v2", max_length=512 ) from llama_index.core import Settings Settings.llm = llm Settings.embed_model = embed_model Settings.text_splitter = text_splitter get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf') from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader( input_files=["./IPCC_AR6_WGII_Chapter03.pdf"] ).load_data() nodes = node_parser.get_nodes_from_documents(documents) base_nodes = text_splitter.get_nodes_from_documents(documents) from llama_index.core import VectorStoreIndex sentence_index = VectorStoreIndex(nodes) base_index =
VectorStoreIndex(base_nodes)
llama_index.core.VectorStoreIndex
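Sentence-window retrieval is usually paired with a postprocessor that swaps each retrieved sentence for the window stored in its metadata before synthesis; a sketch using the sentence_index built above:
from llama_index.core.postprocessor import MetadataReplacementPostProcessor

query_engine = sentence_index.as_query_engine(
    similarity_top_k=2,
    # Replace each hit's text with the surrounding sentence window.
    node_postprocessors=[
        MetadataReplacementPostProcessor(target_metadata_key="window")
    ],
)
print(query_engine.query("What are the concerns surrounding the AMOC?"))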
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -o ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. """ if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter =
ReActChatFormatter()
llama_index.core.agent.ReActChatFormatter
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core.query_engine import SubQuestionQueryEngine import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" from llama_index.core import Settings Settings.llm =
OpenAI(temperature=0.2, model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
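A hedged sketch of where this row is headed: wrap a per-document query engine as a tool and let the sub-question engine decompose compound queries across tools. The data path and tool name below are assumptions borrowed from the other snippets, not part of this row.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.query_engine import SubQuestionQueryEngine

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_query_engine = VectorStoreIndex.from_documents(documents).as_query_engine()
query_engine_tools = [
    QueryEngineTool(
        query_engine=vector_query_engine,
        metadata=ToolMetadata(
            name="pg_essay",
            description="Paul Graham's essay on what he worked on",
        ),
    )
]
# The engine generates sub-questions per tool, answers each, then synthesizes.
query_engine = SubQuestionQueryEngine.from_defaults(
    query_engine_tools=query_engine_tools
)
response = query_engine.query("How was the author's life different before and after YC?")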
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic') import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader, Document from llama_index.core import SummaryIndex from llama_index.llms.openai import OpenAI from llama_index.llms.anthropic import Anthropic from llama_index.core.evaluation import CorrectnessEvaluator get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'") uber_docs0 = SimpleDirectoryReader( input_files=["./data/10k/uber_2021.pdf"] ).load_data() uber_doc = Document(text="\n\n".join([d.get_content() for d in uber_docs0])) from llama_index.core.utils import globals_helper num_tokens = len(globals_helper.tokenizer(uber_doc.get_content())) print(f"NUM TOKENS: {num_tokens}") context_str = "Jerry's favorite snack is Hot Cheetos." query_str = "What is Jerry's favorite snack?" def augment_doc(doc_str, context, position): """Augment doc with additional context at a given position.""" doc_str1 = doc_str[:position] doc_str2 = doc_str[position:] return f"{doc_str1}...\n\n{context}\n\n...{doc_str2}" test_str = augment_doc( uber_doc.get_content(), context_str, int(0.5 * len(uber_doc.get_content())) ) async def run_experiments( doc, position_percentiles, context_str, query, llm, response_mode="compact" ): eval_llm = OpenAI(model="gpt-4-1106-preview") correctness_evaluator = CorrectnessEvaluator(llm=eval_llm) eval_scores = {} for idx, position_percentile in enumerate(position_percentiles): print(f"Position percentile: {position_percentile}") position_idx = int(position_percentile * len(doc.get_content())) new_doc_str = augment_doc( doc.get_content(), context_str, position_idx ) new_doc =
Document(text=new_doc_str)
llama_index.core.Document
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-flag-embedding-reranker') get_ipython().system('pip install llama-index') get_ipython().system('pip install git+https://github.com/FlagOpen/FlagEmbedding.git') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os OPENAI_API_TOKEN = "sk-" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN documents = SimpleDirectoryReader("./data/paul_graham").load_data() from llama_index.embeddings.huggingface import HuggingFaceEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
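Given the packages installed in this row, the next step is presumably to plug the BGE reranker in as a node postprocessor over a wider candidate set. A sketch; the model name and top-k values are typical choices, not taken from this row.
from llama_index.core import VectorStoreIndex
from llama_index.postprocessor.flag_embedding_reranker import FlagEmbeddingReranker

index = VectorStoreIndex.from_documents(documents)
rerank = FlagEmbeddingReranker(model="BAAI/bge-reranker-large", top_n=5)
# Retrieve a wide candidate set, then let the cross-encoder reorder and trim it.
query_engine = index.as_query_engine(similarity_top_k=10, node_postprocessors=[rerank])
print(query_engine.query("What did the author do growing up?"))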
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-langchain') get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient') get_ipython().system('pip install llama-index') get_ipython().run_line_magic('pip', 'install llama-index --quiet') get_ipython().run_line_magic('pip', 'install gradientai --quiet') import os os.environ["GRADIENT_ACCESS_TOKEN"] = "{GRADIENT_ACCESS_TOKEN}" os.environ["GRADIENT_WORKSPACE_ID"] = "{GRADIENT_WORKSPACE_ID}" from llama_index.llms.gradient import GradientBaseModelLLM llm = GradientBaseModelLLM( base_model_slug="llama2-7b-chat", max_tokens=400, ) result = llm.complete("Can you tell me about large language models?") print(result) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.embeddings.langchain import LangchainEmbedding from langchain.embeddings import HuggingFaceEmbeddings from llama_index.core.node_parser import SentenceSplitter get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-readers-github') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os os.environ["GITHUB_TOKEN"] = "" import os from llama_index.readers.github import GitHubRepositoryIssuesReader, GitHubIssuesClient github_client =
GitHubIssuesClient()
llama_index.readers.github.GitHubIssuesClient
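A sketch of how the client is typically paired with the issues reader; the owner and repo values are placeholders, not taken from this row.
from llama_index.readers.github import GitHubRepositoryIssuesReader

loader = GitHubRepositoryIssuesReader(
    github_client,
    owner="run-llama",   # placeholder owner
    repo="llama_index",  # placeholder repo
    verbose=True,
)
docs = loader.load_data()
print(f"Loaded {len(docs)} issues")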
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") import os from getpass import getpass if os.getenv("HONEYHIVE_API_KEY") is None: os.environ["HONEYHIVE_API_KEY"] = getpass( "Paste your HoneyHive key from:" " https://app.honeyhive.ai/settings/account\n" ) print("HoneyHive API key configured") get_ipython().system('pip install llama-index') from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks import LlamaDebugHandler from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext, ) from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from honeyhive.utils.llamaindex_tracer import HoneyHiveLlamaIndexTracer from llama_index.core import Settings Settings.llm =
OpenAI(model="gpt-4", temperature=0)
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index qdrant_client pyMuPDF tools frontend git+https://github.com/openai/CLIP.git easyocr') import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.patches import Patch import io from PIL import Image, ImageDraw import numpy as np import csv import pandas as pd from torchvision import transforms from transformers import AutoModelForObjectDetection import torch import openai import os import fitz device = "cuda" if torch.cuda.is_available() else "cpu" OPENAI_API_TOKEN = "sk-<your-openai-api-token>" openai.api_key = OPENAI_API_TOKEN get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "llama2.pdf"') pdf_file = "llama2.pdf" output_directory_path, _ = os.path.splitext(pdf_file) if not os.path.exists(output_directory_path): os.makedirs(output_directory_path) pdf_document = fitz.open(pdf_file) for page_number in range(pdf_document.page_count): page = pdf_document[page_number] pix = page.get_pixmap() image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) image.save(f"./{output_directory_path}/page_{page_number + 1}.png") pdf_document.close() from PIL import Image import matplotlib.pyplot as plt import os image_paths = [] for img_path in os.listdir("./llama2"): image_paths.append(str(os.path.join("./llama2", img_path))) def plot_images(image_paths): images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(3, 3, images_shown + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) images_shown += 1 if images_shown >= 9: break plot_images(image_paths[9:12]) import qdrant_client from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import VectorStoreIndex, StorageContext from llama_index.core.indices import MultiModalVectorStoreIndex from llama_index.core.schema import ImageDocument from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.schema import ImageNode from llama_index.multi_modal_llms.openai import OpenAIMultiModal openai_mm_llm = OpenAIMultiModal( model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500 ) documents_images = SimpleDirectoryReader("./llama2/").load_data() client = qdrant_client.QdrantClient(path="qdrant_index") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) index = MultiModalVectorStoreIndex.from_documents( documents_images, storage_context=storage_context, ) retriever_engine = index.as_retriever(image_similarity_top_k=2) from llama_index.core.indices.multi_modal.retriever import ( MultiModalVectorIndexRetriever, ) query = "Compare llama2 with llama1?" 
assert isinstance(retriever_engine, MultiModalVectorIndexRetriever) retrieval_results = retriever_engine.text_to_image_retrieve(query) retrieved_images = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_images.append(res_node.node.metadata["file_path"]) else: display_source_node(res_node, source_length=200) plot_images(retrieved_images) retrieved_images image_documents = [
ImageDocument(image_path=image_path)
llama_index.core.schema.ImageDocument
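A plausible follow-on for the retrieval above (a sketch): hand the retrieved images back to the multi-modal LLM so it can answer the comparison question from the figures.
response = openai_mm_llm.complete(
    prompt="Compare llama2 with llama1 using the retrieved figures.",
    image_documents=image_documents,
)
print(response)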
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import camelot from llama_index.core import VectorStoreIndex from llama_index.core.query_engine import PandasQueryEngine from llama_index.core.schema import IndexNode from llama_index.llms.openai import OpenAI from llama_index.readers.file import PyMuPDFReader from typing import List import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") file_path = "billionaires_page.pdf" reader = PyMuPDFReader() docs = reader.load(file_path) def get_tables(path: str, pages: List[int]): table_dfs = [] for page in pages: table_list = camelot.read_pdf(path, pages=str(page)) table_df = table_list[0].df table_df = ( table_df.rename(columns=table_df.iloc[0]) .drop(table_df.index[0]) .reset_index(drop=True) ) table_dfs.append(table_df) return table_dfs table_dfs = get_tables(file_path, pages=[3, 25]) table_dfs[0] table_dfs[1] llm = OpenAI(model="gpt-4") df_query_engines = [ PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs ] response = df_query_engines[0].query( "What's the net worth of the second richest billionaire in 2023?" ) print(str(response)) response = df_query_engines[1].query( "How many billionaires were there in 2009?" ) print(str(response)) from llama_index.core import Settings doc_nodes = Settings.node_parser.get_nodes_from_documents(docs) summaries = [ ( "This node provides information about the world's richest billionaires" " in 2023" ), ( "This node provides information on the number of billionaires and" " their combined net worth from 2000 to 2023." ), ] df_nodes = [ IndexNode(text=summary, index_id=f"pandas{idx}") for idx, summary in enumerate(summaries) ] df_id_query_engine_mapping = { f"pandas{idx}": df_query_engine for idx, df_query_engine in enumerate(df_query_engines) } vector_index = VectorStoreIndex(doc_nodes + df_nodes) vector_retriever = vector_index.as_retriever(similarity_top_k=1) vector_index0 = VectorStoreIndex(doc_nodes) vector_query_engine0 = vector_index0.as_query_engine() from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import get_response_synthesizer recursive_retriever = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever}, query_engine_dict=df_id_query_engine_mapping, verbose=True, ) response_synthesizer =
get_response_synthesizer(response_mode="compact")
llama_index.core.get_response_synthesizer
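The synthesizer is then typically paired with the recursive retriever in a RetrieverQueryEngine; a sketch of that assembly and a query that routes down to one of the table engines:
from llama_index.core.query_engine import RetrieverQueryEngine

query_engine = RetrieverQueryEngine.from_args(
    recursive_retriever, response_synthesizer=response_synthesizer
)
response = query_engine.query(
    "What's the net worth of the second richest billionaire in 2023?"
)
print(str(response))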
get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai') get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-nebula') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-azure-openai') get_ipython().system('pip install llama-index') import os os.environ["OPENAI_API_KEY"] = "sk-..." import logging import sys logging.basicConfig( stream=sys.stdout, level=logging.INFO ) # logging.DEBUG for more verbose output from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.chunk_size = 512 from llama_index.llms.azure_openai import AzureOpenAI from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding api_key = "<api-key>" azure_endpoint = "https://<your-resource-name>.openai.azure.com/" api_version = "2023-07-01-preview" llm = AzureOpenAI( model="gpt-35-turbo-16k", deployment_name="my-custom-llm", api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version, ) embed_model = AzureOpenAIEmbedding( model="text-embedding-ada-002", deployment_name="my-custom-embedding", api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version, ) from llama_index.core import Settings Settings.llm = llm Settings.embed_model = embed_model Settings.chunk_size = 512 get_ipython().run_line_magic('pip', 'install ipython-ngql nebula3-python') os.environ["NEBULA_USER"] = "root" os.environ["NEBULA_PASSWORD"] = "nebula" # default is "nebula" os.environ[ "NEBULA_ADDRESS" ] = "127.0.0.1:9669" # assumed we have NebulaGraph installed locally space_name = "llamaindex" edge_types, rel_prop_names = ["relationship"], [ "relationship" ] # default, could be omit if create from an empty kg tags = ["entity"] # default, could be omit if create from an empty kg from llama_index.core import StorageContext from llama_index.graph_stores.nebula import NebulaGraphStore graph_store = NebulaGraphStore( space_name=space_name, edge_types=edge_types, rel_prop_names=rel_prop_names, tags=tags, ) storage_context =
StorageContext.from_defaults(graph_store=graph_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks import LlamaDebugHandler from llama_index.callbacks.wandb import WandbCallbackHandler from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext, ) from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm =
OpenAI(model="gpt-4", temperature=0)
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() from llama_index.core.evaluation import generate_question_context_pairs from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() node_parser = SentenceSplitter(chunk_size=512) nodes = node_parser.get_nodes_from_documents(documents) for idx, node in enumerate(nodes): node.id_ = f"node_{idx}" llm = OpenAI(model="gpt-4") vector_index = VectorStoreIndex(nodes) retriever = vector_index.as_retriever(similarity_top_k=2) retrieved_nodes = retriever.retrieve("What did the author do growing up?") from llama_index.core.response.notebook_utils import display_source_node for node in retrieved_nodes: display_source_node(node, source_length=1000) from llama_index.core.evaluation import ( generate_question_context_pairs, EmbeddingQAFinetuneDataset, ) qa_dataset = generate_question_context_pairs( nodes, llm=llm, num_questions_per_chunk=2 ) queries = qa_dataset.queries.values() print(list(queries)[2]) qa_dataset.save_json("pg_eval_dataset.json") qa_dataset =
EmbeddingQAFinetuneDataset.from_json("pg_eval_dataset.json")
llama_index.core.evaluation.EmbeddingQAFinetuneDataset.from_json
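With the dataset in hand, the usual next step is scoring the retriever on hit rate and MRR; a sketch using the retriever defined earlier in this row (top-level await works here because nest_asyncio is applied above):
from llama_index.core.evaluation import RetrieverEvaluator

retriever_evaluator = RetrieverEvaluator.from_metric_names(
    ["mrr", "hit_rate"], retriever=retriever
)
eval_results = await retriever_evaluator.aevaluate_dataset(qa_dataset)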
get_ipython().system('pip install llama-index') get_ipython().system('pip install clickhouse_connect') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from os import environ import clickhouse_connect environ["OPENAI_API_KEY"] = "sk-*" client = clickhouse_connect.get_client( host="localhost", port=8123, username="default", password="", ) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.clickhouse import ClickHouseVectorStore documents = SimpleDirectoryReader("../data/paul_graham").load_data() print("Document ID:", documents[0].doc_id) print("Number of Documents: ", len(documents)) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") loader = SimpleDirectoryReader("./data/paul_graham/") documents = loader.load_data() for file in loader.input_files: print(file) from llama_index.core import StorageContext for document in documents: document.metadata = {"user_id": "123", "favorite_color": "blue"} vector_store = ClickHouseVectorStore(clickhouse_client=client) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
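A sketch of the indexing step this row leads into: embed the documents into the ClickHouse-backed store via the storage context, then query as usual.
from llama_index.core import VectorStoreIndex

index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
print(query_engine.query("What did the author do growing up?"))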
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY') get_ipython().system('pip install llama-index pypdf') get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader =
PDFReader()
llama_index.readers.file.PDFReader
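A short sketch of how the completed PDFReader is typically consumed; the file path comes from the wget call in the prompt, and collapsing all pages into one Document is a common convention, not the only option.

from llama_index.core import Document

# Load the PDF and merge the per-page docs into a single Document.
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
docs = [Document(text="\n\n".join(d.get_content() for d in docs0))]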
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla') get_ipython().system('pip install pyepsilla') get_ipython().system('pip install llama-index') import logging import sys from llama_index.core import SimpleDirectoryReader, Document, StorageContext from llama_index.core import VectorStoreIndex from llama_index.vector_stores.epsilla import EpsillaVectorStore import textwrap import openai import getpass OPENAI_API_KEY = getpass.getpass("OpenAI API Key:") openai.api_key = OPENAI_API_KEY get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
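The Epsilla row stops at the reader; a hedged sketch of the likely continuation, assuming a local Epsilla instance reachable through pyepsilla's default client (the `db_path` value is illustrative).

from pyepsilla import vectordb

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
client = vectordb.Client()  # assumes Epsilla is running locally on the default port
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)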
import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import SimpleDirectoryReader documents_1 = SimpleDirectoryReader( input_files=["../../community/integrations/vector_stores.md"] ).load_data() documents_2 = SimpleDirectoryReader( input_files=["../../module_guides/storing/vector_stores.md"] ).load_data() from llama_index.core import VectorStoreIndex index_1 = VectorStoreIndex.from_documents(documents_1) index_2 = VectorStoreIndex.from_documents(documents_2) from llama_index.core.retrievers import QueryFusionRetriever retriever = QueryFusionRetriever( [index_1.as_retriever(), index_2.as_retriever()], similarity_top_k=2, num_queries=4, # set this to 1 to disable query generation use_async=True, verbose=True, ) import nest_asyncio nest_asyncio.apply() nodes_with_scores = retriever.retrieve("How do I setup a chroma vector store?") for node in nodes_with_scores: print(f"Score: {node.score:.2f} - {node.text[:100]}...") from llama_index.core.query_engine import RetrieverQueryEngine query_engine = RetrieverQueryEngine.from_args(retriever) response = query_engine.query( "How do I setup a chroma vector store? Can you give an example?" ) from llama_index.core.response.notebook_utils import display_response
display_response(response)
llama_index.core.response.notebook_utils.display_response
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex from llama_index.core import PromptTemplate from IPython.display import Markdown, display get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI gpt35_llm = OpenAI(model="gpt-3.5-turbo") gpt4_llm = OpenAI(model="gpt-4") index = VectorStoreIndex.from_documents(documents) query_str = "What are the potential risks associated with the use of Llama 2 as mentioned in the context?" query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt35_llm) vector_retriever = index.as_retriever(similarity_top_k=2) response = query_engine.query(query_str) print(str(response)) def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) from langchain import hub langchain_prompt = hub.pull("rlm/rag-prompt") from llama_index.core.prompts import LangchainPromptTemplate lc_prompt_tmpl = LangchainPromptTemplate( template=langchain_prompt, template_var_mappings={"query_str": "question", "context_str": "context"}, ) query_engine.update_prompts( {"response_synthesizer:text_qa_template": lc_prompt_tmpl} ) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) response = query_engine.query(query_str) print(str(response)) from llama_index.core.schema import TextNode few_shot_nodes = [] for line in open("../llama2_qa_citation_events.jsonl", "r"): few_shot_nodes.append(TextNode(text=line)) few_shot_index = VectorStoreIndex(few_shot_nodes) few_shot_retriever = few_shot_index.as_retriever(similarity_top_k=2) import json def few_shot_examples_fn(**kwargs): query_str = kwargs["query_str"] retrieved_nodes = few_shot_retriever.retrieve(query_str) result_strs = [] for n in retrieved_nodes: raw_dict = json.loads(n.get_content()) query = raw_dict["query"] response_dict = json.loads(raw_dict["response"]) result_str = f"""\ Query: {query} Response: {response_dict}""" result_strs.append(result_str) return "\n\n".join(result_strs) qa_prompt_tmpl_str = """\ Context information is below. --------------------- {context_str} --------------------- Given the context information and not prior knowledge, \ answer the query asking about citations over different topics. Please provide your answer in the form of a structured JSON format containing \ a list of authors as the citations. Some examples are given below. {few_shot_examples} Query: {query_str} Answer: \ """ qa_prompt_tmpl = PromptTemplate( qa_prompt_tmpl_str, function_mappings={"few_shot_examples": few_shot_examples_fn}, ) citation_query_str = ( "Which citations are mentioned in the section on Safety RLHF?" 
) print( qa_prompt_tmpl.format( query_str=citation_query_str, context_str="test_context" ) ) query_engine.update_prompts( {"response_synthesizer:text_qa_template": qa_prompt_tmpl} ) display_prompt_dict(query_engine.get_prompts()) response = query_engine.query(citation_query_str) print(str(response)) print(response.source_nodes[1].get_content()) from llama_index.core.postprocessor import ( NERPIINodePostprocessor, SentenceEmbeddingOptimizer, ) from llama_index.core import QueryBundle from llama_index.core.schema import NodeWithScore, TextNode pii_processor = NERPIINodePostprocessor(llm=gpt4_llm) def filter_pii_fn(**kwargs): query_bundle =
QueryBundle(query_str=kwargs["query_str"])
llama_index.core.QueryBundle
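The prompt cuts `filter_pii_fn` off right after the QueryBundle. One plausible completion, hedged (the `context_str` kwarg name follows the prompt-template convention and is an assumption): run the NER PII postprocessor over the raw context and return the masked text.

from llama_index.core.schema import NodeWithScore, TextNode

def filter_pii_fn(**kwargs):
    query_bundle = QueryBundle(query_str=kwargs["query_str"])
    # Wrap the raw context string in a node so the postprocessor can mask PII entities.
    new_nodes = pii_processor.postprocess_nodes(
        [NodeWithScore(node=TextNode(text=kwargs["context_str"]))],
        query_bundle,
    )
    return new_nodes[0].get_content()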
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray') get_ipython().system('pip install llama-index') import os import sys import logging import textwrap import warnings warnings.filterwarnings("ignore") os.environ["TOKENIZERS_PARALLELISM"] = "false" from llama_index.core import ( GPTVectorStoreIndex, SimpleDirectoryReader, Document, ) from llama_index.vector_stores.docarray import DocArrayHnswVectorStore from IPython.display import Markdown, display import os os.environ["OPENAI_API_KEY"] = "<your openai key>" get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print( "Document ID:", documents[0].doc_id, "Document Hash:", documents[0].doc_hash, ) from llama_index.core import StorageContext vector_store = DocArrayHnswVectorStore(work_dir="hnsw_index") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = GPTVectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") print(textwrap.fill(str(response), 100)) response = query_engine.query("What was a hard moment for the author?") print(textwrap.fill(str(response), 100)) from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", }, ), ] from llama_index.core import StorageContext vector_store =
DocArrayHnswVectorStore(work_dir="hnsw_filters")
llama_index.vector_stores.docarray.DocArrayHnswVectorStore
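Given the movie nodes defined in the prompt, the usual follow-through (a sketch, not part of the row) is to index them into the new store and retrieve with a metadata filter.

from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex(nodes, storage_context=storage_context)
# Only nodes whose metadata matches the filter are eligible for retrieval.
filters = MetadataFilters(filters=[ExactMatchFilter(key="theme", value="Mafia")])
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What is inception about?"))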
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-cross-encoders') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system('pip install datasets --quiet') get_ipython().system('pip install sentence-transformers --quiet') get_ipython().system('pip install openai --quiet') from datasets import load_dataset import random dataset = load_dataset("allenai/qasper") train_dataset = dataset["train"] validation_dataset = dataset["validation"] test_dataset = dataset["test"] random.seed(42) # Set a random seed for reproducibility train_sampled_indices = random.sample(range(len(train_dataset)), 800) train_samples = [train_dataset[i] for i in train_sampled_indices] test_sampled_indices = random.sample(range(len(test_dataset)), 80) test_samples = [test_dataset[i] for i in test_sampled_indices] from typing import List def get_full_text(sample: dict) -> str: """ :param dict sample: the row sample from QASPER """ title = sample["title"] abstract = sample["abstract"] sections_list = sample["full_text"]["section_name"] paragraph_list = sample["full_text"]["paragraphs"] combined_sections_with_paras = "" if len(sections_list) == len(paragraph_list): combined_sections_with_paras += title + "\t" combined_sections_with_paras += abstract + "\t" for index in range(0, len(sections_list)): combined_sections_with_paras += str(sections_list[index]) + "\t" combined_sections_with_paras += "".join(paragraph_list[index]) return combined_sections_with_paras else: print("Not the same number of sections as paragraphs list") def get_questions(sample: dict) -> List[str]: """ :param dict sample: the row sample from QASPER """ questions_list = sample["qas"]["question"] return questions_list doc_qa_dict_list = [] for train_sample in train_samples: full_text = get_full_text(train_sample) questions_list = get_questions(train_sample) local_dict = {"paper": full_text, "questions": questions_list} doc_qa_dict_list.append(local_dict) len(doc_qa_dict_list) import pandas as pd df_train = pd.DataFrame(doc_qa_dict_list) df_train.to_csv("train.csv") """ The Answers field in the dataset follows the below format:- Unanswerable answers have "unanswerable" set to true. The remaining answers have exactly one of the following fields being non-empty. "extractive_spans" are spans in the paper which serve as the answer. "free_form_answer" is a written out answer. "yes_no" is true iff the answer is Yes, and false iff the answer is No. We accept only free-form answers and for all the other kinds of answers we set their value to 'Unacceptable', to better evaluate the performance of the query engine using pairwise comparison evaluator as it uses GPT-4 which is biased towards preferring long answers more. https://www.anyscale.com/blog/a-comprehensive-guide-for-building-rag-based-llm-applications-part-1 So in the case of 'yes_no' answers it can favour Query Engine answers more than reference answers. Also in the case of extracted spans it can favour reference answers more than Query engine generated answers.
""" eval_doc_qa_answer_list = [] def get_answers(sample: dict) -> List[str]: """ :param dict sample: the row sample from the train split of QASPER """ final_answers_list = [] answers = sample["qas"]["answers"] for answer in answers: local_answer = "" types_of_answers = answer["answer"][0] if types_of_answers["unanswerable"] == False: if types_of_answers["free_form_answer"] != "": local_answer = types_of_answers["free_form_answer"] else: local_answer = "Unacceptable" else: local_answer = "Unacceptable" final_answers_list.append(local_answer) return final_answers_list for test_sample in test_samples: full_text = get_full_text(test_sample) questions_list = get_questions(test_sample) answers_list = get_answers(test_sample) local_dict = { "paper": full_text, "questions": questions_list, "answers": answers_list, } eval_doc_qa_answer_list.append(local_dict) len(eval_doc_qa_answer_list) import pandas as pd df_test = pd.DataFrame(eval_doc_qa_answer_list) df_test.to_csv("test.csv") get_ipython().system('pip install llama-index --quiet') import os from llama_index.core import SimpleDirectoryReader import openai from llama_index.finetuning.cross_encoders.dataset_gen import ( generate_ce_fine_tuning_dataset, generate_synthetic_queries_over_documents, ) from llama_index.finetuning.cross_encoders import CrossEncoderFinetuneEngine os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import Document final_finetuning_data_list = [] for paper in doc_qa_dict_list: questions_list = paper["questions"] documents = [Document(text=paper["paper"])] local_finetuning_dataset = generate_ce_fine_tuning_dataset( documents=documents, questions_list=questions_list, max_chunk_length=256, top_k=5, ) final_finetuning_data_list.extend(local_finetuning_dataset) len(final_finetuning_data_list) import pandas as pd df_finetuning_dataset = pd.DataFrame(final_finetuning_data_list) df_finetuning_dataset.to_csv("fine_tuning.csv") finetuning_dataset = final_finetuning_data_list finetuning_dataset[0] get_ipython().system('wget -O test.csv https://www.dropbox.com/scl/fi/3lmzn6714oy358mq0vawm/test.csv?rlkey=yz16080te4van7fvnksi9kaed&dl=0') import pandas as pd import ast # Used to safely evaluate the string as a list df_test = pd.read_csv("/content/test.csv", index_col=0) df_test["questions"] = df_test["questions"].apply(ast.literal_eval) df_test["answers"] = df_test["answers"].apply(ast.literal_eval) print(f"Number of papers in the test sample:- {len(df_test)}") from llama_index.core import Document final_eval_data_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] local_eval_dataset = generate_ce_fine_tuning_dataset( documents=documents, questions_list=query_list, max_chunk_length=256, top_k=5, ) relevant_query_list = [] relevant_context_list = [] for item in local_eval_dataset: if item.score == 1: relevant_query_list.append(item.query) relevant_context_list.append(item.context) if len(relevant_query_list) > 0: final_eval_data_list.append( { "paper": row["paper"], "questions": relevant_query_list, "context": relevant_context_list, } ) len(final_eval_data_list) import pandas as pd df_finetuning_dataset = pd.DataFrame(final_eval_data_list) df_finetuning_dataset.to_csv("reranking_test.csv") get_ipython().system('pip install huggingface_hub --quiet') from huggingface_hub import notebook_login notebook_login() from sentence_transformers import SentenceTransformer finetuning_engine = CrossEncoderFinetuneEngine( 
dataset=finetuning_dataset, epochs=2, batch_size=8 ) finetuning_engine.finetune() finetuning_engine.push_to_hub( repo_id="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2" ) get_ipython().system('pip install nest-asyncio --quiet') import nest_asyncio nest_asyncio.apply() get_ipython().system('wget -O reranking_test.csv https://www.dropbox.com/scl/fi/mruo5rm46k1acm1xnecev/reranking_test.csv?rlkey=hkniwowq0xrc3m0ywjhb2gf26&dl=0') import pandas as pd import ast df_reranking = pd.read_csv("/content/reranking_test.csv", index_col=0) df_reranking["questions"] = df_reranking["questions"].apply(ast.literal_eval) df_reranking["context"] = df_reranking["context"].apply(ast.literal_eval) print(f"Number of papers in the reranking eval dataset:- {len(df_reranking)}") df_reranking.head(1) from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.core.retrievers import VectorIndexRetriever from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core import Settings import os import openai import pandas as pd os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] Settings.chunk_size = 256 rerank_base = SentenceTransformerRerank( model="cross-encoder/ms-marco-MiniLM-L-12-v2", top_n=3 ) rerank_finetuned = SentenceTransformerRerank( model="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2", top_n=3 ) without_reranker_hits = 0 base_reranker_hits = 0 finetuned_reranker_hits = 0 total_number_of_context = 0 for index, row in df_reranking.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] context_list = row["context"] assert len(query_list) == len(context_list) vector_index = VectorStoreIndex.from_documents(documents) retriever_without_reranker = vector_index.as_query_engine( similarity_top_k=3, response_mode="no_text" ) retriever_with_base_reranker = vector_index.as_query_engine( similarity_top_k=8, response_mode="no_text", node_postprocessors=[rerank_base], ) retriever_with_finetuned_reranker = vector_index.as_query_engine( similarity_top_k=8, response_mode="no_text", node_postprocessors=[rerank_finetuned], ) for index in range(0, len(query_list)): query = query_list[index] context = context_list[index] total_number_of_context += 1 response_without_reranker = retriever_without_reranker.query(query) without_reranker_nodes = response_without_reranker.source_nodes for node in without_reranker_nodes: if context in node.node.text or node.node.text in context: without_reranker_hits += 1 response_with_base_reranker = retriever_with_base_reranker.query(query) with_base_reranker_nodes = response_with_base_reranker.source_nodes for node in with_base_reranker_nodes: if context in node.node.text or node.node.text in context: base_reranker_hits += 1 response_with_finetuned_reranker = ( retriever_with_finetuned_reranker.query(query) ) with_finetuned_reranker_nodes = ( response_with_finetuned_reranker.source_nodes ) for node in with_finetuned_reranker_nodes: if context in node.node.text or node.node.text in context: finetuned_reranker_hits += 1 assert ( len(with_finetuned_reranker_nodes) == len(with_base_reranker_nodes) == len(without_reranker_nodes) == 3 ) without_reranker_scores = [without_reranker_hits] base_reranker_scores = [base_reranker_hits] finetuned_reranker_scores = [finetuned_reranker_hits] reranker_eval_dict = { "Metric": "Hits", "OpenAI_Embeddings": without_reranker_scores, "Base_cross_encoder": base_reranker_scores, 
"Finetuned_cross_encoder": finetuned_reranker_hits, "Total Relevant Context": total_number_of_context, } df_reranker_eval_results = pd.DataFrame(reranker_eval_dict) display(df_reranker_eval_results) get_ipython().system('wget -O test.csv https://www.dropbox.com/scl/fi/3lmzn6714oy358mq0vawm/test.csv?rlkey=yz16080te4van7fvnksi9kaed&dl=0') import pandas as pd import ast # Used to safely evaluate the string as a list df_test = pd.read_csv("/content/test.csv", index_col=0) df_test["questions"] = df_test["questions"].apply(ast.literal_eval) df_test["answers"] = df_test["answers"].apply(ast.literal_eval) print(f"Number of papers in the test sample:- {len(df_test)}") df_test.head(1) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) import os import openai import pandas as pd os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] gpt4 = OpenAI(temperature=0, model="gpt-4") evaluator_gpt4_pairwise = PairwiseComparisonEvaluator(llm=gpt4) pairwise_scores_list = [] no_reranker_dict_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] reference_answers_list = row["answers"] number_of_accepted_queries = 0 vector_index = VectorStoreIndex.from_documents(documents) query_engine = vector_index.as_query_engine(similarity_top_k=3) assert len(query_list) == len(reference_answers_list) pairwise_local_score = 0 for index in range(0, len(query_list)): query = query_list[index] reference = reference_answers_list[index] if reference != "Unacceptable": number_of_accepted_queries += 1 response = str(query_engine.query(query)) no_reranker_dict = { "query": query, "response": response, "reference": reference, } no_reranker_dict_list.append(no_reranker_dict) pairwise_eval_result = await evaluator_gpt4_pairwise.aevaluate( query, response=response, reference=reference ) pairwise_score = pairwise_eval_result.score pairwise_local_score += pairwise_score else: pass if number_of_accepted_queries > 0: avg_pairwise_local_score = ( pairwise_local_score / number_of_accepted_queries ) pairwise_scores_list.append(avg_pairwise_local_score) overal_pairwise_average_score = sum(pairwise_scores_list) / len( pairwise_scores_list ) df_responses = pd.DataFrame(no_reranker_dict_list) df_responses.to_csv("No_Reranker_Responses.csv") results_dict = { "name": ["Without Reranker"], "pairwise score": [overal_pairwise_average_score], } results_df = pd.DataFrame(results_dict) display(results_df) from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core.evaluation import PairwiseComparisonEvaluator import os import openai os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] rerank = SentenceTransformerRerank( model="cross-encoder/ms-marco-MiniLM-L-12-v2", top_n=3 ) gpt4 = OpenAI(temperature=0, model="gpt-4") evaluator_gpt4_pairwise = PairwiseComparisonEvaluator(llm=gpt4) pairwise_scores_list = [] base_reranker_dict_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] reference_answers_list = row["answers"] 
number_of_accepted_queries = 0 vector_index = VectorStoreIndex.from_documents(documents) query_engine = vector_index.as_query_engine( similarity_top_k=8, node_postprocessors=[rerank] ) assert len(query_list) == len(reference_answers_list) pairwise_local_score = 0 for index in range(0, len(query_list)): query = query_list[index] reference = reference_answers_list[index] if reference != "Unacceptable": number_of_accepted_queries += 1 response = str(query_engine.query(query)) base_reranker_dict = { "query": query, "response": response, "reference": reference, } base_reranker_dict_list.append(base_reranker_dict) pairwise_eval_result = await evaluator_gpt4_pairwise.aevaluate( query=query, response=response, reference=reference ) pairwise_score = pairwise_eval_result.score pairwise_local_score += pairwise_score else: pass if number_of_accepted_queries > 0: avg_pairwise_local_score = ( pairwise_local_score / number_of_accepted_queries ) pairwise_scores_list.append(avg_pairwise_local_score) overal_pairwise_average_score = sum(pairwise_scores_list) / len( pairwise_scores_list ) df_responses = pd.DataFrame(base_reranker_dict_list) df_responses.to_csv("Base_Reranker_Responses.csv") results_dict = { "name": ["With base cross-encoder/ms-marco-MiniLM-L-12-v2 as Reranker"], "pairwise score": [overal_pairwise_average_score], } results_df = pd.DataFrame(results_dict) display(results_df) from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core.evaluation import PairwiseComparisonEvaluator import os import openai os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] rerank = SentenceTransformerRerank( model="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2", top_n=3 ) gpt4 = OpenAI(temperature=0, model="gpt-4") evaluator_gpt4_pairwise = PairwiseComparisonEvaluator(llm=gpt4) pairwise_scores_list = [] finetuned_reranker_dict_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] reference_answers_list = row["answers"] number_of_accepted_queries = 0 vector_index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, SimpleKeywordTableIndex, ) from llama_index.core import SummaryIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context) list_retriever = summary_index.as_retriever() vector_retriever = vector_index.as_retriever() keyword_retriever = keyword_index.as_retriever() from llama_index.core.tools import RetrieverTool list_tool = RetrieverTool.from_defaults( retriever=list_retriever, description=( "Will retrieve all context from Paul Graham's essay on What I Worked" " On. Don't use if the question only requires more specific context." ), ) vector_tool = RetrieverTool.from_defaults( retriever=vector_retriever, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On." ), ) keyword_tool = RetrieverTool.from_defaults( retriever=keyword_retriever, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On (using entities mentioned in query)" ), ) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) from llama_index.core.retrievers import RouterRetriever from llama_index.core.response.notebook_utils import display_source_node retriever = RouterRetriever( selector=PydanticSingleSelector.from_defaults(llm=llm), retriever_tools=[ list_tool, vector_tool, ], ) nodes = retriever.retrieve( "Can you give me all the context regarding the author's life?" ) for node in nodes:
display_source_node(node)
llama_index.core.response.notebook_utils.display_source_node
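Since the row also imports PydanticMultiSelector, a multi-route variant is presumably the next cell; a sketch reusing the three tools defined above.

retriever = RouterRetriever(
    selector=PydanticMultiSelector.from_defaults(llm=llm),
    retriever_tools=[list_tool, vector_tool, keyword_tool],
)
nodes = retriever.retrieve(
    "What did the author do during his time at YC and at Interleaf?"
)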
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('pip install llama-index') from llama_index.core.node_parser import SimpleFileNodeParser from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() html_file = reader.load_data(Path("./stack-overflow.html")) md_file = reader.load_data(Path("./README.md")) print(html_file[0].metadata) print(html_file[0]) print("----") print(md_file[0].metadata) print(md_file[0]) parser = SimpleFileNodeParser() md_nodes = parser.get_nodes_from_documents(md_file) html_nodes = parser.get_nodes_from_documents(html_file) print(md_nodes[0].metadata) print(md_nodes[0].text) print(md_nodes[1].metadata) print(md_nodes[1].text) print("----") print(html_nodes[0].metadata) print(html_nodes[0].text) from llama_index.core.node_parser import SentenceSplitter splitting_parser = SentenceSplitter(chunk_size=200, chunk_overlap=0) html_chunked_nodes = splitting_parser(html_nodes) md_chunked_nodes = splitting_parser(md_nodes) print(f"\n\nHTML parsed nodes: {len(html_nodes)}") print(html_nodes[0].text) print(f"\n\nHTML chunked nodes: {len(html_chunked_nodes)}") print(html_chunked_nodes[0].text) print(f"\n\nMD parsed nodes: {len(md_nodes)}") print(md_nodes[0].text) print(f"\n\nMD chunked nodes: {len(md_chunked_nodes)}") print(md_chunked_nodes[0].text) from llama_index.core.ingestion import IngestionPipeline pipeline = IngestionPipeline( documents=reader.load_data(Path("./README.md")), transformations=[
SimpleFileNodeParser()
llama_index.core.node_parser.SimpleFileNodeParser
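Assuming the transformations list closes with the sentence splitter defined earlier in the row, running the pipeline yields the chunked nodes; a minimal sketch.

pipeline = IngestionPipeline(
    documents=reader.load_data(Path("./README.md")),
    transformations=[
        SimpleFileNodeParser(),
        SentenceSplitter(chunk_size=200, chunk_overlap=0),
    ],
)
md_chunked_nodes = pipeline.run()
print(md_chunked_nodes)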
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-milvus') get_ipython().system(' pip install llama-index') import logging import sys from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document from llama_index.vector_stores.milvus import MilvusVectorStore from IPython.display import Markdown, display import textwrap import openai openai.api_key = "sk-" get_ipython().system(" mkdir -p 'data/paul_graham/'") get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print("Document ID:", documents[0].doc_id) from llama_index.core import StorageContext vector_store = MilvusVectorStore(dim=1536, overwrite=True) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author learn?") print(textwrap.fill(str(response), 100)) response = query_engine.query("What was a hard moment for the author?") print(textwrap.fill(str(response), 100)) vector_store = MilvusVectorStore(dim=1536, overwrite=True) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
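Because the second Milvus store is created with overwrite=True, re-indexing discards the earlier essay data; a hedged sketch of how that is typically demonstrated.

# Indexing a fresh document into the overwritten collection drops the old essay.
index = VectorStoreIndex.from_documents(
    [Document(text="The number that is being searched for is ten.")],
    storage_context=storage_context,
)
res = index.as_query_engine().query("Who is the author?")
print(res)  # the essay is gone, so this can no longer be answered from the store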
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.core.tools import QueryEngineTool, ToolMetadata try: storage_context = StorageContext.from_defaults( persist_dir="./storage/lyft" ) lyft_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/uber" ) uber_index =
load_index_from_storage(storage_context)
llama_index.core.load_index_from_storage
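The try block implies an except branch that builds and persists both indices when nothing is stored yet. A sketch under the assumption that the 10-K filings live at the paths below (illustrative; the row does not confirm them).

except Exception:
    # Storage not found: build the indices from the raw filings and persist them.
    lyft_docs = SimpleDirectoryReader(
        input_files=["./data/10k/lyft_2021.pdf"]
    ).load_data()
    uber_docs = SimpleDirectoryReader(
        input_files=["./data/10k/uber_2021.pdf"]
    ).load_data()
    lyft_index = VectorStoreIndex.from_documents(lyft_docs)
    uber_index = VectorStoreIndex.from_documents(uber_docs)
    lyft_index.storage_context.persist(persist_dir="./storage/lyft")
    uber_index.storage_context.persist(persist_dir="./storage/uber")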
get_ipython().run_line_magic('pip', 'install llama-index-llms-watsonx') from llama_index.llms.watsonx import WatsonX credentials = { "url": "https://enter.your-ibm.url", "apikey": "insert_your_api_key", } project_id = "insert_your_project_id" resp = WatsonX(credentials=credentials, project_id=project_id).complete( "Paul Graham is" ) print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.watsonx import WatsonX messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] resp = WatsonX( model_id="meta-llama/llama-2-70b-chat", credentials=credentials, project_id=project_id, ).chat(messages) print(resp) from llama_index.llms.watsonx import WatsonX llm =
WatsonX(credentials=credentials, project_id=project_id)
llama_index.llms.watsonx.WatsonX
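Like the other LLM rows in this file, the WatsonX wrapper exposes the standard streaming interface; a brief sketch.

resp = llm.stream_complete("Paul Graham is ")
for r in resp:
    print(r.delta, end="")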
get_ipython().system('pip install llama-index') import logging import sys from IPython.display import Markdown, display import pandas as pd from llama_index.core.query_engine import PandasQueryEngine logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) df = pd.DataFrame( { "city": ["Toronto", "Tokyo", "Berlin"], "population": [2930000, 13960000, 3645000], } ) query_engine =
PandasQueryEngine(df=df, verbose=True)
llama_index.core.query_engine.PandasQueryEngine
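A one-line usage sketch for the completed engine; the question targets the cities dataframe defined in the prompt.

response = query_engine.query("What is the city with the highest population?")
print(response)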
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') from llama_index.readers.file import ImageTabularChartReader from llama_index.core import SummaryIndex from llama_index.core.response.notebook_utils import display_response from pathlib import Path loader = ImageTabularChartReader(keep_image=True) documents = loader.load_data(file=Path("./marine_chart.png")) print(documents[0].text) summary_index = SummaryIndex.from_documents(documents) response = summary_index.as_query_engine().query( "What is the difference between the shares of Greenland and the share of" " Mauritania?" )
display_response(response, show_source=True)
llama_index.core.response.notebook_utils.display_response
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -o ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp =
QP(verbose=True)
llama_index.core.query_pipeline.QueryPipeline
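The row stops at constructing the pipeline. One hedged way to wire it (module names are illustrative) is to route raw input into the SQL query engine and run it.

from llama_index.core.query_pipeline import InputComponent

qp.add_modules(
    {
        "input": InputComponent(),
        "sql_query_engine": sql_query_engine,
    }
)
qp.add_link("input", "sql_query_engine")
response = qp.run(input="Which artist has recorded the most tracks?")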
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex from llama_index.core import PromptTemplate from IPython.display import Markdown, display get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI gpt35_llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-llms-friendli') get_ipython().system('pip install llama-index') get_ipython().run_line_magic('env', 'FRIENDLI_TOKEN=...') from llama_index.llms.friendli import Friendli llm =
Friendli()
llama_index.llms.friendli.Friendli
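A minimal completion sketch for the Friendli client, using the same probe prompt the other LLM rows use.

resp = llm.complete("Paul Graham is ")
print(resp)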
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.agent import ( CustomSimpleAgentWorker, Task, AgentChatResponse, ) from typing import Dict, Any, List, Tuple, Optional from llama_index.core.tools import BaseTool, QueryEngineTool from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.query_engine import RouterQueryEngine from llama_index.core import ChatPromptTemplate, PromptTemplate from llama_index.core.selectors import PydanticSingleSelector from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.llms import ChatMessage, MessageRole DEFAULT_PROMPT_STR = """ Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \ a modified question that will not trigger the error. Examples of modified questions: - The question itself is modified to elicit a non-erroneous response - The question is augmented with context that will help the downstream system better answer the question. - The question is augmented with examples of negative responses, or other negative questions. An error means that either an exception has triggered, or the response is completely irrelevant to the question. Please return the evaluation of the response in the following JSON format. """ def get_chat_prompt_template( system_prompt: str, current_reasoning: Tuple[str, str] ) -> ChatPromptTemplate: system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt) messages = [system_msg] for raw_msg in current_reasoning: if raw_msg[0] == "user": messages.append( ChatMessage(role=MessageRole.USER, content=raw_msg[1]) ) else: messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1])
llama_index.core.llms.ChatMessage
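After the user/assistant loop, the helper presumably wraps the accumulated messages in the ChatPromptTemplate its signature promises; a sketch of the closing line.

    return ChatPromptTemplate(message_templates=messages)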
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') import json from llama_index.core import SimpleDirectoryReader from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import MetadataMode get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") TRAIN_FILES = ["./data/10k/lyft_2021.pdf"] VAL_FILES = ["./data/10k/uber_2021.pdf"] TRAIN_CORPUS_FPATH = "./data/train_corpus.json" VAL_CORPUS_FPATH = "./data/val_corpus.json" def load_corpus(files, verbose=False): if verbose: print(f"Loading files {files}") reader = SimpleDirectoryReader(input_files=files) docs = reader.load_data() if verbose: print(f"Loaded {len(docs)} docs") parser = SentenceSplitter() nodes = parser.get_nodes_from_documents(docs, show_progress=verbose) if verbose: print(f"Parsed {len(nodes)} nodes") return nodes train_nodes = load_corpus(TRAIN_FILES, verbose=True) val_nodes = load_corpus(VAL_FILES, verbose=True) from llama_index.finetuning import generate_qa_embedding_pairs from llama_index.core.evaluation import EmbeddingQAFinetuneDataset import os OPENAI_API_TOKEN = "sk-" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN from llama_index.llms.openai import OpenAI train_dataset = generate_qa_embedding_pairs( llm=OpenAI(model="gpt-3.5-turbo"), nodes=train_nodes ) val_dataset = generate_qa_embedding_pairs( llm=OpenAI(model="gpt-3.5-turbo"), nodes=val_nodes ) train_dataset.save_json("train_dataset.json") val_dataset.save_json("val_dataset.json") train_dataset = EmbeddingQAFinetuneDataset.from_json("train_dataset.json") val_dataset = EmbeddingQAFinetuneDataset.from_json("val_dataset.json") from llama_index.finetuning import SentenceTransformersFinetuneEngine finetune_engine = SentenceTransformersFinetuneEngine( train_dataset, model_id="BAAI/bge-small-en", model_output_path="test_model", val_dataset=val_dataset, ) finetune_engine.finetune() embed_model = finetune_engine.get_finetuned_model() embed_model from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import VectorStoreIndex from llama_index.core.schema import TextNode from tqdm.notebook import tqdm import pandas as pd def evaluate( dataset, embed_model, top_k=5, verbose=False, ): corpus = dataset.corpus queries = dataset.queries relevant_docs = dataset.relevant_docs nodes = [
TextNode(id_=id_, text=text)
llama_index.core.schema.TextNode
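The `evaluate` helper is cut off while rebuilding nodes from the corpus. A condensed sketch of how such a hit-rate evaluation typically finishes (variable names follow the prompt; the exact metric logic is an assumption).

    nodes = [TextNode(id_=id_, text=text) for id_, text in corpus.items()]
    index = VectorStoreIndex(nodes, embed_model=embed_model, show_progress=verbose)
    retriever = index.as_retriever(similarity_top_k=top_k)

    eval_results = []
    for query_id, query in tqdm(queries.items()):
        retrieved_ids = [n.node.node_id for n in retriever.retrieve(query)]
        expected_id = relevant_docs[query_id][0]
        eval_results.append(expected_id in retrieved_ids)  # hit@top_k
    return eval_results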
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=') get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=') get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true # This is needed to avoid a warning message from Chroma') get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision') get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-small-en") llm = OpenAI(model="gpt-3.5-turbo") base_index = VectorStoreIndex(base_nodes, embed_model=embed_model) base_retriever = base_index.as_retriever(similarity_top_k=2) retrievals = base_retriever.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for n in retrievals: display_source_node(n, source_length=1500) query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm) response = query_engine_base.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) sub_chunk_sizes = [128, 256, 512] sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes] all_nodes = [] for base_node in base_nodes: for n in sub_node_parsers: sub_nodes = n.get_nodes_from_documents([base_node]) sub_inodes = [ IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes ] all_nodes.extend(sub_inodes) original_node = IndexNode.from_text_node(base_node, base_node.node_id) all_nodes.append(original_node) all_nodes_dict = {n.node_id: n for n in all_nodes} vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model) vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2) retriever_chunk = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever_chunk}, node_dict=all_nodes_dict, verbose=True, ) nodes = retriever_chunk.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for node in nodes: display_source_node(node, source_length=2000) query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm) response = query_engine_chunk.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode from 
llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, ) extractors = [ SummaryExtractor(summaries=["self"], show_progress=True), QuestionsAnsweredExtractor(questions=5, show_progress=True), ] metadata_dicts = [] for extractor in extractors: metadata_dicts.extend(extractor.extract(base_nodes)) def save_metadata_dicts(path): with open(path, "w") as fp: for m in metadata_dicts: fp.write(json.dumps(m) + "\n") def load_metadata_dicts(path): with open(path, "r") as fp: metadata_dicts = [json.loads(l) for l in fp.readlines()] return metadata_dicts save_metadata_dicts("data/llama2_metadata_dicts.jsonl") metadata_dicts = load_metadata_dicts("data/llama2_metadata_dicts.jsonl") import copy all_nodes = copy.deepcopy(base_nodes) for idx, d in enumerate(metadata_dicts): inode_q = IndexNode( text=d["questions_this_excerpt_can_answer"], index_id=base_nodes[idx].node_id, ) inode_s = IndexNode( text=d["section_summary"], index_id=base_nodes[idx].node_id ) all_nodes.extend([inode_q, inode_s]) all_nodes_dict = {n.node_id: n for n in all_nodes} from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
get_ipython().system('pip install llama-index') get_ipython().system('pip install clickhouse_connect') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from os import environ import clickhouse_connect environ["OPENAI_API_KEY"] = "sk-*" client = clickhouse_connect.get_client( host="localhost", port=8123, username="default", password="", ) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.clickhouse import ClickHouseVectorStore documents = SimpleDirectoryReader("../data/paul_graham").load_data() print("Document ID:", documents[0].doc_id) print("Number of Documents: ", len(documents)) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") loader = SimpleDirectoryReader("./data/paul_graham/") documents = loader.load_data() for file in loader.input_files: print(file) from llama_index.core import StorageContext for document in documents: document.metadata = {"user_id": "123", "favorite_color": "blue"} vector_store = ClickHouseVectorStore(clickhouse_client=client) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) import textwrap from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters query_engine = index.as_query_engine( filters=MetadataFilters( filters=[
ExactMatchFilter(key="user_id", value="123")
llama_index.core.vector_stores.ExactMatchFilter
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.llms.openai import OpenAI resp = OpenAI().complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.openai import OpenAI messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp = OpenAI().chat(messages) print(resp) from llama_index.llms.openai import OpenAI llm = OpenAI() resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage llm = OpenAI() messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp = llm.stream_chat(messages) for r in resp: print(r.delta, end="") from llama_index.llms.openai import OpenAI llm = OpenAI(model="text-davinci-003") resp = llm.complete("Paul Graham is ") print(resp) messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp = llm.chat(messages) print(resp) from pydantic import BaseModel from llama_index.core.llms.openai_utils import to_openai_tool class Song(BaseModel): """A song with name and artist""" name: str artist: str song_fn = to_openai_tool(Song) from llama_index.llms.openai import OpenAI response = OpenAI().complete("Generate a song", tools=[song_fn]) tool_calls = response.additional_kwargs["tool_calls"] print(tool_calls) from llama_index.llms.openai import OpenAI llm = OpenAI(model="text-davinci-003") resp = await llm.acomplete("Paul Graham is ") print(resp) resp = await llm.astream_complete("Paul Graham is ") async for delta in resp: print(delta.delta, end="") from llama_index.llms.openai import OpenAI llm =
OpenAI(model="text-davinci-003", api_key="BAD_KEY")
llama_index.llms.openai.OpenAI
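The deliberately invalid key suggests the next cell demonstrates error handling; a hedged sketch.

try:
    resp = llm.complete("Paul Graham is ")
except Exception as e:
    print(e)  # expect an authentication error caused by the bad API key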
get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb') get_ipython().system('pip install llama-index-vector-stores-duckdb') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.duckdb import DuckDBVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import os import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) documents =
SimpleDirectoryReader("data/paul_graham/")
llama_index.core.SimpleDirectoryReader
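The reloaded documents point at a persistence demo; a sketch assuming DuckDBVectorStore takes a database name plus a persist_dir (both values illustrative).

documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)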
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().system('pip install llama-index gradientai -q') import os from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.finetuning import GradientFinetuneEngine os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY") os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>" from pydantic import BaseModel class Album(BaseModel): """Data model for an album.""" name: str artist: str from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler from llama_index.llms.openai import OpenAI from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser openai_handler = LlamaDebugHandler() openai_callback = CallbackManager([openai_handler]) openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback) gradient_handler = LlamaDebugHandler() gradient_callback = CallbackManager([gradient_handler]) base_model_slug = "llama2-7b-chat" gradient_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=300, callback_manager=gradient_callback, is_chat_model=True, ) from llama_index.core.llms import LLMMetadata prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ """ openai_program = LLMTextCompletionProgram.from_defaults( output_parser=
PydanticOutputParser(Album)
llama_index.core.output_parsers.PydanticOutputParser
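The program construction presumably closes with the template and LLM defined earlier in the row, then gets invoked; a sketch (the movie name is illustrative).

openai_program = LLMTextCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(Album),
    prompt_template_str=prompt_template_str,
    llm=openai_llm,
    verbose=True,
)
album = openai_program(movie_name="The Shining")
print(album)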
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, ) from llama_index.core import SummaryIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core.response.notebook_utils import display_response from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.core import Document from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] llm = OpenAI(model="gpt-4") chunk_sizes = [128, 256, 512, 1024] nodes_list = [] vector_indices = [] for chunk_size in chunk_sizes: print(f"Chunk Size: {chunk_size}") splitter = SentenceSplitter(chunk_size=chunk_size) nodes = splitter.get_nodes_from_documents(docs) for node in nodes: node.metadata["chunk_size"] = chunk_size node.excluded_embed_metadata_keys = ["chunk_size"] node.excluded_llm_metadata_keys = ["chunk_size"] nodes_list.append(nodes) vector_index = VectorStoreIndex(nodes) vector_indices.append(vector_index) from llama_index.core.tools import RetrieverTool from llama_index.core.schema import IndexNode retriever_dict = {} retriever_nodes = [] for chunk_size, vector_index in zip(chunk_sizes, vector_indices): node_id = f"chunk_{chunk_size}" node = IndexNode( text=( "Retrieves relevant context from the Llama 2 paper (chunk size" f" {chunk_size})" ), index_id=node_id, ) retriever_nodes.append(node) retriever_dict[node_id] = vector_index.as_retriever() from llama_index.core.selectors import PydanticMultiSelector from llama_index.core.retrievers import RouterRetriever from llama_index.core.retrievers import RecursiveRetriever from llama_index.core import SummaryIndex summary_index = SummaryIndex(retriever_nodes) retriever = RecursiveRetriever( root_id="root", retriever_dict={"root": summary_index.as_retriever(), **retriever_dict}, ) nodes = await retriever.aretrieve( "Tell me about the main aspects of safety fine-tuning" ) print(f"Number of nodes: {len(nodes)}") for node in nodes: print(node.node.metadata["chunk_size"]) print(node.node.get_text()) from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank from llama_index.postprocessor.cohere_rerank import CohereRerank reranker = CohereRerank(top_n=10) from llama_index.core.query_engine import RetrieverQueryEngine query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker]) response = query_engine.query( "Tell me about the main aspects of safety fine-tuning" ) display_response( response, show_source=True, source_length=500, show_source_metadata=True ) from collections import defaultdict import pandas as pd def mrr_all(metadata_values, metadata_key, source_nodes): value_to_mrr_dict = {} for metadata_value in metadata_values: mrr = 0 for idx, source_node in
enumerate(source_nodes): if source_node.node.metadata[metadata_key] == metadata_value: mrr = 1 / (idx + 1) break else: continue value_to_mrr_dict[metadata_value] = mrr df = pd.DataFrame(value_to_mrr_dict, index=["MRR"]) df.style.set_caption("Mean Reciprocal Rank") return df print("Mean Reciprocal Rank for each Chunk Size") mrr_all(chunk_sizes, "chunk_size", response.source_nodes) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI import nest_asyncio nest_asyncio.apply() eval_llm = OpenAI(model="gpt-4") dataset_generator = DatasetGenerator( nodes_list[-1], llm=eval_llm, show_progress=True, num_questions_per_chunk=2, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) import asyncio import nest_asyncio nest_asyncio.apply() from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, RelevancyEvaluator, FaithfulnessEvaluator, PairwiseComparisonEvaluator, ) evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm) evaluator_r = RelevancyEvaluator(llm=eval_llm) evaluator_f = FaithfulnessEvaluator(llm=eval_llm) pairwise_evaluator = PairwiseComparisonEvaluator(llm=eval_llm) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import BatchEvalRunner max_samples = 60 eval_qs = eval_dataset.questions qr_pairs = eval_dataset.qr_pairs ref_response_strs = [r for (_, r) in qr_pairs] base_query_engine = vector_indices[-1].as_query_engine(similarity_top_k=2) reranker = CohereRerank(top_n=4) query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker]) base_pred_responses = get_responses( eval_qs[:max_samples], base_query_engine, show_progress=True ) pred_responses = get_responses( eval_qs[:max_samples], query_engine, show_progress=True ) import numpy as np pred_response_strs = [str(p) for p in pred_responses] base_pred_response_strs = [str(p) for p in base_pred_responses] evaluator_dict = { "correctness": evaluator_c, "faithfulness": evaluator_f, "semantic_similarity": evaluator_s, } batch_runner =
BatchEvalRunner(evaluator_dict, workers=1, show_progress=True)
llama_index.core.evaluation.BatchEvalRunner
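Note: the row above stops at the `BatchEvalRunner` constructor. A minimal sketch of how such a runner is typically driven, reusing `eval_qs`, `pred_responses`, `base_pred_responses`, and `ref_response_strs` from the prompt (the run labels are illustrative, not from the source):

# Evaluate both sets of predicted responses against the references (async batch).
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs[:max_samples],
    responses=pred_responses[:max_samples],
    reference=ref_response_strs[:max_samples],
)
base_eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs[:max_samples],
    responses=base_pred_responses[:max_samples],
    reference=ref_response_strs[:max_samples],
)
# Tabulate both runs side by side for comparison.
results_df = get_results_df(
    [eval_results, base_eval_results],
    ["Ensemble Retriever", "Base Retriever"],
    ["correctness", "faithfulness", "semantic_similarity"],
)
display(results_df)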
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic') get_ipython().system('pip install llama-index') from llama_index.llms.anthropic import Anthropic from llama_index.core import Settings tokenizer = Anthropic().tokenizer Settings.tokenizer = tokenizer import os os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY" from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-opus-20240229") resp = llm.complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.anthropic import Anthropic messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] resp = Anthropic(model="claude-3-opus-20240229").chat(messages) print(resp) from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-opus-20240229", max_tokens=100) resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-opus-20240229") messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ),
ChatMessage(role="user", content="Tell me a story")
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-program-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.program.openai import OpenAIPydanticProgram from pydantic import BaseModel from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from typing import List class Song(BaseModel): """Data model for a song.""" title: str length_seconds: int class Album(BaseModel): """Data model for an album.""" name: str artist: str songs: List[Song] finetuning_handler =
OpenAIFineTuningHandler()
llama_index.finetuning.callbacks.OpenAIFineTuningHandler
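The handler above only captures traffic once it is attached to an LLM's callback manager. A minimal sketch of that wiring, using imports already present in the prompt (the output filename is illustrative):

from llama_index.core.callbacks import CallbackManager
from llama_index.llms.openai import OpenAI

# Route every LLM call through the handler so prompt/completion
# pairs are recorded for later export to a fine-tuning file.
llm = OpenAI(
    model="gpt-4", callback_manager=CallbackManager([finetuning_handler])
)
# After generating training examples with this llm:
finetuning_handler.save_finetuning_events("finetuning_events.jsonl")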
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') from llama_index.core.llama_dataset import ( LabelledRagDataExample, CreatedByType, CreatedBy, ) query = "This is a test query, is it not?" query_by = CreatedBy(type=CreatedByType.AI, model_name="gpt-4") reference_answer = "Yes it is." reference_answer_by = CreatedBy(type=CreatedByType.HUMAN) reference_contexts = ["This is a sample context"] rag_example = LabelledRagDataExample( query=query, query_by=query_by, reference_contexts=reference_contexts, reference_answer=reference_answer, reference_answer_by=reference_answer_by, ) print(rag_example.json()) LabelledRagDataExample.parse_raw(rag_example.json()) rag_example.dict() LabelledRagDataExample.parse_obj(rag_example.dict()) query = "This is a test query, is it so?" reference_answer = "I think yes, it is." reference_contexts = ["This is a second sample context"] rag_example_2 = LabelledRagDataExample( query=query, query_by=query_by, reference_contexts=reference_contexts, reference_answer=reference_answer, reference_answer_by=reference_answer_by, ) from llama_index.core.llama_dataset import LabelledRagDataset rag_dataset = LabelledRagDataset(examples=[rag_example, rag_example_2]) rag_dataset.to_pandas() rag_dataset.save_json("rag_dataset.json") reload_rag_dataset =
LabelledRagDataset.from_json("rag_dataset.json")
llama_index.core.llama_dataset.LabelledRagDataset.from_json
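Beyond hand-built examples like the ones above, labelled RAG datasets can also be pulled down ready-made; a hedged sketch (the dataset name and download path are illustrative):

from llama_index.core.llama_dataset import download_llama_dataset

# Downloads both the labelled dataset and its source documents.
rag_dataset, documents = download_llama_dataset(
    "PaulGrahamEssayDataset", "./data/paul_graham"
)
rag_dataset.to_pandas()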
get_ipython().system('pip install llama-index llama-index-packs-raptor llama-index-vector-stores-qdrant') from llama_index.packs.raptor import RaptorPack get_ipython().system('wget https://arxiv.org/pdf/2401.18059.pdf -O ./raptor_paper.pdf') import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader(input_files=["./raptor_paper.pdf"]).load_data() from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.vector_stores.chroma import ChromaVectorStore import chromadb client = chromadb.PersistentClient(path="./raptor_paper_db") collection = client.get_or_create_collection("raptor") vector_store = ChromaVectorStore(chroma_collection=collection) raptor_pack = RaptorPack( documents, embed_model=OpenAIEmbedding( model="text-embedding-3-small" ), # used for embedding clusters llm=OpenAI(model="gpt-3.5-turbo", temperature=0.1), # used for generating summaries vector_store=vector_store, # used for storage similarity_top_k=2, # top k for each layer, or overall top-k for collapsed mode="collapsed", # sets default mode transformations=[
SentenceSplitter(chunk_size=400, chunk_overlap=50)
llama_index.core.node_parser.SentenceSplitter
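Once built, the pack is queried through its run method; a short sketch assuming the raptor_pack constructed in the prompt (the query string is illustrative):

# Default mode was set to "collapsed" above; override per call if desired.
nodes = raptor_pack.run("What baselines is RAPTOR compared against?")
print(len(nodes), nodes[0].text[:100])

# Tree traversal walks the cluster hierarchy instead of a flat search.
nodes = raptor_pack.run(
    "What baselines is RAPTOR compared against?", mode="tree_traversal"
)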
get_ipython().run_line_magic('pip', 'install llama-index-readers-web') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) get_ipython().system('pip install llama-index') from llama_index.core import SummaryIndex from llama_index.readers.web import SimpleWebPageReader from IPython.display import Markdown, display import os documents =
SimpleWebPageReader(html_to_text=True)
llama_index.readers.web.SimpleWebPageReader
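The completion stops at the reader constructor; a typical continuation, as a sketch under the prompt's imports (the URL is an example, not from the source):

documents = SimpleWebPageReader(html_to_text=True).load_data(
    ["http://paulgraham.com/worked.html"]
)
# Build a summary index over the page and query it.
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))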
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system('pip -q install python-dotenv pinecone-client llama-index pymupdf') dotenv_path = ( "env" # Google Colabs will not let you open a .env, but you can set ) with open(dotenv_path, "w") as f: f.write('PINECONE_API_KEY="<your api key>"\n') f.write('PINECONE_ENVIRONMENT="gcp-starter"\n') f.write('OPENAI_API_KEY="<your api key>"\n') import os from dotenv import load_dotenv load_dotenv(dotenv_path=dotenv_path) import pinecone api_key = os.environ["PINECONE_API_KEY"] environment = os.environ["PINECONE_ENVIRONMENT"] pinecone.init(api_key=api_key, environment=environment) index_name = "llamaindex-rag-fs" pinecone.create_index( index_name, dimension=1536, metric="euclidean", pod_type="p1" ) pinecone_index = pinecone.Index(index_name) pinecone_index.delete(deleteAll=True) from llama_index.vector_stores.pinecone import PineconeVectorStore vector_store = PineconeVectorStore(pinecone_index=pinecone_index) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') import fitz file_path = "./data/llama2.pdf" doc = fitz.open(file_path) from llama_index.core.node_parser import SentenceSplitter text_parser = SentenceSplitter( chunk_size=1024, ) text_chunks = [] doc_idxs = [] for doc_idx, page in enumerate(doc): page_text = page.get_text("text") cur_text_chunks = text_parser.split_text(page_text) text_chunks.extend(cur_text_chunks) doc_idxs.extend([doc_idx] * len(cur_text_chunks)) from llama_index.core.schema import TextNode nodes = [] for idx, text_chunk in enumerate(text_chunks): node = TextNode( text=text_chunk, ) src_doc_idx = doc_idxs[idx] src_page = doc[src_doc_idx] nodes.append(node) print(nodes[0].metadata) print(nodes[0].get_content(metadata_mode="all")) from llama_index.core.extractors import ( QuestionsAnsweredExtractor, TitleExtractor, ) from llama_index.core.ingestion import IngestionPipeline from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo") extractors = [ TitleExtractor(nodes=5, llm=llm), QuestionsAnsweredExtractor(questions=3, llm=llm), ] pipeline = IngestionPipeline( transformations=extractors, ) nodes = await pipeline.arun(nodes=nodes, in_place=False) print(nodes[0].metadata) from llama_index.embeddings.openai import OpenAIEmbedding embed_model =
OpenAIEmbedding()
llama_index.embeddings.openai.OpenAIEmbedding
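In this from-scratch pipeline the embedding model is applied node by node before upserting into Pinecone; a minimal sketch reusing nodes, embed_model, and vector_store from the prompt:

# Embed each node's content (metadata included) and attach the vector.
for node in nodes:
    node_embedding = embed_model.get_text_embedding(
        node.get_content(metadata_mode="all")
    )
    node.embedding = node_embedding

# Upsert the embedded nodes into the Pinecone-backed store.
vector_store.add(nodes)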
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate') get_ipython().run_line_magic('pip', 'install unstructured replicate') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install -U qdrant_client') import os REPLICATE_API_TOKEN = "..." # Your Replicate API token here os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1UU0xc3uLXs-WG0aDQSXjGacUkp142rLS" -O texas.jpg') from llama_index.readers.file import FlatReader from pathlib import Path from llama_index.core.node_parser import UnstructuredElementNodeParser reader = FlatReader() docs_2021 = reader.load_data(Path("tesla_2021_10k.htm")) node_parser = UnstructuredElementNodeParser() import openai OPENAI_API_TOKEN = "..." openai.api_key = OPENAI_API_TOKEN # add your openai api key here os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN import os import pickle if not os.path.exists("2021_nodes.pkl"): raw_nodes_2021 = node_parser.get_nodes_from_documents(docs_2021) pickle.dump(raw_nodes_2021, open("2021_nodes.pkl", "wb")) else: raw_nodes_2021 = pickle.load(open("2021_nodes.pkl", "rb")) nodes_2021, objects_2021 = node_parser.get_nodes_and_objects(raw_nodes_2021) from llama_index.core import VectorStoreIndex vector_index = VectorStoreIndex(nodes=nodes_2021, objects=objects_2021) query_engine = vector_index.as_query_engine(similarity_top_k=5, verbose=True) from PIL import Image import matplotlib.pyplot as plt imageUrl = "./texas.jpg" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.core.schema import ImageDocument from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) print(imageUrl) llava_multi_modal_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS["llava-13b"], max_new_tokens=200, temperature=0.1, ) prompt = "which Tesla factory is shown in the image? Please answer just the name of the factory." llava_response = llava_multi_modal_llm.complete( prompt=prompt, image_documents=[ImageDocument(image_path=imageUrl)], ) print(llava_response.text) rag_response = query_engine.query(llava_response.text) print(rag_response) input_image_path = Path("instagram_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=12ZpBBFkYu-jzz1iz356U5kMikn4uN9ww" -O ./instagram_images/jordan.png') from pydantic import BaseModel class InsAds(BaseModel): """Data model for an Instagram ad.""" account: str brand: str product: str category: str discount: str price: str comments: str review: str description: str from PIL import Image import matplotlib.pyplot as plt ins_imageUrl = "./instagram_images/jordan.png" image = Image.open(ins_imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) prompt_template_str = """\ can you summarize what is in the image\ and return the answer with json format \ """ def pydantic_llava( model_name, output_class, image_documents, prompt_template_str ): mm_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS["llava-13b"], max_new_tokens=1000, ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_class), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_llm, verbose=True, ) response = llm_program() print(f"Model: {model_name}") for res in response: print(res) return response from llama_index.core import SimpleDirectoryReader ins_image_documents = SimpleDirectoryReader("./instagram_images").load_data() pydantic_response = pydantic_llava( "llava-13b", InsAds, ins_image_documents, prompt_template_str ) print(pydantic_response.brand) from pathlib import Path import requests wiki_titles = [ "batman", "Vincent van Gogh", "San Francisco", "iPhone", "Tesla Model S", "BTS", "Air Jordan", ] data_path = Path("data_wiki") for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) import wikipedia import urllib.request image_path = Path("data_wiki") image_uuid = 0 image_metadata_dict = {} MAX_IMAGES_PER_WIKI = 30 wiki_titles = [ "Air Jordan", "San Francisco", "Batman", "Vincent van Gogh", "iPhone", "Tesla Model S", "BTS band", ] if not image_path.exists(): Path.mkdir(image_path) for title in wiki_titles: images_per_wiki = 0 print(title) try: page_py = wikipedia.page(title) list_img_urls = page_py.images for url in list_img_urls: if url.endswith(".jpg") or url.endswith(".png"): image_uuid += 1 image_file_name = title + "_" + url.split("/")[-1] image_metadata_dict[image_uuid] = { "filename": image_file_name, "img_path": "./" + str(image_path / f"{image_uuid}.jpg"), } urllib.request.urlretrieve( url, image_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki > MAX_IMAGES_PER_WIKI: break except: print(str(Exception("No images found for Wikipedia page: ")) + title) continue import qdrant_client from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import VectorStoreIndex, StorageContext from llama_index.core.indices import MultiModalVectorStoreIndex client = qdrant_client.QdrantClient(path="qdrant_mm_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) documents = SimpleDirectoryReader("./data_wiki/").load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, ) from PIL import Image import matplotlib.pyplot as plt import os def plot_images(image_metadata_dict): original_images_urls = [] images_shown = 0 for image_id in image_metadata_dict: img_path = image_metadata_dict[image_id]["img_path"] if os.path.isfile(img_path): filename = image_metadata_dict[image_id]["filename"] image = Image.open(img_path).convert("RGB") plt.subplot(8, 8, len(original_images_urls) + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) original_images_urls.append(filename) images_shown += 1 if images_shown >= 64: break plt.tight_layout() plot_images(image_metadata_dict) retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5) retrieval_results = retriever.retrieve(pydantic_response.brand) from llama_index.core.response.notebook_utils import ( display_source_node, display_image_uris, ) from llama_index.core.schema import ImageNode retrieved_image = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_image.append(res_node.node.metadata["file_path"]) else: display_source_node(res_node, source_length=200) display_image_uris(retrieved_image) from llama_index.core import PromptTemplate from llama_index.core.query_engine import SimpleMultiModalQueryEngine qa_tmpl_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query.\n" "Query: {query_str}\n" "Answer: " ) qa_tmpl =
PromptTemplate(qa_tmpl_str)
llama_index.core.PromptTemplate
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().system('pip install llama-index') from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, ) from llama_index.core.response.pprint_utils import pprint_response get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents=documents) import os from llama_index.postprocessor.cohere_rerank import CohereRerank api_key = os.environ["COHERE_API_KEY"] cohere_rerank =
CohereRerank(api_key=api_key, top_n=2)
llama_index.postprocessor.cohere_rerank.CohereRerank
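A sketch of how the reranker is usually wired into a query engine (over-retrieve, then keep the best two), reusing index and cohere_rerank from the row above; the query is illustrative:

query_engine = index.as_query_engine(
    similarity_top_k=10,  # fetch a wide candidate set
    node_postprocessors=[cohere_rerank],  # Cohere trims it to top_n=2
)
response = query_engine.query("What did Sam Altman do in this essay?")
pprint_response(response, show_source=True)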
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('pip install llama-index') from llama_index.core.node_parser import SimpleFileNodeParser from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() html_file = reader.load_data(Path("./stack-overflow.html")) md_file = reader.load_data(Path("./README.md")) print(html_file[0].metadata) print(html_file[0]) print("----") print(md_file[0].metadata) print(md_file[0]) parser = SimpleFileNodeParser() md_nodes = parser.get_nodes_from_documents(md_file) html_nodes = parser.get_nodes_from_documents(html_file) print(md_nodes[0].metadata) print(md_nodes[0].text) print(md_nodes[1].metadata) print(md_nodes[1].text) print("----") print(html_nodes[0].metadata) print(html_nodes[0].text) from llama_index.core.node_parser import SentenceSplitter splitting_parser =
SentenceSplitter(chunk_size=200, chunk_overlap=0)
llama_index.core.node_parser.SentenceSplitter
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"] from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore from llama_index.vector_stores.dynamodb import DynamoDBVectorStore storage_context = StorageContext.from_defaults( docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME), index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME), vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME), ) storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-extractors-entity') get_ipython().system('pip install llama-index') import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" from llama_index.extractors.entity import EntityExtractor from llama_index.core.node_parser import SentenceSplitter entity_extractor = EntityExtractor( prediction_threshold=0.5, label_entities=False, # include the entity label in the metadata (can be erroneous) device="cpu", # set to "cuda" if you have a GPU ) node_parser = SentenceSplitter() transformations = [node_parser, entity_extractor] get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf') from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader( input_files=["./IPCC_AR6_WGII_Chapter03.pdf"] ).load_data() from llama_index.core.ingestion import IngestionPipeline import random random.seed(42) documents = random.sample(documents, 100) pipeline = IngestionPipeline(transformations=transformations) nodes = pipeline.run(documents=documents) samples = random.sample(nodes, 5) for node in samples: print(node.metadata) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm =
OpenAI(model="gpt-3.5-turbo", temperature=0.2)
llama_index.llms.openai.OpenAI
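A brief sketch of putting the entity-enriched nodes to work, assuming the nodes produced by the pipeline above (the query is illustrative):

# Entity metadata on each node helps ground author-specific questions.
index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine()
response = query_engine.query("What is said by Fox-Kemper?")
print(response)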
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SQLDatabase from llama_index.readers.wikipedia import WikipediaReader from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) get_ipython().system('pip install wikipedia') cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs =
WikipediaReader()
llama_index.readers.wikipedia.WikipediaReader
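The completion is just the reader constructor; the usual continuation passes the page titles, as in this sketch reusing cities from the prompt:

# `pages` takes Wikipedia article titles; one Document per city.
wiki_docs = WikipediaReader().load_data(pages=cities)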
import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm') get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web') get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference') import hashlib import json from pathlib import Path import os import textwrap from typing import List, Union import llama_index.core from llama_index.readers.web import SimpleWebPageReader from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core.callbacks import CallbackManager from llama_index.callbacks.openinference import OpenInferenceCallbackHandler from llama_index.callbacks.openinference.base import ( as_dataframe, QueryData, NodeData, ) from llama_index.core.node_parser import SimpleNodeParser import pandas as pd from tqdm import tqdm documents = SimpleWebPageReader().load_data( [ "https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt" ] ) print(documents[0].text) parser =
SentenceSplitter()
llama_index.core.node_parser.SentenceSplitter
get_ipython().run_line_magic('pip', 'install llama-index-llms-replicate') get_ipython().system('pip install llama-index') import os os.environ["REPLICATE_API_TOKEN"] = "<your API key>" from llama_index.llms.replicate import Replicate llm = Replicate( model="replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b" ) resp = llm.complete("Who is Paul Graham?") print(resp) from llama_index.core.llms import ChatMessage messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp = llm.chat(messages) print(resp) response = llm.stream_complete("Who is Paul Graham?") for r in response: print(r.delta, end="") from llama_index.core.llms import ChatMessage messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ),
ChatMessage(role="user", content="What is your name")
llama_index.core.llms.ChatMessage
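The prompt builds a second messages list; a sketch of the streaming-chat counterpart of the earlier calls, reusing llm from the row above:

# Stream the chat reply token by token.
resp = llm.stream_chat(messages)
for r in resp:
    print(r.delta, end="")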
from llama_index.agent import OpenAIAgent import openai openai.api_key = "sk-api-key" from llama_index.tools.gmail.base import GmailToolSpec from llama_index.tools.google_calendar.base import GoogleCalendarToolSpec from llama_index.tools.google_search.base import GoogleSearchToolSpec gmail_tools =
GmailToolSpec()
llama_index.tools.gmail.base.GmailToolSpec
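A sketch of how these tool specs are typically combined into a single agent (the Google search key and engine values are placeholders, not real credentials):

google_calendar_tools = GoogleCalendarToolSpec()
google_search_tools = GoogleSearchToolSpec(key="api-key", engine="engine")

# Flatten each spec into individual tools and hand them all to one agent.
agent = OpenAIAgent.from_tools(
    [
        *gmail_tools.to_tool_list(),
        *google_calendar_tools.to_tool_list(),
        *google_search_tools.to_tool_list(),
    ],
    verbose=True,
)
agent.chat("Can you read the latest email in my inbox?")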
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( SimpleDirectoryReader, StorageContext, VectorStoreIndex, ) from llama_index.retrievers.bm25 import BM25Retriever from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) index = VectorStoreIndex( nodes=nodes, storage_context=storage_context, ) retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2) from llama_index.core.response.notebook_utils import display_source_node nodes = retriever.retrieve("What happened at Viaweb and Interleaf?") for node in nodes: display_source_node(node) nodes = retriever.retrieve("What did Paul Graham do after RISD?") for node in nodes: display_source_node(node) from llama_index.core.tools import RetrieverTool vector_retriever = VectorIndexRetriever(index) bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2) retriever_tools = [ RetrieverTool.from_defaults( retriever=vector_retriever, description="Useful in most cases", ), RetrieverTool.from_defaults( retriever=bm25_retriever, description="Useful if searching about specific information", ), ] from llama_index.core.retrievers import RouterRetriever retriever = RouterRetriever.from_defaults( retriever_tools=retriever_tools, llm=llm, select_multi=True, ) nodes = retriever.retrieve( "Can you give me all the context regarding the author's life?" ) for node in nodes: display_source_node(node) get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf') from llama_index.core import ( VectorStoreIndex, StorageContext, SimpleDirectoryReader, Document, ) from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI documents = SimpleDirectoryReader( input_files=["IPCC_AR6_WGII_Chapter03.pdf"] ).load_data() llm = OpenAI(model="gpt-3.5-turbo") splitter = SentenceSplitter(chunk_size=256) nodes = splitter.get_nodes_from_documents( [Document(text=documents[0].get_content()[:1000000])] ) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
from llama_index.llms.openai import OpenAI from llama_index.core import VectorStoreIndex from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.postprocessor import LLMRerank from llama_index.core import VectorStoreIndex from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core import Settings from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.packs.koda_retriever import KodaRetriever import os from pinecone import Pinecone pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY")) index = pc.Index("sample-movies") Settings.llm = OpenAI() Settings.embed_model = OpenAIEmbedding() vector_store =
PineconeVectorStore(pinecone_index=index, text_key="summary")
llama_index.vector_stores.pinecone.PineconeVectorStore
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().system('pip install llama-index') import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west4-gcp-free") import os import getpass import openai openai.api_key = "sk-<your-key>" try: pinecone.create_index( "quickstart-index", dimension=1536, metric="euclidean", pod_type="p1" ) except Exception: pass pinecone_index = pinecone.Index("quickstart-index") pinecone_index.delete(deleteAll=True, namespace="test") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", "gender": "male", "born": 1963, }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", "gender": "female", "born": 1975, }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", "gender": "male", "born": 1971, }, ), TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." ), metadata={ "category": "Music", "country": "Barbados", "gender": "female", "born": 1988, }, ), TextNode( text=( "Cristiano Ronaldo is a Portuguese professional footballer who is" " considered one of the greatest football players of all time. He" " has won numerous awards and set multiple records during his" " career." ), metadata={ "category": "Sports", "country": "Portugal", "gender": "male", "born": 1985, }, ), ] vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="test" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system('pip install -q llama-index google-generativeai') get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...') import os GOOGLE_API_KEY = "" # add your GOOGLE API key here os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY from llama_index.llms.gemini import Gemini resp = Gemini().complete("Write a poem about a magic backpack") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.gemini import Gemini messages = [ ChatMessage(role="user", content="Hello friend!"), ChatMessage(role="assistant", content="Yarr what is shakin' matey?"), ChatMessage( role="user", content="Help me decide what to have for dinner." ), ] resp =
Gemini()
llama_index.llms.gemini.Gemini
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-program-evaporate') get_ipython().system('pip install llama-index') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) from llama_index.core import SimpleDirectoryReader city_docs = {} for wiki_title in wiki_titles: city_docs[wiki_title] = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.chunk_size = 512 city_nodes = {} for wiki_title in wiki_titles: docs = city_docs[wiki_title] nodes =
Settings.node_parser.get_nodes_from_documents(docs)
llama_index.core.Settings.node_parser.get_nodes_from_documents
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-txtai') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import txtai txtai_index = txtai.ann.ANNFactory.create({"backend": "numpy"}) from llama_index.core import ( SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext, ) from llama_index.vector_stores.txtai import TxtaiVectorStore from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store =
TxtaiVectorStore(txtai_index=txtai_index)
llama_index.vector_stores.txtai.TxtaiVectorStore
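A sketch of the remaining wiring: wrap the txtai store in a storage context, index the documents, and query (the question mirrors the other Paul Graham examples):

storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))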
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings from llama_index.core import StorageContext, VectorStoreIndex from llama_index.core import SummaryIndex Settings.llm =
OpenAI()
llama_index.llms.openai.OpenAI