prompt: string (lengths 70 to 19.8k)
completion: string (lengths 8 to 1.03k)
api: string (lengths 23 to 93)
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm') get_ipython().system('pip install llama-index') import os from llama_index.llms.litellm import LiteLLM from llama_index.core.llms import ChatMessage os.environ["OPENAI_API_KEY"] = "your-api-key" os.environ["COHERE_API_KEY"] = "your-api-key" message = ChatMessage(role="user", content="Hey! how's it going?") llm = LiteLLM("gpt-3.5-turbo") chat_response = llm.chat([message]) llm = LiteLLM("command-nightly") chat_response = llm.chat([message]) from llama_index.core.llms import ChatMessage from llama_index.llms.litellm import LiteLLM messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] resp =
LiteLLM("gpt-3.5-turbo")
llama_index.llms.litellm.LiteLLM
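A minimal usage sketch for this row, assuming the API keys from the prompt are valid; it finishes the truncated `resp =` line and sends the pirate-persona messages defined above.

```python
# Sketch: complete the truncated assignment and chat with the model.
llm = LiteLLM("gpt-3.5-turbo")
resp = llm.chat(messages)  # `messages` is defined in the prompt above
print(resp)
```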
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-4") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) city_docs = {} for wiki_title in wiki_titles: city_docs[wiki_title] = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() from llama_index.core import VectorStoreIndex from llama_index.agent.openai import OpenAIAgent from llama_index.core.tools import QueryEngineTool, ToolMetadata tool_dict = {} for wiki_title in wiki_titles: vector_index = VectorStoreIndex.from_documents( city_docs[wiki_title], ) vector_query_engine = vector_index.as_query_engine(llm=Settings.llm) vector_tool = QueryEngineTool( query_engine=vector_query_engine, metadata=ToolMetadata( name=wiki_title, description=("Useful for questions related to" f" {wiki_title}"), ), ) tool_dict[wiki_title] = vector_tool from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping tool_mapping = SimpleToolNodeMapping.from_objects(list(tool_dict.values())) tool_index = ObjectIndex.from_objects( list(tool_dict.values()), tool_mapping, VectorStoreIndex, ) tool_retriever = tool_index.as_retriever(similarity_top_k=1) from llama_index.core.llms import ChatMessage from llama_index.core import ChatPromptTemplate from typing import List GEN_SYS_PROMPT_STR = """\ Task information is given below. Given the task, please generate a system prompt for an OpenAI-powered bot to solve this task: {task} \ """ gen_sys_prompt_messages = [ ChatMessage( role="system", content="You are helping to build a system prompt for another bot.", ), ChatMessage(role="user", content=GEN_SYS_PROMPT_STR), ] GEN_SYS_PROMPT_TMPL = ChatPromptTemplate(gen_sys_prompt_messages) agent_cache = {} def create_system_prompt(task: str): """Create system prompt for another agent given an input task.""" llm =
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
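A hedged sketch of where `create_system_prompt` appears to be headed: format `GEN_SYS_PROMPT_TMPL` from the prompt above and ask the model for a sub-agent system prompt. The task string is an illustrative placeholder.

```python
# Sketch only; the task value is a placeholder.
llm = OpenAI(model="gpt-4")
fmt_messages = GEN_SYS_PROMPT_TMPL.format_messages(
    task="Answer questions about Toronto"
)
response = llm.chat(fmt_messages)
system_prompt = response.message.content
```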
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import json from typing import Sequence, List from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool import nest_asyncio nest_asyncio.apply() def multiply(a: int, b: int) -> int: """Multiply two integers and return the resulting integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the resulting integer""" return a + b add_tool =
FunctionTool.from_defaults(fn=add)
llama_index.core.tools.FunctionTool.from_defaults
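An illustrative continuation under the same imports: register both function tools with an OpenAIAgent and let the model call them.

```python
# Sketch: route arithmetic through the two tools defined above.
from llama_index.agent.openai import OpenAIAgent

add_tool = FunctionTool.from_defaults(fn=add)
agent = OpenAIAgent.from_tools(
    [multiply_tool, add_tool],
    llm=OpenAI(model="gpt-3.5-turbo"),
    verbose=True,
)
print(agent.chat("What is (121 * 3) + 42?"))
```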
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2) Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("../data/paul_graham").load_data() from llama_index.core import Settings Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) from llama_index.core import StorageContext storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) from llama_index.core import SummaryIndex from llama_index.core import VectorStoreIndex summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) list_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", use_async=True, ) vector_query_engine = vector_index.as_query_engine() from llama_index.core.tools import QueryEngineTool list_tool = QueryEngineTool.from_defaults( query_engine=list_query_engine, description=( "Useful for summarization questions related to Paul Graham essay on" " What I Worked On." ), ) vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On." ), ) from llama_index.core.query_engine import RouterQueryEngine from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) query_engine = RouterQueryEngine( selector=PydanticSingleSelector.from_defaults(), query_engine_tools=[ list_tool, vector_tool, ], ) response = query_engine.query("What is the summary of the document?") print(str(response)) response = query_engine.query("What did Paul Graham do after RISD?") print(str(response)) query_engine = RouterQueryEngine( selector=LLMSingleSelector.from_defaults(), query_engine_tools=[ list_tool, vector_tool, ], ) response = query_engine.query("What is the summary of the document?") print(str(response)) response = query_engine.query("What did Paul Graham do after RISD?") print(str(response)) print(str(response.metadata["selector_result"])) from llama_index.core import SimpleKeywordTableIndex keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context) keyword_query_engine = keyword_index.as_query_engine() keyword_tool = QueryEngineTool.from_defaults( query_engine=keyword_query_engine, description=( "Useful for retrieving specific context using keywords from Paul" " Graham essay on What I Worked On." ), ) query_engine = RouterQueryEngine( selector=
PydanticMultiSelector.from_defaults()
llama_index.core.selectors.PydanticMultiSelector.from_defaults
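A sketch of the completed router: `PydanticMultiSelector` can select several of the tools built above for a single query.

```python
query_engine = RouterQueryEngine(
    selector=PydanticMultiSelector.from_defaults(),
    query_engine_tools=[list_tool, vector_tool, keyword_tool],
)
response = query_engine.query(
    "What were Paul Graham's notable accomplishments at Y Combinator?"
)
print(str(response))
```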
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index = VectorStoreIndex(base_nodes) query_engine = index.as_query_engine(similarity_top_k=2) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.core.node_parser import SimpleNodeParser dataset_generator = DatasetGenerator( base_nodes[:20], llm=OpenAI(model="gpt-4"), show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) import random import numpy as np full_qr_pairs = eval_dataset.qr_pairs num_exemplars = 2 num_eval = 40 exemplar_qr_pairs = random.sample(full_qr_pairs, num_exemplars) eval_qr_pairs = random.sample(full_qr_pairs, num_eval) len(exemplar_qr_pairs) from llama_index.core.evaluation.eval_utils import get_responses from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner evaluator_c = CorrectnessEvaluator(llm=OpenAI(model="gpt-3.5-turbo")) evaluator_dict = { "correctness": evaluator_c, } batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True) async def get_correctness(query_engine, eval_qa_pairs, batch_runner): eval_qs = [q for q, _ in eval_qa_pairs] eval_answers = [a for _, a in eval_qa_pairs] pred_responses = get_responses(eval_qs, query_engine, show_progress=True) eval_results = await batch_runner.aevaluate_responses( eval_qs, responses=pred_responses, reference=eval_answers ) avg_correctness = np.array( [r.score for r in eval_results["correctness"]] ).mean() return avg_correctness QA_PROMPT_KEY = "response_synthesizer:text_qa_template" from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
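A sketch of the prompt-tuning step this row builds toward: write a QA template (the template text here is an illustrative baseline) and swap it into the query engine under `QA_PROMPT_KEY`.

```python
qa_tmpl_str = (
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer the query.\n"
    "Query: {query_str}\n"
    "Answer: "
)
qa_tmpl = PromptTemplate(qa_tmpl_str)
query_engine.update_prompts({QA_PROMPT_KEY: qa_tmpl})
```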
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.llms.openai import OpenAI resp =
OpenAI()
llama_index.llms.openai.OpenAI
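The prompt truncates at `resp =`; a typical continuation in this style of example is a plain completion call, sketched here.

```python
llm = OpenAI()
resp = llm.complete("Paul Graham is ")
print(resp)
```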
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.readers.file import PDFReader reader = PDFReader() get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") docs = reader.load_data("./data/10k/lyft_2021.pdf") from llama_index.core.node_parser import SentenceSplitter node_parser = SentenceSplitter() nodes = node_parser.get_nodes_from_documents(docs) print(nodes[8].get_content(metadata_mode="all")) get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet') from pgvector.sqlalchemy import Vector from sqlalchemy import insert, create_engine, String, text, Integer from sqlalchemy.orm import declarative_base, mapped_column engine = create_engine("postgresql+psycopg2://localhost/postgres") with engine.connect() as conn: conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector")) conn.commit() Base = declarative_base() class SECTextChunk(Base): __tablename__ = "sec_text_chunk" id = mapped_column(Integer, primary_key=True) page_label = mapped_column(Integer) file_name = mapped_column(String) text = mapped_column(String) embedding = mapped_column(Vector(384)) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) from llama_index.embeddings.huggingface import HuggingFaceEmbedding embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en") for node in nodes: text_embedding = embed_model.get_text_embedding(node.get_content()) node.embedding = text_embedding for node in nodes: row_dict = { "text": node.get_content(), "embedding": node.embedding, **node.metadata, } stmt = insert(SECTextChunk).values(**row_dict) with engine.connect() as connection: cursor = connection.execute(stmt) connection.commit() from llama_index.core import PromptTemplate text_to_sql_tmpl = """\ Given an input question, first create a syntactically correct {dialect} \ query to run, then look at the results of the query and return the answer. \ You can order the results by a relevant column to return the most \ interesting examples in the database. Pay attention to use only the column names that you can see in the schema \ description. Be careful to not query for columns that do not exist. \ Pay attention to which column is in which table. Also, qualify column names \ with the table name when needed. IMPORTANT NOTE: you can use specialized pgvector syntax (`<->`) to do nearest \ neighbors/semantic search to a given vector from an embeddings column in the table. \ The embeddings value for a given row typically represents the semantic meaning of that row. \ The vector represents an embedding representation \ of the question, given below. Do NOT fill in the vector values directly, but rather specify a \ `[query_vector]` placeholder. 
For instance, here are some example select statements \ (the name of the embeddings column is `embedding`): SELECT * FROM items ORDER BY embedding <-> '[query_vector]' LIMIT 5; SELECT * FROM items WHERE id != 1 ORDER BY embedding <-> (SELECT embedding FROM items WHERE id = 1) LIMIT 5; SELECT * FROM items WHERE embedding <-> '[query_vector]' < 5; You are required to use the following format, \ each taking one line: Question: Question here SQLQuery: SQL Query to run SQLResult: Result of the SQLQuery Answer: Final answer here Only use tables listed below. {schema} Question: {query_str} SQLQuery: \ """ text_to_sql_prompt =
PromptTemplate(text_to_sql_tmpl)
llama_index.core.PromptTemplate
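A sketch for inspecting the assembled prompt; the schema string and question are illustrative stand-ins.

```python
fmt_prompt = text_to_sql_prompt.format(
    dialect="postgres",
    schema="sec_text_chunk(page_label, file_name, text, embedding)",
    query_str="What was Lyft's revenue in 2021?",
)
print(fmt_prompt)
```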
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.WARNING) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader, get_response_synthesizer from llama_index.core import DocumentSummaryIndex from llama_index.llms.openai import OpenAI from llama_index.core.node_parser import SentenceSplitter wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) city_docs = [] for wiki_title in wiki_titles: docs = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() docs[0].doc_id = wiki_title city_docs.extend(docs) chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo") splitter =
SentenceSplitter(chunk_size=1024)
llama_index.core.node_parser.SentenceSplitter
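A sketch of the step this prompt leads up to: building a DocumentSummaryIndex over the city docs, using the splitter from the completed line.

```python
splitter = SentenceSplitter(chunk_size=1024)
response_synthesizer = get_response_synthesizer(
    response_mode="tree_summarize", use_async=True
)
doc_summary_index = DocumentSummaryIndex.from_documents(
    city_docs,
    llm=chatgpt,
    transformations=[splitter],
    response_synthesizer=response_synthesizer,
    show_progress=True,
)
```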
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-zep') get_ipython().system('pip install llama-index') import logging import sys from uuid import uuid4 logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import openai from dotenv import load_dotenv load_dotenv() openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.zep import ZepVectorStore get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.core import StorageContext zep_api_url = "http://localhost:8000" collection_name = f"graham{uuid4().hex}" vector_store = ZepVectorStore( api_url=zep_api_url, collection_name=collection_name, embedding_dimensions=1536, ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") print(str(response)) from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", }, ), ] collection_name = f"movies{uuid4().hex}" vector_store = ZepVectorStore( api_url=zep_api_url, collection_name=collection_name, embedding_dimensions=1536, ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
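An illustrative follow-up: query the movie collection with a metadata filter (the filter classes live in llama_index.core.vector_stores).

```python
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

filters = MetadataFilters(
    filters=[ExactMatchFilter(key="theme", value="Mafia")]
)
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What is Inception about?"))
```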
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-bagel') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] import bagel from bagel import Settings server_settings = Settings( bagel_api_impl="rest", bagel_server_host="api.bageldb.ai" ) client = bagel.Client(server_settings) collection = client.get_or_create_cluster("testing_embeddings") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.bagel import BagelVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", }, ),
TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." )
llama_index.core.schema.TextNode
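A sketch of the remaining wiring once the node list is complete: put the Bagel cluster behind a vector store and index the nodes.

```python
vector_store = BagelVectorStore(collection=collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
```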
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-postgres') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp') from llama_index.embeddings.huggingface import HuggingFaceEmbedding embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en") get_ipython().system('pip install llama-cpp-python') from llama_index.llms.llama_cpp import LlamaCPP model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf" llm = LlamaCPP( model_url=model_url, model_path=None, temperature=0.1, max_new_tokens=256, context_window=3900, generate_kwargs={}, model_kwargs={"n_gpu_layers": 1}, verbose=True, ) get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet') import psycopg2 db_name = "vector_db" host = "localhost" password = "password" port = "5432" user = "jerry" conn = psycopg2.connect( dbname="postgres", host=host, password=password, port=port, user=user, ) conn.autocommit = True with conn.cursor() as c: c.execute(f"DROP DATABASE IF EXISTS {db_name}") c.execute(f"CREATE DATABASE {db_name}") from sqlalchemy import make_url from llama_index.vector_stores.postgres import PGVectorStore vector_store = PGVectorStore.from_params( database=db_name, host=host, password=password, port=port, user=user, table_name="llama2_paper", embed_dim=384, # bge-small-en embedding dimension ) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
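A sketch of the continuation: load the paper with the completed reader and split it before embedding into the Postgres table.

```python
documents = loader.load_data(file_path=Path("./data/llama2.pdf"))

from llama_index.core.node_parser import SentenceSplitter

node_parser = SentenceSplitter(chunk_size=1024)
nodes = node_parser.get_nodes_from_documents(documents)
```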
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-elasticsearch') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.elasticsearch import ElasticsearchStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "A bunch of scientists bring back dinosaurs and mayhem breaks" " loose" ), metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), TextNode( text=( "Leo DiCaprio gets lost in a dream within a dream within a dream" " within a ..." ), metadata={ "year": 2010, "director": "Christopher Nolan", "rating": 8.2, }, ),
TextNode( text=( "A psychologist / detective gets lost in a series of dreams within" " dreams within dreams and Inception reused the idea" )
llama_index.core.schema.TextNode
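Illustrative next steps, assuming an Elasticsearch instance at localhost:9200: store the movie nodes so they can later be queried with metadata filters.

```python
vector_store = ElasticsearchStore(
    index_name="auto_retriever_movies", es_url="http://localhost:9200"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
```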
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') import nest_asyncio nest_asyncio.apply() get_ipython().system('pip install "google-generativeai" -q') from llama_index.core.llama_dataset import download_llama_dataset evaluator_dataset, _ = download_llama_dataset( "MiniMtBenchSingleGradingDataset", "./mini_mt_bench_data" ) evaluator_dataset.to_pandas()[:5] from llama_index.core.evaluation import CorrectnessEvaluator from llama_index.llms.openai import OpenAI from llama_index.llms.gemini import Gemini from llama_index.llms.cohere import Cohere llm_gpt4 = OpenAI(temperature=0, model="gpt-4") llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo") llm_gemini = Gemini(model="models/gemini-pro", temperature=0) evaluators = { "gpt-4": CorrectnessEvaluator(llm=llm_gpt4), "gpt-3.5":
CorrectnessEvaluator(llm=llm_gpt35)
llama_index.core.evaluation.CorrectnessEvaluator
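A sketch of how the truncated dict presumably finishes, with one correctness judge per model.

```python
evaluators = {
    "gpt-4": CorrectnessEvaluator(llm=llm_gpt4),
    "gpt-3.5": CorrectnessEvaluator(llm=llm_gpt35),
    "gemini-pro": CorrectnessEvaluator(llm=llm_gemini),
}
```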
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"') from llama_index.core import download_loader from llama_index.readers.file import PyMuPDFReader llama2_docs = PyMuPDFReader().load_data( file_path="./llama2.pdf", metadata=True ) attention_docs = PyMuPDFReader().load_data( file_path="./attention.pdf", metadata=True ) import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core.node_parser import TokenTextSplitter nodes = TokenTextSplitter( chunk_size=1024, chunk_overlap=128 ).get_nodes_from_documents(llama2_docs + attention_docs) from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.storage.docstore.mongodb import MongoDocumentStore from llama_index.storage.docstore.firestore import FirestoreDocumentStore from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore docstore = SimpleDocumentStore() docstore.add_documents(nodes) from llama_index.core import VectorStoreIndex, StorageContext from llama_index.retrievers.bm25 import BM25Retriever from llama_index.vector_stores.qdrant import QdrantVectorStore from qdrant_client import QdrantClient client = QdrantClient(path="./qdrant_data") vector_store =
QdrantVectorStore("composable", client=client)
llama_index.vector_stores.qdrant.QdrantVectorStore
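A sketch of the hybrid setup this row is building: index into Qdrant with the shared docstore, then add a BM25 retriever over the same nodes.

```python
storage_context = StorageContext.from_defaults(
    docstore=docstore, vector_store=vector_store
)
index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)
bm25_retriever = BM25Retriever.from_defaults(
    docstore=docstore, similarity_top_k=2
)
```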
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-extractors-entity') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE" from llama_index.llms.openai import OpenAI from llama_index.core.schema import MetadataMode llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512) from llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor, BaseExtractor, ) from llama_index.extractors.entity import EntityExtractor from llama_index.core.node_parser import TokenTextSplitter text_splitter = TokenTextSplitter( separator=" ", chunk_size=512, chunk_overlap=128 ) class CustomExtractor(BaseExtractor): def extract(self, nodes): metadata_list = [ { "custom": ( node.metadata["document_title"] + "\n" + node.metadata["excerpt_keywords"] ) } for node in nodes ] return metadata_list extractors = [ TitleExtractor(nodes=5, llm=llm), QuestionsAnsweredExtractor(questions=3, llm=llm), ] transformations = [text_splitter] + extractors from llama_index.core import SimpleDirectoryReader get_ipython().system('mkdir -p data') get_ipython().system('wget -O "data/10k-132.pdf" "https://www.dropbox.com/scl/fi/6dlqdk6e2k1mjhi8dee5j/uber.pdf?rlkey=2jyoe49bg2vwdlz30l76czq6g&dl=1"') get_ipython().system('wget -O "data/10k-vFinal.pdf" "https://www.dropbox.com/scl/fi/qn7g3vrk5mqb18ko4e5in/lyft.pdf?rlkey=j6jxtjwo8zbstdo4wz3ns8zoj&dl=1"') uber_docs = SimpleDirectoryReader(input_files=["data/10k-132.pdf"]).load_data() uber_front_pages = uber_docs[0:3] uber_content = uber_docs[63:69] uber_docs = uber_front_pages + uber_content from llama_index.core.ingestion import IngestionPipeline pipeline = IngestionPipeline(transformations=transformations) uber_nodes = pipeline.run(documents=uber_docs) uber_nodes[1].metadata lyft_docs = SimpleDirectoryReader( input_files=["data/10k-vFinal.pdf"] ).load_data() lyft_front_pages = lyft_docs[0:3] lyft_content = lyft_docs[68:73] lyft_docs = lyft_front_pages + lyft_content from llama_index.core.ingestion import IngestionPipeline pipeline = IngestionPipeline(transformations=transformations) lyft_nodes = pipeline.run(documents=lyft_docs) lyft_nodes[2].metadata from llama_index.core.question_gen import LLMQuestionGenerator from llama_index.core.question_gen.prompts import ( DEFAULT_SUB_QUESTION_PROMPT_TMPL, ) question_gen = LLMQuestionGenerator.from_defaults( llm=llm, prompt_template_str=""" Follow the example, but instead of giving a question, always prefix the question with: 'By first identifying and quoting the most relevant sources, '. 
""" + DEFAULT_SUB_QUESTION_PROMPT_TMPL, ) from copy import deepcopy nodes_no_metadata = deepcopy(uber_nodes) + deepcopy(lyft_nodes) for node in nodes_no_metadata: node.metadata = { k: node.metadata[k] for k in node.metadata if k in ["page_label", "file_name"] } print( "LLM sees:\n", (nodes_no_metadata)[9].get_content(metadata_mode=MetadataMode.LLM), ) from llama_index.core import VectorStoreIndex from llama_index.core.query_engine import SubQuestionQueryEngine from llama_index.core.tools import QueryEngineTool, ToolMetadata index_no_metadata = VectorStoreIndex( nodes=nodes_no_metadata, ) engine_no_metadata = index_no_metadata.as_query_engine( similarity_top_k=10, llm=OpenAI(model="gpt-4") ) final_engine_no_metadata = SubQuestionQueryEngine.from_defaults( query_engine_tools=[ QueryEngineTool( query_engine=engine_no_metadata, metadata=ToolMetadata( name="sec_filing_documents", description="financial information on companies", ), ) ], question_gen=question_gen, use_async=True, ) response_no_metadata = final_engine_no_metadata.query( """ What was the cost due to research and development v.s. sales and marketing for uber and lyft in 2019 in millions of USD? Give your answer as a JSON. """ ) print(response_no_metadata.response) print( "LLM sees:\n", (uber_nodes + lyft_nodes)[9].get_content(metadata_mode=MetadataMode.LLM), ) index = VectorStoreIndex( nodes=uber_nodes + lyft_nodes, ) engine = index.as_query_engine(similarity_top_k=10, llm=
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
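A sketch mirroring the no-metadata branch for the metadata-rich engine, completing the comparison the prompt sets up.

```python
final_engine = SubQuestionQueryEngine.from_defaults(
    query_engine_tools=[
        QueryEngineTool(
            query_engine=engine,
            metadata=ToolMetadata(
                name="sec_filing_documents",
                description="financial information on companies",
            ),
        )
    ],
    question_gen=question_gen,
    use_async=True,
)
```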
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2) Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine(vector_store_query_mode="mmr") response = query_engine.query("What did the author do growing up?") print(response) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents) query_engine_with_threshold = index.as_query_engine( vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2} ) response = query_engine_with_threshold.query( "What did the author do growing up?" ) print(response) index1 = VectorStoreIndex.from_documents(documents) query_engine_no_mmr = index1.as_query_engine() response_no_mmr = query_engine_no_mmr.query( "What did the author do growing up?" ) index2 = VectorStoreIndex.from_documents(documents) query_engine_with_high_threshold = index2.as_query_engine( vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.8} ) response_high_threshold = query_engine_with_high_threshold.query( "What did the author do growing up?" ) index3 = VectorStoreIndex.from_documents(documents) query_engine_with_low_threshold = index3.as_query_engine( vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2} ) response_low_threshold = query_engine_with_low_threshold.query( "What did the author do growing up?" ) print( "Scores without MMR ", [node.score for node in response_no_mmr.source_nodes], ) print( "Scores with MMR and a threshold of 0.8 ", [node.score for node in response_high_threshold.source_nodes], ) print( "Scores with MMR and a threshold of 0.2 ", [node.score for node in response_low_threshold.source_nodes], ) documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents( documents, ) retriever = index.as_retriever( vector_store_query_mode="mmr", similarity_top_k=3, vector_store_kwargs={"mmr_threshold": 0.1}, ) nodes = retriever.retrieve( "What did the author do during his time in Y Combinator?" ) from llama_index.core.response.notebook_utils import display_source_node for n in nodes:
display_source_node(n, source_length=1000)
llama_index.core.response.notebook_utils.display_source_node
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') qa_prompt_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the question: {query_str}\n" ) refine_prompt_str = ( "We have the opportunity to refine the original answer " "(only if needed) with some more context below.\n" "------------\n" "{context_msg}\n" "------------\n" "Given the new context, refine the original answer to better " "answer the question: {query_str}. " "If the context isn't useful, output the original answer again.\n" "Original Answer: {existing_answer}" ) from llama_index.core.llms import ChatMessage, MessageRole from llama_index.core import ChatPromptTemplate chat_text_qa_msgs = [ ChatMessage( role=MessageRole.SYSTEM, content=( "Always answer the question, even if the context isn't helpful." ), ), ChatMessage(role=MessageRole.USER, content=qa_prompt_str), ] text_qa_template = ChatPromptTemplate(chat_text_qa_msgs) chat_refine_msgs = [ ChatMessage( role=MessageRole.SYSTEM, content=( "Always answer the question, even if the context isn't helpful." ), ), ChatMessage(role=MessageRole.USER, content=refine_prompt_str), ] refine_template = ChatPromptTemplate(chat_refine_msgs) from llama_index.core import ChatPromptTemplate chat_text_qa_msgs = [ ( "system", "Always answer the question, even if the context isn't helpful.", ), ("user", qa_prompt_str), ] text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs) chat_refine_msgs = [ ( "system", "Always answer the question, even if the context isn't helpful.", ), ("user", refine_prompt_str), ] refine_template = ChatPromptTemplate.from_messages(chat_refine_msgs) import openai import os os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI documents = SimpleDirectoryReader("./data/paul_graham/").load_data() llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1) index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
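A sketch of the templates in use: attach them when building the query engine over the completed index.

```python
print(
    index.as_query_engine(
        text_qa_template=text_qa_template,
        refine_template=refine_template,
        llm=llm,
    ).query("Who is Joe Biden?")
)
```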
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() from llama_index.core.evaluation import generate_question_context_pairs from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
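A sketch of the usual continuation: load the essay, chunk and index it, and generate question-context pairs for retrieval evaluation.

```python
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
nodes = SentenceSplitter(chunk_size=512).get_nodes_from_documents(documents)
vector_index = VectorStoreIndex(nodes)
qa_dataset = generate_question_context_pairs(
    nodes, llm=OpenAI(model="gpt-3.5-turbo"), num_questions_per_chunk=2
)
```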
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') texts = [ "The president in the year 2040 is John Cena.", "The president in the year 2050 is Florence Pugh.", 'The president in the year 2060 is Dwayne "The Rock" Johnson.', ] import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-3.5-turbo-0613")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-supabase') get_ipython().system('pip install llama-index') from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="Math Tutor", instructions="You are a personal math tutor. Write and run code to answer math questions.", openai_tools=[{"type": "code_interpreter"}], instructions_prefix="Please address the user as Jane Doe. The user has a premium account.", ) agent.thread_id response = agent.chat( "I need to solve the equation `3x + 11 = 14`. Can you help me?" ) print(str(response)) from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="SEC Analyst", instructions="You are a QA assistant designed to analyze sec filings.", openai_tools=[{"type": "retrieval"}], instructions_prefix="Please address the user as Jerry.", files=["data/10k/lyft_2021.pdf"], verbose=True, ) response = agent.chat("What was Lyft's revenue growth in 2021?") print(str(response)) from llama_index.agent.openai import OpenAIAssistantAgent from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.core.tools import QueryEngineTool, ToolMetadata try: storage_context = StorageContext.from_defaults( persist_dir="./storage/lyft" ) lyft_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/uber" ) uber_index = load_index_from_storage(storage_context) index_loaded = True except Exception: index_loaded = False get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") if not index_loaded: lyft_docs = SimpleDirectoryReader( input_files=["./data/10k/lyft_2021.pdf"] ).load_data() uber_docs = SimpleDirectoryReader( input_files=["./data/10k/uber_2021.pdf"] ).load_data() lyft_index =
VectorStoreIndex.from_documents(lyft_docs)
llama_index.core.VectorStoreIndex.from_documents
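A sketch of the matching steps for Uber plus persistence, following the standard 10-K flow.

```python
uber_index = VectorStoreIndex.from_documents(uber_docs)
lyft_index.storage_context.persist(persist_dir="./storage/lyft")
uber_index.storage_context.persist(persist_dir="./storage/uber")
```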
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY') get_ipython().system('pip install llama-index pypdf') get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-small-en") llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
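A sketch: register the local models globally and build the baseline retriever that the recursive setup is later compared against.

```python
from llama_index.core import Settings

Settings.llm = llm
Settings.embed_model = embed_model
base_index = VectorStoreIndex(base_nodes)
base_retriever = base_index.as_retriever(similarity_top_k=2)
```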
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import ( FaithfulnessEvaluator, RelevancyEvaluator, CorrectnessEvaluator, ) from llama_index.core.node_parser import SentenceSplitter import pandas as pd pd.set_option("display.max_colwidth", 0) gpt4 = OpenAI(temperature=0, model="gpt-4") faithfulness_gpt4 = FaithfulnessEvaluator(llm=gpt4) relevancy_gpt4 = RelevancyEvaluator(llm=gpt4) correctness_gpt4 = CorrectnessEvaluator(llm=gpt4) documents =
SimpleDirectoryReader("./test_wiki_data/")
llama_index.core.SimpleDirectoryReader
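A sketch of one evaluation pass: index the wiki data and grade a response with the GPT-4 faithfulness judge.

```python
documents = SimpleDirectoryReader("./test_wiki_data/").load_data()
vector_index = VectorStoreIndex.from_documents(
    documents, transformations=[SentenceSplitter(chunk_size=512)]
)
response = vector_index.as_query_engine().query(
    "How did New York City get its name?"
)
eval_result = faithfulness_gpt4.evaluate_response(response=response)
print(eval_result.passing)
```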
get_ipython().run_line_magic('pip', 'install llama-index-readers-qdrant') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) get_ipython().system('pip install llama-index') from llama_index.readers.qdrant import QdrantReader reader =
QdrantReader(host="localhost")
llama_index.readers.qdrant.QdrantReader
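A usage sketch; the collection name and query vector are placeholders for an existing Qdrant collection.

```python
reader = QdrantReader(host="localhost")
documents = reader.load_data(
    collection_name="demo", query_vector=[0.1, 0.2, 0.3], limit=5
)
```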
get_ipython().run_line_magic('pip', 'install llama-index-llms-openrouter') get_ipython().system('pip install llama-index') from llama_index.llms.openrouter import OpenRouter from llama_index.core.llms import ChatMessage llm = OpenRouter( api_key="<your-api-key>", max_tokens=256, context_window=4096, model="gryphe/mythomax-l2-13b", ) message = ChatMessage(role="user", content="Tell me a joke") resp = llm.chat([message]) print(resp) message = ChatMessage(role="user", content="Tell me a story in 250 words") resp = llm.stream_chat([message]) for r in resp: print(r.delta, end="") resp = llm.complete("Tell me a joke") print(resp) resp = llm.stream_complete("Tell me a story in 250 words") for r in resp: print(r.delta, end="") llm =
OpenRouter(model="mistralai/mixtral-8x7b-instruct")
llama_index.llms.openrouter.OpenRouter
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.huggingface import HuggingFaceLLM from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham")
llama_index.core.SimpleDirectoryReader
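An illustrative configuration (the model choice is an example, not taken from the row): serve a local Hugging Face model and index the essay with it.

```python
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
Settings.llm = HuggingFaceLLM(
    model_name="HuggingFaceH4/zephyr-7b-beta",  # example model
    tokenizer_name="HuggingFaceH4/zephyr-7b-beta",
    context_window=3900,
    max_new_tokens=256,
    device_map="auto",
)
index = VectorStoreIndex.from_documents(documents)
```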
import openai openai.api_key = "sk-your-key" from llama_index.agent import OpenAIAgent from llama_index.tools import QueryEngineTool, ToolMetadata from llama_index import SimpleDirectoryReader, VectorStoreIndex import requests response = requests.get( "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" ) essay_txt = response.text with open("pg_essay.txt", "w") as fp: fp.write(essay_txt) documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data() index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine() query_engine_tool = QueryEngineTool( query_engine=query_engine, metadata=ToolMetadata( name="paul_graham", description=( "Provides a biography of Paul Graham, from childhood to college to adult" " life" ), ), ) from llama_index.tools.text_to_image.base import TextToImageToolSpec from llama_index.llms import OpenAI llm = OpenAI(model="gpt-4") text_to_image_spec =
TextToImageToolSpec()
llama_index.tools.text_to_image.base.TextToImageToolSpec
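A sketch: convert the tool spec into tools and hand them, together with the essay tool, to one agent.

```python
agent = OpenAIAgent.from_tools(
    [*text_to_image_spec.to_tool_list(), query_engine_tool],
    llm=llm,
    verbose=True,
)
```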
get_ipython().run_line_magic('pip', 'install llama-index-readers-make-com') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.readers.make_com import MakeWrapper get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(documents=documents)
llama_index.core.VectorStoreIndex.from_documents
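A sketch of the reader's intended flow (the webhook URL is a placeholder): query the index and forward the response to a Make.com scenario.

```python
query_str = "What did the author do growing up?"
response = index.as_query_engine().query(query_str)
wrapper = MakeWrapper()
wrapper.pass_response_to_webhook("<webhook_url>", response, query_str)
```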
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dashvector') get_ipython().system('pip install llama-index') import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import dashvector api_key = os.environ["DASHVECTOR_API_KEY"] client = dashvector.Client(api_key=api_key) client.create("llama-demo", dimension=1536) dashvector_collection = client.get("llama-demo") get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.dashvector import DashVectorStore from IPython.display import Markdown, display documents =
SimpleDirectoryReader("./data/paul_graham")
llama_index.core.SimpleDirectoryReader
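A sketch of the remaining wiring: load the essay and index it into the DashVector collection.

```python
from llama_index.core import StorageContext

documents = SimpleDirectoryReader("./data/paul_graham").load_data()
vector_store = DashVectorStore(dashvector_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
```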
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-cassandra') get_ipython().system('pip install --quiet "astrapy>=0.5.8"') import os from getpass import getpass from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, Document, StorageContext, ) from llama_index.vector_stores.cassandra import CassandraVectorStore from cassandra.cluster import Cluster cluster = Cluster(["127.0.0.1"]) session = cluster.connect() import cassio CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ") cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE) ASTRA_DB_ID = input("ASTRA_DB_ID = ") ASTRA_DB_TOKEN = getpass("ASTRA_DB_TOKEN = ") desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ") if desired_keyspace: ASTRA_DB_KEYSPACE = desired_keyspace else: ASTRA_DB_KEYSPACE = None import cassio cassio.init( database_id=ASTRA_DB_ID, token=ASTRA_DB_TOKEN, keyspace=ASTRA_DB_KEYSPACE, ) os.environ["OPENAI_API_KEY"] = getpass("OpenAI API Key:") get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print(f"Total documents: {len(documents)}") print(f"First document, id: {documents[0].doc_id}") print(f"First document, hash: {documents[0].hash}") print( "First document, text" f" ({len(documents[0].text)} characters):\n{'='*20}\n{documents[0].text[:360]} ..." ) cassandra_store = CassandraVectorStore( table="cass_v_table", embedding_dimension=1536 ) storage_context =
StorageContext.from_defaults(vector_store=cassandra_store)
llama_index.core.StorageContext.from_defaults
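A sketch: build the index on the Cassandra-backed storage context and run a query.

```python
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
print(
    index.as_query_engine().query("Why did the author choose to work on AI?")
)
```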
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-experimental-param-tuner') get_ipython().system('pip install llama-index llama-hub') get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') import nest_asyncio nest_asyncio.apply() from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SimpleNodeParser from llama_index.core.schema import IndexNode get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json') from llama_index.core.evaluation import QueryResponseDataset eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) eval_qs = eval_dataset.questions ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs] from llama_index.core import ( VectorStoreIndex, load_index_from_storage, StorageContext, ) from llama_index.experimental.param_tuner import ParamTuner from llama_index.core.param_tuner.base import TunedResult, RunResult from llama_index.core.evaluation.eval_utils import ( get_responses, aget_responses, ) from llama_index.core.evaluation import ( SemanticSimilarityEvaluator, BatchEvalRunner, ) from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding import os import numpy as np from pathlib import Path def _build_index(chunk_size, docs): index_out_path = f"./storage_{chunk_size}" if not os.path.exists(index_out_path): Path(index_out_path).mkdir(parents=True, exist_ok=True) node_parser = SimpleNodeParser.from_defaults(chunk_size=chunk_size) base_nodes = node_parser.get_nodes_from_documents(docs) index = VectorStoreIndex(base_nodes) index.storage_context.persist(index_out_path) else: storage_context = StorageContext.from_defaults( persist_dir=index_out_path ) index = load_index_from_storage( storage_context, ) return index def _get_eval_batch_runner(): evaluator_s = SemanticSimilarityEvaluator(embed_model=OpenAIEmbedding()) eval_batch_runner = BatchEvalRunner( {"semantic_similarity": evaluator_s}, workers=2, show_progress=True ) return eval_batch_runner def objective_function(params_dict): chunk_size = params_dict["chunk_size"] docs = params_dict["docs"] top_k = params_dict["top_k"] eval_qs = params_dict["eval_qs"] ref_response_strs = params_dict["ref_response_strs"] index = _build_index(chunk_size, docs) query_engine = index.as_query_engine(similarity_top_k=top_k) pred_response_objs = get_responses( eval_qs, query_engine, show_progress=True ) eval_batch_runner = _get_eval_batch_runner() eval_results = eval_batch_runner.evaluate_responses( eval_qs, responses=pred_response_objs, reference=ref_response_strs ) mean_score = np.array( [r.score for r in eval_results["semantic_similarity"]] ).mean() return
RunResult(score=mean_score, params=params_dict)
llama_index.core.param_tuner.base.RunResult
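A sketch of running the tuner over `objective_function`; the grid and subset sizes are illustrative to keep the run cheap.

```python
param_dict = {"chunk_size": [256, 512, 1024], "top_k": [1, 2, 5]}
fixed_param_dict = {
    "docs": docs,
    "eval_qs": eval_qs[:10],
    "ref_response_strs": ref_response_strs[:10],
}
param_tuner = ParamTuner(
    param_fn=objective_function,
    param_dict=param_dict,
    fixed_param_dict=fixed_param_dict,
    show_progress=True,
)
results = param_tuner.tune()
print(results.best_run_result.score)
```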
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') get_ipython().system('pip install llama-index chromadb --quiet') get_ipython().system('pip install chromadb==0.4.17') get_ipython().system('pip install sentence-transformers') get_ipython().system('pip install pydantic==1.10.11') get_ipython().system('pip install open-clip-torch') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.chroma import ChromaVectorStore from llama_index.core import StorageContext from llama_index.embeddings.huggingface import HuggingFaceEmbedding from IPython.display import Markdown, display import chromadb import os import openai OPENAI_API_KEY = "" openai.api_key = OPENAI_API_KEY os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY import requests def get_wikipedia_images(title): response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "imageinfo", "iiprop": "url|dimensions|mime", "generator": "images", "gimlimit": "50", }, ).json() image_urls = [] for page in response["query"]["pages"].values(): if page["imageinfo"][0]["url"].endswith(".jpg") or page["imageinfo"][ 0 ]["url"].endswith(".png"): image_urls.append(page["imageinfo"][0]["url"]) return image_urls from pathlib import Path import urllib.request image_uuid = 0 MAX_IMAGES_PER_WIKI = 20 wiki_titles = { "Tesla Model X", "Pablo Picasso", "Rivian", "The Lord of the Rings", "The Matrix", "The Simpsons", } data_path = Path("mixed_wiki") if not data_path.exists(): Path.mkdir(data_path) for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) images_per_wiki = 0 try: list_img_urls = get_wikipedia_images(title) for url in list_img_urls: if url.endswith(".jpg") or url.endswith(".png"): image_uuid += 1 urllib.request.urlretrieve( url, data_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki > MAX_IMAGES_PER_WIKI: break except Exception: print(f"No images found for Wikipedia page: {title}") continue from chromadb.utils.embedding_functions import OpenCLIPEmbeddingFunction embedding_function = OpenCLIPEmbeddingFunction() from llama_index.core.indices import MultiModalVectorStoreIndex from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import SimpleDirectoryReader, StorageContext from chromadb.utils.data_loaders import ImageLoader image_loader = ImageLoader() chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection( "multimodal_collection", embedding_function=embedding_function, data_loader=image_loader, ) documents = SimpleDirectoryReader("./mixed_wiki/").load_data() vector_store =
ChromaVectorStore(chroma_collection=chroma_collection)
llama_index.vector_stores.chroma.ChromaVectorStore
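A sketch of the final step: build the multimodal index over the Chroma collection that holds both the text and the downloaded images.

```python
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = MultiModalVectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
```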
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-cross-encoders') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system('pip install datasets --quiet') get_ipython().system('pip install sentence-transformers --quiet') get_ipython().system('pip install openai --quiet') from datasets import load_dataset import random dataset = load_dataset("allenai/qasper") train_dataset = dataset["train"] validation_dataset = dataset["validation"] test_dataset = dataset["test"] random.seed(42) # Set a random seed for reproducibility train_sampled_indices = random.sample(range(len(train_dataset)), 800) train_samples = [train_dataset[i] for i in train_sampled_indices] test_sampled_indices = random.sample(range(len(test_dataset)), 80) test_samples = [test_dataset[i] for i in test_sampled_indices] from typing import List def get_full_text(sample: dict) -> str: """ :param dict sample: the row sample from QASPER """ title = sample["title"] abstract = sample["abstract"] sections_list = sample["full_text"]["section_name"] paragraph_list = sample["full_text"]["paragraphs"] combined_sections_with_paras = "" if len(sections_list) == len(paragraph_list): combined_sections_with_paras += title + "\t" combined_sections_with_paras += abstract + "\t" for index in range(0, len(sections_list)): combined_sections_with_paras += str(sections_list[index]) + "\t" combined_sections_with_paras += "".join(paragraph_list[index]) return combined_sections_with_paras else: print("Not the same number of sections as paragraphs list") def get_questions(sample: dict) -> List[str]: """ :param dict sample: the row sample from QASPER """ questions_list = sample["qas"]["question"] return questions_list doc_qa_dict_list = [] for train_sample in train_samples: full_text = get_full_text(train_sample) questions_list = get_questions(train_sample) local_dict = {"paper": full_text, "questions": questions_list} doc_qa_dict_list.append(local_dict) len(doc_qa_dict_list) import pandas as pd df_train = pd.DataFrame(doc_qa_dict_list) df_train.to_csv("train.csv") """ The Answers field in the dataset follows the format below: Unanswerable answers have "unanswerable" set to true. The remaining answers have exactly one of the following fields being non-empty. "extractive_spans" are spans in the paper which serve as the answer. "free_form_answer" is a written out answer. "yes_no" is true iff the answer is Yes, and false iff the answer is No. We accept only free-form answers, and for all the other kinds of answers we set their value to 'Unacceptable', to better evaluate the performance of the query engine using the pairwise comparison evaluator, as it uses GPT-4, which is biased towards preferring longer answers. https://www.anyscale.com/blog/a-comprehensive-guide-for-building-rag-based-llm-applications-part-1 So in the case of 'yes_no' answers it can favour query engine answers more than reference answers. Also in the case of extracted spans it can favour reference answers more than query engine generated answers. 
""" eval_doc_qa_answer_list = [] def get_answers(sample: dict) -> List[str]: """ :param dict sample: the row sample from the train split of QASPER """ final_answers_list = [] answers = sample["qas"]["answers"] for answer in answers: local_answer = "" types_of_answers = answer["answer"][0] if types_of_answers["unanswerable"] == False: if types_of_answers["free_form_answer"] != "": local_answer = types_of_answers["free_form_answer"] else: local_answer = "Unacceptable" else: local_answer = "Unacceptable" final_answers_list.append(local_answer) return final_answers_list for test_sample in test_samples: full_text = get_full_text(test_sample) questions_list = get_questions(test_sample) answers_list = get_answers(test_sample) local_dict = { "paper": full_text, "questions": questions_list, "answers": answers_list, } eval_doc_qa_answer_list.append(local_dict) len(eval_doc_qa_answer_list) import pandas as pd df_test = pd.DataFrame(eval_doc_qa_answer_list) df_test.to_csv("test.csv") get_ipython().system('pip install llama-index --quiet') import os from llama_index.core import SimpleDirectoryReader import openai from llama_index.finetuning.cross_encoders.dataset_gen import ( generate_ce_fine_tuning_dataset, generate_synthetic_queries_over_documents, ) from llama_index.finetuning.cross_encoders import CrossEncoderFinetuneEngine os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import Document final_finetuning_data_list = [] for paper in doc_qa_dict_list: questions_list = paper["questions"] documents = [Document(text=paper["paper"])] local_finetuning_dataset = generate_ce_fine_tuning_dataset( documents=documents, questions_list=questions_list, max_chunk_length=256, top_k=5, ) final_finetuning_data_list.extend(local_finetuning_dataset) len(final_finetuning_data_list) import pandas as pd df_finetuning_dataset = pd.DataFrame(final_finetuning_data_list) df_finetuning_dataset.to_csv("fine_tuning.csv") finetuning_dataset = final_finetuning_data_list finetuning_dataset[0] get_ipython().system('wget -O test.csv https://www.dropbox.com/scl/fi/3lmzn6714oy358mq0vawm/test.csv?rlkey=yz16080te4van7fvnksi9kaed&dl=0') import pandas as pd import ast # Used to safely evaluate the string as a list df_test = pd.read_csv("/content/test.csv", index_col=0) df_test["questions"] = df_test["questions"].apply(ast.literal_eval) df_test["answers"] = df_test["answers"].apply(ast.literal_eval) print(f"Number of papers in the test sample:- {len(df_test)}") from llama_index.core import Document final_eval_data_list = [] for index, row in df_test.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] local_eval_dataset = generate_ce_fine_tuning_dataset( documents=documents, questions_list=query_list, max_chunk_length=256, top_k=5, ) relevant_query_list = [] relevant_context_list = [] for item in local_eval_dataset: if item.score == 1: relevant_query_list.append(item.query) relevant_context_list.append(item.context) if len(relevant_query_list) > 0: final_eval_data_list.append( { "paper": row["paper"], "questions": relevant_query_list, "context": relevant_context_list, } ) len(final_eval_data_list) import pandas as pd df_finetuning_dataset = pd.DataFrame(final_eval_data_list) df_finetuning_dataset.to_csv("reranking_test.csv") get_ipython().system('pip install huggingface_hub --quiet') from huggingface_hub import notebook_login notebook_login() from sentence_transformers import SentenceTransformer finetuning_engine = CrossEncoderFinetuneEngine( 
dataset=finetuning_dataset, epochs=2, batch_size=8 ) finetuning_engine.finetune() finetuning_engine.push_to_hub( repo_id="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2" ) get_ipython().system('pip install nest-asyncio --quiet') import nest_asyncio nest_asyncio.apply() get_ipython().system('wget -O reranking_test.csv https://www.dropbox.com/scl/fi/mruo5rm46k1acm1xnecev/reranking_test.csv?rlkey=hkniwowq0xrc3m0ywjhb2gf26&dl=0') import pandas as pd import ast df_reranking = pd.read_csv("/content/reranking_test.csv", index_col=0) df_reranking["questions"] = df_reranking["questions"].apply(ast.literal_eval) df_reranking["context"] = df_reranking["context"].apply(ast.literal_eval) print(f"Number of papers in the reranking eval dataset:- {len(df_reranking)}") df_reranking.head(1) from llama_index.core.postprocessor import SentenceTransformerRerank from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.core.retrievers import VectorIndexRetriever from llama_index.llms.openai import OpenAI from llama_index.core import Document from llama_index.core import Settings import os import openai import pandas as pd os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] Settings.chunk_size = 256 rerank_base = SentenceTransformerRerank( model="cross-encoder/ms-marco-MiniLM-L-12-v2", top_n=3 ) rerank_finetuned = SentenceTransformerRerank( model="bpHigh/Cross-Encoder-LLamaIndex-Demo-v2", top_n=3 ) without_reranker_hits = 0 base_reranker_hits = 0 finetuned_reranker_hits = 0 total_number_of_context = 0 for index, row in df_reranking.iterrows(): documents = [Document(text=row["paper"])] query_list = row["questions"] context_list = row["context"] assert len(query_list) == len(context_list) vector_index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
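The prompt above stops at vector_index with three hit counters already initialized; a hedged sketch of how the retrieval-versus-reranker comparison could proceed inside that loop. The top-k values and the substring hit test are assumptions, not taken from the source:

from llama_index.core import QueryBundle

retriever = vector_index.as_retriever(similarity_top_k=8)
for query, context in zip(query_list, context_list):
    total_number_of_context += 1
    nodes = retriever.retrieve(query)
    # Hit if the gold context shows up among the raw top retrieved chunks.
    if any(context in n.get_content() for n in nodes[:3]):
        without_reranker_hits += 1
    # Re-score the same candidates with each cross-encoder.
    bundle = QueryBundle(query_str=query)
    if any(
        context in n.get_content()
        for n in rerank_base.postprocess_nodes(nodes, query_bundle=bundle)
    ):
        base_reranker_hits += 1
    if any(
        context in n.get_content()
        for n in rerank_finetuned.postprocess_nodes(nodes, query_bundle=bundle)
    ):
        finetuned_reranker_hits += 1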
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd from pathlib import Path data_dir = Path("./WikiTableQuestions/csv/200-csv") csv_files = sorted([f for f in data_dir.glob("*.csv")]) dfs = [] for csv_file in csv_files: print(f"processing file: {csv_file}") try: df = pd.read_csv(csv_file) dfs.append(df) except Exception as e: print(f"Error parsing {csv_file}: {str(e)}") tableinfo_dir = "WikiTableQuestions_TableInfo" get_ipython().system('mkdir {tableinfo_dir}') from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.llms.openai import OpenAI class TableInfo(BaseModel): """Information regarding a structured table.""" table_name: str = Field( ..., description="table name (must be underscores and NO spaces)" ) table_summary: str = Field( ..., description="short, concise summary/caption of the table" ) prompt_str = """\ Give me a summary of the table with the following JSON format. - The table name must be unique to the table and describe it while being concise. - Do NOT output a generic table name (e.g. table, my_table). Do NOT make the table name one of the following: {exclude_table_name_list} Table: {table_str} Summary: """ program = LLMTextCompletionProgram.from_defaults( output_cls=TableInfo, llm=OpenAI(model="gpt-3.5-turbo"), prompt_template_str=prompt_str, ) import json def _get_tableinfo_with_index(idx: int) -> str: results_gen = Path(tableinfo_dir).glob(f"{idx}_*") results_list = list(results_gen) if len(results_list) == 0: return None elif len(results_list) == 1: path = results_list[0] return TableInfo.parse_file(path) else: raise ValueError( f"More than one file matching index: {list(results_gen)}" ) table_names = set() table_infos = [] for idx, df in enumerate(dfs): table_info = _get_tableinfo_with_index(idx) if table_info: table_infos.append(table_info) else: while True: df_str = df.head(10).to_csv() table_info = program( table_str=df_str, exclude_table_name_list=str(list(table_names)), ) table_name = table_info.table_name print(f"Processed table: {table_name}") if table_name not in table_names: table_names.add(table_name) break else: print(f"Table name {table_name} already exists, trying again.") pass out_file = f"{tableinfo_dir}/{idx}_{table_name}.json" json.dump(table_info.dict(), open(out_file, "w")) table_infos.append(table_info) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, ) import re def sanitize_column_name(col_name): return re.sub(r"\W+", "_", col_name) def create_table_from_dataframe( df: pd.DataFrame, table_name: str, engine, metadata_obj ): sanitized_columns = {col: sanitize_column_name(col) for col in df.columns} df = df.rename(columns=sanitized_columns) columns = [ Column(col, String if dtype == "object" else Integer) for col, dtype in zip(df.columns, df.dtypes) ] table = Table(table_name, metadata_obj, *columns) metadata_obj.create_all(engine) with engine.connect() as conn: for _, row in df.iterrows(): insert_stmt = table.insert().values(**row.to_dict()) conn.execute(insert_stmt) conn.commit() engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() for idx, df in enumerate(dfs): tableinfo = _get_tableinfo_with_index(idx) print(f"Creating table: {tableinfo.table_name}") create_table_from_dataframe(df, 
tableinfo.table_name, engine, metadata_obj) import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import SQLDatabase, VectorStoreIndex sql_database = SQLDatabase(engine) table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ SQLTableSchema(table_name=t.table_name, context_str=t.table_summary) for t in table_infos ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) obj_retriever = obj_index.as_retriever(similarity_top_k=3) from llama_index.core.retrievers import SQLRetriever from typing import List from llama_index.core.query_pipeline import FnComponent sql_retriever = SQLRetriever(sql_database) def get_table_context_str(table_schema_objs: List[SQLTableSchema]): """Get table context string.""" context_strs = [] for table_schema_obj in table_schema_objs: table_info = sql_database.get_single_table_info( table_schema_obj.table_name ) if table_schema_obj.context_str: table_opt_context = " The table description is: " table_opt_context += table_schema_obj.context_str table_info += table_opt_context context_strs.append(table_info) return "\n\n".join(context_strs) table_parser_component = FnComponent(fn=get_table_context_str) from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import FnComponent from llama_index.core.llms import ChatResponse def parse_response_to_sql(response: ChatResponse) -> str: """Parse response to SQL.""" response = response.message.content sql_query_start = response.find("SQLQuery:") if sql_query_start != -1: response = response[sql_query_start:] if response.startswith("SQLQuery:"): response = response[len("SQLQuery:") :] sql_result_start = response.find("SQLResult:") if sql_result_start != -1: response = response[:sql_result_start] return response.strip().strip("```").strip() sql_parser_component = FnComponent(fn=parse_response_to_sql) text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format( dialect=engine.dialect.name ) print(text2sql_prompt.template) response_synthesis_prompt_str = ( "Given an input question, synthesize a response from the query results.\n" "Query: {query_str}\n" "SQL: {sql_query}\n" "SQL Response: {context_str}\n" "Response: " ) response_synthesis_prompt = PromptTemplate( response_synthesis_prompt_str, ) llm = OpenAI(model="gpt-3.5-turbo") from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, CustomQueryComponent, ) qp = QP( modules={ "input": InputComponent(), "table_retriever": obj_retriever, "table_output_parser": table_parser_component, "text2sql_prompt": text2sql_prompt, "text2sql_llm": llm, "sql_output_parser": sql_parser_component, "sql_retriever": sql_retriever, "response_synthesis_prompt": response_synthesis_prompt, "response_synthesis_llm": llm, }, verbose=True, ) qp.add_chain(["input", "table_retriever", "table_output_parser"]) qp.add_link("input", "text2sql_prompt", dest_key="query_str") qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema") qp.add_chain( ["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"] ) qp.add_link( "sql_output_parser", "response_synthesis_prompt", dest_key="sql_query" ) qp.add_link( "sql_retriever", "response_synthesis_prompt", dest_key="context_str" ) 
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str") qp.add_link("response_synthesis_prompt", "response_synthesis_llm") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.dag) net.show("text2sql_dag.html") response = qp.run( query="What was the year that The Notorious B.I.G was signed to Bad Boy?" ) print(str(response)) response = qp.run(query="Who won best director in the 1972 academy awards") print(str(response)) response = qp.run(query="What was the term of Pasquale Preziosa?") print(str(response)) from llama_index.core import VectorStoreIndex, load_index_from_storage from sqlalchemy import text from llama_index.core.schema import TextNode from llama_index.core import StorageContext import os from pathlib import Path from typing import Dict def index_all_tables( sql_database: SQLDatabase, table_index_dir: str = "table_index_dir" ) -> Dict[str, VectorStoreIndex]: """Index all tables.""" if not Path(table_index_dir).exists(): os.makedirs(table_index_dir) vector_index_dict = {} engine = sql_database.engine for table_name in sql_database.get_usable_table_names(): print(f"Indexing rows in table: {table_name}") if not os.path.exists(f"{table_index_dir}/{table_name}"): with engine.connect() as conn: cursor = conn.execute(text(f'SELECT * FROM "{table_name}"')) result = cursor.fetchall() row_tups = [] for row in result: row_tups.append(tuple(row)) nodes = [TextNode(text=str(t)) for t in row_tups] index = VectorStoreIndex(nodes) index.set_index_id("vector_index") index.storage_context.persist(f"{table_index_dir}/{table_name}") else: storage_context = StorageContext.from_defaults( persist_dir=f"{table_index_dir}/{table_name}" ) index = load_index_from_storage( storage_context, index_id="vector_index" ) vector_index_dict[table_name] = index return vector_index_dict vector_index_dict = index_all_tables(sql_database) test_retriever = vector_index_dict["Bad_Boy_Artists"].as_retriever( similarity_top_k=1 ) nodes = test_retriever.retrieve("P. Diddy") print(nodes[0].get_content()) from llama_index.core.retrievers import SQLRetriever from typing import List from llama_index.core.query_pipeline import FnComponent sql_retriever = SQLRetriever(sql_database) def get_table_context_and_rows_str( query_str: str, table_schema_objs: List[SQLTableSchema] ): """Get table context string.""" context_strs = [] for table_schema_obj in table_schema_objs: table_info = sql_database.get_single_table_info( table_schema_obj.table_name ) if table_schema_obj.context_str: table_opt_context = " The table description is: " table_opt_context += table_schema_obj.context_str table_info += table_opt_context vector_retriever = vector_index_dict[ table_schema_obj.table_name ].as_retriever(similarity_top_k=2) relevant_nodes = vector_retriever.retrieve(query_str) if len(relevant_nodes) > 0: table_row_context = "\nHere are some relevant example rows (values in the same order as columns above)\n" for node in relevant_nodes: table_row_context += str(node.get_content()) + "\n" table_info += table_row_context context_strs.append(table_info) return "\n\n".join(context_strs) table_parser_component =
FnComponent(fn=get_table_context_and_rows_str)
llama_index.core.query_pipeline.FnComponent
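Because get_table_context_and_rows_str takes query_str in addition to the retrieved schemas, rebuilding the query pipeline with this component needs one extra input link; a small sketch under that assumption, with the remaining modules and links unchanged from the pipeline defined earlier in the prompt:

qp.add_link("input", "table_retriever")
# The new parser needs the raw query to fetch relevant example rows.
qp.add_link("input", "table_output_parser", dest_key="query_str")
qp.add_link(
    "table_retriever", "table_output_parser", dest_key="table_schema_objs"
)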
import os os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY" get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2312.04511.pdf" -O "llm_compiler.pdf"') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2312.06648.pdf" -O "dense_x_retrieval.pdf"') from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader(input_files=["dense_x_retrieval.pdf"]) dense_x_retrieval_docs = reader.load_data() reader =
SimpleDirectoryReader(input_files=["llm_compiler.pdf"])
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index = VectorStoreIndex(base_nodes) query_engine = index.as_query_engine(similarity_top_k=2) get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json') from llama_index.core.evaluation import QueryResponseDataset eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) from llama_index.core.evaluation.eval_utils import get_responses from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner evaluator_c = CorrectnessEvaluator() evaluator_dict = {"correctness": evaluator_c} batch_runner =
BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
llama_index.core.evaluation.BatchEvalRunner
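A sketch of how this runner is typically driven, assuming the eval_dataset and query_engine from the prompt; the kwargs mirror common llama-index usage rather than this specific notebook:

from llama_index.core.evaluation.eval_utils import get_responses

eval_qs = eval_dataset.questions
ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs]
# Generate predictions, then score them for correctness in batch.
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs, responses=pred_responses, reference=ref_response_strs
)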
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import ( FaithfulnessEvaluator, RelevancyEvaluator, CorrectnessEvaluator, ) from llama_index.core.node_parser import SentenceSplitter import pandas as pd pd.set_option("display.max_colwidth", 0) gpt4 = OpenAI(temperature=0, model="gpt-4") faithfulness_gpt4 = FaithfulnessEvaluator(llm=gpt4) relevancy_gpt4 = RelevancyEvaluator(llm=gpt4) correctness_gpt4 = CorrectnessEvaluator(llm=gpt4) documents = SimpleDirectoryReader("./test_wiki_data/").load_data() llm = OpenAI(temperature=0.3, model="gpt-3.5-turbo") splitter = SentenceSplitter(chunk_size=512) vector_index = VectorStoreIndex.from_documents( documents, transformations=[splitter] ) get_ipython().system('pip install spacy datasets span-marker scikit-learn') from llama_index.core.evaluation import DatasetGenerator dataset_generator =
DatasetGenerator.from_documents(documents, llm=llm)
llama_index.core.evaluation.DatasetGenerator.from_documents
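A short sketch of the usual next step, assuming the dataset_generator, vector_index, and GPT-4 judges from the prompt; num=25 is an arbitrary illustration value:

eval_questions = dataset_generator.generate_questions_from_nodes(num=25)

query_engine = vector_index.as_query_engine()
response = query_engine.query(eval_questions[0])
# Judge whether the answer is grounded in the retrieved context.
eval_result = faithfulness_gpt4.evaluate_response(response=response)
print(eval_result.passing)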
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb duckdb-engine') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SQLDatabase, SimpleDirectoryReader, Document from llama_index.readers.wikipedia import WikipediaReader from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.indices.struct_store import SQLTableRetrieverQueryEngine from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("duckdb:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) from llama_index.core import SQLDatabase sql_database =
SQLDatabase(engine, include_tables=["city_stats"])
llama_index.core.SQLDatabase
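A brief sketch of querying the DuckDB-backed table in natural language, using the NLSQLTableQueryEngine already imported in the prompt:

query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database, tables=["city_stats"]
)
response = query_engine.query("Which city has the highest population?")
display(Markdown(f"<b>{response}</b>"))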
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-elasticsearch') get_ipython().system('pip install llama-index') import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.elasticsearch import ElasticsearchStore get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
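A minimal sketch of finishing this setup, assuming a local Elasticsearch at the default port; the URL and index name are placeholders, not from the source:

from llama_index.core import StorageContext

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ElasticsearchStore(
    es_url="http://localhost:9200",  # placeholder; point at your cluster
    index_name="paul_graham_essay",  # placeholder index name
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)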
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response =
AgentFnComponent(fn=process_agent_response_fn)
llama_index.core.query_pipeline.AgentFnComponent
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm') get_ipython().system('pip install llama-index') import os from llama_index.llms.litellm import LiteLLM from llama_index.core.llms import ChatMessage os.environ["OPENAI_API_KEY"] = "your-api-key" os.environ["COHERE_API_KEY"] = "your-api-key" message = ChatMessage(role="user", content="Hey! how's it going?") llm = LiteLLM("gpt-3.5-turbo") chat_response = llm.chat([message]) llm = LiteLLM("command-nightly") chat_response = llm.chat([message]) from llama_index.core.llms import ChatMessage from llama_index.llms.litellm import LiteLLM messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] resp = LiteLLM("gpt-3.5-turbo").chat(messages) print(resp) from llama_index.llms.litellm import LiteLLM llm = LiteLLM("gpt-3.5-turbo") resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.litellm import LiteLLM messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] llm = LiteLLM("gpt-3.5-turbo") resp = llm.stream_chat(messages) for r in resp: print(r.delta, end="") from llama_index.llms.litellm import LiteLLM llm =
LiteLLM("gpt-3.5-turbo")
llama_index.llms.litellm.LiteLLM
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio from tqdm.asyncio import tqdm_asyncio nest_asyncio.apply() def displayify_df(df): """For pretty displaying DataFrame in a notebook.""" display_df = df.style.set_properties( **{ "inline-size": "300px", "overflow-wrap": "break-word", } ) display(display_df) from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex rag_dataset, documents = download_llama_dataset( "EvaluatingLlmSurveyPaperDataset", "./data" ) rag_dataset.to_pandas()[:5] index = VectorStoreIndex.from_documents(documents=documents) query_engine = index.as_query_engine() prediction_dataset = await rag_dataset.amake_predictions_with( predictor=query_engine, batch_size=100, show_progress=True ) from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import ( AnswerRelevancyEvaluator, ContextRelevancyEvaluator, ) judges = {} judges["answer_relevancy"] = AnswerRelevancyEvaluator( llm=
OpenAI(temperature=0, model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
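The judges dict plausibly gains the second evaluator imported in the prompt, and both can be fanned out over the prediction dataset; a hedged sketch, with the model choice and the exact batching an assumption:

judges["context_relevancy"] = ContextRelevancyEvaluator(
    llm=OpenAI(temperature=0, model="gpt-3.5-turbo")
)

eval_tasks = []
for example, prediction in zip(
    rag_dataset.examples, prediction_dataset.predictions
):
    eval_tasks.append(
        judges["answer_relevancy"].aevaluate(
            query=example.query, response=prediction.response
        )
    )
    eval_tasks.append(
        judges["context_relevancy"].aevaluate(
            query=example.query, contexts=prediction.contexts
        )
    )
eval_results = await tqdm_asyncio.gather(*eval_tasks)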
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import logging import sys import pandas as pd logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core.evaluation import DatasetGenerator, RelevancyEvaluator from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Response from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm') from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.readers.file import FlatReader from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter from llama_index.core.ingestion import IngestionPipeline from pathlib import Path import nest_asyncio nest_asyncio.apply() reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) pipeline = IngestionPipeline( documents=docs, transformations=[ HTMLNodeParser.from_defaults(), SentenceSplitter(chunk_size=1024, chunk_overlap=200), OpenAIEmbedding(), ], ) eval_nodes = pipeline.run(documents=docs) eval_llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os os.environ["OPENAI_API_KEY"] = "sk-..." get_ipython().system('pip install "llama_index>=0.9.7"') from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core.ingestion import IngestionPipeline from llama_index.core.extractors import TitleExtractor, SummaryExtractor from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import MetadataMode def build_pipeline(): llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1) transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=20)
llama_index.core.node_parser.SentenceSplitter
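build_pipeline is cut off inside its transformations list; a hedged guess at its continuation based on the extractor imports at the top of the prompt. The metadata_mode and num_workers kwargs are assumptions:

    transformations = [
        SentenceSplitter(chunk_size=1024, chunk_overlap=20),
        # Kwargs below are assumed, mirroring common extractor usage.
        TitleExtractor(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8),
        SummaryExtractor(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8),
        OpenAIEmbedding(),
    ]
    return IngestionPipeline(transformations=transformations)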
get_ipython().run_line_magic('pip', 'install llama-index-llms-anyscale') get_ipython().system('pip install llama-index') from llama_index.llms.anyscale import Anyscale from llama_index.core.llms import ChatMessage llm =
Anyscale(api_key="<your-api-key>")
llama_index.llms.anyscale.Anyscale
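A small usage sketch mirroring the chat and streaming patterns used elsewhere in this file; the client's default model is assumed:

message = ChatMessage(role="user", content="Tell me a joke")
resp = llm.chat([message])
print(resp)

# Streaming variant of the same client.
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
    print(r.delta, end="")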
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.core.response.pprint_utils import pprint_response from llama_index.llms.openai import OpenAI llm = OpenAI(temperature=0, model="gpt-4") get_ipython().system("mkdir -p 'data/10q/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'") march_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_march_2022.pdf"] ).load_data() june_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index = VectorStoreIndex.from_documents(march_2022) june_index = VectorStoreIndex.from_documents(june_2022) sept_index = VectorStoreIndex.from_documents(sept_2022) march_engine = march_index.as_query_engine(similarity_top_k=3, llm=llm) june_engine = june_index.as_query_engine(similarity_top_k=3, llm=llm) sept_engine = sept_index.as_query_engine(similarity_top_k=3, llm=llm) from llama_index.core.tools import QueryEngineTool query_tool_sept = QueryEngineTool.from_defaults( query_engine=sept_engine, name="sept_2022", description=( f"Provides information about Uber quarterly financials ending" f" September 2022" ), ) query_tool_june = QueryEngineTool.from_defaults( query_engine=june_engine, name="june_2022", description=( f"Provides information about Uber quarterly financials ending June" f" 2022" ), ) query_tool_march = QueryEngineTool.from_defaults( query_engine=march_engine, name="march_2022", description=( f"Provides information about Uber quarterly financials ending March" f" 2022" ), ) from llama_index.core.tools import QueryPlanTool from llama_index.core import get_response_synthesizer response_synthesizer =
get_response_synthesizer()
llama_index.core.get_response_synthesizer
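The response synthesizer typically feeds a QueryPlanTool, which an OpenAI agent can then call; a sketch assuming the three query tools from the prompt, with the model string illustrative:

from llama_index.agent.openai import OpenAIAgent

query_plan_tool = QueryPlanTool.from_defaults(
    query_engine_tools=[query_tool_march, query_tool_june, query_tool_sept],
    response_synthesizer=response_synthesizer,
)
agent = OpenAIAgent.from_tools(
    [query_plan_tool],
    max_function_calls=10,
    llm=OpenAI(temperature=0, model="gpt-4"),  # model choice is illustrative
    verbose=True,
)
response = agent.query(
    "Analyze Uber revenue growth in March, June, and September 2022"
)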
get_ipython().run_line_magic('pip', 'install llama-index-llms-portkey') get_ipython().system('pip install llama-index') get_ipython().system('pip install -U llama_index') get_ipython().system('pip install -U portkey-ai') from llama_index.llms.portkey import Portkey from llama_index.core.llms import ChatMessage import portkey as pk import os os.environ["PORTKEY_API_KEY"] = "PORTKEY_API_KEY" openai_virtual_key_a = "" openai_virtual_key_b = "" anthropic_virtual_key_a = "" anthropic_virtual_key_b = "" cohere_virtual_key_a = "" cohere_virtual_key_b = "" os.environ["OPENAI_API_KEY"] = "" os.environ["ANTHROPIC_API_KEY"] = "" portkey_client = Portkey( mode="single", ) openai_llm = pk.LLMOptions( provider="openai", model="gpt-4", virtual_key=openai_virtual_key_a, ) portkey_client.add_llms(openai_llm) messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What can you do?"), ] print("Testing Portkey Llamaindex integration:") response = portkey_client.chat(messages) print(response) prompt = "Why is the sky blue?" print("\nTesting Stream Complete:\n") response = portkey_client.stream_complete(prompt) for i in response: print(i.delta, end="", flush=True) messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What can you do?"), ] print("\nTesting Stream Chat:\n") response = portkey_client.stream_chat(messages) for i in response: print(i.delta, end="", flush=True) portkey_client = Portkey(mode="fallback") messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What can you do?"), ] llm1 = pk.LLMOptions( provider="openai", model="gpt-4", retry_settings={"on_status_codes": [429, 500], "attempts": 2}, virtual_key=openai_virtual_key_a, ) llm2 = pk.LLMOptions( provider="openai", model="gpt-3.5-turbo", virtual_key=openai_virtual_key_b, ) portkey_client.add_llms(llm_params=[llm1, llm2]) print("Testing Fallback & Retry functionality:") response = portkey_client.chat(messages) print(response) portkey_client = Portkey(mode="ab_test") messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What can you do?"), ] llm1 = pk.LLMOptions( provider="openai", model="gpt-4", virtual_key=openai_virtual_key_a, weight=0.2, ) llm2 = pk.LLMOptions( provider="openai", model="gpt-3.5-turbo", virtual_key=openai_virtual_key_a, weight=0.8, ) portkey_client.add_llms(llm_params=[llm1, llm2]) print("Testing Loadbalance functionality:") response = portkey_client.chat(messages) print(response) import time portkey_client = Portkey(mode="single") openai_llm = pk.LLMOptions( provider="openai", model="gpt-3.5-turbo", virtual_key=openai_virtual_key_a, cache_status="semantic", ) portkey_client.add_llms(openai_llm) current_messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What are the ingredients of a pizza?"), ] print("Testing Portkey Semantic Cache:") start = time.time() response = portkey_client.chat(current_messages) end = time.time() - start print(response) print(f"{'-'*50}\nServed in {end} seconds.\n{'-'*50}") new_messages = [ ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="Ingredients of pizza")
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.chroma import ChromaVectorStore from llama_index.core import StorageContext from llama_index.embeddings.huggingface import HuggingFaceEmbedding from IPython.display import Markdown, display import chromadb import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context, embed_model=embed_model ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) db = chromadb.PersistentClient(path="./chroma_db") chroma_collection = db.get_or_create_collection("quickstart") vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
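A sketch of the remaining persistent-client steps, following the standard Chroma flow; the continuation is assumed, using only objects from the prompt:

index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, embed_model=embed_model
)

# On a later run, reload the persisted collection without re-embedding:
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(
    vector_store, embed_model=embed_model
)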
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px

px.launch_app()

import llama_index.core

llama_index.core.set_global_handler("arize_phoenix")

import nest_asyncio

nest_asyncio.apply()

from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker
from llama_index.core.llama_pack import download_llama_pack

download_llama_pack(
    "LLMCompilerAgentPack",
    "./agent_pack",
    skip_load=True,
)

from agent_pack.step import LLMCompilerAgentWorker

import json
from typing import Sequence, List

from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool

import nest_asyncio

nest_asyncio.apply()


def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result integer."""
    return a * b


multiply_tool =
FunctionTool.from_defaults(fn=multiply)
llama_index.core.tools.FunctionTool.from_defaults
get_ipython().system('pip install llama-index') get_ipython().system('pip install clickhouse_connect') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from os import environ import clickhouse_connect environ["OPENAI_API_KEY"] = "sk-*" client = clickhouse_connect.get_client( host="localhost", port=8123, username="default", password="", ) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.clickhouse import ClickHouseVectorStore documents = SimpleDirectoryReader("../data/paul_graham").load_data() print("Document ID:", documents[0].doc_id) print("Number of Documents: ", len(documents)) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") loader = SimpleDirectoryReader("./data/paul_graham/") documents = loader.load_data() for file in loader.input_files: print(file) from llama_index.core import StorageContext for document in documents: document.metadata = {"user_id": "123", "favorite_color": "blue"} vector_store =
ClickHouseVectorStore(clickhouse_client=client)
llama_index.vector_stores.clickhouse.ClickHouseVectorStore
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().system('pip install llama-index') import os import pinecone api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="eu-west1-gcp") indexes = pinecone.list_indexes() print(indexes) if "quickstart-index" not in indexes: pinecone.create_index( "quickstart-index", dimension=1536, metric="euclidean", pod_type="p1" ) pinecone_index = pinecone.Index("quickstart-index") pinecone_index.delete(deleteAll="true") books = [ { "title": "To Kill a Mockingbird", "author": "Harper Lee", "content": ( "To Kill a Mockingbird is a novel by Harper Lee published in" " 1960..." ), "year": 1960, }, { "title": "1984", "author": "George Orwell", "content": ( "1984 is a dystopian novel by George Orwell published in 1949..." ), "year": 1949, }, { "title": "The Great Gatsby", "author": "F. Scott Fitzgerald", "content": ( "The Great Gatsby is a novel by F. Scott Fitzgerald published in" " 1925..." ), "year": 1925, }, { "title": "Pride and Prejudice", "author": "Jane Austen", "content": ( "Pride and Prejudice is a novel by Jane Austen published in" " 1813..." ), "year": 1813, }, ] import uuid from llama_index.embeddings.openai import OpenAIEmbedding embed_model = OpenAIEmbedding() entries = [] for book in books: vector = embed_model.get_text_embedding(book["content"]) entries.append( {"id": str(uuid.uuid4()), "values": vector, "metadata": book} ) pinecone_index.upsert(entries) from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core import VectorStoreIndex from llama_index.core.response.pprint_utils import pprint_source_node vector_store = PineconeVectorStore( pinecone_index=pinecone_index, text_key="content" ) retriever =
VectorStoreIndex.from_vector_store(vector_store)
llama_index.core.VectorStoreIndex.from_vector_store
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import openai import os os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo") data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(data) from llama_index.core.memory import ChatMemoryBuffer memory = ChatMemoryBuffer.from_defaults(token_limit=3900) chat_engine = index.as_chat_engine( chat_mode="condense_plus_context", memory=memory, llm=llm, context_prompt=( "You are a chatbot, able to have normal interactions, as well as talk" " about an essay discussing Paul Grahams life." "Here are the relevant documents for the context:\n" "{context_str}" "\nInstruction: Use the previous chat history, or the context above, to interact and help the user." ), verbose=False, ) response = chat_engine.chat("What did Paul Graham do growing up") print(response) response_2 = chat_engine.chat("Can you tell me more?") print(response_2) chat_engine.reset() response = chat_engine.chat("Hello! What do you know?") print(response) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI from llama_index.core import Settings llm = OpenAI(model="gpt-3.5-turbo", temperature=0) Settings.llm = llm data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(data)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-redis') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().system('pip install redis') get_ipython().system('docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest') import os os.environ["OPENAI_API_KEY"] = "sk-..." get_ipython().system('rm -rf test_redis_data') get_ipython().system('mkdir -p test_redis_data') get_ipython().system('echo "This is a test file: one!" > test_redis_data/test1.txt') get_ipython().system('echo "This is a test file: two!" > test_redis_data/test2.txt') from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader( "./test_redis_data", filename_as_id=True ).load_data() from llama_index.embeddings.huggingface import HuggingFaceEmbedding from llama_index.core.ingestion import ( DocstoreStrategy, IngestionPipeline, IngestionCache, ) from llama_index.core.ingestion.cache import RedisCache from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.core.node_parser import SentenceSplitter from llama_index.vector_stores.redis import RedisVectorStore embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5") pipeline = IngestionPipeline( transformations=[
SentenceSplitter()
llama_index.core.node_parser.SentenceSplitter
get_ipython().system('pip install llama-index-multi-modal-llms-anthropic') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install matplotlib') import os os.environ["ANTHROPIC_API_KEY"] = "" # Your ANTHROPIC API key here from PIL import Image import matplotlib.pyplot as plt img = Image.open("../data/images/prometheus_paper_card.png") plt.imshow(img) from llama_index.core import SimpleDirectoryReader from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal image_documents = SimpleDirectoryReader( input_files=["../data/images/prometheus_paper_card.png"] ).load_data() anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) print(response) from PIL import Image import requests from io import BytesIO import matplotlib.pyplot as plt from llama_index.core.multi_modal_llms.generic_utils import load_image_urls image_urls = [ "https://venturebeat.com/wp-content/uploads/2024/03/Screenshot-2024-03-04-at-12.49.41%E2%80%AFAM.png", ] img_response = requests.get(image_urls[0]) img = Image.open(BytesIO(img_response.content)) plt.imshow(img) image_url_documents = load_image_urls(image_urls) response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_url_documents, ) print(response) from llama_index.core import SimpleDirectoryReader image_documents = SimpleDirectoryReader( input_files=["../data/images/ark_email_sample.PNG"] ).load_data() from PIL import Image import matplotlib.pyplot as plt img = Image.open("../data/images/ark_email_sample.PNG") plt.imshow(img) from pydantic import BaseModel from typing import List class TickerInfo(BaseModel): """List of ticker info.""" direction: str ticker: str company: str shares_traded: int percent_of_total_etf: float class TickerList(BaseModel): """List of stock tickers.""" fund: str tickers: List[TickerInfo] from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ Can you get the stock information in the image \ and return the answer? Pick just one fund. Make sure the answer is a JSON format corresponding to a Pydantic schema. The Pydantic schema is given below. 
""" anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_cls=TickerList, image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=anthropic_mm_llm, verbose=True, ) response = llm_program() print(str(response)) get_ipython().system('wget "https://www.dropbox.com/scl/fi/c1ec6osn0r2ggnitijqhl/mixed_wiki_images_small.zip?rlkey=swwxc7h4qtwlnhmby5fsnderd&dl=1" -O mixed_wiki_images_small.zip') get_ipython().system('unzip mixed_wiki_images_small.zip') from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) from llama_index.core.schema import TextNode from pathlib import Path from llama_index.core import SimpleDirectoryReader nodes = [] for img_file in Path("mixed_wiki_images_small").glob("*.png"): print(img_file) image_documents = SimpleDirectoryReader(input_files=[img_file]).load_data() response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) metadata = {"img_file": img_file} nodes.append(TextNode(text=str(response), metadata=metadata)) from llama_index.core import VectorStoreIndex, StorageContext from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.anthropic import Anthropic from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import Settings from llama_index.core import StorageContext import qdrant_client client = qdrant_client.QdrantClient(path="qdrant_mixed_img") vector_store = QdrantVectorStore(client=client, collection_name="collection") embed_model = OpenAIEmbedding() anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex( nodes=nodes, storage_context=storage_context, ) from llama_index.llms.anthropic import Anthropic query_engine = index.as_query_engine(llm=Anthropic()) response = query_engine.query("Tell me more about the porsche") print(str(response)) from llama_index.core.response.notebook_utils import display_source_node for n in response.source_nodes:
display_source_node(n, metadata_mode="all")
llama_index.core.response.notebook_utils.display_source_node
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-myscale') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from os import environ import clickhouse_connect environ["OPENAI_API_KEY"] = "sk-*" client = clickhouse_connect.get_client( host="YOUR_CLUSTER_HOST", port=8443, username="YOUR_USERNAME", password="YOUR_CLUSTER_PASSWORD", ) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.myscale import MyScaleVectorStore from IPython.display import Markdown, display documents =
SimpleDirectoryReader("../data/paul_graham")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-.." openai.api_key = os.environ["OPENAI_API_KEY"] from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, ) engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) from llama_index.core import SQLDatabase from llama_index.llms.openai import OpenAI llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo") sql_database = SQLDatabase(engine, include_tables=["city_stats"]) sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) stmt = select( city_stats_table.c.city_name, city_stats_table.c.population, city_stats_table.c.country, ).select_from(city_stats_table) with engine.connect() as connection: results = connection.execute(stmt).fetchall() print(results) from sqlalchemy import text with engine.connect() as con: rows = con.execute(text("SELECT city_name from city_stats")) for row in rows: print(row) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], llm=llm ) query_str = "Which city has the highest population?" response = query_engine.query(query_str) display(Markdown(f"<b>{response}</b>")) from llama_index.core.indices.struct_store.sql_query import ( SQLTableRetrieverQueryEngine, ) from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import VectorStoreIndex table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ (
SQLTableSchema(table_name="city_stats")
llama_index.core.objects.SQLTableSchema
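The row stops while assembling the table schema objects; assuming that list is completed, a minimal sketch (following the documented table-retriever flow, parameters illustrative) of indexing the schemas and building a retriever-backed SQL query engine:
obj_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
)
query_engine = SQLTableRetrieverQueryEngine(
    sql_database, obj_index.as_retriever(similarity_top_k=1)
)
response = query_engine.query("Which city has the highest population?")
display(Markdown(f"<b>{response}</b>"))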
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import json from typing import Sequence, List from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool from llama_index.agent.openai import OpenAIAgent def add(a: int, b: int) -> int: """Add two integers and returns the result integer""" return a + b add_tool =
FunctionTool.from_defaults(fn=add)
llama_index.core.tools.FunctionTool.from_defaults
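A short sketch of where this tool-calling setup usually goes next: define a second tool and hand both to an OpenAI function-calling agent (model name and question are illustrative).
def multiply(a: int, b: int) -> int:
    """Multiply two integers and returns the result integer"""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)
agent = OpenAIAgent.from_tools(
    [multiply_tool, add_tool], llm=OpenAI(model="gpt-3.5-turbo"), verbose=True
)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))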
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-clip') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install -U openai-whisper') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install lancedb') get_ipython().run_line_magic('pip', 'install moviepy') get_ipython().run_line_magic('pip', 'install pytube') get_ipython().run_line_magic('pip', 'install pydub') get_ipython().run_line_magic('pip', 'install SpeechRecognition') get_ipython().run_line_magic('pip', 'install ffmpeg-python') get_ipython().run_line_magic('pip', 'install soundfile') from moviepy.editor import VideoFileClip from pathlib import Path import speech_recognition as sr from pytube import YouTube from pprint import pprint import os OPENAI_API_TOKEN = "" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN video_url = "https://www.youtube.com/watch?v=d_qvLDhkg00" output_video_path = "./video_data/" output_folder = "./mixed_data/" output_audio_path = "./mixed_data/output_audio.wav" filepath = output_video_path + "input_vid.mp4" Path(output_folder).mkdir(parents=True, exist_ok=True) from PIL import Image import matplotlib.pyplot as plt import os def plot_images(image_paths): images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(2, 3, images_shown + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) images_shown += 1 if images_shown >= 7: break def download_video(url, output_path): """ Download a video from a given url and save it to the output path. Parameters: url (str): The url of the video to download. output_path (str): The path to save the video to. Returns: dict: A dictionary containing the metadata of the video. """ yt = YouTube(url) metadata = {"Author": yt.author, "Title": yt.title, "Views": yt.views} yt.streams.get_highest_resolution().download( output_path=output_path, filename="input_vid.mp4" ) return metadata def video_to_images(video_path, output_folder): """ Convert a video to a sequence of images and save them to the output folder. Parameters: video_path (str): The path to the video file. output_folder (str): The path to the folder to save the images to. """ clip = VideoFileClip(video_path) clip.write_images_sequence( os.path.join(output_folder, "frame%04d.png"), fps=0.2 ) def video_to_audio(video_path, output_audio_path): """ Convert a video to audio and save it to the output path. Parameters: video_path (str): The path to the video file. output_audio_path (str): The path to save the audio to. """ clip = VideoFileClip(video_path) audio = clip.audio audio.write_audiofile(output_audio_path) def audio_to_text(audio_path): """ Convert audio to text using the SpeechRecognition library. Parameters: audio_path (str): The path to the audio file. Returns: test (str): The text recognized from the audio. 
""" recognizer = sr.Recognizer() audio = sr.AudioFile(audio_path) with audio as source: audio_data = recognizer.record(source) try: text = recognizer.recognize_whisper(audio_data) except sr.UnknownValueError: print("Speech recognition could not understand the audio.") except sr.RequestError as e: print(f"Could not request results from service; {e}") return text try: metadata_vid = download_video(video_url, output_video_path) video_to_images(filepath, output_folder) video_to_audio(filepath, output_audio_path) text_data = audio_to_text(output_audio_path) with open(output_folder + "output_text.txt", "w") as file: file.write(text_data) print("Text data saved to file") file.close() os.remove(output_audio_path) print("Audio file removed") except Exception as e: raise e from llama_index.core.indices import MultiModalVectorStoreIndex from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.vector_stores.lancedb import LanceDBVectorStore from llama_index.core import SimpleDirectoryReader text_store = LanceDBVectorStore(uri="lancedb", table_name="text_collection") image_store = LanceDBVectorStore(uri="lancedb", table_name="image_collection") storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) documents = SimpleDirectoryReader(output_folder).load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, ) retriever_engine = index.as_retriever( similarity_top_k=5, image_similarity_top_k=5 ) import json metadata_str = json.dumps(metadata_vid) qa_tmpl_str = ( "Given the provided information, including relevant images and retrieved context from the video, \ accurately and precisely answer the query without any additional prior knowledge.\n" "Please ensure honesty and responsibility, refraining from any racist or sexist remarks.\n" "---------------------\n" "Context: {context_str}\n" "Metadata for video: {metadata_str} \n" "---------------------\n" "Query: {query_str}\n" "Answer: " ) from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.schema import ImageNode def retrieve(retriever_engine, query_str): retrieval_results = retriever_engine.retrieve(query_str) retrieved_image = [] retrieved_text = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_image.append(res_node.node.metadata["file_path"]) else:
display_source_node(res_node, source_length=200)
llama_index.core.response.notebook_utils.display_source_node
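Assuming retrieve() above is completed to return its two lists (image paths and text chunks), a hedged sketch of synthesizing an answer with an OpenAI multi-modal LLM; the model name and query are illustrative.
from llama_index.core import SimpleDirectoryReader
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

query_str = "What does the speaker say about Google?"
img_paths, txts = retrieve(retriever_engine=retriever_engine, query_str=query_str)
image_documents = SimpleDirectoryReader(input_files=img_paths).load_data()
openai_mm_llm = OpenAIMultiModal(model="gpt-4o", max_new_tokens=1500)
response = openai_mm_llm.complete(
    prompt=qa_tmpl_str.format(
        context_str="".join(txts), metadata_str=metadata_str, query_str=query_str
    ),
    image_documents=image_documents,
)
print(str(response))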
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from IPython.display import Markdown, display def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) selector = LLMMultiSelector.from_defaults() from llama_index.core.tools import ToolMetadata tool_choices = [ ToolMetadata( name="covid_nyt", description=("This tool contains a NYT news article about COVID-19"), ), ToolMetadata( name="covid_wiki", description=("This tool contains the Wikipedia page about COVID-19"), ), ToolMetadata( name="covid_tesla", description=("This tool contains the Wikipedia page about apples"), ), ] display_prompt_dict(selector.get_prompts()) selector_result = selector.select( tool_choices, query="Tell me more about COVID-19" ) selector_result.selections from llama_index.core import PromptTemplate from llama_index.llms.openai import OpenAI query_gen_str = """\ You are a helpful assistant that generates multiple search queries based on a \ single input query. Generate {num_queries} search queries, one on each line, \ related to the following input query: Query: {query} Queries: """ query_gen_prompt = PromptTemplate(query_gen_str) llm = OpenAI(model="gpt-3.5-turbo") def generate_queries(query: str, llm, num_queries: int = 4): response = llm.predict( query_gen_prompt, num_queries=num_queries, query=query ) queries = response.split("\n") queries_str = "\n".join(queries) print(f"Generated queries:\n{queries_str}") return queries queries = generate_queries("What happened at Interleaf and Viaweb?", llm) queries from llama_index.core.indices.query.query_transform import HyDEQueryTransform from llama_index.llms.openai import OpenAI hyde = HyDEQueryTransform(include_original=True) llm = OpenAI(model="gpt-3.5-turbo") query_bundle = hyde.run("What is Bel?") query_bundle.custom_embedding_strs from llama_index.core.question_gen import LLMQuestionGenerator from llama_index.question_gen.openai import OpenAIQuestionGenerator from llama_index.llms.openai import OpenAI llm = OpenAI() question_gen = OpenAIQuestionGenerator.from_defaults(llm=llm) display_prompt_dict(question_gen.get_prompts()) from llama_index.core.tools import ToolMetadata tool_choices = [ ToolMetadata( name="uber_2021_10k", description=( "Provides information about Uber financials for year 2021" ), ), ToolMetadata( name="lyft_2021_10k", description=( "Provides information about Lyft financials for year 2021" ), ), ] from llama_index.core import QueryBundle query_str = "Compare and contrast Uber and Lyft" choices = question_gen.generate(tool_choices, QueryBundle(query_str=query_str)) choices from llama_index.core.agent import ReActChatFormatter from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.tools import FunctionTool from llama_index.core.llms import ChatMessage def execute_sql(sql: str) -> str: """Given a SQL input string, execute it.""" return f"Executed {sql}" def add(a: int, b: int) -> int: """Add two numbers.""" return a + b tool1 = FunctionTool.from_defaults(fn=execute_sql) tool2 = FunctionTool.from_defaults(fn=add) tools = [tool1, tool2] chat_formatter = ReActChatFormatter() output_parser = ReActOutputParser()
input_msgs = chat_formatter.format( tools, [ ChatMessage( content="Can you find the top three rows from the table named `revenue_years`", role="user", ) ], ) input_msgs llm =
OpenAI(model="gpt-4-1106-preview")
llama_index.llms.openai.OpenAI
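To close the loop on the ReAct plumbing above, a minimal sketch (assuming the model emits a well-formed Thought/Action block) of running the formatted messages and parsing the result back into a reasoning step:
chat_response = llm.chat(input_msgs)
reasoning_step = output_parser.parse(chat_response.message.content)
print(reasoning_step)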
get_ipython().run_line_magic('pip', 'install llama-index-readers-github') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index llama-hub') import nest_asyncio nest_asyncio.apply() import os os.environ["GITHUB_TOKEN"] = "ghp_..." os.environ["OPENAI_API_KEY"] = "sk-..." import os from llama_index.readers.github import ( GitHubRepositoryIssuesReader, GitHubIssuesClient, ) github_client =
GitHubIssuesClient()
llama_index.readers.github.GitHubIssuesClient
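A hedged sketch of using the client: point the issues reader at a repository (the owner/repo values are illustrative) and load the issues as documents.
loader = GitHubRepositoryIssuesReader(
    github_client,
    owner="run-llama",
    repo="llama_index",
    verbose=True,
)
docs = loader.load_data()
print(docs[0].text)
print(docs[0].metadata)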
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-typesense') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, ) from IPython.display import Markdown, display documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm') from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.readers.file import FlatReader from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter from llama_index.core.ingestion import IngestionPipeline from pathlib import Path import nest_asyncio nest_asyncio.apply() reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) pipeline = IngestionPipeline( documents=docs, transformations=[ HTMLNodeParser.from_defaults(), SentenceSplitter(chunk_size=1024, chunk_overlap=200), OpenAIEmbedding(), ], ) eval_nodes = pipeline.run(documents=docs) eval_llm = OpenAI(model="gpt-3.5-turbo") dataset_generator = DatasetGenerator( eval_nodes[:100], llm=eval_llm, show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100) len(eval_dataset.qr_pairs) eval_dataset.save_json("data/tesla10k_eval_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/tesla10k_eval_dataset.json" ) eval_qs = eval_dataset.questions qr_pairs = eval_dataset.qr_pairs ref_response_strs = [r for (_, r) in qr_pairs] from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, ) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import BatchEvalRunner evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm) evaluator_dict = { "correctness": evaluator_c, "semantic_similarity": evaluator_s, } batch_eval_runner = BatchEvalRunner( evaluator_dict, workers=2, show_progress=True ) from llama_index.core import VectorStoreIndex async def run_evals( pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref ): nodes = pipeline.run(documents=docs) vector_index = VectorStoreIndex(nodes) query_engine = vector_index.as_query_engine() pred_responses = get_responses(eval_qs, query_engine, show_progress=True) eval_results = await batch_eval_runner.aevaluate_responses( eval_qs, responses=pred_responses, reference=eval_responses_ref ) return eval_results from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0) sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200) sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=600) html_parser =
HTMLNodeParser.from_defaults()
llama_index.core.node_parser.HTMLNodeParser.from_defaults
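A sketch of how these parser variants are typically compared: one ingestion pipeline per chunking configuration, each scored with the run_evals helper defined above (the loop is illustrative; top-level await assumes the nest_asyncio setup from the row).
for splitter in [sent_parser_o0, sent_parser_o200, sent_parser_o500]:
    pipeline = IngestionPipeline(
        documents=docs,
        transformations=[html_parser, splitter, OpenAIEmbedding()],
    )
    eval_results = await run_evals(
        pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
    )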
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass import openai openai.api_key = "sk-" import chromadb chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") from llama_index.core import VectorStoreIndex from llama_index.vector_stores.chroma import ChromaVectorStore from IPython.display import Markdown, display from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", "year": 1994, }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", "theme": "Fiction", "year": 2010, }, ), TextNode( text="To Kill a Mockingbird", metadata={ "author": "Harper Lee", "theme": "Mafia", "year": 1960, }, ), TextNode( text="1984", metadata={ "author": "George Orwell", "theme": "Totalitarianism", "year": 1949, }, ), TextNode( text="The Great Gatsby", metadata={ "author": "F. Scott Fitzgerald", "theme": "The American Dream", "year": 1925, }, ), TextNode( text="Harry Potter and the Sorcerer's Stone", metadata={ "author": "J.K. Rowling", "theme": "Fiction", "year": 1997, }, ), ] from llama_index.core import StorageContext vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.vector_stores import ( MetadataFilter, MetadataFilters, FilterOperator, ) filters = MetadataFilters( filters=[ MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"), ] ) retriever = index.as_retriever(filters=filters) retriever.retrieve("What is inception about?") from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters filters = MetadataFilters( filters=[ MetadataFilter(key="theme", value="Mafia"), MetadataFilter(key="year", value=1972), ] ) retriever = index.as_retriever(filters=filters) retriever.retrieve("What is inception about?") from llama_index.core.vector_stores import FilterOperator, FilterCondition filters = MetadataFilters( filters=[ MetadataFilter(key="theme", value="Fiction"), MetadataFilter(key="year", value=1997, operator=FilterOperator.GT), ], condition=FilterCondition.AND, ) retriever = index.as_retriever(filters=filters) retriever.retrieve("Harry Potter?") from llama_index.core.vector_stores import FilterOperator, FilterCondition filters = MetadataFilters( filters=[ MetadataFilter(key="theme", value="Fiction"),
MetadataFilter(key="year", value=1997, operator=FilterOperator.GT)
llama_index.core.vector_stores.MetadataFilter
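The truncated example above is typically completed with a disjunctive condition; a small sketch using FilterCondition.OR so matching either predicate is enough:
filters = MetadataFilters(
    filters=[
        MetadataFilter(key="theme", value="Fiction"),
        MetadataFilter(key="year", value=1997, operator=FilterOperator.GT),
    ],
    condition=FilterCondition.OR,
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("Harry Potter?")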
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd from pathlib import Path data_dir = Path("./WikiTableQuestions/csv/200-csv") csv_files = sorted([f for f in data_dir.glob("*.csv")]) dfs = [] for csv_file in csv_files: print(f"processing file: {csv_file}") try: df = pd.read_csv(csv_file) dfs.append(df) except Exception as e: print(f"Error parsing {csv_file}: {str(e)}") tableinfo_dir = "WikiTableQuestions_TableInfo" get_ipython().system('mkdir {tableinfo_dir}') from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.llms.openai import OpenAI class TableInfo(BaseModel): """Information regarding a structured table.""" table_name: str = Field( ..., description="table name (must be underscores and NO spaces)" ) table_summary: str = Field( ..., description="short, concise summary/caption of the table" ) prompt_str = """\ Give me a summary of the table with the following JSON format. - The table name must be unique to the table and describe it while being concise. - Do NOT output a generic table name (e.g. table, my_table). Do NOT make the table name one of the following: {exclude_table_name_list} Table: {table_str} Summary: """ program = LLMTextCompletionProgram.from_defaults( output_cls=TableInfo, llm=
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
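A quick sketch of invoking the structured-output program above on one of the loaded dataframes, assuming the from_defaults call is completed with prompt_template_str=prompt_str; the empty exclusion list is illustrative.
table_info = program(
    table_str=dfs[0].head(10).to_csv(),
    exclude_table_name_list=str([]),
)
print(table_info.table_name, "-", table_info.table_summary)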
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) get_ipython().system('pip install wikipedia') from llama_index.readers.wikipedia import WikipediaReader cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs = WikipediaReader().load_data(pages=cities) from llama_index.core import SQLDatabase sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from llama_index.llms.openai import OpenAI from llama_index.core import VectorStoreIndex vector_indices = {} vector_query_engines = {} for city, wiki_doc in zip(cities, wiki_docs): vector_index = VectorStoreIndex.from_documents([wiki_doc]) query_engine = vector_index.as_query_engine( similarity_top_k=2, llm=OpenAI(model="gpt-3.5-turbo") ) vector_indices[city] = vector_index vector_query_engines[city] = query_engine from llama_index.core.query_engine import NLSQLTableQueryEngine sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], ) from llama_index.core.tools import QueryEngineTool from llama_index.core.tools import ToolMetadata from llama_index.core.query_engine import SubQuestionQueryEngine query_engine_tools = [] for city in cities: query_engine = vector_query_engines[city] query_engine_tool = QueryEngineTool( query_engine=query_engine, metadata=ToolMetadata( name=city, description=f"Provides information about {city}" ), ) query_engine_tools.append(query_engine_tool) s_engine = SubQuestionQueryEngine.from_defaults( query_engine_tools=query_engine_tools, llm=
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
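Assuming the from_defaults call above is completed, a hedged sketch of combining the SQL engine and the sub-question engine into a single joined engine, as the documented flow does; the tool descriptions and query are illustrative.
from llama_index.core.query_engine import SQLJoinQueryEngine

sql_tool = QueryEngineTool.from_defaults(
    query_engine=sql_query_engine,
    description=(
        "Useful for translating a natural language query into a SQL query"
        " over the city_stats table, containing the population and country"
        " of each city"
    ),
)
s_engine_tool = QueryEngineTool.from_defaults(
    query_engine=s_engine,
    description="Useful for answering semantic questions about different cities",
)
query_engine = SQLJoinQueryEngine(sql_tool, s_engine_tool)
response = query_engine.query(
    "Tell me about the arts and culture of the city with the highest population"
)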
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import ( FaithfulnessEvaluator, RelevancyEvaluator, CorrectnessEvaluator, ) from llama_index.core.node_parser import SentenceSplitter import pandas as pd pd.set_option("display.max_colwidth", 0) gpt4 =
OpenAI(temperature=0, model="gpt-4")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, ) from llama_index.core.query_engine.pandas import PandasInstructionParser from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'") import pandas as pd df = pd.read_csv("./titanic_train.csv") instruction_str = ( "1. Convert the query to executable Python code using Pandas.\n" "2. The final line of code should be a Python expression that can be called with the `eval()` function.\n" "3. The code should represent a solution to the query.\n" "4. PRINT ONLY THE EXPRESSION.\n" "5. Do not quote the expression.\n" ) pandas_prompt_str = ( "You are working with a pandas dataframe in Python.\n" "The name of the dataframe is `df`.\n" "This is the result of `print(df.head())`:\n" "{df_str}\n\n" "Follow these instructions:\n" "{instruction_str}\n" "Query: {query_str}\n\n" "Expression:" ) response_synthesis_prompt_str = ( "Given an input question, synthesize a response from the query results.\n" "Query: {query_str}\n\n" "Pandas Instructions (optional):\n{pandas_instructions}\n\n" "Pandas Output: {pandas_output}\n\n" "Response: " ) pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format( instruction_str=instruction_str, df_str=df.head(5) ) pandas_output_parser = PandasInstructionParser(df) response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str) llm = OpenAI(model="gpt-3.5-turbo") qp = QP( modules={ "input": InputComponent(), "pandas_prompt": pandas_prompt, "llm1": llm, "pandas_output_parser": pandas_output_parser, "response_synthesis_prompt": response_synthesis_prompt, "llm2": llm, }, verbose=True, ) qp.add_chain(["input", "pandas_prompt", "llm1", "pandas_output_parser"]) qp.add_links( [
Link("input", "response_synthesis_prompt", dest_key="query_str")
llama_index.core.query_pipeline.Link
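A sketch of the remaining wiring for the pipeline above (the dest keys mirror the placeholders in response_synthesis_prompt_str), followed by a sample run; the query is illustrative.
qp.add_links(
    [
        Link("llm1", "response_synthesis_prompt", dest_key="pandas_instructions"),
        Link(
            "pandas_output_parser",
            "response_synthesis_prompt",
            dest_key="pandas_output",
        ),
    ]
)
qp.add_link("response_synthesis_prompt", "llm2")
response = qp.run(query_str="What is the correlation between survival and age?")
print(str(response))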
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic') get_ipython().system('pip install llama-index') from llama_index.llms.anthropic import Anthropic from llama_index.core import Settings tokenizer = Anthropic().tokenizer Settings.tokenizer = tokenizer import os os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY" from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-opus-20240229") resp = llm.complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.anthropic import Anthropic messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] resp = Anthropic(model="claude-3-opus-20240229").chat(messages) print(resp) from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-opus-20240229", max_tokens=100) resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-opus-20240229") messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] resp = llm.stream_chat(messages) for r in resp: print(r.delta, end="") from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-sonnet-20240229") resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.anthropic import Anthropic llm =
Anthropic("claude-3-sonnet-20240229")
llama_index.llms.anthropic.Anthropic
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index =
VectorStoreIndex(base_nodes)
llama_index.core.VectorStoreIndex
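A short sketch of querying the base index built above; the top-k value and question are illustrative.
from llama_index.core.query_engine import RetrieverQueryEngine

retriever = index.as_retriever(similarity_top_k=2)
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(
    "Can you tell me about the key concepts for safety finetuning?"
)
print(str(response))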
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(data) chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True) response = chat_engine.chat("What did Paul Graham do after YC?") print(response) response = chat_engine.chat("What about after that?") print(response) response = chat_engine.chat("Can you tell me more?") print(response) chat_engine.reset() response = chat_engine.chat("What about after that?") print(response) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-3.5-turbo", temperature=0)
llama_index.llms.openai.OpenAI
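The row recreates the chat engine with an explicit LLM; a sketch of the likely continuation, reusing the index and condense-question mode from above:
chat_engine = index.as_chat_engine(
    chat_mode="condense_question", llm=llm, verbose=True
)
response = chat_engine.chat("What did Paul Graham do after YC?")
print(response)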
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp') from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.llms.llama_cpp import LlamaCPP from llama_index.llms.llama_cpp.llama_utils import ( messages_to_prompt, completion_to_prompt, ) get_ipython().system('pip install llama-index') model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin" llm = LlamaCPP( model_url=model_url, model_path=None, temperature=0.1, max_new_tokens=256, context_window=3900, generate_kwargs={}, model_kwargs={"n_gpu_layers": 1}, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, verbose=True, ) response = llm.complete("Hello! Can you tell me a poem about cats and dogs?") print(response.text) response_iter = llm.stream_complete("Can you write me a poem about fast cars?") for response in response_iter: print(response.delta, end="", flush=True) from llama_index.core import set_global_tokenizer from transformers import AutoTokenizer set_global_tokenizer( AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-chat-hf").encode ) from llama_index.embeddings.huggingface import HuggingFaceEmbedding embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5") documents = SimpleDirectoryReader( "../../../examples/paul_graham_essay/data" ).load_data() index =
VectorStoreIndex.from_documents(documents, embed_model=embed_model)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-readers-faiss') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.readers.faiss import FaissReader import faiss id_to_text_map = { "id1": "text blob 1", "id2": "text blob 2", } index = ... reader =
FaissReader(index)
llama_index.readers.faiss.FaissReader
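A hedged sketch of loading from the reader: the query vectors' dimensionality must match the (elided) faiss index, and the ids returned by the search are mapped back to text via id_to_text_map; values here are illustrative.
import numpy as np

query1 = np.array([0.1, 0.2, 0.3], dtype="float32")  # dims must match the index
query = np.array([query1])
documents = reader.load_data(query=query, id_to_text_map=id_to_text_map, k=2)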
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') get_ipython().system('pip install "transformers[torch]" "huggingface_hub[inference]"') get_ipython().system('pip install llama-index') import os from typing import List, Optional from llama_index.llms.huggingface import ( HuggingFaceInferenceAPI, HuggingFaceLLM, ) HF_TOKEN: Optional[str] = os.getenv("HUGGING_FACE_TOKEN") locally_run = HuggingFaceLLM(model_name="HuggingFaceH4/zephyr-7b-alpha") remotely_run = HuggingFaceInferenceAPI( model_name="HuggingFaceH4/zephyr-7b-alpha", token=HF_TOKEN ) remotely_run_anon = HuggingFaceInferenceAPI( model_name="HuggingFaceH4/zephyr-7b-alpha" ) remotely_run_recommended =
HuggingFaceInferenceAPI(token=HF_TOKEN)
llama_index.llms.huggingface.HuggingFaceInferenceAPI
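A minimal usage sketch for the remote endpoint constructed above; the prompt is illustrative.
completion_response = remotely_run_recommended.complete("To infinity, and")
print(completion_response)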
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') import json from llama_index.core import SimpleDirectoryReader from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import MetadataMode get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") TRAIN_FILES = ["./data/10k/lyft_2021.pdf"] VAL_FILES = ["./data/10k/uber_2021.pdf"] TRAIN_CORPUS_FPATH = "./data/train_corpus.json" VAL_CORPUS_FPATH = "./data/val_corpus.json" def load_corpus(files, verbose=False): if verbose: print(f"Loading files {files}") reader =
SimpleDirectoryReader(input_files=files)
llama_index.core.SimpleDirectoryReader
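A hedged sketch of the full helper this row is building, following the documented embedding-finetuning flow: read the files, then chunk them into nodes for the train and validation corpora.
def load_corpus(files, verbose=False):
    if verbose:
        print(f"Loading files {files}")
    reader = SimpleDirectoryReader(input_files=files)
    docs = reader.load_data()
    if verbose:
        print(f"Loaded {len(docs)} docs")
    parser = SentenceSplitter()
    nodes = parser.get_nodes_from_documents(docs, show_progress=verbose)
    if verbose:
        print(f"Parsed {len(nodes)} nodes")
    return nodes

train_nodes = load_corpus(TRAIN_FILES, verbose=True)
val_nodes = load_corpus(VAL_FILES, verbose=True)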
from llama_index.agent import OpenAIAgent import openai openai.api_key = "sk-your-key" from llama_index.tools.wikipedia.base import WikipediaToolSpec from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec wiki_spec = WikipediaToolSpec() tool = wiki_spec.to_tool_list()[1] agent = OpenAIAgent.from_tools(
LoadAndSearchToolSpec.from_defaults(tool)
llama_index.tools.tool_spec.load_and_search.base.LoadAndSearchToolSpec.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system('pip install "google-generativeai" -q') import nest_asyncio nest_asyncio.apply() from llama_index.core.llama_dataset import download_llama_dataset pairwise_evaluator_dataset, _ = download_llama_dataset( "MtBenchHumanJudgementDataset", "./mt_bench_data" ) pairwise_evaluator_dataset.to_pandas()[:5] from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.llms.openai import OpenAI from llama_index.llms.gemini import Gemini from llama_index.llms.cohere import Cohere llm_gpt4 = OpenAI(temperature=0, model="gpt-4") llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo") llm_gemini = Gemini(model="models/gemini-pro", temperature=0) evaluators = { "gpt-4": PairwiseComparisonEvaluator(llm=llm_gpt4), "gpt-3.5": PairwiseComparisonEvaluator(llm=llm_gpt35), "gemini-pro":
PairwiseComparisonEvaluator(llm=llm_gemini)
llama_index.core.evaluation.PairwiseComparisonEvaluator
get_ipython().system('pip install llama-index') import os os.environ["OPENAI_API_KEY"] = "sk-..." get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham").load_data() from llama_index.core import Settings nodes = Settings.node_parser.get_nodes_from_documents(documents) from llama_index.core import StorageContext storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) from llama_index.core import SimpleKeywordTableIndex, VectorStoreIndex vector_index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
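Because both indices share one docstore, the same nodes back a keyword index too; a short sketch (the query is illustrative):
keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)
response = vector_index.as_query_engine().query(
    "What did the author do growing up?"
)
print(str(response))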
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/'") get_ipython().system("curl 'https://arxiv.org/pdf/2307.09288.pdf' -o 'data/llama2.pdf'") from llama_index.readers.file import UnstructuredReader documents = UnstructuredReader().load_data("data/llama2.pdf") from llama_index.core.llama_pack import download_llama_pack DenseXRetrievalPack = download_llama_pack("DenseXRetrievalPack", "./dense_pack") from llama_index.llms.openai import OpenAI from llama_index.core.node_parser import SentenceSplitter dense_pack = DenseXRetrievalPack( documents, proposition_llm=OpenAI(model="gpt-3.5-turbo", max_tokens=750), query_llm=
OpenAI(model="gpt-3.5-turbo", max_tokens=256)
llama_index.llms.openai.OpenAI
from llama_hub.semanticscholar.base import SemanticScholarReader import os import openai from llama_index.llms import OpenAI from llama_index.query_engine import CitationQueryEngine from llama_index import ( VectorStoreIndex, StorageContext, load_index_from_storage, ServiceContext, ) from llama_index.response.notebook_utils import display_response s2reader = SemanticScholarReader() openai.api_key = os.environ["OPENAI_API_KEY"] service_context = ServiceContext.from_defaults( llm=OpenAI(model="gpt-3.5-turbo", temperature=0) ) query_space = "large language models" full_text = True total_papers = 50 persist_dir = ( "./citation_" + query_space + "_" + str(total_papers) + "_" + str(full_text) ) if not os.path.exists(persist_dir): documents = s2reader.load_data(query_space, total_papers, full_text=full_text) index = VectorStoreIndex.from_documents(documents, service_context=service_context) index.storage_context.persist(persist_dir=persist_dir) else: index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=persist_dir)
llama_index.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader =
PDFReader()
llama_index.readers.file.PDFReader
get_ipython().system('pip install -q llama-index llama-index-vector-stores-mongodb llama-index-embeddings-fireworks==0.1.2 llama-index-llms-fireworks') get_ipython().system('pip install -q pymongo datasets pandas') import os import getpass fw_api_key = getpass.getpass("Fireworks API Key:") os.environ["FIREWORKS_API_KEY"] = fw_api_key from datasets import load_dataset import pandas as pd dataset = load_dataset("AIatMongoDB/whatscooking.restaurants") dataset_df = pd.DataFrame(dataset["train"]) dataset_df.head(5) from llama_index.core.settings import Settings from llama_index.llms.fireworks import Fireworks from llama_index.embeddings.fireworks import FireworksEmbedding embed_model = FireworksEmbedding( embed_batch_size=512, model_name="nomic-ai/nomic-embed-text-v1.5", api_key=fw_api_key, ) llm = Fireworks( temperature=0, model="accounts/fireworks/models/mixtral-8x7b-instruct", api_key=fw_api_key, ) Settings.llm = llm Settings.embed_model = embed_model import json from llama_index.core import Document from llama_index.core.schema import MetadataMode documents_json = dataset_df.to_json(orient="records") documents_list = json.loads(documents_json) llama_documents = [] for document in documents_list: document["name"] = json.dumps(document["name"]) document["cuisine"] = json.dumps(document["cuisine"]) document["attributes"] = json.dumps(document["attributes"]) document["menu"] = json.dumps(document["menu"]) document["borough"] = json.dumps(document["borough"]) document["address"] = json.dumps(document["address"]) document["PriceRange"] = json.dumps(document["PriceRange"]) document["HappyHour"] = json.dumps(document["HappyHour"]) document["review_count"] = json.dumps(document["review_count"]) document["TakeOut"] = json.dumps(document["TakeOut"]) del document["embedding"] del document["location"] llama_document = Document( text=json.dumps(document), metadata=document, metadata_template="{key}=>{value}", text_template="Metadata: {metadata_str}\n-----\nContent: {content}", ) llama_documents.append(llama_document) print( "\nThe LLM sees this: \n", llama_documents[0].get_content(metadata_mode=MetadataMode.LLM), ) print( "\nThe Embedding model sees this: \n", llama_documents[0].get_content(metadata_mode=MetadataMode.EMBED), ) llama_documents[0] from llama_index.core.node_parser import SentenceSplitter parser =
SentenceSplitter()
llama_index.core.node_parser.SentenceSplitter
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv") df from llama_index.packs.tables.chain_of_table.base import ( ChainOfTableQueryEngine, serialize_table, ) from llama_index.core.llama_pack import download_llama_pack download_llama_pack( "ChainOfTablePack", "./chain_of_table_pack", skip_load=True, ) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") import pandas as pd df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv") df query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True) response = query_engine.query("Who won best Director in the 1972 Academy Awards?") str(response.response) import pandas as pd df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv") df query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True) response = query_engine.query("What was the precipitation in inches during June?") str(response) from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline prompt_str = """\ Here's a serialized table. {serialized_table} Given this table please answer the question: {question} Answer: """ prompt =
PromptTemplate(prompt_str)
llama_index.core.PromptTemplate
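A sketch of running the baseline prompt above as a two-step pipeline over the serialized table, reusing the question the row already asked of this dataframe:
qp = QueryPipeline(chain=[prompt, llm])
response = qp.run(
    serialized_table=serialize_table(df),
    question="What was the precipitation in inches during June?",
)
print(str(response))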
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index llama-hub') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') domain = "docs.llamaindex.ai" docs_url = "https://docs.llamaindex.ai/en/latest/" get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}') from llama_index.readers.file import UnstructuredReader reader = UnstructuredReader() from pathlib import Path all_files_gen = Path("./docs.llamaindex.ai/").rglob("*") all_files = [f.resolve() for f in all_files_gen] all_html_files = [f for f in all_files if f.suffix.lower() == ".html"] len(all_html_files) from llama_index.core import Document doc_limit = 100 docs = [] for idx, f in enumerate(all_html_files): if idx > doc_limit: break print(f"Idx {idx}/{len(all_html_files)}") loaded_docs = reader.load_data(file=f, split_documents=True) start_idx = 72 loaded_doc = Document( text="\n\n".join([d.get_content() for d in loaded_docs[72:]]), metadata={"path": str(f)}, ) print(loaded_doc.metadata["path"]) docs.append(loaded_doc) import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.agent.openai import OpenAIAgent from llama_index.core import ( load_index_from_storage, StorageContext, VectorStoreIndex, ) from llama_index.core import SummaryIndex from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core.node_parser import SentenceSplitter import os from tqdm.notebook import tqdm import pickle async def build_agent_per_doc(nodes, file_base): print(file_base) vi_out_path = f"./data/llamaindex_docs/{file_base}" summary_out_path = f"./data/llamaindex_docs/{file_base}_summary.pkl" if not os.path.exists(vi_out_path): Path("./data/llamaindex_docs/").mkdir(parents=True, exist_ok=True) vector_index = VectorStoreIndex(nodes) vector_index.storage_context.persist(persist_dir=vi_out_path) else: vector_index = load_index_from_storage( StorageContext.from_defaults(persist_dir=vi_out_path), ) summary_index = SummaryIndex(nodes) vector_query_engine = vector_index.as_query_engine(llm=llm) summary_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", llm=llm ) if not os.path.exists(summary_out_path): Path(summary_out_path).parent.mkdir(parents=True, exist_ok=True) summary = str( await summary_query_engine.aquery( "Extract a concise 1-2 line summary of this document" ) ) pickle.dump(summary, open(summary_out_path, "wb")) else: summary = pickle.load(open(summary_out_path, "rb")) query_engine_tools = [ QueryEngineTool( query_engine=vector_query_engine, metadata=ToolMetadata( name=f"vector_tool_{file_base}", description=f"Useful for questions related to specific facts", ), ), QueryEngineTool( query_engine=summary_query_engine, metadata=ToolMetadata( 
name=f"summary_tool_{file_base}", description=f"Useful for summarization questions", ), ), ] function_llm = OpenAI(model="gpt-4") agent = OpenAIAgent.from_tools( query_engine_tools, llm=function_llm, verbose=True, system_prompt=f"""\ You are a specialized agent designed to answer queries about the `{file_base}.html` part of the LlamaIndex docs. You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\ """, ) return agent, summary async def build_agents(docs): node_parser = SentenceSplitter() agents_dict = {} extra_info_dict = {} for idx, doc in enumerate(tqdm(docs)): nodes = node_parser.get_nodes_from_documents([doc]) file_path = Path(doc.metadata["path"]) file_base = str(file_path.parent.stem) + "_" + str(file_path.stem) agent, summary = await build_agent_per_doc(nodes, file_base) agents_dict[file_base] = agent extra_info_dict[file_base] = {"summary": summary, "nodes": nodes} return agents_dict, extra_info_dict agents_dict, extra_info_dict = await build_agents(docs) all_tools = [] for file_base, agent in agents_dict.items(): summary = extra_info_dict[file_base]["summary"] doc_tool = QueryEngineTool( query_engine=agent, metadata=ToolMetadata( name=f"tool_{file_base}", description=summary, ), ) all_tools.append(doc_tool) print(all_tools[0].metadata) from llama_index.core import VectorStoreIndex from llama_index.core.objects import ( ObjectIndex, SimpleToolNodeMapping, ObjectRetriever, ) from llama_index.core.retrievers import BaseRetriever from llama_index.postprocessor.cohere_rerank import CohereRerank from llama_index.core.query_engine import SubQuestionQueryEngine from llama_index.llms.openai import OpenAI llm = OpenAI(model_name="gpt-4-0613") tool_mapping = SimpleToolNodeMapping.from_objects(all_tools) obj_index = ObjectIndex.from_objects( all_tools, tool_mapping, VectorStoreIndex, ) vector_node_retriever = obj_index.as_node_retriever(similarity_top_k=10) class CustomRetriever(BaseRetriever): def __init__(self, vector_retriever, postprocessor=None): self._vector_retriever = vector_retriever self._postprocessor = postprocessor or CohereRerank(top_n=5) super().__init__() def _retrieve(self, query_bundle): retrieved_nodes = self._vector_retriever.retrieve(query_bundle) filtered_nodes = self._postprocessor.postprocess_nodes( retrieved_nodes, query_bundle=query_bundle ) return filtered_nodes class CustomObjectRetriever(ObjectRetriever): def __init__(self, retriever, object_node_mapping, all_tools, llm=None): self._retriever = retriever self._object_node_mapping = object_node_mapping self._llm = llm or
OpenAI("gpt-4-0613")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') import os os.environ["OPENAI_API_KEY"] = "INSERT OPENAI KEY" get_ipython().system('pip install llama-index') from llama_index.core import download_loader from llama_index.readers.wikipedia import WikipediaReader loader =
WikipediaReader()
llama_index.readers.wikipedia.WikipediaReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter splitter = SentenceSplitter(chunk_size=256) index = VectorStoreIndex.from_documents( documents, transformations=[splitter], show_progress=True ) from llama_index.retrievers.bm25 import BM25Retriever vector_retriever = index.as_retriever(similarity_top_k=5) bm25_retriever = BM25Retriever.from_defaults( docstore=index.docstore, similarity_top_k=10 ) from llama_index.core.retrievers import QueryFusionRetriever retriever = QueryFusionRetriever( [vector_retriever, bm25_retriever], retriever_weights=[0.6, 0.4], similarity_top_k=10, num_queries=1, # set this to 1 to disable query generation mode="relative_score", use_async=True, verbose=True, ) import nest_asyncio nest_asyncio.apply() nodes_with_scores = retriever.retrieve( "What happened at Interleafe and Viaweb?" ) for node in nodes_with_scores: print(f"Score: {node.score:.2f} - {node.text[:100]}...\n-----") from llama_index.core.retrievers import QueryFusionRetriever retriever = QueryFusionRetriever( [vector_retriever, bm25_retriever], retriever_weights=[0.6, 0.4], similarity_top_k=10, num_queries=1, # set this to 1 to disable query generation mode="dist_based_score", use_async=True, verbose=True, ) nodes_with_scores = retriever.retrieve( "What happened at Interleafe and Viaweb?" ) for node in nodes_with_scores: print(f"Score: {node.score:.2f} - {node.text[:100]}...\n-----") from llama_index.core.query_engine import RetrieverQueryEngine query_engine = RetrieverQueryEngine.from_args(retriever) response = query_engine.query("What happened at Interleafe and Viaweb?") from llama_index.core.response.notebook_utils import display_response
display_response(response)
llama_index.core.response.notebook_utils.display_response
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai-legacy') get_ipython().system('pip install llama-index') import json from typing import Sequence from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.core.tools import QueryEngineTool, ToolMetadata try: storage_context = StorageContext.from_defaults( persist_dir="./storage/march" ) march_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/june" ) june_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/sept" ) sept_index = load_index_from_storage(storage_context) index_loaded = True except: index_loaded = False get_ipython().system("mkdir -p 'data/10q/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'") if not index_loaded: march_docs = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_march_2022.pdf"] ).load_data() june_docs = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_docs = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index = VectorStoreIndex.from_documents(march_docs) june_index = VectorStoreIndex.from_documents(june_docs) sept_index =
VectorStoreIndex.from_documents(sept_docs)
llama_index.core.VectorStoreIndex.from_documents
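The documented flow persists each freshly built index so the try/except loader above succeeds on the next run; a short sketch:
march_index.storage_context.persist(persist_dir="./storage/march")
june_index.storage_context.persist(persist_dir="./storage/june")
sept_index.storage_context.persist(persist_dir="./storage/sept")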
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core.agent import ReActAgent from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool def multiply(a: int, b: int) -> int: """Multiply two integers and returns the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and returns the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) llm = OpenAI(model="gpt-3.5-turbo-instruct") agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True) response = agent.chat("What is 20+(2*4)? Calculate step by step ") response_gen = agent.stream_chat("What is 20+2*4? Calculate step by step") response_gen.print_response_stream() llm = OpenAI(model="gpt-4") agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True) response = agent.chat("What is 2+2*4") print(response) llm =
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')

import pandas as pd
from pathlib import Path

data_dir = Path("./WikiTableQuestions/csv/200-csv")
csv_files = sorted([f for f in data_dir.glob("*.csv")])
dfs = []
for csv_file in csv_files:
    print(f"processing file: {csv_file}")
    try:
        df = pd.read_csv(csv_file)
        dfs.append(df)
    except Exception as e:
        print(f"Error parsing {csv_file}: {str(e)}")

tableinfo_dir = "WikiTableQuestions_TableInfo"
get_ipython().system('mkdir {tableinfo_dir}')

from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI


class TableInfo(BaseModel):
    """Information regarding a structured table."""

    table_name: str = Field(
        ..., description="table name (must be underscores and NO spaces)"
    )
    table_summary: str = Field(
        ..., description="short, concise summary/caption of the table"
    )


prompt_str = """\
Give me a summary of the table with the following JSON format.

- The table name must be unique to the table and describe it while being concise.
- Do NOT output a generic table name (e.g. table, my_table).

Do NOT make the table name one of the following: {exclude_table_name_list}

Table:
{table_str}

Summary: """

program = LLMTextCompletionProgram.from_defaults(
    output_cls=TableInfo,
    llm=OpenAI(model="gpt-3.5-turbo"),
    prompt_template_str=prompt_str,
)

import json
from typing import Optional


def _get_tableinfo_with_index(idx: int) -> Optional[TableInfo]:
    results_gen = Path(tableinfo_dir).glob(f"{idx}_*")
    results_list = list(results_gen)
    if len(results_list) == 0:
        return None
    elif len(results_list) == 1:
        path = results_list[0]
        return TableInfo.parse_file(path)
    else:
        raise ValueError(
            f"More than one file matching index: {list(results_gen)}"
        )


table_names = set()
table_infos = []
for idx, df in enumerate(dfs):
    table_info = _get_tableinfo_with_index(idx)
    if table_info:
        table_infos.append(table_info)
    else:
        while True:
            df_str = df.head(10).to_csv()
            table_info = program(
                table_str=df_str,
                exclude_table_name_list=str(list(table_names)),
            )
            table_name = table_info.table_name
            print(f"Processed table: {table_name}")
            if table_name not in table_names:
                table_names.add(table_name)
                break
            else:
                print(f"Table name {table_name} already exists, trying again.")

        out_file = f"{tableinfo_dir}/{idx}_{table_name}.json"
        json.dump(table_info.dict(), open(out_file, "w"))
        table_infos.append(table_info)

from sqlalchemy import (
    create_engine,
    MetaData,
    Table,
    Column,
    String,
    Integer,
)
import re


def sanitize_column_name(col_name):
    return re.sub(r"\W+", "_", col_name)


def create_table_from_dataframe(
    df: pd.DataFrame, table_name: str, engine, metadata_obj
):
    sanitized_columns = {col: sanitize_column_name(col) for col in df.columns}
    df = df.rename(columns=sanitized_columns)
    columns = [
        Column(col, String if dtype == "object" else Integer)
        for col, dtype in zip(df.columns, df.dtypes)
    ]
    table = Table(table_name, metadata_obj, *columns)
    metadata_obj.create_all(engine)
    with engine.connect() as conn:
        for _, row in df.iterrows():
            insert_stmt = table.insert().values(**row.to_dict())
            conn.execute(insert_stmt)
        conn.commit()


engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
for idx, df in enumerate(dfs):
    tableinfo = _get_tableinfo_with_index(idx)
    print(f"Creating table: {tableinfo.table_name}")
    create_table_from_dataframe(df, tableinfo.table_name, engine, metadata_obj)

import phoenix as px
import llama_index.core

px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")

from llama_index.core.objects import (
    SQLTableNodeMapping,
    ObjectIndex,
    SQLTableSchema,
)
from llama_index.core import SQLDatabase, VectorStoreIndex

sql_database = SQLDatabase(engine)

table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
    SQLTableSchema(table_name=t.table_name, context_str=t.table_summary)
    for t in table_infos
]  # add a SQLTableSchema for each table

obj_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)

from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent

sql_retriever = SQLRetriever(sql_database)


def get_table_context_str(table_schema_objs: List[SQLTableSchema]):
    """Get table context string."""
    context_strs = []
    for table_schema_obj in table_schema_objs:
        table_info = sql_database.get_single_table_info(
            table_schema_obj.table_name
        )
        if table_schema_obj.context_str:
            table_opt_context = " The table description is: "
            table_opt_context += table_schema_obj.context_str
            table_info += table_opt_context
        context_strs.append(table_info)
    return "\n\n".join(context_strs)


table_parser_component = FnComponent(fn=get_table_context_str)

from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import FnComponent
from llama_index.core.llms import ChatResponse


def parse_response_to_sql(response: ChatResponse) -> str:
    """Parse response to SQL."""
    response = response.message.content
    sql_query_start = response.find("SQLQuery:")
    if sql_query_start != -1:
        response = response[sql_query_start:]
        if response.startswith("SQLQuery:"):
            response = response[len("SQLQuery:") :]
    sql_result_start = response.find("SQLResult:")
    if sql_result_start != -1:
        response = response[:sql_result_start]
    return response.strip().strip("```").strip()


sql_parser_component = FnComponent(fn=parse_response_to_sql)

text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format(
    dialect=engine.dialect.name
)
print(text2sql_prompt.template)

response_synthesis_prompt_str = (
    "Given an input question, synthesize a response from the query results.\n"
    "Query: {query_str}\n"
    "SQL: {sql_query}\n"
    "SQL Response: {context_str}\n"
    "Response: "
)
response_synthesis_prompt = PromptTemplate(
    response_synthesis_prompt_str,
)

llm = OpenAI(model="gpt-3.5-turbo")

from llama_index.core.query_pipeline import (
    QueryPipeline as QP,
    Link,
    InputComponent,
    CustomQueryComponent,
)

qp = QP(
    modules={
        "input": InputComponent(),
        "table_retriever": obj_retriever,
        "table_output_parser": table_parser_component,
        "text2sql_prompt": text2sql_prompt,
        "text2sql_llm": llm,
        "sql_output_parser": sql_parser_component,
        "sql_retriever": sql_retriever,
        "response_synthesis_prompt": response_synthesis_prompt,
        "response_synthesis_llm": llm,
    },
    verbose=True,
)

qp.add_chain(["input", "table_retriever", "table_output_parser"])
qp.add_link("input", "text2sql_prompt", dest_key="query_str")
qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema")
qp.add_chain(
    ["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"]
)
qp.add_link(
    "sql_output_parser", "response_synthesis_prompt", dest_key="sql_query"
)
qp.add_link(
    "sql_retriever", "response_synthesis_prompt", dest_key="context_str"
)
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str")
qp.add_link("response_synthesis_prompt", "response_synthesis_llm")

from pyvis.network import Network

net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.dag)
net.show("text2sql_dag.html")

response = qp.run(
    query="What was the year that The Notorious B.I.G was signed to Bad Boy?"
)
print(str(response))

response = qp.run(query="Who won best director in the 1972 academy awards")
print(str(response))

response = qp.run(query="What was the term of Pasquale Preziosa?")
print(str(response))

from llama_index.core import VectorStoreIndex, load_index_from_storage
from sqlalchemy import text
from llama_index.core.schema import TextNode
from llama_index.core import StorageContext
import os
from pathlib import Path
from typing import Dict


def index_all_tables(
    sql_database: SQLDatabase, table_index_dir: str = "table_index_dir"
) -> Dict[str, VectorStoreIndex]:
    """Index all tables."""
    if not Path(table_index_dir).exists():
        os.makedirs(table_index_dir)

    vector_index_dict = {}
    engine = sql_database.engine
    for table_name in sql_database.get_usable_table_names():
        print(f"Indexing rows in table: {table_name}")
        if not os.path.exists(f"{table_index_dir}/{table_name}"):
            with engine.connect() as conn:
                cursor = conn.execute(text(f'SELECT * FROM "{table_name}"'))
                result = cursor.fetchall()
                row_tups = []
                for row in result:
                    row_tups.append(tuple(row))
            nodes = [TextNode(text=str(t)) for t in row_tups]
            index = VectorStoreIndex(nodes)
            index.set_index_id("vector_index")
            index.storage_context.persist(f"{table_index_dir}/{table_name}")
        else:
            storage_context = StorageContext.from_defaults(
                persist_dir=f"{table_index_dir}/{table_name}"
            )
            index = load_index_from_storage(
                storage_context, index_id="vector_index"
            )
        vector_index_dict[table_name] = index
    return vector_index_dict


vector_index_dict = index_all_tables(sql_database)

test_retriever = vector_index_dict["Bad_Boy_Artists"].as_retriever(
    similarity_top_k=1
)
nodes = test_retriever.retrieve("P. Diddy")
print(nodes[0].get_content())

from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent

sql_retriever =
SQLRetriever(sql_database)
llama_index.core.retrievers.SQLRetriever
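SQLRetriever executes a raw SQL string against the wrapped SQLDatabase and returns the resulting rows as retrieved nodes. A small hedged usage sketch; the table name is borrowed from the prompt above:

from llama_index.core.retrievers import SQLRetriever

sql_retriever = SQLRetriever(sql_database)
# retrieve() takes a SQL query string and returns NodeWithScore objects
nodes = sql_retriever.retrieve('SELECT * FROM "Bad_Boy_Artists" LIMIT 3')
for node in nodes:
    print(node.get_content())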
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')

import os

OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN

import wikipedia
import urllib.request
from pathlib import Path

image_path = Path("mixed_wiki")
image_uuid = 0
image_metadata_dict = {}
MAX_IMAGES_PER_WIKI = 30

wiki_titles = [
    "Vincent van Gogh",
    "San Francisco",
    "Batman",
    "iPhone",
    "Tesla Model S",
    "BTS band",
]

if not image_path.exists():
    Path.mkdir(image_path)

for title in wiki_titles:
    images_per_wiki = 0
    print(title)
    try:
        page_py = wikipedia.page(title)
        list_img_urls = page_py.images
        for url in list_img_urls:
            if url.endswith(".jpg") or url.endswith(".png"):
                image_uuid += 1
                image_file_name = title + "_" + url.split("/")[-1]
                image_metadata_dict[image_uuid] = {
                    "filename": image_file_name,
                    "img_path": "./" + str(image_path / f"{image_uuid}.jpg"),
                }
                urllib.request.urlretrieve(
                    url, image_path / f"{image_uuid}.jpg"
                )
                images_per_wiki += 1
                if images_per_wiki > MAX_IMAGES_PER_WIKI:
                    break
    except Exception:
        print(f"No images found for Wikipedia page: {title}")
        continue

from PIL import Image
import matplotlib.pyplot as plt
import os

image_paths = []
for img_path in os.listdir("./mixed_wiki"):
    image_paths.append(str(os.path.join("./mixed_wiki", img_path)))


def plot_images(image_paths):
    images_shown = 0
    plt.figure(figsize=(16, 9))
    for img_path in image_paths:
        if os.path.isfile(img_path):
            image = Image.open(img_path)
            plt.subplot(3, 3, images_shown + 1)
            plt.imshow(image)
            plt.xticks([])
            plt.yticks([])
            images_shown += 1
            if images_shown >= 9:
                break


plot_images(image_paths)

from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext

import qdrant_client

client = qdrant_client.QdrantClient(path="qdrant_img_db")

text_store = QdrantVectorStore(
    client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
    client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
    vector_store=text_store, image_store=image_store
)

documents = SimpleDirectoryReader("./mixed_wiki/").load_data()
index = MultiModalVectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)

input_image = "./mixed_wiki/2.jpg"
plot_images([input_image])

retriever_engine = index.as_retriever(image_similarity_top_k=4)
retrieval_results = retriever_engine.image_to_image_retrieve(
    "./mixed_wiki/2.jpg"
)
retrieved_images = []
for res in retrieval_results:
    retrieved_images.append(res.node.metadata["file_path"])

plot_images(retrieved_images[1:])

from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.schema import ImageDocument

image_documents = [
ImageDocument(image_path=input_image)
llama_index.core.schema.ImageDocument
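ImageDocument wraps a local image path so it can be fed to a multi-modal LLM. A hedged sketch of how the completed list is typically consumed; the model name and prompt text are illustrative:

from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

image_documents = [ImageDocument(image_path=input_image)]

mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=300)
response = mm_llm.complete(
    prompt="Describe the image.",
    image_documents=image_documents,
)
print(response)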
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lantern')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install psycopg2-binary llama-index asyncpg')

from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.lantern import LanternVectorStore
import textwrap
import openai

import os

os.environ["OPENAI_API_KEY"] = "<your_key>"
openai.api_key = "<your_key>"

get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")

documents = SimpleDirectoryReader("./data/paul_graham").load_data()
print("Document ID:", documents[0].doc_id)

import psycopg2

connection_string = "postgresql://postgres:postgres@localhost:5432"
db_name = "vector_db"  # use a dedicated database; dropping the "postgres" database you are connected to would fail
conn = psycopg2.connect(connection_string)
conn.autocommit = True

with conn.cursor() as c:
    c.execute(f"DROP DATABASE IF EXISTS {db_name}")
    c.execute(f"CREATE DATABASE {db_name}")

from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings

Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")

from sqlalchemy import make_url

url = make_url(connection_string)

vector_store = LanternVectorStore.from_params(
    database=db_name,
    host=url.host,
    password=url.password,
    port=url.port,
    user=url.username,
    table_name="paul_graham_essay",
    embed_dim=1536,  # openai embedding dimension
)

storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, show_progress=True
)
query_engine = index.as_query_engine()

response = query_engine.query("What did the author do?")
print(textwrap.fill(str(response), 100))

response = query_engine.query("What happened in the mid 1980s?")
print(textwrap.fill(str(response), 100))

vector_store = LanternVectorStore.from_params(
    database=db_name,
    host=url.host,
    password=url.password,
    port=url.port,
    user=url.username,
    table_name="paul_graham_essay",
    embed_dim=1536,  # openai embedding dimension
    m=16,  # HNSW M parameter
    ef_construction=128,  # HNSW ef construction parameter
    ef=64,  # HNSW ef search parameter
)

index =
VectorStoreIndex.from_vector_store(vector_store=vector_store)
llama_index.core.VectorStoreIndex.from_vector_store
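from_vector_store reconnects an index to embeddings that already live in the store, so nothing is re-ingested or re-embedded. A hedged sketch of the completion in context:

# reconnect to the existing Lantern-backed table; no documents are re-embedded
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do?")
print(textwrap.fill(str(response), 100))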
get_ipython().run_line_magic('pip', 'install llama-index-readers-discord')
get_ipython().system('pip install llama-index')

import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

get_ipython().system('pip install nest_asyncio')

import nest_asyncio

nest_asyncio.apply()

from llama_index.core import SummaryIndex
from llama_index.readers.discord import DiscordReader
from IPython.display import Markdown, display

import os

discord_token = os.getenv("DISCORD_TOKEN")
channel_ids = [1057178784895348746]  # Replace with your channel_id
documents =
DiscordReader(discord_token=discord_token)
llama_index.readers.discord.DiscordReader
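The completion only constructs the reader; loading still requires a load_data call with the channel ids defined in the prompt. A hedged sketch of the likely continuation (the query string is illustrative):

reader = DiscordReader(discord_token=discord_token)
documents = reader.load_data(channel_ids=channel_ids)

index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What are the most recent discussions about?")
display(Markdown(f"<b>{response}</b>"))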
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')

from llama_index.core.llama_dataset import (
    LabelledRagDataExample,
    CreatedByType,
    CreatedBy,
)

query = "This is a test query, is it not?"
query_by = CreatedBy(type=CreatedByType.AI, model_name="gpt-4")
reference_answer = "Yes it is."
reference_answer_by =
CreatedBy(type=CreatedByType.HUMAN)
llama_index.core.llama_dataset.CreatedBy
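CreatedBy records the provenance of each dataset field; a HUMAN-typed value needs no model_name. A hedged sketch assembling the full example (the reference_contexts value is illustrative):

reference_answer_by = CreatedBy(type=CreatedByType.HUMAN)

rag_example = LabelledRagDataExample(
    query=query,
    query_by=query_by,
    reference_contexts=["This is a sample context"],
    reference_answer=reference_answer,
    reference_answer_by=reference_answer_by,
)
print(rag_example.json())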
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-extractors-entity')
get_ipython().system('pip install llama-index')

import nest_asyncio

nest_asyncio.apply()

import os
import openai

os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE"

from llama_index.llms.openai import OpenAI
from llama_index.core.schema import MetadataMode

llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512)

from llama_index.core.extractors import (
    SummaryExtractor,
    QuestionsAnsweredExtractor,
    TitleExtractor,
    KeywordExtractor,
    BaseExtractor,
)
from llama_index.extractors.entity import EntityExtractor
from llama_index.core.node_parser import TokenTextSplitter

text_splitter = TokenTextSplitter(
    separator=" ", chunk_size=512, chunk_overlap=128
)


class CustomExtractor(BaseExtractor):
    def extract(self, nodes):
        metadata_list = [
            {
                "custom": (
                    node.metadata["document_title"]
                    + "\n"
                    + node.metadata["excerpt_keywords"]
                )
            }
            for node in nodes
        ]
        return metadata_list


extractors = [
    TitleExtractor(nodes=5, llm=llm),
    QuestionsAnsweredExtractor(questions=3, llm=llm),
]

transformations = [text_splitter] + extractors

from llama_index.core import SimpleDirectoryReader

get_ipython().system('mkdir -p data')
get_ipython().system('wget -O "data/10k-132.pdf" "https://www.dropbox.com/scl/fi/6dlqdk6e2k1mjhi8dee5j/uber.pdf?rlkey=2jyoe49bg2vwdlz30l76czq6g&dl=1"')
get_ipython().system('wget -O "data/10k-vFinal.pdf" "https://www.dropbox.com/scl/fi/qn7g3vrk5mqb18ko4e5in/lyft.pdf?rlkey=j6jxtjwo8zbstdo4wz3ns8zoj&dl=1"')

uber_docs = SimpleDirectoryReader(input_files=["data/10k-132.pdf"]).load_data()
uber_front_pages = uber_docs[0:3]
uber_content = uber_docs[63:69]
uber_docs = uber_front_pages + uber_content

from llama_index.core.ingestion import IngestionPipeline

pipeline = IngestionPipeline(transformations=transformations)
uber_nodes = pipeline.run(documents=uber_docs)
uber_nodes[1].metadata

lyft_docs = SimpleDirectoryReader(
    input_files=["data/10k-vFinal.pdf"]
).load_data()
lyft_front_pages = lyft_docs[0:3]
lyft_content = lyft_docs[68:73]
lyft_docs = lyft_front_pages + lyft_content

from llama_index.core.ingestion import IngestionPipeline

pipeline = IngestionPipeline(transformations=transformations)
lyft_nodes = pipeline.run(documents=lyft_docs)
lyft_nodes[2].metadata

from llama_index.core.question_gen import LLMQuestionGenerator
from llama_index.core.question_gen.prompts import (
    DEFAULT_SUB_QUESTION_PROMPT_TMPL,
)

question_gen = LLMQuestionGenerator.from_defaults(
    llm=llm,
    prompt_template_str="""
        Follow the example, but instead of giving a question, always prefix the question
        with: 'By first identifying and quoting the most relevant sources, '.
        """
    + DEFAULT_SUB_QUESTION_PROMPT_TMPL,
)

from copy import deepcopy

nodes_no_metadata = deepcopy(uber_nodes) + deepcopy(lyft_nodes)
for node in nodes_no_metadata:
    node.metadata = {
        k: node.metadata[k]
        for k in node.metadata
        if k in ["page_label", "file_name"]
    }

print(
    "LLM sees:\n",
    (nodes_no_metadata)[9].get_content(metadata_mode=MetadataMode.LLM),
)

from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.core.tools import QueryEngineTool, ToolMetadata

index_no_metadata = VectorStoreIndex(
    nodes=nodes_no_metadata,
)
engine_no_metadata = index_no_metadata.as_query_engine(
    similarity_top_k=10, llm=
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
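With the query engine completed, the natural next step in this row's notebook is wrapping it as a tool for sub-question decomposition. A hedged sketch; the tool name and description are illustrative:

engine_no_metadata = index_no_metadata.as_query_engine(
    similarity_top_k=10, llm=OpenAI(model="gpt-4")
)
final_engine_no_metadata = SubQuestionQueryEngine.from_defaults(
    query_engine_tools=[
        QueryEngineTool(
            query_engine=engine_no_metadata,
            metadata=ToolMetadata(
                name="sec_filing_documents",
                description="financial information on companies",
            ),
        )
    ],
    question_gen=question_gen,
    use_async=True,
)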
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')

from llama_index.core import PromptTemplate

choices = [
    "Useful for questions related to apples",
    "Useful for questions related to oranges",
]


def get_choice_str(choices):
    choices_str = "\n\n".join(
        [f"{idx+1}. {c}" for idx, c in enumerate(choices)]
    )
    return choices_str


choices_str = get_choice_str(choices)

router_prompt0 =
PromptTemplate( "Some choices are given below. It is provided in a numbered list (1 to" " {num_choices}), where each item in the list corresponds to a" " summary.\n---------------------\n{context_list}\n---------------------\nUsing" " only the choices above and not prior knowledge, return the top choices" " (no more than {max_outputs}, but only select what is needed)
llama_index.core.PromptTemplate
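The completion above is cut off by the viewer's field-length limit; the pattern it shows is a PromptTemplate whose placeholders ({num_choices}, {context_list}, {max_outputs}, plus a query) get filled via format(). A hedged sketch with a shortened, illustrative template, not the verbatim one:

router_prompt = PromptTemplate(
    "Some choices are given below, in a numbered list (1 to {num_choices}):\n"
    "{context_list}\n"
    "Using only the choices above and not prior knowledge, return the top "
    "choices (no more than {max_outputs}) most relevant to: {query_str}\n"
)
fmt_prompt = router_prompt.format(
    num_choices=len(choices),
    context_list=choices_str,
    max_outputs=1,
    query_str="Can you tell me more about the amount of Vitamin C in apples?",
)
print(fmt_prompt)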
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')

import phoenix as px

px.launch_app()

import llama_index.core

llama_index.core.set_global_handler("arize_phoenix")

import nest_asyncio

nest_asyncio.apply()

from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker

from llama_index.core.llama_pack import download_llama_pack

download_llama_pack(
    "LLMCompilerAgentPack",
    "./agent_pack",
    skip_load=True,
)

# import from the locally downloaded pack (shadows the import above)
from agent_pack.step import LLMCompilerAgentWorker

import json
from typing import Sequence, List

from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool

import nest_asyncio

nest_asyncio.apply()


def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result integer"""
    return a * b


multiply_tool = FunctionTool.from_defaults(fn=multiply)


def add(a: int, b: int) -> int:
    """Add two integers and return the result integer"""
    return a + b


add_tool =
FunctionTool.from_defaults(fn=add)
llama_index.core.tools.FunctionTool.from_defaults
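FunctionTool.from_defaults infers the tool's name and schema from the function signature and docstring. A hedged sketch of wiring both tools into the LLMCompiler agent; the from_tools/AgentRunner wiring is assumed from the pack's example, not from this row:

from llama_index.core.agent import AgentRunner

tools = [multiply_tool, add_tool]
llm = OpenAI(model="gpt-4")

# the compiler worker plans tool calls in parallel, then joins the results
agent_worker = LLMCompilerAgentWorker.from_tools(tools, llm=llm, verbose=True)
agent = AgentRunner(agent_worker)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))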
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko')
get_ipython().system('pip install llama-index')

import os

os.environ["KONKO_API_KEY"] = "<your-api-key>"

from llama_index.llms.konko import Konko
from llama_index.core.llms import ChatMessage

llm = Konko(model="meta-llama/llama-2-13b-chat")
messages = ChatMessage(role="user", content="Explain Big Bang Theory briefly")
resp = llm.chat([messages])
print(resp)

import os

os.environ["OPENAI_API_KEY"] = "<your-api-key>"

llm =
Konko(model="gpt-3.5-turbo")
llama_index.llms.konko.Konko
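Konko can also route OpenAI model names once the OpenAI key is set, as done just above. A hedged sketch reusing the messages object from the prompt:

llm = Konko(model="gpt-3.5-turbo")
resp = llm.chat([messages])
print(resp)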
get_ipython().system('pip install llama-index')

import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")

documents =
SimpleDirectoryReader("./data/paul_graham")
llama_index.core.SimpleDirectoryReader
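SimpleDirectoryReader returns Document objects once load_data() is chained on, and those feed directly into an index. A hedged sketch of the canonical follow-on; the query string is illustrative:

documents = SimpleDirectoryReader("./data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)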
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')

import pandas as pd

pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)

get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')

from llama_index.readers.file import FlatReader
from pathlib import Path

reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))

from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio

nest_asyncio.apply()

reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))

pipeline = IngestionPipeline(
    documents=docs,
    transformations=[
        HTMLNodeParser.from_defaults(),
        SentenceSplitter(chunk_size=1024, chunk_overlap=200),
        OpenAIEmbedding(),
    ],
)
eval_nodes = pipeline.run(documents=docs)

eval_llm = OpenAI(model="gpt-3.5-turbo")

dataset_generator = DatasetGenerator(
    eval_nodes[:100],
    llm=eval_llm,
    show_progress=True,
    num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)

eval_dataset.save_json("data/tesla10k_eval_dataset.json")

eval_dataset = QueryResponseDataset.from_json(
    "data/tesla10k_eval_dataset.json"
)

eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]

from llama_index.core.evaluation import (
    CorrectnessEvaluator,
    SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
    get_responses,
    get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner

evaluator_c =
CorrectnessEvaluator(llm=eval_llm)
llama_index.core.evaluation.CorrectnessEvaluator
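CorrectnessEvaluator grades generated answers against the reference answers built above; pairing it with the semantic-similarity evaluator under a BatchEvalRunner is the usual pattern. A hedged sketch; the worker count is illustrative:

evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator()
evaluator_dict = {
    "correctness": evaluator_c,
    "semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
    evaluator_dict, workers=2, show_progress=True
)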
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-tencentvectordb')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install tcvectordb')

from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    StorageContext,
)
from llama_index.vector_stores.tencentvectordb import TencentVectorDB
from llama_index.core.vector_stores.tencentvectordb import (
    CollectionParams,
    FilterField,
)
import tcvectordb

tcvectordb.debug.DebugEnable = False

import getpass  # needed for the key prompt below
import openai

OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY

get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")

documents =
SimpleDirectoryReader("./data/paul_graham")
llama_index.core.SimpleDirectoryReader
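Downstream, the loaded documents would be indexed into a TencentVectorDB-backed store. A heavily hedged sketch: the url and key values are placeholders, and the constructor arguments are assumed from the vector store's documented parameters rather than from this row:

vector_store = TencentVectorDB(
    url="http://10.0.X.X",  # placeholder endpoint
    key="eC4bLRy2va******",  # placeholder API key
    collection_params=CollectionParams(dimension=1536, drop_exists=True),
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)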
get_ipython().system('pip install llama-index')

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import (
    PrevNextNodePostprocessor,
    AutoPrevNextNodePostprocessor,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore

get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")

from llama_index.core import StorageContext

documents = SimpleDirectoryReader("./data/paul_graham").load_data()

from llama_index.core import Settings

Settings.chunk_size = 512

nodes = Settings.node_parser.get_nodes_from_documents(documents)

docstore = SimpleDocumentStore()
docstore.add_documents(nodes)

storage_context = StorageContext.from_defaults(docstore=docstore)

index = VectorStoreIndex(nodes, storage_context=storage_context)

node_postprocessor =
PrevNextNodePostprocessor(docstore=docstore, num_nodes=4)
llama_index.core.postprocessor.PrevNextNodePostprocessor
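PrevNextNodePostprocessor pulls up to num_nodes neighboring chunks from the docstore around each retrieved node, which helps with questions that span chunk boundaries. A hedged usage sketch; the response_mode and query are illustrative:

node_postprocessor = PrevNextNodePostprocessor(docstore=docstore, num_nodes=4)

query_engine = index.as_query_engine(
    similarity_top_k=1,
    node_postprocessors=[node_postprocessor],
    response_mode="tree_summarize",
)
response = query_engine.query(
    "What did the author do after handing off Y Combinator to Sam Altman?"
)
print(response)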