Dataset columns:
  prompt:     string, 70 to 19.8k characters
  completion: string, 8 to 1.03k characters
  api:        string, 23 to 93 characters
get_ipython().run_line_magic('pip', 'install llama-index-program-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-api') get_ipython().system('pip install llama-index') from llama_index.llms.llama_api import LlamaAPI api_key = "LL-your-key" llm = LlamaAPI(api_key=api_key) resp = llm.complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ),
ChatMessage(role="user", content="What is your name")
llama_index.core.llms.ChatMessage
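A minimal sketch of this row once prompt and completion are joined: the assembled messages list is sent through `llm.chat`, which `LlamaAPI` supports via the standard LlamaIndex chat interface (the API key is the row's placeholder).

```python
from llama_index.core.llms import ChatMessage
from llama_index.llms.llama_api import LlamaAPI

llm = LlamaAPI(api_key="LL-your-key")  # placeholder key, as in the row
messages = [
    ChatMessage(
        role="system", content="You are a pirate with a colorful personality"
    ),
    ChatMessage(role="user", content="What is your name"),
]
resp = llm.chat(messages)  # returns a ChatResponse
print(resp)
```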
import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm') get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web') get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference') import hashlib import json from pathlib import Path import os import textwrap from typing import List, Union import llama_index.core from llama_index.readers.web import SimpleWebPageReader from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core.callbacks import CallbackManager from llama_index.callbacks.openinference import OpenInferenceCallbackHandler from llama_index.callbacks.openinference.base import ( as_dataframe, QueryData, NodeData, ) from llama_index.core.node_parser import SimpleNodeParser import pandas as pd from tqdm import tqdm documents =
SimpleWebPageReader()
llama_index.readers.web.SimpleWebPageReader
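A sketch of how this row typically continues: the OpenInference handler is registered globally and the reader fetches a page as text (the URL is an assumed example, not from the row; `html_to_text=True` relies on the `html2text` package the row installs).

```python
from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.openinference import OpenInferenceCallbackHandler
from llama_index.readers.web import SimpleWebPageReader

# Route LlamaIndex callback events through the OpenInference handler.
callback_handler = OpenInferenceCallbackHandler()
Settings.callback_manager = CallbackManager([callback_handler])

documents = SimpleWebPageReader(html_to_text=True).load_data(
    ["http://www.paulgraham.com/worked.html"]  # assumed example URL
)
index = VectorStoreIndex.from_documents(documents)
```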
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-readers-papers') get_ipython().system('pip install llama_index transformers wikipedia html2text pyvis') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import KnowledgeGraphIndex from llama_index.readers.web import SimpleWebPageReader from llama_index.core.graph_stores import SimpleGraphStore from llama_index.core import StorageContext from llama_index.llms.openai import OpenAI from transformers import pipeline triplet_extractor = pipeline( "text2text-generation", model="Babelscape/rebel-large", tokenizer="Babelscape/rebel-large", device="cuda:0", ) def extract_triplets(input_text): text = triplet_extractor.tokenizer.batch_decode( [ triplet_extractor( input_text, return_tensors=True, return_text=False )[0]["generated_token_ids"] ] )[0] triplets = [] relation, subject, relation, object_ = "", "", "", "" text = text.strip() current = "x" for token in ( text.replace("<s>", "") .replace("<pad>", "") .replace("</s>", "") .split() ): if token == "<triplet>": current = "t" if relation != "": triplets.append( (subject.strip(), relation.strip(), object_.strip()) ) relation = "" subject = "" elif token == "<subj>": current = "s" if relation != "": triplets.append( (subject.strip(), relation.strip(), object_.strip()) ) object_ = "" elif token == "<obj>": current = "o" relation = "" else: if current == "t": subject += " " + token elif current == "s": object_ += " " + token elif current == "o": relation += " " + token if subject != "" and relation != "" and object_ != "": triplets.append((subject.strip(), relation.strip(), object_.strip())) return triplets import wikipedia class WikiFilter: def __init__(self): self.cache = {} def filter(self, candidate_entity): if candidate_entity in self.cache: return self.cache[candidate_entity]["title"] try: page = wikipedia.page(candidate_entity, auto_suggest=False) entity_data = { "title": page.title, "url": page.url, "summary": page.summary, } self.cache[candidate_entity] = entity_data self.cache[page.title] = entity_data return entity_data["title"] except: return None wiki_filter = WikiFilter() def extract_triplets_wiki(text): relations = extract_triplets(text) filtered_relations = [] for relation in relations: (subj, rel, obj) = relation filtered_subj = wiki_filter.filter(subj) filtered_obj = wiki_filter.filter(obj) if filtered_subj is None and filtered_obj is None: continue filtered_relations.append( ( filtered_subj or subj, rel, filtered_obj or obj, ) ) return filtered_relations from llama_index.core import download_loader from llama_index.readers.papers import ArxivReader loader = ArxivReader() documents = loader.load_data( search_query="Retrieval Augmented Generation", max_results=1 ) import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import Document documents = [Document(text="".join([x.text for x in documents]))] from llama_index.core import Settings llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo") Settings.llm = llm Settings.chunk_size = 256 graph_store =
SimpleGraphStore()
llama_index.core.graph_stores.SimpleGraphStore
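A sketch of the likely continuation: the graph store is wrapped in a storage context and the custom REBEL + Wikipedia extractor from the row replaces the default LLM-based triplet extraction (`max_triplets_per_chunk=3` is an assumed setting).

```python
from llama_index.core import KnowledgeGraphIndex, StorageContext
from llama_index.core.graph_stores import SimpleGraphStore

graph_store = SimpleGraphStore()
storage_context = StorageContext.from_defaults(graph_store=graph_store)

# extract_triplets_wiki is defined earlier in this row.
index = KnowledgeGraphIndex.from_documents(
    documents,
    max_triplets_per_chunk=3,
    kg_triplet_extract_fn=extract_triplets_wiki,
    storage_context=storage_context,
    include_embeddings=True,
)
```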
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from IPython.display import Markdown, display import os os.environ["OPENAI_API_KEY"] = "sk-..." documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents, chunk_size=512) from llama_index.core.output_parsers import LangchainOutputParser from langchain.output_parsers import StructuredOutputParser, ResponseSchema response_schemas = [ ResponseSchema( name="Education", description=( "Describes the author's educational experience/background." ), ), ResponseSchema( name="Work", description="Describes the author's work experience/background.", ), ] lc_output_parser = StructuredOutputParser.from_response_schemas( response_schemas ) output_parser = LangchainOutputParser(lc_output_parser) from llama_index.core.prompts.default_prompts import ( DEFAULT_TEXT_QA_PROMPT_TMPL, ) fmt_qa_tmpl = output_parser.format(DEFAULT_TEXT_QA_PROMPT_TMPL) print(fmt_qa_tmpl) from llama_index.llms.openai import OpenAI llm =
OpenAI(output_parser=output_parser)
llama_index.llms.openai.OpenAI
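A short sketch of what the parser-aware LLM is for: passed into the query engine, it formats prompts with the Langchain schema and parses answers into the Education/Work structure (the query string is an assumed example).

```python
# llm carries the output_parser configured in the row above.
query_engine = index.as_query_engine(llm=llm)
response = query_engine.query(
    "What are a few things the author did growing up?"
)
print(response)
```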
get_ipython().system('pip install llama-index-llms-dashscope') get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY') import os os.environ["DASHSCOPE_API_KEY"] = "YOUR_DASHSCOPE_API_KEY" from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels dashscope_llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX) resp = dashscope_llm.complete("How to make cake?") print(resp) responses = dashscope_llm.stream_complete("How to make cake?") for response in responses: print(response.delta, end="") from llama_index.core.base.llms.types import MessageRole, ChatMessage messages = [ ChatMessage( role=MessageRole.SYSTEM, content="You are a helpful assistant." ), ChatMessage(role=MessageRole.USER, content="How to make cake?"), ] resp = dashscope_llm.chat(messages) print(resp) responses = dashscope_llm.stream_chat(messages) for response in responses: print(response.delta, end="") messages = [ ChatMessage( role=MessageRole.SYSTEM, content="You are a helpful assistant." ), ChatMessage(role=MessageRole.USER, content="How to make cake?"), ] resp = dashscope_llm.chat(messages) print(resp) messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=resp.message.content)
llama_index.core.base.llms.types.ChatMessage
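A sketch of the multi-turn continuation this row sets up: after appending the assistant's reply, the next user turn (assumed content) is added and the full history is sent back to the model.

```python
messages.append(
    ChatMessage(role=MessageRole.ASSISTANT, content=resp.message.content)
)
# Follow-up user turn; the whole conversation goes back to the model.
messages.append(
    ChatMessage(role=MessageRole.USER, content="How to make it without sugar?")
)
resp = dashscope_llm.chat(messages)
print(resp)
```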
get_ipython().system('pip install llama-index llama-hub') import nest_asyncio nest_asyncio.apply() get_ipython().system('wget "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" -O paul_graham_essay.txt') from llama_index.core import SimpleDirectoryReader from llama_index.core.node_parser import SimpleNodeParser reader = SimpleDirectoryReader(input_files=["paul_graham_essay.txt"]) documents = reader.load_data() node_parser =
SimpleNodeParser.from_defaults()
llama_index.core.node_parser.SimpleNodeParser.from_defaults
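A minimal sketch of the next step: the default parser splits the loaded essay into nodes.

```python
nodes = node_parser.get_nodes_from_documents(documents)
print(f"Parsed {len(nodes)} nodes")
```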
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2) Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("../data/paul_graham").load_data() from llama_index.core import Settings Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) from llama_index.core import StorageContext storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) from llama_index.core import SummaryIndex from llama_index.core import VectorStoreIndex summary_index =
SummaryIndex(nodes, storage_context=storage_context)
llama_index.core.SummaryIndex
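A sketch of the usual companion step in this recipe: a vector index is built over the same nodes and shared docstore, and both indices become query engines.

```python
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)

summary_query_engine = summary_index.as_query_engine(
    response_mode="tree_summarize", use_async=True
)
vector_query_engine = vector_index.as_query_engine()
```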
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index llama-hub') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') domain = "docs.llamaindex.ai" docs_url = "https://docs.llamaindex.ai/en/latest/" get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}') from llama_index.readers.file import UnstructuredReader reader =
UnstructuredReader()
llama_index.readers.file.UnstructuredReader
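A sketch of feeding the mirrored site into the reader; the directory name assumes wget's convention of mirroring into a folder named after the domain, and only a small sample is loaded.

```python
from pathlib import Path

all_html_files = sorted(Path("docs.llamaindex.ai").rglob("*.html"))

docs = []
for html_file in all_html_files[:5]:  # small sample to keep it fast
    docs.extend(reader.load_data(file=html_file, split_documents=False))
```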
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-metal') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.metal import MetalVectorStore from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.core import StorageContext api_key = "api key" client_id = "client id" index_id = "index id" vector_store = MetalVectorStore( api_key=api_key, client_id=client_id, index_id=index_id, ) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
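A minimal sketch of indexing into Metal through the storage context and running a query (query text assumed).

```python
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
```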
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai pandas[jinja2] spacy') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( TreeIndex, VectorStoreIndex, SimpleDirectoryReader, Response, ) from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import RelevancyEvaluator from llama_index.core.node_parser import SentenceSplitter import pandas as pd pd.set_option("display.max_colwidth", 0) gpt3 =
OpenAI(temperature=0, model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
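A sketch of how this evaluation setup is typically completed: GPT-4 acts as the relevancy judge over a response produced by a small vector index (the data path and query are assumed examples).

```python
gpt4 = OpenAI(temperature=0, model="gpt-4")
evaluator_gpt4 = RelevancyEvaluator(llm=gpt4)

# Assumed data path; any SimpleDirectoryReader corpus works here.
documents = SimpleDirectoryReader("./data").load_data()
vector_index = VectorStoreIndex.from_documents(
    documents, transformations=[SentenceSplitter(chunk_size=512)]
)

query_str = "What did the author do growing up?"
response = vector_index.as_query_engine().query(query_str)
eval_result = evaluator_gpt4.evaluate_response(
    query=query_str, response=response
)
print(eval_result.passing)
```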
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys from llama_index.core import SimpleDirectoryReader from llama_index.core import SummaryIndex logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) wiki_titles = ["Michael Jordan", "Elon Musk", "Richard Branson", "Rihanna"] wiki_metadatas = { "Michael Jordan": { "category": "Sports", "country": "United States", }, "Elon Musk": { "category": "Business", "country": "United States", }, "Richard Branson": { "category": "Business", "country": "UK", }, "Rihanna": { "category": "Music", "country": "Barbados", }, } from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) docs_dict = {} for wiki_title in wiki_titles: doc = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data()[0] doc.metadata.update(wiki_metadatas[wiki_title]) docs_dict[wiki_title] = doc from llama_index.llms.openai import OpenAI from llama_index.core.callbacks import LlamaDebugHandler, CallbackManager from llama_index.core.node_parser import SentenceSplitter llm = OpenAI("gpt-4") callback_manager = CallbackManager([LlamaDebugHandler()]) splitter = SentenceSplitter(chunk_size=256) import weaviate auth_config = weaviate.AuthApiKey(api_key="<api_key>") client = weaviate.Client( "https://llama-index-test-v0oggsoz.weaviate.network", auth_client_secret=auth_config, ) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.weaviate import WeaviateVectorStore from IPython.display import Markdown, display client.schema.delete_class("LlamaIndex") from llama_index.core import StorageContext vector_store = WeaviateVectorStore( weaviate_client=client, index_name="LlamaIndex" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) class_schema = client.schema.get("LlamaIndex") display(class_schema) index = VectorStoreIndex( [], storage_context=storage_context, transformations=[splitter], callback_manager=callback_manager, ) for wiki_title in wiki_titles: index.insert(docs_dict[wiki_title]) from llama_index.core.retrievers import VectorIndexAutoRetriever from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo vector_store_info = VectorStoreInfo( content_info="brief biography of celebrities", metadata_info=[ MetadataInfo( name="category", type="str", description=( "Category of the celebrity, one of [Sports, Entertainment," " Business, Music]" ), ), MetadataInfo( name="country", type="str", description=( "Country of the celebrity, one of [United States, Barbados," " Portugal]" ), ), ], ) retriever = VectorIndexAutoRetriever( index, vector_store_info=vector_store_info, llm=llm, callback_manager=callback_manager, max_top_k=10000, ) nodes = retriever.retrieve( "Tell me about a celebrity from the United States, set top k to 10000" ) print(f"Number of nodes: {len(nodes)}") for node in nodes[:10]: 
print(node.node.get_content()) nodes = retriever.retrieve( "Tell me about the childhood of a popular sports celebrity in the United" " States" ) for node in nodes: print(node.node.get_content()) nodes = retriever.retrieve( "Tell me about the college life of a billionaire who started at company at" " the age of 16" ) for node in nodes: print(node.node.get_content()) nodes = retriever.retrieve("Tell me about the childhood of a UK billionaire") for node in nodes: print(node.node.get_content()) from llama_index.core.schema import IndexNode nodes = [] vector_query_engines = {} vector_retrievers = {} for wiki_title in wiki_titles: vector_index = VectorStoreIndex.from_documents( [docs_dict[wiki_title]], transformations=[splitter], callback_manager=callback_manager, ) vector_query_engine = vector_index.as_query_engine(llm=llm) vector_query_engines[wiki_title] = vector_query_engine vector_retrievers[wiki_title] = vector_index.as_retriever() out_path = Path("summaries") / f"{wiki_title}.txt" if not out_path.exists(): summary_index = SummaryIndex.from_documents( [docs_dict[wiki_title]], callback_manager=callback_manager ) summarizer = summary_index.as_query_engine( response_mode="tree_summarize", llm=llm ) response = await summarizer.aquery( f"Give me a summary of {wiki_title}" ) wiki_summary = response.response Path("summaries").mkdir(exist_ok=True) with open(out_path, "w") as fp: fp.write(wiki_summary) else: with open(out_path, "r") as fp: wiki_summary = fp.read() print(f"**Summary for {wiki_title}: {wiki_summary}") node =
IndexNode(text=wiki_summary, index_id=wiki_title)
llama_index.core.schema.IndexNode
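A sketch of where these summary `IndexNode`s usually lead: they are collected into a top-level vector index, and a `RecursiveRetriever` routes a hit on a summary node into that title's full retriever. The wiring below assumes the loop appends each node as shown.

```python
from llama_index.core.retrievers import RecursiveRetriever

nodes.append(node)  # executed inside the per-title loop above

# After the loop: a top-level index over the summary IndexNodes.
top_vector_index = VectorStoreIndex(
    nodes, transformations=[splitter], callback_manager=callback_manager
)
top_vector_retriever = top_vector_index.as_retriever(similarity_top_k=1)

recursive_retriever = RecursiveRetriever(
    "vector",
    retriever_dict={"vector": top_vector_retriever, **vector_retrievers},
    verbose=True,
)
```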
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index = VectorStoreIndex(base_nodes) query_engine = index.as_query_engine(similarity_top_k=2) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.core.node_parser import SimpleNodeParser dataset_generator = DatasetGenerator( base_nodes[:20], llm=OpenAI(model="gpt-4"), show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) import random full_qr_pairs = eval_dataset.qr_pairs num_exemplars = 2 num_eval = 40 exemplar_qr_pairs = random.sample(full_qr_pairs, num_exemplars) eval_qr_pairs = random.sample(full_qr_pairs, num_eval) len(exemplar_qr_pairs) from llama_index.core.evaluation.eval_utils import get_responses from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner evaluator_c = CorrectnessEvaluator(llm=OpenAI(model="gpt-3.5-turbo")) evaluator_dict = { "correctness": evaluator_c, } batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True) async def get_correctness(query_engine, eval_qa_pairs, batch_runner): eval_qs = [q for q, _ in eval_qa_pairs] eval_answers = [a for _, a in eval_qa_pairs] pred_responses = get_responses(eval_qs, query_engine, show_progress=True) eval_results = await batch_runner.aevaluate_responses( eval_qs, responses=pred_responses, reference=eval_answers ) avg_correctness = np.array( [r.score for r in eval_results["correctness"]] ).mean() return avg_correctness QA_PROMPT_KEY = "response_synthesizer:text_qa_template" from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate llm = OpenAI(model="gpt-3.5-turbo") qa_tmpl_str = ( "---------------------\n" "{context_str}\n" "---------------------\n" "Query: {query_str}\n" "Answer: " ) qa_tmpl = PromptTemplate(qa_tmpl_str) print(query_engine.get_prompts()[QA_PROMPT_KEY].get_template()) meta_tmpl_str = """\ Your task is to generate the instruction <INS>. Below are some previous instructions with their scores. The score ranges from 1 to 5. {prev_instruction_score_pairs} Below we show the task. The <INS> tag is prepended to the below prompt template, e.g. as follows: ``` <INS> {prompt_tmpl_str} ``` The prompt template contains template variables. 
Given an input set of template variables, the formatted prompt is then given to an LLM to get an output. Some examples of template variable inputs and expected outputs are given below to illustrate the task. **NOTE**: These do NOT represent the \ entire evaluation dataset. {qa_pairs_str} We run every input in an evaluation dataset through an LLM. If the LLM-generated output doesn't match the expected output, we mark it as wrong (score 0). A correct answer has a score of 1. The final "score" for an instruction is the average of scores across an evaluation dataset. Write your new instruction (<INS>) that is different from the old ones and has a score as high as possible. Instruction (<INS>): \ """ meta_tmpl = PromptTemplate(meta_tmpl_str) from copy import deepcopy def format_meta_tmpl( prev_instr_score_pairs, prompt_tmpl_str, qa_pairs, meta_tmpl, ): """Call meta-prompt to generate new instruction.""" pair_str_list = [ f"Instruction (<INS>):\n{instr}\nScore:\n{score}" for instr, score in prev_instr_score_pairs ] full_instr_pair_str = "\n\n".join(pair_str_list) qa_str_list = [ f"query_str:\n{query_str}\nAnswer:\n{answer}" for query_str, answer in qa_pairs ] full_qa_pair_str = "\n\n".join(qa_str_list) fmt_meta_tmpl = meta_tmpl.format( prev_instruction_score_pairs=full_instr_pair_str, prompt_tmpl_str=prompt_tmpl_str, qa_pairs_str=full_qa_pair_str, ) return fmt_meta_tmpl def get_full_prompt_template(cur_instr: str, prompt_tmpl): tmpl_str = prompt_tmpl.get_template() new_tmpl_str = cur_instr + "\n" + tmpl_str new_tmpl = PromptTemplate(new_tmpl_str) return new_tmpl import numpy as np def _parse_meta_response(meta_response: str): return str(meta_response).split("\n")[0] async def optimize_prompts( query_engine, initial_instr: str, base_prompt_tmpl, meta_tmpl, meta_llm, batch_eval_runner, eval_qa_pairs, exemplar_qa_pairs, num_iterations: int = 5, ): prev_instr_score_pairs = [] base_prompt_tmpl_str = base_prompt_tmpl.get_template() cur_instr = initial_instr for idx in range(num_iterations): if idx > 0: fmt_meta_tmpl = format_meta_tmpl( prev_instr_score_pairs, base_prompt_tmpl_str, exemplar_qa_pairs, meta_tmpl, ) meta_response = meta_llm.complete(fmt_meta_tmpl) print(fmt_meta_tmpl) print(str(meta_response)) cur_instr = _parse_meta_response(meta_response) new_prompt_tmpl = get_full_prompt_template(cur_instr, base_prompt_tmpl) query_engine.update_prompts({QA_PROMPT_KEY: new_prompt_tmpl}) avg_correctness = await get_correctness( query_engine, eval_qa_pairs, batch_runner ) prev_instr_score_pairs.append((cur_instr, avg_correctness)) max_instr_score_pair = max( prev_instr_score_pairs, key=lambda item: item[1] ) return max_instr_score_pair[0], prev_instr_score_pairs query_engine = index.as_query_engine(similarity_top_k=2) base_qa_prompt = query_engine.get_prompts()[QA_PROMPT_KEY] initial_instr = """\ You are a QA assistant. Context information is below. Given the context information and not prior knowledge, \ answer the query. \ """ old_qa_prompt = get_full_prompt_template(initial_instr, base_qa_prompt) meta_llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
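A sketch of kicking off the optimization loop the row defines, using the names it builds up (`num_iterations=3` is an assumed cost-saving choice; the function is async, so this runs in a notebook with an event loop).

```python
new_instr, prev_instr_score_pairs = await optimize_prompts(
    query_engine,
    initial_instr,
    base_qa_prompt,
    meta_tmpl,
    meta_llm,
    batch_runner,
    eval_qr_pairs,
    exemplar_qr_pairs,
    num_iterations=3,  # assumed; smaller than the default to limit cost
)
print(new_instr)
```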
get_ipython().system('pip install llama-index') get_ipython().system('pip install wget') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-azureaisearch') get_ipython().run_line_magic('pip', 'install azure-search-documents==11.4.0') get_ipython().run_line_magic('llama-index-embeddings-azure-openai', '') get_ipython().run_line_magic('llama-index-llms-azure-openai', '') import logging import sys from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient from azure.search.documents.indexes import SearchIndexClient from IPython.display import Markdown, display from llama_index.core import ( SimpleDirectoryReader, StorageContext, VectorStoreIndex, ) from llama_index.core.settings import Settings from llama_index.llms.azure_openai import AzureOpenAI from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding from llama_index.vector_stores.azureaisearch import AzureAISearchVectorStore from llama_index.vector_stores.azureaisearch import ( IndexManagement, MetadataIndexFieldType, ) aoai_api_key = "YOUR_AZURE_OPENAI_API_KEY" aoai_endpoint = "YOUR_AZURE_OPENAI_ENDPOINT" aoai_api_version = "2023-05-15" llm = AzureOpenAI( model="YOUR_AZURE_OPENAI_COMPLETION_MODEL_NAME", deployment_name="YOUR_AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME", api_key=aoai_api_key, azure_endpoint=aoai_endpoint, api_version=aoai_api_version, ) embed_model = AzureOpenAIEmbedding( model="YOUR_AZURE_OPENAI_EMBEDDING_MODEL_NAME", deployment_name="YOUR_AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", api_key=aoai_api_key, azure_endpoint=aoai_endpoint, api_version=aoai_api_version, ) search_service_api_key = "YOUR-AZURE-SEARCH-SERVICE-ADMIN-KEY" search_service_endpoint = "YOUR-AZURE-SEARCH-SERVICE-ENDPOINT" search_service_api_version = "2023-11-01" credential = AzureKeyCredential(search_service_api_key) index_name = "llamaindex-vector-demo" index_client = SearchIndexClient( endpoint=search_service_endpoint, credential=credential, ) search_client = SearchClient( endpoint=search_service_endpoint, index_name=index_name, credential=credential, ) metadata_fields = { "author": "author", "theme": ("topic", MetadataIndexFieldType.STRING), "director": "director", } vector_store = AzureAISearchVectorStore( search_or_index_client=index_client, filterable_metadata_field_keys=metadata_fields, index_name=index_name, index_management=IndexManagement.CREATE_IF_NOT_EXISTS, id_field_key="id", chunk_field_key="chunk", embedding_field_key="embedding", embedding_dimensionality=1536, metadata_string_field_key="metadata", doc_id_field_key="doc_id", language_analyzer="en.lucene", vector_algorithm_type="exhaustiveKnn", ) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("../data/paul_graham/").load_data() storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
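A minimal sketch of the continuation: register the Azure OpenAI models globally, build the index into Azure AI Search, and query (query text assumed).

```python
from llama_index.core.settings import Settings

Settings.llm = llm
Settings.embed_model = embed_model

index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
```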
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response = AgentFnComponent(fn=process_agent_response_fn) from llama_index.core.query_pipeline import QueryPipeline as QP from llama_index.llms.openai import OpenAI qp.add_modules( { "agent_input": agent_input_component, "react_prompt": react_prompt_component, "llm": OpenAI(model="gpt-4-1106-preview"), "react_output_parser": parse_react_output, "run_tool": run_tool, "process_response": process_response, "process_agent_response": process_agent_response, } ) qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"]) qp.add_link( "react_output_parser", "run_tool", condition_fn=lambda x: not x["done"], input_fn=lambda x: x["reasoning_step"], ) qp.add_link( "react_output_parser", "process_response", condition_fn=lambda x: x["done"], input_fn=lambda x: 
x["reasoning_step"], ) qp.add_link("process_response", "process_agent_response") qp.add_link("run_tool", "process_agent_response") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.clean_dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker = QueryPipelineAgentWorker(qp) agent = AgentRunner( agent_worker, callback_manager=CallbackManager([]), verbose=True ) task = agent.create_task( "What are some tracks from the artist AC/DC? Limit it to 3" ) step_output = agent.run_step(task.task_id) step_output = agent.run_step(task.task_id) step_output.is_last response = agent.finalize_response(task.task_id) print(str(response)) agent.reset() response = agent.chat( "What are some tracks from the artist AC/DC? Limit it to 3" ) print(str(response)) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") from llama_index.core.agent import Task, AgentChatResponse from typing import Dict, Any from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, ) def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict: """Agent input function.""" if "convo_history" not in state: state["convo_history"] = [] state["count"] = 0 state["convo_history"].append(f"User: {task.input}") convo_history_str = "\n".join(state["convo_history"]) or "None" return {"input": task.input, "convo_history": convo_history_str} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core import PromptTemplate retry_prompt_str = """\ You are trying to generate a proper natural language query given a user input. This query will then be interpreted by a downstream text-to-SQL agent which will convert the query to a SQL statement. If the agent triggers an error, then that will be reflected in the current conversation history (see below). If the conversation history is None, use the user input. If its not None, generate a new SQL query that avoids the problems of the previous SQL query. Input: {input} Convo history (failed attempts): {convo_history} New input: """ retry_prompt = PromptTemplate(retry_prompt_str) from llama_index.core import Response from typing import Tuple validate_prompt_str = """\ Given the user query, validate whether the inferred SQL query and response from executing the query is correct and answers the query. Answer with YES or NO. 
Query: {input} Inferred SQL query: {sql_query} SQL Response: {sql_response} Result: """ validate_prompt = PromptTemplate(validate_prompt_str) MAX_ITER = 3 def agent_output_fn( task: Task, state: Dict[str, Any], output: Response ) -> Tuple[AgentChatResponse, bool]: """Agent output component.""" print(f"> Inferred SQL Query: {output.metadata['sql_query']}") print(f"> SQL Response: {str(output)}") state["convo_history"].append( f"Assistant (inferred SQL query): {output.metadata['sql_query']}" ) state["convo_history"].append(f"Assistant (response): {str(output)}") validate_prompt_partial = validate_prompt.as_query_component( partial={ "sql_query": output.metadata["sql_query"], "sql_response": str(output), } ) qp = QP(chain=[validate_prompt_partial, llm]) validate_output = qp.run(input=task.input) state["count"] += 1 is_done = False if state["count"] >= MAX_ITER: is_done = True if "YES" in validate_output.message.content: is_done = True return AgentChatResponse(response=str(output)), is_done agent_output_component = AgentFnComponent(fn=agent_output_fn) from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, ) qp = QP( modules={ "input": agent_input_component, "retry_prompt": retry_prompt, "llm": llm, "sql_query_engine": sql_query_engine, "output_component": agent_output_component, }, verbose=True, ) qp.add_link("input", "retry_prompt", src_key="input", dest_key="input") qp.add_link( "input", "retry_prompt", src_key="convo_history", dest_key="convo_history" ) qp.add_chain(["retry_prompt", "llm", "sql_query_engine", "output_component"]) from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker =
QueryPipelineAgentWorker(qp)
llama_index.core.agent.QueryPipelineAgentWorker
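A sketch of driving the retry pipeline: the worker is wrapped in an `AgentRunner`, mirroring the first agent earlier in this row (the question is reused from that section).

```python
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager

agent = AgentRunner(
    agent_worker, callback_manager=CallbackManager([]), verbose=False
)
response = agent.chat(
    "What are some tracks from the artist AC/DC? Limit it to 3"
)
print(str(response))
```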
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-cross-encoders') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system('pip install datasets --quiet') get_ipython().system('pip install sentence-transformers --quiet') get_ipython().system('pip install openai --quiet') from datasets import load_dataset import random dataset = load_dataset("allenai/qasper") train_dataset = dataset["train"] validation_dataset = dataset["validation"] test_dataset = dataset["test"] random.seed(42) # Set a random seed for reproducibility train_sampled_indices = random.sample(range(len(train_dataset)), 800) train_samples = [train_dataset[i] for i in train_sampled_indices] test_sampled_indices = random.sample(range(len(test_dataset)), 80) test_samples = [test_dataset[i] for i in test_sampled_indices] from typing import List def get_full_text(sample: dict) -> str: """ :param dict sample: the row sample from QASPER """ title = sample["title"] abstract = sample["abstract"] sections_list = sample["full_text"]["section_name"] paragraph_list = sample["full_text"]["paragraphs"] combined_sections_with_paras = "" if len(sections_list) == len(paragraph_list): combined_sections_with_paras += title + "\t" combined_sections_with_paras += abstract + "\t" for index in range(0, len(sections_list)): combined_sections_with_paras += str(sections_list[index]) + "\t" combined_sections_with_paras += "".join(paragraph_list[index]) return combined_sections_with_paras else: print("Not the same number of sections as paragraphs list") def get_questions(sample: dict) -> List[str]: """ :param dict sample: the row sample from QASPER """ questions_list = sample["qas"]["question"] return questions_list doc_qa_dict_list = [] for train_sample in train_samples: full_text = get_full_text(train_sample) questions_list = get_questions(train_sample) local_dict = {"paper": full_text, "questions": questions_list} doc_qa_dict_list.append(local_dict) len(doc_qa_dict_list) import pandas as pd df_train = pd.DataFrame(doc_qa_dict_list) df_train.to_csv("train.csv") """ The Answers field in the dataset follow the below format:- Unanswerable answers have "unanswerable" set to true. The remaining answers have exactly one of the following fields being non-empty. "extractive_spans" are spans in the paper which serve as the answer. "free_form_answer" is a written out answer. "yes_no" is true iff the answer is Yes, and false iff the answer is No. We accept only free-form answers and for all the other kind of answers we set their value to 'Unacceptable', to better evaluate the performance of the query engine using pairwise comparision evaluator as it uses GPT-4 which is biased towards preferring long answers more. https://www.anyscale.com/blog/a-comprehensive-guide-for-building-rag-based-llm-applications-part-1 So in the case of 'yes_no' answers it can favour Query Engine answers more than reference answers. Also in the case of extracted spans it can favour reference answers more than Query engine generated answers. 
""" eval_doc_qa_answer_list = [] def get_answers(sample: dict) -> List[str]: """ :param dict sample: the row sample from the train split of QASPER """ final_answers_list = [] answers = sample["qas"]["answers"] for answer in answers: local_answer = "" types_of_answers = answer["answer"][0] if types_of_answers["unanswerable"] == False: if types_of_answers["free_form_answer"] != "": local_answer = types_of_answers["free_form_answer"] else: local_answer = "Unacceptable" else: local_answer = "Unacceptable" final_answers_list.append(local_answer) return final_answers_list for test_sample in test_samples: full_text = get_full_text(test_sample) questions_list = get_questions(test_sample) answers_list = get_answers(test_sample) local_dict = { "paper": full_text, "questions": questions_list, "answers": answers_list, } eval_doc_qa_answer_list.append(local_dict) len(eval_doc_qa_answer_list) import pandas as pd df_test = pd.DataFrame(eval_doc_qa_answer_list) df_test.to_csv("test.csv") get_ipython().system('pip install llama-index --quiet') import os from llama_index.core import SimpleDirectoryReader import openai from llama_index.finetuning.cross_encoders.dataset_gen import ( generate_ce_fine_tuning_dataset, generate_synthetic_queries_over_documents, ) from llama_index.finetuning.cross_encoders import CrossEncoderFinetuneEngine os.environ["OPENAI_API_KEY"] = "sk-" openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import Document final_finetuning_data_list = [] for paper in doc_qa_dict_list: questions_list = paper["questions"] documents = [
Document(text=paper["paper"])
llama_index.core.Document
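A sketch of how the loop body likely continues, assuming the helper's keyword arguments (`max_chunk_length`, `top_k`) match the cross-encoder finetuning recipe this row is drawn from.

```python
# Build (query, passage, label) pairs over this paper's document.
local_finetuning_dataset = generate_ce_fine_tuning_dataset(
    documents=documents,
    questions_list=questions_list,
    max_chunk_length=256,  # assumed setting
    top_k=5,               # assumed setting
)
final_finetuning_data_list.extend(local_finetuning_dataset)
```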
get_ipython().system('pip install -U llama-index-multi-modal-llms-dashscope') get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY') from llama_index.multi_modal_llms.dashscope import ( DashScopeMultiModal, DashScopeMultiModalModels, ) from llama_index.core.multi_modal_llms.generic_utils import load_image_urls image_urls = [ "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg", ] image_documents =
load_image_urls(image_urls)
llama_index.core.multi_modal_llms.generic_utils.load_image_urls
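A minimal sketch of sending the loaded image to the multi-modal model (the prompt string is an assumed example).

```python
mm_llm = DashScopeMultiModal(
    model_name=DashScopeMultiModalModels.QWEN_VL_MAX,
)
resp = mm_llm.complete(
    prompt="What's in the image?",
    image_documents=image_documents,
)
print(resp)
```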
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.huggingface import HuggingFaceLLM from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.core import PromptTemplate query_wrapper_prompt = PromptTemplate( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{query_str}\n\n### Response:" ) import torch llm = HuggingFaceLLM( context_window=2048, max_new_tokens=256, generate_kwargs={"temperature": 0.25, "do_sample": False}, query_wrapper_prompt=query_wrapper_prompt, tokenizer_name="Writer/camel-5b-hf", model_name="Writer/camel-5b-hf", device_map="auto", tokenizer_kwargs={"max_length": 2048}, ) Settings.chunk_size = 512 Settings.llm = llm index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
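A minimal sketch of querying the index with the local camel-5b model configured above (query text assumed).

```python
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)
```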
import openai openai.api_key = "sk-your-key" from llama_index.agent import OpenAIAgent from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec from llama_index.tools.google_search.base import GoogleSearchToolSpec google_spec =
GoogleSearchToolSpec(key="your-key", engine="your-engine")
llama_index.tools.google_search.base.GoogleSearchToolSpec
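A sketch of the load-and-search pattern this row imports: the raw Google search tool is wrapped so results are first loaded into an index and then searched, rather than dumped into the prompt wholesale (the agent question is an assumed example).

```python
tools = LoadAndSearchToolSpec.from_defaults(
    google_spec.to_tool_list()[0],
).to_tool_list()

agent = OpenAIAgent.from_tools(tools, verbose=True)
print(agent.chat("what is the most recent version of llama-index?"))
```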
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.agent import ( CustomSimpleAgentWorker, Task, AgentChatResponse, ) from typing import Dict, Any, List, Tuple, Optional from llama_index.core.tools import BaseTool, QueryEngineTool from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.query_engine import RouterQueryEngine from llama_index.core import ChatPromptTemplate, PromptTemplate from llama_index.core.selectors import PydanticSingleSelector from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.llms import ChatMessage, MessageRole DEFAULT_PROMPT_STR = """ Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \ a modified question that will not trigger the error. Examples of modified questions: - The question itself is modified to elicit a non-erroneous response - The question is augmented with context that will help the downstream system better answer the question. - The question is augmented with examples of negative responses, or other negative questions. An error means that either an exception has triggered, or the response is completely irrelevant to the question. Please return the evaluation of the response in the following JSON format. """ def get_chat_prompt_template( system_prompt: str, current_reasoning: Tuple[str, str] ) -> ChatPromptTemplate: system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt) messages = [system_msg] for raw_msg in current_reasoning: if raw_msg[0] == "user": messages.append( ChatMessage(role=MessageRole.USER, content=raw_msg[1]) ) else: messages.append( ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1]) ) return ChatPromptTemplate(message_templates=messages) class ResponseEval(BaseModel): """Evaluation of whether the response has an error.""" has_error: bool = Field( ..., description="Whether the response has an error." ) new_question: str = Field(..., description="The suggested new question.") explanation: str = Field( ..., description=( "The explanation for the error as well as for the new question." "Can include the direct stack trace as well." ), ) from llama_index.core.bridge.pydantic import PrivateAttr class RetryAgentWorker(CustomSimpleAgentWorker): """Agent worker that adds a retry layer on top of a router. Continues iterating until there's no errors / task is done. """ prompt_str: str = Field(default=DEFAULT_PROMPT_STR) max_iterations: int = Field(default=10) _router_query_engine: RouterQueryEngine = PrivateAttr() def __init__(self, tools: List[BaseTool], **kwargs: Any) -> None: """Init params.""" for tool in tools: if not isinstance(tool, QueryEngineTool): raise ValueError( f"Tool {tool.metadata.name} is not a query engine tool." ) self._router_query_engine = RouterQueryEngine( selector=PydanticSingleSelector.from_defaults(), query_engine_tools=tools, verbose=kwargs.get("verbose", False), ) super().__init__( tools=tools, **kwargs, ) def _initialize_state(self, task: Task, **kwargs: Any) -> Dict[str, Any]: """Initialize state.""" return {"count": 0, "current_reasoning": []} def _run_step( self, state: Dict[str, Any], task: Task, input: Optional[str] = None ) -> Tuple[AgentChatResponse, bool]: """Run step. 
Returns: Tuple of (agent_response, is_done) """ if "new_input" not in state: new_input = task.input else: new_input = state["new_input"] response = self._router_query_engine.query(new_input) state["current_reasoning"].extend( [("user", new_input), ("assistant", str(response))] ) chat_prompt_tmpl = get_chat_prompt_template( self.prompt_str, state["current_reasoning"] ) llm_program = LLMTextCompletionProgram.from_defaults( output_parser=
PydanticOutputParser(output_cls=ResponseEval)
llama_index.core.output_parsers.PydanticOutputParser
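A sketch of how the finished worker is usually instantiated, once the class's remaining methods are filled in; the tool list and question here are hypothetical placeholders, not from the row.

```python
from llama_index.core.agent import AgentRunner
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-4")
agent_worker = RetryAgentWorker.from_tools(
    query_engine_tools,  # hypothetical list of QueryEngineTools
    llm=llm,
    verbose=True,
)
agent = AgentRunner(agent_worker)
response = agent.chat("Which countries are each city in?")  # hypothetical
```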
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, SimpleKeywordTableIndex, ) from llama_index.core import SummaryIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context) list_retriever = summary_index.as_retriever() vector_retriever = vector_index.as_retriever() keyword_retriever = keyword_index.as_retriever() from llama_index.core.tools import RetrieverTool list_tool = RetrieverTool.from_defaults( retriever=list_retriever, description=( "Will retrieve all context from Paul Graham's essay on What I Worked" " On. Don't use if the question only requires more specific context." ), ) vector_tool = RetrieverTool.from_defaults( retriever=vector_retriever, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On." ), ) keyword_tool = RetrieverTool.from_defaults( retriever=keyword_retriever, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On (using entities mentioned in query)" ), ) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) from llama_index.core.retrievers import RouterRetriever from llama_index.core.response.notebook_utils import display_source_node retriever = RouterRetriever( selector=
PydanticSingleSelector.from_defaults(llm=llm)
llama_index.core.selectors.PydanticSingleSelector.from_defaults
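A sketch of the completed router: the selector picks among the three retriever tools defined in the row, and retrieved nodes are displayed with the imported helper.

```python
retriever = RouterRetriever(
    selector=PydanticSingleSelector.from_defaults(llm=llm),
    retriever_tools=[list_tool, vector_tool, keyword_tool],
)

nodes = retriever.retrieve(
    "Can you give me all the context regarding the author's life?"
)
for node in nodes:
    display_source_node(node)
```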
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.llms.openai import OpenAI from llama_index.core.tools import QueryEngineTool, ToolMetadata llm_35 = OpenAI(model="gpt-3.5-turbo-0613", temperature=0.3) llm_4 = OpenAI(model="gpt-4-0613", temperature=0.3) try: storage_context = StorageContext.from_defaults( persist_dir="./storage/march" ) march_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/june" ) june_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/sept" ) sept_index = load_index_from_storage(storage_context) index_loaded = True except: index_loaded = False if not index_loaded: march_docs = SimpleDirectoryReader( input_files=["../../data/10q/uber_10q_march_2022.pdf"] ).load_data() june_docs = SimpleDirectoryReader( input_files=["../../data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_docs = SimpleDirectoryReader( input_files=["../../data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index = VectorStoreIndex.from_documents( march_docs, ) june_index = VectorStoreIndex.from_documents( june_docs, ) sept_index = VectorStoreIndex.from_documents( sept_docs, ) march_index.storage_context.persist(persist_dir="./storage/march") june_index.storage_context.persist(persist_dir="./storage/june") sept_index.storage_context.persist(persist_dir="./storage/sept") march_engine = march_index.as_query_engine(similarity_top_k=3, llm=llm_35) june_engine = june_index.as_query_engine(similarity_top_k=3, llm=llm_35) sept_engine = sept_index.as_query_engine(similarity_top_k=3, llm=llm_35) from llama_index.core.tools import QueryEngineTool query_tool_sept = QueryEngineTool.from_defaults( query_engine=sept_engine, name="sept_2022", description=( f"Provides information about Uber quarterly financials ending" f" September 2022" ), ) query_tool_june = QueryEngineTool.from_defaults( query_engine=june_engine, name="june_2022", description=( f"Provides information about Uber quarterly financials ending June" f" 2022" ), ) query_tool_march = QueryEngineTool.from_defaults( query_engine=march_engine, name="march_2022", description=( f"Provides information about Uber quarterly financials ending March" f" 2022" ), ) query_engine_tools = [query_tool_march, query_tool_june, query_tool_sept] from llama_index.core.agent import ReActAgent from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-3.5-turbo-0613")
llama_index.llms.openai.OpenAI
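A minimal sketch of building the ReAct agent over the three quarterly-report tools (the analysis question is an assumed example).

```python
base_agent = ReActAgent.from_tools(query_engine_tools, llm=llm, verbose=True)
response = base_agent.chat(
    "Analyze Uber revenue growth over the last few quarters"
)
print(str(response))
```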
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( SimpleDirectoryReader, StorageContext, VectorStoreIndex, ) from llama_index.retrievers.bm25 import BM25Retriever from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context =
StorageContext.from_defaults()
llama_index.core.StorageContext.from_defaults
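A sketch of the usual next steps in this recipe: the nodes go into the docstore and a vector index, and a BM25 retriever is built over the same nodes for lexical retrieval alongside the embedding retriever.

```python
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(nodes, storage_context=storage_context)

vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
```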
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-faiss') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import faiss d = 1536 faiss_index = faiss.IndexFlatL2(d) from llama_index.core import ( SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext, ) from llama_index.vector_stores.faiss import FaissVectorStore from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store = FaissVectorStore(faiss_index=faiss_index) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) index.storage_context.persist() vector_store = FaissVectorStore.from_persist_dir("./storage") storage_context = StorageContext.from_defaults( vector_store=vector_store, persist_dir="./storage" ) index =
load_index_from_storage(storage_context=storage_context)
llama_index.core.load_index_from_storage
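A minimal sketch of querying the FAISS-backed index after reloading it from disk (query text assumed).

```python
query_engine = index.as_query_engine()
response = query_engine.query(
    "What did the author do after his time at Y Combinator?"
)
display(Markdown(f"<b>{response}</b>"))
```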
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings from llama_index.core import StorageContext, VectorStoreIndex from llama_index.core import SummaryIndex Settings.llm = OpenAI() Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) summary_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", use_async=True, ) vector_query_engine = vector_index.as_query_engine() from llama_index.core.tools import QueryEngineTool summary_tool = QueryEngineTool.from_defaults( query_engine=summary_query_engine, name="summary_tool", description=( "Useful for summarization questions related to the author's life" ), ) vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, name="vector_tool", description=( "Useful for retrieving specific context to answer specific questions about the author's life" ), ) from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="QA bot", instructions="You are a bot designed to answer questions about the author", openai_tools=[], tools=[summary_tool, vector_tool], verbose=True, run_retrieve_sleep_time=1.0, ) response = agent.chat("Can you give me a summary about the author's life?") print(str(response)) response = agent.query("What did the author do after RICS?") print(str(response)) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") try: pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) except Exception: pass pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True, namespace="test") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. 
He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", }, ), TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." ), metadata={ "category": "Music", "country": "Barbados", }, ), TextNode( text=( "Cristiano Ronaldo is a Portuguese professional footballer who is" " considered one of the greatest football players of all time. He" " has won numerous awards and set multiple records during his" " career." ), metadata={ "category": "Sports", "country": "Portugal", }, ), ] vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="test" ) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
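A minimal follow-up sketch (not part of the row above) showing how the celebrity nodes could be indexed and then retrieved with a metadata filter; the query string is illustrative:
from llama_index.core import VectorStoreIndex
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

index = VectorStoreIndex(nodes, storage_context=storage_context)
# restrict retrieval to nodes tagged with a given country
filters = MetadataFilters(filters=[ExactMatchFilter(key="country", value="Portugal")])
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("celebrity athletes"))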
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-myscale') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from os import environ import clickhouse_connect environ["OPENAI_API_KEY"] = "sk-*" client = clickhouse_connect.get_client( host="YOUR_CLUSTER_HOST", port=8443, username="YOUR_USERNAME", password="YOUR_CLUSTER_PASSWORD", ) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.myscale import MyScaleVectorStore from IPython.display import Markdown, display documents = SimpleDirectoryReader("../data/paul_graham").load_data() print("Document ID:", documents[0].doc_id) print("Number of Documents: ", len(documents)) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") loader = SimpleDirectoryReader("./data/paul_graham/") documents = loader.load_data() for file in loader.input_files: print(file) from llama_index.core import StorageContext for document in documents: document.metadata = {"user_id": "123", "favorite_color": "blue"} vector_store = MyScaleVectorStore(myscale_client=client) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass import openai openai.api_key = "sk-" import chromadb chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") from llama_index.core import VectorStoreIndex from llama_index.vector_stores.chroma import ChromaVectorStore from IPython.display import Markdown, display from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", "year": 1994, }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", "theme": "Fiction", "year": 2010, }, ), TextNode( text="To Kill a Mockingbird", metadata={ "author": "Harper Lee", "theme": "Mafia", "year": 1960, }, ), TextNode( text="1984", metadata={ "author": "George Orwell", "theme": "Totalitarianism", "year": 1949, }, ), TextNode( text="The Great Gatsby", metadata={ "author": "F. Scott Fitzgerald", "theme": "The American Dream", "year": 1925, }, ), TextNode( text="Harry Potter and the Sorcerer's Stone", metadata={ "author": "J.K. Rowling", "theme": "Fiction", "year": 1997, }, ), ] from llama_index.core import StorageContext vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-postgres') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp') from llama_index.embeddings.huggingface import HuggingFaceEmbedding embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en") get_ipython().system('pip install llama-cpp-python') from llama_index.llms.llama_cpp import LlamaCPP model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf" llm = LlamaCPP( model_url=model_url, model_path=None, temperature=0.1, max_new_tokens=256, context_window=3900, generate_kwargs={}, model_kwargs={"n_gpu_layers": 1}, verbose=True, ) get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet') import psycopg2 db_name = "vector_db" host = "localhost" password = "password" port = "5432" user = "jerry" conn = psycopg2.connect( dbname="postgres", host=host, password=password, port=port, user=user, ) conn.autocommit = True with conn.cursor() as c: c.execute(f"DROP DATABASE IF EXISTS {db_name}") c.execute(f"CREATE DATABASE {db_name}") from sqlalchemy import make_url from llama_index.vector_stores.postgres import PGVectorStore vector_store = PGVectorStore.from_params( database=db_name, host=host, password=password, port=port, user=user, table_name="llama2_paper", embed_dim=384, # bge-small-en embedding dimension ) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core.node_parser import SentenceSplitter text_parser = SentenceSplitter( chunk_size=1024, ) text_chunks = [] doc_idxs = [] for doc_idx, doc in enumerate(documents): cur_text_chunks = text_parser.split_text(doc.text) text_chunks.extend(cur_text_chunks) doc_idxs.extend([doc_idx] * len(cur_text_chunks)) from llama_index.core.schema import TextNode nodes = [] for idx, text_chunk in enumerate(text_chunks): node = TextNode( text=text_chunk, ) src_doc = documents[doc_idxs[idx]] node.metadata = src_doc.metadata nodes.append(node) for node in nodes: node_embedding = embed_model.get_text_embedding( node.get_content(metadata_mode="all") ) node.embedding = node_embedding vector_store.add(nodes) query_str = "Can you tell me about the key concepts for safety finetuning" query_embedding = embed_model.get_query_embedding(query_str) from llama_index.core.vector_stores import VectorStoreQuery query_mode = "default" vector_store_query = VectorStoreQuery( query_embedding=query_embedding, similarity_top_k=2, mode=query_mode ) query_result = vector_store.query(vector_store_query) print(query_result.nodes[0].get_content()) from llama_index.core.schema import NodeWithScore from typing import Optional nodes_with_scores = [] for index, node in enumerate(query_result.nodes): score: Optional[float] = None if query_result.similarities is not None: score = query_result.similarities[index] nodes_with_scores.append(NodeWithScore(node=node, score=score)) from llama_index.core import QueryBundle from llama_index.core.retrievers import BaseRetriever from typing import Any, List class VectorDBRetriever(BaseRetriever): """Retriever over a postgres vector store.""" def __init__( self,
vector_store: PGVectorStore, embed_model: Any, query_mode: str = "default", similarity_top_k: int = 2, ) -> None: """Init params.""" self._vector_store = vector_store self._embed_model = embed_model self._query_mode = query_mode self._similarity_top_k = similarity_top_k super().__init__() def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: """Retrieve.""" query_embedding = self._embed_model.get_query_embedding( query_bundle.query_str ) vector_store_query = VectorStoreQuery( query_embedding=query_embedding, similarity_top_k=self._similarity_top_k, mode=self._query_mode, ) query_result = self._vector_store.query(vector_store_query) nodes_with_scores = [] for index, node in enumerate(query_result.nodes): score: Optional[float] = None if query_result.similarities is not None: score = query_result.similarities[index] nodes_with_scores.append(NodeWithScore(node=node, score=score)) return nodes_with_scores retriever = VectorDBRetriever( vector_store, embed_model, query_mode="default", similarity_top_k=2 ) from llama_index.core.query_engine import RetrieverQueryEngine query_engine =
RetrieverQueryEngine.from_args(retriever, llm=llm)
llama_index.core.query_engine.RetrieverQueryEngine.from_args
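A brief usage sketch (illustrative, not part of the row above) running the same query end-to-end through the assembled engine and inspecting the retrieved sources:
response = query_engine.query(query_str)
print(str(response))
# each retrieved source carries the similarity score assigned by the custom retriever
for source_node in response.source_nodes:
    print(source_node.score, source_node.node.get_content()[:100])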
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') import nest_asyncio nest_asyncio.apply() import os HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") import pandas as pd def display_eval_df(question, source, answer_a, answer_b, result) -> None: """Pretty print question/answer + gpt-4 judgement dataset.""" eval_df = pd.DataFrame( { "Question": question, "Source": source, "Model A": answer_a["model"], "Answer A": answer_a["text"], "Model B": answer_b["model"], "Answer B": answer_b["text"], "Score": result.score, "Judgement": result.feedback, }, index=[0], ) eval_df = eval_df.style.set_properties( **{ "inline-size": "300px", "overflow-wrap": "break-word", }, subset=["Answer A", "Answer B"] ) display(eval_df) get_ipython().system('pip install wikipedia -q') from llama_index.readers.wikipedia import WikipediaReader train_cities = [ "San Francisco", "Toronto", "New York", "Vancouver", "Montreal", "Boston", ] test_cities = [ "Tokyo", "Singapore", "Paris", ] train_documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in train_cities] ) test_documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in test_cities] ) QUESTION_GEN_PROMPT = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." 
) from llama_index.core.evaluation import DatasetGenerator from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) train_dataset_generator = DatasetGenerator.from_documents( train_documents, question_gen_query=QUESTION_GEN_PROMPT, llm=llm, show_progress=True, num_questions_per_chunk=25, ) test_dataset_generator = DatasetGenerator.from_documents( test_documents, question_gen_query=QUESTION_GEN_PROMPT, llm=llm, show_progress=True, num_questions_per_chunk=25, ) train_questions = train_dataset_generator.generate_questions_from_nodes( num=200 ) test_questions = test_dataset_generator.generate_questions_from_nodes(num=150) len(train_questions), len(test_questions) train_questions[:3] test_questions[:3] from llama_index.core import VectorStoreIndex from llama_index.core.retrievers import VectorIndexRetriever train_index = VectorStoreIndex.from_documents(documents=train_documents) train_retriever = VectorIndexRetriever( index=train_index, similarity_top_k=2, ) test_index = VectorStoreIndex.from_documents(documents=test_documents) test_retriever = VectorIndexRetriever( index=test_index, similarity_top_k=2, ) from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.llms.huggingface import HuggingFaceInferenceAPI def create_query_engine( hf_name: str, retriever: VectorIndexRetriever, hf_llm_generators: dict ) -> RetrieverQueryEngine: """Create a RetrieverQueryEngine using the HuggingFaceInferenceAPI LLM""" if hf_name not in hf_llm_generators: raise KeyError("model not listed in hf_llm_generators") llm = HuggingFaceInferenceAPI( model_name=hf_llm_generators[hf_name], context_window=2048, # to use refine token=HUGGING_FACE_TOKEN, ) return RetrieverQueryEngine.from_args(retriever=retriever, llm=llm) hf_llm_generators = { "mistral-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1", "llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf", } train_query_engines = { mdl: create_query_engine(mdl, train_retriever, hf_llm_generators) for mdl in hf_llm_generators.keys() } test_query_engines = { mdl: create_query_engine(mdl, test_retriever, hf_llm_generators) for mdl in hf_llm_generators.keys() } import tqdm import random train_dataset = [] for q in tqdm.tqdm(train_questions): model_versus = random.sample(list(train_query_engines.items()), 2) data_entry = {"question": q} responses = [] source = None for name, engine in model_versus: response = engine.query(q) response_struct = {} response_struct["model"] = name response_struct["text"] = str(response) if source is not None: assert source == response.source_nodes[0].node.text[:1000] + "..." else: source = response.source_nodes[0].node.text[:1000] + "..." 
responses.append(response_struct) data_entry["answers"] = responses data_entry["source"] = source train_dataset.append(data_entry) from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.core import Settings main_finetuning_handler = OpenAIFineTuningHandler() callback_manager = CallbackManager([main_finetuning_handler]) Settings.callback_manager = callback_manager llm_4 = OpenAI(temperature=0, model="gpt-4", callback_manager=callback_manager) gpt4_judge = PairwiseComparisonEvaluator(llm=llm_4) for data_entry in tqdm.tqdm(train_dataset): final_eval_result = await gpt4_judge.aevaluate( query=data_entry["question"], response=data_entry["answers"][0]["text"], second_response=data_entry["answers"][1]["text"], reference=data_entry["source"], ) judgement = {} judgement["llm"] = "gpt_4" judgement["score"] = final_eval_result.score judgement["text"] = final_eval_result.response judgement["source"] = final_eval_result.pairwise_source data_entry["evaluations"] = [judgement] display_eval_df( question=data_entry["question"], source=data_entry["source"], answer_a=data_entry["answers"][0], answer_b=data_entry["answers"][1], result=final_eval_result, ) main_finetuning_handler.save_finetuning_events( "pairwise_finetuning_events.jsonl" ) import json with open("pairwise_finetuning_events.jsonl") as f: combined_finetuning_events = [json.loads(line) for line in f] finetuning_events = ( [] ) # for storing events using original order of presentation flipped_finetuning_events = ( [] ) # for storing events using flipped order of presentation for ix, event in enumerate(combined_finetuning_events): if ix % 2 == 0: # we always do original ordering first finetuning_events += [event] else: # then we flip order and have GPT-4 make another judgement flipped_finetuning_events += [event] assert len(finetuning_events) == len(flipped_finetuning_events) resolved_finetuning_events = [] for ix, data_entry in enumerate(train_dataset): if data_entry["evaluations"][0]["source"] == "original": resolved_finetuning_events += [finetuning_events[ix]] elif data_entry["evaluations"][0]["source"] == "flipped": resolved_finetuning_events += [flipped_finetuning_events[ix]] else: continue with open("resolved_pairwise_finetuning_events.jsonl", "w") as outfile: for entry in resolved_finetuning_events: print(json.dumps(entry), file=outfile) from llama_index.finetuning import OpenAIFinetuneEngine finetune_engine = OpenAIFinetuneEngine( "gpt-3.5-turbo", "resolved_pairwise_finetuning_events.jsonl", ) finetune_engine.finetune() finetune_engine.get_current_job() import random test_dataset = [] for q in tqdm.tqdm(test_questions): model_versus = random.sample(list(test_query_engines.items()), 2) data_entry = {"question": q} responses = [] source = None for name, engine in model_versus: response = engine.query(q) response_struct = {} response_struct["model"] = name response_struct["text"] = str(response) if source is not None: assert source == response.source_nodes[0].node.text[:1000] + "..." else: source = response.source_nodes[0].node.text[:1000] + "..." 
responses.append(response_struct) data_entry["answers"] = responses data_entry["source"] = source test_dataset.append(data_entry) for data_entry in tqdm.tqdm(test_dataset): final_eval_result = await gpt4_judge.aevaluate( query=data_entry["question"], response=data_entry["answers"][0]["text"], second_response=data_entry["answers"][1]["text"], reference=data_entry["source"], ) judgement = {} judgement["llm"] = "gpt_4" judgement["score"] = final_eval_result.score judgement["text"] = final_eval_result.response judgement["source"] = final_eval_result.pairwise_source data_entry["evaluations"] = [judgement] from llama_index.core.evaluation import EvaluationResult ft_llm = finetune_engine.get_finetuned_model() ft_gpt_3p5_judge = PairwiseComparisonEvaluator(llm=ft_llm) for data_entry in tqdm.tqdm(test_dataset): try: final_eval_result = await ft_gpt_3p5_judge.aevaluate( query=data_entry["question"], response=data_entry["answers"][0]["text"], second_response=data_entry["answers"][1]["text"], reference=data_entry["source"], ) except: final_eval_result = EvaluationResult( query=data_entry["question"], response="", passing=None, score=0.5, feedback="", pairwise_source="output-cannot-be-parsed", ) judgement = {} judgement["llm"] = "ft_gpt_3p5" judgement["score"] = final_eval_result.score judgement["text"] = final_eval_result.response judgement["source"] = final_eval_result.pairwise_source data_entry["evaluations"] += [judgement] gpt_3p5_llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
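A minimal sketch (not in the original row) comparing the fine-tuned judge's verdicts against GPT-4's on the test set; evaluations[0] holds the GPT-4 judgement and evaluations[1] the fine-tuned one, and unparseable outputs were recorded as 0.5 ties above:
import pandas as pd

df = pd.DataFrame(
    {
        "gpt_4": [d["evaluations"][0]["score"] for d in test_dataset],
        "ft_gpt_3p5": [d["evaluations"][1]["score"] for d in test_dataset],
    }
)
# fraction of test questions on which the fine-tuned judge agrees with GPT-4
print((df["gpt_4"] == df["ft_gpt_3p5"]).mean())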
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') import nest_asyncio nest_asyncio.apply() import os HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") get_ipython().system('pip install wikipedia -q') from llama_index.readers.wikipedia import WikipediaReader cities = [ "San Francisco", "Toronto", "New York", "Vancouver", "Montreal", "Tokyo", "Singapore", "Paris", ] documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in cities] ) QUESTION_GEN_PROMPT = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." ) from llama_index.core.evaluation import DatasetGenerator from llama_index.llms.openai import OpenAI gpt_35_llm =
OpenAI(model="gpt-3.5-turbo", temperature=0.3)
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().system('pip install llama-index') import time import nest_asyncio nest_asyncio.apply() import os os.environ["OPENAI_API_KEY"] = "[YOUR_API_KEY]" from llama_index.core import VectorStoreIndex, download_loader from llama_index.readers.wikipedia import WikipediaReader loader = WikipediaReader() documents = loader.load_data( pages=[ "Berlin", "Santiago", "Moscow", "Tokyo", "Jakarta", "Cairo", "Bogota", "Shanghai", "Damascus", ] ) len(documents) start_time = time.perf_counter() index = VectorStoreIndex.from_documents(documents) duration = time.perf_counter() - start_time print(duration) start_time = time.perf_counter() index =
VectorStoreIndex(documents, use_async=True)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().system('pip install llama-index') from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.core.tools import QueryEngineTool, ToolMetadata try: storage_context = StorageContext.from_defaults( persist_dir="./storage/lyft" ) lyft_index =
load_index_from_storage(storage_context)
llama_index.core.load_index_from_storage
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import LLMRerank from llama_index.llms.openai import OpenAI from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") from llama_index.core import Settings Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.chunk_overlap = 0 Settings.chunk_size = 128 documents = SimpleDirectoryReader( input_files=["./data/10k/lyft_2021.pdf"] ).load_data() index = VectorStoreIndex.from_documents( documents, ) from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core import QueryBundle import pandas as pd from IPython.display import display, HTML from copy import deepcopy pd.set_option("display.max_colwidth", None) def get_retrieved_nodes( query_str, vector_top_k=10, reranker_top_n=3, with_reranker=False ): query_bundle =
QueryBundle(query_str)
llama_index.core.QueryBundle
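The helper above is cut off at the QueryBundle call by the row's completion; a plausible sketch of the remainder of its body (retrieve first, then optionally rerank), using only the imports already present in the row:
    retriever = VectorIndexRetriever(index=index, similarity_top_k=vector_top_k)
    retrieved_nodes = retriever.retrieve(query_bundle)
    if with_reranker:
        # second pass: let the LLM pick the most relevant candidates
        reranker = LLMRerank(choice_batch_size=5, top_n=reranker_top_n)
        retrieved_nodes = reranker.postprocess_nodes(retrieved_nodes, query_bundle)
    return retrieved_nodes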
import os print(os.listdir("./discord_dumps")) import json with open("./discord_dumps/help_channel_dump_05_25_23.json", "r") as f: data = json.load(f) print("JSON keys: ", data.keys(), "\n") print("Message Count: ", len(data["messages"]), "\n") print("Sample Message Keys: ", data["messages"][0].keys(), "\n") print("First Message: ", data["messages"][0]["content"], "\n") print("Last Message: ", data["messages"][-1]["content"]) get_ipython().system('python ./group_conversations.py ./discord_dumps/help_channel_dump_05_25_23.json') with open("conversation_docs.json", "r") as f: threads = json.load(f) print("Thread keys: ", threads[0].keys(), "\n") print(threads[0]["metadata"], "\n") print(threads[0]["thread"], "\n") from llama_index.core import Document documents = [] for thread in threads: thread_text = thread["thread"] thread_id = thread["metadata"]["id"] timestamp = thread["metadata"]["timestamp"] documents.append(
Document(text=thread_text, id_=thread_id, metadata={"date": timestamp})
llama_index.core.Document
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2) Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("../data/paul_graham").load_data() from llama_index.core import Settings Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) from llama_index.core import StorageContext storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) from llama_index.core import SummaryIndex from llama_index.core import VectorStoreIndex summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-readers-github') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os os.environ["GITHUB_TOKEN"] = "" import os from llama_index.readers.github import GitHubRepositoryIssuesReader, GitHubIssuesClient github_client = GitHubIssuesClient() loader = GitHubRepositoryIssuesReader( github_client, owner="run-llama", repo="llama_index", verbose=True, ) orig_docs = loader.load_data() limit = 100 docs = [] for idx, doc in enumerate(orig_docs): doc.metadata["index_id"] = doc.id_ if idx >= limit: break docs.append(doc) from copy import deepcopy import asyncio from tqdm.asyncio import tqdm_asyncio from llama_index.core.indices import SummaryIndex from llama_index.core import Document, ServiceContext from llama_index.llms.openai import OpenAI from llama_index.core.async_utils import run_jobs async def aprocess_doc(doc, include_summary: bool = True): """Process doc.""" print(f"Processing {doc.id_}") metadata = doc.metadata date_tokens = metadata["created_at"].split("T")[0].split("-") year = int(date_tokens[0]) month = int(date_tokens[1]) day = int(date_tokens[2]) assignee = "" if "assignee" not in doc.metadata else doc.metadata["assignee"] size = "" if len(doc.metadata["labels"]) > 0: size_arr = [l for l in doc.metadata["labels"] if "size:" in l] size = size_arr[0].split(":")[1] if len(size_arr) > 0 else "" new_metadata = { "state": metadata["state"], "year": year, "month": month, "day": day, "assignee": assignee, "size": size, "index_id": doc.id_, } summary_index = SummaryIndex.from_documents([doc]) query_str = "Give a one-sentence concise summary of this issue." query_engine = summary_index.as_query_engine( service_context=ServiceContext.from_defaults(llm=
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index llama-index-vector-stores-qdrant -q') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data') get_ipython().system('wget "https://arxiv.org/pdf/2402.09353.pdf" -O "./data/dorav1.pdf"') from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4") response = llm.complete("What is DoRA?") print(response.text) """Load the data. With llama-index, before any transformations are applied, data is loaded in the `Document` abstraction, which is a container that holds the text of the document. """ from llama_index.core import SimpleDirectoryReader loader =
SimpleDirectoryReader(input_dir="./data")
llama_index.core.SimpleDirectoryReader
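An illustrative inspection of the loaded Document objects (paths and fields as in the row above; PDF readers typically yield one Document per page):
documents = loader.load_data()
print(len(documents))          # number of Document containers produced
print(documents[0].metadata)   # e.g. file name and page label
print(documents[0].text[:200]) # raw text held by the Document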
get_ipython().system('pip install llama-index') get_ipython().system('pip install llama-index-core') get_ipython().system('pip install --quiet transformers torch') get_ipython().system('pip install llama-index-embeddings-openai') get_ipython().system('pip install llama-index-llms-openai') get_ipython().system('pip install llama-index-postprocessor-colbert-rerank') from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, ) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os os.environ["OPENAI_API_KEY"] = "sk-" documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
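The row above installs the ColBERT reranker but stops at loading documents; a sketch of where it would plug in as a node postprocessor (constructor arguments follow the package's documented example, but treat them as assumptions):
from llama_index.postprocessor.colbert_rerank import ColbertRerank

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
colbert_reranker = ColbertRerank(
    top_n=5,
    model="colbert-ir/colbertv2.0",
    tokenizer="colbert-ir/colbertv2.0",
    keep_retrieval_score=True,
)
query_engine = index.as_query_engine(
    similarity_top_k=10,
    node_postprocessors=[colbert_reranker],
)
print(query_engine.query("What did the author do growing up?"))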
get_ipython().system('pip install llama-index-multi-modal-llms-ollama') get_ipython().system('pip install llama-index-readers-file') get_ipython().system('pip install unstructured') get_ipython().system('pip install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index-embeddings-clip') from llama_index.multi_modal_llms.ollama import OllamaMultiModal mm_model = OllamaMultiModal(model="llava:13b") from pathlib import Path from llama_index.core import SimpleDirectoryReader from PIL import Image import matplotlib.pyplot as plt input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') image_documents = SimpleDirectoryReader("./restaurant_images").load_data() imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ {query_str} Return the answer as a Pydantic object. The Pydantic schema is given below: """ mm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_model, verbose=True, ) response = mm_program(query_str="Can you summarize what is in the image?") for res in response: print(res) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg') from pathlib import Path from llama_index.readers.file import UnstructuredReader from llama_index.core.schema import ImageDocument loader = UnstructuredReader() documents = loader.load_data(file=Path("tesla_2021_10k.htm")) image_doc = ImageDocument(image_path="./shanghai.jpg") from llama_index.core import VectorStoreIndex from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-m3") vector_index = VectorStoreIndex.from_documents( documents, embed_model=embed_model ) query_engine = vector_index.as_query_engine() from llama_index.core.prompts import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline, FnComponent query_prompt_str = """\ Please expand the initial statement using the provided context from the Tesla 10K report. 
{initial_statement} """ query_prompt_tmpl = PromptTemplate(query_prompt_str) qp = QueryPipeline( modules={ "mm_model": mm_model.as_query_component( partial={"image_documents": [image_doc]} ), "query_prompt": query_prompt_tmpl, "query_engine": query_engine, }, verbose=True, ) qp.add_chain(["mm_model", "query_prompt", "query_engine"]) rag_response = qp.run("Which Tesla Factory is shown in the image?") print(f"> Retrieval Augmented Response: {rag_response}") rag_response.source_nodes[1].get_content() get_ipython().system('wget "https://drive.usercontent.google.com/download?id=1qQDcaKuzgRGuEC1kxgYL_4mx7vG-v4gC&export=download&authuser=1&confirm=t&uuid=f944e95f-a31f-4b55-b68f-8ea67a6e90e5&at=APZUnTVZ6n1aOg7rtkcjBjw7Pt1D:1707010667927" -O mixed_wiki.zip') get_ipython().system('unzip mixed_wiki.zip') get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O ./mixed_wiki/tesla_2021_10k.htm') from llama_index.core.indices.multi_modal.base import ( MultiModalVectorStoreIndex, ) from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.embeddings.clip import ClipEmbedding import qdrant_client client = qdrant_client.QdrantClient(path="qdrant_mm_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) image_embed_model = ClipEmbedding() documents = SimpleDirectoryReader("./mixed_wiki/").load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, image_embed_model=image_embed_model, ) from llama_index.core.prompts import PromptTemplate from llama_index.core.query_engine import SimpleMultiModalQueryEngine qa_tmpl_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query.\n" "Query: {query_str}\n" "Answer: " ) qa_tmpl = PromptTemplate(qa_tmpl_str) query_engine = index.as_query_engine( multi_modal_llm=mm_model, text_qa_template=qa_tmpl ) query_str = "Tell me more about the Porsche" response = query_engine.query(query_str) print(str(response)) from PIL import Image import matplotlib.pyplot as plt import os def plot_images(image_paths): images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(2, 3, images_shown + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) images_shown += 1 if images_shown >= 6: break from llama_index.core.response.notebook_utils import display_source_node for text_node in response.metadata["text_nodes"]:
display_source_node(text_node, source_length=200)
llama_index.core.response.notebook_utils.display_source_node
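A companion sketch rendering the retrieved image sources with the plot_images helper defined in the row; the "image_nodes" metadata key mirrors the "text_nodes" key used above and is an assumption about the multi-modal query engine's response:
plot_images(
    [n.metadata["file_path"] for n in response.metadata["image_nodes"]]
)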
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI from llama_index.llms.anthropic import Anthropic llm = OpenAI(model="gpt-4") data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(data)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray') get_ipython().system('pip install llama-index') import os import sys import logging import textwrap import warnings warnings.filterwarnings("ignore") os.environ["TOKENIZERS_PARALLELISM"] = "false" from llama_index.core import ( GPTVectorStoreIndex, SimpleDirectoryReader, Document, ) from llama_index.vector_stores.docarray import DocArrayHnswVectorStore from IPython.display import Markdown, display import os os.environ["OPENAI_API_KEY"] = "<your openai key>" get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print( "Document ID:", documents[0].doc_id, "Document Hash:", documents[0].doc_hash, ) from llama_index.core import StorageContext vector_store = DocArrayHnswVectorStore(work_dir="hnsw_index") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = GPTVectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") print(textwrap.fill(str(response), 100)) response = query_engine.query("What was a hard moment for the author?") print(textwrap.fill(str(response), 100)) from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", }, ), ] from llama_index.core import StorageContext vector_store = DocArrayHnswVectorStore(work_dir="hnsw_filters") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = GPTVectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters filters = MetadataFilters( filters=[
ExactMatchFilter(key="theme", value="Mafia")
llama_index.core.vector_stores.ExactMatchFilter
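A small follow-up sketch (assuming the truncated MetadataFilters call above is closed) applying the theme filter at retrieval time; the query string is illustrative:
retriever = index.as_retriever(filters=filters)
for node_with_score in retriever.retrieve("What is inception about?"):
    print(node_with_score.node.text, node_with_score.node.metadata)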
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2) Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("../data/paul_graham").load_data() from llama_index.core import Settings Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) from llama_index.core import StorageContext storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) from llama_index.core import SummaryIndex from llama_index.core import VectorStoreIndex summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) list_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", use_async=True, ) vector_query_engine = vector_index.as_query_engine() from llama_index.core.tools import QueryEngineTool list_tool = QueryEngineTool.from_defaults( query_engine=list_query_engine, description=( "Useful for summarization questions related to Paul Graham essay on" " What I Worked On." ), ) vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On." ), ) from llama_index.core.query_engine import RouterQueryEngine from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) query_engine = RouterQueryEngine( selector=PydanticSingleSelector.from_defaults(), query_engine_tools=[ list_tool, vector_tool, ], ) response = query_engine.query("What is the summary of the document?") print(str(response)) response = query_engine.query("What did Paul Graham do after RICS?") print(str(response)) query_engine = RouterQueryEngine( selector=
LLMSingleSelector.from_defaults()
llama_index.core.selectors.LLMSingleSelector.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai-legacy') get_ipython().system('pip install llama-index') import json from typing import Sequence from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.core.tools import QueryEngineTool, ToolMetadata try: storage_context = StorageContext.from_defaults( persist_dir="./storage/march" ) march_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/june" ) june_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/sept" ) sept_index = load_index_from_storage(storage_context) index_loaded = True except: index_loaded = False get_ipython().system("mkdir -p 'data/10q/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'") if not index_loaded: march_docs = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_march_2022.pdf"] ).load_data() june_docs = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_docs = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index =
VectorStoreIndex.from_documents(march_docs)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') import nest_asyncio nest_asyncio.apply() import os HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") get_ipython().system('pip install wikipedia -q') from llama_index.readers.wikipedia import WikipediaReader cities = [ "San Francisco", "Toronto", "New York", "Vancouver", "Montreal", "Tokyo", "Singapore", "Paris", ] documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in cities] ) QUESTION_GEN_PROMPT = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." ) from llama_index.core.evaluation import DatasetGenerator from llama_index.llms.openai import OpenAI gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) dataset_generator = DatasetGenerator.from_documents( documents, question_gen_query=QUESTION_GEN_PROMPT, llm=gpt_35_llm, num_questions_per_chunk=25, ) qrd = dataset_generator.generate_dataset_from_nodes(num=350) from llama_index.core import VectorStoreIndex from llama_index.core.retrievers import VectorIndexRetriever the_index = VectorStoreIndex.from_documents(documents=documents) the_retriever = VectorIndexRetriever( index=the_index, similarity_top_k=2, ) from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.llms.huggingface import HuggingFaceInferenceAPI llm = HuggingFaceInferenceAPI( model_name="meta-llama/Llama-2-7b-chat-hf", context_window=2048, # to use refine token=HUGGING_FACE_TOKEN, ) query_engine = RetrieverQueryEngine.from_args(retriever=the_retriever, llm=llm) import tqdm train_dataset = [] num_train_questions = int(0.65 * len(qrd.qr_pairs)) for q, a in tqdm.tqdm(qrd.qr_pairs[:num_train_questions]): data_entry = {"question": q, "reference": a} response = query_engine.query(q) response_struct = {} response_struct["model"] = "llama-2" response_struct["text"] = str(response) response_struct["context"] = ( response.source_nodes[0].node.text[:1000] + "..." 
) data_entry["response_data"] = response_struct train_dataset.append(data_entry) from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.evaluation import CorrectnessEvaluator finetuning_handler = OpenAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) gpt_4_llm = OpenAI( temperature=0, model="gpt-4", callback_manager=callback_manager ) gpt4_judge = CorrectnessEvaluator(llm=gpt_4_llm) import tqdm for data_entry in tqdm.tqdm(train_dataset): eval_result = await gpt4_judge.aevaluate( query=data_entry["question"], response=data_entry["response_data"]["text"], context=data_entry["response_data"]["context"], reference=data_entry["reference"], ) judgement = {} judgement["llm"] = "gpt_4" judgement["score"] = eval_result.score judgement["text"] = eval_result.response data_entry["evaluations"] = [judgement] finetuning_handler.save_finetuning_events("correction_finetuning_events.jsonl") from llama_index.finetuning import OpenAIFinetuneEngine finetune_engine = OpenAIFinetuneEngine( "gpt-3.5-turbo", "correction_finetuning_events.jsonl", ) finetune_engine.finetune() finetune_engine.get_current_job() test_dataset = [] for q, a in tqdm.tqdm(qrd.qr_pairs[num_train_questions:]): data_entry = {"question": q, "reference": a} response = query_engine.query(q) response_struct = {} response_struct["model"] = "llama-2" response_struct["text"] = str(response) response_struct["context"] = ( response.source_nodes[0].node.text[:1000] + "..." ) data_entry["response_data"] = response_struct test_dataset.append(data_entry) for data_entry in tqdm.tqdm(test_dataset): eval_result = await gpt4_judge.aevaluate( query=data_entry["question"], response=data_entry["response_data"]["text"], context=data_entry["response_data"]["context"], reference=data_entry["reference"], ) judgement = {} judgement["llm"] = "gpt_4" judgement["score"] = eval_result.score judgement["text"] = eval_result.response data_entry["evaluations"] = [judgement] from llama_index.core.evaluation import EvaluationResult ft_llm = finetune_engine.get_finetuned_model() ft_gpt_3p5_judge =
CorrectnessEvaluator(llm=ft_llm)
llama_index.core.evaluation.CorrectnessEvaluator
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd from pathlib import Path data_dir = Path("./WikiTableQuestions/csv/200-csv") csv_files = sorted([f for f in data_dir.glob("*.csv")]) dfs = [] for csv_file in csv_files: print(f"processing file: {csv_file}") try: df = pd.read_csv(csv_file) dfs.append(df) except Exception as e: print(f"Error parsing {csv_file}: {str(e)}") tableinfo_dir = "WikiTableQuestions_TableInfo" get_ipython().system('mkdir {tableinfo_dir}') from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.llms.openai import OpenAI class TableInfo(BaseModel): """Information regarding a structured table.""" table_name: str = Field( ..., description="table name (must be underscores and NO spaces)" ) table_summary: str = Field( ..., description="short, concise summary/caption of the table" ) prompt_str = """\ Give me a summary of the table with the following JSON format. - The table name must be unique to the table and describe it while being concise. - Do NOT output a generic table name (e.g. table, my_table). Do NOT make the table name one of the following: {exclude_table_name_list} Table: {table_str} Summary: """ program = LLMTextCompletionProgram.from_defaults( output_cls=TableInfo, llm=OpenAI(model="gpt-3.5-turbo"), prompt_template_str=prompt_str, ) import json def _get_tableinfo_with_index(idx: int) -> str: results_gen = Path(tableinfo_dir).glob(f"{idx}_*") results_list = list(results_gen) if len(results_list) == 0: return None elif len(results_list) == 1: path = results_list[0] return TableInfo.parse_file(path) else: raise ValueError( f"More than one file matching index: {list(results_gen)}" ) table_names = set() table_infos = [] for idx, df in enumerate(dfs): table_info = _get_tableinfo_with_index(idx) if table_info: table_infos.append(table_info) else: while True: df_str = df.head(10).to_csv() table_info = program( table_str=df_str, exclude_table_name_list=str(list(table_names)), ) table_name = table_info.table_name print(f"Processed table: {table_name}") if table_name not in table_names: table_names.add(table_name) break else: print(f"Table name {table_name} already exists, trying again.") pass out_file = f"{tableinfo_dir}/{idx}_{table_name}.json" json.dump(table_info.dict(), open(out_file, "w")) table_infos.append(table_info) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, ) import re def sanitize_column_name(col_name): return re.sub(r"\W+", "_", col_name) def create_table_from_dataframe( df: pd.DataFrame, table_name: str, engine, metadata_obj ): sanitized_columns = {col: sanitize_column_name(col) for col in df.columns} df = df.rename(columns=sanitized_columns) columns = [ Column(col, String if dtype == "object" else Integer) for col, dtype in zip(df.columns, df.dtypes) ] table = Table(table_name, metadata_obj, *columns) metadata_obj.create_all(engine) with engine.connect() as conn: for _, row in df.iterrows(): insert_stmt = table.insert().values(**row.to_dict()) conn.execute(insert_stmt) conn.commit() engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() for idx, df in enumerate(dfs): tableinfo = _get_tableinfo_with_index(idx) print(f"Creating table: {tableinfo.table_name}") create_table_from_dataframe(df, 
tableinfo.table_name, engine, metadata_obj) import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import SQLDatabase, VectorStoreIndex sql_database = SQLDatabase(engine) table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ SQLTableSchema(table_name=t.table_name, context_str=t.table_summary) for t in table_infos ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) obj_retriever = obj_index.as_retriever(similarity_top_k=3) from llama_index.core.retrievers import SQLRetriever from typing import List from llama_index.core.query_pipeline import FnComponent sql_retriever = SQLRetriever(sql_database) def get_table_context_str(table_schema_objs: List[SQLTableSchema]): """Get table context string.""" context_strs = [] for table_schema_obj in table_schema_objs: table_info = sql_database.get_single_table_info( table_schema_obj.table_name ) if table_schema_obj.context_str: table_opt_context = " The table description is: " table_opt_context += table_schema_obj.context_str table_info += table_opt_context context_strs.append(table_info) return "\n\n".join(context_strs) table_parser_component = FnComponent(fn=get_table_context_str) from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import FnComponent from llama_index.core.llms import ChatResponse def parse_response_to_sql(response: ChatResponse) -> str: """Parse response to SQL.""" response = response.message.content sql_query_start = response.find("SQLQuery:") if sql_query_start != -1: response = response[sql_query_start:] if response.startswith("SQLQuery:"): response = response[len("SQLQuery:") :] sql_result_start = response.find("SQLResult:") if sql_result_start != -1: response = response[:sql_result_start] return response.strip().strip("```").strip() sql_parser_component = FnComponent(fn=parse_response_to_sql) text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format( dialect=engine.dialect.name ) print(text2sql_prompt.template) response_synthesis_prompt_str = ( "Given an input question, synthesize a response from the query results.\n" "Query: {query_str}\n" "SQL: {sql_query}\n" "SQL Response: {context_str}\n" "Response: " ) response_synthesis_prompt = PromptTemplate( response_synthesis_prompt_str, ) llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
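The row ends just before the retrievers, prompts, parsers, and LLM are wired together; one plausible assembly with QueryPipeline (the module names, link keys, and run question are assumptions, modeled on the pipeline pattern used elsewhere in these snippets):
from llama_index.core.query_pipeline import QueryPipeline, InputComponent

qp = QueryPipeline(
    modules={
        "input": InputComponent(),
        "table_retriever": obj_retriever,
        "table_output_parser": table_parser_component,
        "text2sql_prompt": text2sql_prompt,
        "text2sql_llm": llm,
        "sql_output_parser": sql_parser_component,
        "sql_retriever": sql_retriever,
        "response_synthesis_prompt": response_synthesis_prompt,
        "response_synthesis_llm": llm,
    },
    verbose=True,
)
# schema lookup feeds the text-to-SQL prompt; the generated SQL feeds the executor
qp.add_chain(["input", "table_retriever", "table_output_parser"])
qp.add_link("input", "text2sql_prompt", dest_key="query_str")
qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema")
qp.add_chain(["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"])
qp.add_link("sql_output_parser", "response_synthesis_prompt", dest_key="sql_query")
qp.add_link("sql_retriever", "response_synthesis_prompt", dest_key="context_str")
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str")
qp.add_link("response_synthesis_prompt", "response_synthesis_llm")
response = qp.run(query="What was the year that The Notorious B.I.G was signed to Bad Boy?")
print(str(response))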
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') import nest_asyncio nest_asyncio.apply() import os HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") get_ipython().system('pip install wikipedia -q') from llama_index.readers.wikipedia import WikipediaReader cities = [ "San Francisco", "Toronto", "New York", "Vancouver", "Montreal", "Tokyo", "Singapore", "Paris", ] documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in cities] ) QUESTION_GEN_PROMPT = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." ) from llama_index.core.evaluation import DatasetGenerator from llama_index.llms.openai import OpenAI gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) dataset_generator = DatasetGenerator.from_documents( documents, question_gen_query=QUESTION_GEN_PROMPT, llm=gpt_35_llm, num_questions_per_chunk=25, ) qrd = dataset_generator.generate_dataset_from_nodes(num=350) from llama_index.core import VectorStoreIndex from llama_index.core.retrievers import VectorIndexRetriever the_index = VectorStoreIndex.from_documents(documents=documents) the_retriever = VectorIndexRetriever( index=the_index, similarity_top_k=2, ) from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.llms.huggingface import HuggingFaceInferenceAPI llm = HuggingFaceInferenceAPI( model_name="meta-llama/Llama-2-7b-chat-hf", context_window=2048, # to use refine token=HUGGING_FACE_TOKEN, ) query_engine = RetrieverQueryEngine.from_args(retriever=the_retriever, llm=llm) import tqdm train_dataset = [] num_train_questions = int(0.65 * len(qrd.qr_pairs)) for q, a in tqdm.tqdm(qrd.qr_pairs[:num_train_questions]): data_entry = {"question": q, "reference": a} response = query_engine.query(q) response_struct = {} response_struct["model"] = "llama-2" response_struct["text"] = str(response) response_struct["context"] = ( response.source_nodes[0].node.text[:1000] + "..." ) data_entry["response_data"] = response_struct train_dataset.append(data_entry) from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.evaluation import CorrectnessEvaluator finetuning_handler = OpenAIFineTuningHandler() callback_manager =
CallbackManager([finetuning_handler])
llama_index.core.callbacks.CallbackManager
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-tools-metaphor') get_ipython().system('wget "https://images.openai.com/blob/a2e49de2-ba5b-4869-9c2d-db3b4b5dcc19/new-models-and-developer-products-announced-at-devday.jpg?width=2000" -O other_images/openai/dev_day.png') get_ipython().system('wget "https://drive.google.com/uc\\?id\\=1B4f5ZSIKN0zTTPPRlZ915Ceb3_uF9Zlq\\&export\\=download" -O other_images/adidas.png') from llama_index.readers.web import SimpleWebPageReader url = "https://openai.com/blog/new-models-and-developer-products-announced-at-devday" reader = SimpleWebPageReader(html_to_text=True) documents = reader.load_data(urls=[url]) from llama_index.llms.openai import OpenAI from llama_index.core import VectorStoreIndex from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core import Settings Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo") vector_index = VectorStoreIndex.from_documents( documents, ) query_tool = QueryEngineTool( query_engine=vector_index.as_query_engine(), metadata=ToolMetadata( name=f"vector_tool", description=( "Useful to lookup new features announced by OpenAI" ), ), ) from llama_index.core.agent.react_multimodal.step import ( MultimodalReActAgentWorker, ) from llama_index.core.agent import AgentRunner from llama_index.core.multi_modal_llms import MultiModalLLM from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core.agent import Task mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=1000) react_step_engine = MultimodalReActAgentWorker.from_tools( [query_tool], multi_modal_llm=mm_llm, verbose=True, ) agent = AgentRunner(react_step_engine) query_str = ( "The photo shows some new features released by OpenAI. " "Can you pinpoint the features in the photo and give more details using relevant tools?" ) from llama_index.core.schema import ImageDocument image_document = ImageDocument(image_path="other_images/openai/dev_day.png") task = agent.create_task( query_str, extra_state={"image_docs": [image_document]}, ) def execute_step(agent: AgentRunner, task: Task): step_output = agent.run_step(task.task_id) if step_output.is_last: response = agent.finalize_response(task.task_id) print(f"> Agent finished: {str(response)}") return response else: return None def execute_steps(agent: AgentRunner, task: Task): response = execute_step(agent, task) while response is None: response = execute_step(agent, task) return response response = execute_step(agent, task) response = execute_step(agent, task) print(str(response)) from llama_index.tools.metaphor import MetaphorToolSpec from llama_index.core.agent.react_multimodal.step import ( MultimodalReActAgentWorker, ) from llama_index.core.agent import AgentRunner from llama_index.core.multi_modal_llms import MultiModalLLM from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core.agent import Task metaphor_tool_spec = MetaphorToolSpec( api_key="<api_key>", ) metaphor_tools = metaphor_tool_spec.to_tool_list() mm_llm =
OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=1000)
llama_index.multi_modal_llms.openai.OpenAIMultiModal
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import openai import os os.environ["OPENAI_API_KEY"] = "API_KEY_HERE" openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import VectorStoreIndex, SimpleDirectoryReader data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(data)
llama_index.core.VectorStoreIndex.from_documents
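A minimal hedged sketch of querying the completed index (the question is illustrative):
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)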
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().system('pip install llama-index') from llama_index.llms.cohere import Cohere api_key = "Your api key" resp = Cohere(api_key=api_key).complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.cohere import Cohere messages = [ ChatMessage(role="user", content="hello there"), ChatMessage( role="assistant", content="Arrrr, matey! How can I help ye today?" ), ChatMessage(role="user", content="What is your name"), ] resp = Cohere(api_key=api_key).chat( messages, preamble_override="You are a pirate with a colorful personality" ) print(resp) from llama_index.llms.openai import OpenAI llm =
Cohere(api_key=api_key)
llama_index.llms.cohere.Cohere
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system("pip install llama-index 'google-generativeai>=0.3.0' matplotlib qdrant_client") import os GOOGLE_API_KEY = "" # add your GOOGLE API key here os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY from pathlib import Path import random from typing import Optional def get_image_files( dir_path, sample: Optional[int] = 10, shuffle: bool = False ): dir_path = Path(dir_path) image_paths = [] for image_path in dir_path.glob("*.jpg"): image_paths.append(image_path) if shuffle: random.shuffle(image_paths) if sample: return image_paths[:sample] else: return image_paths image_files = get_image_files("SROIE2019/test/img", sample=100) from pydantic import BaseModel, Field class ReceiptInfo(BaseModel): company: str = Field(..., description="Company name") date: str = Field(..., description="Date field in DD/MM/YYYY format") address: str = Field(..., description="Address") total: float = Field(..., description="total amount") currency: str = Field( ..., description="Currency of the country (in abbreviations)" ) summary: str = Field( ..., description="Extracted text summary of the receipt, including items purchased, the type of store, the location, and any other notable salient features (what does the purchase seem to be for?).", ) from llama_index.multi_modal_llms.gemini import GeminiMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ Can you summarize the image and return a response \ with the following JSON format: \ """ async def pydantic_gemini(output_class, image_documents, prompt_template_str): gemini_llm = GeminiMultiModal( api_key=GOOGLE_API_KEY, model_name="models/gemini-pro-vision" ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_class), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=gemini_llm, verbose=True, ) response = await llm_program.acall() return response from llama_index.core import SimpleDirectoryReader from llama_index.core.async_utils import run_jobs async def aprocess_image_file(image_file): print(f"Image file: {image_file}") img_docs = SimpleDirectoryReader(input_files=[image_file]).load_data() output = await pydantic_gemini(ReceiptInfo, img_docs, prompt_template_str) return output async def aprocess_image_files(image_files): """Process metadata on image files.""" new_docs = [] tasks = [] for image_file in image_files: task = aprocess_image_file(image_file) tasks.append(task) outputs = await run_jobs(tasks, show_progress=True, workers=5) return outputs outputs = await aprocess_image_files(image_files) outputs[4] from llama_index.core.schema import TextNode from typing import List def get_nodes_from_objs( objs: List[ReceiptInfo], image_files: List[str] ) -> List[TextNode]: """Get nodes from objects.""" nodes = [] for image_file, obj in zip(image_files, objs): node = TextNode( text=obj.summary, metadata={ "company": obj.company, "date": obj.date, "address": obj.address, "total": obj.total, "currency": obj.currency, "image_file": str(image_file), }, excluded_embed_metadata_keys=["image_file"], excluded_llm_metadata_keys=["image_file"], ) nodes.append(node) return nodes
nodes = get_nodes_from_objs(outputs, image_files) print(nodes[0].get_content(metadata_mode="all")) import qdrant_client from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import StorageContext from llama_index.core import VectorStoreIndex from llama_index.embeddings.gemini import GeminiEmbedding from llama_index.llms.gemini import Gemini from llama_index.core import Settings client = qdrant_client.QdrantClient(path="qdrant_gemini") vector_store =
QdrantVectorStore(client=client, collection_name="collection")
llama_index.vector_stores.qdrant.QdrantVectorStore
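A hedged sketch of the likely next step, assuming the receipt nodes built above are indexed into the Qdrant-backed store:
from llama_index.core import StorageContext, VectorStoreIndex

storage_context = StorageContext.from_defaults(vector_store=vector_store)
# Embeds the receipt summary nodes and persists the vectors in Qdrant
index = VectorStoreIndex(nodes, storage_context=storage_context)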
get_ipython().run_line_magic('pip', 'install llama-index-llms-groq') get_ipython().system('pip install llama-index') from llama_index.llms.groq import Groq llm =
Groq(model="mixtral-8x7b-32768", api_key="your_api_key")
llama_index.llms.groq.Groq
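A minimal hedged sketch of exercising the completed Groq client (the prompt text is illustrative):
response = llm.complete("Explain the importance of low-latency LLMs in two sentences.")
print(response)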
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings from llama_index.core import StorageContext, VectorStoreIndex from llama_index.core import SummaryIndex Settings.llm = OpenAI() Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) summary_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", use_async=True, ) vector_query_engine = vector_index.as_query_engine() from llama_index.core.tools import QueryEngineTool summary_tool = QueryEngineTool.from_defaults( query_engine=summary_query_engine, name="summary_tool", description=( "Useful for summarization questions related to the author's life" ), ) vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, name="vector_tool", description=( "Useful for retrieving specific context to answer specific questions about the author's life" ), ) from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="QA bot", instructions="You are a bot designed to answer questions about the author", openai_tools=[], tools=[summary_tool, vector_tool], verbose=True, run_retrieve_sleep_time=1.0, ) response = agent.chat("Can you give me a summary about the author's life?") print(str(response)) response = agent.query("What did the author do after RISD?") print(str(response)) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") try: pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) except Exception: pass pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True, namespace="test") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", }, ),
TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." )
llama_index.core.schema.TextNode
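A hedged sketch of indexing nodes like these into the Pinecone index created above (the namespace mirrors the row's delete call):
vector_store = PineconeVectorStore(pinecone_index=pinecone_index, namespace="test")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)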
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla') get_ipython().system('pip3 install pyepsilla') get_ipython().system('pip install llama-index') import logging import sys from llama_index.core import SimpleDirectoryReader, Document, StorageContext from llama_index.core import VectorStoreIndex from llama_index.vector_stores.epsilla import EpsillaVectorStore import textwrap import openai import getpass OPENAI_API_KEY = getpass.getpass("OpenAI API Key:") openai.api_key = OPENAI_API_KEY get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print(f"Total documents: {len(documents)}") print(f"First document, id: {documents[0].doc_id}") print(f"First document, hash: {documents[0].hash}") from pyepsilla import vectordb client = vectordb.Client() vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("Who is the author?") print(textwrap.fill(str(response), 100)) response = query_engine.query("How did the author learn about AI?") print(textwrap.fill(str(response), 100)) vector_store = EpsillaVectorStore(client=client, overwrite=True) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import camelot from llama_index.core import VectorStoreIndex from llama_index.core.query_engine import PandasQueryEngine from llama_index.core.schema import IndexNode from llama_index.llms.openai import OpenAI from llama_index.readers.file import PyMuPDFReader from typing import List import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") file_path = "billionaires_page.pdf" reader = PyMuPDFReader() docs = reader.load(file_path) def get_tables(path: str, pages: List[int]): table_dfs = [] for page in pages: table_list = camelot.read_pdf(path, pages=str(page)) table_df = table_list[0].df table_df = ( table_df.rename(columns=table_df.iloc[0]) .drop(table_df.index[0]) .reset_index(drop=True) ) table_dfs.append(table_df) return table_dfs table_dfs = get_tables(file_path, pages=[3, 25]) table_dfs[0] table_dfs[1] llm = OpenAI(model="gpt-4") df_query_engines = [ PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs ] response = df_query_engines[0].query( "What's the net worth of the second richest billionaire in 2023?" ) print(str(response)) response = df_query_engines[1].query( "How many billionaires were there in 2009?" ) print(str(response)) from llama_index.core import Settings doc_nodes = Settings.node_parser.get_nodes_from_documents(docs) summaries = [ ( "This node provides information about the world's richest billionaires" " in 2023" ), ( "This node provides information on the number of billionaires and" " their combined net worth from 2000 to 2023." ), ] df_nodes = [ IndexNode(text=summary, index_id=f"pandas{idx}") for idx, summary in enumerate(summaries) ] df_id_query_engine_mapping = { f"pandas{idx}": df_query_engine for idx, df_query_engine in enumerate(df_query_engines) } vector_index =
VectorStoreIndex(doc_nodes + df_nodes)
llama_index.core.VectorStoreIndex
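A hedged sketch of the recursive-retrieval wiring this row appears to be building toward, reusing the names defined above: IndexNode hits (pandas0/pandas1) are routed to their table query engines.
from llama_index.core import get_response_synthesizer
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine

vector_index = VectorStoreIndex(doc_nodes + df_nodes)
vector_retriever = vector_index.as_retriever(similarity_top_k=1)
recursive_retriever = RecursiveRetriever(
    "vector",
    retriever_dict={"vector": vector_retriever},
    query_engine_dict=df_id_query_engine_mapping,
    verbose=True,
)
query_engine = RetrieverQueryEngine.from_args(
    recursive_retriever,
    response_synthesizer=get_response_synthesizer(response_mode="compact"),
)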
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google') get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"') get_ipython().run_line_magic('pip', 'install google-auth-oauthlib') from google.oauth2 import service_account from llama_index.vector_stores.google import set_google_config credentials = service_account.Credentials.from_service_account_file( "service_account_key.json", scopes=[ "https://www.googleapis.com/auth/generative-language.retriever", ], ) set_google_config(auth_credentials=credentials) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix from typing import Iterable from random import randrange LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab" SESSION_CORPUS_ID_PREFIX = ( f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}" ) def corpus_id(num_id: int) -> str: return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}" SESSION_CORPUS_ID = corpus_id(1) def list_corpora() -> Iterable[genaix.Corpus]: client = genaix.build_semantic_retriever() yield from genaix.list_corpora(client=client) def delete_corpus(*, corpus_id: str) -> None: client = genaix.build_semantic_retriever()
genaix.delete_corpus(corpus_id=corpus_id, client=client)
llama_index.core.vector_stores.google.generativeai.genai_extension.delete_corpus
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") import nest_asyncio nest_asyncio.apply() from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker from llama_index.core.llama_pack import download_llama_pack download_llama_pack( "LLMCompilerAgentPack", "./agent_pack", skip_load=True, ) from agent_pack.step import LLMCompilerAgentWorker import json from typing import Sequence, List from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool import nest_asyncio nest_asyncio.apply() def multiply(a: int, b: int) -> int: """Multiply two integers and return the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) tools = [multiply_tool, add_tool] multiply_tool.metadata.fn_schema_str from llama_index.core.agent import AgentRunner llm = OpenAI(model="gpt-4") callback_manager = llm.callback_manager agent_worker = LLMCompilerAgentWorker.from_tools( tools, llm=llm, verbose=True, callback_manager=callback_manager ) agent = AgentRunner(agent_worker, callback_manager=callback_manager) response = agent.chat("What is (121 * 3) + 42?") response agent.memory.get_all() get_ipython().system('pip install llama-index-readers-wikipedia') from llama_index.readers.wikipedia import WikipediaReader wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Miami"] city_docs = {} reader = WikipediaReader() for wiki_title in wiki_titles: docs = reader.load_data(pages=[wiki_title]) city_docs[wiki_title] = docs from llama_index.core import ServiceContext from llama_index.llms.openai import OpenAI from llama_index.core.callbacks import CallbackManager llm = OpenAI(temperature=0, model="gpt-4") service_context =
ServiceContext.from_defaults(llm=llm)
llama_index.core.ServiceContext.from_defaults
get_ipython().system('pip install llama-index-multi-modal-llms-ollama') get_ipython().system('pip install llama-index-readers-file') get_ipython().system('pip install unstructured') get_ipython().system('pip install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index-embeddings-clip') from llama_index.multi_modal_llms.ollama import OllamaMultiModal mm_model = OllamaMultiModal(model="llava:13b") from pathlib import Path from llama_index.core import SimpleDirectoryReader from PIL import Image import matplotlib.pyplot as plt input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') image_documents = SimpleDirectoryReader("./restaurant_images").load_data() imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ {query_str} Return the answer as a Pydantic object. The Pydantic schema is given below: """ mm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_model, verbose=True, ) response = mm_program(query_str="Can you summarize what is in the image?") for res in response: print(res) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg') from pathlib import Path from llama_index.readers.file import UnstructuredReader from llama_index.core.schema import ImageDocument loader = UnstructuredReader() documents = loader.load_data(file=Path("tesla_2021_10k.htm")) image_doc =
ImageDocument(image_path="./shanghai.jpg")
llama_index.core.schema.ImageDocument
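A minimal hedged sketch of feeding the image document to the LLaVA model defined earlier in the row (the prompt is illustrative):
response = mm_model.complete(
    prompt="Describe the image in one or two sentences.",
    image_documents=[image_doc],
)
print(response)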
get_ipython().run_line_magic('pip', 'install llama-index-llms-portkey') get_ipython().system('pip install llama-index') get_ipython().system('pip install -U llama_index') get_ipython().system('pip install -U portkey-ai') from llama_index.llms.portkey import Portkey from llama_index.core.llms import ChatMessage import portkey as pk import os os.environ["PORTKEY_API_KEY"] = "PORTKEY_API_KEY" openai_virtual_key_a = "" openai_virtual_key_b = "" anthropic_virtual_key_a = "" anthropic_virtual_key_b = "" cohere_virtual_key_a = "" cohere_virtual_key_b = "" os.environ["OPENAI_API_KEY"] = "" os.environ["ANTHROPIC_API_KEY"] = "" portkey_client = Portkey( mode="single", ) openai_llm = pk.LLMOptions( provider="openai", model="gpt-4", virtual_key=openai_virtual_key_a, ) portkey_client.add_llms(openai_llm) messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What can you do?"), ] print("Testing Portkey Llamaindex integration:") response = portkey_client.chat(messages) print(response) prompt = "Why is the sky blue?" print("\nTesting Stream Complete:\n") response = portkey_client.stream_complete(prompt) for i in response: print(i.delta, end="", flush=True) messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What can you do?"), ] print("\nTesting Stream Chat:\n") response = portkey_client.stream_chat(messages) for i in response: print(i.delta, end="", flush=True) portkey_client = Portkey(mode="fallback") messages = [ ChatMessage(role="system", content="You are a helpful assistant"), ChatMessage(role="user", content="What can you do?"), ] llm1 = pk.LLMOptions( provider="openai", model="gpt-4", retry_settings={"on_status_codes": [429, 500], "attempts": 2}, virtual_key=openai_virtual_key_a, ) llm2 = pk.LLMOptions( provider="openai", model="gpt-3.5-turbo", virtual_key=openai_virtual_key_b, ) portkey_client.add_llms(llm_params=[llm1, llm2]) print("Testing Fallback & Retry functionality:") response = portkey_client.chat(messages) print(response) portkey_client = Portkey(mode="ab_test") messages = [
ChatMessage(role="system", content="You are a helpful assistant")
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core import PromptTemplate text_qa_template_str = ( "Context information is" " below.\n---------------------\n{context_str}\n---------------------\nUsing" " both the context information and also using your own knowledge, answer" " the question: {query_str}\nIf the context isn't helpful, you can also" " answer the question on your own.\n" ) text_qa_template = PromptTemplate(text_qa_template_str) refine_template_str = ( "The original question is as follows: {query_str}\nWe have provided an" " existing answer: {existing_answer}\nWe have the opportunity to refine" " the existing answer (only if needed) with some more context" " below.\n------------\n{context_msg}\n------------\nUsing both the new" " context and your own knowledge, update or repeat the existing answer.\n" ) refine_template =
PromptTemplate(refine_template_str)
llama_index.core.PromptTemplate
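A hedged sketch of plugging both templates into a query engine, assuming a VectorStoreIndex named index has already been built elsewhere (the query is illustrative):
query_engine = index.as_query_engine(
    text_qa_template=text_qa_template,
    refine_template=refine_template,
)
print(query_engine.query("Who is Joe Biden?"))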
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
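A hedged sketch of where this row is likely headed, wiring the DynamoDB-backed stores into a StorageContext; the table name is a placeholder, and the from_table_name constructors are assumed from the installed packages:
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore
from llama_index.vector_stores.dynamodb import DynamoDBVectorStore

TABLE_NAME = "llama-index-demo"  # hypothetical DynamoDB table
storage_context = StorageContext.from_defaults(
    docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
    index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
    vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)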
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, ) from llama_index.core import SummaryIndex from llama_index.core.response.notebook_utils import display_response from llama_index.llms.openai import OpenAI from llama_index.core.node_parser import SentenceSplitter get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.core import Document from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] llm = OpenAI(model="gpt-4") chunk_sizes = [128, 256, 512, 1024] nodes_list = [] vector_indices = [] for chunk_size in chunk_sizes: print(f"Chunk Size: {chunk_size}") splitter = SentenceSplitter(chunk_size=chunk_size) nodes = splitter.get_nodes_from_documents(docs) for node in nodes: node.metadata["chunk_size"] = chunk_size node.excluded_embed_metadata_keys = ["chunk_size"] node.excluded_llm_metadata_keys = ["chunk_size"] nodes_list.append(nodes) vector_index = VectorStoreIndex(nodes) vector_indices.append(vector_index) from llama_index.core.tools import RetrieverTool from llama_index.core.schema import IndexNode retriever_dict = {} retriever_nodes = [] for chunk_size, vector_index in zip(chunk_sizes, vector_indices): node_id = f"chunk_{chunk_size}" node = IndexNode( text=( "Retrieves relevant context from the Llama 2 paper (chunk size" f" {chunk_size})" ), index_id=node_id, ) retriever_nodes.append(node) retriever_dict[node_id] = vector_index.as_retriever() from llama_index.core.selectors import PydanticMultiSelector from llama_index.core.retrievers import RouterRetriever from llama_index.core.retrievers import RecursiveRetriever from llama_index.core import SummaryIndex summary_index =
SummaryIndex(retriever_nodes)
llama_index.core.SummaryIndex
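A hedged sketch of composing the per-chunk-size retrievers into one recursive retriever rooted at the summary index, reusing retriever_dict from the row (the query is illustrative):
retriever = RecursiveRetriever(
    root_id="root",
    retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = retriever.retrieve("Tell me about the key concepts for safety fine-tuning")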
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core.schema import TextNode from llama_index.core.indices.managed.types import ManagedIndexQueryMode from llama_index.indices.managed.vectara import VectaraIndex from llama_index.indices.managed.vectara import VectaraAutoRetriever from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo from llama_index.llms.openai import OpenAI nodes = [ TextNode( text=( "A pragmatic paleontologist touring an almost complete theme park on an island " + "in Central America is tasked with protecting a couple of kids after a power " + "failure causes the park's cloned dinosaurs to run loose." ), metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), TextNode( text=( "A thief who steals corporate secrets through the use of dream-sharing technology " + "is given the inverse task of planting an idea into the mind of a C.E.O., " + "but his tragic past may doom the project and his team to disaster." ), metadata={ "year": 2010, "director": "Christopher Nolan", "rating": 8.2, }, ), TextNode( text="Barbie suffers a crisis that leads her to question her world and her existence.", metadata={ "year": 2023, "director": "Greta Gerwig", "genre": "fantasy", "rating": 9.5, }, ),
TextNode( text=( "A cowboy doll is profoundly threatened and jealous when a new spaceman action " + "figure supplants him as top toy in a boy's bedroom." )
llama_index.core.schema.TextNode
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-langchain') get_ipython().system('pip install llama-index') from langchain.chat_models import ChatAnyscale, ChatOpenAI from llama_index.llms.langchain import LangChainLLM from llama_index.core import PromptTemplate llm = LangChainLLM(ChatOpenAI()) stream = await llm.astream(PromptTemplate("Hi, write a short story")) async for token in stream: print(token, end="") llm = LangChainLLM(ChatAnyscale()) stream = llm.stream(
PromptTemplate("Hi, Which NFL team have most Super Bowl wins")
llama_index.core.PromptTemplate
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.core.response.pprint_utils import pprint_response from llama_index.llms.openai import OpenAI from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core.query_engine import SubQuestionQueryEngine import os os.environ["OPENAI_API_KEY"] = "OPENAI_API_KEY" from llama_index.core import Settings Settings.llm = OpenAI(temperature=0.2, model="gpt-3.5-turbo") get_ipython().system("mkdir -p 'data/10q/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'") march_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_march_2022.pdf"] ).load_data() june_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index =
VectorStoreIndex.from_documents(march_2022)
llama_index.core.VectorStoreIndex.from_documents
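For context, a hedged sketch of the parallel indices and per-quarter engines this row presumably builds next:
june_index = VectorStoreIndex.from_documents(june_2022)
sept_index = VectorStoreIndex.from_documents(sept_2022)
march_engine = march_index.as_query_engine(similarity_top_k=3)
june_engine = june_index.as_query_engine(similarity_top_k=3)
sept_engine = sept_index.as_query_engine(similarity_top_k=3)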
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader("../data/paul_graham") docs = reader.load_data() import os from llama_index.core import ( StorageContext, VectorStoreIndex, load_index_from_storage, ) if not os.path.exists("storage"): index = VectorStoreIndex.from_documents(docs) index.set_index_id("vector_index") index.storage_context.persist("./storage") else: storage_context = StorageContext.from_defaults(persist_dir="storage") index =
load_index_from_storage(storage_context, index_id="vector_index")
llama_index.core.load_index_from_storage
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-txtai') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import txtai txtai_index = txtai.ann.ANNFactory.create({"backend": "numpy"}) from llama_index.core import ( SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext, ) from llama_index.vector_stores.txtai import TxtaiVectorStore from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store = TxtaiVectorStore(txtai_index=txtai_index) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.agent import ( CustomSimpleAgentWorker, Task, AgentChatResponse, ) from typing import Dict, Any, List, Tuple, Optional from llama_index.core.tools import BaseTool, QueryEngineTool from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.query_engine import RouterQueryEngine from llama_index.core import ChatPromptTemplate, PromptTemplate from llama_index.core.selectors import PydanticSingleSelector from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.llms import ChatMessage, MessageRole DEFAULT_PROMPT_STR = """ Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \ a modified question that will not trigger the error. Examples of modified questions: - The question itself is modified to elicit a non-erroneous response - The question is augmented with context that will help the downstream system better answer the question. - The question is augmented with examples of negative responses, or other negative questions. An error means that either an exception has triggered, or the response is completely irrelevant to the question. Please return the evaluation of the response in the following JSON format. """ def get_chat_prompt_template( system_prompt: str, current_reasoning: List[Tuple[str, str]] ) -> ChatPromptTemplate: system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt) messages = [system_msg] for raw_msg in current_reasoning: if raw_msg[0] == "user": messages.append( ChatMessage(role=MessageRole.USER, content=raw_msg[1]) ) else: messages.append( ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1]) ) return ChatPromptTemplate(message_templates=messages) class ResponseEval(BaseModel): """Evaluation of whether the response has an error.""" has_error: bool = Field( ..., description="Whether the response has an error." ) new_question: str = Field(..., description="The suggested new question.") explanation: str = Field( ..., description=( "The explanation for the error as well as for the new question." "Can include the direct stack trace as well." ), ) from llama_index.core.bridge.pydantic import PrivateAttr class RetryAgentWorker(CustomSimpleAgentWorker): """Agent worker that adds a retry layer on top of a router. Continues iterating until there are no errors / the task is done. """ prompt_str: str = Field(default=DEFAULT_PROMPT_STR) max_iterations: int =
Field(default=10)
llama_index.core.bridge.pydantic.Field
import openai openai.api_key = "sk-key" from llama_index.agent import OpenAIAgent from llama_index.tools.code_interpreter.base import CodeInterpreterToolSpec code_spec = CodeInterpreterToolSpec() tools = code_spec.to_tool_list() agent =
OpenAIAgent.from_tools(tools, verbose=True)
llama_index.agent.OpenAIAgent.from_tools
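A minimal hedged sketch of exercising the completed code-interpreter agent (the task text is illustrative):
response = agent.chat(
    "Write and run Python code that computes the 10th Fibonacci number."
)
print(response)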
get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb') get_ipython().system('pip install llama-index-vector-stores-duckdb') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.duckdb import DuckDBVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import os import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) vector_store =
DuckDBVectorStore.from_local("./persist/pg.duckdb")
llama_index.vector_stores.duckdb.DuckDBVectorStore.from_local
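A hedged sketch of reopening an index from the persisted DuckDB store (the query is illustrative):
index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = index.as_query_engine()
print(query_engine.query("What did the author do growing up?"))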
get_ipython().run_line_magic('pip', 'install llama-index-llms-sagemaker-endpoint') get_ipython().system(' pip install llama-index') ENDPOINT_NAME = "<-YOUR-ENDPOINT-NAME->" from llama_index.llms.sagemaker_endpoint import SageMakerLLM AWS_ACCESS_KEY_ID = "<-YOUR-AWS-ACCESS-KEY-ID->" AWS_SECRET_ACCESS_KEY = "<-YOUR-AWS-SECRET-ACCESS-KEY->" AWS_SESSION_TOKEN = "<-YOUR-AWS-SESSION-TOKEN->" REGION_NAME = "<-YOUR-ENDPOINT-REGION-NAME->" llm = SageMakerLLM( endpoint_name=ENDPOINT_NAME, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, aws_session_token=AWS_SESSION_TOKEN, aws_region_name=REGION_NAME, ) from llama_index.llms.sagemaker_endpoint import SageMakerLLM ENDPOINT_NAME = "<-YOUR-ENDPOINT-NAME->" PROFILE_NAME = "<-YOUR-PROFILE-NAME->" llm = SageMakerLLM( endpoint_name=ENDPOINT_NAME, profile_name=PROFILE_NAME ) # Omit the profile name to use the default profile resp = llm.complete( "Paul Graham is ", formatted=True ) # formatted=True to avoid adding system prompt print(resp) from llama_index.core.llms import ChatMessage messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ),
ChatMessage(role="user", content="What is your name")
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from os import getenv from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.opensearch import ( OpensearchVectorStore, OpensearchVectorClient, ) from llama_index.core import VectorStoreIndex, StorageContext endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200") idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() text_field = "content" embedding_field = "embedding" client = OpensearchVectorClient( endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field ) vector_store =
OpensearchVectorStore(client)
llama_index.vector_stores.opensearch.OpensearchVectorStore
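A hedged sketch of the likely next step, indexing the loaded documents through the OpenSearch-backed store:
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)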
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.llms.openai import OpenAI from llama_index.core.tools import QueryEngineTool, ToolMetadata llm_35 = OpenAI(model="gpt-3.5-turbo-0613", temperature=0.3) llm_4 =
OpenAI(model="gpt-4-0613", temperature=0.3)
llama_index.llms.openai.OpenAI
get_ipython().system('pip install llama-index-multi-modal-llms-anthropic') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install matplotlib') import os os.environ["ANTHROPIC_API_KEY"] = "" # Your ANTHROPIC API key here from PIL import Image import matplotlib.pyplot as plt img = Image.open("../data/images/prometheus_paper_card.png") plt.imshow(img) from llama_index.core import SimpleDirectoryReader from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal image_documents = SimpleDirectoryReader( input_files=["../data/images/prometheus_paper_card.png"] ).load_data() anthropic_mm_llm = AnthropicMultiModal(max_tokens=300) response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) print(response) from PIL import Image import requests from io import BytesIO import matplotlib.pyplot as plt from llama_index.core.multi_modal_llms.generic_utils import load_image_urls image_urls = [ "https://venturebeat.com/wp-content/uploads/2024/03/Screenshot-2024-03-04-at-12.49.41%E2%80%AFAM.png", ] img_response = requests.get(image_urls[0]) img = Image.open(BytesIO(img_response.content)) plt.imshow(img) image_url_documents = load_image_urls(image_urls) response = anthropic_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_url_documents, ) print(response) from llama_index.core import SimpleDirectoryReader image_documents = SimpleDirectoryReader( input_files=["../data/images/ark_email_sample.PNG"] ).load_data() from PIL import Image import matplotlib.pyplot as plt img = Image.open("../data/images/ark_email_sample.PNG") plt.imshow(img) from pydantic import BaseModel from typing import List class TickerInfo(BaseModel): """List of ticker info.""" direction: str ticker: str company: str shares_traded: int percent_of_total_etf: float class TickerList(BaseModel): """List of stock tickers.""" fund: str tickers: List[TickerInfo] from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ Can you get the stock information in the image \ and return the answer? Pick just one fund. Make sure the answer is a JSON format corresponding to a Pydantic schema. The Pydantic schema is given below. """ anthropic_mm_llm =
AnthropicMultiModal(max_tokens=300)
llama_index.multi_modal_llms.anthropic.AnthropicMultiModal
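A hedged sketch of completing the structured-extraction program this row sets up, mirroring the Gemini rows earlier in the file:
llm_program = MultiModalLLMCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(TickerList),
    image_documents=image_documents,
    prompt_template_str=prompt_template_str,
    multi_modal_llm=anthropic_mm_llm,
    verbose=True,
)
response = llm_program()
print(response)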
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-elasticsearch') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.elasticsearch import ElasticsearchStore from llama_index.core.schema import TextNode nodes = [
TextNode( text=( "A bunch of scientists bring back dinosaurs and mayhem breaks" " loose" )
llama_index.core.schema.TextNode
get_ipython().run_line_magic('pip', 'install llama-index-llms-ollama') get_ipython().system('pip install llama-index') from llama_index.llms.ollama import Ollama llm = Ollama(model="llama2", request_timeout=30.0) resp = llm.complete("Who is Paul Graham?") print(resp) from llama_index.core.llms import ChatMessage messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ),
ChatMessage(role="user", content="What is your name")
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') get_ipython().system('pip install llama-index chromadb --quiet') get_ipython().system('pip install chromadb==0.4.17') get_ipython().system('pip install sentence-transformers') get_ipython().system('pip install pydantic==1.10.11') get_ipython().system('pip install open-clip-torch') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.chroma import ChromaVectorStore from llama_index.core import StorageContext from llama_index.embeddings.huggingface import HuggingFaceEmbedding from IPython.display import Markdown, display import chromadb import os import openai OPENAI_API_KEY = "" openai.api_key = OPENAI_API_KEY os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY import requests def get_wikipedia_images(title): response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "imageinfo", "iiprop": "url|dimensions|mime", "generator": "images", "gimlimit": "50", }, ).json() image_urls = [] for page in response["query"]["pages"].values(): if page["imageinfo"][0]["url"].endswith(".jpg") or page["imageinfo"][ 0 ]["url"].endswith(".png"): image_urls.append(page["imageinfo"][0]["url"]) return image_urls from pathlib import Path import urllib.request image_uuid = 0 MAX_IMAGES_PER_WIKI = 20 wiki_titles = { "Tesla Model X", "Pablo Picasso", "Rivian", "The Lord of the Rings", "The Matrix", "The Simpsons", } data_path = Path("mixed_wiki") if not data_path.exists(): Path.mkdir(data_path) for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) images_per_wiki = 0 try: list_img_urls = get_wikipedia_images(title) for url in list_img_urls: if url.endswith(".jpg") or url.endswith(".png"): image_uuid += 1 urllib.request.urlretrieve( url, data_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki >= MAX_IMAGES_PER_WIKI: break except Exception: print("No images found for Wikipedia page: " + title) continue from chromadb.utils.embedding_functions import OpenCLIPEmbeddingFunction embedding_function = OpenCLIPEmbeddingFunction() from llama_index.core.indices import MultiModalVectorStoreIndex from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import SimpleDirectoryReader, StorageContext from chromadb.utils.data_loaders import ImageLoader image_loader = ImageLoader() chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection( "multimodal_collection", embedding_function=embedding_function, data_loader=image_loader, ) documents = SimpleDirectoryReader("./mixed_wiki/").load_data() vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from os import getenv from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.opensearch import ( OpensearchVectorStore, OpensearchVectorClient, ) from llama_index.core import VectorStoreIndex, StorageContext endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200") idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
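A hedged sketch of where this row is headed: loading the documents, parsing nodes, and building a BM25 retriever over them (the query is illustrative):
from llama_index.core.node_parser import SentenceSplitter
from llama_index.retrievers.bm25 import BM25Retriever

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
nodes = SentenceSplitter(chunk_size=1024).get_nodes_from_documents(documents)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
results = retriever.retrieve("What happened at Viaweb?")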
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install rank-bm25 pymupdf') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama-index') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
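A minimal hedged sketch of using the completed loader on the PDF fetched above:
docs = loader.load(file_path="./data/llama2.pdf")
print(f"Loaded {len(docs)} document chunks")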
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index = VectorStoreIndex(base_nodes) query_engine = index.as_query_engine(similarity_top_k=2) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.core.node_parser import SimpleNodeParser dataset_generator = DatasetGenerator( base_nodes[:20], llm=OpenAI(model="gpt-4"), show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) import random full_qr_pairs = eval_dataset.qr_pairs num_exemplars = 2 num_eval = 40 exemplar_qr_pairs = random.sample(full_qr_pairs, num_exemplars) eval_qr_pairs = random.sample(full_qr_pairs, num_eval) len(exemplar_qr_pairs) import numpy as np from llama_index.core.evaluation.eval_utils import get_responses from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner evaluator_c = CorrectnessEvaluator(llm=OpenAI(model="gpt-3.5-turbo")) evaluator_dict = { "correctness": evaluator_c, } batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True) async def get_correctness(query_engine, eval_qa_pairs, batch_runner): eval_qs = [q for q, _ in eval_qa_pairs] eval_answers = [a for _, a in eval_qa_pairs] pred_responses = get_responses(eval_qs, query_engine, show_progress=True) eval_results = await batch_runner.aevaluate_responses( eval_qs, responses=pred_responses, reference=eval_answers ) avg_correctness = np.array( [r.score for r in eval_results["correctness"]] ).mean() return avg_correctness QA_PROMPT_KEY = "response_synthesizer:text_qa_template" from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate llm = OpenAI(model="gpt-3.5-turbo") qa_tmpl_str = ( "---------------------\n" "{context_str}\n" "---------------------\n" "Query: {query_str}\n" "Answer: " ) qa_tmpl = PromptTemplate(qa_tmpl_str) print(query_engine.get_prompts()[QA_PROMPT_KEY].get_template()) meta_tmpl_str = """\ Your task is to generate the instruction <INS>. Below are some previous instructions with their scores. The score ranges from 1 to 5. {prev_instruction_score_pairs} Below we show the task. The <INS> tag is prepended to the below prompt template, e.g. as follows: ``` <INS> {prompt_tmpl_str} ``` The prompt template contains template variables.
Given an input set of template variables, the formatted prompt is then given to an LLM to get an output. Some examples of template variable inputs and expected outputs are given below to illustrate the task. **NOTE**: These do NOT represent the \ entire evaluation dataset. {qa_pairs_str} We run every input in an evaluation dataset through an LLM. If the LLM-generated output doesn't match the expected output, we mark it as wrong (score 0). A correct answer has a score of 1. The final "score" for an instruction is the average of scores across an evaluation dataset. Write your new instruction (<INS>) that is different from the old ones and has a score as high as possible. Instruction (<INS>): \ """ meta_tmpl = PromptTemplate(meta_tmpl_str) from copy import deepcopy def format_meta_tmpl( prev_instr_score_pairs, prompt_tmpl_str, qa_pairs, meta_tmpl, ): """Call meta-prompt to generate new instruction.""" pair_str_list = [ f"Instruction (<INS>):\n{instr}\nScore:\n{score}" for instr, score in prev_instr_score_pairs ] full_instr_pair_str = "\n\n".join(pair_str_list) qa_str_list = [ f"query_str:\n{query_str}\nAnswer:\n{answer}" for query_str, answer in qa_pairs ] full_qa_pair_str = "\n\n".join(qa_str_list) fmt_meta_tmpl = meta_tmpl.format( prev_instruction_score_pairs=full_instr_pair_str, prompt_tmpl_str=prompt_tmpl_str, qa_pairs_str=full_qa_pair_str, ) return fmt_meta_tmpl def get_full_prompt_template(cur_instr: str, prompt_tmpl): tmpl_str = prompt_tmpl.get_template() new_tmpl_str = cur_instr + "\n" + tmpl_str new_tmpl =
PromptTemplate(new_tmpl_str)
llama_index.core.PromptTemplate
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().system('pip install llama-index llama-hub') from llama_index.readers.wikipedia import WikipediaReader loader =
WikipediaReader()
llama_index.readers.wikipedia.WikipediaReader
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate') import json import pandas as pd from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.evaluation.tonic_validate import ( AnswerConsistencyEvaluator, AnswerSimilarityEvaluator, AugmentationAccuracyEvaluator, AugmentationPrecisionEvaluator, RetrievalPrecisionEvaluator, TonicValidateEvaluator, ) question = "What makes Sam Altman a good founder?" reference_answer = "He is smart and has a great force of will." llm_answer = "He is a good founder because he is smart." retrieved_context_list = [ "Sam Altman is a good founder. He is very smart.", "What makes Sam Altman such a good founder is his great force of will.", ] answer_similarity_evaluator = AnswerSimilarityEvaluator() score = await answer_similarity_evaluator.aevaluate( question, llm_answer, retrieved_context_list, reference_response=reference_answer, ) score answer_consistency_evaluator = AnswerConsistencyEvaluator() score = await answer_consistency_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score augmentation_accuracy_evaluator =
AugmentationAccuracyEvaluator()
llama_index.evaluation.tonic_validate.AugmentationAccuracyEvaluator
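The remaining evaluator follows the same calling convention as the two calls already shown in the prompt; a sketch:

score = await augmentation_accuracy_evaluator.aevaluate(
    question, llm_answer, retrieved_context_list
)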
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import LLMRerank from llama_index.llms.openai import OpenAI from IPython.display import Markdown, display from llama_index.core import Settings Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.chunk_size = 512 documents =
SimpleDirectoryReader("../../../examples/gatsby/data")
llama_index.core.SimpleDirectoryReader
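A plausible continuation, assuming the reader is consumed with `.load_data()` as in the other examples in this dataset:

documents = SimpleDirectoryReader("../../../examples/gatsby/data").load_data()
index = VectorStoreIndex.from_documents(documents)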
get_ipython().run_line_magic('pip', 'install llama-index-readers-notion') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) get_ipython().system('pip install llama-index') from llama_index.core import SummaryIndex from llama_index.readers.notion import NotionPageReader from IPython.display import Markdown, display import os integration_token = os.getenv("NOTION_INTEGRATION_TOKEN") page_ids = ["<page_id>"] documents = NotionPageReader(integration_token=integration_token).load_data( page_ids=page_ids ) index = SummaryIndex.from_documents(documents) query_engine = index.as_query_engine() response = query_engine.query("<query_text>") display(Markdown(f"<b>{response}</b>")) database_id = "<database-id>" documents = NotionPageReader(integration_token=integration_token).load_data( database_id=database_id ) print(documents) index =
SummaryIndex.from_documents(documents)
llama_index.core.SummaryIndex.from_documents
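The database-backed index can then be queried exactly like the page-based index earlier in the same example; a sketch:

query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
display(Markdown(f"<b>{response}</b>"))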
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index llama-hub') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') domain = "docs.llamaindex.ai" docs_url = "https://docs.llamaindex.ai/en/latest/" get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}') from llama_index.readers.file import UnstructuredReader reader = UnstructuredReader() from pathlib import Path all_files_gen = Path("./docs.llamaindex.ai/").rglob("*") all_files = [f.resolve() for f in all_files_gen] all_html_files = [f for f in all_files if f.suffix.lower() == ".html"] len(all_html_files) from llama_index.core import Document doc_limit = 100 docs = [] for idx, f in enumerate(all_html_files): if idx > doc_limit: break print(f"Idx {idx}/{len(all_html_files)}") loaded_docs = reader.load_data(file=f, split_documents=True) start_idx = 72 loaded_doc = Document( text="\n\n".join([d.get_content() for d in loaded_docs[72:]]), metadata={"path": str(f)}, ) print(loaded_doc.metadata["path"]) docs.append(loaded_doc) import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.agent.openai import OpenAIAgent from llama_index.core import ( load_index_from_storage, StorageContext, VectorStoreIndex, ) from llama_index.core import SummaryIndex from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core.node_parser import SentenceSplitter import os from tqdm.notebook import tqdm import pickle async def build_agent_per_doc(nodes, file_base): print(file_base) vi_out_path = f"./data/llamaindex_docs/{file_base}" summary_out_path = f"./data/llamaindex_docs/{file_base}_summary.pkl" if not os.path.exists(vi_out_path): Path("./data/llamaindex_docs/").mkdir(parents=True, exist_ok=True) vector_index = VectorStoreIndex(nodes) vector_index.storage_context.persist(persist_dir=vi_out_path) else: vector_index = load_index_from_storage( StorageContext.from_defaults(persist_dir=vi_out_path), ) summary_index = SummaryIndex(nodes) vector_query_engine = vector_index.as_query_engine(llm=llm) summary_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", llm=llm ) if not os.path.exists(summary_out_path): Path(summary_out_path).parent.mkdir(parents=True, exist_ok=True) summary = str( await summary_query_engine.aquery( "Extract a concise 1-2 line summary of this document" ) ) pickle.dump(summary, open(summary_out_path, "wb")) else: summary = pickle.load(open(summary_out_path, "rb")) query_engine_tools = [ QueryEngineTool( query_engine=vector_query_engine, metadata=ToolMetadata( name=f"vector_tool_{file_base}", description=f"Useful for questions related to specific facts", ), ), QueryEngineTool( query_engine=summary_query_engine, metadata=ToolMetadata( 
name=f"summary_tool_{file_base}", description=f"Useful for summarization questions", ), ), ] function_llm = OpenAI(model="gpt-4") agent = OpenAIAgent.from_tools( query_engine_tools, llm=function_llm, verbose=True, system_prompt=f"""\ You are a specialized agent designed to answer queries about the `{file_base}.html` part of the LlamaIndex docs. You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\ """, ) return agent, summary async def build_agents(docs): node_parser =
SentenceSplitter()
llama_index.core.node_parser.SentenceSplitter
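A hedged sketch of how `build_agents` plausibly continues, reusing `build_agent_per_doc` from the same prompt; keying agents by file stem is an assumption.

# sketch of the body of build_agents (assumptions noted above)
agents_dict = {}
extra_info_dict = {}
for doc in tqdm(docs):
    nodes = node_parser.get_nodes_from_documents([doc])
    file_base = Path(doc.metadata["path"]).stem  # assumption: one agent per file
    agent, summary = await build_agent_per_doc(nodes, file_base)
    agents_dict[file_base] = agent
    extra_info_dict[file_base] = {"summary": summary, "nodes": nodes}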
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().system('pip install llama-index gradientai -q') import os from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.finetuning import GradientFinetuneEngine os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY") os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>" from pydantic import BaseModel class Album(BaseModel): """Data model for an album.""" name: str artist: str from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler from llama_index.llms.openai import OpenAI from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser openai_handler = LlamaDebugHandler() openai_callback = CallbackManager([openai_handler]) openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback) gradient_handler = LlamaDebugHandler() gradient_callback = CallbackManager([gradient_handler]) base_model_slug = "llama2-7b-chat" gradient_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=300, callback_manager=gradient_callback, is_chat_model=True, ) from llama_index.core.llms import LLMMetadata prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ """ openai_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=openai_llm, verbose=True, ) gradient_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=gradient_llm, verbose=True, ) response = openai_program(movie_name="The Shining") print(str(response)) tmp = openai_handler.get_llm_inputs_outputs() print(tmp[0][0].payload["messages"][0]) response = gradient_program(movie_name="The Shining") print(str(response)) tmp = gradient_handler.get_llm_inputs_outputs() print(tmp[0][0].payload["messages"][0]) from llama_index.core.program import LLMTextCompletionProgram from pydantic import BaseModel from llama_index.llms.openai import OpenAI from llama_index.core.callbacks import GradientAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.output_parsers import PydanticOutputParser from typing import List class Song(BaseModel): """Data model for a song.""" title: str length_seconds: int class Album(BaseModel): """Data model for an album.""" name: str artist: str songs: List[Song] finetuning_handler = GradientAIFineTuningHandler() callback_manager =
CallbackManager([finetuning_handler])
llama_index.core.callbacks.CallbackManager
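A plausible next step: attach the manager to an LLM so that its calls are captured for fine-tuning (the model choice here is an assumption):

llm_gpt4 = OpenAI(model="gpt-4", callback_manager=callback_manager)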
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko') get_ipython().system('pip install llama-index') import os os.environ["KONKO_API_KEY"] = "<your-api-key>" from llama_index.llms.konko import Konko from llama_index.core.llms import ChatMessage llm = Konko(model="meta-llama/llama-2-13b-chat") messages = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([messages]) print(resp) import os os.environ["OPENAI_API_KEY"] = "<your-api-key>" llm = Konko(model="gpt-3.5-turbo") message = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([message]) print(resp) message = ChatMessage(role="user", content="Tell me a story in 250 words") resp = llm.stream_chat([message], max_tokens=1000) for r in resp: print(r.delta, end="") llm =
Konko(model="numbersstation/nsql-llama-2-7b", max_tokens=100)
llama_index.llms.konko.Konko
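nsql is a completion-style model, so a plausible follow-up uses `complete` rather than `chat`; the table schema and question are placeholders.

text = "CREATE TABLE stadium (stadium_id number, name text, capacity number)\n\n-- how many stadiums are there?\nSELECT"
resp = llm.complete(text)
print(resp)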
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser =
SentenceSplitter(chunk_size=1024)
llama_index.core.node_parser.SentenceSplitter
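The splitter is then applied as in the other llama2.pdf examples in this dataset:

base_nodes = node_parser.get_nodes_from_documents(docs)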
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks') get_ipython().run_line_magic('pip', 'install llama-index') from llama_index.llms.fireworks import Fireworks resp =
Fireworks()
llama_index.llms.fireworks.Fireworks
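A plausible expansion of the truncated call; the model slug is an assumption based on Fireworks' account/model naming scheme.

llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
resp = llm.complete("Paul Graham is ")
print(resp)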
get_ipython().run_line_magic('pip', 'install llama-index-readers-pinecone') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) api_key = "<api_key>" get_ipython().system('pip install llama-index') from llama_index.readers.pinecone import PineconeReader reader =
PineconeReader(api_key=api_key, environment="us-west1-gcp")
llama_index.readers.pinecone.PineconeReader
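A hedged sketch of the reader's typical usage; the index name, id-to-text map, and query vector are all placeholders, and the keyword names reflect the legacy PineconeReader signature as best recalled:

id_to_text_map = {"id1": "text blob 1", "id2": "text blob 2"}  # placeholder
query_vector = [0.1, 0.2, 0.3]  # placeholder embedding
documents = reader.load_data(
    index_name="quickstart",
    id_to_text_map=id_to_text_map,
    top_k=3,
    vector=query_vector,
)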
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks import LlamaDebugHandler from llama_index.callbacks.wandb import WandbCallbackHandler from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext, ) from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-4", temperature=0) import llama_index.core from llama_index.core import set_global_handler set_global_handler("wandb", run_args={"project": "llamaindex"}) wandb_callback = llama_index.core.global_handler llama_debug =
LlamaDebugHandler(print_trace_on_end=True)
llama_index.core.callbacks.LlamaDebugHandler
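A plausible continuation that registers both handlers globally, mirroring the usual callback setup:

callback_manager = CallbackManager([llama_debug, wandb_callback])
Settings.callback_manager = callback_manager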
get_ipython().run_line_magic('pip', 'install llama-index-readers-make-com') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.readers.make_com import MakeWrapper get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic') get_ipython().system('pip install llama-index') from llama_index.llms.anthropic import Anthropic from llama_index.core import Settings tokenizer = Anthropic().tokenizer Settings.tokenizer = tokenizer import os os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY" from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-3-opus-20240229") resp = llm.complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.anthropic import Anthropic messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ),
ChatMessage(role="user", content="Tell me a story")
llama_index.core.llms.ChatMessage
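The messages are then sent with `chat`, as in the completion example above:

resp = llm.chat(messages)
print(resp)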
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader(input_files=["pg_essay.txt"]) documents = reader.load_data() from llama_index.core.query_pipeline import ( QueryPipeline, InputComponent, ArgPackComponent, ) from typing import Dict, Any, List, Optional from llama_index.core.llama_pack import BaseLlamaPack from llama_index.core.llms import LLM from llama_index.llms.openai import OpenAI from llama_index.core import Document, VectorStoreIndex from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core.node_parser import SentenceSplitter llm = OpenAI(model="gpt-3.5-turbo") chunk_sizes = [128, 256, 512, 1024] query_engines = {} for chunk_size in chunk_sizes: splitter = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=0) nodes = splitter.get_nodes_from_documents(documents) vector_index = VectorStoreIndex(nodes) query_engines[str(chunk_size)] = vector_index.as_query_engine(llm=llm) p =
QueryPipeline(verbose=True)
llama_index.core.query_pipeline.QueryPipeline
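A minimal, hedged sketch of wiring one of the per-chunk-size engines into the pipeline; the full example presumably fans out to all four engines and joins them with ArgPackComponent and TreeSummarize, but only a single link is shown here:

p.add_modules({"input": InputComponent(), "qe_256": query_engines["256"]})
p.add_link("input", "qe_256")
output = p.run(input="What did the author do growing up?")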
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import LLMRerank from llama_index.llms.openai import OpenAI from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/10k/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'") from llama_index.core import Settings Settings.llm =
OpenAI(temperature=0, model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True) from llama_index.vector_stores.pinecone import PineconeVectorStore vector_store = PineconeVectorStore(pinecone_index=pinecone_index) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
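The load call mirrors the PyMuPDFReader usage elsewhere in this dataset; a sketch:

documents = loader.load(file_path="./data/llama2.pdf")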
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate') import os OPENAI_API_TOKEN = "sk-<your-openai-api-token>" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN REPLICATE_API_TOKEN = "" # Your Replicate API token here os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN from pathlib import Path input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core import SimpleDirectoryReader image_documents = SimpleDirectoryReader("./restaurant_images").load_data() openai_mm_llm = OpenAIMultiModal( model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1000 ) from PIL import Image import matplotlib.pyplot as plt imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ can you summarize what is in the image\ and return the answer with json format \ """ openai_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=openai_mm_llm, verbose=True, ) response = openai_program() for res in response: print(res) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) prompt_template_str = """\ can you summarize what is in the image\ and return the answer with json format \ """ def pydantic_replicate( model_name, output_class, image_documents, prompt_template_str ): mm_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS[model_name], temperature=0.1, max_new_tokens=1000, ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_class), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_llm, verbose=True, ) response = llm_program() print(f"Model: {model_name}") for res in response: print(res) pydantic_replicate("fuyu-8b", Restaurant, image_documents, prompt_template_str) pydantic_replicate( "llava-13b", Restaurant, image_documents, prompt_template_str ) pydantic_replicate( "minigpt-4", Restaurant, image_documents, prompt_template_str ) pydantic_replicate("cogvlm", Restaurant, image_documents, prompt_template_str) input_image_path = Path("amazon_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1p1Y1qAoM68eC4sAvvHaiJyPhdUZS0Gqb" -O ./amazon_images/amazon.png') from pydantic import BaseModel class Product(BaseModel): """Data model for an Amazon product.""" title: str category: str discount: str price: str rating: str review: str description: str inventory: str imageUrl = "./amazon_images/amazon.png" image = Image.open(imageUrl).convert("RGB") 
plt.figure(figsize=(16, 5)) plt.imshow(image) amazon_image_documents = SimpleDirectoryReader("./amazon_images").load_data() prompt_template_str = """\ can you summarize what is in the image\ and return the answer with json format \ """ openai_program_amazon = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Product), image_documents=amazon_image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=openai_mm_llm, verbose=True, ) response = openai_program_amazon() for res in response: print(res) pydantic_replicate( "fuyu-8b", Product, amazon_image_documents, prompt_template_str ) pydantic_replicate( "minigpt-4", Product, amazon_image_documents, prompt_template_str ) pydantic_replicate( "cogvlm", Product, amazon_image_documents, prompt_template_str ) pydantic_replicate( "llava-13b", Product, amazon_image_documents, prompt_template_str ) input_image_path = Path("instagram_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=12ZpBBFkYu-jzz1iz356U5kMikn4uN9ww" -O ./instagram_images/jordan.png') from pydantic import BaseModel class InsAds(BaseModel): """Data model for an Instagram ad.""" account: str brand: str product: str category: str discount: str price: str comments: str review: str description: str from PIL import Image import matplotlib.pyplot as plt imageUrl = "./instagram_images/jordan.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) ins_image_documents =
SimpleDirectoryReader("./instagram_images")
llama_index.core.SimpleDirectoryReader
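The Instagram documents are then plausibly fed through the same structured-output helpers defined earlier in this example:

ins_image_documents = SimpleDirectoryReader("./instagram_images").load_data()
pydantic_replicate("llava-13b", InsAds, ins_image_documents, prompt_template_str)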
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-0613") data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(data)
llama_index.core.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-together') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') domain = "docs.llamaindex.ai" docs_url = "https://docs.llamaindex.ai/en/latest/" get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}') from llama_index.readers.file import UnstructuredReader from pathlib import Path from llama_index.llms.openai import OpenAI from llama_index.core import Document reader = UnstructuredReader() all_html_files = [ "docs.llamaindex.ai/en/latest/index.html", "docs.llamaindex.ai/en/latest/contributing/contributing.html", "docs.llamaindex.ai/en/latest/understanding/understanding.html", "docs.llamaindex.ai/en/latest/understanding/using_llms/using_llms.html", "docs.llamaindex.ai/en/latest/understanding/using_llms/privacy.html", "docs.llamaindex.ai/en/latest/understanding/loading/llamahub.html", "docs.llamaindex.ai/en/latest/optimizing/production_rag.html", "docs.llamaindex.ai/en/latest/module_guides/models/llms.html", ] doc_limit = 10 docs = [] for idx, f in enumerate(all_html_files): if idx > doc_limit: break print(f"Idx {idx}/{len(all_html_files)}") loaded_docs = reader.load_data(file=f, split_documents=True) start_idx = 64 loaded_doc = Document( id_=str(f), text="\n\n".join([d.get_content() for d in loaded_docs[start_idx:]]), metadata={"path": str(f)}, ) print(str(f)) docs.append(loaded_doc) from llama_index.embeddings.together import TogetherEmbedding from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI api_key = "<api_key>" embed_model = TogetherEmbedding( model_name="togethercomputer/m2-bert-80M-32k-retrieval", api_key=api_key ) llm = OpenAI(temperature=0, model="gpt-3.5-turbo") from llama_index.core.storage.docstore import SimpleDocumentStore for doc in docs: embedding = embed_model.get_text_embedding(doc.get_content()) doc.embedding = embedding docstore = SimpleDocumentStore() docstore.add_documents(docs) from llama_index.core.schema import IndexNode from llama_index.core import ( load_index_from_storage, StorageContext, VectorStoreIndex, ) from llama_index.core.node_parser import SentenceSplitter from llama_index.core import SummaryIndex from llama_index.core.retrievers import RecursiveRetriever import os from tqdm.notebook import tqdm import pickle def build_index(docs, out_path: str = "storage/chunk_index"): nodes = [] splitter = SentenceSplitter(chunk_size=512, chunk_overlap=70) for idx, doc in enumerate(tqdm(docs)): cur_nodes = splitter.get_nodes_from_documents([doc]) for cur_node in cur_nodes: file_path = doc.metadata["path"] new_node = IndexNode( text=cur_node.text or "None", index_id=str(file_path), metadata=doc.metadata ) nodes.append(new_node) print("num nodes: " + str(len(nodes))) if not os.path.exists(out_path): index = VectorStoreIndex(nodes, embed_model=embed_model) index.set_index_id("simple_index") index.storage_context.persist(f"./{out_path}") else: storage_context = StorageContext.from_defaults( persist_dir=f"./{out_path}" ) index = load_index_from_storage( storage_context, index_id="simple_index", embed_model=embed_model ) return index index = build_index(docs) from llama_index.core.retrievers import BaseRetriever from llama_index.core.indices.query.embedding_utils 
import get_top_k_embeddings from llama_index.core import QueryBundle from llama_index.core.schema import NodeWithScore from typing import List, Any, Optional class HybridRetriever(BaseRetriever): """Hybrid retriever.""" def __init__( self, vector_index, docstore, similarity_top_k: int = 2, out_top_k: Optional[int] = None, alpha: float = 0.5, **kwargs: Any, ) -> None: """Init params.""" super().__init__(**kwargs) self._vector_index = vector_index self._embed_model = vector_index._embed_model self._retriever = vector_index.as_retriever( similarity_top_k=similarity_top_k ) self._out_top_k = out_top_k or similarity_top_k self._docstore = docstore self._alpha = alpha def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: """Retrieve nodes given query.""" nodes = self._retriever.retrieve(query_bundle.query_str) docs = [self._docstore.get_document(n.node.index_id) for n in nodes] doc_embeddings = [d.embedding for d in docs] query_embedding = self._embed_model.get_query_embedding( query_bundle.query_str ) doc_similarities, doc_idxs = get_top_k_embeddings( query_embedding, doc_embeddings ) result_tups = [] for doc_idx, doc_similarity in zip(doc_idxs, doc_similarities): node = nodes[doc_idx] full_similarity = (self._alpha * node.score) + ( (1 - self._alpha) * doc_similarity ) print( f"Doc {doc_idx} (node score, doc similarity, full similarity): {(node.score, doc_similarity, full_similarity)}" ) result_tups.append((full_similarity, node)) result_tups = sorted(result_tups, key=lambda x: x[0], reverse=True) for full_score, node in result_tups: node.score = full_score return [n for _, n in result_tups][:self._out_top_k] top_k = 10 out_top_k = 3 hybrid_retriever = HybridRetriever( index, docstore, similarity_top_k=top_k, out_top_k=3, alpha=0.5 ) base_retriever = index.as_retriever(similarity_top_k=out_top_k) def show_nodes(nodes, out_len: int = 200): for idx, n in enumerate(nodes): print(f"\n\n >>>>>>>>>>>> ID {n.id_}: {n.metadata['path']}") print(n.get_content()[:out_len]) query_str = "Tell me more about the LLM interface and where they're used" nodes = hybrid_retriever.retrieve(query_str) show_nodes(nodes) base_nodes = base_retriever.retrieve(query_str) show_nodes(base_nodes) from llama_index.core.query_engine import RetrieverQueryEngine query_engine =
RetrieverQueryEngine(hybrid_retriever)
llama_index.core.query_engine.RetrieverQueryEngine
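The assembled engine can be exercised on the same query used for the retriever comparison; a sketch:

response = query_engine.query(query_str)
print(str(response))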
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lantern') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install psycopg2-binary llama-index asyncpg') from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex from llama_index.vector_stores.lantern import LanternVectorStore import textwrap import openai import os os.environ["OPENAI_API_KEY"] = "<your_key>" openai.api_key = "<your_key>" get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() print("Document ID:", documents[0].doc_id) import psycopg2 connection_string = "postgresql://postgres:postgres@localhost:5432" db_name = "lantern_db" conn = psycopg2.connect(connection_string) conn.autocommit = True with conn.cursor() as c: c.execute(f"DROP DATABASE IF EXISTS {db_name}") c.execute(f"CREATE DATABASE {db_name}") from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.embed_model =
OpenAIEmbedding(model="text-embedding-3-small")
llama_index.embeddings.openai.OpenAIEmbedding
get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ) from IPython.display import Markdown, display import dotenv import s3fs import os dotenv.load_dotenv("../../../.env") AWS_KEY = os.environ["AWS_ACCESS_KEY_ID"] AWS_SECRET = os.environ["AWS_SECRET_ACCESS_KEY"] R2_ACCOUNT_ID = os.environ["R2_ACCOUNT_ID"] assert AWS_KEY is not None and AWS_KEY != "" s3 = s3fs.S3FileSystem( key=AWS_KEY, secret=AWS_SECRET, endpoint_url=f"https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com", s3_additional_kwargs={"ACL": "public-read"}, ) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print(len(documents)) index = VectorStoreIndex.from_documents(documents, fs=s3) index.set_index_id("vector_index") index.storage_context.persist("llama-index/storage_demo", fs=s3) s3.listdir("llama-index/storage_demo") sc = StorageContext.from_defaults( persist_dir="llama-index/storage_demo", fs=s3 ) index2 =
load_index_from_storage(sc, "vector_index")
llama_index.core.load_index_from_storage
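A quick sanity check that the reloaded index behaves like the original; a sketch:

query_engine = index2.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))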