id
stringlengths
14
17
text
stringlengths
42
2.1k
4e9727215e95-1100
", metadata: { reliable: false }, }),];const taggedDocuments = await metadataTagger.transformDocuments(documents);console.log(taggedDocuments);/* [ Document { pageContent: 'Review of The Bee Movie\n' + 'By Roger Ebert\n' + 'This is the greatest movie ever made. 4 out of 5 stars. ', metadata: { movie_title: 'The Bee Movie', critic: 'Roger Ebert', tone: 'positive', rating: 4 } }, Document { pageContent: 'Review of The Godfather\n' + 'By Anonymous\n' + '\n' + 'This movie was super boring. 1 out of 5 stars. ', metadata: { movie_title: 'The Godfather', critic: 'Roger Ebert', tone: 'negative', rating: 1, reliable: false } } ]*/API Reference:createMetadataTaggerFromZod from langchain/document_transformers/openai_functionsChatOpenAI from langchain/chat_models/openaiDocument from langchain/documentPromptTemplate from langchain/prompts It can often be useful to tag ingested documents with structured metadata, such as the title, tone, or length of a document, to allow for more targeted similarity search later. However, for large numbers of documents, performing this labelling process manually can be tedious. The MetadataTagger document transformer automates this process by extracting metadata from each provided document according to a provided schema. It uses a configurable OpenAI Functions-powered chain under the hood, so if you pass a custom LLM instance, it must be an OpenAI model with functions support. Note: This document transformer works best with complete documents, so it's best to run it first with whole documents before doing any other splitting or processing!
4e9727215e95-1101
For example, let's say you wanted to index a set of movie reviews. You could initialize the document transformer as follows:
4e9727215e95-1102
import { z } from "zod";import { createMetadataTaggerFromZod } from "langchain/document_transformers/openai_functions";import { ChatOpenAI } from "langchain/chat_models/openai";import { Document } from "langchain/document";const zodSchema = z.object({ movie_title: z.string(), critic: z.string(), tone: z.enum(["positive", "negative"]), rating: z .optional(z.number()) .describe("The number of stars the critic rated the movie"),});const metadataTagger = createMetadataTaggerFromZod(zodSchema, { llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo" }),});const documents = [ new Document({ pageContent: "Review of The Bee Movie\nBy Roger Ebert\nThis is the greatest movie ever made. 4 out of 5 stars. ", }), new Document({ pageContent: "Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars. ", metadata: { reliable: false }, }),];const taggedDocuments = await metadataTagger.transformDocuments(documents);console.log(taggedDocuments);/* [ Document { pageContent: 'Review of The Bee Movie\n' + 'By Roger Ebert\n' + 'This is the greatest movie ever made. 4 out of 5 stars. ', metadata: { movie_title: 'The Bee Movie', critic: 'Roger Ebert', tone: 'positive', rating: 4 } }, Document { pageContent: 'Review of The Godfather\n' + 'By Anonymous\n' + '\n' + 'This movie was super boring. 1 out of 5 stars.
4e9727215e95-1103
', metadata: { movie_title: 'The Godfather', critic: 'Anonymous', tone: 'negative', rating: 1, reliable: false } } ]*/ API Reference:createMetadataTaggerFromZod from langchain/document_transformers/openai_functionsChatOpenAI from langchain/chat_models/openaiDocument from langchain/document There is an additional createMetadataTagger method that accepts a valid JSON Schema object as well. You can pass the underlying tagging chain the standard LLMChain arguments in the second options parameter. For example, if you wanted to ask the LLM to focus specific details in the input documents, or extract metadata in a certain style, you could pass in a custom prompt:
4e9727215e95-1104
import { z } from "zod";import { createMetadataTaggerFromZod } from "langchain/document_transformers/openai_functions";import { ChatOpenAI } from "langchain/chat_models/openai";import { Document } from "langchain/document";import { PromptTemplate } from "langchain/prompts";const taggingChainTemplate = `Extract the desired information from the following passage.Anonymous critics are actually Roger Ebert.Passage:{input}`;const zodSchema = z.object({ movie_title: z.string(), critic: z.string(), tone: z.enum(["positive", "negative"]), rating: z .optional(z.number()) .describe("The number of stars the critic rated the movie"),});const metadataTagger = createMetadataTaggerFromZod(zodSchema, { llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo" }), prompt: PromptTemplate.fromTemplate(taggingChainTemplate),});const documents = [ new Document({ pageContent: "Review of The Bee Movie\nBy Roger Ebert\nThis is the greatest movie ever made. 4 out of 5 stars. ", }), new Document({ pageContent: "Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars. ", metadata: { reliable: false }, }),];const taggedDocuments = await metadataTagger.transformDocuments(documents);console.log(taggedDocuments);/* [ Document { pageContent: 'Review of The Bee Movie\n' + 'By Roger Ebert\n' + 'This is the greatest movie ever made. 4 out of 5 stars.
4e9727215e95-1105
', metadata: { movie_title: 'The Bee Movie', critic: 'Roger Ebert', tone: 'positive', rating: 4 } }, Document { pageContent: 'Review of The Godfather\n' + 'By Anonymous\n' + '\n' + 'This movie was super boring. 1 out of 5 stars. ', metadata: { movie_title: 'The Godfather', critic: 'Roger Ebert', tone: 'negative', rating: 1, reliable: false } } ]*/ API Reference:createMetadataTaggerFromZod from langchain/document_transformers/openai_functionsChatOpenAI from langchain/chat_models/openaiDocument from langchain/documentPromptTemplate from langchain/prompts Split by character UsageCustomization Page Title: Split by character | 🦜️🔗 Langchain Paragraphs: Skip to main content🦜️🔗 LangChainDocsUse casesAPILangSmithPython DocsCTRLKGet startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersSplit by characterSplit by characterThis is the simplest method. This splits based on characters (by default "\n\n") and measure chunk length by number of characters.How the text is split: by single characterHow the chunk size is measured: by number of charactersCharacterTextSplitterBesides the RecursiveCharacterTextSplitter, there is also the more standard CharacterTextSplitter. This splits only on one type of character (defaults to "\n\n").
4e9727215e95-1106
You can use it in the exact same way.import { Document } from "langchain/document";import { CharacterTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new CharacterTextSplitter({ separator: " ", chunkSize: 7, chunkOverlap: 3,});const output = await splitter.createDocuments([text]);PreviousOpenAI functions metadata taggerNextSplit code and markupCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc. Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersSplit by characterSplit by characterThis is the simplest method. This splits based on characters (by default "\n\n") and measure chunk length by number of characters.How the text is split: by single characterHow the chunk size is measured: by number of charactersCharacterTextSplitterBesides the RecursiveCharacterTextSplitter, there is also the more standard CharacterTextSplitter. This splits only on one type of character (defaults to "\n\n"). You can use it in the exact same way.import { Document } from "langchain/document";import { CharacterTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new CharacterTextSplitter({ separator: " ", chunkSize: 7, chunkOverlap: 3,});const output = await splitter.createDocuments([text]);PreviousOpenAI functions metadata taggerNextSplit code and markup
4e9727215e95-1107
Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI reference ModulesData connectionDocument transformersText splittersSplit by characterSplit by characterThis is the simplest method. This splits based on characters (by default "\n\n") and measure chunk length by number of characters.How the text is split: by single characterHow the chunk size is measured: by number of charactersCharacterTextSplitterBesides the RecursiveCharacterTextSplitter, there is also the more standard CharacterTextSplitter. This splits only on one type of character (defaults to "\n\n"). You can use it in the exact same way.import { Document } from "langchain/document";import { CharacterTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new CharacterTextSplitter({ separator: " ", chunkSize: 7, chunkOverlap: 3,});const output = await splitter.createDocuments([text]);PreviousOpenAI functions metadata taggerNextSplit code and markup
4e9727215e95-1108
Split by characterThis is the simplest method. This splits based on characters (by default "\n\n") and measure chunk length by number of characters.How the text is split: by single characterHow the chunk size is measured: by number of charactersCharacterTextSplitterBesides the RecursiveCharacterTextSplitter, there is also the more standard CharacterTextSplitter. This splits only on one type of character (defaults to "\n\n"). You can use it in the exact same way.import { Document } from "langchain/document";import { CharacterTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new CharacterTextSplitter({ separator: " ", chunkSize: 7, chunkOverlap: 3,});const output = await splitter.createDocuments([text]); This is the simplest method. This splits based on characters (by default "\n\n") and measure chunk length by number of characters. Besides the RecursiveCharacterTextSplitter, there is also the more standard CharacterTextSplitter. This splits only on one type of character (defaults to "\n\n"). You can use it in the exact same way. import { Document } from "langchain/document";import { CharacterTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new CharacterTextSplitter({ separator: " ", chunkSize: 7, chunkOverlap: 3,});const output = await splitter.createDocuments([text]); Split code and markup Page Title: Split code and markup | 🦜️🔗 Langchain Paragraphs:
4e9727215e95-1109
Paragraphs: Skip to main content🦜️🔗 LangChainDocsUse casesAPILangSmithPython DocsCTRLKGet startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersSplit code and markupSplit code and markupCodeTextSplitter allows you to split your code and markup with support for multiple languages.LangChain supports a variety of different markup and programming language-specific text splitters to split your text based on language-specific syntax. This results in more semantically self-contained chunks that are more useful to a vector store or other retriever.
4e9727215e95-1110
Popular languages like JavaScript, Python, Solidity, and Rust are supported as well as Latex, HTML, and Markdown.Usage​Initialize a standard RecursiveCharacterTextSplitter with the fromLanguage factory method. Below are some examples for various languages.JavaScript​import { SupportedTextSplitterLanguages, RecursiveCharacterTextSplitter,} from "langchain/text_splitter";console.log(SupportedTextSplitterLanguages); // Array of supported languages/* [ 'cpp', 'go', 'java', 'js', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html' ]*/const jsCode = `function helloWorld() { console.log("Hello, World! ");}// Call the functionhelloWorld();`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", { chunkSize: 32, chunkOverlap: 0,});const jsOutput = await splitter.createDocuments([jsCode]);console.log(jsOutput);/* [ Document { pageContent: 'function helloWorld() {', metadata: { loc: [Object] } }, Document { pageContent: 'console.log("Hello, World!
4e9727215e95-1111
");', metadata: { loc: [Object] } }, Document { pageContent: '}\n// Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'helloWorld();', metadata: { loc: [Object] } } ]*/API Reference:SupportedTextSplitterLanguages from langchain/text_splitterRecursiveCharacterTextSplitter from langchain/text_splitterPython​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const pythonCode = `def hello_world(): print("Hello, World! ")# Call the functionhello_world()`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", { chunkSize: 32, chunkOverlap: 0,});const pythonOutput = await splitter.createDocuments([pythonCode]);console.log(pythonOutput);/* [ Document { pageContent: 'def hello_world():', metadata: { loc: [Object] } }, Document { pageContent: 'print("Hello, World!
4e9727215e95-1112
")', metadata: { loc: [Object] } }, Document { pageContent: '# Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'hello_world()', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitterHTML​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `<!DOCTYPE html><html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions.
4e9727215e95-1113
</div> </body></html>`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", { chunkSize: 175, chunkOverlap: 20,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '<!DOCTYPE html>\n<html>', metadata: { loc: [Object] } }, Document { pageContent: '<head>\n <title>🦜️🔗 LangChain</title>', metadata: { loc: [Object] } }, Document { pageContent: '<style>\n' + ' body {\n' + ' font-family: Arial, sans-serif;\n' + ' }\n' + ' h1 {\n' + ' color: darkblue;\n' + ' }\n' + ' </style>\n' + ' </head>', metadata: { loc: [Object] } }, Document { pageContent: '<body>\n' + ' <div>\n' + ' <h1>🦜️🔗 LangChain</h1>\n' + ' <p>⚡ Building applications with LLMs through composability ⚡</p>\n' + ' </div>', metadata: { loc: [Object] } }, Document { pageContent: '<div>\n' + ' As an open source project in a rapidly developing field, we are extremely open to contributions.\n' + ' </div>\n' + ' </body>\n' + '</html>', metadata: { loc: [Object] } } ]*/API
4e9727215e95-1114
Reference:RecursiveCharacterTextSplitter from langchain/text_splitterLatex​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `\\begin{document}\\title{🦜️🔗 LangChain}⚡ Building applications with LLMs through composability ⚡\\section{Quick Install}\\begin{verbatim}Hopefully this code block isn't splityarn add langchain\\end{verbatim}As an open source project in a rapidly developing field, we are extremely open to contributions.\\end{document}`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("latex", { chunkSize: 100, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '\\begin{document}\n' + '\\title{🦜️🔗 LangChain}\n' + '⚡ Building applications with LLMs through composability ⚡', metadata: { loc: [Object] } }, Document { pageContent: '\\section{Quick Install}', metadata: { loc: [Object] } }, Document { pageContent: '\\begin{verbatim}\n' + "Hopefully this code block isn't split\n" + 'yarn add langchain\n' + '\\end{verbatim}', metadata: { loc: [Object] } }, Document { pageContent: 'As an open source project in a rapidly developing field, we are extremely open to contributions.
4e9727215e95-1115
', metadata: { loc: [Object] } }, Document { pageContent: '\\end{document}', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitterPreviousSplit by characterNextContextual chunk headersCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc. Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersSplit code and markupSplit code and markupCodeTextSplitter allows you to split your code and markup with support for multiple languages.LangChain supports a variety of different markup and programming language-specific text splitters to split your text based on language-specific syntax. This results in more semantically self-contained chunks that are more useful to a vector store or other retriever.
4e9727215e95-1116
Popular languages like JavaScript, Python, Solidity, and Rust are supported as well as Latex, HTML, and Markdown.Usage​Initialize a standard RecursiveCharacterTextSplitter with the fromLanguage factory method. Below are some examples for various languages.JavaScript​import { SupportedTextSplitterLanguages, RecursiveCharacterTextSplitter,} from "langchain/text_splitter";console.log(SupportedTextSplitterLanguages); // Array of supported languages/* [ 'cpp', 'go', 'java', 'js', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html' ]*/const jsCode = `function helloWorld() { console.log("Hello, World! ");}// Call the functionhelloWorld();`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", { chunkSize: 32, chunkOverlap: 0,});const jsOutput = await splitter.createDocuments([jsCode]);console.log(jsOutput);/* [ Document { pageContent: 'function helloWorld() {', metadata: { loc: [Object] } }, Document { pageContent: 'console.log("Hello, World!
4e9727215e95-1117
");', metadata: { loc: [Object] } }, Document { pageContent: '}\n// Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'helloWorld();', metadata: { loc: [Object] } } ]*/API Reference:SupportedTextSplitterLanguages from langchain/text_splitterRecursiveCharacterTextSplitter from langchain/text_splitterPython​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const pythonCode = `def hello_world(): print("Hello, World! ")# Call the functionhello_world()`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", { chunkSize: 32, chunkOverlap: 0,});const pythonOutput = await splitter.createDocuments([pythonCode]);console.log(pythonOutput);/* [ Document { pageContent: 'def hello_world():', metadata: { loc: [Object] } }, Document { pageContent: 'print("Hello, World!
4e9727215e95-1118
")', metadata: { loc: [Object] } }, Document { pageContent: '# Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'hello_world()', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitterHTML​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `<!DOCTYPE html><html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions.
4e9727215e95-1119
</div> </body></html>`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", { chunkSize: 175, chunkOverlap: 20,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '<!DOCTYPE html>\n<html>', metadata: { loc: [Object] } }, Document { pageContent: '<head>\n <title>🦜️🔗 LangChain</title>', metadata: { loc: [Object] } }, Document { pageContent: '<style>\n' + ' body {\n' + ' font-family: Arial, sans-serif;\n' + ' }\n' + ' h1 {\n' + ' color: darkblue;\n' + ' }\n' + ' </style>\n' + ' </head>', metadata: { loc: [Object] } }, Document { pageContent: '<body>\n' + ' <div>\n' + ' <h1>🦜️🔗 LangChain</h1>\n' + ' <p>⚡ Building applications with LLMs through composability ⚡</p>\n' + ' </div>', metadata: { loc: [Object] } }, Document { pageContent: '<div>\n' + ' As an open source project in a rapidly developing field, we are extremely open to contributions.\n' + ' </div>\n' + ' </body>\n' + '</html>', metadata: { loc: [Object] } } ]*/API
4e9727215e95-1120
Reference:RecursiveCharacterTextSplitter from langchain/text_splitterLatex​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `\\begin{document}\\title{🦜️🔗 LangChain}⚡ Building applications with LLMs through composability ⚡\\section{Quick Install}\\begin{verbatim}Hopefully this code block isn't splityarn add langchain\\end{verbatim}As an open source project in a rapidly developing field, we are extremely open to contributions.\\end{document}`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("latex", { chunkSize: 100, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '\\begin{document}\n' + '\\title{🦜️🔗 LangChain}\n' + '⚡ Building applications with LLMs through composability ⚡', metadata: { loc: [Object] } }, Document { pageContent: '\\section{Quick Install}', metadata: { loc: [Object] } }, Document { pageContent: '\\begin{verbatim}\n' + "Hopefully this code block isn't split\n" + 'yarn add langchain\n' + '\\end{verbatim}', metadata: { loc: [Object] } }, Document { pageContent: 'As an open source project in a rapidly developing field, we are extremely open to contributions. ', metadata: { loc: [Object] } }, Document { pageContent: '\\end{document}', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitterPreviousSplit by characterNextContextual chunk headers
4e9727215e95-1121
ModulesData connectionDocument transformersText splittersSplit code and markupSplit code and markupCodeTextSplitter allows you to split your code and markup with support for multiple languages.LangChain supports a variety of different markup and programming language-specific text splitters to split your text based on language-specific syntax. This results in more semantically self-contained chunks that are more useful to a vector store or other retriever. Popular languages like JavaScript, Python, Solidity, and Rust are supported as well as Latex, HTML, and Markdown.Usage​Initialize a standard RecursiveCharacterTextSplitter with the fromLanguage factory method. Below are some examples for various languages.JavaScript​import { SupportedTextSplitterLanguages, RecursiveCharacterTextSplitter,} from "langchain/text_splitter";console.log(SupportedTextSplitterLanguages); // Array of supported languages/* [ 'cpp', 'go', 'java', 'js', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html' ]*/const jsCode = `function helloWorld() { console.log("Hello, World! ");}// Call the functionhelloWorld();`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", { chunkSize: 32, chunkOverlap: 0,});const jsOutput = await splitter.createDocuments([jsCode]);console.log(jsOutput);/* [ Document { pageContent: 'function helloWorld() {', metadata: { loc: [Object] } }, Document { pageContent: 'console.log("Hello, World!
4e9727215e95-1122
");', metadata: { loc: [Object] } }, Document { pageContent: '}\n// Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'helloWorld();', metadata: { loc: [Object] } } ]*/API Reference:SupportedTextSplitterLanguages from langchain/text_splitterRecursiveCharacterTextSplitter from langchain/text_splitterPython​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const pythonCode = `def hello_world(): print("Hello, World! ")# Call the functionhello_world()`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", { chunkSize: 32, chunkOverlap: 0,});const pythonOutput = await splitter.createDocuments([pythonCode]);console.log(pythonOutput);/* [ Document { pageContent: 'def hello_world():', metadata: { loc: [Object] } }, Document { pageContent: 'print("Hello, World!
4e9727215e95-1123
")', metadata: { loc: [Object] } }, Document { pageContent: '# Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'hello_world()', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitterHTML​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `<!DOCTYPE html><html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions.
4e9727215e95-1124
</div> </body></html>`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", { chunkSize: 175, chunkOverlap: 20,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '<!DOCTYPE html>\n<html>', metadata: { loc: [Object] } }, Document { pageContent: '<head>\n <title>🦜️🔗 LangChain</title>', metadata: { loc: [Object] } }, Document { pageContent: '<style>\n' + ' body {\n' + ' font-family: Arial, sans-serif;\n' + ' }\n' + ' h1 {\n' + ' color: darkblue;\n' + ' }\n' + ' </style>\n' + ' </head>', metadata: { loc: [Object] } }, Document { pageContent: '<body>\n' + ' <div>\n' + ' <h1>🦜️🔗 LangChain</h1>\n' + ' <p>⚡ Building applications with LLMs through composability ⚡</p>\n' + ' </div>', metadata: { loc: [Object] } }, Document { pageContent: '<div>\n' + ' As an open source project in a rapidly developing field, we are extremely open to contributions.\n' + ' </div>\n' + ' </body>\n' + '</html>', metadata: { loc: [Object] } } ]*/API
4e9727215e95-1125
Reference:RecursiveCharacterTextSplitter from langchain/text_splitterLatex​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `\\begin{document}\\title{🦜️🔗 LangChain}⚡ Building applications with LLMs through composability ⚡\\section{Quick Install}\\begin{verbatim}Hopefully this code block isn't splityarn add langchain\\end{verbatim}As an open source project in a rapidly developing field, we are extremely open to contributions.\\end{document}`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("latex", { chunkSize: 100, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '\\begin{document}\n' + '\\title{🦜️🔗 LangChain}\n' + '⚡ Building applications with LLMs through composability ⚡', metadata: { loc: [Object] } }, Document { pageContent: '\\section{Quick Install}', metadata: { loc: [Object] } }, Document { pageContent: '\\begin{verbatim}\n' + "Hopefully this code block isn't split\n" + 'yarn add langchain\n' + '\\end{verbatim}', metadata: { loc: [Object] } }, Document { pageContent: 'As an open source project in a rapidly developing field, we are extremely open to contributions. ', metadata: { loc: [Object] } }, Document { pageContent: '\\end{document}', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitterPreviousSplit by characterNextContextual chunk headers
4e9727215e95-1126
Split code and markupCodeTextSplitter allows you to split your code and markup with support for multiple languages.LangChain supports a variety of different markup and programming language-specific text splitters to split your text based on language-specific syntax. This results in more semantically self-contained chunks that are more useful to a vector store or other retriever. Popular languages like JavaScript, Python, Solidity, and Rust are supported as well as Latex, HTML, and Markdown.Usage​Initialize a standard RecursiveCharacterTextSplitter with the fromLanguage factory method. Below are some examples for various languages.JavaScript​import { SupportedTextSplitterLanguages, RecursiveCharacterTextSplitter,} from "langchain/text_splitter";console.log(SupportedTextSplitterLanguages); // Array of supported languages/* [ 'cpp', 'go', 'java', 'js', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html' ]*/const jsCode = `function helloWorld() { console.log("Hello, World! ");}// Call the functionhelloWorld();`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", { chunkSize: 32, chunkOverlap: 0,});const jsOutput = await splitter.createDocuments([jsCode]);console.log(jsOutput);/* [ Document { pageContent: 'function helloWorld() {', metadata: { loc: [Object] } }, Document { pageContent: 'console.log("Hello, World!
4e9727215e95-1127
");', metadata: { loc: [Object] } }, Document { pageContent: '}\n// Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'helloWorld();', metadata: { loc: [Object] } } ]*/API Reference:SupportedTextSplitterLanguages from langchain/text_splitterRecursiveCharacterTextSplitter from langchain/text_splitterPython​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const pythonCode = `def hello_world(): print("Hello, World! ")# Call the functionhello_world()`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", { chunkSize: 32, chunkOverlap: 0,});const pythonOutput = await splitter.createDocuments([pythonCode]);console.log(pythonOutput);/* [ Document { pageContent: 'def hello_world():', metadata: { loc: [Object] } }, Document { pageContent: 'print("Hello, World!
4e9727215e95-1128
")', metadata: { loc: [Object] } }, Document { pageContent: '# Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'hello_world()', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitterHTML​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `<!DOCTYPE html><html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions.
4e9727215e95-1129
</div> </body></html>`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", { chunkSize: 175, chunkOverlap: 20,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '<!DOCTYPE html>\n<html>', metadata: { loc: [Object] } }, Document { pageContent: '<head>\n <title>🦜️🔗 LangChain</title>', metadata: { loc: [Object] } }, Document { pageContent: '<style>\n' + ' body {\n' + ' font-family: Arial, sans-serif;\n' + ' }\n' + ' h1 {\n' + ' color: darkblue;\n' + ' }\n' + ' </style>\n' + ' </head>', metadata: { loc: [Object] } }, Document { pageContent: '<body>\n' + ' <div>\n' + ' <h1>🦜️🔗 LangChain</h1>\n' + ' <p>⚡ Building applications with LLMs through composability ⚡</p>\n' + ' </div>', metadata: { loc: [Object] } }, Document { pageContent: '<div>\n' + ' As an open source project in a rapidly developing field, we are extremely open to contributions.\n' + ' </div>\n' + ' </body>\n' + '</html>', metadata: { loc: [Object] } } ]*/API
4e9727215e95-1130
Reference:RecursiveCharacterTextSplitter from langchain/text_splitterLatex​import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `\\begin{document}\\title{🦜️🔗 LangChain}⚡ Building applications with LLMs through composability ⚡\\section{Quick Install}\\begin{verbatim}Hopefully this code block isn't splityarn add langchain\\end{verbatim}As an open source project in a rapidly developing field, we are extremely open to contributions.\\end{document}`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("latex", { chunkSize: 100, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '\\begin{document}\n' + '\\title{🦜️🔗 LangChain}\n' + '⚡ Building applications with LLMs through composability ⚡', metadata: { loc: [Object] } }, Document { pageContent: '\\section{Quick Install}', metadata: { loc: [Object] } }, Document { pageContent: '\\begin{verbatim}\n' + "Hopefully this code block isn't split\n" + 'yarn add langchain\n' + '\\end{verbatim}', metadata: { loc: [Object] } }, Document { pageContent: 'As an open source project in a rapidly developing field, we are extremely open to contributions. ', metadata: { loc: [Object] } }, Document { pageContent: '\\end{document}', metadata: { loc: [Object] } } ]*/API Reference:RecursiveCharacterTextSplitter from langchain/text_splitter
4e9727215e95-1131
CodeTextSplitter allows you to split your code and markup with support for multiple languages. LangChain supports a variety of different markup and programming language-specific text splitters to split your text based on language-specific syntax. This results in more semantically self-contained chunks that are more useful to a vector store or other retriever. Popular languages like JavaScript, Python, Solidity, and Rust are supported as well as Latex, HTML, and Markdown. Initialize a standard RecursiveCharacterTextSplitter with the fromLanguage factory method. Below are some examples for various languages.
4e9727215e95-1132
import { SupportedTextSplitterLanguages, RecursiveCharacterTextSplitter,} from "langchain/text_splitter";console.log(SupportedTextSplitterLanguages); // Array of supported languages/* [ 'cpp', 'go', 'java', 'js', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html' ]*/const jsCode = `function helloWorld() { console.log("Hello, World! ");}// Call the functionhelloWorld();`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", { chunkSize: 32, chunkOverlap: 0,});const jsOutput = await splitter.createDocuments([jsCode]);console.log(jsOutput);/* [ Document { pageContent: 'function helloWorld() {', metadata: { loc: [Object] } }, Document { pageContent: 'console.log("Hello, World! ");', metadata: { loc: [Object] } }, Document { pageContent: '}\n// Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'helloWorld();', metadata: { loc: [Object] } } ]*/ API Reference:SupportedTextSplitterLanguages from langchain/text_splitterRecursiveCharacterTextSplitter from langchain/text_splitter
4e9727215e95-1133
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const pythonCode = `def hello_world(): print("Hello, World! ")# Call the functionhello_world()`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", { chunkSize: 32, chunkOverlap: 0,});const pythonOutput = await splitter.createDocuments([pythonCode]);console.log(pythonOutput);/* [ Document { pageContent: 'def hello_world():', metadata: { loc: [Object] } }, Document { pageContent: 'print("Hello, World! ")', metadata: { loc: [Object] } }, Document { pageContent: '# Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'hello_world()', metadata: { loc: [Object] } } ]*/ API Reference:RecursiveCharacterTextSplitter from langchain/text_splitter import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `<!DOCTYPE html><html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions.
4e9727215e95-1134
</div> </body></html>`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", { chunkSize: 175, chunkOverlap: 20,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '<!DOCTYPE html>\n<html>', metadata: { loc: [Object] } }, Document { pageContent: '<head>\n <title>🦜️🔗 LangChain</title>', metadata: { loc: [Object] } }, Document { pageContent: '<style>\n' + ' body {\n' + ' font-family: Arial, sans-serif;\n' + ' }\n' + ' h1 {\n' + ' color: darkblue;\n' + ' }\n' + ' </style>\n' + ' </head>', metadata: { loc: [Object] } }, Document { pageContent: '<body>\n' + ' <div>\n' + ' <h1>🦜️🔗 LangChain</h1>\n' + ' <p>⚡ Building applications with LLMs through composability ⚡</p>\n' + ' </div>', metadata: { loc: [Object] } }, Document { pageContent: '<div>\n' + ' As an open source project in a rapidly developing field, we are extremely open to contributions.\n' + ' </div>\n' + ' </body>\n' + '</html>', metadata: { loc: [Object] } } ]*/
4e9727215e95-1135
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `\\begin{document}\\title{🦜️🔗 LangChain}⚡ Building applications with LLMs through composability ⚡\\section{Quick Install}\\begin{verbatim}Hopefully this code block isn't splityarn add langchain\\end{verbatim}As an open source project in a rapidly developing field, we are extremely open to contributions.\\end{document}`;const splitter = RecursiveCharacterTextSplitter.fromLanguage("latex", { chunkSize: 100, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);console.log(output);/* [ Document { pageContent: '\\begin{document}\n' + '\\title{🦜️🔗 LangChain}\n' + '⚡ Building applications with LLMs through composability ⚡', metadata: { loc: [Object] } }, Document { pageContent: '\\section{Quick Install}', metadata: { loc: [Object] } }, Document { pageContent: '\\begin{verbatim}\n' + "Hopefully this code block isn't split\n" + 'yarn add langchain\n' + '\\end{verbatim}', metadata: { loc: [Object] } }, Document { pageContent: 'As an open source project in a rapidly developing field, we are extremely open to contributions. ', metadata: { loc: [Object] } }, Document { pageContent: '\\end{document}', metadata: { loc: [Object] } } ]*/ Contextual chunk headers Page Title: Contextual chunk headers | 🦜️🔗 Langchain Paragraphs:
4e9727215e95-1136
Paragraphs: Skip to main content🦜️🔗 LangChainDocsUse casesAPILangSmithPython DocsCTRLKGet startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersContextual chunk headersContextual chunk headersConsider a scenario where you want to store a large, arbitrary collection of documents in a vector store and perform Q&A tasks on them. Simply splitting documents with overlapping text may not provide sufficient context for LLMs to determine if multiple chunks are referencing the same information, or how to resolve information from contradictory sources.Tagging each document with metadata is a solution if you know what to filter against, but you may not know ahead of time exactly what kind of queries your vector store will be expected to handle.
4e9727215e95-1137
Including additional contextual information directly in each chunk in the form of headers can help deal with arbitrary queries.Here's an example:import { OpenAI } from "langchain/llms/openai";import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains";import { CharacterTextSplitter } from "langchain/text_splitter";import { OpenAIEmbeddings } from "langchain/embeddings/openai";import { HNSWLib } from "langchain/vectorstores/hnswlib";const splitter = new CharacterTextSplitter({ chunkSize: 1536, chunkOverlap: 200,});const jimDocs = await splitter.createDocuments( [`My favorite color is blue.`], [], { chunkHeader: `DOCUMENT NAME: Jim Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const pamDocs = await splitter.createDocuments( [`My favorite color is red.`], [], { chunkHeader: `DOCUMENT NAME: Pam Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const vectorStore = await HNSWLib.fromDocuments( jimDocs.concat(pamDocs), new OpenAIEmbeddings());const model = new OpenAI({ temperature: 0 });const chain = new RetrievalQAChain({ combineDocumentsChain: loadQAStuffChain(model), retriever: vectorStore.asRetriever(), returnSourceDocuments: true,});const res = await chain.call({ query: "What is Pam's favorite color? ",});console.log(JSON.stringify(res, null, 2));/* { "text": " Red. ", "sourceDocuments": [ { "pageContent": "DOCUMENT NAME: Pam Interview\n\n---\n\nMy favorite color is red.
4e9727215e95-1138
", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } }, { "pageContent": "DOCUMENT NAME: Jim Interview\n\n---\n\nMy favorite color is blue. ", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } } ] }*/API Reference:OpenAI from langchain/llms/openaiRetrievalQAChain from langchain/chainsloadQAStuffChain from langchain/chainsCharacterTextSplitter from langchain/text_splitterOpenAIEmbeddings from langchain/embeddings/openaiHNSWLib from langchain/vectorstores/hnswlib;PreviousSplit code and markupNextCustom text splittersCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc. Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersContextual chunk headersContextual chunk headersConsider a scenario where you want to store a large, arbitrary collection of documents in a vector store and perform Q&A tasks on them. Simply splitting documents with overlapping text may not provide sufficient context for LLMs to determine if multiple chunks are referencing the same information, or how to resolve information from contradictory sources.Tagging each document with metadata is a solution if you know what to filter against, but you may not know ahead of time exactly what kind of queries your vector store will be expected to handle.
4e9727215e95-1139
Including additional contextual information directly in each chunk in the form of headers can help deal with arbitrary queries.Here's an example:import { OpenAI } from "langchain/llms/openai";import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains";import { CharacterTextSplitter } from "langchain/text_splitter";import { OpenAIEmbeddings } from "langchain/embeddings/openai";import { HNSWLib } from "langchain/vectorstores/hnswlib";const splitter = new CharacterTextSplitter({ chunkSize: 1536, chunkOverlap: 200,});const jimDocs = await splitter.createDocuments( [`My favorite color is blue.`], [], { chunkHeader: `DOCUMENT NAME: Jim Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const pamDocs = await splitter.createDocuments( [`My favorite color is red.`], [], { chunkHeader: `DOCUMENT NAME: Pam Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const vectorStore = await HNSWLib.fromDocuments( jimDocs.concat(pamDocs), new OpenAIEmbeddings());const model = new OpenAI({ temperature: 0 });const chain = new RetrievalQAChain({ combineDocumentsChain: loadQAStuffChain(model), retriever: vectorStore.asRetriever(), returnSourceDocuments: true,});const res = await chain.call({ query: "What is Pam's favorite color? ",});console.log(JSON.stringify(res, null, 2));/* { "text": " Red. ", "sourceDocuments": [ { "pageContent": "DOCUMENT NAME: Pam Interview\n\n---\n\nMy favorite color is red.
4e9727215e95-1140
", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } }, { "pageContent": "DOCUMENT NAME: Jim Interview\n\n---\n\nMy favorite color is blue. ", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } } ] }*/API Reference:OpenAI from langchain/llms/openaiRetrievalQAChain from langchain/chainsloadQAStuffChain from langchain/chainsCharacterTextSplitter from langchain/text_splitterOpenAIEmbeddings from langchain/embeddings/openaiHNSWLib from langchain/vectorstores/hnswlib;PreviousSplit code and markupNextCustom text splitters ModulesData connectionDocument transformersText splittersContextual chunk headersContextual chunk headersConsider a scenario where you want to store a large, arbitrary collection of documents in a vector store and perform Q&A tasks on them. Simply splitting documents with overlapping text may not provide sufficient context for LLMs to determine if multiple chunks are referencing the same information, or how to resolve information from contradictory sources.Tagging each document with metadata is a solution if you know what to filter against, but you may not know ahead of time exactly what kind of queries your vector store will be expected to handle.
4e9727215e95-1141
Including additional contextual information directly in each chunk in the form of headers can help deal with arbitrary queries.Here's an example:import { OpenAI } from "langchain/llms/openai";import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains";import { CharacterTextSplitter } from "langchain/text_splitter";import { OpenAIEmbeddings } from "langchain/embeddings/openai";import { HNSWLib } from "langchain/vectorstores/hnswlib";const splitter = new CharacterTextSplitter({ chunkSize: 1536, chunkOverlap: 200,});const jimDocs = await splitter.createDocuments( [`My favorite color is blue.`], [], { chunkHeader: `DOCUMENT NAME: Jim Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const pamDocs = await splitter.createDocuments( [`My favorite color is red.`], [], { chunkHeader: `DOCUMENT NAME: Pam Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const vectorStore = await HNSWLib.fromDocuments( jimDocs.concat(pamDocs), new OpenAIEmbeddings());const model = new OpenAI({ temperature: 0 });const chain = new RetrievalQAChain({ combineDocumentsChain: loadQAStuffChain(model), retriever: vectorStore.asRetriever(), returnSourceDocuments: true,});const res = await chain.call({ query: "What is Pam's favorite color? ",});console.log(JSON.stringify(res, null, 2));/* { "text": " Red. ", "sourceDocuments": [ { "pageContent": "DOCUMENT NAME: Pam Interview\n\n---\n\nMy favorite color is red.
4e9727215e95-1142
", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } }, { "pageContent": "DOCUMENT NAME: Jim Interview\n\n---\n\nMy favorite color is blue. ", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } } ] }*/API Reference:OpenAI from langchain/llms/openaiRetrievalQAChain from langchain/chainsloadQAStuffChain from langchain/chainsCharacterTextSplitter from langchain/text_splitterOpenAIEmbeddings from langchain/embeddings/openaiHNSWLib from langchain/vectorstores/hnswlib;PreviousSplit code and markupNextCustom text splitters Contextual chunk headersConsider a scenario where you want to store a large, arbitrary collection of documents in a vector store and perform Q&A tasks on them. Simply splitting documents with overlapping text may not provide sufficient context for LLMs to determine if multiple chunks are referencing the same information, or how to resolve information from contradictory sources.Tagging each document with metadata is a solution if you know what to filter against, but you may not know ahead of time exactly what kind of queries your vector store will be expected to handle.
4e9727215e95-1143
Including additional contextual information directly in each chunk in the form of headers can help deal with arbitrary queries.Here's an example:import { OpenAI } from "langchain/llms/openai";import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains";import { CharacterTextSplitter } from "langchain/text_splitter";import { OpenAIEmbeddings } from "langchain/embeddings/openai";import { HNSWLib } from "langchain/vectorstores/hnswlib";const splitter = new CharacterTextSplitter({ chunkSize: 1536, chunkOverlap: 200,});const jimDocs = await splitter.createDocuments( [`My favorite color is blue.`], [], { chunkHeader: `DOCUMENT NAME: Jim Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const pamDocs = await splitter.createDocuments( [`My favorite color is red.`], [], { chunkHeader: `DOCUMENT NAME: Pam Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const vectorStore = await HNSWLib.fromDocuments( jimDocs.concat(pamDocs), new OpenAIEmbeddings());const model = new OpenAI({ temperature: 0 });const chain = new RetrievalQAChain({ combineDocumentsChain: loadQAStuffChain(model), retriever: vectorStore.asRetriever(), returnSourceDocuments: true,});const res = await chain.call({ query: "What is Pam's favorite color? ",});console.log(JSON.stringify(res, null, 2));/* { "text": " Red. ", "sourceDocuments": [ { "pageContent": "DOCUMENT NAME: Pam Interview\n\n---\n\nMy favorite color is red.
4e9727215e95-1144
", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } }, { "pageContent": "DOCUMENT NAME: Jim Interview\n\n---\n\nMy favorite color is blue. ", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } } ] }*/API Reference:OpenAI from langchain/llms/openaiRetrievalQAChain from langchain/chainsloadQAStuffChain from langchain/chainsCharacterTextSplitter from langchain/text_splitterOpenAIEmbeddings from langchain/embeddings/openaiHNSWLib from langchain/vectorstores/hnswlib; Consider a scenario where you want to store a large, arbitrary collection of documents in a vector store and perform Q&A tasks on them. Simply splitting documents with overlapping text may not provide sufficient context for LLMs to determine if multiple chunks are referencing the same information, or how to resolve information from contradictory sources. Tagging each document with metadata is a solution if you know what to filter against, but you may not know ahead of time exactly what kind of queries your vector store will be expected to handle. Including additional contextual information directly in each chunk in the form of headers can help deal with arbitrary queries.
4e9727215e95-1145
import { OpenAI } from "langchain/llms/openai";import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains";import { CharacterTextSplitter } from "langchain/text_splitter";import { OpenAIEmbeddings } from "langchain/embeddings/openai";import { HNSWLib } from "langchain/vectorstores/hnswlib";const splitter = new CharacterTextSplitter({ chunkSize: 1536, chunkOverlap: 200,});const jimDocs = await splitter.createDocuments( [`My favorite color is blue.`], [], { chunkHeader: `DOCUMENT NAME: Jim Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const pamDocs = await splitter.createDocuments( [`My favorite color is red.`], [], { chunkHeader: `DOCUMENT NAME: Pam Interview\n\n---\n\n`, appendChunkOverlapHeader: true, });const vectorStore = await HNSWLib.fromDocuments( jimDocs.concat(pamDocs), new OpenAIEmbeddings());const model = new OpenAI({ temperature: 0 });const chain = new RetrievalQAChain({ combineDocumentsChain: loadQAStuffChain(model), retriever: vectorStore.asRetriever(), returnSourceDocuments: true,});const res = await chain.call({ query: "What is Pam's favorite color? ",});console.log(JSON.stringify(res, null, 2));/* { "text": " Red. ", "sourceDocuments": [ { "pageContent": "DOCUMENT NAME: Pam Interview\n\n---\n\nMy favorite color is red.
4e9727215e95-1146
", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } }, { "pageContent": "DOCUMENT NAME: Jim Interview\n\n---\n\nMy favorite color is blue. ", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } } ] }*/ API Reference:OpenAI from langchain/llms/openaiRetrievalQAChain from langchain/chainsloadQAStuffChain from langchain/chainsCharacterTextSplitter from langchain/text_splitterOpenAIEmbeddings from langchain/embeddings/openaiHNSWLib from langchain/vectorstores/hnswlib Custom text splitters Page Title: Custom text splitters | 🦜️🔗 Langchain Paragraphs:
4e9727215e95-1147
Paragraphs: Skip to main content🦜️🔗 LangChainDocsUse casesAPILangSmithPython DocsCTRLKGet startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersCustom text splittersCustom text splittersIf you want to implement your own custom Text Splitter, you only need to subclass TextSplitter and implement a single method: splitText. The method takes a string and returns a list of strings. The returned strings will be used as the chunks.abstract class TextSplitter { abstract splitText(text: string): Promise<string[]>;}PreviousContextual chunk headersNextRecursively split by characterCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc. Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersCustom text splittersCustom text splittersIf you want to implement your own custom Text Splitter, you only need to subclass TextSplitter and implement a single method: splitText. The method takes a string and returns a list of strings. 
The returned strings will be used as the chunks.abstract class TextSplitter { abstract splitText(text: string): Promise<string[]>;}PreviousContextual chunk headersNextRecursively split by character
4e9727215e95-1148
ModulesData connectionDocument transformersText splittersCustom text splittersCustom text splittersIf you want to implement your own custom Text Splitter, you only need to subclass TextSplitter and implement a single method: splitText. The method takes a string and returns a list of strings. The returned strings will be used as the chunks.abstract class TextSplitter { abstract splitText(text: string): Promise<string[]>;}PreviousContextual chunk headersNextRecursively split by character Custom text splittersIf you want to implement your own custom Text Splitter, you only need to subclass TextSplitter and implement a single method: splitText. The method takes a string and returns a list of strings. The returned strings will be used as the chunks.abstract class TextSplitter { abstract splitText(text: string): Promise<string[]>;} If you want to implement your own custom Text Splitter, you only need to subclass TextSplitter and implement a single method: splitText. The method takes a string and returns a list of strings. The returned strings will be used as the chunks. abstract class TextSplitter { abstract splitText(text: string): Promise<string[]>;} Recursively split by character Page Title: Recursively split by character | 🦜️🔗 Langchain Paragraphs:
4e9727215e95-1149
Paragraphs: Skip to main content🦜️🔗 LangChainDocsUse casesAPILangSmithPython DocsCTRLKGet startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersRecursively split by characterRecursively split by characterThis text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is ["\n\n", "\n", " ", ""]. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.How the text is split: by list of charactersHow the chunk size is measured: by number of charactersImportant parameters to know here are chunkSize and chunkOverlap. chunkSize controls the max size (in terms of number of characters) of the final documents. chunkOverlap specifies how much overlap there should be between chunks. This is often helpful to make sure that the text isn't split weirdly.
4e9727215e95-1150
In the example below we set these values to be small (for illustration purposes), but in practice they default to 1000 and 200 respectively.import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const output = await splitter.createDocuments([text]);You'll note that in the above example we are splitting a raw text string and getting back a list of documents. We can also split documents directly.import { Document } from "langchain/document";import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const docOutput = await splitter.splitDocuments([ new Document({ pageContent: text }),]);PreviousCustom text splittersNextTokenTextSplitterCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc.
4e9727215e95-1151
Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersRecursively split by characterRecursively split by characterThis text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is ["\n\n", "\n", " ", ""]. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.How the text is split: by list of charactersHow the chunk size is measured: by number of charactersImportant parameters to know here are chunkSize and chunkOverlap. chunkSize controls the max size (in terms of number of characters) of the final documents. chunkOverlap specifies how much overlap there should be between chunks. This is often helpful to make sure that the text isn't split weirdly.
4e9727215e95-1152
In the example below we set these values to be small (for illustration purposes), but in practice they default to 1000 and 200 respectively.import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const output = await splitter.createDocuments([text]);You'll note that in the above example we are splitting a raw text string and getting back a list of documents. We can also split documents directly.import { Document } from "langchain/document";import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const docOutput = await splitter.splitDocuments([ new Document({ pageContent: text }),]);PreviousCustom text splittersNextTokenTextSplitter
4e9727215e95-1153
ModulesData connectionDocument transformersText splittersRecursively split by characterRecursively split by characterThis text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is ["\n\n", "\n", " ", ""]. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.How the text is split: by list of charactersHow the chunk size is measured: by number of charactersImportant parameters to know here are chunkSize and chunkOverlap. chunkSize controls the max size (in terms of number of characters) of the final documents. chunkOverlap specifies how much overlap there should be between chunks. This is often helpful to make sure that the text isn't split weirdly. In the example below we set these values to be small (for illustration purposes), but in practice they default to 1000 and 200 respectively.import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are?
4e9727215e95-1154
You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const output = await splitter.createDocuments([text]);You'll note that in the above example we are splitting a raw text string and getting back a list of documents. We can also split documents directly.import { Document } from "langchain/document";import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const docOutput = await splitter.splitDocuments([ new Document({ pageContent: text }),]);PreviousCustom text splittersNextTokenTextSplitter
4e9727215e95-1155
Recursively split by characterThis text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is ["\n\n", "\n", " ", ""]. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.How the text is split: by list of charactersHow the chunk size is measured: by number of charactersImportant parameters to know here are chunkSize and chunkOverlap. chunkSize controls the max size (in terms of number of characters) of the final documents. chunkOverlap specifies how much overlap there should be between chunks. This is often helpful to make sure that the text isn't split weirdly. In the example below we set these values to be small (for illustration purposes), but in practice they default to 1000 and 200 respectively.import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const output = await splitter.createDocuments([text]);You'll note that in the above example we are splitting a raw text string and getting back a list of documents.
4e9727215e95-1156
We can also split documents directly.import { Document } from "langchain/document";import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.This is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.`;const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1,});const docOutput = await splitter.splitDocuments([ new Document({ pageContent: text }),]); This text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is ["\n\n", "\n", " ", ""]. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text. TokenTextSplitter Page Title: TokenTextSplitter | 🦜️🔗 Langchain Paragraphs:
4e9727215e95-1157
Paragraphs: Skip to main content🦜️🔗 LangChainDocsUse casesAPILangSmithPython DocsCTRLKGet startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersTokenTextSplitterTokenTextSplitterFinally, TokenTextSplitter splits a raw text string by first converting the text into BPE tokens, then splitting these tokens into chunks and converting the tokens within a single chunk back into text.import { Document } from "langchain/document";import { TokenTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new TokenTextSplitter({ encodingName: "gpt2", chunkSize: 10, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);PreviousRecursively split by characterNextText embedding modelsCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc.
4e9727215e95-1158
Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersIntegrationsText splittersSplit by characterSplit code and markupContextual chunk headersCustom text splittersRecursively split by characterTokenTextSplitterText embedding modelsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionDocument transformersText splittersTokenTextSplitterTokenTextSplitterFinally, TokenTextSplitter splits a raw text string by first converting the text into BPE tokens, then splitting these tokens into chunks and converting the tokens within a single chunk back into text.import { Document } from "langchain/document";import { TokenTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new TokenTextSplitter({ encodingName: "gpt2", chunkSize: 10, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);PreviousRecursively split by characterNextText embedding models ModulesData connectionDocument transformersText splittersTokenTextSplitterTokenTextSplitterFinally, TokenTextSplitter splits a raw text string by first converting the text into BPE tokens, then splitting these tokens into chunks and converting the tokens within a single chunk back into text.import { Document } from "langchain/document";import { TokenTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new TokenTextSplitter({ encodingName: "gpt2", chunkSize: 10, chunkOverlap: 0,});const output = await splitter.createDocuments([text]);PreviousRecursively split by characterNextText embedding models
4e9727215e95-1159
TokenTextSplitterFinally, TokenTextSplitter splits a raw text string by first converting the text into BPE tokens, then splitting these tokens into chunks and converting the tokens within a single chunk back into text.import { Document } from "langchain/document";import { TokenTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new TokenTextSplitter({ encodingName: "gpt2", chunkSize: 10, chunkOverlap: 0,});const output = await splitter.createDocuments([text]); Finally, TokenTextSplitter splits a raw text string by first converting the text into BPE tokens, then splitting these tokens into chunks and converting the tokens within a single chunk back into text. import { Document } from "langchain/document";import { TokenTextSplitter } from "langchain/text_splitter";const text = "foo bar baz 123";const splitter = new TokenTextSplitter({ encodingName: "gpt2", chunkSize: 10, chunkOverlap: 0,});const output = await splitter.createDocuments([text]); Page Title: Text embedding models | 🦜️🔗 Langchain Paragraphs:
4e9727215e95-1160
Paragraphs: Skip to main content🦜️🔗 LangChainDocsUse casesAPILangSmithPython DocsCTRLKGet startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersText embedding modelsHow-toIntegrationsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionText embedding modelsOn this pageText embedding modelsThe Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.The base Embeddings class in LangChain exposes two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).Get started​Embeddings can be used to create a numerical representation of textual data. This numerical representation is useful because it can be used to find similar documents.Below is an example of how to use the OpenAI embeddings.
4e9727215e95-1161
Embeddings occasionally have different embedding methods for queries versus documents, so the embedding class exposes a embedQuery and embedDocuments method.import { OpenAIEmbeddings } from "langchain/embeddings/openai";/* Create instance */const embeddings = new OpenAIEmbeddings();/* Embed queries */const res = await embeddings.embedQuery("Hello world");/*[ -0.004845875, 0.004899438, -0.016358767, -0.024475135, -0.017341806, 0.012571548, -0.019156644, 0.009036391, -0.010227379, -0.026945334, 0.022861943, 0.010321903, -0.023479493, -0.0066544134, 0.007977734, 0.0026371893, 0.025206111, -0.012048521, 0.012943339, 0.013094575, -0.010580265, -0.003509951, 0.004070787, 0.008639394, -0.020631202, -0.0019203906, 0.012161949, -0.019194454, 0.030373365, -0.031028723, 0.0036170771, -0.007813894, -0.0060778237, -0.017820721, 0.0048647798, -0.015640393, 0.001373733, -0.015552171,
4e9727215e95-1162
0.001373733, -0.015552171, 0.019534737, -0.016169721, 0.007316074, 0.008273906, 0.011418369, -0.01390117, -0.033347685, 0.011248227, 0.0042503807, -0.012792102, -0.0014595914, 0.028356876, 0.025407761, 0.00076445413, -0.016308354, 0.017455231, -0.016396577, 0.008557475, -0.03312083, 0.031104341, 0.032389853, -0.02132437, 0.003324056, 0.0055610985, -0.0078012915, 0.006090427, 0.0062038545, 0.0169133, 0.0036391325, 0.0076815626, -0.018841568, 0.026037913, 0.024550753, 0.0055264398, -0.0015824712, -0.0047765584, 0.018425668,
4e9727215e95-1163
0.0030656934, -0.0113742575, -0.0020322427, 0.005069579, 0.0022701253, 0.036095154, -0.027449455, -0.008475555, 0.015388331, 0.018917186, 0.0018999106, -0.003349262, 0.020895867, -0.014480911, -0.025042271, 0.012546342, 0.013850759, 0.0069253794, 0.008588983, -0.015199285, -0.0029585673, -0.008759124, 0.016749462, 0.004111747, -0.04804285, ... 1436 more items]*//* Embed documents */const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);/*[ [ -0.0047852774, 0.0048640342, -0.01645707, -0.024395779, -0.017263541, 0.012512918, -0.019191515, 0.009053908, -0.010213212, -0.026890801, 0.022883644, 0.010251015, -0.023589306, -0.006584088, 0.007989113, 0.002720268, 0.025088841,
4e9727215e95-1164
0.002720268, 0.025088841, -0.012153786, 0.012928754, 0.013054766, -0.010395928, -0.0035566676, 0.0040008575, 0.008600268, -0.020678446, -0.0019106456, 0.012178987, -0.019241918, 0.030444318, -0.03102397, 0.0035692686, -0.007749692, -0.00604854, -0.01781799, 0.004860884, -0.015612794, 0.0014097509, -0.015637996, 0.019443536, -0.01612944, 0.0072960514, 0.008316742, 0.011548932, -0.013987249, -0.03336778, 0.011341013, 0.00425603, -0.0126578305, -0.0013861238, 0.028302127, 0.025466874, 0.0007029065, -0.016318457, 0.017427357, -0.016394064, 0.008499459, -0.033241767, 0.031200387,
4e9727215e95-1165
-0.033241767, 0.031200387, 0.03238489, -0.0212833, 0.0032416396, 0.005443686, -0.007749692, 0.0060201874,
4e9727215e95-1166
0.006281661, 0.016923312, 0.003528315, 0.0076740854, -0.01881348, 0.026109532, 0.024660403, 0.005472039, -0.0016712243, -0.0048136297, 0.018397642, 0.003011669, -0.011385117, -0.0020193304, 0.005138109, 0.0022335495, 0.03603922, -0.027495656, -0.008575066, 0.015436378, 0.018851284, 0.0018019609, -0.0034338066, 0.02094307, -0.014503895, -0.024950229, 0.012632628, 0.013735226, 0.0069936244, 0.008575066, -0.015196957, -0.0030541976, -0.008745181, 0.016746895, 0.0040481114, -0.048010286, ... 1436 more items ], [ -0.009446913, -0.013253193, 0.013174579, 0.0057552797, -0.038993083, 0.0077763423,
4e9727215e95-1167
0.0077763423, -0.0260478, -0.0114384955, -0.0022683728, -0.016509168, 0.041797023, 0.01787183, 0.00552271, -0.0049789557, 0.018146982, -0.01542166, 0.033752076, 0.006112323, 0.023872782, -0.016535373, -0.006623321, 0.016116094, -0.0061090477, -0.0044155475, -0.016627092, -0.022077737, -0.0009286407, -0.02156674, 0.011890532, -0.026283644, 0.02630985, 0.011942943, -0.026126415, -0.018264906, -0.014045896, -0.024187243, -0.019037955, -0.005037917, 0.020780588, -0.0049527506, 0.002399398, 0.020767486, 0.0080908025, -0.019666875, -0.027934562, 0.017688395, 0.015225122,
4e9727215e95-1168
0.017688395, 0.015225122, 0.0046186363, -0.0045007137, 0.024265857, 0.03244183, 0.0038848957, -0.03244183, -0.018893827, -0.0018065092, 0.023440398, -0.021763276, 0.015120302,
4e9727215e95-1169
0.01568371, -0.010861984, 0.011739853, -0.024501702, -0.005214801, 0.022955606, 0.001315165, -0.00492327, 0.0020358032, -0.003468891, -0.031079166, 0.0055259857, 0.0028547104, 0.012087069, 0.007992534, -0.0076256637, 0.008110457, 0.002998838, -0.024265857, 0.006977089, -0.015185814, -0.0069115767, 0.006466091, -0.029428247, -0.036241557, 0.036713246, 0.032284595, -0.0021144184, -0.014255536, 0.011228855, -0.027227025, -0.021619149, 0.00038242966, 0.02245771, -0.0014748519, 0.01573612, 0.0041010873, 0.006256451, -0.007992534, 0.038547598, 0.024658933, -0.012958387, ... 1436 more items ]]*/PreviousTokenTextSplitterNextDealing with API errorsGet startedCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc.
4e9727215e95-1170
Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersText embedding modelsHow-toIntegrationsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI referenceModulesData connectionText embedding modelsOn this pageText embedding modelsThe Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.The base Embeddings class in LangChain exposes two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).Get started​Embeddings can be used to create a numerical representation of textual data. This numerical representation is useful because it can be used to find similar documents.Below is an example of how to use the OpenAI embeddings.
4e9727215e95-1171
Embeddings occasionally have different embedding methods for queries versus documents, so the embedding class exposes a embedQuery and embedDocuments method.import { OpenAIEmbeddings } from "langchain/embeddings/openai";/* Create instance */const embeddings = new OpenAIEmbeddings();/* Embed queries */const res = await embeddings.embedQuery("Hello world");/*[ -0.004845875, 0.004899438, -0.016358767, -0.024475135, -0.017341806, 0.012571548, -0.019156644, 0.009036391, -0.010227379, -0.026945334, 0.022861943, 0.010321903, -0.023479493, -0.0066544134, 0.007977734, 0.0026371893, 0.025206111, -0.012048521, 0.012943339, 0.013094575, -0.010580265, -0.003509951, 0.004070787, 0.008639394, -0.020631202, -0.0019203906, 0.012161949, -0.019194454, 0.030373365, -0.031028723, 0.0036170771, -0.007813894, -0.0060778237, -0.017820721, 0.0048647798, -0.015640393, 0.001373733, -0.015552171,
4e9727215e95-1172
0.001373733, -0.015552171, 0.019534737, -0.016169721, 0.007316074, 0.008273906, 0.011418369, -0.01390117, -0.033347685, 0.011248227, 0.0042503807, -0.012792102, -0.0014595914, 0.028356876, 0.025407761, 0.00076445413, -0.016308354, 0.017455231, -0.016396577, 0.008557475, -0.03312083, 0.031104341, 0.032389853, -0.02132437, 0.003324056, 0.0055610985, -0.0078012915, 0.006090427, 0.0062038545, 0.0169133, 0.0036391325, 0.0076815626, -0.018841568, 0.026037913, 0.024550753, 0.0055264398, -0.0015824712, -0.0047765584, 0.018425668,
4e9727215e95-1173
0.0030656934, -0.0113742575, -0.0020322427, 0.005069579, 0.0022701253, 0.036095154, -0.027449455, -0.008475555, 0.015388331, 0.018917186, 0.0018999106, -0.003349262, 0.020895867, -0.014480911, -0.025042271, 0.012546342, 0.013850759, 0.0069253794, 0.008588983, -0.015199285, -0.0029585673, -0.008759124, 0.016749462, 0.004111747, -0.04804285, ... 1436 more items]*//* Embed documents */const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);/*[ [ -0.0047852774, 0.0048640342, -0.01645707, -0.024395779, -0.017263541, 0.012512918, -0.019191515, 0.009053908, -0.010213212, -0.026890801, 0.022883644, 0.010251015, -0.023589306, -0.006584088, 0.007989113, 0.002720268, 0.025088841,
4e9727215e95-1174
0.002720268, 0.025088841, -0.012153786, 0.012928754, 0.013054766, -0.010395928, -0.0035566676, 0.0040008575, 0.008600268, -0.020678446, -0.0019106456, 0.012178987, -0.019241918, 0.030444318, -0.03102397, 0.0035692686, -0.007749692, -0.00604854, -0.01781799, 0.004860884, -0.015612794, 0.0014097509, -0.015637996, 0.019443536, -0.01612944, 0.0072960514, 0.008316742, 0.011548932, -0.013987249, -0.03336778, 0.011341013, 0.00425603, -0.0126578305, -0.0013861238, 0.028302127, 0.025466874, 0.0007029065, -0.016318457, 0.017427357, -0.016394064, 0.008499459, -0.033241767, 0.031200387,
4e9727215e95-1175
-0.033241767, 0.031200387, 0.03238489, -0.0212833, 0.0032416396, 0.005443686, -0.007749692, 0.0060201874,
4e9727215e95-1176
0.006281661, 0.016923312, 0.003528315, 0.0076740854, -0.01881348, 0.026109532, 0.024660403, 0.005472039, -0.0016712243, -0.0048136297, 0.018397642, 0.003011669, -0.011385117, -0.0020193304, 0.005138109, 0.0022335495, 0.03603922, -0.027495656, -0.008575066, 0.015436378, 0.018851284, 0.0018019609, -0.0034338066, 0.02094307, -0.014503895, -0.024950229, 0.012632628, 0.013735226, 0.0069936244, 0.008575066, -0.015196957, -0.0030541976, -0.008745181, 0.016746895, 0.0040481114, -0.048010286, ... 1436 more items ], [ -0.009446913, -0.013253193, 0.013174579, 0.0057552797, -0.038993083, 0.0077763423,
4e9727215e95-1177
0.0077763423, -0.0260478, -0.0114384955, -0.0022683728, -0.016509168, 0.041797023, 0.01787183, 0.00552271, -0.0049789557, 0.018146982, -0.01542166, 0.033752076, 0.006112323, 0.023872782, -0.016535373, -0.006623321, 0.016116094, -0.0061090477, -0.0044155475, -0.016627092, -0.022077737, -0.0009286407, -0.02156674, 0.011890532, -0.026283644, 0.02630985, 0.011942943, -0.026126415, -0.018264906, -0.014045896, -0.024187243, -0.019037955, -0.005037917, 0.020780588, -0.0049527506, 0.002399398, 0.020767486, 0.0080908025, -0.019666875, -0.027934562, 0.017688395, 0.015225122,
4e9727215e95-1178
0.017688395, 0.015225122, 0.0046186363, -0.0045007137, 0.024265857, 0.03244183, 0.0038848957, -0.03244183, -0.018893827, -0.0018065092, 0.023440398, -0.021763276, 0.015120302,
4e9727215e95-1179
0.01568371, -0.010861984, 0.011739853, -0.024501702, -0.005214801, 0.022955606, 0.001315165, -0.00492327, 0.0020358032, -0.003468891, -0.031079166, 0.0055259857, 0.0028547104, 0.012087069, 0.007992534, -0.0076256637, 0.008110457, 0.002998838, -0.024265857, 0.006977089, -0.015185814, -0.0069115767, 0.006466091, -0.029428247, -0.036241557, 0.036713246, 0.032284595, -0.0021144184, -0.014255536, 0.011228855, -0.027227025, -0.021619149, 0.00038242966, 0.02245771, -0.0014748519, 0.01573612, 0.0041010873, 0.006256451, -0.007992534, 0.038547598, 0.024658933, -0.012958387, ... 1436 more items ]]*/PreviousTokenTextSplitterNextDealing with API errorsGet started
4e9727215e95-1180
Get startedIntroductionInstallationQuickstartModulesModel I/​OData connectionDocument loadersDocument transformersText embedding modelsHow-toIntegrationsVector storesRetrieversExperimentalCaching embeddingsChainsMemoryAgentsCallbacksModulesGuidesEcosystemAdditional resourcesCommunity navigatorAPI reference ModulesData connectionText embedding modelsOn this pageText embedding modelsThe Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.The base Embeddings class in LangChain exposes two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).Get started​Embeddings can be used to create a numerical representation of textual data. This numerical representation is useful because it can be used to find similar documents.Below is an example of how to use the OpenAI embeddings.
4e9727215e95-1181
Embeddings occasionally have different embedding methods for queries versus documents, so the embedding class exposes a embedQuery and embedDocuments method.import { OpenAIEmbeddings } from "langchain/embeddings/openai";/* Create instance */const embeddings = new OpenAIEmbeddings();/* Embed queries */const res = await embeddings.embedQuery("Hello world");/*[ -0.004845875, 0.004899438, -0.016358767, -0.024475135, -0.017341806, 0.012571548, -0.019156644, 0.009036391, -0.010227379, -0.026945334, 0.022861943, 0.010321903, -0.023479493, -0.0066544134, 0.007977734, 0.0026371893, 0.025206111, -0.012048521, 0.012943339, 0.013094575, -0.010580265, -0.003509951, 0.004070787, 0.008639394, -0.020631202, -0.0019203906, 0.012161949, -0.019194454, 0.030373365, -0.031028723, 0.0036170771, -0.007813894, -0.0060778237, -0.017820721, 0.0048647798, -0.015640393, 0.001373733, -0.015552171,
4e9727215e95-1182
0.001373733, -0.015552171, 0.019534737, -0.016169721, 0.007316074, 0.008273906, 0.011418369, -0.01390117, -0.033347685, 0.011248227, 0.0042503807, -0.012792102, -0.0014595914, 0.028356876, 0.025407761, 0.00076445413, -0.016308354, 0.017455231, -0.016396577, 0.008557475, -0.03312083, 0.031104341, 0.032389853, -0.02132437, 0.003324056, 0.0055610985, -0.0078012915, 0.006090427, 0.0062038545, 0.0169133, 0.0036391325, 0.0076815626, -0.018841568, 0.026037913, 0.024550753, 0.0055264398, -0.0015824712, -0.0047765584, 0.018425668,
4e9727215e95-1183
0.0030656934, -0.0113742575, -0.0020322427, 0.005069579, 0.0022701253, 0.036095154, -0.027449455, -0.008475555, 0.015388331, 0.018917186, 0.0018999106, -0.003349262, 0.020895867, -0.014480911, -0.025042271, 0.012546342, 0.013850759, 0.0069253794, 0.008588983, -0.015199285, -0.0029585673, -0.008759124, 0.016749462, 0.004111747, -0.04804285, ... 1436 more items]*//* Embed documents */const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);/*[ [ -0.0047852774, 0.0048640342, -0.01645707, -0.024395779, -0.017263541, 0.012512918, -0.019191515, 0.009053908, -0.010213212, -0.026890801, 0.022883644, 0.010251015, -0.023589306, -0.006584088, 0.007989113, 0.002720268, 0.025088841,
4e9727215e95-1184
0.002720268, 0.025088841, -0.012153786, 0.012928754, 0.013054766, -0.010395928, -0.0035566676, 0.0040008575, 0.008600268, -0.020678446, -0.0019106456, 0.012178987, -0.019241918, 0.030444318, -0.03102397, 0.0035692686, -0.007749692, -0.00604854, -0.01781799, 0.004860884, -0.015612794, 0.0014097509, -0.015637996, 0.019443536, -0.01612944, 0.0072960514, 0.008316742, 0.011548932, -0.013987249, -0.03336778, 0.011341013, 0.00425603, -0.0126578305, -0.0013861238, 0.028302127, 0.025466874, 0.0007029065, -0.016318457, 0.017427357, -0.016394064, 0.008499459, -0.033241767, 0.031200387,
4e9727215e95-1185
-0.033241767, 0.031200387, 0.03238489, -0.0212833, 0.0032416396, 0.005443686, -0.007749692, 0.0060201874,
4e9727215e95-1186
0.006281661, 0.016923312, 0.003528315, 0.0076740854, -0.01881348, 0.026109532, 0.024660403, 0.005472039, -0.0016712243, -0.0048136297, 0.018397642, 0.003011669, -0.011385117, -0.0020193304, 0.005138109, 0.0022335495, 0.03603922, -0.027495656, -0.008575066, 0.015436378, 0.018851284, 0.0018019609, -0.0034338066, 0.02094307, -0.014503895, -0.024950229, 0.012632628, 0.013735226, 0.0069936244, 0.008575066, -0.015196957, -0.0030541976, -0.008745181, 0.016746895, 0.0040481114, -0.048010286, ... 1436 more items ], [ -0.009446913, -0.013253193, 0.013174579, 0.0057552797, -0.038993083, 0.0077763423,
4e9727215e95-1187
0.0077763423, -0.0260478, -0.0114384955, -0.0022683728, -0.016509168, 0.041797023, 0.01787183, 0.00552271, -0.0049789557, 0.018146982, -0.01542166, 0.033752076, 0.006112323, 0.023872782, -0.016535373, -0.006623321, 0.016116094, -0.0061090477, -0.0044155475, -0.016627092, -0.022077737, -0.0009286407, -0.02156674, 0.011890532, -0.026283644, 0.02630985, 0.011942943, -0.026126415, -0.018264906, -0.014045896, -0.024187243, -0.019037955, -0.005037917, 0.020780588, -0.0049527506, 0.002399398, 0.020767486, 0.0080908025, -0.019666875, -0.027934562, 0.017688395, 0.015225122,
4e9727215e95-1188
0.017688395, 0.015225122, 0.0046186363, -0.0045007137, 0.024265857, 0.03244183, 0.0038848957, -0.03244183, -0.018893827, -0.0018065092, 0.023440398, -0.021763276, 0.015120302,
4e9727215e95-1189
0.01568371, -0.010861984, 0.011739853, -0.024501702, -0.005214801, 0.022955606, 0.001315165, -0.00492327, 0.0020358032, -0.003468891, -0.031079166, 0.0055259857, 0.0028547104, 0.012087069, 0.007992534, -0.0076256637, 0.008110457, 0.002998838, -0.024265857, 0.006977089, -0.015185814, -0.0069115767, 0.006466091, -0.029428247, -0.036241557, 0.036713246, 0.032284595, -0.0021144184, -0.014255536, 0.011228855, -0.027227025, -0.021619149, 0.00038242966, 0.02245771, -0.0014748519, 0.01573612, 0.0041010873, 0.006256451, -0.007992534, 0.038547598, 0.024658933, -0.012958387, ... 1436 more items ]]*/PreviousTokenTextSplitterNextDealing with API errorsGet started
4e9727215e95-1190
ModulesData connectionText embedding modelsOn this pageText embedding modelsThe Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.The base Embeddings class in LangChain exposes two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).Get started​Embeddings can be used to create a numerical representation of textual data. This numerical representation is useful because it can be used to find similar documents.Below is an example of how to use the OpenAI embeddings.
4e9727215e95-1191
Embeddings occasionally have different embedding methods for queries versus documents, so the embedding class exposes a embedQuery and embedDocuments method.import { OpenAIEmbeddings } from "langchain/embeddings/openai";/* Create instance */const embeddings = new OpenAIEmbeddings();/* Embed queries */const res = await embeddings.embedQuery("Hello world");/*[ -0.004845875, 0.004899438, -0.016358767, -0.024475135, -0.017341806, 0.012571548, -0.019156644, 0.009036391, -0.010227379, -0.026945334, 0.022861943, 0.010321903, -0.023479493, -0.0066544134, 0.007977734, 0.0026371893, 0.025206111, -0.012048521, 0.012943339, 0.013094575, -0.010580265, -0.003509951, 0.004070787, 0.008639394, -0.020631202, -0.0019203906, 0.012161949, -0.019194454, 0.030373365, -0.031028723, 0.0036170771, -0.007813894, -0.0060778237, -0.017820721, 0.0048647798, -0.015640393, 0.001373733, -0.015552171,
4e9727215e95-1192
0.001373733, -0.015552171, 0.019534737, -0.016169721, 0.007316074, 0.008273906, 0.011418369, -0.01390117, -0.033347685, 0.011248227, 0.0042503807, -0.012792102, -0.0014595914, 0.028356876, 0.025407761, 0.00076445413, -0.016308354, 0.017455231, -0.016396577, 0.008557475, -0.03312083, 0.031104341, 0.032389853, -0.02132437, 0.003324056, 0.0055610985, -0.0078012915, 0.006090427, 0.0062038545, 0.0169133, 0.0036391325, 0.0076815626, -0.018841568, 0.026037913, 0.024550753, 0.0055264398, -0.0015824712, -0.0047765584, 0.018425668,
4e9727215e95-1193
0.0030656934, -0.0113742575, -0.0020322427, 0.005069579, 0.0022701253, 0.036095154, -0.027449455, -0.008475555, 0.015388331, 0.018917186, 0.0018999106, -0.003349262, 0.020895867, -0.014480911, -0.025042271, 0.012546342, 0.013850759, 0.0069253794, 0.008588983, -0.015199285, -0.0029585673, -0.008759124, 0.016749462, 0.004111747, -0.04804285, ... 1436 more items]*//* Embed documents */const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);/*[ [ -0.0047852774, 0.0048640342, -0.01645707, -0.024395779, -0.017263541, 0.012512918, -0.019191515, 0.009053908, -0.010213212, -0.026890801, 0.022883644, 0.010251015, -0.023589306, -0.006584088, 0.007989113, 0.002720268, 0.025088841,
4e9727215e95-1194
0.002720268, 0.025088841, -0.012153786, 0.012928754, 0.013054766, -0.010395928, -0.0035566676, 0.0040008575, 0.008600268, -0.020678446, -0.0019106456, 0.012178987, -0.019241918, 0.030444318, -0.03102397, 0.0035692686, -0.007749692, -0.00604854, -0.01781799, 0.004860884, -0.015612794, 0.0014097509, -0.015637996, 0.019443536, -0.01612944, 0.0072960514, 0.008316742, 0.011548932, -0.013987249, -0.03336778, 0.011341013, 0.00425603, -0.0126578305, -0.0013861238, 0.028302127, 0.025466874, 0.0007029065, -0.016318457, 0.017427357, -0.016394064, 0.008499459, -0.033241767, 0.031200387,
4e9727215e95-1195
-0.033241767, 0.031200387, 0.03238489, -0.0212833, 0.0032416396, 0.005443686, -0.007749692, 0.0060201874,
4e9727215e95-1196
0.006281661, 0.016923312, 0.003528315, 0.0076740854, -0.01881348, 0.026109532, 0.024660403, 0.005472039, -0.0016712243, -0.0048136297, 0.018397642, 0.003011669, -0.011385117, -0.0020193304, 0.005138109, 0.0022335495, 0.03603922, -0.027495656, -0.008575066, 0.015436378, 0.018851284, 0.0018019609, -0.0034338066, 0.02094307, -0.014503895, -0.024950229, 0.012632628, 0.013735226, 0.0069936244, 0.008575066, -0.015196957, -0.0030541976, -0.008745181, 0.016746895, 0.0040481114, -0.048010286, ... 1436 more items ], [ -0.009446913, -0.013253193, 0.013174579, 0.0057552797, -0.038993083, 0.0077763423,
4e9727215e95-1197
0.0077763423, -0.0260478, -0.0114384955, -0.0022683728, -0.016509168, 0.041797023, 0.01787183, 0.00552271, -0.0049789557, 0.018146982, -0.01542166, 0.033752076, 0.006112323, 0.023872782, -0.016535373, -0.006623321, 0.016116094, -0.0061090477, -0.0044155475, -0.016627092, -0.022077737, -0.0009286407, -0.02156674, 0.011890532, -0.026283644, 0.02630985, 0.011942943, -0.026126415, -0.018264906, -0.014045896, -0.024187243, -0.019037955, -0.005037917, 0.020780588, -0.0049527506, 0.002399398, 0.020767486, 0.0080908025, -0.019666875, -0.027934562, 0.017688395, 0.015225122,
4e9727215e95-1198
0.017688395, 0.015225122, 0.0046186363, -0.0045007137, 0.024265857, 0.03244183, 0.0038848957, -0.03244183, -0.018893827, -0.0018065092, 0.023440398, -0.021763276, 0.015120302,
4e9727215e95-1199
0.01568371, -0.010861984, 0.011739853, -0.024501702, -0.005214801, 0.022955606, 0.001315165, -0.00492327, 0.0020358032, -0.003468891, -0.031079166, 0.0055259857, 0.0028547104, 0.012087069, 0.007992534, -0.0076256637, 0.008110457, 0.002998838, -0.024265857, 0.006977089, -0.015185814, -0.0069115767, 0.006466091, -0.029428247, -0.036241557, 0.036713246, 0.032284595, -0.0021144184, -0.014255536, 0.011228855, -0.027227025, -0.021619149, 0.00038242966, 0.02245771, -0.0014748519, 0.01573612, 0.0041010873, 0.006256451, -0.007992534, 0.038547598, 0.024658933, -0.012958387, ... 1436 more items ]]*/PreviousTokenTextSplitterNextDealing with API errors