Source code for langchain.document_loaders.evernote
self.file_path = file_path
self.load_single_document = load_single_document
[docs] def load(self) -> List[Document]:
"""Load documents from EverNote export file."""
documents = [
Document(
page_content=note["content"],
metadata={
**{
key: value
for key, value in note.items()
if key not in ["content", "content-raw", "resource"]
},
**{"source": self.file_path},
},
)
for note in self._parse_note_xml(self.file_path)
if note.get("content") is not None
]
if not self.load_single_document:
return documents
return [
Document(
page_content="".join([document.page_content for document in documents]),
metadata={"source": self.file_path},
)
]
@staticmethod
def _parse_content(content: str) -> str:
try:
import html2text
return html2text.html2text(content).strip()
except ImportError as e:
logging.error(
"Could not import `html2text`. Although it is not a required package "
"to use Langchain, using the EverNote loader requires `html2text`. "
"Please install `html2text` via `pip install html2text` and try again."
)
raise e
@staticmethod
def _parse_resource(resource: list) -> dict:
rsc_dict: Dict[str, Any] = {}
for elem in resource:
if elem.tag == "data":
# Sometimes elem.text is None
rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b""
rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest()
else:
rsc_dict[elem.tag] = elem.text
return rsc_dict
@staticmethod
def _parse_note(note: List, prefix: Optional[str] = None) -> dict:
note_dict: Dict[str, Any] = {}
resources = []
def add_prefix(element_tag: str) -> str:
if prefix is None:
return element_tag
return f"{prefix}.{element_tag}"
for elem in note:
if elem.tag == "content":
note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text)
# A copy of original content
note_dict["content-raw"] = elem.text
elif elem.tag == "resource":
resources.append(EverNoteLoader._parse_resource(elem))
elif elem.tag == "created" or elem.tag == "updated":
note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ")
elif elem.tag == "note-attributes":
additional_attributes = EverNoteLoader._parse_note(
elem, elem.tag
) # Recursively enter the note-attributes tag
note_dict.update(additional_attributes)
else:
note_dict[elem.tag] = elem.text
if len(resources) > 0:
note_dict["resource"] = resources
return {add_prefix(key): value for key, value in note_dict.items()}
@staticmethod
def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]:
"""Parse Evernote xml."""
# Without huge_tree set to True, parser may complain about huge text node
# Try to recover, because there may be "&nbsp;", which will cause
# "XMLSyntaxError: Entity 'nbsp' not defined"
try:
from lxml import etree
except ImportError as e:
logging.error(
"Could not import `lxml`. Although it is not a required package to use "
"Langchain, using the EverNote loader requires `lxml`. Please install "
"`lxml` via `pip install lxml` and try again."
)
raise e
context = etree.iterparse(
xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True
)
for action, elem in context:
if elem.tag == "note":
yield EverNoteLoader._parse_note(elem)
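A minimal usage sketch (the export filename is a placeholder, and the constructor signature is inferred from the attributes set above; `lxml` and `html2text` must be installed):
# Illustrative example: combine an EverNote export into a single Document
loader = EverNoteLoader("my_notebook.enex", load_single_document=True)
docs = loader.load()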
Source code for langchain.document_loaders.trello
"""Loader that loads cards from Trello"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env
if TYPE_CHECKING:
from trello import Board, Card, TrelloClient
[docs]class TrelloLoader(BaseLoader):
"""Trello loader. Reads all cards from a Trello board."""
def __init__(
self,
client: TrelloClient,
board_name: str,
*,
include_card_name: bool = True,
include_comments: bool = True,
include_checklist: bool = True,
card_filter: Literal["closed", "open", "all"] = "all",
extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"),
):
"""Initialize Trello loader.
Args:
client: Trello API client.
board_name: The name of the Trello board.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata. Valid values are "due_date", "labels", "list", "closed".
"""
self.client = client
self.board_name = board_name
self.include_card_name = include_card_name
self.include_comments = include_comments
self.include_checklist = include_checklist
self.extra_metadata = extra_metadata
self.card_filter = card_filter
[docs] @classmethod
def from_credentials(
cls,
board_name: str,
*,
api_key: Optional[str] = None,
token: Optional[str] = None,
**kwargs: Any,
) -> TrelloLoader:
"""Convenience constructor that builds TrelloClient init param for you.
Args:
board_name: The name of the Trello board.
api_key: Trello API key. Can also be specified as environment variable
TRELLO_API_KEY.
token: Trello token. Can also be specified as environment variable
TRELLO_TOKEN.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata. Valid values are "due_date", "labels", "list", "closed".
"""
try:
from trello import TrelloClient # type: ignore
except ImportError as ex:
raise ImportError(
"Could not import trello python package. "
"Please install it with `pip install py-trello`."
) from ex
api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY")
token = token or get_from_env("token", "TRELLO_TOKEN")
client = TrelloClient(api_key=api_key, token=token)
return cls(client, board_name, **kwargs)
[docs] def load(self) -> List[Document]:
"""Loads all cards from the specified Trello board.
You can filter the cards, metadata and text included by using the optional
parameters.
Returns:
A list of documents, one for each card in the board.
"""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError as ex:
raise ImportError(
"`beautifulsoup4` package not found, please run"
" `pip install beautifulsoup4`"
) from ex
board = self._get_board()
# Create a dictionary with the list IDs as keys and the list names as values
list_dict = {list_item.id: list_item.name for list_item in board.list_lists()}
# Get Cards on the board
cards = board.get_cards(card_filter=self.card_filter)
return [self._card_to_doc(card, list_dict) for card in cards]
def _get_board(self) -> Board:
# Find the first board with a matching name
board = next(
(b for b in self.client.list_boards() if b.name == self.board_name), None
)
if not board:
raise ValueError(f"Board `{self.board_name}` not found.")
return board
def _card_to_doc(self, card: Card, list_dict: dict) -> Document:
from bs4 import BeautifulSoup # type: ignore
text_content = ""
if self.include_card_name:
text_content = card.name + "\n"
if card.description.strip():
text_content += BeautifulSoup(card.description, "lxml").get_text()
if self.include_checklist:
# Get all the checklist items on the card
for checklist in card.checklists:
if checklist.items:
items = [
f"{item['name']}:{item['state']}" for item in checklist.items
]
text_content += f"\n{checklist.name}\n" + "\n".join(items)
if self.include_comments:
# Get all the comments on the card
comments = [
BeautifulSoup(comment["data"]["text"], "lxml").get_text()
for comment in card.comments
]
text_content += "Comments:" + "\n".join(comments)
# Default metadata fields
metadata = {
"title": card.name,
"id": card.id,
"url": card.url,
}
# Extra metadata fields. Card object is not subscriptable.
if "labels" in self.extra_metadata:
metadata["labels"] = [label.name for label in card.labels]
if "list" in self.extra_metadata:
if card.list_id in list_dict:
metadata["list"] = list_dict[card.list_id]
if "closed" in self.extra_metadata:
metadata["closed"] = card.closed
if "due_date" in self.extra_metadata:
metadata["due_date"] = card.due_date
return Document(page_content=text_content, metadata=metadata)
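A minimal usage sketch (board name and credentials are placeholders; the TRELLO_API_KEY and TRELLO_TOKEN environment variables can be used instead of passing them explicitly):
loader = TrelloLoader.from_credentials(
    "My Board",
    api_key="your-api-key",
    token="your-token",
    card_filter="open",
    include_checklist=False,
)
docs = loader.load()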
Source code for langchain.document_loaders.conllu
"""Load CoNLL-U files."""
import csv
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class CoNLLULoader(BaseLoader):
"""Load CoNLL-U files."""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
[docs] def load(self) -> List[Document]:
"""Load from file path."""
with open(self.file_path, encoding="utf8") as f:
tsv = list(csv.reader(f, delimiter="\t"))
# If len(line) > 1, the line is not a comment
lines = [line for line in tsv if len(line) > 1]
text = ""
for i, line in enumerate(lines):
# Do not add a space after a punctuation mark or at the end of the sentence
if line[9] == "SpaceAfter=No" or i == len(lines) - 1:
text += line[1]
else:
text += line[1] + " "
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
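A minimal usage sketch (the file path is illustrative). CoNLL-U rows are tab-separated; the loader takes the word form from column index 1 and checks column index 9 (MISC) for "SpaceAfter=No":
docs = CoNLLULoader("sample.conllu").load()
print(docs[0].page_content)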
Source code for langchain.document_loaders.roam
"""Loader that loads Roam directory dump."""
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class RoamLoader(BaseLoader):
"""Loader that loads Roam files from disk."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
ps = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in ps:
with open(p) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
Source code for langchain.document_loaders.git
import os
from typing import Callable, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class GitLoader(BaseLoader):
"""Loads files from a Git repository into a list of documents.
Repository can be local on disk available at `repo_path`,
or remote at `clone_url` that will be cloned to `repo_path`.
Currently supports only text files.
Each document represents one file in the repository. The `path` points to
the local Git repository, and the `branch` specifies the branch to load
files from. By default, it loads from the `main` branch.
"""
def __init__(
self,
repo_path: str,
clone_url: Optional[str] = None,
branch: Optional[str] = "main",
file_filter: Optional[Callable[[str], bool]] = None,
):
self.repo_path = repo_path
self.clone_url = clone_url
self.branch = branch
self.file_filter = file_filter
[docs] def load(self) -> List[Document]:
try:
from git import Blob, Repo # type: ignore
except ImportError as ex:
raise ImportError(
"Could not import git python package. "
"Please install it with `pip install GitPython`."
) from ex
if not os.path.exists(self.repo_path) and self.clone_url is None:
raise ValueError(f"Path {self.repo_path} does not exist")
elif self.clone_url:
repo = Repo.clone_from(self.clone_url, self.repo_path)
repo.git.checkout(self.branch)
else:
repo = Repo(self.repo_path)
repo.git.checkout(self.branch)
docs: List[Document] = []
for item in repo.tree().traverse():
if not isinstance(item, Blob):
continue
file_path = os.path.join(self.repo_path, item.path)
ignored_files = repo.ignored([file_path]) # type: ignore
if len(ignored_files):
continue
# uses filter to skip files
if self.file_filter and not self.file_filter(file_path):
continue
rel_file_path = os.path.relpath(file_path, self.repo_path)
try:
with open(file_path, "rb") as f:
content = f.read()
file_type = os.path.splitext(item.name)[1]
# loads only text files
try:
text_content = content.decode("utf-8")
except UnicodeDecodeError:
continue
metadata = {
"source": rel_file_path,
"file_path": rel_file_path,
"file_name": item.name,
"file_type": file_type,
}
doc = Document(page_content=text_content, metadata=metadata)
docs.append(doc)
except Exception as e:
print(f"Error reading file {file_path}: {e}")
return docs
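A minimal usage sketch (the repository URL, local path and filter are placeholders; requires the GitPython package):
loader = GitLoader(
    repo_path="./example_repo",
    clone_url="https://github.com/your-org/your-repo",
    branch="main",
    file_filter=lambda file_path: file_path.endswith(".py"),
)
docs = loader.load()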
Source code for langchain.document_loaders.whatsapp_chat
import re
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_rows(date: str, sender: str, text: str) -> str:
"""Combine message information in a readable format ready to be used."""
return f"{sender} on {date}: {text}\n\n"
[docs]class WhatsAppChatLoader(BaseLoader):
"""Loader that loads WhatsApp messages text file."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
text_content = ""
with open(p, encoding="utf8") as f:
lines = f.readlines()
message_line_regex = r"""
\[?
(
\d{1,2}
[\/.]
\d{1,2}
[\/.]
\d{2,4}
,\s
\d{1,2}
:\d{2}
(?:
:\d{2}
)?
(?:[ _](?:AM|PM))?
)
\]?
[\s-]*
([~\w\s]+)
[:]+
\s
(.+)
"""
for line in lines:
result = re.match(message_line_regex, line.strip(), flags=re.VERBOSE)
if result:
date, sender, text = result.groups()
text_content += concatenate_rows(date, sender, text)
metadata = {"source": str(p)}
return [Document(page_content=text_content, metadata=metadata)]
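A minimal usage sketch (the export path is a placeholder). The verbose regex above matches exported lines of the form shown in the comment:
# e.g. "[13.05.2023, 10:12:45] Jane Doe: Hello there!"
loader = WhatsAppChatLoader("whatsapp_chat.txt")
docs = loader.load()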
Source code for langchain.document_loaders.confluence
"""Load Data from a Confluence Space"""
import logging
from io import BytesIO
from typing import Any, Callable, List, Optional, Union
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class ConfluenceLoader(BaseLoader):
"""
Load Confluence pages. Port of https://llamahub.ai/l/confluence
This currently supports both username/api_key and OAuth2 login.
Specify a list of page_ids and/or a space_key to load the corresponding pages into
Document objects; if both are specified, the union of both sets will be returned.
You can also specify a boolean `include_attachments` to include attachments. This
is set to False by default; if set to True, all attachments will be downloaded and
ConfluenceReader will extract the text from the attachments and add it to the
Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG,
SVG, Word and Excel.
Hint: space_key and page_id can both be found in the URL of a page in Confluence
- https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>
Example:
.. code-block:: python
from langchain.document_loaders import ConfluenceLoader
loader = ConfluenceLoader(
url="https://yoursite.atlassian.com/wiki",
username="me",
api_key="12345"
)
documents = loader.load(space_key="SPACE",limit=50)
:param url: Base URL of the Confluence wiki (e.g. https://yoursite.atlassian.com/wiki)
:type url: str
:param api_key: Confluence API key (used together with `username`), defaults to None
:type api_key: str, optional
:param username: Confluence username (used together with `api_key`), defaults to None
:type username: str, optional
:param oauth2: OAuth2 credentials dict with the keys `access_token`,
`access_token_secret`, `consumer_key` and `key_cert`, defaults to {}
:type oauth2: dict, optional
:param cloud: Whether the Confluence instance is an Atlassian Cloud site,
defaults to True
:type cloud: bool, optional
:param number_of_retries: How many times to retry, defaults to 3
:type number_of_retries: Optional[int], optional
:param min_retry_seconds: Minimum wait between retries in seconds, defaults to 2
:type min_retry_seconds: Optional[int], optional
:param max_retry_seconds: Maximum wait between retries in seconds, defaults to 10
:type max_retry_seconds: Optional[int], optional
:param confluence_kwargs: additional kwargs to initialize confluence with
:type confluence_kwargs: dict, optional
:raises ValueError: Errors while validating input
:raises ImportError: Required dependencies not installed.
"""
def __init__(
self,
url: str,
api_key: Optional[str] = None,
username: Optional[str] = None,
oauth2: Optional[dict] = None,
cloud: Optional[bool] = True,
number_of_retries: Optional[int] = 3,
min_retry_seconds: Optional[int] = 2,
max_retry_seconds: Optional[int] = 10,
confluence_kwargs: Optional[dict] = None,
):
confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(url, api_key, username, oauth2)
if errors:
raise ValueError(f"Error(s) while validating input: {errors}")
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
try:
from atlassian import Confluence # noqa: F401
except ImportError:
raise ImportError(
"`atlassian` package not found, please run "
"`pip install atlassian-python-api`"
)
if oauth2:
self.confluence = Confluence(
url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
)
else:
self.confluence = Confluence(
url=url,
username=username,
password=api_key,
cloud=cloud,
**confluence_kwargs,
)
[docs] @staticmethod
def validate_init_args(
url: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
oauth2: Optional[dict] = None,
) -> Union[List, None]:
"""Validates proper combinations of init arguments"""
errors = []
if url is None:
errors.append("Must provide `base_url`")
if (api_key and not username) or (username and not api_key):
errors.append(
"If one of `api_key` or `username` is provided, "
"the other must be as well."
)
if (api_key or username) and oauth2:
errors.append(
"Cannot provide a value for `api_key` and/or "
"`username` and provide a value for `oauth2`"
)
if oauth2 and oauth2.keys() != {
"access_token",
"access_token_secret",
"consumer_key",
"key_cert",
}:
errors.append(
"You have either omitted required keys or added extra "
"keys to the oauth2 dictionary. key values should be "
"`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`"
)
if errors:
return errors
return None
[docs] def load(
self,
space_key: Optional[str] = None,
page_ids: Optional[List[str]] = None,
label: Optional[str] = None,
cql: Optional[str] = None,
include_restricted_content: bool = False,
include_archived_content: bool = False,
include_attachments: bool = False,
include_comments: bool = False,
limit: Optional[int] = 50,
max_pages: Optional[int] = 1000,
) -> List[Document]:
"""
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
:param include_restricted_content: Whether to include pages with read
restrictions, defaults to False
:type include_restricted_content: bool, optional
:param include_archived_content: Whether to include archived content,
defaults to False
:type include_archived_content: bool, optional
:param include_attachments: Whether to download attachments and append their
extracted text, defaults to False
:type include_attachments: bool, optional
:param include_comments: Whether to append page comments, defaults to False
:type include_comments: bool, optional
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
:param max_pages: Maximum number of pages to retrieve in total, defaults to 1000
:type max_pages: int, optional
:raises ValueError: If none of `space_key`, `page_ids`, `label` or `cql` is provided
:raises ImportError: If required optional dependencies are not installed
:return: A list of Documents for the loaded pages
:rtype: List[Document]
"""
if not space_key and not page_ids and not label and not cql:
raise ValueError(
"Must specify at least one among `space_key`, `page_ids`, "
"`label`, `cql` parameters."
)
docs = []
if space_key:
pages = self.paginate_request(
self.confluence.get_all_pages_from_space,
space=space_key,
limit=limit,
max_pages=max_pages,
status="any" if include_archived_content else "current",
expand="body.storage.value",
)
docs += self.process_pages(
pages, include_restricted_content, include_attachments, include_comments
)
if label:
pages = self.paginate_request(
self.confluence.get_all_pages_by_label,
label=label,
limit=limit,
max_pages=max_pages,
)
ids_by_label = [page["id"] for page in pages]
if page_ids:
page_ids = list(set(page_ids + ids_by_label))
else:
page_ids = list(set(ids_by_label))
if cql:
pages = self.paginate_request(
self.confluence.cql,
cql=cql,
limit=limit,
max_pages=max_pages,
include_archived_spaces=include_archived_content,
expand="body.storage.value",
)
docs += self.process_pages(
pages, include_restricted_content, include_attachments, include_comments
)
if page_ids:
for page_id in page_ids:
get_page = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1, # type: ignore[arg-type]
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(self.confluence.get_page_by_id)
page = get_page(page_id=page_id, expand="body.storage.value")
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(page, include_attachments, include_comments)
docs.append(doc)
return docs
[docs] def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List:
"""Paginate the various methods to retrieve groups of pages.
Unfortunately, due to page size, sometimes the Confluence API
doesn't match the limit value. If `limit` is >100 confluence
seems to cap the response to 100. Also, due to the Atlassian Python
package, we don't get the "next" values from the "_links" key because
they only return the value from the results key. So here, the pagination
starts from 0 and goes until the max_pages, getting the `limit` number
of pages with each request. We have to manually check if there
are more docs based on the length of the returned list of pages, rather than
just checking for the presence of a `next` key in the response like this page
would have you do:
https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/
:param retrieval_method: Function used to retrieve docs
:type retrieval_method: callable
:return: List of documents
:rtype: List
"""
max_pages = kwargs.pop("max_pages")
docs: List[dict] = []
while len(docs) < max_pages:
get_pages = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1,
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(retrieval_method)
batch = get_pages(**kwargs, start=len(docs))
if not batch:
break
docs.extend(batch)
return docs[:max_pages]
[docs] def is_public_page(self, page: dict) -> bool:
"""Check if a page is publicly accessible."""
restrictions = self.confluence.get_all_restrictions_for_content(page["id"])
return (
page["status"] == "current"
and not restrictions["read"]["restrictions"]["user"]["results"]
and not restrictions["read"]["restrictions"]["group"]["results"]
)
[docs] def process_pages(
self,
pages: List[dict],
include_restricted_content: bool,
include_attachments: bool,
include_comments: bool,
) -> List[Document]:
"""Process a list of pages into a list of documents."""
docs = []
for page in pages:
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(page, include_attachments, include_comments)
docs.append(doc)
return docs
[docs] def process_page(
self,
page: dict,
include_attachments: bool,
include_comments: bool,
) -> Document:
try:
from bs4 import BeautifulSoup # type: ignore
except ImportError:
raise ImportError(
"`beautifulsoup4` package not found, please run "
"`pip install beautifulsoup4`"
)
if include_attachments:
attachment_texts = self.process_attachment(page["id"])
else:
attachment_texts = []
text = BeautifulSoup(
page["body"]["storage"]["value"], "lxml"
).get_text() + "".join(attachment_texts)
if include_comments:
comments = self.confluence.get_page_comments(
page["id"], expand="body.view.value", depth="all"
)["results"]
comment_texts = [
BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text()
for comment in comments
]
text = text + "".join(comment_texts)
return Document(
page_content=text,
metadata={
"title": page["title"],
"id": page["id"],
"source": self.base_url.strip("/") + page["_links"]["webui"],
},
)
[docs] def process_attachment(self, page_id: str) -> List[str]:
try:
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`Pillow` package not found, " "please run `pip install Pillow`"
)
# depending on setup you may also need to set the correct path for
# poppler and tesseract
attachments = self.confluence.get_attachments_from_content(page_id)["results"]
texts = []
for attachment in attachments:
media_type = attachment["metadata"]["mediaType"]
absolute_url = self.base_url + attachment["_links"]["download"]
title = attachment["title"]
if media_type == "application/pdf":
text = title + self.process_pdf(absolute_url)
elif (
media_type == "image/png"
or media_type == "image/jpg"
or media_type == "image/jpeg"
):
text = title + self.process_image(absolute_url)
elif (
media_type == "application/vnd.openxmlformats-officedocument"
".wordprocessingml.document"
):
text = title + self.process_doc(absolute_url)
elif media_type == "application/vnd.ms-excel":
text = title + self.process_xls(absolute_url)
elif media_type == "image/svg+xml":
text = title + self.process_svg(absolute_url)
else:
continue
texts.append(text)
return texts
[docs] def process_pdf(self, link: str) -> str:
try:
import pytesseract # noqa: F401
from pdf2image import convert_from_bytes # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` package not found, "
"please run `pip install pytesseract pdf2image`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
images = convert_from_bytes(response.content)
except ValueError:
return text
for i, image in enumerate(images):
image_text = pytesseract.image_to_string(image)
text += f"Page {i + 1}:\n{image_text}\n\n"
return text
[docs] def process_image(self, link: str) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `Pillow` package not found, "
"please run `pip install pytesseract Pillow`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
image = Image.open(BytesIO(response.content))
except OSError:
return text
return pytesseract.image_to_string(image)
[docs] def process_doc(self, link: str) -> str:
try:
import docx2txt # noqa: F401
except ImportError:
raise ImportError(
"`docx2txt` package not found, please run `pip install docx2txt`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data)
[docs] def process_xls(self, link: str) -> str:
try:
import xlrd # noqa: F401
except ImportError:
raise ImportError("`xlrd` package not found, please run `pip install xlrd`")
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
workbook = xlrd.open_workbook(file_contents=response.content)
for sheet in workbook.sheets():
text += f"{sheet.name}:\n"
for row in range(sheet.nrows):
for col in range(sheet.ncols):
text += f"{sheet.cell_value(row, col)}\t"
text += "\n"
text += "\n"
return text
[docs] def process_svg(self, link: str) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
from reportlab.graphics import renderPM # noqa: F401
from svglib.svglib import svg2rlg # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, "
"please run `pip install pytesseract Pillow reportlab svglib`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt="PNG")
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image)
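A minimal usage sketch (URL, credentials and page IDs are placeholders; see the class docstring for the credential options):
loader = ConfluenceLoader(
    url="https://yoursite.atlassian.com/wiki",
    username="me",
    api_key="12345",
)
docs = loader.load(page_ids=["123456"], include_attachments=False, include_comments=True)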
Source code for langchain.document_loaders.wikipedia
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.wikipedia import WikipediaAPIWrapper
[docs]class WikipediaLoader(BaseLoader):
"""Loads a query result from www.wikipedia.org into a list of Documents.
The hard limit on the number of downloaded Documents is 300 for now.
Each wiki page represents one Document.
"""
def __init__(
self,
query: str,
lang: str = "en",
load_max_docs: Optional[int] = 100,
load_all_available_meta: Optional[bool] = False,
):
self.query = query
self.lang = lang
self.load_max_docs = load_max_docs
self.load_all_available_meta = load_all_available_meta
[docs] def load(self) -> List[Document]:
client = WikipediaAPIWrapper(
lang=self.lang,
top_k_results=self.load_max_docs,
load_all_available_meta=self.load_all_available_meta,
)
docs = client.load(self.query)
return docs
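A minimal usage sketch (the query is illustrative):
docs = WikipediaLoader(query="Large language model", load_max_docs=2).load()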
Source code for langchain.document_loaders.url_selenium
"""Loader that uses Selenium to load a page, then uses unstructured to load the html.
"""
import logging
from typing import TYPE_CHECKING, List, Literal, Optional, Union
if TYPE_CHECKING:
from selenium.webdriver import Chrome, Firefox
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class SeleniumURLLoader(BaseLoader):
"""Loader that uses Selenium to load a page and unstructured to load the html.
This is useful for loading pages that require JavaScript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
browser (str): The browser to use, either 'chrome' or 'firefox'.
binary_location (Optional[str]): The location of the browser binary.
executable_path (Optional[str]): The path to the browser executable.
headless (bool): If True, the browser will run in headless mode.
arguments (List[str]): List of arguments to pass to the browser.
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
browser: Literal["chrome", "firefox"] = "chrome",
binary_location: Optional[str] = None,
executable_path: Optional[str] = None,
headless: bool = True,
arguments: List[str] = [],
):
"""Load a list of URLs using Selenium and unstructured."""
try:
import selenium # noqa:F401
except ImportError:
raise ImportError(
"selenium package not found, please install it with "
"`pip install selenium`"
)
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.browser = browser
self.binary_location = binary_location
self.executable_path = executable_path
self.headless = headless
self.arguments = arguments
def _get_driver(self) -> Union["Chrome", "Firefox"]:
"""Create and return a WebDriver instance based on the specified browser.
Raises:
ValueError: If an invalid browser is specified.
Returns:
Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
"""
if self.browser.lower() == "chrome":
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options as ChromeOptions
chrome_options = ChromeOptions()
for arg in self.arguments:
chrome_options.add_argument(arg)
if self.headless:
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
if self.binary_location is not None:
chrome_options.binary_location = self.binary_location
if self.executable_path is None:
return Chrome(options=chrome_options)
return Chrome(executable_path=self.executable_path, options=chrome_options)
elif self.browser.lower() == "firefox":
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options as FirefoxOptions
firefox_options = FirefoxOptions()
for arg in self.arguments:
firefox_options.add_argument(arg)
if self.headless:
firefox_options.add_argument("--headless")
if self.binary_location is not None:
firefox_options.binary_location = self.binary_location
if self.executable_path is None:
return Firefox(options=firefox_options)
return Firefox(
executable_path=self.executable_path, options=firefox_options
)
else:
raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")
[docs] def load(self) -> List[Document]:
"""Load the specified URLs using Selenium and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from unstructured.partition.html import partition_html
docs: List[Document] = list()
driver = self._get_driver()
for url in self.urls:
try:
driver.get(url)
page_content = driver.page_source
elements = partition_html(text=page_content)
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
driver.quit()
return docs
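A minimal usage sketch (the URL is a placeholder; requires `selenium`, `unstructured` and a matching browser driver):
loader = SeleniumURLLoader(
    urls=["https://www.example.com"],
    browser="firefox",
    headless=True,
)
docs = loader.load()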
Source code for langchain.document_loaders.blockchain
import os
import re
import time
from enum import Enum
from typing import List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BlockchainType(Enum):
ETH_MAINNET = "eth-mainnet"
ETH_GOERLI = "eth-goerli"
POLYGON_MAINNET = "polygon-mainnet"
POLYGON_MUMBAI = "polygon-mumbai"
[docs]class BlockchainDocumentLoader(BaseLoader):
"""Loads elements from a blockchain smart contract into Langchain documents.
The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
Polygon mainnet, and Polygon Mumbai testnet.
If no BlockchainType is specified, the default is Ethereum mainnet.
The Loader uses the Alchemy API to interact with the blockchain.
ALCHEMY_API_KEY environment variable must be set to use this loader.
The API returns 100 NFTs per request and can be paginated using the
startToken parameter.
If get_all_tokens is set to True, the loader will get all tokens
on the contract. Note that for contracts with a large number of tokens,
this may take a long time (e.g. 10k tokens is 100 requests).
Default value is false for this reason.
The max_execution_time (sec) can be set to limit the execution time
of the loader.
Future versions of this loader can:
- Support additional Alchemy APIs (e.g. getTransactions, etc.)
- Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
"""
def __init__(
self,
contract_address: str,
blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
api_key: str = "docs-demo",
startToken: str = "",
get_all_tokens: bool = False,
max_execution_time: Optional[int] = None,
):
self.contract_address = contract_address
self.blockchainType = blockchainType.value
self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
self.startToken = startToken
self.get_all_tokens = get_all_tokens
self.max_execution_time = max_execution_time
if not self.api_key:
raise ValueError("Alchemy API key not provided.")
if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
raise ValueError(f"Invalid contract address {self.contract_address}")
[docs] def load(self) -> List[Document]:
result = []
current_start_token = self.startToken
start_time = time.time()
while True:
url = (
f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
f"{self.api_key}/getNFTsForCollection?withMetadata="
f"True&contractAddress={self.contract_address}"
f"&startToken={current_start_token}"
)
response = requests.get(url)
if response.status_code != 200:
raise ValueError(
f"Request failed with status code {response.status_code}"
)
items = response.json()["nfts"]
if not items:
break
for item in items:
content = str(item)
tokenId = item["id"]["tokenId"]
metadata = {
"source": self.contract_address,
"blockchain": self.blockchainType,
"tokenId": tokenId,
}
result.append(Document(page_content=content, metadata=metadata))
# exit after the first API call if get_all_tokens is False
if not self.get_all_tokens:
break
# get the start token for the next API call from the last item in array
current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])
if (
self.max_execution_time is not None
and (time.time() - start_time) > self.max_execution_time
):
raise RuntimeError("Execution time exceeded the allowed time limit.")
if not result:
raise ValueError(
f"No NFTs found for contract address {self.contract_address}"
)
return result
# add one to the tokenId, ensuring the correct tokenId format is used
def _get_next_tokenId(self, tokenId: str) -> str:
value_type = self._detect_value_type(tokenId)
if value_type == "hex_0x":
value_int = int(tokenId, 16)
elif value_type == "hex_0xbf":
value_int = int(tokenId[2:], 16)
else:
value_int = int(tokenId)
result = value_int + 1
if value_type == "hex_0x":
return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
elif value_type == "hex_0xbf":
return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
else:
return str(result)
# A smart contract can use different formats for the tokenId
@staticmethod
def _detect_value_type(tokenId: str) -> str:
if isinstance(tokenId, int):
return "int"
elif tokenId.startswith("0x"):
return "hex_0x"
elif tokenId.startswith("0xbf"):
return "hex_0xbf"
else:
return "hex_0xbf"
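A minimal usage sketch (the contract address is a placeholder that merely satisfies the 0x-prefixed 40-hex-character check; set ALCHEMY_API_KEY or pass api_key explicitly):
loader = BlockchainDocumentLoader(
    contract_address="0x1a92f7381b9f03921564a437210bb9396471050c",
    blockchainType=BlockchainType.ETH_MAINNET,
    get_all_tokens=False,
)
docs = loader.load()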
Source code for langchain.document_loaders.html
"""Loader that uses unstructured to load HTML files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredHTMLLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load HTML files."""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
return partition_html(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.azure_blob_storage_container
"""Loading logic for loading documents from an Azure Blob Storage container."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.azure_blob_storage_file import (
AzureBlobStorageFileLoader,
)
from langchain.document_loaders.base import BaseLoader
[docs]class AzureBlobStorageContainerLoader(BaseLoader):
"""Loading logic for loading documents from Azure Blob Storage."""
def __init__(self, conn_str: str, container: str, prefix: str = ""):
"""Initialize with connection string, container and blob prefix."""
self.conn_str = conn_str
self.container = container
self.prefix = prefix
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import ContainerClient
except ImportError as exc:
raise ValueError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
container = ContainerClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container
)
docs = []
blob_list = container.list_blobs(name_starts_with=self.prefix)
for blob in blob_list:
loader = AzureBlobStorageFileLoader(
self.conn_str, self.container, blob.name # type: ignore
)
docs.extend(loader.load())
return docs
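A minimal usage sketch (the connection string, container and prefix are placeholders; requires `azure-storage-blob`):
loader = AzureBlobStorageContainerLoader(
    conn_str="<your-connection-string>",
    container="mycontainer",
    prefix="reports/",
)
docs = loader.load()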
Source code for langchain.document_loaders.apify_dataset
"""Logic for loading documents from Apify datasets."""
from typing import Any, Callable, Dict, List
from pydantic import BaseModel, root_validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ApifyDatasetLoader(BaseLoader, BaseModel):
"""Logic for loading documents from Apify datasets."""
apify_client: Any
dataset_id: str
"""The ID of the dataset on the Apify platform."""
dataset_mapping_function: Callable[[Dict], Document]
"""A custom function that takes a single dictionary (an Apify dataset item)
and converts it to an instance of the Document class."""
def __init__(
self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]
):
"""Initialize the loader with an Apify dataset ID and a mapping function.
Args:
dataset_id (str): The ID of the dataset on the Apify platform.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an instance
of the Document class.
"""
super().__init__(
dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate environment."""
try:
from apify_client import ApifyClient
values["apify_client"] = ApifyClient()
except ImportError:
raise ImportError(
"Could not import apify-client Python package. "
"Please install it with `pip install apify-client`."
)
return values
[docs] def load(self) -> List[Document]:
"""Load documents."""
dataset_items = self.apify_client.dataset(self.dataset_id).list_items().items
return list(map(self.dataset_mapping_function, dataset_items))
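A minimal usage sketch (the dataset ID and item field names are placeholders; `apify-client` must be installed and configured with an API token). The mapping function decides how each raw dataset item becomes a Document:
loader = ApifyDatasetLoader(
    dataset_id="your-dataset-id",
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"], metadata={"source": item["url"]}
    ),
)
docs = loader.load()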
Source code for langchain.document_loaders.epub
"""Loader that loads EPub files."""
from typing import List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
satisfies_min_unstructured_version,
)
[docs]class UnstructuredEPubLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load epub files."""
def _get_elements(self) -> List:
min_unstructured_version = "0.5.4"
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
"Partitioning epub files is only supported in "
f"unstructured>={min_unstructured_version}."
)
from unstructured.partition.epub import partition_epub
return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.mastodon
"""Mastodon document loader."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import mastodon
def _dependable_mastodon_import() -> mastodon:
try:
import mastodon
except ImportError:
raise ValueError(
"Mastodon.py package not found, "
"please install it with `pip install Mastodon.py`"
)
return mastodon
[docs]class MastodonTootsLoader(BaseLoader):
"""Mastodon toots loader."""
def __init__(
self,
mastodon_accounts: Sequence[str],
number_toots: Optional[int] = 100,
exclude_replies: bool = False,
access_token: Optional[str] = None,
api_base_url: str = "https://mastodon.social",
):
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account.
exclude_replies: Whether to exclude reply toots from the load.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variables "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
self.api = mastodon.Mastodon(
access_token=access_token, api_base_url=api_base_url
)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
[docs] def load(self) -> List[Document]:
"""Load toots into documents."""
results: List[Document] = []
for account in self.mastodon_accounts:
user = self.api.account_lookup(account)
toots = self.api.account_statuses(
user.id,
only_media=False,
pinned=False,
exclude_replies=self.exclude_replies,
exclude_reblogs=True,
limit=self.number_toots,
)
docs = self._format_toots(toots, user)
results.extend(docs)
return results
def _format_toots(
self, toots: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format toots into documents.
Adding user info, and selected toot fields into the metadata.
"""
for toot in toots:
metadata = {
"created_at": toot["created_at"],
"user_info": user_info,
"is_reply": toot["in_reply_to_id"] is not None,
}
yield Document(
page_content=toot["content"],
metadata=metadata,
)
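A minimal usage sketch (the account handle is a placeholder; note that toot content is returned as raw HTML):
loader = MastodonTootsLoader(
    mastodon_accounts=["@someuser@mastodon.social"],
    number_toots=20,
    exclude_replies=True,
)
docs = loader.load()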
Source code for langchain.document_loaders.azure_blob_storage_file
"""Loading logic for loading documents from an Azure Blob Storage file."""
import os
import tempfile
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class AzureBlobStorageFileLoader(BaseLoader):
"""Loading logic for loading documents from Azure Blob Storage."""
def __init__(self, conn_str: str, container: str, blob_name: str):
"""Initialize with connection string, container and blob name."""
self.conn_str = conn_str
self.container = container
self.blob = blob_name
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import BlobClient
except ImportError as exc:
raise ValueError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
client = BlobClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container, blob_name=self.blob
)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.container}/{self.blob}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(f"{file_path}", "wb") as file:
blob_data = client.download_blob()
blob_data.readinto(file)
loader = UnstructuredFileLoader(file_path)
return loader.load()
Source code for langchain.document_loaders.csv_loader
import csv
from typing import Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class CSVLoader(BaseLoader):
"""Loads a CSV file into a list of documents.
Each document represents one row of the CSV file. Every row is converted into a
key/value pair and output to a new line in the document's page_content.
The source for each document loaded from csv is set to the value of the
`file_path` argument for all documents by default.
You can override this by setting the `source_column` argument to the
name of a column in the CSV file.
The source of each document will then be set to the value of the column
with the name specified in `source_column`.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
"""
def __init__(
self,
file_path: str,
source_column: Optional[str] = None,
csv_args: Optional[Dict] = None,
encoding: Optional[str] = None,
):
self.file_path = file_path
self.source_column = source_column
self.encoding = encoding
self.csv_args = csv_args or {}
[docs] def load(self) -> List[Document]:
"""Load data into document objects."""
docs = []
with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
csv_reader = csv.DictReader(csvfile, **self.csv_args) # type: ignore
for i, row in enumerate(csv_reader):
content = "\n".join(f"{k.strip()}: {v.strip()}" for k, v in row.items())
try:
source = (
row[self.source_column]
if self.source_column is not None
else self.file_path
)
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file."
)
metadata = {"source": source, "row": i}
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
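A minimal usage sketch (the file path and column names are placeholders; `csv_args` is forwarded to `csv.DictReader`):
loader = CSVLoader(
    file_path="data.csv",
    source_column="url",
    csv_args={"delimiter": ";", "quotechar": '"'},
)
docs = loader.load()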
Source code for langchain.document_loaders.url
"""Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class UnstructuredURLLoader(BaseLoader):
"""Loader that uses unstructured to load HTML files."""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
from unstructured.__version__ import __version__ as __unstructured_version__
self.__version = __unstructured_version__
except ImportError:
raise ValueError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self._validate_mode(mode)
self.mode = mode
headers = unstructured_kwargs.pop("headers", {})
if len(headers.keys()) != 0:
warn_about_headers = False
if self.__is_non_html_available():
warn_about_headers = not self.__is_headers_available_for_non_html()
else:
warn_about_headers = not self.__is_headers_available_for_html()
if warn_about_headers:
logger.warning(
"You are using an old version of unstructured. "
"The headers parameter is ignored"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headers = headers
self.unstructured_kwargs = unstructured_kwargs
def _validate_mode(self, mode: str) -> None:
_valid_modes = {"single", "elements"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
def __is_headers_available_for_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 7)
def __is_headers_available_for_non_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 13)
def __is_non_html_available(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 12)
[docs] def load(self) -> List[Document]:
"""Load file."""
from unstructured.partition.auto import partition
from unstructured.partition.html import partition_html
docs: List[Document] = list()
for url in self.urls:
try:
if self.__is_non_html_available():
if self.__is_headers_available_for_non_html():
elements = partition(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition(url=url, **self.unstructured_kwargs)
else:
if self.__is_headers_available_for_html():
elements = partition_html(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition_html(url=url, **self.unstructured_kwargs)
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exeption: {e}")
continue
else:
raise e
if self.mode == "single":
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
elif self.mode == "elements":
for element in elements:
metadata = element.metadata.to_dict()
metadata["category"] = element.category
docs.append(Document(page_content=str(element), metadata=metadata))
return docs
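A brief usage sketch for the loader above; the URLs and the custom header are placeholders, and the `headers` argument is only honored on sufficiently new `unstructured` versions, as the version checks above show.
.. code-block:: python

    from langchain.document_loaders import UnstructuredURLLoader

    urls = [
        "https://example.com/post-1",  # hypothetical pages
        "https://example.com/post-2",
    ]
    loader = UnstructuredURLLoader(
        urls=urls,
        mode="elements",                       # one Document per parsed element
        continue_on_failure=True,              # log and skip URLs that fail
        headers={"User-Agent": "my-crawler"},  # ignored on old unstructured versions
    )
    docs = loader.load()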
Source code for langchain.document_loaders.readthedocs
"""Loader that loads ReadTheDocs documentation directory dump."""
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ReadTheDocsLoader(BaseLoader):
"""Loader that loads ReadTheDocs documentation directory dump."""
def __init__(
self,
path: Union[str, Path],
encoding: Optional[str] = None,
errors: Optional[str] = None,
custom_html_tag: Optional[Tuple[str, dict]] = None,
**kwargs: Optional[Any]
):
"""
Initialize ReadTheDocsLoader
The loader loops over all files under `path` and extracts the actual content of
the files by retrieving main html tags. Default main html tags include
`<main id="main-content">`, `<div role="main">`, and `<article role="main">`. You
can also define your own html tags by passing custom_html_tag, e.g.
`("div", "class=main")`. The loader iterates html tags in the order of
custom html tags (if provided) and default html tags. If any of the tags is not
empty, the loop will break and retrieve the content out of that tag.
Args:
path: The location of pulled readthedocs folder.
encoding: The encoding with which to open the documents.
errors: Specifies how encoding and decoding errors are to be handled—this
cannot be used in binary mode.
custom_html_tag: Optional custom html tag to retrieve the content from
files.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Could not import python packages. "
"Please install it with `pip install beautifulsoup4`. "
)
try:
_ = BeautifulSoup(
"<html><body>Parser builder library test.</body></html>", **kwargs
)
except Exception as e:
raise ValueError("Parsing kwargs do not appear valid") from e
self.file_path = Path(path)
self.encoding = encoding
self.errors = errors
self.custom_html_tag = custom_html_tag
self.bs_kwargs = kwargs
[docs] def load(self) -> List[Document]:
"""Load documents."""
docs = []
for p in self.file_path.rglob("*"):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = self._clean_data(f.read())
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
def _clean_data(self, data: str) -> str:
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, **self.bs_kwargs)
# default tags
html_tags = [
("div", {"role": "main"}),
("main", {"id": "main-content"}),
]
if self.custom_html_tag is not None:
html_tags.append(self.custom_html_tag)
text = None
# reversed order. check the custom one first
for tag, attrs in html_tags[::-1]:
text = soup.find(tag, attrs)
# if found, break
if text is not None:
break
if text is not None:
text = text.get_text()
else:
text = ""
# trim empty lines
return "\n".join([t for t in text.split("\n") if t])
Source code for langchain.document_loaders.odt
"""Loader that loads Open Office ODT files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class UnstructuredODTLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load open office ODT files."""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.6.3")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.odt import partition_odt
return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
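A minimal sketch of using the ODT loader above; the file path is a placeholder and `unstructured>=0.6.3` must be installed, per the version check in `__init__`.
.. code-block:: python

    from langchain.document_loaders import UnstructuredODTLoader

    loader = UnstructuredODTLoader("meeting-notes.odt", mode="elements")  # hypothetical file
    docs = loader.load()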
Source code for langchain.document_loaders.discord
"""Load from Discord chat dump"""
from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import pandas as pd
[docs]class DiscordChatLoader(BaseLoader):
"""Load Discord chat logs."""
def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"):
"""Initialize with a Pandas DataFrame containing chat logs."""
if not isinstance(chat_log, pd.DataFrame):
raise ValueError(
f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}"
)
self.chat_log = chat_log
self.user_id_col = user_id_col
[docs] def load(self) -> List[Document]:
"""Load all chat messages."""
result = []
for _, row in self.chat_log.iterrows():
user_id = row[self.user_id_col]
metadata = row.to_dict()
metadata.pop(self.user_id_col)
result.append(Document(page_content=user_id, metadata=metadata))
return result
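A small usage sketch for the Discord loader above; the DataFrame contents are made up, and any frame with a user-id column works. Note that `page_content` becomes the user id while the remaining columns become metadata.
.. code-block:: python

    import pandas as pd

    from langchain.document_loaders import DiscordChatLoader

    # Hypothetical chat export.
    chat_log = pd.DataFrame(
        {
            "ID": ["111", "222"],
            "Content": ["hello", "world"],
            "Channel": ["general", "general"],
        }
    )
    loader = DiscordChatLoader(chat_log, user_id_col="ID")
    docs = loader.load()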
Source code for langchain.document_loaders.stripe
"""Loader that fetches data from Stripe"""
import json
import urllib.request
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_dict
STRIPE_ENDPOINTS = {
"balance_transactions": "https://api.stripe.com/v1/balance_transactions",
"charges": "https://api.stripe.com/v1/charges",
"customers": "https://api.stripe.com/v1/customers",
"events": "https://api.stripe.com/v1/events",
"refunds": "https://api.stripe.com/v1/refunds",
"disputes": "https://api.stripe.com/v1/disputes",
}
[docs]class StripeLoader(BaseLoader):
def __init__(self, resource: str, access_token: Optional[str] = None) -> None:
self.resource = resource
access_token = access_token or get_from_env(
"access_token", "STRIPE_ACCESS_TOKEN"
)
self.headers = {"Authorization": f"Bearer {access_token}"}
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = STRIPE_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
[docs] def load(self) -> List[Document]:
return self._get_resource()
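A usage sketch for the Stripe loader above, assuming `STRIPE_ACCESS_TOKEN` is set in the environment (alternatively, pass `access_token=` explicitly); the resource name must be one of the `STRIPE_ENDPOINTS` keys.
.. code-block:: python

    from langchain.document_loaders import StripeLoader

    loader = StripeLoader("charges")  # or "customers", "events", ...
    docs = loader.load()              # a single Document with the stringified JSON response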
Source code for langchain.document_loaders.gcs_directory
"""Loading logic for loading documents from an GCS directory."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.gcs_file import GCSFileLoader
[docs]class GCSDirectoryLoader(BaseLoader):
"""Loading logic for loading documents from GCS."""
def __init__(self, project_name: str, bucket: str, prefix: str = ""):
"""Initialize with bucket and key name."""
self.project_name = project_name
self.bucket = bucket
self.prefix = prefix
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from google.cloud import storage
except ImportError:
raise ValueError(
"Could not import google-cloud-storage python package. "
"Please install it with `pip install google-cloud-storage`."
)
client = storage.Client(project=self.project_name)
docs = []
for blob in client.list_blobs(self.bucket, prefix=self.prefix):
# we shall just skip directories since GCSFileLoader creates
# intermediate directories on the fly
if blob.name.endswith("/"):
continue
loader = GCSFileLoader(self.project_name, self.bucket, blob.name)
docs.extend(loader.load())
return docs
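A minimal sketch for the GCS directory loader above; the project, bucket, and prefix are placeholders, and `google-cloud-storage` plus valid application credentials are assumed.
.. code-block:: python

    from langchain.document_loaders import GCSDirectoryLoader

    loader = GCSDirectoryLoader(
        project_name="my-project",   # hypothetical GCP project
        bucket="my-bucket",          # hypothetical bucket
        prefix="reports/",           # only blobs under this prefix are loaded
    )
    docs = loader.load()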
Source code for langchain.document_loaders.toml
import json
from pathlib import Path
from typing import Iterator, List, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class TomlLoader(BaseLoader):
"""
A TOML document loader that inherits from the BaseLoader class.
This class can be initialized with either a single source file or a source
directory containing TOML files.
"""
def __init__(self, source: Union[str, Path]):
"""Initialize the TomlLoader with a source file or directory."""
self.source = Path(source)
[docs] def load(self) -> List[Document]:
"""Load and return all documents."""
return list(self.lazy_load())
[docs] def lazy_load(self) -> Iterator[Document]:
"""Lazily load the TOML documents from the source file or directory."""
import tomli
if self.source.is_file() and self.source.suffix == ".toml":
files = [self.source]
elif self.source.is_dir():
files = list(self.source.glob("**/*.toml"))
else:
raise ValueError("Invalid source path or file type")
for file_path in files:
with file_path.open("r", encoding="utf-8") as file:
content = file.read()
try:
data = tomli.loads(content)
doc = Document(
page_content=json.dumps(data),
metadata={"source": str(file_path)},
)
yield doc
except tomli.TOMLDecodeError as e:
print(f"Error parsing TOML file {file_path}: {e}")
Source code for langchain.document_loaders.word_document
"""Loader that loads word documents."""
import os
import tempfile
from abc import ABC
from typing import List
from urllib.parse import urlparse
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class Docx2txtLoader(BaseLoader, ABC):
"""Loads a DOCX with docx2txt and chunks at character level.
Defaults to checking for a local file, but if the file is a web path, it will
download it to a temporary file, use that, and then clean up the temporary file
after completion.
"""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
[docs] def load(self) -> List[Document]:
"""Load given path as single page."""
import docx2txt
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
[docs]class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load word documents."""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .doc files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
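A combined usage sketch for the two Word loaders above; the file paths are placeholders, `docx2txt` is required for the first loader, and `.doc` files need `unstructured>=0.4.11`. `Docx2txtLoader` also accepts an http(s) URL and downloads it to a temporary file.
.. code-block:: python

    from langchain.document_loaders import Docx2txtLoader, UnstructuredWordDocumentLoader

    docs = Docx2txtLoader("report.docx").load()  # hypothetical files
    elements = UnstructuredWordDocumentLoader("legacy.doc", mode="elements").load()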
Source code for langchain.vectorstores.weaviate
"""Wrapper around weaviate vector database."""
from __future__ import annotations
import datetime
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
def _default_schema(index_name: str) -> Dict:
return {
"class": index_name,
"properties": [
{
"name": "text",
"dataType": ["text"],
}
],
}
def _create_weaviate_client(**kwargs: Any) -> Any:
client = kwargs.get("client")
if client is not None:
return client
weaviate_url = get_from_dict_or_env(kwargs, "weaviate_url", "WEAVIATE_URL")
try:
# the weaviate api key param should not be mandatory
weaviate_api_key = get_from_dict_or_env(
kwargs, "weaviate_api_key", "WEAVIATE_API_KEY", None
)
except ValueError:
weaviate_api_key = None
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip instal weaviate-client`"
)
auth = (
weaviate.auth.AuthApiKey(api_key=weaviate_api_key)
if weaviate_api_key is not None
else None
)
client = weaviate.Client(weaviate_url, auth_client_secret=auth)
return client
def _default_score_normalizer(val: float) -> float:
return 1 - 1 / (1 + np.exp(val))
def _json_serializable(value: Any) -> Any:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
[docs]class Weaviate(VectorStore):
"""Wrapper around Weaviate vector database.
To use, you should have the ``weaviate-client`` python package installed.
Example:
.. code-block:: python
import weaviate
from langchain.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
weaviate = Weaviate(client, index_name, text_key)
"""
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_score_normalizer,
by_text: bool = True,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._query_attrs = [self._text_key]
self._relevance_score_fn = relevance_score_fn
self._by_text = by_text
if attributes is not None:
self._query_attrs.extend(attributes)
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
ids = []
with self._client.batch as batch:
for i, text in enumerate(texts):
data_properties = {self._text_key: text}
if metadatas is not None:
for key, val in metadatas[i].items():
data_properties[key] = _json_serializable(val)
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
_id = (
kwargs["uuids"][i] if "uuids" in kwargs else get_valid_uuid(uuid4())
)
if self._embedding is not None:
vector = self._embedding.embed_documents([text])[0]
else:
vector = None
batch.add_data_object(
data_object=data_properties,
class_name=self._index_name,
uuid=_id,
vector=vector,
)
ids.append(_id)
return ids
[docs] def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
if self._by_text:
return self.similarity_search_by_text(query, k, **kwargs)
else:
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search when "
"_by_text=False"
)
embedding = self._embedding.embed_query(query)
return self.similarity_search_by_vector(embedding, k, **kwargs)
[docs] def similarity_search_by_text(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_text(content).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
[docs] def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Look up similar documents by embedding vector in Weaviate."""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_vector(vector).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding is not None:
embedding = self._embedding.embed_query(query)
else:
raise ValueError(
"max_marginal_relevance_search requires a suitable Embeddings object"
)
return self.max_marginal_relevance_search_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
)
[docs] def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
results = (
query_obj.with_additional("vector")
.with_near_vector(vector)
.with_limit(fetch_k)
.do()
)
payload = results["data"]["Get"][self._index_name]
embeddings = [result["_additional"]["vector"] for result in payload]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
docs = []
for idx in mmr_selected:
text = payload[idx].pop(self._text_key)
payload[idx].pop("_additional")
meta = payload[idx]
docs.append(Document(page_content=text, metadata=meta))
return docs
[docs] def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search_with_score"
)
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if not self._by_text:
embedding = self._embedding.embed_query(query)
vector = {"vector": embedding}
result = (
query_obj.with_near_vector(vector)
.with_limit(k)
.with_additional("vector")
.do()
)
else:
result = (
query_obj.with_near_text(content)
.with_limit(k)
.with_additional("vector")
.do()
)
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs_and_scores = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
score = np.dot(
res["_additional"]["vector"], self._embedding.embed_query(query)
)
docs_and_scores.append((Document(page_content=text, metadata=res), score))
return docs_and_scores
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
if self._relevance_score_fn is None:
raise ValueError(
"relevance_score_fn must be provided to"
" Weaviate constructor to normalize scores"
)
docs_and_scores = self.similarity_search_with_score(query, k=k, **kwargs)
return [
(doc, self._relevance_score_fn(score)) for doc, score in docs_and_scores
]
[docs] @classmethod
def from_texts(
cls: Type[Weaviate],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> Weaviate:
"""Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores.weaviate import Weaviate
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
client = _create_weaviate_client(**kwargs)
from weaviate.util import get_valid_uuid
index_name = kwargs.get("index_name", f"LangChain_{uuid4().hex}")
embeddings = embedding.embed_documents(texts) if embedding else None
text_key = "text"
schema = _default_schema(index_name)
attributes = list(metadatas[0].keys()) if metadatas else None
# check whether the index already exists
if not client.schema.contains(schema):
client.schema.create_class(schema)
with client.batch as batch:
for i, text in enumerate(texts):
data_properties = {
text_key: text,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
else:
_id = get_valid_uuid(uuid4())
# if an embedding strategy is not provided, we let
# weaviate create the embedding. Note that this will only
# work if weaviate has been installed with a vectorizer module
# like text2vec-contextionary for example
params = {
"uuid": _id,
"data_object": data_properties,
"class_name": index_name,
}
if embeddings is not None:
params["vector"] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
relevance_score_fn = kwargs.get("relevance_score_fn")
by_text: bool = kwargs.get("by_text", False)
return cls(
client,
index_name,
text_key,
embedding=embedding,
attributes=attributes,
relevance_score_fn=relevance_score_fn,
by_text=by_text,
)
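To tie the pieces above together, a usage sketch assuming a Weaviate instance is running at `http://localhost:8080` and `OpenAIEmbeddings` credentials are configured; the texts, metadata, and `where_filter` are illustrative.
.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Weaviate

    texts = ["foo", "bar", "baz"]  # illustrative corpus
    db = Weaviate.from_texts(
        texts,
        OpenAIEmbeddings(),
        metadatas=[{"topic": t} for t in ("a", "b", "a")],
        weaviate_url="http://localhost:8080",  # assumes a running instance
        by_text=False,                         # query by embedding vector
    )
    where_filter = {"path": ["topic"], "operator": "Equal", "valueString": "a"}
    docs = db.similarity_search("foo", k=2, where_filter=where_filter)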
Source code for langchain.vectorstores.annoy
"""Wrapper around Annoy vector database."""
from __future__ import annotations
import os
import pickle
import uuid
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"])
DEFAULT_METRIC = "angular"
def dependable_annoy_import() -> Any:
"""Import annoy if available, otherwise raise error."""
try:
import annoy
except ImportError:
raise ValueError(
"Could not import annoy python package. "
"Please install it with `pip install --user annoy` "
)
return annoy
[docs]class Annoy(VectorStore):
"""Wrapper around Annoy vector database.
To use, you should have the ``annoy`` python package installed.
Example:
.. code-block:: python
from langchain import Annoy
db = Annoy(embedding_function, index, docstore, index_to_docstore_id)
"""
def __init__(
self,
embedding_function: Callable,
index: Any,
metric: str,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index = index
self.metric = metric
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
raise NotImplementedError(
"Annoy does not allow to add new data once the index is build."
)
[docs] def process_index_results(
self, idxs: List[int], dists: List[float]
) -> List[Tuple[Document, float]]:
"""Turns annoy results into a list of documents and scores.
Args:
idxs: List of indices of the documents in the index.
dists: List of distances of the documents in the index.
Returns:
List of Documents and scores.
"""
docs = []
for idx, dist in zip(idxs, dists):
_id = self.index_to_docstore_id[idx]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append((doc, dist))
return docs
[docs] def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_vector(
embedding, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
[docs] def similarity_search_with_score_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
docstore_index: Index of the document in the docstore.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_item(
docstore_index, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
[docs] def similarity_search_with_score(
self, query: str, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
docs = self.similarity_search_with_score_by_vector(embedding, k, search_k)
return docs
[docs] def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding, k, search_k
)
return [doc for doc, _ in docs_and_scores]
[docs] def similarity_search_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to docstore_index.
Args:
docstore_index: Index of document in docstore
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_index(
docstore_index, k, search_k
)
return [doc for doc, _ in docs_and_scores]
[docs] def similarity_search(
self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, search_k)
return [doc for doc, _ in docs_and_scores]
[docs] def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
k: Number of Documents to return. Defaults to 4.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
idxs = self.index.get_nns_by_vector(
embedding, fetch_k, search_k=-1, include_distances=False
)
embeddings = [self.index.get_item_vector(i) for i in idxs]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
# ignore the -1's if not enough docs are returned/indexed
selected_indices = [idxs[i] for i in mmr_selected if i != -1]
docs = []
for i in selected_indices:
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append(doc)
return docs
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
if metric not in INDEX_METRICS:
raise ValueError(
(
f"Unsupported distance metric: {metric}. "
f"Expected one of {list(INDEX_METRICS)}"
)
)
annoy = dependable_annoy_import()
if not embeddings:
raise ValueError("embeddings must be provided to build AnnoyIndex")
f = len(embeddings[0])
index = annoy.AnnoyIndex(f, metric=metric)
for i, emb in enumerate(embeddings):
index.add_item(i, emb)
index.build(trees, n_jobs=n_jobs)
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
docstore = InMemoryDocstore(
{index_to_id[i]: doc for i, doc in enumerate(documents)}
)
return cls(embedding.embed_query, index, metric, docstore, index_to_id)
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from raw documents.
Args:
texts: List of documents to index.
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
[docs] @classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from embeddings.
Args:
text_embeddings: List of tuples of (text, embedding)
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1.
This is a user-friendly interface that:
1. Creates an in memory docstore with provided embeddings
2. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
[docs] def save_local(self, folder_path: str, prefault: bool = False) -> None:
"""Save Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
prefault: Whether to pre-load the index into memory.
"""
path = Path(folder_path)
os.makedirs(path, exist_ok=True)
# save index, index config, docstore and index_to_docstore_id
config_object = ConfigParser()
config_object["ANNOY"] = {
"f": self.index.f,
"metric": self.metric,
}
self.index.save(str(path / "index.annoy"), prefault=prefault)
with open(path / "index.pkl", "wb") as file:
pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file)
[docs] @classmethod
def load_local(
cls,
folder_path: str,
embeddings: Embeddings,
) -> Annoy:
"""Load Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries.
"""
path = Path(folder_path)
# load index separately since it is not picklable
annoy = dependable_annoy_import()
# load docstore and index_to_docstore_id
with open(path / "index.pkl", "rb") as file:
docstore, index_to_docstore_id, config_object = pickle.load(file)
f = int(config_object["ANNOY"]["f"])
metric = config_object["ANNOY"]["metric"]
index = annoy.AnnoyIndex(f, metric=metric)
index.load(str(path / "index.annoy"))
return cls(
embeddings.embed_query, index, metric, docstore, index_to_docstore_id
)
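A usage sketch covering the persistence round trip above; the texts and folder name are illustrative, and an `OpenAIEmbeddings` key is assumed. Remember that Annoy indexes are immutable, so all texts must be supplied up front.
.. code-block:: python

    from langchain import Annoy
    from langchain.embeddings import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    db = Annoy.from_texts(["foo", "bar", "baz"], embeddings, metric="angular", trees=10)
    db.save_local("annoy_index")                       # writes index.annoy and index.pkl
    db2 = Annoy.load_local("annoy_index", embeddings)
    docs = db2.similarity_search("foo", k=2)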
Source code for langchain.vectorstores.zilliz
from __future__ import annotations
import logging
from typing import Any, List, Optional
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.milvus import Milvus
logger = logging.getLogger(__name__)
[docs]class Zilliz(Milvus):
def _create_index(self) -> None:
"""Create a index on the collection"""
from pymilvus import Collection, MilvusException
if isinstance(self.col, Collection) and self._get_index() is None:
try:
# If no index params, use a default AutoIndex based one
if self.index_params is None:
self.index_params = {
"metric_type": "L2",
"index_type": "AUTOINDEX",
"params": {},
}
try:
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
# If default did not work, most likely Milvus self-hosted
except MilvusException:
# Use HNSW based index
self.index_params = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
logger.debug(
"Successfully created an index on collection: %s",
self.collection_name,
)
except MilvusException as e:
logger.error(
"Failed to create an index on collection: %s", self.collection_name
)
raise e
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = "LangChainCollection",
connection_args: dict[str, Any] = {},
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: bool = False,
**kwargs: Any,
) -> Zilliz:
"""Create a Zilliz collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use.
Defaults to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Zilliz: Zilliz Vector Store
"""
vector_db = cls(
embedding_function=embedding,
collection_name=collection_name,
connection_args=connection_args,
consistency_level=consistency_level,
index_params=index_params,
search_params=search_params,
drop_old=drop_old,
**kwargs,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
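A usage sketch for the class above; the connection details are placeholders for a Zilliz Cloud cluster (the `uri`/`token` keys are an assumption about the pymilvus connection arguments), and `OpenAIEmbeddings` is used for illustration.
.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.zilliz import Zilliz

    # Hypothetical Zilliz Cloud endpoint and API key.
    connection_args = {"uri": "https://<cluster>.zillizcloud.com", "token": "<api-key>"}
    db = Zilliz.from_texts(
        ["foo", "bar"],
        OpenAIEmbeddings(),
        connection_args=connection_args,
        collection_name="LangChainCollection",
    )
    docs = db.similarity_search("foo", k=1)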
Source code for langchain.vectorstores.qdrant
"""Wrapper around Qdrant vector database."""
from __future__ import annotations
import uuid
import warnings
from hashlib import md5
from itertools import islice
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from qdrant_client.conversions import common_types
from qdrant_client.http import models as rest
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
[docs]class Qdrant(VectorStore):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str,
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
embedding_function: Optional[Callable] = None, # deprecated
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
if embeddings is None and embedding_function is None:
raise ValueError(
"`embeddings` value can't be None. Pass `Embeddings` instance."
)
if embeddings is not None and embedding_function is not None:
raise ValueError(
"Both `embeddings` and `embedding_function` are passed. "
"Use `embeddings` only."
)
self.embeddings = embeddings
self._embeddings_function = embedding_function
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
if embedding_function is not None:
warnings.warn(
"Using `embedding_function` is deprecated. "
"Pass `Embeddings` instance to `embeddings` instead."
)
if not isinstance(embeddings, Embeddings):
warnings.warn(
"`embeddings` should be an instance of `Embeddings`."
"Using `embeddings` as `embedding_function` which is deprecated"
)
self._embeddings_function = embeddings
self.embeddings = None
def _embed_query(self, query: str) -> List[float]:
"""Embed query text.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = self.embeddings.embed_query(query)
else:
if self._embeddings_function is not None:
embedding = self._embeddings_function(query)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embedding.tolist() if hasattr(embedding, "tolist") else embedding
def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = self.embeddings.embed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
if hasattr(embedding, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embeddings
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client.http import models as rest
ids = []
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = [md5(text.encode("utf-8")).hexdigest() for text in batch_texts]
self.client.upsert(
collection_name=self.collection_name,
points=rest.Batch.construct(
ids=batch_ids,
vectors=self._embed_texts(batch_texts),
payloads=self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
),
)
ids.extend(batch_ids)
return ids
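A usage sketch for `add_texts` above, building an in-memory Qdrant collection first via `from_texts` (shown further below); the texts, metadata, and collection name are illustrative. Note that the point ids are md5 hashes of the texts, so re-adding an identical text overwrites the existing point.
.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Qdrant

    db = Qdrant.from_texts(
        ["foo"],
        OpenAIEmbeddings(),
        location=":memory:",           # local in-memory Qdrant instance
        collection_name="demo",
    )
    ids = db.add_texts(
        ["alpha", "beta", "gamma"],
        metadatas=[{"lang": "el"}] * 3,
        batch_size=64,                 # embed and upsert in batches of this size
    )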
[docs] def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(query, k, filter=filter)
return list(map(itemgetter(0), results))
[docs] def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
results = self.client.search(
collection_name=self.collection_name,
query_vector=self._embed_query(query),
query_filter=qdrant_filter,
with_payload=True,
limit=k,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
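Continuing the sketch above, the non-deprecated path passes qdrant-client filter models directly instead of a dict; the `metadata.lang` key assumes metadata was stored under the default `metadata` payload key, as in the earlier `add_texts` example.
.. code-block:: python

    from qdrant_client.http import models as rest

    qdrant_filter = rest.Filter(
        must=[
            rest.FieldCondition(key="metadata.lang", match=rest.MatchValue(value="el")),
        ]
    )
    results = db.similarity_search_with_score("beta", k=2, filter=qdrant_filter)
    for doc, score in results:
        print(score, doc.page_content)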
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embed_query(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
with_vectors=True,
limit=fetch_k,
)
embeddings = [result.vector for result in results]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
)
for i in mmr_selected
]
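An illustrative call (assuming the same `qdrant` store): fetch_k candidates are retrieved by plain similarity first, then k of them are re-ranked for diversity by the MMR selector:
    docs = qdrant.max_marginal_relevance_search(
        "vector databases",
        k=4,             # documents returned
        fetch_k=20,      # candidates passed to the MMR re-ranker
        lambda_mult=0.5, # 0 = maximum diversity, 1 = minimum diversity (pure relevance)
    )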
[docs] @classmethod
def from_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
batch_size: int = 64,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: Either a host name or a full URL string of the form
"[scheme://]host[:port][/prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS (SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, url="http://localhost:6333")
"""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from qdrant_client.http import models as rest
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
client.recreate_collection(
collection_name=collection_name,
vectors_config=rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
),
)
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
# Generate the embeddings for all the texts in a batch
batch_embeddings = embedding.embed_documents(batch_texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch.construct(
ids=[md5(text.encode("utf-8")).hexdigest() for text in batch_texts],
vectors=batch_embeddings,
payloads=cls._build_payloads(
batch_texts,
batch_metadatas,
content_payload_key,
metadata_payload_key,
),
),
)
return cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
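A quick-start sketch (assumes `qdrant-client` is installed and an OpenAI API key is available); `location=":memory:"` runs an in-process Qdrant instance, while `url=...` targets a running server:
    from langchain import Qdrant
    from langchain.embeddings import OpenAIEmbeddings

    texts = ["foo", "bar", "baz"]
    qdrant = Qdrant.from_texts(
        texts,
        OpenAIEmbeddings(),
        metadatas=[{"n": i} for i in range(len(texts))],
        location=":memory:",      # in-process instance; use url="http://..." for a server
        collection_name="demo",
    )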
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f"{key}.{_key}", value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condition(f"{key}[]", _value))
else:
out.extend(self._build_condition(f"{key}", _value))
else:
out.append(
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
)
return out
def _qdrant_filter_from_dict(
self, filter: Optional[DictFilter]
) -> Optional[rest.Filter]:
from qdrant_client.http import models as rest
if not filter:
return None
return rest.Filter(
must=[
condition
for key, value in filter.items()
for condition in self._build_condition(key, value)
]
)
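For illustration (not library output), a dict filter such as {"genre": "drama", "tags": ["a", "b"]} would, under the default metadata payload key, be translated by the two helpers above into roughly the following qdrant-client filter, with one MatchValue condition per scalar value:
    rest.Filter(
        must=[
            rest.FieldCondition(key="metadata.genre", match=rest.MatchValue(value="drama")),
            rest.FieldCondition(key="metadata.tags", match=rest.MatchValue(value="a")),
            rest.FieldCondition(key="metadata.tags", match=rest.MatchValue(value="b")),
        ]
    )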
Source code for langchain.vectorstores.elastic_vector_search
"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from abc import ABC
from typing import Any, Dict, Iterable, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore
def _default_text_mapping(dim: int) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {"type": "dense_vector", "dims": dim},
}
}
def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict:
if filter:
((key, value),) = filter.items()
filter = {"match": {f"metadata.{key}.keyword": f"{value}"}}
else:
filter = {"match_all": {}}
return {
"script_score": {
"query": filter,
"script": {
"source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
"params": {"query_vector": query_vector},
},
}
}
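For reference, a call like _default_script_query([0.1, 0.2], {"genre": "drama"}) produces roughly the dict below; note that the helper unpacks exactly one key/value pair from the filter, so multi-key filters are not supported here:
    {
        "script_score": {
            "query": {"match": {"metadata.genre.keyword": "drama"}},
            "script": {
                "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
                "params": {"query_vector": [0.1, 0.2]},
            },
        }
    }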
# ElasticVectorSearch is a concrete implementation of the abstract base class
# VectorStore, which defines a common interface for all vector database
# implementations. By inheriting from the ABC class, ElasticVectorSearch can be
# defined as an abstract base class itself, allowing the creation of subclasses with
# their own specific implementations. If you plan to subclass ElasticVectorSearch,
# you can inherit from it and define your own implementation of the necessary methods
# and attributes.
[docs]class ElasticVectorSearch(VectorStore, ABC):
"""Wrapper around Elasticsearch as a vector database.
To connect to an Elasticsearch instance that does not require
login credentials, pass the Elasticsearch URL and index name along with the
embedding object to the constructor.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url="http://localhost:9200",
index_name="test_index",
embedding=embedding
)
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_host = "cluster_id.region_id.gcp.cloud.es.io"
elasticsearch_url = f"https://username:password@{elastic_host}:9243"
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url=elasticsearch_url,
index_name="test_index",
embedding=embedding
)
Args:
elasticsearch_url (str): The URL for the Elasticsearch instance.
index_name (str): The name of the Elasticsearch index for the embeddings.
embedding (Embeddings): An object that provides the ability to embed text.
It should be an instance of a class that subclasses the Embeddings
abstract base class, such as OpenAIEmbeddings()
Raises:
ValueError: If the elasticsearch python package is not installed.
"""
def __init__(
self,
elasticsearch_url: str,
index_name: str,
embedding: Embeddings,
*,
ssl_verify: Optional[Dict[str, Any]] = None,
):
"""Initialize with necessary components."""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
self.embedding = embedding
self.index_name = index_name
_ssl_verify = ssl_verify or {}
try:
self.client = elasticsearch.Elasticsearch(elasticsearch_url, **_ssl_verify)
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is mis-formatted. Got error: {e} "
)
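A usage sketch of the optional ssl_verify argument: the dict is forwarded verbatim to elasticsearch.Elasticsearch, so keyword arguments accepted by that client (for example verify_certs or ca_certs) should work here; the credentials and certificate path below are placeholders:
    from langchain import ElasticVectorSearch
    from langchain.embeddings import OpenAIEmbeddings

    store = ElasticVectorSearch(
        elasticsearch_url="https://elastic:changeme@localhost:9243",
        index_name="test_index",
        embedding=OpenAIEmbeddings(),
        ssl_verify={"verify_certs": True, "ca_certs": "/path/to/ca.crt"},
    )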
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
refresh_indices: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
embeddings = self.embedding.embed_documents(list(texts))
dim = len(embeddings[0])
mapping = _default_text_mapping(dim)
# check to see if the index already exists
try:
self.client.indices.get(index=self.index_name)
except NotFoundError:
# TODO would be nice to create index before embedding,
# just to save expensive steps for last
self.create_index(self.client, self.index_name, mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = str(uuid.uuid4())
request = {
"_op_type": "index",