Columns: id (string, length 36), text (string, length 114 to 429k), url (string, length 54 to 121)
030f06a5-9c6f-4f5a-996c-f8ffcc65f5e5
Source code for langchain.document_loaders.rtf

"""Loader that loads rich text files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredRTFLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load rtf files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        min_unstructured_version = "0.5.12"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning rtf files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.rtf import partition_rtf

        return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/rtf.html
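A minimal usage sketch for the loader above; the file path is a placeholder and unstructured>=0.5.12 is assumed to be installed.

from langchain.document_loaders.rtf import UnstructuredRTFLoader

# "example.rtf" is a hypothetical local RTF file; mode="single" is the default
loader = UnstructuredRTFLoader("example.rtf", mode="single")
docs = loader.load()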
e44c2ac4-03dc-4915-a20d-b2a90a5c489e
Source code for langchain.document_loaders.notebook """Loader that loads .ipynb notebook files.""" import json from pathlib import Path from typing import Any, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader def concatenate_cells( cell: dict, include_outputs: bool, max_output_length: int, traceback: bool ) -> str: """Combine cells information in a readable format ready to be used.""" cell_type = cell["cell_type"] source = cell["source"] output = cell["outputs"] if include_outputs and cell_type == "code" and output: if "ename" in output[0].keys(): error_name = output[0]["ename"] error_value = output[0]["evalue"] if traceback: traceback = output[0]["traceback"] return ( f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}'," f" with description '{error_value}'\n" f"and traceback '{traceback}'\n\n" ) else: return ( f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}'," f"with description '{error_value}'\n\n" ) elif output[0]["output_type"] == "stream": output = output[0]["text"] min_output = min(max_output_length, len(output)) return ( f"'{cell_type}' cell: '{source}'\n with " f"output: '{output[:min_output]}'\n\n" ) else: return f"'{cell_type}' cell: '{source}'\n\n" return "" def remove_newlines(x: Any) -> Any: """Remove recursively newlines, no matter the data structure they are stored in.""" import pandas as pd if isinstance(x, str): return x.replace("\n", "") elif isinstance(x, list): return [remove_newlines(elem) for elem in x] elif isinstance(x, pd.DataFrame): return x.applymap(remove_newlines) else: return x [docs]class NotebookLoader(BaseLoader): """Loader that loads .ipynb notebook files.""" def __init__( self, path: str, include_outputs: bool = False, max_output_length: int = 10, remove_newline: bool = False, traceback: bool = False, ): """Initialize with path.""" self.file_path = path self.include_outputs = include_outputs self.max_output_length = max_output_length self.remove_newline = remove_newline self.traceback = traceback [docs] def load( self, ) -> List[Document]: """Load documents.""" try: import pandas as pd except ImportError: raise ImportError( "pandas is needed for Notebook Loader, " "please install with `pip install pandas`" ) p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) data = pd.json_normalize(d["cells"]) filtered_data = data[["cell_type", "source", "outputs"]] if self.remove_newline: filtered_data = filtered_data.applymap(remove_newlines) text = filtered_data.apply( lambda x: concatenate_cells( x, self.include_outputs, self.max_output_length, self.traceback ), axis=1, ).str.cat(sep=" ") metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notebook.html
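A short sketch of how NotebookLoader might be called; the notebook path is a placeholder and pandas is assumed to be installed.

from langchain.document_loaders.notebook import NotebookLoader

# "analysis.ipynb" is a hypothetical notebook file
loader = NotebookLoader(
    "analysis.ipynb",
    include_outputs=True,   # append (truncated) cell outputs to the text
    max_output_length=20,
    remove_newline=True,
)
docs = loader.load()        # a single Document containing the concatenated cells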
0d8e389f-fa99-4d41-ac78-b08db7647ba1
Source code for langchain.document_loaders.gitbook """Loader that loads GitBook.""" from typing import Any, List, Optional from urllib.parse import urljoin, urlparse from langchain.docstore.document import Document from langchain.document_loaders.web_base import WebBaseLoader [docs]class GitbookLoader(WebBaseLoader): """Load GitBook data. 1. load from either a single page, or 2. load all (relative) paths in the navbar. """ def __init__( self, web_page: str, load_all_paths: bool = False, base_url: Optional[str] = None, content_selector: str = "main", ): """Initialize with web page and whether to load all paths. Args: web_page: The web page to load or the starting point from where relative paths are discovered. load_all_paths: If set to True, all relative paths in the navbar are loaded instead of only `web_page`. base_url: If `load_all_paths` is True, the relative paths are appended to this base url. Defaults to `web_page` if not set. """ self.base_url = base_url or web_page if self.base_url.endswith("/"): self.base_url = self.base_url[:-1] if load_all_paths: # set web_path to the sitemap if we want to crawl all paths web_paths = f"{self.base_url}/sitemap.xml" else: web_paths = web_page super().__init__(web_paths) self.load_all_paths = load_all_paths self.content_selector = content_selector [docs] def load(self) -> List[Document]: """Fetch text from one single GitBook page.""" if self.load_all_paths: soup_info = self.scrape() relative_paths = self._get_paths(soup_info) documents = [] for path in relative_paths: url = urljoin(self.base_url, path) print(f"Fetching text from {url}") soup_info = self._scrape(url) documents.append(self._get_document(soup_info, url)) return [d for d in documents if d] else: soup_info = self.scrape() documents = [self._get_document(soup_info, self.web_path)] return [d for d in documents if d] def _get_document( self, soup: Any, custom_url: Optional[str] = None ) -> Optional[Document]: """Fetch content from page and return Document.""" page_content_raw = soup.find(self.content_selector) if not page_content_raw: return None content = page_content_raw.get_text(separator="\n").strip() title_if_exists = page_content_raw.find("h1") title = title_if_exists.text if title_if_exists else "" metadata = {"source": custom_url or self.web_path, "title": title} return Document(page_content=content, metadata=metadata) def _get_paths(self, soup: Any) -> List[str]: """Fetch all relative paths in the navbar.""" return [urlparse(loc.text).path for loc in soup.find_all("loc")]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gitbook.html
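A hedged sketch of crawling a whole GitBook site via its sitemap; the URL is a placeholder.

from langchain.document_loaders.gitbook import GitbookLoader

# With load_all_paths=True the loader reads <base_url>/sitemap.xml and
# fetches every relative path it lists
loader = GitbookLoader("https://docs.example.com", load_all_paths=True)
docs = loader.load()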
48572018-7d82-4bc2-b9d9-6882400c675f
Source code for langchain.document_loaders.web_base """Web base loader class.""" import asyncio import logging import warnings from typing import Any, Dict, Iterator, List, Optional, Union import aiohttp import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) default_header_template = { "User-Agent": "", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*" ";q=0.8", "Accept-Language": "en-US,en;q=0.5", "Referer": "https://www.google.com/", "DNT": "1", "Connection": "keep-alive", "Upgrade-Insecure-Requests": "1", } def _build_metadata(soup: Any, url: str) -> dict: """Build metadata from BeautifulSoup output.""" metadata = {"source": url} if title := soup.find("title"): metadata["title"] = title.get_text() if description := soup.find("meta", attrs={"name": "description"}): metadata["description"] = description.get("content", None) if html := soup.find("html"): metadata["language"] = html.get("lang", None) return metadata [docs]class WebBaseLoader(BaseLoader): """Loader that uses urllib and beautiful soup to load webpages.""" web_paths: List[str] requests_per_second: int = 2 """Max number of concurrent requests to make.""" default_parser: str = "html.parser" """Default parser to use for BeautifulSoup.""" requests_kwargs: Dict[str, Any] = {} """kwargs for requests""" bs_get_text_kwargs: Dict[str, Any] = {} """kwargs for beatifulsoup4 get_text""" def __init__( self, web_path: Union[str, List[str]], header_template: Optional[dict] = None, verify: Optional[bool] = True, ): """Initialize with webpage path.""" # TODO: Deprecate web_path in favor of web_paths, and remove this # left like this because there are a number of loaders that expect single # urls if isinstance(web_path, str): self.web_paths = [web_path] elif isinstance(web_path, List): self.web_paths = web_path self.session = requests.Session() try: import bs4 # noqa:F401 except ImportError: raise ValueError( "bs4 package not found, please install it with " "`pip install bs4`" ) # Choose to verify self.verify = verify headers = header_template or default_header_template if not headers.get("User-Agent"): try: from fake_useragent import UserAgent headers["User-Agent"] = UserAgent().random except ImportError: logger.info( "fake_useragent not found, using default user agent." "To get a realistic header for requests, " "`pip install fake_useragent`." ) self.session.headers = dict(headers) @property def web_path(self) -> str: if len(self.web_paths) > 1: raise ValueError("Multiple webpaths found.") return self.web_paths[0] async def _fetch( self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5 ) -> str: # For SiteMap SSL verification if not self.requests_kwargs.get("verify", True): connector = aiohttp.TCPConnector(ssl=False) else: connector = None async with aiohttp.ClientSession(connector=connector) as session: for i in range(retries): try: async with session.get( url, headers=self.session.headers, verify=self.verify ) as response: return await response.text() except aiohttp.ClientConnectionError as e: if i == retries - 1: raise else: logger.warning( f"Error fetching {url} with attempt " f"{i + 1}/{retries}: {e}. Retrying..." 
) await asyncio.sleep(cooldown * backoff**i) raise ValueError("retry count exceeded") async def _fetch_with_rate_limit( self, url: str, semaphore: asyncio.Semaphore ) -> str: async with semaphore: return await self._fetch(url) [docs] async def fetch_all(self, urls: List[str]) -> Any: """Fetch all urls concurrently with rate limiting.""" semaphore = asyncio.Semaphore(self.requests_per_second) tasks = [] for url in urls: task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore)) tasks.append(task) try: from tqdm.asyncio import tqdm_asyncio return await tqdm_asyncio.gather( *tasks, desc="Fetching pages", ascii=True, mininterval=1 ) except ImportError: warnings.warn("For better logging of progress, `pip install tqdm`") return await asyncio.gather(*tasks) @staticmethod def _check_parser(parser: str) -> None: """Check that parser is valid for bs4.""" valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"] if parser not in valid_parsers: raise ValueError( "`parser` must be one of " + ", ".join(valid_parsers) + "." ) [docs] def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]: """Fetch all urls, then return soups for all results.""" from bs4 import BeautifulSoup results = asyncio.run(self.fetch_all(urls)) final_results = [] for i, result in enumerate(results): url = urls[i] if parser is None: if url.endswith(".xml"): parser = "xml" else: parser = self.default_parser self._check_parser(parser) final_results.append(BeautifulSoup(result, parser)) return final_results def _scrape(self, url: str, parser: Union[str, None] = None) -> Any: from bs4 import BeautifulSoup if parser is None: if url.endswith(".xml"): parser = "xml" else: parser = self.default_parser self._check_parser(parser) html_doc = self.session.get(url, verify=self.verify, **self.requests_kwargs) html_doc.encoding = html_doc.apparent_encoding return BeautifulSoup(html_doc.text, parser) [docs] def scrape(self, parser: Union[str, None] = None) -> Any: """Scrape data from webpage and return it in BeautifulSoup format.""" if parser is None: parser = self.default_parser return self._scrape(self.web_path, parser) [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load text from the url(s) in web_path.""" for path in self.web_paths: soup = self._scrape(path) text = soup.get_text(**self.bs_get_text_kwargs) metadata = _build_metadata(soup, path) yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load text from the url(s) in web_path.""" return list(self.lazy_load()) [docs] def aload(self) -> List[Document]: """Load text from the urls in web_path async into Documents.""" results = self.scrape_all(self.web_paths) docs = [] for i in range(len(results)): soup = results[i] text = soup.get_text(**self.bs_get_text_kwargs) metadata = _build_metadata(soup, self.web_paths[i]) docs.append(Document(page_content=text, metadata=metadata)) return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/web_base.html
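A minimal sketch of fetching several pages with WebBaseLoader; the URLs are placeholders and bs4 is assumed to be installed.

from langchain.document_loaders.web_base import WebBaseLoader

loader = WebBaseLoader(["https://example.com", "https://example.org/docs"])
loader.requests_per_second = 1   # throttle the concurrent fetcher
docs = loader.aload()            # async fetch of all URLs, one Document per page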
e31cab96-15c9-4469-aff0-f1b77385efc1
Source code for langchain.document_loaders.bilibili import json import re import warnings from typing import List, Tuple import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class BiliBiliLoader(BaseLoader): """Loader that loads bilibili transcripts.""" def __init__(self, video_urls: List[str]): """Initialize with bilibili url.""" self.video_urls = video_urls [docs] def load(self) -> List[Document]: """Load from bilibili url.""" results = [] for url in self.video_urls: transcript, video_info = self._get_bilibili_subs_and_info(url) doc = Document(page_content=transcript, metadata=video_info) results.append(doc) return results def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]: try: from bilibili_api import sync, video except ImportError: raise ValueError( "requests package not found, please install it with " "`pip install bilibili-api-python`" ) bvid = re.search(r"BV\w+", url) if bvid is not None: v = video.Video(bvid=bvid.group()) else: aid = re.search(r"av[0-9]+", url) if aid is not None: try: v = video.Video(aid=int(aid.group()[2:])) except AttributeError: raise ValueError(f"{url} is not bilibili url.") else: raise ValueError(f"{url} is not bilibili url.") video_info = sync(v.get_info()) video_info.update({"url": url}) # Get subtitle url subtitle = video_info.pop("subtitle") sub_list = subtitle["list"] if sub_list: sub_url = sub_list[0]["subtitle_url"] result = requests.get(sub_url) raw_sub_titles = json.loads(result.content)["body"] raw_transcript = " ".join([c["content"] for c in raw_sub_titles]) raw_transcript_with_meta_info = ( f"Video Title: {video_info['title']}," f"description: {video_info['desc']}\n\n" f"Transcript: {raw_transcript}" ) return raw_transcript_with_meta_info, video_info else: raw_transcript = "" warnings.warn( f""" No subtitles found for video: {url}. Return Empty transcript. """ ) return raw_transcript, video_info
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bilibili.html
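A usage sketch for BiliBiliLoader; the URL is a placeholder that only illustrates the expected BV-id pattern, and bilibili-api-python is assumed to be installed.

from langchain.document_loaders.bilibili import BiliBiliLoader

loader = BiliBiliLoader(["https://www.bilibili.com/video/BVxxxxxxxxxx"])
docs = loader.load()   # one Document per video: transcript plus video metadata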
e15474e6-bd04-4234-9bc3-ce2ebe39f642
Source code for langchain.document_loaders.diffbot """Loader that uses Diffbot to load webpages in text format.""" import logging from typing import Any, List import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class DiffbotLoader(BaseLoader): """Loader that loads Diffbot file json.""" def __init__( self, api_token: str, urls: List[str], continue_on_failure: bool = True ): """Initialize with API token, ids, and key.""" self.api_token = api_token self.urls = urls self.continue_on_failure = continue_on_failure def _diffbot_api_url(self, diffbot_api: str) -> str: return f"https://api.diffbot.com/v3/{diffbot_api}" def _get_diffbot_data(self, url: str) -> Any: """Get Diffbot file from Diffbot REST API.""" # TODO: Add support for other Diffbot APIs diffbot_url = self._diffbot_api_url("article") params = { "token": self.api_token, "url": url, } response = requests.get(diffbot_url, params=params, timeout=10) # TODO: handle non-ok errors return response.json() if response.ok else {} [docs] def load(self) -> List[Document]: """Extract text from Diffbot on all the URLs and return Document instances""" docs: List[Document] = list() for url in self.urls: try: data = self._get_diffbot_data(url) text = data["objects"][0]["text"] if "objects" in data else "" metadata = {"source": url} docs.append(Document(page_content=text, metadata=metadata)) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exception: {e}") else: raise e return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/diffbot.html
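A sketch of the Diffbot loader, assuming the API token is exposed through a hypothetical DIFFBOT_API_TOKEN environment variable; the article URL is a placeholder.

import os

from langchain.document_loaders.diffbot import DiffbotLoader

loader = DiffbotLoader(
    api_token=os.environ["DIFFBOT_API_TOKEN"],
    urls=["https://example.com/article"],
)
docs = loader.load()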
c86c9582-688c-4d38-8bcc-d93cb3980a6e
Source code for langchain.document_loaders.csv_loader

import csv
from typing import Any, Dict, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class CSVLoader(BaseLoader):
    """Loads a CSV file into a list of documents.

    Each document represents one row of the CSV file. Every row is converted
    into a key/value pair and output to a new line in the document's
    page_content.

    The source for each document loaded from csv is set to the value of the
    `file_path` argument for all documents by default.
    You can override this by setting the `source_column` argument to the
    name of a column in the CSV file.
    The source of each document will then be set to the value of the column
    with the name specified in `source_column`.

    Output Example:
        .. code-block:: txt

            column1: value1
            column2: value2
            column3: value3
    """

    def __init__(
        self,
        file_path: str,
        source_column: Optional[str] = None,
        csv_args: Optional[Dict] = None,
        encoding: Optional[str] = None,
    ):
        self.file_path = file_path
        self.source_column = source_column
        self.encoding = encoding
        self.csv_args = csv_args or {}

    def load(self) -> List[Document]:
        """Load data into document objects."""
        docs = []
        with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
            csv_reader = csv.DictReader(csvfile, **self.csv_args)  # type: ignore
            for i, row in enumerate(csv_reader):
                content = "\n".join(f"{k.strip()}: {v.strip()}" for k, v in row.items())
                try:
                    source = (
                        row[self.source_column]
                        if self.source_column is not None
                        else self.file_path
                    )
                except KeyError:
                    raise ValueError(
                        f"Source column '{self.source_column}' not found in CSV file."
                    )
                metadata = {"source": source, "row": i}
                doc = Document(page_content=content, metadata=metadata)
                docs.append(doc)

        return docs


class UnstructuredCSVLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load CSV files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.8")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.csv import partition_csv

        return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/csv_loader.html
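A brief sketch of CSVLoader; the file name and column names are placeholders.

from langchain.document_loaders.csv_loader import CSVLoader

loader = CSVLoader(
    "people.csv",
    source_column="email",         # use this column as each row's source
    csv_args={"delimiter": ";"},   # forwarded to csv.DictReader
)
docs = loader.load()               # one Document per CSV row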
f5b3e057-b6da-42dc-85a2-d47a7f58b4a6
Source code for langchain.document_loaders.dataframe

"""Load from Dataframe object"""
from typing import Any, Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class DataFrameLoader(BaseLoader):
    """Load Pandas DataFrames."""

    def __init__(self, data_frame: Any, page_content_column: str = "text"):
        """Initialize with dataframe object."""
        import pandas as pd

        if not isinstance(data_frame, pd.DataFrame):
            raise ValueError(
                f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}"
            )
        self.data_frame = data_frame
        self.page_content_column = page_content_column

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load records from dataframe."""
        for _, row in self.data_frame.iterrows():
            text = row[self.page_content_column]
            metadata = row.to_dict()
            metadata.pop(self.page_content_column)
            yield Document(page_content=text, metadata=metadata)

    def load(self) -> List[Document]:
        """Load full dataframe."""
        return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/dataframe.html
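A small illustrative example for DataFrameLoader; the frame and its column names are made up.

import pandas as pd

from langchain.document_loaders.dataframe import DataFrameLoader

df = pd.DataFrame({"text": ["first doc", "second doc"], "author": ["a", "b"]})
loader = DataFrameLoader(df, page_content_column="text")
docs = loader.load()   # remaining columns ("author") become metadata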
629d22b7-a654-491a-a9c4-567938f95ee4
Source code for langchain.document_loaders.directory """Loading logic for loading documents from a directory.""" import concurrent import logging from pathlib import Path from typing import Any, List, Optional, Type, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.html_bs import BSHTMLLoader from langchain.document_loaders.text import TextLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader FILE_LOADER_TYPE = Union[ Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader] ] logger = logging.getLogger(__name__) def _is_visible(p: Path) -> bool: parts = p.parts for _p in parts: if _p.startswith("."): return False return True [docs]class DirectoryLoader(BaseLoader): """Loading logic for loading documents from a directory.""" def __init__( self, path: str, glob: str = "**/[!.]*", silent_errors: bool = False, load_hidden: bool = False, loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader, loader_kwargs: Union[dict, None] = None, recursive: bool = False, show_progress: bool = False, use_multithreading: bool = False, max_concurrency: int = 4, ): """Initialize with path to directory and how to glob over it.""" if loader_kwargs is None: loader_kwargs = {} self.path = path self.glob = glob self.load_hidden = load_hidden self.loader_cls = loader_cls self.loader_kwargs = loader_kwargs self.silent_errors = silent_errors self.recursive = recursive self.show_progress = show_progress self.use_multithreading = use_multithreading self.max_concurrency = max_concurrency [docs] def load_file( self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any] ) -> None: if item.is_file(): if _is_visible(item.relative_to(path)) or self.load_hidden: try: sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load() docs.extend(sub_docs) except Exception as e: if self.silent_errors: logger.warning(e) else: raise e finally: if pbar: pbar.update(1) [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.path) if not p.exists(): raise FileNotFoundError(f"Directory not found: '{self.path}'") if not p.is_dir(): raise ValueError(f"Expected directory, got file: '{self.path}'") docs: List[Document] = [] items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob)) pbar = None if self.show_progress: try: from tqdm import tqdm pbar = tqdm(total=len(items)) except ImportError as e: logger.warning( "To log the progress of DirectoryLoader you need to install tqdm, " "`pip install tqdm`" ) if self.silent_errors: logger.warning(e) else: raise e if self.use_multithreading: with concurrent.futures.ThreadPoolExecutor( max_workers=self.max_concurrency ) as executor: executor.map(lambda i: self.load_file(i, p, docs, pbar), items) else: for i in items: self.load_file(i, p, docs, pbar) if pbar: pbar.close() return docs #
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/directory.html
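A sketch of DirectoryLoader pointed at a hypothetical docs/ folder of Markdown files, loaded with TextLoader.

from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.text import TextLoader

loader = DirectoryLoader(
    "docs/",
    glob="**/*.md",
    loader_cls=TextLoader,
    show_progress=True,       # requires tqdm
    use_multithreading=True,
)
docs = loader.load()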
bc7c6573-292a-425d-b73d-85938a0b0e3b
Source code for langchain.document_loaders.onedrive_file

from __future__ import annotations

import tempfile
from typing import TYPE_CHECKING, List

from pydantic import BaseModel, Field

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

if TYPE_CHECKING:
    from O365.drive import File

CHUNK_SIZE = 1024 * 1024 * 5


class OneDriveFileLoader(BaseLoader, BaseModel):
    file: File = Field(...)

    class Config:
        arbitrary_types_allowed = True

    def load(self) -> List[Document]:
        """Load Documents"""
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.file.name}"
            self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive_file.html
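A hedged sketch only; obtaining the O365 drive File requires an authenticated O365 client, which is outside this loader's scope.

from O365.drive import File

from langchain.document_loaders.onedrive_file import OneDriveFileLoader

def load_onedrive_file(drive_file: File):
    # `drive_file` is assumed to be a File already fetched via an O365 Drive client
    loader = OneDriveFileLoader(file=drive_file)
    return loader.load()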
ec25ae90-f5ed-4bb5-bc2f-d978ef5cb51e
Source code for langchain.document_loaders.googledrive """Loader that loads data from Google Drive.""" # Prerequisites: # 1. Create a Google Cloud project # 2. Enable the Google Drive API: # https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com # 3. Authorize credentials for desktop app: # https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501 # 4. For service accounts visit # https://cloud.google.com/iam/docs/service-accounts-create import os from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Union from pydantic import BaseModel, root_validator, validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader SCOPES = ["https://www.googleapis.com/auth/drive.readonly"] [docs]class GoogleDriveLoader(BaseLoader, BaseModel): """Loader that loads Google Docs from Google Drive.""" service_account_key: Path = Path.home() / ".credentials" / "keys.json" credentials_path: Path = Path.home() / ".credentials" / "credentials.json" token_path: Path = Path.home() / ".credentials" / "token.json" folder_id: Optional[str] = None document_ids: Optional[List[str]] = None file_ids: Optional[List[str]] = None recursive: bool = False file_types: Optional[Sequence[str]] = None load_trashed_files: bool = False # NOTE(MthwRobinson) - changing the file_loader_cls to type here currently # results in pydantic validation errors file_loader_cls: Any = None file_loader_kwargs: Dict["str", Any] = {} @root_validator def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that either folder_id or document_ids is set, but not both.""" if values.get("folder_id") and ( values.get("document_ids") or values.get("file_ids") ): raise ValueError( "Cannot specify both folder_id and document_ids nor " "folder_id and file_ids" ) if ( not values.get("folder_id") and not values.get("document_ids") and not values.get("file_ids") ): raise ValueError("Must specify either folder_id, document_ids, or file_ids") file_types = values.get("file_types") if file_types: if values.get("document_ids") or values.get("file_ids"): raise ValueError( "file_types can only be given when folder_id is given," " (not when document_ids or file_ids are given)." ) type_mapping = { "document": "application/vnd.google-apps.document", "sheet": "application/vnd.google-apps.spreadsheet", "pdf": "application/pdf", } allowed_types = list(type_mapping.keys()) + list(type_mapping.values()) short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()]) full_names = ", ".join([f"'{x}'" for x in type_mapping.values()]) for file_type in file_types: if file_type not in allowed_types: raise ValueError( f"Given file type {file_type} is not supported. 
" f"Supported values are: {short_names}; and " f"their full-form names: {full_names}" ) # replace short-form file types by full-form file types def full_form(x: str) -> str: return type_mapping[x] if x in type_mapping else x values["file_types"] = [full_form(file_type) for file_type in file_types] return values @validator("credentials_path") def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any: """Validate that credentials_path exists.""" if not v.exists(): raise ValueError(f"credentials_path {v} does not exist") return v def _load_credentials(self) -> Any: """Load credentials.""" # Adapted from https://developers.google.com/drive/api/v3/quickstart/python try: from google.auth import default from google.auth.transport.requests import Request from google.oauth2 import service_account from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow except ImportError: raise ImportError( "You must run " "`pip install --upgrade " "google-api-python-client google-auth-httplib2 " "google-auth-oauthlib` " "to use the Google Drive loader." ) creds = None if self.service_account_key.exists(): return service_account.Credentials.from_service_account_file( str(self.service_account_key), scopes=SCOPES ) if self.token_path.exists(): creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) elif "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ: creds, project = default() creds = creds.with_scopes(SCOPES) # no need to write to file if creds: return creds else: flow = InstalledAppFlow.from_client_secrets_file( str(self.credentials_path), SCOPES ) creds = flow.run_local_server(port=0) with open(self.token_path, "w") as token: token.write(creds.to_json()) return creds def _load_sheet_from_id(self, id: str) -> List[Document]: """Load a sheet and all tabs from an ID.""" from googleapiclient.discovery import build creds = self._load_credentials() sheets_service = build("sheets", "v4", credentials=creds) spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute() sheets = spreadsheet.get("sheets", []) documents = [] for sheet in sheets: sheet_name = sheet["properties"]["title"] result = ( sheets_service.spreadsheets() .values() .get(spreadsheetId=id, range=sheet_name) .execute() ) values = result.get("values", []) header = values[0] for i, row in enumerate(values[1:], start=1): metadata = { "source": ( f"https://docs.google.com/spreadsheets/d/{id}/" f"edit?gid={sheet['properties']['sheetId']}" ), "title": f"{spreadsheet['properties']['title']} - {sheet_name}", "row": i, } content = [] for j, v in enumerate(row): title = header[j].strip() if len(header) > j else "" content.append(f"{title}: {v.strip()}") page_content = "\n".join(content) documents.append(Document(page_content=page_content, metadata=metadata)) return documents def _load_document_from_id(self, id: str) -> Document: """Load a document from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.errors import HttpError from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build("drive", "v3", credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().export_media(fileId=id, mimeType="text/plain") fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False try: while done is False: status, done = 
downloader.next_chunk() except HttpError as e: if e.resp.status == 404: print("File not found: {}".format(id)) else: print("An error occurred: {}".format(e)) text = fh.getvalue().decode("utf-8") metadata = { "source": f"https://docs.google.com/document/d/{id}/edit", "title": f"{file.get('name')}", } return Document(page_content=text, metadata=metadata) def _load_documents_from_folder( self, folder_id: str, *, file_types: Optional[Sequence[str]] = None ) -> List[Document]: """Load documents from a folder.""" from googleapiclient.discovery import build creds = self._load_credentials() service = build("drive", "v3", credentials=creds) files = self._fetch_files_recursive(service, folder_id) # If file types filter is provided, we'll filter by the file type. if file_types: _files = [f for f in files if f["mimeType"] in file_types] # type: ignore else: _files = files returns = [] for file in _files: if file["trashed"] and not self.load_trashed_files: continue elif file["mimeType"] == "application/vnd.google-apps.document": returns.append(self._load_document_from_id(file["id"])) # type: ignore elif file["mimeType"] == "application/vnd.google-apps.spreadsheet": returns.extend(self._load_sheet_from_id(file["id"])) # type: ignore elif ( file["mimeType"] == "application/pdf" or self.file_loader_cls is not None ): returns.extend(self._load_file_from_id(file["id"])) # type: ignore else: pass return returns def _fetch_files_recursive( self, service: Any, folder_id: str ) -> List[Dict[str, Union[str, List[str]]]]: """Fetch all files and subfolders recursively.""" results = ( service.files() .list( q=f"'{folder_id}' in parents", pageSize=1000, includeItemsFromAllDrives=True, supportsAllDrives=True, fields="nextPageToken, files(id, name, mimeType, parents, trashed)", ) .execute() ) files = results.get("files", []) returns = [] for file in files: if file["mimeType"] == "application/vnd.google-apps.folder": if self.recursive: returns.extend(self._fetch_files_recursive(service, file["id"])) else: returns.append(file) return returns def _load_documents_from_ids(self) -> List[Document]: """Load documents from a list of IDs.""" if not self.document_ids: raise ValueError("document_ids must be set") return [self._load_document_from_id(doc_id) for doc_id in self.document_ids] def _load_file_from_id(self, id: str) -> List[Document]: """Load a file from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build("drive", "v3", credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().get_media(fileId=id) fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() if self.file_loader_cls is not None: fh.seek(0) loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs) docs = loader.load() for doc in docs: doc.metadata["source"] = f"https://drive.google.com/file/d/{id}/view" return docs else: from PyPDF2 import PdfReader content = fh.getvalue() pdf_reader = PdfReader(BytesIO(content)) return [ Document( page_content=page.extract_text(), metadata={ "source": f"https://drive.google.com/file/d/{id}/view", "title": f"{file.get('name')}", "page": i, }, ) for i, page in enumerate(pdf_reader.pages) ] def _load_file_from_ids(self) -> List[Document]: """Load files from a list of IDs.""" if not self.file_ids: raise ValueError("file_ids must be set") docs = [] for file_id in 
self.file_ids: docs.extend(self._load_file_from_id(file_id)) return docs [docs] def load(self) -> List[Document]: """Load documents.""" if self.folder_id: return self._load_documents_from_folder( self.folder_id, file_types=self.file_types ) elif self.document_ids: return self._load_documents_from_ids() else: return self._load_file_from_ids()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/googledrive.html
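A sketch of loading Google Docs and Sheets from a Drive folder; the folder id is a placeholder, and credentials are expected at the default ~/.credentials/ paths shown above.

from langchain.document_loaders.googledrive import GoogleDriveLoader

loader = GoogleDriveLoader(
    folder_id="1A2B3C4D5E6F",          # hypothetical Drive folder id
    file_types=["document", "sheet"],  # short forms are expanded to MIME types
    recursive=False,
)
docs = loader.load()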
cda717c8-02a9-47d6-810b-061748405631
Source code for langchain.document_loaders.airbyte_json

"""Loader that loads local airbyte json files."""
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict


class AirbyteJSONLoader(BaseLoader):
    """Loader that loads local airbyte json files."""

    def __init__(self, file_path: str):
        """Initialize with file path. This should start with '/tmp/airbyte_local/'."""
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load file."""
        text = ""
        for line in open(self.file_path, "r"):
            data = json.loads(line)["_airbyte_data"]
            text += stringify_dict(data)
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airbyte_json.html
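A minimal sketch; the file name is a placeholder for a local Airbyte JSON destination file.

from langchain.document_loaders.airbyte_json import AirbyteJSONLoader

loader = AirbyteJSONLoader("/tmp/airbyte_local/_airbyte_raw_example.jsonl")
docs = loader.load()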
5c8aa8fb-00a1-4de8-a5e9-7a3da8ddc357
Source code for langchain.document_loaders.image_captions """ Loader that loads image captions By default, the loader utilizes the pre-trained BLIP image captioning model. https://huggingface.co/Salesforce/blip-image-captioning-base """ from typing import Any, List, Tuple, Union import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ImageCaptionLoader(BaseLoader): """Loader that loads the captions of an image""" def __init__( self, path_images: Union[str, List[str]], blip_processor: str = "Salesforce/blip-image-captioning-base", blip_model: str = "Salesforce/blip-image-captioning-base", ): """ Initialize with a list of image paths """ if isinstance(path_images, str): self.image_paths = [path_images] else: self.image_paths = path_images self.blip_processor = blip_processor self.blip_model = blip_model [docs] def load(self) -> List[Document]: """ Load from a list of image files """ try: from transformers import BlipForConditionalGeneration, BlipProcessor except ImportError: raise ImportError( "`transformers` package not found, please install with " "`pip install transformers`." ) processor = BlipProcessor.from_pretrained(self.blip_processor) model = BlipForConditionalGeneration.from_pretrained(self.blip_model) results = [] for path_image in self.image_paths: caption, metadata = self._get_captions_and_metadata( model=model, processor=processor, path_image=path_image ) doc = Document(page_content=caption, metadata=metadata) results.append(doc) return results def _get_captions_and_metadata( self, model: Any, processor: Any, path_image: str ) -> Tuple[str, dict]: """ Helper function for getting the captions and metadata of an image """ try: from PIL import Image except ImportError: raise ImportError( "`PIL` package not found, please install with `pip install pillow`" ) try: if path_image.startswith("http://") or path_image.startswith("https://"): image = Image.open(requests.get(path_image, stream=True).raw).convert( "RGB" ) else: image = Image.open(path_image).convert("RGB") except Exception: raise ValueError(f"Could not get image data for {path_image}") inputs = processor(image, "an image of", return_tensors="pt") output = model.generate(**inputs) caption: str = processor.decode(output[0]) metadata: dict = {"image_path": path_image} return caption, metadata
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/image_captions.html
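A usage sketch for ImageCaptionLoader; the image paths are placeholders, and transformers plus pillow are assumed to be installed.

from langchain.document_loaders.image_captions import ImageCaptionLoader

loader = ImageCaptionLoader(["photos/cat.jpg", "photos/dog.jpg"])
docs = loader.load()   # one Document per image, BLIP caption as page_content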
be54377b-3c97-484a-9890-509855fb2e2b
Source code for langchain.document_loaders.git import os from typing import Callable, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class GitLoader(BaseLoader): """Loads files from a Git repository into a list of documents. Repository can be local on disk available at `repo_path`, or remote at `clone_url` that will be cloned to `repo_path`. Currently supports only text files. Each document represents one file in the repository. The `path` points to the local Git repository, and the `branch` specifies the branch to load files from. By default, it loads from the `main` branch. """ def __init__( self, repo_path: str, clone_url: Optional[str] = None, branch: Optional[str] = "main", file_filter: Optional[Callable[[str], bool]] = None, ): self.repo_path = repo_path self.clone_url = clone_url self.branch = branch self.file_filter = file_filter [docs] def load(self) -> List[Document]: try: from git import Blob, Repo # type: ignore except ImportError as ex: raise ImportError( "Could not import git python package. " "Please install it with `pip install GitPython`." ) from ex if not os.path.exists(self.repo_path) and self.clone_url is None: raise ValueError(f"Path {self.repo_path} does not exist") elif self.clone_url: repo = Repo.clone_from(self.clone_url, self.repo_path) repo.git.checkout(self.branch) else: repo = Repo(self.repo_path) repo.git.checkout(self.branch) docs: List[Document] = [] for item in repo.tree().traverse(): if not isinstance(item, Blob): continue file_path = os.path.join(self.repo_path, item.path) ignored_files = repo.ignored([file_path]) # type: ignore if len(ignored_files): continue # uses filter to skip files if self.file_filter and not self.file_filter(file_path): continue rel_file_path = os.path.relpath(file_path, self.repo_path) try: with open(file_path, "rb") as f: content = f.read() file_type = os.path.splitext(item.name)[1] # loads only text files try: text_content = content.decode("utf-8") except UnicodeDecodeError: continue metadata = { "source": rel_file_path, "file_path": rel_file_path, "file_name": item.name, "file_type": file_type, } doc = Document(page_content=text_content, metadata=metadata) docs.append(doc) except Exception as e: print(f"Error reading file {file_path}: {e}") return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/git.html
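A sketch of cloning and loading a repository with GitLoader; the clone URL and paths are placeholders, and GitPython is assumed to be installed.

from langchain.document_loaders.git import GitLoader

loader = GitLoader(
    repo_path="./example_repo",
    clone_url="https://github.com/example/example.git",
    branch="main",
    file_filter=lambda p: p.endswith(".py"),   # only keep Python files
)
docs = loader.load()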
a222891d-3509-4fcf-a87b-01ca6ee158e4
Source code for langchain.document_loaders.url_selenium """Loader that uses Selenium to load a page, then uses unstructured to load the html. """ import logging from typing import TYPE_CHECKING, List, Literal, Optional, Union if TYPE_CHECKING: from selenium.webdriver import Chrome, Firefox from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class SeleniumURLLoader(BaseLoader): """Loader that uses Selenium and to load a page and unstructured to load the html. This is useful for loading pages that require javascript to render. Attributes: urls (List[str]): List of URLs to load. continue_on_failure (bool): If True, continue loading other URLs on failure. browser (str): The browser to use, either 'chrome' or 'firefox'. binary_location (Optional[str]): The location of the browser binary. executable_path (Optional[str]): The path to the browser executable. headless (bool): If True, the browser will run in headless mode. arguments [List[str]]: List of arguments to pass to the browser. """ def __init__( self, urls: List[str], continue_on_failure: bool = True, browser: Literal["chrome", "firefox"] = "chrome", binary_location: Optional[str] = None, executable_path: Optional[str] = None, headless: bool = True, arguments: List[str] = [], ): """Load a list of URLs using Selenium and unstructured.""" try: import selenium # noqa:F401 except ImportError: raise ImportError( "selenium package not found, please install it with " "`pip install selenium`" ) try: import unstructured # noqa:F401 except ImportError: raise ImportError( "unstructured package not found, please install it with " "`pip install unstructured`" ) self.urls = urls self.continue_on_failure = continue_on_failure self.browser = browser self.binary_location = binary_location self.executable_path = executable_path self.headless = headless self.arguments = arguments def _get_driver(self) -> Union["Chrome", "Firefox"]: """Create and return a WebDriver instance based on the specified browser. Raises: ValueError: If an invalid browser is specified. Returns: Union[Chrome, Firefox]: A WebDriver instance for the specified browser. """ if self.browser.lower() == "chrome": from selenium.webdriver import Chrome from selenium.webdriver.chrome.options import Options as ChromeOptions chrome_options = ChromeOptions() for arg in self.arguments: chrome_options.add_argument(arg) if self.headless: chrome_options.add_argument("--headless") chrome_options.add_argument("--no-sandbox") if self.binary_location is not None: chrome_options.binary_location = self.binary_location if self.executable_path is None: return Chrome(options=chrome_options) return Chrome(executable_path=self.executable_path, options=chrome_options) elif self.browser.lower() == "firefox": from selenium.webdriver import Firefox from selenium.webdriver.firefox.options import Options as FirefoxOptions firefox_options = FirefoxOptions() for arg in self.arguments: firefox_options.add_argument(arg) if self.headless: firefox_options.add_argument("--headless") if self.binary_location is not None: firefox_options.binary_location = self.binary_location if self.executable_path is None: return Firefox(options=firefox_options) return Firefox( executable_path=self.executable_path, options=firefox_options ) else: raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.") [docs] def load(self) -> List[Document]: """Load the specified URLs using Selenium and create Document instances. 
Returns: List[Document]: A list of Document instances with loaded content. """ from unstructured.partition.html import partition_html docs: List[Document] = list() driver = self._get_driver() for url in self.urls: try: driver.get(url) page_content = driver.page_source elements = partition_html(text=page_content) text = "\n\n".join([str(el) for el in elements]) metadata = {"source": url} docs.append(Document(page_content=text, metadata=metadata)) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exception: {e}") else: raise e driver.quit() return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
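A sketch for SeleniumURLLoader; the URL is a placeholder, and selenium, unstructured and a matching browser driver are assumed to be available.

from langchain.document_loaders.url_selenium import SeleniumURLLoader

loader = SeleniumURLLoader(
    urls=["https://example.com/js-rendered-page"],
    browser="chrome",
    headless=True,
)
docs = loader.load()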
9626201b-b069-497a-8ab7-b7bbabcb21ea
Source code for langchain.document_loaders.max_compute from __future__ import annotations from typing import Any, Iterator, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.max_compute import MaxComputeAPIWrapper [docs]class MaxComputeLoader(BaseLoader): """Loads a query result from Alibaba Cloud MaxCompute table into documents.""" def __init__( self, query: str, api_wrapper: MaxComputeAPIWrapper, *, page_content_columns: Optional[Sequence[str]] = None, metadata_columns: Optional[Sequence[str]] = None, ): """Initialize Alibaba Cloud MaxCompute document loader. Args: query: SQL query to execute. api_wrapper: MaxCompute API wrapper. page_content_columns: The columns to write into the `page_content` of the Document. If unspecified, all columns will be written to `page_content`. metadata_columns: The columns to write into the `metadata` of the Document. If unspecified, all columns not added to `page_content` will be written. """ self.query = query self.api_wrapper = api_wrapper self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns [docs] @classmethod def from_params( cls, query: str, endpoint: str, project: str, *, access_id: Optional[str] = None, secret_access_key: Optional[str] = None, **kwargs: Any, ) -> MaxComputeLoader: """Convenience constructor that builds the MaxCompute API wrapper from given parameters. Args: query: SQL query to execute. endpoint: MaxCompute endpoint. project: A project is a basic organizational unit of MaxCompute, which is similar to a database. access_id: MaxCompute access ID. Should be passed in directly or set as the environment variable `MAX_COMPUTE_ACCESS_ID`. secret_access_key: MaxCompute secret access key. Should be passed in directly or set as the environment variable `MAX_COMPUTE_SECRET_ACCESS_KEY`. """ api_wrapper = MaxComputeAPIWrapper.from_params( endpoint, project, access_id=access_id, secret_access_key=secret_access_key ) return cls(query, api_wrapper, **kwargs) [docs] def lazy_load(self) -> Iterator[Document]: for row in self.api_wrapper.query(self.query): if self.page_content_columns: page_content_data = { k: v for k, v in row.items() if k in self.page_content_columns } else: page_content_data = row page_content = "\n".join(f"{k}: {v}" for k, v in page_content_data.items()) if self.metadata_columns: metadata = {k: v for k, v in row.items() if k in self.metadata_columns} else: metadata = {k: v for k, v in row.items() if k not in page_content_data} yield Document(page_content=page_content, metadata=metadata) [docs] def load(self) -> List[Document]: return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/max_compute.html
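A hedged sketch using the from_params convenience constructor; the query, endpoint and project are placeholders, and credentials are assumed to come from the MAX_COMPUTE_ACCESS_ID / MAX_COMPUTE_SECRET_ACCESS_KEY environment variables.

from langchain.document_loaders.max_compute import MaxComputeLoader

loader = MaxComputeLoader.from_params(
    query="SELECT id, title, body FROM example_table",
    endpoint="https://service.example-region.maxcompute.aliyun.com/api",  # placeholder
    project="example_project",
)
docs = loader.load()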
6cd7de9a-2fbd-49ec-834e-0a6792f96573
Source code for langchain.document_loaders.pyspark_dataframe """Load from a Spark Dataframe object""" import itertools import logging import sys from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Tuple from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__file__) if TYPE_CHECKING: from pyspark.sql import SparkSession [docs]class PySparkDataFrameLoader(BaseLoader): """Load PySpark DataFrames""" def __init__( self, spark_session: Optional["SparkSession"] = None, df: Optional[Any] = None, page_content_column: str = "text", fraction_of_memory: float = 0.1, ): """Initialize with a Spark DataFrame object.""" try: from pyspark.sql import DataFrame, SparkSession except ImportError: raise ImportError( "pyspark is not installed. " "Please install it with `pip install pyspark`" ) self.spark = ( spark_session if spark_session else SparkSession.builder.getOrCreate() ) if not isinstance(df, DataFrame): raise ValueError( f"Expected data_frame to be a PySpark DataFrame, got {type(df)}" ) self.df = df self.page_content_column = page_content_column self.fraction_of_memory = fraction_of_memory self.num_rows, self.max_num_rows = self.get_num_rows() self.rdd_df = self.df.rdd.map(list) self.column_names = self.df.columns [docs] def get_num_rows(self) -> Tuple[int, int]: """Gets the amount of "feasible" rows for the DataFrame""" try: import psutil except ImportError as e: raise ImportError( "psutil not installed. Please install it with `pip install psutil`." ) from e row = self.df.limit(1).collect()[0] estimated_row_size = sys.getsizeof(row) mem_info = psutil.virtual_memory() available_memory = mem_info.available max_num_rows = int( (available_memory / estimated_row_size) * self.fraction_of_memory ) return min(max_num_rows, self.df.count()), max_num_rows [docs] def lazy_load(self) -> Iterator[Document]: """A lazy loader for document content.""" for row in self.rdd_df.toLocalIterator(): metadata = {self.column_names[i]: row[i] for i in range(len(row))} text = metadata[self.page_content_column] metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load from the dataframe.""" if self.df.count() > self.max_num_rows: logger.warning( f"The number of DataFrame rows is {self.df.count()}, " f"but we will only include the amount " f"of rows that can reasonably fit in memory: {self.num_rows}." ) lazy_load_iterator = self.lazy_load() return list(itertools.islice(lazy_load_iterator, self.num_rows))
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pyspark_dataframe.html
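A small illustrative example for PySparkDataFrameLoader; the DataFrame is made up, and pyspark plus psutil are assumed to be installed.

from pyspark.sql import SparkSession

from langchain.document_loaders.pyspark_dataframe import PySparkDataFrameLoader

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("first doc", "a"), ("second doc", "b")], ["text", "author"]
)
loader = PySparkDataFrameLoader(spark, df, page_content_column="text")
docs = loader.load()   # other columns ("author") become metadata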
8aa1afb5-7dc3-4a98-8f44-bd99737ec89c
Source code for langchain.document_loaders.docugami """Loader that loads processed documents from Docugami.""" import io import logging import os import re from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Union import requests from pydantic import BaseModel, root_validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader TD_NAME = "{http://www.w3.org/1999/xhtml}td" TABLE_NAME = "{http://www.w3.org/1999/xhtml}table" XPATH_KEY = "xpath" DOCUMENT_ID_KEY = "id" DOCUMENT_NAME_KEY = "name" STRUCTURE_KEY = "structure" TAG_KEY = "tag" PROJECTS_KEY = "projects" DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1" logger = logging.getLogger(__name__) [docs]class DocugamiLoader(BaseLoader, BaseModel): """Loader that loads processed docs from Docugami. To use, you should have the ``lxml`` python package installed. """ api: str = DEFAULT_API_ENDPOINT access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY") docset_id: Optional[str] document_ids: Optional[Sequence[str]] file_paths: Optional[Sequence[Union[Path, str]]] min_chunk_size: int = 32 # appended to the next chunk to avoid over-chunking @root_validator def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that either local file paths are given, or remote API docset ID.""" if values.get("file_paths") and values.get("docset_id"): raise ValueError("Cannot specify both file_paths and remote API docset_id") if not values.get("file_paths") and not values.get("docset_id"): raise ValueError("Must specify either file_paths or remote API docset_id") if values.get("docset_id") and not values.get("access_token"): raise ValueError("Must specify access token if using remote API docset_id") return values def _parse_dgml( self, document: Mapping, content: bytes, doc_metadata: Optional[Mapping] = None ) -> List[Document]: """Parse a single DGML document into a list of Documents.""" try: from lxml import etree except ImportError: raise ImportError( "Could not import lxml python package. " "Please install it with `pip install lxml`." 
) # helpers def _xpath_qname_for_chunk(chunk: Any) -> str: """Get the xpath qname for a chunk.""" qname = f"{chunk.prefix}:{chunk.tag.split('}')[-1]}" parent = chunk.getparent() if parent is not None: doppelgangers = [x for x in parent if x.tag == chunk.tag] if len(doppelgangers) > 1: idx_of_self = doppelgangers.index(chunk) qname = f"{qname}[{idx_of_self + 1}]" return qname def _xpath_for_chunk(chunk: Any) -> str: """Get the xpath for a chunk.""" ancestor_chain = chunk.xpath("ancestor-or-self::*") return "/" + "/".join(_xpath_qname_for_chunk(x) for x in ancestor_chain) def _structure_value(node: Any) -> str: """Get the structure value for a node.""" structure = ( "table" if node.tag == TABLE_NAME else node.attrib["structure"] if "structure" in node.attrib else None ) return structure def _is_structural(node: Any) -> bool: """Check if a node is structural.""" return _structure_value(node) is not None def _is_heading(node: Any) -> bool: """Check if a node is a heading.""" structure = _structure_value(node) return structure is not None and structure.lower().startswith("h") def _get_text(node: Any) -> str: """Get the text of a node.""" return " ".join(node.itertext()).strip() def _has_structural_descendant(node: Any) -> bool: """Check if a node has a structural descendant.""" for child in node: if _is_structural(child) or _has_structural_descendant(child): return True return False def _leaf_structural_nodes(node: Any) -> List: """Get the leaf structural nodes of a node.""" if _is_structural(node) and not _has_structural_descendant(node): return [node] else: leaf_nodes = [] for child in node: leaf_nodes.extend(_leaf_structural_nodes(child)) return leaf_nodes def _create_doc(node: Any, text: str) -> Document: """Create a Document from a node and text.""" metadata = { XPATH_KEY: _xpath_for_chunk(node), DOCUMENT_ID_KEY: document["id"], DOCUMENT_NAME_KEY: document["name"], STRUCTURE_KEY: node.attrib.get("structure", ""), TAG_KEY: re.sub(r"\{.*\}", "", node.tag), } if doc_metadata: metadata.update(doc_metadata) return Document( page_content=text, metadata=metadata, ) # parse the tree and return chunks tree = etree.parse(io.BytesIO(content)) root = tree.getroot() chunks: List[Document] = [] prev_small_chunk_text = None for node in _leaf_structural_nodes(root): text = _get_text(node) if prev_small_chunk_text: text = prev_small_chunk_text + " " + text prev_small_chunk_text = None if _is_heading(node) or len(text) < self.min_chunk_size: # Save headings or other small chunks to be appended to the next chunk prev_small_chunk_text = text else: chunks.append(_create_doc(node, text)) if prev_small_chunk_text and len(chunks) > 0: # small chunk at the end left over, just append to last chunk chunks[-1].page_content += " " + prev_small_chunk_text return chunks def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]: """Gets all document details for the given docset ID""" url = f"{self.api}/docsets/{docset_id}/documents" all_documents = [] while url: response = requests.get( url, headers={"Authorization": f"Bearer {self.access_token}"}, ) if response.ok: data = response.json() all_documents.extend(data["documents"]) url = data.get("next", None) else: raise Exception( f"Failed to download {url} (status: {response.status_code})" ) return all_documents def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]: """Gets all project details for the given docset ID""" url = f"{self.api}/projects?docset.id={docset_id}" all_projects = [] while url: response = requests.request( "GET", url, 
headers={"Authorization": f"Bearer {self.access_token}"}, data={}, ) if response.ok: data = response.json() all_projects.extend(data["projects"]) url = data.get("next", None) else: raise Exception( f"Failed to download {url} (status: {response.status_code})" ) return all_projects def _metadata_for_project(self, project: Dict) -> Dict: """Gets project metadata for all files""" project_id = project.get("id") url = f"{self.api}/projects/{project_id}/artifacts/latest" all_artifacts = [] while url: response = requests.request( "GET", url, headers={"Authorization": f"Bearer {self.access_token}"}, data={}, ) if response.ok: data = response.json() all_artifacts.extend(data["artifacts"]) url = data.get("next", None) else: raise Exception( f"Failed to download {url} (status: {response.status_code})" ) per_file_metadata = {} for artifact in all_artifacts: artifact_name = artifact.get("name") artifact_url = artifact.get("url") artifact_doc = artifact.get("document") if artifact_name == "report-values.xml" and artifact_url and artifact_doc: doc_id = artifact_doc["id"] metadata: Dict = {} # the evaluated XML for each document is named after the project response = requests.request( "GET", f"{artifact_url}/content", headers={"Authorization": f"Bearer {self.access_token}"}, data={}, ) if response.ok: try: from lxml import etree except ImportError: raise ImportError( "Could not import lxml python package. " "Please install it with `pip install lxml`." ) artifact_tree = etree.parse(io.BytesIO(response.content)) artifact_root = artifact_tree.getroot() ns = artifact_root.nsmap entries = artifact_root.xpath("//pr:Entry", namespaces=ns) for entry in entries: heading = entry.xpath("./pr:Heading", namespaces=ns)[0].text value = " ".join( entry.xpath("./pr:Value", namespaces=ns)[0].itertext() ).strip() metadata[heading] = value per_file_metadata[doc_id] = metadata else: raise Exception( f"Failed to download {artifact_url}/content " + "(status: {response.status_code})" ) return per_file_metadata def _load_chunks_for_document( self, docset_id: str, document: Dict, doc_metadata: Optional[Dict] = None ) -> List[Document]: """Load chunks for a document.""" document_id = document["id"] url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml" response = requests.request( "GET", url, headers={"Authorization": f"Bearer {self.access_token}"}, data={}, ) if response.ok: return self._parse_dgml(document, response.content, doc_metadata) else: raise Exception( f"Failed to download {url} (status: {response.status_code})" ) [docs] def load(self) -> List[Document]: """Load documents.""" chunks: List[Document] = [] if self.access_token and self.docset_id: # remote mode _document_details = self._document_details_for_docset_id(self.docset_id) if self.document_ids: _document_details = [ d for d in _document_details if d["id"] in self.document_ids ] _project_details = self._project_details_for_docset_id(self.docset_id) combined_project_metadata = {} if _project_details: # if there are any projects for this docset, load project metadata for project in _project_details: metadata = self._metadata_for_project(project) combined_project_metadata.update(metadata) for doc in _document_details: doc_metadata = combined_project_metadata.get(doc["id"]) chunks += self._load_chunks_for_document( self.docset_id, doc, doc_metadata ) elif self.file_paths: # local mode (for integration testing, or pre-downloaded XML) for path in self.file_paths: path = Path(path) with open(path, "rb") as file: chunks += self._parse_dgml( { DOCUMENT_ID_KEY: 
path.name, DOCUMENT_NAME_KEY: path.name, }, file.read(), ) return chunks
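A minimal usage sketch for the Docugami loader above. The constructor is not shown in this excerpt, so the keyword fields below (docset_id, access_token) are inferred from the attributes read in load(); all values are placeholders.

from langchain.document_loaders.docugami import DocugamiLoader

# Remote mode: fetch structural chunks for every document in a docset.
loader = DocugamiLoader(
    docset_id="your-docset-id",          # placeholder docset id
    access_token="your-docugami-token",  # placeholder API token
)
chunks = loader.load()
print(chunks[0].metadata)  # includes the xpath/id/name/structure/tag keys set in _create_doc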
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
a2336470-d394-4477-a335-27f9932e8d93
Source code for langchain.document_loaders.gcs_file """Loading logic for loading documents from a GCS file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class GCSFileLoader(BaseLoader): """Loading logic for loading documents from GCS.""" def __init__(self, project_name: str, bucket: str, blob: str): """Initialize with bucket and key name.""" self.bucket = bucket self.blob = blob self.project_name = project_name [docs] def load(self) -> List[Document]: """Load documents.""" try: from google.cloud import storage except ImportError: raise ValueError( "Could not import google-cloud-storage python package. " "Please install it with `pip install google-cloud-storage`." ) # Initialise a client storage_client = storage.Client(self.project_name) # Create a bucket object for our bucket bucket = storage_client.get_bucket(self.bucket) # Create a blob object from the filepath blob = bucket.blob(self.blob) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.blob}" os.makedirs(os.path.dirname(file_path), exist_ok=True) # Download the file to a destination blob.download_to_filename(file_path) loader = UnstructuredFileLoader(file_path) return loader.load()
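A usage sketch for GCSFileLoader; the project, bucket and blob names are placeholders, and both google-cloud-storage and unstructured must be installed.

from langchain.document_loaders.gcs_file import GCSFileLoader

loader = GCSFileLoader(
    project_name="my-gcp-project",   # placeholder project
    bucket="my-bucket",              # placeholder bucket
    blob="reports/summary.pdf",      # placeholder blob path
)
docs = loader.load()  # downloads the blob to a temp dir and parses it with UnstructuredFileLoader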
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gcs_file.html
372cd748-d352-4f3f-8a99-7404ff47f393
Source code for langchain.document_loaders.facebook_chat """Loader that loads Facebook chat json dump.""" import datetime import json from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader def concatenate_rows(row: dict) -> str: """Combine message information in a readable format ready to be used.""" sender = row["sender_name"] text = row["content"] date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime( "%Y-%m-%d %H:%M:%S" ) return f"{sender} on {date}: {text}\n\n" [docs]class FacebookChatLoader(BaseLoader): """Loader that loads Facebook messages json directory dump.""" def __init__(self, path: str): """Initialize with path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) text = "".join( concatenate_rows(message) for message in d["messages"] if message.get("content") and isinstance(message["content"], str) ) metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)]
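A usage sketch for FacebookChatLoader; the path below is a placeholder pointing at a message_1.json file from a Facebook data export.

from langchain.document_loaders.facebook_chat import FacebookChatLoader

loader = FacebookChatLoader(path="messages/inbox/friend_name/message_1.json")  # placeholder path
docs = loader.load()
print(docs[0].page_content[:200])  # "sender on YYYY-MM-DD HH:MM:SS: text" entries concatenated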
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/facebook_chat.html
a1b61b6a-30ff-4f82-a526-23aaec8c4ae2
Source code for langchain.document_loaders.modern_treasury """Loader that fetches data from Modern Treasury""" import json import urllib.request from base64 import b64encode from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_value MODERN_TREASURY_ENDPOINTS = { "payment_orders": "https://app.moderntreasury.com/api/payment_orders", "expected_payments": "https://app.moderntreasury.com/api/expected_payments", "returns": "https://app.moderntreasury.com/api/returns", "incoming_payment_details": "https://app.moderntreasury.com/api/\ incoming_payment_details", "counterparties": "https://app.moderntreasury.com/api/counterparties", "internal_accounts": "https://app.moderntreasury.com/api/internal_accounts", "external_accounts": "https://app.moderntreasury.com/api/external_accounts", "transactions": "https://app.moderntreasury.com/api/transactions", "ledgers": "https://app.moderntreasury.com/api/ledgers", "ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts", "ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions", "events": "https://app.moderntreasury.com/api/events", "invoices": "https://app.moderntreasury.com/api/invoices", } [docs]class ModernTreasuryLoader(BaseLoader): """Loader that fetches data from Modern Treasury.""" def __init__( self, resource: str, organization_id: Optional[str] = None, api_key: Optional[str] = None, ) -> None: self.resource = resource organization_id = organization_id or get_from_env( "organization_id", "MODERN_TREASURY_ORGANIZATION_ID" ) api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY") credentials = f"{organization_id}:{api_key}".encode("utf-8") basic_auth_token = b64encode(credentials).decode("utf-8") self.headers = {"Authorization": f"Basic {basic_auth_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_value(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) [docs] def load(self) -> List[Document]: return self._get_resource()
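A usage sketch for ModernTreasuryLoader; credentials can be passed explicitly as below (placeholder values) or read from the MODERN_TREASURY_ORGANIZATION_ID / MODERN_TREASURY_API_KEY environment variables.

from langchain.document_loaders.modern_treasury import ModernTreasuryLoader

loader = ModernTreasuryLoader(
    resource="payment_orders",    # any key of MODERN_TREASURY_ENDPOINTS
    organization_id="my-org-id",  # placeholder
    api_key="my-api-key",         # placeholder
)
docs = loader.load()  # a single Document containing the stringified JSON response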
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/modern_treasury.html
779d989e-2e6f-41e4-9cd6-d4b6efb60a5d
Source code for langchain.document_loaders.s3_file """Loading logic for loading documents from an s3 file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class S3FileLoader(BaseLoader): """Loading logic for loading documents from s3.""" def __init__(self, bucket: str, key: str): """Initialize with bucket and key name.""" self.bucket = bucket self.key = key [docs] def load(self) -> List[Document]: """Load documents.""" try: import boto3 except ImportError: raise ImportError( "Could not import `boto3` python package. " "Please install it with `pip install boto3`." ) s3 = boto3.client("s3") with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.key}" os.makedirs(os.path.dirname(file_path), exist_ok=True) s3.download_file(self.bucket, self.key, file_path) loader = UnstructuredFileLoader(file_path) return loader.load()
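A usage sketch for S3FileLoader; bucket and key are placeholders, and boto3 credentials are resolved the usual boto3 way.

from langchain.document_loaders.s3_file import S3FileLoader

loader = S3FileLoader(bucket="my-bucket", key="docs/handbook.pdf")  # placeholder bucket/key
docs = loader.load()  # downloads the object via boto3 and parses it with UnstructuredFileLoader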
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_file.html
c8cc2f8f-e906-49f0-9e0a-600054af3c01
Source code for langchain.document_loaders.github from abc import ABC from datetime import datetime from typing import Dict, Iterator, List, Literal, Optional, Union import requests from pydantic import BaseModel, root_validator, validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_dict_or_env class BaseGitHubLoader(BaseLoader, BaseModel, ABC): """Load issues of a GitHub repository.""" repo: str """Name of repository""" access_token: str """Personal access token - see https://github.com/settings/tokens?type=beta""" @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that access token exists in environment.""" values["access_token"] = get_from_dict_or_env( values, "access_token", "GITHUB_PERSONAL_ACCESS_TOKEN" ) return values @property def headers(self) -> Dict[str, str]: return { "Accept": "application/vnd.github+json", "Authorization": f"Bearer {self.access_token}", } [docs]class GitHubIssuesLoader(BaseGitHubLoader): include_prs: bool = True """If True include Pull Requests in results, otherwise ignore them.""" milestone: Union[int, Literal["*", "none"], None] = None """If integer is passed, it should be a milestone's number field. If the string '*' is passed, issues with any milestone are accepted. If the string 'none' is passed, issues without milestones are returned. """ state: Optional[Literal["open", "closed", "all"]] = None """Filter on issue state. Can be one of: 'open', 'closed', 'all'.""" assignee: Optional[str] = None """Filter on assigned user. Pass 'none' for no user and '*' for any user.""" creator: Optional[str] = None """Filter on the user that created the issue.""" mentioned: Optional[str] = None """Filter on a user that's mentioned in the issue.""" labels: Optional[List[str]] = None """Label names to filter one. Example: bug,ui,@high.""" sort: Optional[Literal["created", "updated", "comments"]] = None """What to sort results by. Can be one of: 'created', 'updated', 'comments'. Default is 'created'.""" direction: Optional[Literal["asc", "desc"]] = None """The direction to sort the results by. Can be one of: 'asc', 'desc'.""" since: Optional[str] = None """Only show notifications updated after the given time. This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ.""" @validator("since") def validate_since(cls, v: Optional[str]) -> Optional[str]: if v: try: datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ") except ValueError: raise ValueError( "Invalid value for 'since'. Expected a date string in " f"YYYY-MM-DDTHH:MM:SSZ format. Received: {v}" ) return v [docs] def lazy_load(self) -> Iterator[Document]: """ Get issues of a GitHub repository. Returns: A list of Documents with attributes: - page_content - metadata - url - title - creator - created_at - last_update_time - closed_time - number of comments - state - labels - assignee - assignees - milestone - locked - number - is_pull_request """ url: Optional[str] = self.url while url: response = requests.get(url, headers=self.headers) response.raise_for_status() issues = response.json() for issue in issues: doc = self.parse_issue(issue) if not self.include_prs and doc.metadata["is_pull_request"]: continue yield doc if response.links and response.links.get("next"): url = response.links["next"]["url"] else: url = None [docs] def load(self) -> List[Document]: """ Get issues of a GitHub repository. 
Returns: A list of Documents with attributes: - page_content - metadata - url - title - creator - created_at - last_update_time - closed_time - number of comments - state - labels - assignee - assignees - milestone - locked - number - is_pull_request """ return list(self.lazy_load()) [docs] def parse_issue(self, issue: dict) -> Document: """Create Document objects from a list of GitHub issues.""" metadata = { "url": issue["html_url"], "title": issue["title"], "creator": issue["user"]["login"], "created_at": issue["created_at"], "comments": issue["comments"], "state": issue["state"], "labels": [label["name"] for label in issue["labels"]], "assignee": issue["assignee"]["login"] if issue["assignee"] else None, "milestone": issue["milestone"]["title"] if issue["milestone"] else None, "locked": issue["locked"], "number": issue["number"], "is_pull_request": "pull_request" in issue, } content = issue["body"] if issue["body"] is not None else "" return Document(page_content=content, metadata=metadata) @property def query_params(self) -> str: labels = ",".join(self.labels) if self.labels else self.labels query_params_dict = { "milestone": self.milestone, "state": self.state, "assignee": self.assignee, "creator": self.creator, "mentioned": self.mentioned, "labels": labels, "sort": self.sort, "direction": self.direction, "since": self.since, } query_params_list = [ f"{k}={v}" for k, v in query_params_dict.items() if v is not None ] query_params = "&".join(query_params_list) return query_params @property def url(self) -> str: return f"https://api.github.com/repos/{self.repo}/issues?{self.query_params}"
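A usage sketch for GitHubIssuesLoader; the repository and token below are placeholders (the token can also be supplied via the GITHUB_PERSONAL_ACCESS_TOKEN environment variable).

from langchain.document_loaders.github import GitHubIssuesLoader

loader = GitHubIssuesLoader(
    repo="owner/repository",  # placeholder "owner/name"
    access_token="ghp_xxx",   # placeholder token
    include_prs=False,        # skip pull requests
    state="open",
    labels=["bug"],
)
for doc in loader.lazy_load():
    print(doc.metadata["number"], doc.metadata["title"])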
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/github.html
bc16c90b-7f53-4d98-9d4b-9181316705de
Source code for langchain.document_loaders.discord """Load from Discord chat dump""" from __future__ import annotations from typing import TYPE_CHECKING, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import pandas as pd [docs]class DiscordChatLoader(BaseLoader): """Load Discord chat logs.""" def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"): """Initialize with a Pandas DataFrame containing chat logs.""" if not isinstance(chat_log, pd.DataFrame): raise ValueError( f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}" ) self.chat_log = chat_log self.user_id_col = user_id_col [docs] def load(self) -> List[Document]: """Load all chat messages.""" result = [] for _, row in self.chat_log.iterrows(): user_id = row[self.user_id_col] metadata = row.to_dict() metadata.pop(self.user_id_col) result.append(Document(page_content=user_id, metadata=metadata)) return result
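A self-contained sketch for DiscordChatLoader using an in-memory DataFrame; apart from the default "ID" user column, the column names and rows are placeholders.

import pandas as pd
from langchain.document_loaders.discord import DiscordChatLoader

chat_log = pd.DataFrame(
    {"ID": ["user_1", "user_2"], "Content": ["hello", "hi there"]}  # placeholder rows
)
loader = DiscordChatLoader(chat_log, user_id_col="ID")
docs = loader.load()  # one Document per row: page_content is the user id, remaining columns become metadata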
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/discord.html
346227b5-c1bd-4624-8276-46351f9a7aa7
Source code for langchain.document_loaders.fauna from typing import Iterator, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class FaunaLoader(BaseLoader): """FaunaDB Loader. Attributes: query (str): The FQL query string to execute. page_content_field (str): The field that contains the content of each page. secret (str): The secret key for authenticating to FaunaDB. metadata_fields (Optional[Sequence[str]]): Optional list of field names to include in metadata. """ def __init__( self, query: str, page_content_field: str, secret: str, metadata_fields: Optional[Sequence[str]] = None, ): self.query = query self.page_content_field = page_content_field self.secret = secret self.metadata_fields = metadata_fields [docs] def load(self) -> List[Document]: return list(self.lazy_load()) [docs] def lazy_load(self) -> Iterator[Document]: try: from fauna import Page, fql from fauna.client import Client from fauna.encoding import QuerySuccess except ImportError: raise ImportError( "Could not import fauna python package. " "Please install it with `pip install fauna`." ) # Create Fauna Client client = Client(secret=self.secret) # Run FQL Query response: QuerySuccess = client.query(fql(self.query)) page: Page = response.data for result in page: if result is not None: document_dict = dict(result.items()) page_content = "" for key, value in document_dict.items(): if key == self.page_content_field: page_content = value document: Document = Document( page_content=page_content, metadata={"id": result.id, "ts": result.ts}, ) yield document if page.after is not None: yield Document( page_content="Next Page Exists", metadata={"after": page.after}, )
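A usage sketch for FaunaLoader; the FQL query, field name and secret are placeholders for whatever your Fauna database exposes.

from langchain.document_loaders.fauna import FaunaLoader

loader = FaunaLoader(
    query="Item.all()",          # placeholder FQL query
    page_content_field="text",   # placeholder field used as page_content
    secret="fauna-secret-key",   # placeholder secret
)
docs = loader.load()  # a trailing "Next Page Exists" Document is appended when more pages remain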
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/fauna.html
da3e376d-0db8-47d1-a92e-fbb57b802577
Source code for langchain.document_loaders.arxiv from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.arxiv import ArxivAPIWrapper [docs]class ArxivLoader(BaseLoader): """Loads a query result from arxiv.org into a list of Documents. Each Document represents one arxiv paper from the result. The loader converts the original PDF format into plain text. """ def __init__( self, query: str, load_max_docs: Optional[int] = 100, load_all_available_meta: Optional[bool] = False, ): self.query = query self.load_max_docs = load_max_docs self.load_all_available_meta = load_all_available_meta [docs] def load(self) -> List[Document]: arxiv_client = ArxivAPIWrapper( load_max_docs=self.load_max_docs, load_all_available_meta=self.load_all_available_meta, ) docs = arxiv_client.load(self.query) return docs
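A usage sketch for ArxivLoader; the query string is a placeholder and the arxiv package must be installed for the underlying ArxivAPIWrapper.

from langchain.document_loaders.arxiv import ArxivLoader

loader = ArxivLoader(query="quantum computing", load_max_docs=2)  # placeholder query
docs = loader.load()
print(docs[0].metadata)  # metadata is produced by ArxivAPIWrapper; pass load_all_available_meta=True for the full set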
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/arxiv.html
5e1a805b-9754-44ea-9a20-3eff52a02300
Source code for langchain.document_loaders.python import tokenize from langchain.document_loaders.text import TextLoader [docs]class PythonLoader(TextLoader): """ Load Python files, respecting any non-default encoding if specified. """ def __init__(self, file_path: str): with open(file_path, "rb") as f: encoding, _ = tokenize.detect_encoding(f.readline) super().__init__(file_path=file_path, encoding=encoding)
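A usage sketch for PythonLoader; the file path is a placeholder.

from langchain.document_loaders.python import PythonLoader

loader = PythonLoader("some_module.py")  # placeholder path; the encoding is detected with tokenize.detect_encoding
docs = loader.load()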
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/python.html
985d9022-2b71-4a96-97fb-b8a19b52d1e0
Source code for langchain.document_loaders.bigquery from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: from google.auth.credentials import Credentials [docs]class BigQueryLoader(BaseLoader): """Loads a query result from BigQuery into a list of documents. Each document represents one row of the result. The `page_content_columns` are written into the `page_content` of the document. The `metadata_columns` are written into the `metadata` of the document. By default, all columns are written into the `page_content` and none into the `metadata`. """ def __init__( self, query: str, project: Optional[str] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None, credentials: Optional[Credentials] = None, ): """Initialize BigQuery document loader. Args: query: The query to run in BigQuery. project: Optional. The project to run the query in. page_content_columns: Optional. The columns to write into the `page_content` of the document. metadata_columns: Optional. The columns to write into the `metadata` of the document. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine (`google.auth.compute_engine.Credentials`) or Service Account (`google.oauth2.service_account.Credentials`) credentials directly. """ self.query = query self.project = project self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns self.credentials = credentials [docs] def load(self) -> List[Document]: try: from google.cloud import bigquery except ImportError as ex: raise ValueError( "Could not import google-cloud-bigquery python package. " "Please install it with `pip install google-cloud-bigquery`." ) from ex bq_client = bigquery.Client(credentials=self.credentials, project=self.project) query_result = bq_client.query(self.query).result() docs: List[Document] = [] page_content_columns = self.page_content_columns metadata_columns = self.metadata_columns if page_content_columns is None: page_content_columns = [column.name for column in query_result.schema] if metadata_columns is None: metadata_columns = [] for row in query_result: page_content = "\n".join( f"{k}: {v}" for k, v in row.items() if k in page_content_columns ) metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) docs.append(doc) return docs
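A usage sketch for BigQueryLoader; the table and column names below are placeholders, and google-cloud-bigquery must be installed with default credentials available.

from langchain.document_loaders.bigquery import BigQueryLoader

query = "SELECT title, body, author FROM `my-project.my_dataset.articles` LIMIT 10"  # placeholder query
loader = BigQueryLoader(
    query,
    page_content_columns=["title", "body"],  # rendered as "column: value" lines
    metadata_columns=["author"],
)
docs = loader.load()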
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bigquery.html
671b26a1-9da6-4a62-83fd-23cd66947ca8
Source code for langchain.document_loaders.azure_blob_storage_file """Loading logic for loading documents from an Azure Blob Storage file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class AzureBlobStorageFileLoader(BaseLoader): """Loading logic for loading documents from Azure Blob Storage.""" def __init__(self, conn_str: str, container: str, blob_name: str): """Initialize with connection string, container and blob name.""" self.conn_str = conn_str self.container = container self.blob = blob_name [docs] def load(self) -> List[Document]: """Load documents.""" try: from azure.storage.blob import BlobClient except ImportError as exc: raise ValueError( "Could not import azure storage blob python package. " "Please install it with `pip install azure-storage-blob`." ) from exc client = BlobClient.from_connection_string( conn_str=self.conn_str, container_name=self.container, blob_name=self.blob ) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.container}/{self.blob}" os.makedirs(os.path.dirname(file_path), exist_ok=True) with open(f"{file_path}", "wb") as file: blob_data = client.download_blob() blob_data.readinto(file) loader = UnstructuredFileLoader(file_path) return loader.load()
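A usage sketch for AzureBlobStorageFileLoader; connection string, container and blob name are placeholders.

from langchain.document_loaders.azure_blob_storage_file import AzureBlobStorageFileLoader

loader = AzureBlobStorageFileLoader(
    conn_str="<azure-storage-connection-string>",  # placeholder
    container="my-container",                      # placeholder
    blob_name="docs/note.docx",                    # placeholder
)
docs = loader.load()  # downloads the blob to a temp dir and parses it with UnstructuredFileLoader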
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/azure_blob_storage_file.html
b7b07f59-be74-4239-89e6-44e181ac207f
Source code for langchain.document_loaders.duckdb_loader from typing import Dict, List, Optional, cast from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class DuckDBLoader(BaseLoader): """Loads a query result from DuckDB into a list of documents. Each document represents one row of the result. The `page_content_columns` are written into the `page_content` of the document. The `metadata_columns` are written into the `metadata` of the document. By default, all columns are written into the `page_content` and none into the `metadata`. """ def __init__( self, query: str, database: str = ":memory:", read_only: bool = False, config: Optional[Dict[str, str]] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None, ): self.query = query self.database = database self.read_only = read_only self.config = config or {} self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns [docs] def load(self) -> List[Document]: try: import duckdb except ImportError: raise ImportError( "Could not import duckdb python package. " "Please install it with `pip install duckdb`." ) docs = [] with duckdb.connect( database=self.database, read_only=self.read_only, config=self.config ) as con: query_result = con.execute(self.query) results = query_result.fetchall() description = cast(list, query_result.description) field_names = [c[0] for c in description] if self.page_content_columns is None: page_content_columns = field_names else: page_content_columns = self.page_content_columns if self.metadata_columns is None: metadata_columns = [] else: metadata_columns = self.metadata_columns for result in results: page_content = "\n".join( f"{column}: {result[field_names.index(column)]}" for column in page_content_columns ) metadata = { column: result[field_names.index(column)] for column in metadata_columns } doc = Document(page_content=page_content, metadata=metadata) docs.append(doc) return docs
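A self-contained sketch for DuckDBLoader against the default in-memory database.

from langchain.document_loaders.duckdb_loader import DuckDBLoader

loader = DuckDBLoader(
    "SELECT 1 AS a, 2 AS b",
    page_content_columns=["a"],  # page_content becomes "a: 1"
    metadata_columns=["b"],      # metadata becomes {"b": 2}
)
docs = loader.load()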
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/duckdb_loader.html
d6a52970-5b4c-4795-969a-60b4057b748d
Source code for langchain.document_loaders.notion """Loader that loads Notion directory dump.""" from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class NotionDirectoryLoader(BaseLoader): """Loader that loads Notion directory dump.""" def __init__(self, path: str): """Initialize with path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" ps = list(Path(self.file_path).glob("**/*.md")) docs = [] for p in ps: with open(p) as f: text = f.read() metadata = {"source": str(p)} docs.append(Document(page_content=text, metadata=metadata)) return docs
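A usage sketch for NotionDirectoryLoader; the folder name is a placeholder for an exported Notion markdown dump.

from langchain.document_loaders.notion import NotionDirectoryLoader

loader = NotionDirectoryLoader("Notion_DB")  # placeholder folder of exported .md files
docs = loader.load()  # one Document per markdown file, with the file path as "source"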
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notion.html
59e6d1d0-b5e1-491a-8bbd-1bb293b1c9f3
Source code for langchain.document_loaders.psychic """Loader that loads documents from Psychic.dev.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class PsychicLoader(BaseLoader): """Loader that loads documents from Psychic.dev.""" def __init__(self, api_key: str, connector_id: str, connection_id: str): """Initialize with API key, connector id, and connection id.""" try: from psychicapi import ConnectorId, Psychic # noqa: F401 except ImportError: raise ImportError( "`psychicapi` package not found, please run `pip install psychicapi`" ) self.psychic = Psychic(secret_key=api_key) self.connector_id = ConnectorId(connector_id) self.connection_id = connection_id [docs] def load(self) -> List[Document]: """Load documents.""" psychic_docs = self.psychic.get_documents(self.connector_id, self.connection_id) return [ Document( page_content=doc["content"], metadata={"title": doc["title"], "source": doc["uri"]}, ) for doc in psychic_docs ]
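A usage sketch for PsychicLoader; the key, connector id and connection id are placeholders, and the connector id string must be one accepted by psychicapi's ConnectorId enum.

from langchain.document_loaders.psychic import PsychicLoader

loader = PsychicLoader(
    api_key="<psychic-secret-key>",   # placeholder
    connector_id="notion",            # placeholder connector id string
    connection_id="<connection-id>",  # placeholder
)
docs = loader.load()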
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/psychic.html
875d1e48-a467-42d0-93c0-03251c26622d
Source code for langchain.document_loaders.apify_dataset """Logic for loading documents from Apify datasets.""" from typing import Any, Callable, Dict, List from pydantic import BaseModel, root_validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ApifyDatasetLoader(BaseLoader, BaseModel): """Logic for loading documents from Apify datasets.""" apify_client: Any dataset_id: str """The ID of the dataset on the Apify platform.""" dataset_mapping_function: Callable[[Dict], Document] """A custom function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class.""" def __init__( self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document] ): """Initialize the loader with an Apify dataset ID and a mapping function. Args: dataset_id (str): The ID of the dataset on the Apify platform. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. """ super().__init__( dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function ) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate environment.""" try: from apify_client import ApifyClient values["apify_client"] = ApifyClient() except ImportError: raise ImportError( "Could not import apify-client Python package. " "Please install it with `pip install apify-client`." ) return values [docs] def load(self) -> List[Document]: """Load documents.""" dataset_items = self.apify_client.dataset(self.dataset_id).list_items().items return list(map(self.dataset_mapping_function, dataset_items))
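A usage sketch for ApifyDatasetLoader; the dataset id and the item keys used in the mapping function are placeholders for whatever your Apify dataset items contain.

from langchain.docstore.document import Document
from langchain.document_loaders.apify_dataset import ApifyDatasetLoader

loader = ApifyDatasetLoader(
    dataset_id="<apify-dataset-id>",  # placeholder
    dataset_mapping_function=lambda item: Document(
        page_content=item.get("text", ""),           # placeholder item keys
        metadata={"source": item.get("url", "")},
    ),
)
docs = loader.load()  # the ApifyClient itself is constructed in validate_environment above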
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/apify_dataset.html
e06cd5b3-9ddb-4d13-aa2f-bd54318b55b9
Source code for langchain.document_loaders.html """Loader that uses unstructured to load HTML files.""" from typing import List from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class UnstructuredHTMLLoader(UnstructuredFileLoader): """Loader that uses unstructured to load HTML files.""" def _get_elements(self) -> List: from unstructured.partition.html import partition_html return partition_html(filename=self.file_path, **self.unstructured_kwargs)
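A usage sketch for UnstructuredHTMLLoader; the file path is a placeholder and the unstructured package is required.

from langchain.document_loaders.html import UnstructuredHTMLLoader

loader = UnstructuredHTMLLoader("page.html")  # placeholder path
docs = loader.load()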
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/html.html
2631f346-ef76-4ed6-94fb-abf79be2f813
Source code for langchain.document_loaders.s3_directory """Loading logic for loading documents from an s3 directory.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.s3_file import S3FileLoader [docs]class S3DirectoryLoader(BaseLoader): """Loading logic for loading documents from s3.""" def __init__(self, bucket: str, prefix: str = ""): """Initialize with bucket and key name.""" self.bucket = bucket self.prefix = prefix [docs] def load(self) -> List[Document]: """Load documents.""" try: import boto3 except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) s3 = boto3.resource("s3") bucket = s3.Bucket(self.bucket) docs = [] for obj in bucket.objects.filter(Prefix=self.prefix): loader = S3FileLoader(self.bucket, obj.key) docs.extend(loader.load()) return docs
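A usage sketch for S3DirectoryLoader; bucket and prefix are placeholders.

from langchain.document_loaders.s3_directory import S3DirectoryLoader

loader = S3DirectoryLoader(bucket="my-bucket", prefix="reports/")  # placeholder bucket/prefix
docs = loader.load()  # each matching object is delegated to S3FileLoader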
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_directory.html
20dcb4f9-15ef-4ea2-bc27-8c98453d9546
Source code for langchain.document_loaders.url """Loader that uses unstructured to load HTML files.""" import logging from typing import Any, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class UnstructuredURLLoader(BaseLoader): """Loader that uses unstructured to load HTML files.""" def __init__( self, urls: List[str], continue_on_failure: bool = True, mode: str = "single", show_progress_bar: bool = False, **unstructured_kwargs: Any, ): """Initialize with file path.""" try: import unstructured # noqa:F401 from unstructured.__version__ import __version__ as __unstructured_version__ self.__version = __unstructured_version__ except ImportError: raise ValueError( "unstructured package not found, please install it with " "`pip install unstructured`" ) self._validate_mode(mode) self.mode = mode headers = unstructured_kwargs.pop("headers", {}) if len(headers.keys()) != 0: warn_about_headers = False if self.__is_non_html_available(): warn_about_headers = not self.__is_headers_available_for_non_html() else: warn_about_headers = not self.__is_headers_available_for_html() if warn_about_headers: logger.warning( "You are using an old version of unstructured. " "The headers parameter is ignored" ) self.urls = urls self.continue_on_failure = continue_on_failure self.headers = headers self.unstructured_kwargs = unstructured_kwargs self.show_progress_bar = show_progress_bar def _validate_mode(self, mode: str) -> None: _valid_modes = {"single", "elements"} if mode not in _valid_modes: raise ValueError( f"Got {mode} for `mode`, but should be one of `{_valid_modes}`" ) def __is_headers_available_for_html(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 7) def __is_headers_available_for_non_html(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 13) def __is_non_html_available(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 12) [docs] def load(self) -> List[Document]: """Load file.""" from unstructured.partition.auto import partition from unstructured.partition.html import partition_html docs: List[Document] = list() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. " "Please install with 'pip install tqdm' or set " "show_progress_bar=False." 
) from e urls = tqdm(self.urls) else: urls = self.urls for url in urls: try: if self.__is_non_html_available(): if self.__is_headers_available_for_non_html(): elements = partition( url=url, headers=self.headers, **self.unstructured_kwargs ) else: elements = partition(url=url, **self.unstructured_kwargs) else: if self.__is_headers_available_for_html(): elements = partition_html( url=url, headers=self.headers, **self.unstructured_kwargs ) else: elements = partition_html(url=url, **self.unstructured_kwargs) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exception: {e}") continue else: raise e if self.mode == "single": text = "\n\n".join([str(el) for el in elements]) metadata = {"source": url} docs.append(Document(page_content=text, metadata=metadata)) elif self.mode == "elements": for element in elements: metadata = element.metadata.to_dict() metadata["category"] = element.category docs.append(Document(page_content=str(element), metadata=metadata)) return docs
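A usage sketch for UnstructuredURLLoader; the URLs are placeholders.

from langchain.document_loaders.url import UnstructuredURLLoader

urls = ["https://example.com/page-1", "https://example.com/page-2"]  # placeholder URLs
loader = UnstructuredURLLoader(urls=urls, mode="elements", continue_on_failure=True)
docs = loader.load()  # in "elements" mode each unstructured element becomes its own Document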
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url.html
ca0ce80a-403a-468f-acf4-d1bcecdb83ce
Source code for langchain.document_loaders.onedrive """Loader that loads data from OneDrive""" from __future__ import annotations import logging import os import tempfile from enum import Enum from pathlib import Path from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union from pydantic import BaseModel, BaseSettings, Field, FilePath, SecretStr from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.onedrive_file import OneDriveFileLoader if TYPE_CHECKING: from O365 import Account from O365.drive import Drive, Folder SCOPES = ["offline_access", "Files.Read.All"] logger = logging.getLogger(__name__) class _OneDriveSettings(BaseSettings): client_id: str = Field(..., env="O365_CLIENT_ID") client_secret: SecretStr = Field(..., env="O365_CLIENT_SECRET") class Config: env_prefix = "" case_sentive = False env_file = ".env" class _OneDriveTokenStorage(BaseSettings): token_path: FilePath = Field(Path.home() / ".credentials" / "o365_token.txt") class _FileType(str, Enum): DOC = "doc" DOCX = "docx" PDF = "pdf" class _SupportedFileTypes(BaseModel): file_types: List[_FileType] def fetch_mime_types(self) -> Dict[str, str]: mime_types_mapping = {} for file_type in self.file_types: if file_type.value == "doc": mime_types_mapping[file_type.value] = "application/msword" elif file_type.value == "docx": mime_types_mapping[ file_type.value ] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document" # noqa: E501 elif file_type.value == "pdf": mime_types_mapping[file_type.value] = "application/pdf" return mime_types_mapping [docs]class OneDriveLoader(BaseLoader, BaseModel): settings: _OneDriveSettings = Field(default_factory=_OneDriveSettings) drive_id: str = Field(...) folder_path: Optional[str] = None object_ids: Optional[List[str]] = None auth_with_token: bool = False def _auth(self) -> Type[Account]: """ Authenticates the OneDrive API client using the specified authentication method and returns the Account object. Returns: Type[Account]: The authenticated Account object. """ try: from O365 import FileSystemTokenBackend except ImportError: raise ImportError( "O365 package not found, please install it with `pip install o365`" ) if self.auth_with_token: token_storage = _OneDriveTokenStorage() token_path = token_storage.token_path token_backend = FileSystemTokenBackend( token_path=token_path.parent, token_filename=token_path.name ) account = Account( credentials=( self.settings.client_id, self.settings.client_secret.get_secret_value(), ), scopes=SCOPES, token_backend=token_backend, **{"raise_http_errors": False}, ) else: token_backend = FileSystemTokenBackend( token_path=Path.home() / ".credentials" ) account = Account( credentials=( self.settings.client_id, self.settings.client_secret.get_secret_value(), ), scopes=SCOPES, token_backend=token_backend, **{"raise_http_errors": False}, ) # make the auth account.authenticate() return account def _get_folder_from_path(self, drive: Type[Drive]) -> Union[Folder, Drive]: """ Returns the folder or drive object located at the specified path relative to the given drive. Args: drive (Type[Drive]): The root drive from which the folder path is relative. Returns: Union[Folder, Drive]: The folder or drive object located at the specified path. Raises: FileNotFoundError: If the path does not exist. 
""" subfolder_drive = drive if self.folder_path is None: return subfolder_drive subfolders = [f for f in self.folder_path.split("/") if f != ""] if len(subfolders) == 0: return subfolder_drive items = subfolder_drive.get_items() for subfolder in subfolders: try: subfolder_drive = list(filter(lambda x: subfolder in x.name, items))[0] items = subfolder_drive.get_items() except (IndexError, AttributeError): raise FileNotFoundError("Path {} not exist.".format(self.folder_path)) return subfolder_drive def _load_from_folder(self, folder: Type[Folder]) -> List[Document]: """ Loads all supported document files from the specified folder and returns a list of Document objects. Args: folder (Type[Folder]): The folder object to load the documents from. Returns: List[Document]: A list of Document objects representing the loaded documents. """ docs = [] file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"]) file_mime_types = file_types.fetch_mime_types() items = folder.get_items() with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}" os.makedirs(os.path.dirname(file_path), exist_ok=True) for file in items: if file.is_file: if file.mime_type in list(file_mime_types.values()): loader = OneDriveFileLoader(file=file) docs.extend(loader.load()) return docs def _load_from_object_ids(self, drive: Type[Drive]) -> List[Document]: """ Loads all supported document files from the specified OneDrive drive based on their object IDs and returns a list of Document objects. Args: drive (Type[Drive]): The OneDrive drive object to load the documents from. Returns: List[Document]: A list of Document objects representing the loaded documents. """ docs = [] file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"]) file_mime_types = file_types.fetch_mime_types() with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}" os.makedirs(os.path.dirname(file_path), exist_ok=True) for object_id in self.object_ids if self.object_ids else [""]: file = drive.get_item(object_id) if not file: logging.warning( "There isn't a file with " f"object_id {object_id} in drive {drive}." ) continue if file.is_file: if file.mime_type in list(file_mime_types.values()): loader = OneDriveFileLoader(file=file) docs.extend(loader.load()) return docs [docs] def load(self) -> List[Document]: """ Loads all supported document files from the specified OneDrive drive a nd returns a list of Document objects. Returns: List[Document]: A list of Document objects representing the loaded documents. Raises: ValueError: If the specified drive ID does not correspond to a drive in the OneDrive storage. """ account = self._auth() storage = account.storage() drive = storage.get_drive(self.drive_id) docs: List[Document] = [] if not drive: raise ValueError(f"There isn't a drive with id {self.drive_id}.") if self.folder_path: folder = self._get_folder_from_path(drive=drive) docs.extend(self._load_from_folder(folder=folder)) elif self.object_ids: docs.extend(self._load_from_object_ids(drive=drive)) return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive.html
272eeba1-cd2c-4da3-bdba-0372f13e5833
Source code for langchain.document_loaders.rst """Loader that loads RST files.""" from typing import Any, List from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) [docs]class UnstructuredRSTLoader(UnstructuredFileLoader): """Loader that uses unstructured to load RST files.""" def __init__( self, file_path: str, mode: str = "single", **unstructured_kwargs: Any ): validate_unstructured_version(min_unstructured_version="0.7.5") super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.rst import partition_rst return partition_rst(filename=self.file_path, **self.unstructured_kwargs)
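A usage sketch for UnstructuredRSTLoader; the file path is a placeholder and unstructured>=0.7.5 is required.

from langchain.document_loaders.rst import UnstructuredRSTLoader

loader = UnstructuredRSTLoader("README.rst", mode="elements")  # placeholder path
docs = loader.load()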
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/rst.html
73098c3d-c92b-4d7f-a27b-70e022022170
Source code for langchain.document_loaders.open_city_data from typing import Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class OpenCityDataLoader(BaseLoader): """Loader that loads Open City data.""" def __init__(self, city_id: str, dataset_id: str, limit: int): """Initialize with city_id, dataset_id and limit. Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6 e.g., city_id = data.sfgov.org e.g., dataset_id = vw6y-z8j6 """ self.city_id = city_id self.dataset_id = dataset_id self.limit = limit [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load records.""" from sodapy import Socrata client = Socrata(self.city_id, None) results = client.get(self.dataset_id, limit=self.limit) for record in results: yield Document( page_content=str(record), metadata={ "source": self.city_id + "_" + self.dataset_id, }, ) [docs] def load(self) -> List[Document]: """Load records.""" return list(self.lazy_load())
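A usage sketch for OpenCityDataLoader, reusing the city_id and dataset_id from the example URL in the docstring above; sodapy must be installed.

from langchain.document_loaders.open_city_data import OpenCityDataLoader

loader = OpenCityDataLoader(city_id="data.sfgov.org", dataset_id="vw6y-z8j6", limit=5)
docs = loader.load()  # each record is stringified into page_content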
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/open_city_data.html
15c3e197-01db-4e8b-9da8-b81f24c0623b
Source code for langchain.document_loaders.readthedocs """Loader that loads ReadTheDocs documentation directory dump.""" from pathlib import Path from typing import Any, List, Optional, Tuple, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ReadTheDocsLoader(BaseLoader): """Loader that loads ReadTheDocs documentation directory dump.""" def __init__( self, path: Union[str, Path], encoding: Optional[str] = None, errors: Optional[str] = None, custom_html_tag: Optional[Tuple[str, dict]] = None, **kwargs: Optional[Any] ): """ Initialize ReadTheDocsLoader The loader loops over all files under `path` and extract the actual content of the files by retrieving main html tags. Default main html tags include `<main id="main-content>`, <`div role="main>`, and `<article role="main">`. You can also define your own html tags by passing custom_html_tag, e.g. `("div", "class=main")`. The loader iterates html tags with the order of custom html tags (if exists) and default html tags. If any of the tags is not empty, the loop will break and retrieve the content out of that tag. Args: path: The location of pulled readthedocs folder. encoding: The encoding with which to open the documents. errors: Specifies how encoding and decoding errors are to be handled—this cannot be used in binary mode. custom_html_tag: Optional custom html tag to retrieve the content from files. """ try: from bs4 import BeautifulSoup except ImportError: raise ImportError( "Could not import python packages. " "Please install it with `pip install beautifulsoup4`. " ) try: _ = BeautifulSoup( "<html><body>Parser builder library test.</body></html>", **kwargs ) except Exception as e: raise ValueError("Parsing kwargs do not appear valid") from e self.file_path = Path(path) self.encoding = encoding self.errors = errors self.custom_html_tag = custom_html_tag self.bs_kwargs = kwargs [docs] def load(self) -> List[Document]: """Load documents.""" docs = [] for p in self.file_path.rglob("*"): if p.is_dir(): continue with open(p, encoding=self.encoding, errors=self.errors) as f: text = self._clean_data(f.read()) metadata = {"source": str(p)} docs.append(Document(page_content=text, metadata=metadata)) return docs def _clean_data(self, data: str) -> str: from bs4 import BeautifulSoup soup = BeautifulSoup(data, **self.bs_kwargs) # default tags html_tags = [ ("div", {"role": "main"}), ("main", {"id": "main-content"}), ] if self.custom_html_tag is not None: html_tags.append(self.custom_html_tag) text = None # reversed order. check the custom one first for tag, attrs in html_tags[::-1]: text = soup.find(tag, attrs) # if found, break if text is not None: break if text is not None: text = text.get_text() else: text = "" # trim empty lines return "\n".join([t for t in text.split("\n") if t])
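A usage sketch for ReadTheDocsLoader; the folder name is a placeholder for a locally mirrored ReadTheDocs site, and the custom tag shown is an illustrative override of the built-in main-content tags.

from langchain.document_loaders.readthedocs import ReadTheDocsLoader

loader = ReadTheDocsLoader(
    "rtdocs/",                                      # placeholder: folder of downloaded HTML pages
    custom_html_tag=("article", {"role": "main"}),  # optional (tag, attrs) override, checked before the defaults
)
docs = loader.load()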
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/readthedocs.html
286a0e4b-1b0e-4b29-ac8a-d80bb2215b07
Source code for langchain.document_loaders.twitter """Twitter document loader.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import tweepy from tweepy import OAuth2BearerHandler, OAuthHandler def _dependable_tweepy_import() -> tweepy: try: import tweepy except ImportError: raise ImportError( "tweepy package not found, please install it with `pip install tweepy`" ) return tweepy [docs]class TwitterTweetLoader(BaseLoader): """Twitter tweets loader. Read tweets of user twitter handle. First you need to go to `https://developer.twitter.com/en/docs/twitter-api /getting-started/getting-access-to-the-twitter-api` to get your token. And create a v2 version of the app. """ def __init__( self, auth_handler: Union[OAuthHandler, OAuth2BearerHandler], twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ): self.auth = auth_handler self.twitter_users = twitter_users self.number_tweets = number_tweets [docs] def load(self) -> List[Document]: """Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self.number_tweets) user = api.get_user(screen_name=username) docs = self._format_tweets(tweets, user) results.extend(docs) return results def _format_tweets( self, tweets: List[Dict[str, Any]], user_info: dict ) -> Iterable[Document]: """Format tweets into a string.""" for tweet in tweets: metadata = { "created_at": tweet["created_at"], "user_info": user_info, } yield Document( page_content=tweet["text"], metadata=metadata, ) [docs] @classmethod def from_bearer_token( cls, oauth2_bearer_token: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ) -> TwitterTweetLoader: """Create a TwitterTweetLoader from OAuth2 bearer token.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token) return cls( auth_handler=auth, twitter_users=twitter_users, number_tweets=number_tweets, ) [docs] @classmethod def from_secrets( cls, access_token: str, access_token_secret: str, consumer_key: str, consumer_secret: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ) -> TwitterTweetLoader: """Create a TwitterTweetLoader from access tokens and secrets.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuthHandler( access_token=access_token, access_token_secret=access_token_secret, consumer_key=consumer_key, consumer_secret=consumer_secret, ) return cls( auth_handler=auth, twitter_users=twitter_users, number_tweets=number_tweets, )
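A usage sketch for TwitterTweetLoader using the bearer-token constructor; the token and handle are placeholders.

from langchain.document_loaders.twitter import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="<twitter-bearer-token>",  # placeholder
    twitter_users=["some_handle"],                 # placeholder handles
    number_tweets=10,
)
docs = loader.load()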
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/twitter.html
d012caf6-82f5-43fc-9f90-04bc8f1a998f
Source code for langchain.document_loaders.iugu """Loader that fetches data from IUGU""" import json import urllib.request from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_dict IUGU_ENDPOINTS = { "invoices": "https://api.iugu.com/v1/invoices", "customers": "https://api.iugu.com/v1/customers", "charges": "https://api.iugu.com/v1/charges", "subscriptions": "https://api.iugu.com/v1/subscriptions", "plans": "https://api.iugu.com/v1/plans", } [docs]class IuguLoader(BaseLoader): """Loader that fetches data from IUGU.""" def __init__(self, resource: str, api_token: Optional[str] = None) -> None: self.resource = resource api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN") self.headers = {"Authorization": f"Bearer {api_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = IUGU_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) [docs] def load(self) -> List[Document]: return self._get_resource()
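A usage sketch for IuguLoader; the token is read from the IUGU_API_TOKEN environment variable when api_token is not passed.

from langchain.document_loaders.iugu import IuguLoader

loader = IuguLoader(resource="invoices")  # any key of IUGU_ENDPOINTS
docs = loader.load()  # a single Document with the stringified JSON response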
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/iugu.html
f405b5f0-8887-4fb3-a9e3-ca6b3fdc6d24
Source code for langchain.document_loaders.reddit """Reddit document loader.""" from __future__ import annotations from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import praw def _dependable_praw_import() -> praw: try: import praw except ImportError: raise ValueError( "praw package not found, please install it with `pip install praw`" ) return praw [docs]class RedditPostsLoader(BaseLoader): """Reddit posts loader. Read posts on a subreddit. First you need to go to https://www.reddit.com/prefs/apps/ and create your application """ def __init__( self, client_id: str, client_secret: str, user_agent: str, search_queries: Sequence[str], mode: str, categories: Sequence[str] = ["new"], number_posts: Optional[int] = 10, ): self.client_id = client_id self.client_secret = client_secret self.user_agent = user_agent self.search_queries = search_queries self.mode = mode self.categories = categories self.number_posts = number_posts [docs] def load(self) -> List[Document]: """Load reddits.""" praw = _dependable_praw_import() reddit = praw.Reddit( client_id=self.client_id, client_secret=self.client_secret, user_agent=self.user_agent, ) results: List[Document] = [] if self.mode == "subreddit": for search_query in self.search_queries: for category in self.categories: docs = self._subreddit_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) elif self.mode == "username": for search_query in self.search_queries: for category in self.categories: docs = self._user_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) else: raise ValueError( "mode not correct, please enter 'username' or 'subreddit' as mode" ) return results def _subreddit_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: subreddit = reddit.subreddit(search_query) method = getattr(subreddit, category) cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, ) def _user_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: user = reddit.redditor(search_query) method = getattr(user.submissions, category) cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, )
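A usage sketch for RedditPostsLoader; the credentials, user agent and search query are placeholders (app credentials come from reddit.com/prefs/apps).

from langchain.document_loaders.reddit import RedditPostsLoader

loader = RedditPostsLoader(
    client_id="<reddit-client-id>",          # placeholder
    client_secret="<reddit-client-secret>",  # placeholder
    user_agent="my-loader by u/someuser",    # placeholder
    search_queries=["langchain"],            # subreddit names (mode="subreddit") or usernames (mode="username")
    mode="subreddit",
    categories=["new", "hot"],
    number_posts=10,
)
docs = loader.load()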
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/reddit.html
3a24cbf2-d0cf-403f-866a-be663707dfa5
Source code for langchain.document_loaders.azure_blob_storage_container """Loading logic for loading documents from an Azure Blob Storage container.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.azure_blob_storage_file import ( AzureBlobStorageFileLoader, ) from langchain.document_loaders.base import BaseLoader [docs]class AzureBlobStorageContainerLoader(BaseLoader): """Loading logic for loading documents from Azure Blob Storage.""" def __init__(self, conn_str: str, container: str, prefix: str = ""): """Initialize with connection string, container and blob prefix.""" self.conn_str = conn_str self.container = container self.prefix = prefix [docs] def load(self) -> List[Document]: """Load documents.""" try: from azure.storage.blob import ContainerClient except ImportError as exc: raise ValueError( "Could not import azure storage blob python package. " "Please install it with `pip install azure-storage-blob`." ) from exc container = ContainerClient.from_connection_string( conn_str=self.conn_str, container_name=self.container ) docs = [] blob_list = container.list_blobs(name_starts_with=self.prefix) for blob in blob_list: loader = AzureBlobStorageFileLoader( self.conn_str, self.container, blob.name # type: ignore ) docs.extend(loader.load()) return docs
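A usage sketch for AzureBlobStorageContainerLoader; connection string, container and prefix are placeholders.

from langchain.document_loaders.azure_blob_storage_container import AzureBlobStorageContainerLoader

loader = AzureBlobStorageContainerLoader(
    conn_str="<azure-storage-connection-string>",  # placeholder
    container="my-container",                      # placeholder
    prefix="reports/",                             # optional blob-name prefix filter
)
docs = loader.load()  # each listed blob is delegated to AzureBlobStorageFileLoader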
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/azure_blob_storage_container.html
70db530d-24ad-4803-9c77-748ff1fdfa0d
Source code for langchain.document_loaders.markdown """Loader that loads Markdown files.""" from typing import List from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class UnstructuredMarkdownLoader(UnstructuredFileLoader): """Loader that uses unstructured to load markdown files.""" def _get_elements(self) -> List: from unstructured.__version__ import __version__ as __unstructured_version__ from unstructured.partition.md import partition_md # NOTE(MthwRobinson) - enables the loader to work when you're using pre-release # versions of unstructured like 0.4.17-dev1 _unstructured_version = __unstructured_version__.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) if unstructured_version < (0, 4, 16): raise ValueError( f"You are on unstructured version {__unstructured_version__}. " "Partitioning markdown files is only supported in unstructured>=0.4.16." ) return partition_md(filename=self.file_path, **self.unstructured_kwargs)
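A usage sketch for UnstructuredMarkdownLoader; the file path is a placeholder and unstructured>=0.4.16 is required.

from langchain.document_loaders.markdown import UnstructuredMarkdownLoader

loader = UnstructuredMarkdownLoader("README.md")  # placeholder path
docs = loader.load()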
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/markdown.html
75dba5b9-5265-4ba6-9634-c368126f1990
Source code for langchain.document_loaders.stripe """Loader that fetches data from Stripe""" import json import urllib.request from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_dict STRIPE_ENDPOINTS = { "balance_transactions": "https://api.stripe.com/v1/balance_transactions", "charges": "https://api.stripe.com/v1/charges", "customers": "https://api.stripe.com/v1/customers", "events": "https://api.stripe.com/v1/events", "refunds": "https://api.stripe.com/v1/refunds", "disputes": "https://api.stripe.com/v1/disputes", } [docs]class StripeLoader(BaseLoader): """Loader that fetches data from Stripe.""" def __init__(self, resource: str, access_token: Optional[str] = None) -> None: self.resource = resource access_token = access_token or get_from_env( "access_token", "STRIPE_ACCESS_TOKEN" ) self.headers = {"Authorization": f"Bearer {access_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = STRIPE_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) [docs] def load(self) -> List[Document]: return self._get_resource()
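A usage sketch for StripeLoader; the token is read from the STRIPE_ACCESS_TOKEN environment variable when access_token is not passed.

from langchain.document_loaders.stripe import StripeLoader

loader = StripeLoader(resource="charges")  # any key of STRIPE_ENDPOINTS
docs = loader.load()  # a single Document with the stringified JSON response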
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/stripe.html
19446e29-e13f-4262-9ce3-cf9bac62eab4
Source code for langchain.document_loaders.ifixit """Loader that loads iFixit data.""" from typing import List, Optional import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.web_base import WebBaseLoader IFIXIT_BASE_URL = "https://www.ifixit.com/api/2.0" [docs]class IFixitLoader(BaseLoader): """Load iFixit repair guides, device wikis and answers. iFixit is the largest, open repair community on the web. The site contains nearly 100k repair manuals, 200k Questions & Answers on 42k devices, and all the data is licensed under CC-BY. This loader will allow you to download the text of a repair guide, text of Q&A's and wikis from devices on iFixit using their open APIs and web scraping. """ def __init__(self, web_path: str): """Initialize with web path.""" if not web_path.startswith("https://www.ifixit.com"): raise ValueError("web path must start with 'https://www.ifixit.com'") path = web_path.replace("https://www.ifixit.com", "") allowed_paths = ["/Device", "/Guide", "/Answers", "/Teardown"] """ TODO: Add /Wiki """ if not any(path.startswith(allowed_path) for allowed_path in allowed_paths): raise ValueError( "web path must start with /Device, /Guide, /Teardown or /Answers" ) pieces = [x for x in path.split("/") if x] """Teardowns are just guides by a different name""" self.page_type = pieces[0] if pieces[0] != "Teardown" else "Guide" if self.page_type == "Guide" or self.page_type == "Answers": self.id = pieces[2] else: self.id = pieces[1] self.web_path = web_path [docs] def load(self) -> List[Document]: if self.page_type == "Device": return self.load_device() elif self.page_type == "Guide" or self.page_type == "Teardown": return self.load_guide() elif self.page_type == "Answers": return self.load_questions_and_answers() else: raise ValueError("Unknown page type: " + self.page_type) [docs] @staticmethod def load_suggestions(query: str = "", doc_type: str = "all") -> List[Document]: res = requests.get( IFIXIT_BASE_URL + "/suggest/" + query + "?doctypes=" + doc_type ) if res.status_code != 200: raise ValueError( 'Could not load suggestions for "' + query + '"\n' + res.json() ) data = res.json() results = data["results"] output = [] for result in results: try: loader = IFixitLoader(result["url"]) if loader.page_type == "Device": output += loader.load_device(include_guides=False) else: output += loader.load() except ValueError: continue return output [docs] def load_questions_and_answers( self, url_override: Optional[str] = None ) -> List[Document]: loader = WebBaseLoader(self.web_path if url_override is None else url_override) soup = loader.scrape() output = [] title = soup.find("h1", "post-title").text output.append("# " + title) output.append(soup.select_one(".post-content .post-text").text.strip()) answersHeader = soup.find("div", "post-answers-header") if answersHeader: output.append("\n## " + answersHeader.text.strip()) for answer in soup.select(".js-answers-list .post.post-answer"): if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]: output.append("\n### Accepted Answer") elif "post-helpful" in answer["class"]: output.append("\n### Most Helpful Answer") else: output.append("\n### Other Answer") output += [ a.text.strip() for a in answer.select(".post-content .post-text") ] output.append("\n") text = "\n".join(output).strip() metadata = {"source": self.web_path, "title": title} return [Document(page_content=text, metadata=metadata)] [docs] def load_device( self, 
url_override: Optional[str] = None, include_guides: bool = True ) -> List[Document]: documents = [] if url_override is None: url = IFIXIT_BASE_URL + "/wikis/CATEGORY/" + self.id else: url = url_override res = requests.get(url) data = res.json() text = "\n".join( [ data[key] for key in ["title", "description", "contents_raw"] if key in data ] ).strip() metadata = {"source": self.web_path, "title": data["title"]} documents.append(Document(page_content=text, metadata=metadata)) if include_guides: """Load and return documents for each guide linked to from the device""" guide_urls = [guide["url"] for guide in data["guides"]] for guide_url in guide_urls: documents.append(IFixitLoader(guide_url).load()[0]) return documents [docs] def load_guide(self, url_override: Optional[str] = None) -> List[Document]: if url_override is None: url = IFIXIT_BASE_URL + "/guides/" + self.id else: url = url_override res = requests.get(url) if res.status_code != 200: raise ValueError( "Could not load guide: " + self.web_path + "\n" + res.json() ) data = res.json() doc_parts = ["# " + data["title"], data["introduction_raw"]] doc_parts.append("\n\n###Tools Required:") if len(data["tools"]) == 0: doc_parts.append("\n - None") else: for tool in data["tools"]: doc_parts.append("\n - " + tool["text"]) doc_parts.append("\n\n###Parts Required:") if len(data["parts"]) == 0: doc_parts.append("\n - None") else: for part in data["parts"]: doc_parts.append("\n - " + part["text"]) for row in data["steps"]: doc_parts.append( "\n\n## " + ( row["title"] if row["title"] != "" else "Step {}".format(row["orderby"]) ) ) for line in row["lines"]: doc_parts.append(line["text_raw"]) doc_parts.append(data["conclusion_raw"]) text = "\n".join(doc_parts) metadata = {"source": self.web_path, "title": data["title"]} return [Document(page_content=text, metadata=metadata)]
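A minimal usage sketch for this loader; the device URL below is only an illustrative example, and any supported /Device, /Guide, /Teardown or /Answers path works the same way.

from langchain.document_loaders.ifixit import IFixitLoader

# Illustrative device page; guides linked from the device wiki are loaded as well.
loader = IFixitLoader("https://www.ifixit.com/Device/Standard_iPad")
docs = loader.load()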
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/ifixit.html
360d94e4-ae3c-4989-a5a7-862f7a37ae0d
Source code for langchain.document_loaders.notiondb
"""Notion DB loader for langchain"""

from typing import Any, Dict, List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"


[docs]class NotionDBLoader(BaseLoader):
    """Notion DB Loader.

    Reads content from pages within a Notion Database.

    Args:
        integration_token (str): Notion integration token.
        database_id (str): Notion database id.
        request_timeout_sec (int): Timeout for Notion requests in seconds.
    """

    def __init__(
        self,
        integration_token: str,
        database_id: str,
        request_timeout_sec: Optional[int] = 10,
    ) -> None:
        """Initialize with parameters."""
        if not integration_token:
            raise ValueError("integration_token must be provided")
        if not database_id:
            raise ValueError("database_id must be provided")

        self.token = integration_token
        self.database_id = database_id
        self.headers = {
            "Authorization": "Bearer " + self.token,
            "Content-Type": "application/json",
            "Notion-Version": "2022-06-28",
        }
        self.request_timeout_sec = request_timeout_sec

[docs]    def load(self) -> List[Document]:
        """Load documents from the Notion database.

        Returns:
            List[Document]: List of documents.
        """
        page_summaries = self._retrieve_page_summaries()
        return list(self.load_page(page_summary) for page_summary in page_summaries)

    def _retrieve_page_summaries(
        self, query_dict: Dict[str, Any] = {"page_size": 100}
    ) -> List[Dict[str, Any]]:
        """Get all the pages from a Notion database."""
        pages: List[Dict[str, Any]] = []

        while True:
            data = self._request(
                DATABASE_URL.format(database_id=self.database_id),
                method="POST",
                query_dict=query_dict,
            )

            pages.extend(data.get("results"))

            if not data.get("has_more"):
                break

            query_dict["start_cursor"] = data.get("next_cursor")

        return pages

[docs]    def load_page(self, page_summary: Dict[str, Any]) -> Document:
        """Read a page."""
        page_id = page_summary["id"]

        # load properties as metadata
        metadata: Dict[str, Any] = {}

        for prop_name, prop_data in page_summary["properties"].items():
            prop_type = prop_data["type"]

            if prop_type == "rich_text":
                value = (
                    prop_data["rich_text"][0]["plain_text"]
                    if prop_data["rich_text"]
                    else None
                )
            elif prop_type == "title":
                value = (
                    prop_data["title"][0]["plain_text"] if prop_data["title"] else None
                )
            elif prop_type == "multi_select":
                value = (
                    [item["name"] for item in prop_data["multi_select"]]
                    if prop_data["multi_select"]
                    else []
                )
            elif prop_type == "url":
                value = prop_data["url"]
            else:
                value = None

            metadata[prop_name.lower()] = value

        metadata["id"] = page_id

        return Document(page_content=self._load_blocks(page_id), metadata=metadata)

    def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
        """Read a block and its children."""
        result_lines_arr: List[str] = []
        cur_block_id: str = block_id

        while cur_block_id:
            data = self._request(BLOCK_URL.format(block_id=cur_block_id))

            for result in data["results"]:
                result_obj = result[result["type"]]

                if "rich_text" not in result_obj:
                    continue

                cur_result_text_arr: List[str] = []

                for rich_text in result_obj["rich_text"]:
                    if "text" in rich_text:
                        cur_result_text_arr.append(
                            "\t" * num_tabs + rich_text["text"]["content"]
                        )

                if result["has_children"]:
                    children_text = self._load_blocks(
                        result["id"], num_tabs=num_tabs + 1
                    )
                    cur_result_text_arr.append(children_text)

                result_lines_arr.append("\n".join(cur_result_text_arr))

            cur_block_id = data.get("next_cursor")

        return "\n".join(result_lines_arr)

    def _request(
        self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
    ) -> Any:
        res = requests.request(
            method,
            url,
            headers=self.headers,
            json=query_dict,
            timeout=self.request_timeout_sec,
        )
        res.raise_for_status()
        return res.json()
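A usage sketch, assuming a Notion integration token and database id are available; the environment variable names used here are only an illustration.

import os

from langchain.document_loaders.notiondb import NotionDBLoader

loader = NotionDBLoader(
    integration_token=os.environ["NOTION_INTEGRATION_TOKEN"],  # assumed variable name
    database_id=os.environ["NOTION_DATABASE_ID"],  # assumed variable name
    request_timeout_sec=30,
)
docs = loader.load()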
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notiondb.html
44ed3d91-59b3-4ff0-934a-28b2c6989911
Source code for langchain.document_loaders.bibtex import logging import re from pathlib import Path from typing import Any, Iterator, List, Mapping, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.bibtex import BibtexparserWrapper logger = logging.getLogger(__name__) [docs]class BibtexLoader(BaseLoader): """Loads a bibtex file into a list of Documents. Each document represents one entry from the bibtex file. If a PDF file is present in the `file` bibtex field, the original PDF is loaded into the document text. If no such file entry is present, the `abstract` field is used instead. """ def __init__( self, file_path: str, *, parser: Optional[BibtexparserWrapper] = None, max_docs: Optional[int] = None, max_content_chars: Optional[int] = 4_000, load_extra_metadata: bool = False, file_pattern: str = r"[^:]+\.pdf", ): """Initialize the BibtexLoader. Args: file_path: Path to the bibtex file. max_docs: Max number of associated documents to load. Use -1 means no limit. """ self.file_path = file_path self.parser = parser or BibtexparserWrapper() self.max_docs = max_docs self.max_content_chars = max_content_chars self.load_extra_metadata = load_extra_metadata self.file_regex = re.compile(file_pattern) def _load_entry(self, entry: Mapping[str, Any]) -> Optional[Document]: import fitz parent_dir = Path(self.file_path).parent # regex is useful for Zotero flavor bibtex files file_names = self.file_regex.findall(entry.get("file", "")) if not file_names: return None texts: List[str] = [] for file_name in file_names: try: with fitz.open(parent_dir / file_name) as f: texts.extend(page.get_text() for page in f) except FileNotFoundError as e: logger.debug(e) content = "\n".join(texts) or entry.get("abstract", "") if self.max_content_chars: content = content[: self.max_content_chars] metadata = self.parser.get_metadata(entry, load_extra=self.load_extra_metadata) return Document( page_content=content, metadata=metadata, ) [docs] def lazy_load(self) -> Iterator[Document]: """Load bibtex file using bibtexparser and get the article texts plus the article metadata. See https://bibtexparser.readthedocs.io/en/master/ Returns: a list of documents with the document.page_content in text format """ try: import fitz # noqa: F401 except ImportError: raise ImportError( "PyMuPDF package not found, please install it with " "`pip install pymupdf`" ) entries = self.parser.load_bibtex_entries(self.file_path) if self.max_docs: entries = entries[: self.max_docs] for entry in entries: doc = self._load_entry(entry) if doc: yield doc [docs] def load(self) -> List[Document]: """Load bibtex file documents from the given bibtex file path. See https://bibtexparser.readthedocs.io/en/master/ Args: file_path: the path to the bibtex file Returns: a list of documents with the document.page_content in text format """ return list(self.lazy_load())
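A usage sketch, assuming a Zotero-style export with attached PDFs; the file name is hypothetical and pymupdf must be installed.

from langchain.document_loaders.bibtex import BibtexLoader

loader = BibtexLoader("references.bib", max_docs=10)  # hypothetical .bib file
docs = loader.load()  # falls back to the abstract field when no PDF is attached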
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bibtex.html
654e9724-4937-49d5-8f2f-465473c914bf
Source code for langchain.document_loaders.slack_directory """Loader for documents from a Slack export.""" import json import zipfile from pathlib import Path from typing import Dict, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class SlackDirectoryLoader(BaseLoader): """Loader for loading documents from a Slack directory dump.""" def __init__(self, zip_path: str, workspace_url: Optional[str] = None): """Initialize the SlackDirectoryLoader. Args: zip_path (str): The path to the Slack directory dump zip file. workspace_url (Optional[str]): The Slack workspace URL. Including the URL will turn sources into links. Defaults to None. """ self.zip_path = Path(zip_path) self.workspace_url = workspace_url self.channel_id_map = self._get_channel_id_map(self.zip_path) @staticmethod def _get_channel_id_map(zip_path: Path) -> Dict[str, str]: """Get a dictionary mapping channel names to their respective IDs.""" with zipfile.ZipFile(zip_path, "r") as zip_file: try: with zip_file.open("channels.json", "r") as f: channels = json.load(f) return {channel["name"]: channel["id"] for channel in channels} except KeyError: return {} [docs] def load(self) -> List[Document]: """Load and return documents from the Slack directory dump.""" docs = [] with zipfile.ZipFile(self.zip_path, "r") as zip_file: for channel_path in zip_file.namelist(): channel_name = Path(channel_path).parent.name if not channel_name: continue if channel_path.endswith(".json"): messages = self._read_json(zip_file, channel_path) for message in messages: document = self._convert_message_to_document( message, channel_name ) docs.append(document) return docs def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]: """Read JSON data from a zip subfile.""" with zip_file.open(file_path, "r") as f: data = json.load(f) return data def _convert_message_to_document( self, message: dict, channel_name: str ) -> Document: """ Convert a message to a Document object. Args: message (dict): A message in the form of a dictionary. channel_name (str): The name of the channel the message belongs to. Returns: Document: A Document object representing the message. """ text = message.get("text", "") metadata = self._get_message_metadata(message, channel_name) return Document( page_content=text, metadata=metadata, ) def _get_message_metadata(self, message: dict, channel_name: str) -> dict: """Create and return metadata for a given message and channel.""" timestamp = message.get("ts", "") user = message.get("user", "") source = self._get_message_source(channel_name, user, timestamp) return { "source": source, "channel": channel_name, "timestamp": timestamp, "user": user, } def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str: """ Get the message source as a string. Args: channel_name (str): The name of the channel the message belongs to. user (str): The user ID who sent the message. timestamp (str): The timestamp of the message. Returns: str: The message source. """ if self.workspace_url: channel_id = self.channel_id_map.get(channel_name, "") return ( f"{self.workspace_url}/archives/{channel_id}" + f"/p{timestamp.replace('.', '')}" ) else: return f"{channel_name} - {user} - {timestamp}"
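A usage sketch; the export file name and workspace URL are hypothetical.

from langchain.document_loaders.slack_directory import SlackDirectoryLoader

loader = SlackDirectoryLoader(
    zip_path="slack_export.zip",  # hypothetical Slack export dump
    workspace_url="https://example.slack.com",  # optional, turns sources into links
)
docs = loader.load()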
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/slack_directory.html
38f0350b-7184-44ce-a908-cca5ae50db07
Source code for langchain.document_loaders.merge
from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class MergedDataLoader(BaseLoader):
    """Merge documents from a list of loaders"""

    def __init__(self, loaders: List):
        """Initialize with a list of loaders"""
        self.loaders = loaders

[docs]    def lazy_load(self) -> Iterator[Document]:
        """Lazy load docs from each individual loader."""
        for loader in self.loaders:
            # Check if lazy_load is implemented
            try:
                data = loader.lazy_load()
            except NotImplementedError:
                data = loader.load()
            for document in data:
                yield document

[docs]    def load(self) -> List[Document]:
        """Load docs."""
        return list(self.lazy_load())
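A usage sketch combining two of the loaders documented in this module set; the vault paths are hypothetical.

from langchain.document_loaders.merge import MergedDataLoader
from langchain.document_loaders.obsidian import ObsidianLoader
from langchain.document_loaders.roam import RoamLoader

loader = MergedDataLoader(
    loaders=[
        ObsidianLoader("obsidian_vault/"),  # hypothetical path
        RoamLoader("roam_export/"),  # hypothetical path
    ]
)
docs = loader.load()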
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/merge.html
ffe035c0-8d4d-494d-923d-e39a728b0d60
Source code for langchain.document_loaders.hn """Loader that loads HN.""" from typing import Any, List from langchain.docstore.document import Document from langchain.document_loaders.web_base import WebBaseLoader [docs]class HNLoader(WebBaseLoader): """Load Hacker News data from either main page results or the comments page.""" [docs] def load(self) -> List[Document]: """Get important HN webpage information. Components are: - title - content - source url, - time of post - author of the post - number of comments - rank of the post """ soup_info = self.scrape() if "item" in self.web_path: return self.load_comments(soup_info) else: return self.load_results(soup_info) [docs] def load_comments(self, soup_info: Any) -> List[Document]: """Load comments from a HN post.""" comments = soup_info.select("tr[class='athing comtr']") title = soup_info.select_one("tr[id='pagespace']").get("title") return [ Document( page_content=comment.text.strip(), metadata={"source": self.web_path, "title": title}, ) for comment in comments ] [docs] def load_results(self, soup: Any) -> List[Document]: """Load items from an HN page.""" items = soup.select("tr[class='athing']") documents = [] for lineItem in items: ranking = lineItem.select_one("span[class='rank']").text link = lineItem.find("span", {"class": "titleline"}).find("a").get("href") title = lineItem.find("span", {"class": "titleline"}).text.strip() metadata = { "source": self.web_path, "title": title, "link": link, "ranking": ranking, } documents.append( Document( page_content=title, link=link, ranking=ranking, metadata=metadata ) ) return documents
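A usage sketch; the item id is an illustrative placeholder. An item URL loads a post's comments, while the front page URL loads story titles.

from langchain.document_loaders.hn import HNLoader

loader = HNLoader("https://news.ycombinator.com/item?id=34817881")  # placeholder item id
docs = loader.load()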
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/hn.html
00aa36f6-ded4-4ff8-a246-cb5283bd8715
Source code for langchain.document_loaders.figma
"""Loader that loads Figma files json dump."""
import json
import urllib.request
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict


[docs]class FigmaFileLoader(BaseLoader):
    """Loader that loads Figma file json."""

    def __init__(self, access_token: str, ids: str, key: str):
        """Initialize with access token, ids, and key."""
        self.access_token = access_token
        self.ids = ids
        self.key = key

    def _construct_figma_api_url(self) -> str:
        api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % (
            self.key,
            self.ids,
        )
        return api_url

    def _get_figma_file(self) -> Any:
        """Get Figma file from Figma REST API."""
        headers = {"X-Figma-Token": self.access_token}
        request = urllib.request.Request(
            self._construct_figma_api_url(), headers=headers
        )
        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
        return json_data

[docs]    def load(self) -> List[Document]:
        """Load file"""
        data = self._get_figma_file()
        text = stringify_dict(data)
        metadata = {"source": self._construct_figma_api_url()}
        return [Document(page_content=text, metadata=metadata)]
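A usage sketch; every value below is a placeholder that would normally come from your Figma account and the file URL.

from langchain.document_loaders.figma import FigmaFileLoader

loader = FigmaFileLoader(
    access_token="<figma-access-token>",  # placeholder
    ids="<node-ids>",  # placeholder, node ids from the file URL
    key="<file-key>",  # placeholder, file key from the file URL
)
docs = loader.load()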
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/figma.html
470c9f47-b556-45d7-8baf-33c36d3ad684
Source code for langchain.document_loaders.roam
"""Loader that loads Roam directory dump."""
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class RoamLoader(BaseLoader):
    """Loader that loads Roam files from disk."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

[docs]    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p) as f:
                text = f.read()
            metadata = {"source": str(p)}
            docs.append(Document(page_content=text, metadata=metadata))
        return docs
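A usage sketch; the export directory is hypothetical.

from langchain.document_loaders.roam import RoamLoader

loader = RoamLoader("roam_export/")  # hypothetical path to a Roam markdown export
docs = loader.load()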
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/roam.html
07d18be0-81a0-4397-9003-de25a7012e54
Source code for langchain.document_loaders.mhtml
"""Loader to load MHTML files, enriching metadata with page title."""
import email
import logging
from typing import Dict, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


[docs]class MHTMLLoader(BaseLoader):
    """Loader that uses BeautifulSoup to parse MHTML files."""

    def __init__(
        self,
        file_path: str,
        open_encoding: Union[str, None] = None,
        bs_kwargs: Union[dict, None] = None,
        get_text_separator: str = "",
    ) -> None:
        """Initialise with path, and optionally, file encoding to use, and any kwargs
        to pass to the BeautifulSoup object."""
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "beautifulsoup4 package not found, please install it with "
                "`pip install beautifulsoup4`"
            )

        self.file_path = file_path
        self.open_encoding = open_encoding
        if bs_kwargs is None:
            bs_kwargs = {"features": "lxml"}
        self.bs_kwargs = bs_kwargs
        self.get_text_separator = get_text_separator

[docs]    def load(self) -> List[Document]:
        """Load MHTML document into document objects."""
        from bs4 import BeautifulSoup

        with open(self.file_path, "r", encoding=self.open_encoding) as f:
            message = email.message_from_string(f.read())
            parts = message.get_payload()

            if type(parts) is not list:
                parts = [message]

            for part in parts:
                if part.get_content_type() == "text/html":
                    html = part.get_payload(decode=True).decode()

                    soup = BeautifulSoup(html, **self.bs_kwargs)
                    text = soup.get_text(self.get_text_separator)

                    if soup.title:
                        title = str(soup.title.string)
                    else:
                        title = ""

                    metadata: Dict[str, Union[str, None]] = {
                        "source": self.file_path,
                        "title": title,
                    }
                    return [Document(page_content=text, metadata=metadata)]
        return []
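A usage sketch; the file name is hypothetical and beautifulsoup4 must be installed.

from langchain.document_loaders.mhtml import MHTMLLoader

loader = MHTMLLoader("example.mht", get_text_separator=" ")  # hypothetical file
docs = loader.load()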
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mhtml.html
22a68594-409d-4efa-a052-e9dd3247fbb0
Source code for langchain.document_loaders.xml
"""Loader that loads XML files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


[docs]class UnstructuredXMLLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load XML files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.7")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.xml import partition_xml

        return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
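A usage sketch; the file name is hypothetical and unstructured>=0.6.7 is required.

from langchain.document_loaders.xml import UnstructuredXMLLoader

loader = UnstructuredXMLLoader("example.xml", mode="elements")  # hypothetical file
docs = loader.load()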
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/xml.html
0172e57e-3b40-4ac8-8557-93e2b02a735d
Source code for langchain.document_loaders.obsidian """Loader that loads Obsidian directory dump.""" import re from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ObsidianLoader(BaseLoader): """Loader that loads Obsidian files from disk.""" FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL) def __init__( self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True ): """Initialize with path.""" self.file_path = path self.encoding = encoding self.collect_metadata = collect_metadata def _parse_front_matter(self, content: str) -> dict: """Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) front_matter = {} if match: lines = match.group(1).split("\n") for line in lines: if ":" in line: key, value = line.split(":", 1) front_matter[key.strip()] = value.strip() else: # Skip lines without a colon continue return front_matter def _remove_front_matter(self, content: str) -> str: """Remove front matter metadata from the given content.""" if not self.collect_metadata: return content return self.FRONT_MATTER_REGEX.sub("", content) [docs] def load(self) -> List[Document]: """Load documents.""" ps = list(Path(self.file_path).glob("**/*.md")) docs = [] for p in ps: with open(p, encoding=self.encoding) as f: text = f.read() front_matter = self._parse_front_matter(text) text = self._remove_front_matter(text) metadata = { "source": str(p.name), "path": str(p), "created": p.stat().st_ctime, "last_modified": p.stat().st_mtime, "last_accessed": p.stat().st_atime, **front_matter, } docs.append(Document(page_content=text, metadata=metadata)) return docs
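A usage sketch; the vault path is hypothetical. Front matter keys become document metadata when collect_metadata is left enabled.

from langchain.document_loaders.obsidian import ObsidianLoader

loader = ObsidianLoader("obsidian_vault/")  # hypothetical vault path
docs = loader.load()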
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/obsidian.html
df52a925-eb6f-4265-9f79-c3d472d64b81
Source code for langchain.document_loaders.mediawikidump """Load Data from a MediaWiki dump xml.""" from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class MWDumpLoader(BaseLoader): """ Load MediaWiki dump from XML file Example: .. code-block:: python from langchain.document_loaders import MWDumpLoader loader = MWDumpLoader( file_path="myWiki.xml", encoding="utf8" ) docs = loader.load() from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=0 ) texts = text_splitter.split_documents(docs) :param file_path: XML local file path :type file_path: str :param encoding: Charset encoding, defaults to "utf8" :type encoding: str, optional """ def __init__(self, file_path: str, encoding: Optional[str] = "utf8"): """Initialize with file path.""" self.file_path = file_path self.encoding = encoding [docs] def load(self) -> List[Document]: """Load from file path.""" import mwparserfromhell import mwxml dump = mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding)) docs = [] for page in dump.pages: for revision in page: code = mwparserfromhell.parse(revision.text) text = code.strip_code( normalize=True, collapse=True, keep_template_params=False ) metadata = {"source": page.title} docs.append(Document(page_content=text, metadata=metadata)) return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mediawikidump.html
e63bfc55-a145-4858-acc7-965c1b8256ef
Source code for langchain.document_loaders.recursive_url_loader from typing import Iterator, List, Optional, Set from urllib.parse import urlparse import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class RecursiveUrlLoader(BaseLoader): """Loader that loads all child links from a given url.""" def __init__(self, url: str, exclude_dirs: Optional[str] = None) -> None: """Initialize with URL to crawl and any sub-directories to exclude.""" self.url = url self.exclude_dirs = exclude_dirs [docs] def get_child_links_recursive( self, url: str, visited: Optional[Set[str]] = None ) -> Set[str]: """Recursively get all child links starting with the path of the input URL.""" try: from bs4 import BeautifulSoup except ImportError: raise ImportError( "The BeautifulSoup package is required for the RecursiveUrlLoader." ) # Construct the base and parent URLs parsed_url = urlparse(url) base_url = f"{parsed_url.scheme}://{parsed_url.netloc}" parent_url = "/".join(parsed_url.path.split("/")[:-1]) current_path = parsed_url.path # Add a trailing slash if not present if not base_url.endswith("/"): base_url += "/" if not parent_url.endswith("/"): parent_url += "/" # Exclude the root and parent from list visited = set() if visited is None else visited # Exclude the links that start with any of the excluded directories if self.exclude_dirs and any( url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs ): return visited # Get all links that are relative to the root of the website response = requests.get(url) soup = BeautifulSoup(response.text, "html.parser") all_links = [link.get("href") for link in soup.find_all("a")] # Extract only the links that are children of the current URL child_links = list( { link for link in all_links if link and link.startswith(current_path) and link != current_path } ) # Get absolute path for all root relative links listed absolute_paths = [ f"{urlparse(base_url).scheme}://{urlparse(base_url).netloc}{link}" for link in child_links ] # Store the visited links and recursively visit the children for link in absolute_paths: # Check all unvisited links if link not in visited: visited.add(link) # If the link is a directory (w/ children) then visit it if link.endswith("/"): visited.update(self.get_child_links_recursive(link, visited)) return visited [docs] def lazy_load(self) -> Iterator[Document]: from langchain.document_loaders import WebBaseLoader """Lazy load web pages.""" child_links = self.get_child_links_recursive(self.url) loader = WebBaseLoader(list(child_links)) return loader.lazy_load() [docs] def load(self) -> List[Document]: """Load web pages.""" return list(self.lazy_load())
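A usage sketch; the URL is an illustrative documentation site, and only child links under the given path are crawled.

from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader

loader = RecursiveUrlLoader(url="https://docs.python.org/3.9/")  # illustrative site
docs = loader.load()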
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
8bd45b67-0561-45b6-b806-71c21cdb5299
Source code for langchain.document_loaders.json_loader """Loader that loads data from JSON.""" import json from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class JSONLoader(BaseLoader): """Loads a JSON file and references a jq schema provided to load the text into documents. Example: [{"text": ...}, {"text": ...}, {"text": ...}] -> schema = .[].text {"key": [{"text": ...}, {"text": ...}, {"text": ...}]} -> schema = .key[].text ["", "", ""] -> schema = .[] """ def __init__( self, file_path: Union[str, Path], jq_schema: str, content_key: Optional[str] = None, metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None, text_content: bool = True, ): """Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON file. jq_schema (str): The jq schema to use to extract the data or text from the JSON. content_key (str): The key to use to extract the content from the JSON if the jq_schema results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicates whether the content is in string format, default to True """ try: import jq # noqa:F401 except ImportError: raise ImportError( "jq package not found, please install it with `pip install jq`" ) self.file_path = Path(file_path).resolve() self._jq_schema = jq.compile(jq_schema) self._content_key = content_key self._metadata_func = metadata_func self._text_content = text_content [docs] def load(self) -> List[Document]: """Load and return documents from the JSON file.""" data = self._jq_schema.input(json.loads(self.file_path.read_text())) # Perform some validation # This is not a perfect validation, but it should catch most cases # and prevent the user from getting a cryptic error later on. if self._content_key is not None: self._validate_content_key(data) docs = [] for i, sample in enumerate(data, 1): metadata = dict( source=str(self.file_path), seq_num=i, ) text = self._get_text(sample=sample, metadata=metadata) docs.append(Document(page_content=text, metadata=metadata)) return docs def _get_text(self, sample: Any, metadata: dict) -> str: """Convert sample to string format""" if self._content_key is not None: content = sample.get(self._content_key) if self._metadata_func is not None: # We pass in the metadata dict to the metadata_func # so that the user can customize the default metadata # based on the content of the JSON object. metadata = self._metadata_func(sample, metadata) else: content = sample if self._text_content and not isinstance(content, str): raise ValueError( f"Expected page_content is string, got {type(content)} instead. 
\ Set `text_content=False` if the desired input for \ `page_content` is not a string" ) # In case the text is None, set it to an empty string elif isinstance(content, str): return content elif isinstance(content, dict): return json.dumps(content) if content else "" else: return str(content) if content is not None else "" def _validate_content_key(self, data: Any) -> None: """Check if content key is valid""" sample = data.first() if not isinstance(sample, dict): raise ValueError( f"Expected the jq schema to result in a list of objects (dict), \ so sample must be a dict but got `{type(sample)}`" ) if sample.get(self._content_key) is None: raise ValueError( f"Expected the jq schema to result in a list of objects (dict) \ with the key `{self._content_key}`" ) if self._metadata_func is not None: sample_metadata = self._metadata_func(sample, {}) if not isinstance(sample_metadata, dict): raise ValueError( f"Expected the metadata_func to return a dict but got \ `{type(sample_metadata)}`" )
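A usage sketch, assuming a hypothetical chat export shaped like {"messages": [{"content": ...}, ...]}; the jq package must be installed.

from langchain.document_loaders.json_loader import JSONLoader

loader = JSONLoader(
    file_path="chat.json",  # hypothetical file
    jq_schema=".messages[].content",  # pull the content field of each message
)
docs = loader.load()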
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/json_loader.html
3e7c987d-7119-4f14-ad0e-ee89d1893b75
Source code for langchain.document_loaders.trello """Loader that loads cards from Trello""" from __future__ import annotations from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env if TYPE_CHECKING: from trello import Board, Card, TrelloClient [docs]class TrelloLoader(BaseLoader): """Trello loader. Reads all cards from a Trello board.""" def __init__( self, client: TrelloClient, board_name: str, *, include_card_name: bool = True, include_comments: bool = True, include_checklist: bool = True, card_filter: Literal["closed", "open", "all"] = "all", extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"), ): """Initialize Trello loader. Args: client: Trello API client. board_name: The name of the Trello board. include_card_name: Whether to include the name of the card in the document. include_comments: Whether to include the comments on the card in the document. include_checklist: Whether to include the checklist on the card in the document. card_filter: Filter on card status. Valid values are "closed", "open", "all". extra_metadata: List of additional metadata fields to include as document metadata.Valid values are "due_date", "labels", "list", "closed". """ self.client = client self.board_name = board_name self.include_card_name = include_card_name self.include_comments = include_comments self.include_checklist = include_checklist self.extra_metadata = extra_metadata self.card_filter = card_filter [docs] @classmethod def from_credentials( cls, board_name: str, *, api_key: Optional[str] = None, token: Optional[str] = None, **kwargs: Any, ) -> TrelloLoader: """Convenience constructor that builds TrelloClient init param for you. Args: board_name: The name of the Trello board. api_key: Trello API key. Can also be specified as environment variable TRELLO_API_KEY. token: Trello token. Can also be specified as environment variable TRELLO_TOKEN. include_card_name: Whether to include the name of the card in the document. include_comments: Whether to include the comments on the card in the document. include_checklist: Whether to include the checklist on the card in the document. card_filter: Filter on card status. Valid values are "closed", "open", "all". extra_metadata: List of additional metadata fields to include as document metadata.Valid values are "due_date", "labels", "list", "closed". """ try: from trello import TrelloClient # type: ignore except ImportError as ex: raise ImportError( "Could not import trello python package. " "Please install it with `pip install py-trello`." ) from ex api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY") token = token or get_from_env("token", "TRELLO_TOKEN") client = TrelloClient(api_key=api_key, token=token) return cls(client, board_name, **kwargs) [docs] def load(self) -> List[Document]: """Loads all cards from the specified Trello board. You can filter the cards, metadata and text included by using the optional parameters. Returns: A list of documents, one for each card in the board. 
""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError as ex: raise ImportError( "`beautifulsoup4` package not found, please run" " `pip install beautifulsoup4`" ) from ex board = self._get_board() # Create a dictionary with the list IDs as keys and the list names as values list_dict = {list_item.id: list_item.name for list_item in board.list_lists()} # Get Cards on the board cards = board.get_cards(card_filter=self.card_filter) return [self._card_to_doc(card, list_dict) for card in cards] def _get_board(self) -> Board: # Find the first board with a matching name board = next( (b for b in self.client.list_boards() if b.name == self.board_name), None ) if not board: raise ValueError(f"Board `{self.board_name}` not found.") return board def _card_to_doc(self, card: Card, list_dict: dict) -> Document: from bs4 import BeautifulSoup # type: ignore text_content = "" if self.include_card_name: text_content = card.name + "\n" if card.description.strip(): text_content += BeautifulSoup(card.description, "lxml").get_text() if self.include_checklist: # Get all the checklist items on the card for checklist in card.checklists: if checklist.items: items = [ f"{item['name']}:{item['state']}" for item in checklist.items ] text_content += f"\n{checklist.name}\n" + "\n".join(items) if self.include_comments: # Get all the comments on the card comments = [ BeautifulSoup(comment["data"]["text"], "lxml").get_text() for comment in card.comments ] text_content += "Comments:" + "\n".join(comments) # Default metadata fields metadata = { "title": card.name, "id": card.id, "url": card.url, } # Extra metadata fields. Card object is not subscriptable. if "labels" in self.extra_metadata: metadata["labels"] = [label.name for label in card.labels] if "list" in self.extra_metadata: if card.list_id in list_dict: metadata["list"] = list_dict[card.list_id] if "closed" in self.extra_metadata: metadata["closed"] = card.closed if "due_date" in self.extra_metadata: metadata["due_date"] = card.due_date return Document(page_content=text_content, metadata=metadata)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/trello.html
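A usage sketch for the loader above, using the convenience constructor; the board name is hypothetical and credentials are read from the TRELLO_API_KEY and TRELLO_TOKEN environment variables.

from langchain.document_loaders.trello import TrelloLoader

loader = TrelloLoader.from_credentials(
    "Product Roadmap",  # hypothetical board name
    card_filter="open",
    extra_metadata=("labels", "list"),
)
docs = loader.load()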
383ee7a8-ae53-4fda-b8e4-c7c0bcb3189b
Source code for langchain.document_loaders.acreom """Loader that loads acreom vault from a directory.""" import re from pathlib import Path from typing import Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class AcreomLoader(BaseLoader): FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL) def __init__( self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True ): """Initialize with path.""" self.file_path = path self.encoding = encoding self.collect_metadata = collect_metadata def _parse_front_matter(self, content: str) -> dict: """Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) front_matter = {} if match: lines = match.group(1).split("\n") for line in lines: if ":" in line: key, value = line.split(":", 1) front_matter[key.strip()] = value.strip() else: # Skip lines without a colon continue return front_matter def _remove_front_matter(self, content: str) -> str: """Remove front matter metadata from the given content.""" if not self.collect_metadata: return content return self.FRONT_MATTER_REGEX.sub("", content) def _process_acreom_content(self, content: str) -> str: # remove acreom specific elements from content that # do not contribute to the context of current document content = re.sub("\s*-\s\[\s\]\s.*|\s*\[\s\]\s.*", "", content) # rm tasks content = re.sub("#", "", content) # rm hashtags content = re.sub("\[\[.*?\]\]", "", content) # rm doclinks return content [docs] def lazy_load(self) -> Iterator[Document]: ps = list(Path(self.file_path).glob("**/*.md")) for p in ps: with open(p, encoding=self.encoding) as f: text = f.read() front_matter = self._parse_front_matter(text) text = self._remove_front_matter(text) text = self._process_acreom_content(text) metadata = { "source": str(p.name), "path": str(p), **front_matter, } yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: return list(self.lazy_load())
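A usage sketch; the vault path is hypothetical.

from langchain.document_loaders.acreom import AcreomLoader

loader = AcreomLoader("acreom_vault/", collect_metadata=True)  # hypothetical path
docs = loader.load()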
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/acreom.html
2237c06f-c152-4ff1-90d5-9d477d622f70
Source code for langchain.document_loaders.image
"""Loader that loads image files."""
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


[docs]class UnstructuredImageLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load image files, such as PNGs and JPGs."""

    def _get_elements(self) -> List:
        from unstructured.partition.image import partition_image

        return partition_image(filename=self.file_path, **self.unstructured_kwargs)
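A usage sketch; the image file is hypothetical and the unstructured image extras must be installed.

from langchain.document_loaders.image import UnstructuredImageLoader

loader = UnstructuredImageLoader("example.png", mode="elements")  # hypothetical file
docs = loader.load()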
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/image.html
c565ae8a-71dd-49ce-a4e8-d35cb6fedc6a
Source code for langchain.document_loaders.wikipedia from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.wikipedia import WikipediaAPIWrapper [docs]class WikipediaLoader(BaseLoader): """Loads a query result from www.wikipedia.org into a list of Documents. The hard limit on the number of downloaded Documents is 300 for now. Each wiki page represents one Document. """ def __init__( self, query: str, lang: str = "en", load_max_docs: Optional[int] = 100, load_all_available_meta: Optional[bool] = False, doc_content_chars_max: Optional[int] = 4000, ): """ Initializes a new instance of the WikipediaLoader class. Args: query (str): The query string to search on Wikipedia. lang (str, optional): The language code for the Wikipedia language edition. Defaults to "en". load_max_docs (int, optional): The maximum number of documents to load. Defaults to 100. load_all_available_meta (bool, optional): Indicates whether to load all available metadata for each document. Defaults to False. doc_content_chars_max (int, optional): The maximum number of characters for the document content. Defaults to 4000. """ self.query = query self.lang = lang self.load_max_docs = load_max_docs self.load_all_available_meta = load_all_available_meta self.doc_content_chars_max = doc_content_chars_max [docs] def load(self) -> List[Document]: """ Loads the query result from Wikipedia into a list of Documents. Returns: List[Document]: A list of Document objects representing the loaded Wikipedia pages. """ client = WikipediaAPIWrapper( lang=self.lang, top_k_results=self.load_max_docs, load_all_available_meta=self.load_all_available_meta, doc_content_chars_max=self.doc_content_chars_max, ) docs = client.load(self.query) return docs
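A usage sketch; the query is an arbitrary example.

from langchain.document_loaders.wikipedia import WikipediaLoader

loader = WikipediaLoader(query="Large language model", load_max_docs=2)
docs = loader.load()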
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/wikipedia.html
a9546abb-5edb-4c2a-b6c1-2779cc74b9da
Source code for langchain.document_loaders.imsdb
"""Loader that loads IMSDb."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


[docs]class IMSDbLoader(WebBaseLoader):
    """Loader that loads IMSDb webpages."""

[docs]    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        text = soup.select_one("td[class='scrtext']").text
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]
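A usage sketch; the script URL is an illustrative IMSDb page.

from langchain.document_loaders.imsdb import IMSDbLoader

loader = IMSDbLoader("https://imsdb.com/scripts/BlacKkKlansman.html")  # illustrative page
docs = loader.load()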
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/imsdb.html
427c3033-fa67-4805-92cd-b54ada68aa32
Source code for langchain.document_loaders.excel
"""Loader that loads Microsoft Excel files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


[docs]class UnstructuredExcelLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load Microsoft Excel files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.7")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.xlsx import partition_xlsx

        return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
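A usage sketch; the workbook name is hypothetical and unstructured>=0.6.7 is required.

from langchain.document_loaders.excel import UnstructuredExcelLoader

loader = UnstructuredExcelLoader("sales.xlsx", mode="elements")  # hypothetical file
docs = loader.load()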
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/excel.html
4e2190e6-d675-436c-95e1-4856c5e7465a
Source code for langchain.document_loaders.unstructured """Loader that uses unstructured to load files.""" import collections from abc import ABC, abstractmethod from typing import IO, Any, Dict, List, Sequence, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader def satisfies_min_unstructured_version(min_version: str) -> bool: """Checks to see if the installed unstructured version exceeds the minimum version for the feature in question.""" from unstructured.__version__ import __version__ as __unstructured_version__ min_version_tuple = tuple([int(x) for x in min_version.split(".")]) # NOTE(MthwRobinson) - enables the loader to work when you're using pre-release # versions of unstructured like 0.4.17-dev1 _unstructured_version = __unstructured_version__.split("-")[0] unstructured_version_tuple = tuple( [int(x) for x in _unstructured_version.split(".")] ) return unstructured_version_tuple >= min_version_tuple def validate_unstructured_version(min_unstructured_version: str) -> None: """Raises an error if the unstructured version does not exceed the specified minimum.""" if not satisfies_min_unstructured_version(min_unstructured_version): raise ValueError( f"unstructured>={min_unstructured_version} is required in this loader." ) class UnstructuredBaseLoader(BaseLoader, ABC): """Loader that uses unstructured to load files.""" def __init__(self, mode: str = "single", **unstructured_kwargs: Any): """Initialize with file path.""" try: import unstructured # noqa:F401 except ImportError: raise ValueError( "unstructured package not found, please install it with " "`pip install unstructured`" ) _valid_modes = {"single", "elements", "paged"} if mode not in _valid_modes: raise ValueError( f"Got {mode} for `mode`, but should be one of `{_valid_modes}`" ) self.mode = mode if not satisfies_min_unstructured_version("0.5.4"): if "strategy" in unstructured_kwargs: unstructured_kwargs.pop("strategy") self.unstructured_kwargs = unstructured_kwargs @abstractmethod def _get_elements(self) -> List: """Get elements.""" @abstractmethod def _get_metadata(self) -> dict: """Get metadata.""" def load(self) -> List[Document]: """Load file.""" elements = self._get_elements() if self.mode == "elements": docs: List[Document] = list() for element in elements: metadata = self._get_metadata() # NOTE(MthwRobinson) - the attribute check is for backward compatibility # with unstructured<0.4.9. The metadata attributed was added in 0.4.9. 
if hasattr(element, "metadata"): metadata.update(element.metadata.to_dict()) if hasattr(element, "category"): metadata["category"] = element.category docs.append(Document(page_content=str(element), metadata=metadata)) elif self.mode == "paged": text_dict: Dict[int, str] = {} meta_dict: Dict[int, Dict] = {} for idx, element in enumerate(elements): metadata = self._get_metadata() if hasattr(element, "metadata"): metadata.update(element.metadata.to_dict()) page_number = metadata.get("page_number", 1) # Check if this page_number already exists in docs_dict if page_number not in text_dict: # If not, create new entry with initial text and metadata text_dict[page_number] = str(element) + "\n\n" meta_dict[page_number] = metadata else: # If exists, append to text and update the metadata text_dict[page_number] += str(element) + "\n\n" meta_dict[page_number].update(metadata) # Convert the dict to a list of Document objects docs = [ Document(page_content=text_dict[key], metadata=meta_dict[key]) for key in text_dict.keys() ] elif self.mode == "single": metadata = self._get_metadata() text = "\n\n".join([str(el) for el in elements]) docs = [Document(page_content=text, metadata=metadata)] else: raise ValueError(f"mode of {self.mode} not supported.") return docs [docs]class UnstructuredFileLoader(UnstructuredBaseLoader): """Loader that uses unstructured to load files.""" def __init__( self, file_path: Union[str, List[str]], mode: str = "single", **unstructured_kwargs: Any, ): """Initialize with file path.""" self.file_path = file_path super().__init__(mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.auto import partition return partition(filename=self.file_path, **self.unstructured_kwargs) def _get_metadata(self) -> dict: return {"source": self.file_path} def get_elements_from_api( file_path: Union[str, List[str], None] = None, file: Union[IO, Sequence[IO], None] = None, api_url: str = "https://api.unstructured.io/general/v0/general", api_key: str = "", **unstructured_kwargs: Any, ) -> List: """Retrieves a list of elements from the Unstructured API.""" if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list): from unstructured.partition.api import partition_multiple_via_api _doc_elements = partition_multiple_via_api( filenames=file_path, files=file, api_key=api_key, api_url=api_url, **unstructured_kwargs, ) elements = [] for _elements in _doc_elements: elements.extend(_elements) return elements else: from unstructured.partition.api import partition_via_api return partition_via_api( filename=file_path, file=file, api_key=api_key, api_url=api_url, **unstructured_kwargs, ) [docs]class UnstructuredAPIFileLoader(UnstructuredFileLoader): """Loader that uses the unstructured web API to load files.""" def __init__( self, file_path: Union[str, List[str]] = "", mode: str = "single", url: str = "https://api.unstructured.io/general/v0/general", api_key: str = "", **unstructured_kwargs: Any, ): """Initialize with file path.""" if isinstance(file_path, str): validate_unstructured_version(min_unstructured_version="0.6.2") else: validate_unstructured_version(min_unstructured_version="0.6.3") self.url = url self.api_key = api_key super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_metadata(self) -> dict: return {"source": self.file_path} def _get_elements(self) -> List: return get_elements_from_api( file_path=self.file_path, api_key=self.api_key, api_url=self.url, **self.unstructured_kwargs, ) [docs]class 
UnstructuredFileIOLoader(UnstructuredBaseLoader): """Loader that uses unstructured to load file IO objects.""" def __init__( self, file: Union[IO, Sequence[IO]], mode: str = "single", **unstructured_kwargs: Any, ): """Initialize with file path.""" self.file = file super().__init__(mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.auto import partition return partition(file=self.file, **self.unstructured_kwargs) def _get_metadata(self) -> dict: return {} [docs]class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader): """Loader that uses the unstructured web API to load file IO objects.""" def __init__( self, file: Union[IO, Sequence[IO]], mode: str = "single", url: str = "https://api.unstructured.io/general/v0/general", api_key: str = "", **unstructured_kwargs: Any, ): """Initialize with file path.""" if isinstance(file, collections.abc.Sequence): validate_unstructured_version(min_unstructured_version="0.6.3") if file: validate_unstructured_version(min_unstructured_version="0.6.2") self.url = url self.api_key = api_key super().__init__(file=file, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: return get_elements_from_api( file=self.file, api_key=self.api_key, api_url=self.url, **self.unstructured_kwargs, )
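A usage sketch of the local file loader above; the file name is hypothetical, and mode="elements" keeps one Document per element detected by unstructured.

from langchain.document_loaders.unstructured import UnstructuredFileLoader

loader = UnstructuredFileLoader("state_of_the_union.txt", mode="elements")  # hypothetical file
docs = loader.load()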
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/unstructured.html
a073ef28-cb8e-4045-b110-68e730facce9
Source code for langchain.document_loaders.word_document """Loader that loads word documents.""" import os import tempfile from abc import ABC from typing import List from urllib.parse import urlparse import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class Docx2txtLoader(BaseLoader, ABC): """Loads a DOCX with docx2txt and chunks at character level. Defaults to check for local file, but if the file is a web path, it will download it to a temporary file, and use that, then clean up the temporary file after completion """ def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path if "~" in self.file_path: self.file_path = os.path.expanduser(self.file_path) # If the file is a web path, download it to a temporary file, and use that if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path): r = requests.get(self.file_path) if r.status_code != 200: raise ValueError( "Check the url of your file; returned status code %s" % r.status_code ) self.web_path = self.file_path self.temp_file = tempfile.NamedTemporaryFile() self.temp_file.write(r.content) self.file_path = self.temp_file.name elif not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file or url" % self.file_path) def __del__(self) -> None: if hasattr(self, "temp_file"): self.temp_file.close() [docs] def load(self) -> List[Document]: """Load given path as single page.""" import docx2txt return [ Document( page_content=docx2txt.process(self.file_path), metadata={"source": self.file_path}, ) ] @staticmethod def _is_valid_url(url: str) -> bool: """Check if the url is valid.""" parsed = urlparse(url) return bool(parsed.netloc) and bool(parsed.scheme) [docs]class UnstructuredWordDocumentLoader(UnstructuredFileLoader): """Loader that uses unstructured to load word documents.""" def _get_elements(self) -> List: from unstructured.__version__ import __version__ as __unstructured_version__ from unstructured.file_utils.filetype import FileType, detect_filetype unstructured_version = tuple( [int(x) for x in __unstructured_version__.split(".")] ) # NOTE(MthwRobinson) - magic will raise an import error if the libmagic # system dependency isn't installed. If it's not installed, we'll just # check the file extension try: import magic # noqa: F401 is_doc = detect_filetype(self.file_path) == FileType.DOC except ImportError: _, extension = os.path.splitext(str(self.file_path)) is_doc = extension == ".doc" if is_doc and unstructured_version < (0, 4, 11): raise ValueError( f"You are on unstructured version {__unstructured_version__}. " "Partitioning .doc files is only supported in unstructured>=0.4.11. " "Please upgrade the unstructured package and try again." ) if is_doc: from unstructured.partition.doc import partition_doc return partition_doc(filename=self.file_path, **self.unstructured_kwargs) else: from unstructured.partition.docx import partition_docx return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
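A usage sketch of both loaders in this module; the file name is hypothetical.

from langchain.document_loaders.word_document import (
    Docx2txtLoader,
    UnstructuredWordDocumentLoader,
)

plain_docs = Docx2txtLoader("example.docx").load()  # hypothetical file
element_docs = UnstructuredWordDocumentLoader("example.docx", mode="elements").load()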
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/word_document.html
e88ee38d-d58d-468a-a05e-44fa42e6727c
Source code for langchain.document_loaders.blockchain import os import re import time from enum import Enum from typing import List, Optional import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader class BlockchainType(Enum): """Enumerator of the supported blockchains.""" ETH_MAINNET = "eth-mainnet" ETH_GOERLI = "eth-goerli" POLYGON_MAINNET = "polygon-mainnet" POLYGON_MUMBAI = "polygon-mumbai" [docs]class BlockchainDocumentLoader(BaseLoader): """Loads elements from a blockchain smart contract into Langchain documents. The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet, Polygon mainnet, and Polygon Mumbai testnet. If no BlockchainType is specified, the default is Ethereum mainnet. The Loader uses the Alchemy API to interact with the blockchain. ALCHEMY_API_KEY environment variable must be set to use this loader. The API returns 100 NFTs per request and can be paginated using the startToken parameter. If get_all_tokens is set to True, the loader will get all tokens on the contract. Note that for contracts with a large number of tokens, this may take a long time (e.g. 10k tokens is 100 requests). Default value is false for this reason. The max_execution_time (sec) can be set to limit the execution time of the loader. Future versions of this loader can: - Support additional Alchemy APIs (e.g. getTransactions, etc.) - Support additional blockain APIs (e.g. Infura, Opensea, etc.) """ def __init__( self, contract_address: str, blockchainType: BlockchainType = BlockchainType.ETH_MAINNET, api_key: str = "docs-demo", startToken: str = "", get_all_tokens: bool = False, max_execution_time: Optional[int] = None, ): self.contract_address = contract_address self.blockchainType = blockchainType.value self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key self.startToken = startToken self.get_all_tokens = get_all_tokens self.max_execution_time = max_execution_time if not self.api_key: raise ValueError("Alchemy API key not provided.") if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address): raise ValueError(f"Invalid contract address {self.contract_address}") [docs] def load(self) -> List[Document]: result = [] current_start_token = self.startToken start_time = time.time() while True: url = ( f"https://{self.blockchainType}.g.alchemy.com/nft/v2/" f"{self.api_key}/getNFTsForCollection?withMetadata=" f"True&contractAddress={self.contract_address}" f"&startToken={current_start_token}" ) response = requests.get(url) if response.status_code != 200: raise ValueError( f"Request failed with status code {response.status_code}" ) items = response.json()["nfts"] if not items: break for item in items: content = str(item) tokenId = item["id"]["tokenId"] metadata = { "source": self.contract_address, "blockchain": self.blockchainType, "tokenId": tokenId, } result.append(Document(page_content=content, metadata=metadata)) # exit after the first API call if get_all_tokens is False if not self.get_all_tokens: break # get the start token for the next API call from the last item in array current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"]) if ( self.max_execution_time is not None and (time.time() - start_time) > self.max_execution_time ): raise RuntimeError("Execution time exceeded the allowed time limit.") if not result: raise ValueError( f"No NFTs found for contract address {self.contract_address}" ) return result # add one to the tokenId, ensuring the correct tokenId format is used def 
_get_next_tokenId(self, tokenId: str) -> str: value_type = self._detect_value_type(tokenId) if value_type == "hex_0x": value_int = int(tokenId, 16) elif value_type == "hex_0xbf": value_int = int(tokenId[2:], 16) else: value_int = int(tokenId) result = value_int + 1 if value_type == "hex_0x": return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x") elif value_type == "hex_0xbf": return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x") else: return str(result) # A smart contract can use different formats for the tokenId @staticmethod def _detect_value_type(tokenId: str) -> str: if isinstance(tokenId, int): return "int" elif tokenId.startswith("0x"): return "hex_0x" elif tokenId.startswith("0xbf"): return "hex_0xbf" else: return "hex_0xbf"
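A usage sketch; the contract address is an all-zero placeholder, and a real Alchemy API key is expected in the ALCHEMY_API_KEY environment variable.

from langchain.document_loaders.blockchain import (
    BlockchainDocumentLoader,
    BlockchainType,
)

loader = BlockchainDocumentLoader(
    contract_address="0x0000000000000000000000000000000000000000",  # placeholder
    blockchainType=BlockchainType.ETH_MAINNET,
    get_all_tokens=False,  # stop after the first page of 100 NFTs
)
docs = loader.load()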
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blockchain.html
444806d7-288c-460d-b8d1-b71ed0ad11c9
Source code for langchain.document_loaders.evernote """Load documents from Evernote. https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c """ import hashlib import logging from base64 import b64decode from time import strptime from typing import Any, Dict, Iterator, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class EverNoteLoader(BaseLoader): """EverNote Loader. Loads an EverNote notebook export file e.g. my_notebook.enex into Documents. Instructions on producing this file can be found at https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML Currently only the plain text in the note is extracted and stored as the contents of the Document, any non content metadata (e.g. 'author', 'created', 'updated' etc. but not 'content-raw' or 'resource') tags on the note will be extracted and stored as metadata on the Document. Args: file_path (str): The path to the notebook export with a .enex extension load_single_document (bool): Whether or not to concatenate the content of all notes into a single long Document. If this is set to True (default) then the only metadata on the document will be the 'source' which contains the file name of the export. """ # noqa: E501 def __init__(self, file_path: str, load_single_document: bool = True): """Initialize with file path.""" self.file_path = file_path self.load_single_document = load_single_document [docs] def load(self) -> List[Document]: """Load documents from EverNote export file.""" documents = [ Document( page_content=note["content"], metadata={ **{ key: value for key, value in note.items() if key not in ["content", "content-raw", "resource"] }, **{"source": self.file_path}, }, ) for note in self._parse_note_xml(self.file_path) if note.get("content") is not None ] if not self.load_single_document: return documents return [ Document( page_content="".join([document.page_content for document in documents]), metadata={"source": self.file_path}, ) ] @staticmethod def _parse_content(content: str) -> str: try: import html2text return html2text.html2text(content).strip() except ImportError as e: logging.error( "Could not import `html2text`. Although it is not a required package " "to use Langchain, using the EverNote loader requires `html2text`. " "Please install `html2text` via `pip install html2text` and try again." 
) raise e @staticmethod def _parse_resource(resource: list) -> dict: rsc_dict: Dict[str, Any] = {} for elem in resource: if elem.tag == "data": # Sometimes elem.text is None rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b"" rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest() else: rsc_dict[elem.tag] = elem.text return rsc_dict @staticmethod def _parse_note(note: List, prefix: Optional[str] = None) -> dict: note_dict: Dict[str, Any] = {} resources = [] def add_prefix(element_tag: str) -> str: if prefix is None: return element_tag return f"{prefix}.{element_tag}" for elem in note: if elem.tag == "content": note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text) # A copy of original content note_dict["content-raw"] = elem.text elif elem.tag == "resource": resources.append(EverNoteLoader._parse_resource(elem)) elif elem.tag == "created" or elem.tag == "updated": note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ") elif elem.tag == "note-attributes": additional_attributes = EverNoteLoader._parse_note( elem, elem.tag ) # Recursively enter the note-attributes tag note_dict.update(additional_attributes) else: note_dict[elem.tag] = elem.text if len(resources) > 0: note_dict["resource"] = resources return {add_prefix(key): value for key, value in note_dict.items()} @staticmethod def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]: """Parse Evernote xml.""" # Without huge_tree set to True, parser may complain about huge text node # Try to recover, because there may be "&nbsp;", which will cause # "XMLSyntaxError: Entity 'nbsp' not defined" try: from lxml import etree except ImportError as e: logging.error( "Could not import `lxml`. Although it is not a required package to use " "Langchain, using the EverNote loader requires `lxml`. Please install " "`lxml` via `pip install lxml` and try again." ) raise e context = etree.iterparse( xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True ) for action, elem in context: if elem.tag == "note": yield EverNoteLoader._parse_note(elem)
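A short usage sketch for the loader above; the export file name is a placeholder, and the optional html2text and lxml dependencies must be installed.

from langchain.document_loaders.evernote import EverNoteLoader

# "my_notebook.enex" is a placeholder path; requires `pip install lxml html2text`.
loader = EverNoteLoader("my_notebook.enex", load_single_document=False)
docs = loader.load()  # one Document per note, note attributes copied into metadata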
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html
7a1ff187-5d49-44ce-89c3-1d6aa44ecccb
Source code for langchain.document_loaders.srt
"""Loader for .srt (subtitle) files."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class SRTLoader(BaseLoader):
    """Loader for .srt (subtitle) files."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        try:
            import pysrt  # noqa:F401
        except ImportError:
            raise ImportError(
                "package `pysrt` not found, please install it with `pip install pysrt`"
            )
        self.file_path = file_path

    [docs] def load(self) -> List[Document]:
        """Load using pysrt file."""
        import pysrt

        parsed_info = pysrt.open(self.file_path)
        text = " ".join([t.text for t in parsed_info])
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
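A minimal usage sketch, assuming pysrt is installed and an .srt file exists at the placeholder path.

from langchain.document_loaders.srt import SRTLoader

# "example.srt" is a placeholder path; requires `pip install pysrt`.
loader = SRTLoader("example.srt")
docs = loader.load()  # a single Document with the concatenated subtitle text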
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/srt.html
695c2af4-39bc-4194-8c27-b79f15a59e9a
Source code for langchain.document_loaders.gutenberg """Loader that loads .txt web files.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class GutenbergLoader(BaseLoader): """Loader that uses urllib to load .txt web files.""" def __init__(self, file_path: str): """Initialize with file path.""" if not file_path.startswith("https://www.gutenberg.org"): raise ValueError("file path must start with 'https://www.gutenberg.org'") if not file_path.endswith(".txt"): raise ValueError("file path must end with '.txt'") self.file_path = file_path [docs] def load(self) -> List[Document]: """Load file.""" from urllib.request import urlopen elements = urlopen(self.file_path) text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements]) metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
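A minimal usage sketch; the ebook URL is a placeholder, but any plain-text URL that starts with https://www.gutenberg.org and ends in .txt passes the constructor's checks.

from langchain.document_loaders.gutenberg import GutenbergLoader

# Placeholder ebook URL; substitute the plain-text URL of the book you want.
loader = GutenbergLoader(
    "https://www.gutenberg.org/cache/epub/<ebook id>/pg<ebook id>.txt"
)
docs = loader.load()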
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gutenberg.html
b6081b38-60c4-4b4c-998f-a90fcde841a5
Source code for langchain.document_loaders.sitemap """Loader that fetches a sitemap and loads those URLs.""" import itertools import re from typing import Any, Callable, Generator, Iterable, List, Optional from langchain.document_loaders.web_base import WebBaseLoader from langchain.schema import Document def _default_parsing_function(content: Any) -> str: return str(content.get_text()) def _default_meta_function(meta: dict, _content: Any) -> dict: return {"source": meta["loc"], **meta} def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]: it = iter(iterable) while item := list(itertools.islice(it, size)): yield item [docs]class SitemapLoader(WebBaseLoader): """Loader that fetches a sitemap and loads those URLs.""" def __init__( self, web_path: str, filter_urls: Optional[List[str]] = None, parsing_function: Optional[Callable] = None, blocksize: Optional[int] = None, blocknum: int = 0, meta_function: Optional[Callable] = None, is_local: bool = False, ): """Initialize with webpage path and optional filter URLs. Args: web_path: url of the sitemap. can also be a local path filter_urls: list of strings or regexes that will be applied to filter the urls that are parsed and loaded parsing_function: Function to parse bs4.Soup output blocksize: number of sitemap locations per block blocknum: the number of the block that should be loaded - zero indexed meta_function: Function to parse bs4.Soup output for metadata remember when setting this method to also copy metadata["loc"] to metadata["source"] if you are using this field is_local: whether the sitemap is a local file """ if blocksize is not None and blocksize < 1: raise ValueError("Sitemap blocksize should be at least 1") if blocknum < 0: raise ValueError("Sitemap blocknum can not be lower then 0") try: import lxml # noqa:F401 except ImportError: raise ImportError( "lxml package not found, please install it with " "`pip install lxml`" ) super().__init__(web_path) self.filter_urls = filter_urls self.parsing_function = parsing_function or _default_parsing_function self.meta_function = meta_function or _default_meta_function self.blocksize = blocksize self.blocknum = blocknum self.is_local = is_local [docs] def parse_sitemap(self, soup: Any) -> List[dict]: """Parse sitemap xml and load into a list of dicts.""" els = [] for url in soup.find_all("url"): loc = url.find("loc") if not loc: continue # Strip leading and trailing whitespace and newlines loc_text = loc.text.strip() if self.filter_urls and not any( re.match(r, loc_text) for r in self.filter_urls ): continue els.append( { tag: prop.text for tag in ["loc", "lastmod", "changefreq", "priority"] if (prop := url.find(tag)) } ) for sitemap in soup.find_all("sitemap"): loc = sitemap.find("loc") if not loc: continue soup_child = self.scrape_all([loc.text], "xml")[0] els.extend(self.parse_sitemap(soup_child)) return els [docs] def load(self) -> List[Document]: """Load sitemap.""" if self.is_local: try: import bs4 except ImportError: raise ImportError( "beautifulsoup4 package not found, please install it" " with `pip install beautifulsoup4`" ) fp = open(self.web_path) soup = bs4.BeautifulSoup(fp, "xml") else: soup = self.scrape("xml") els = self.parse_sitemap(soup) if self.blocksize is not None: elblocks = list(_batch_block(els, self.blocksize)) blockcount = len(elblocks) if blockcount - 1 < self.blocknum: raise ValueError( "Selected sitemap does not contain enough blocks for given blocknum" ) else: els = elblocks[self.blocknum] results = self.scrape_all([el["loc"].strip() 
for el in els if "loc" in el]) return [ Document( page_content=self.parsing_function(results[i]), metadata=self.meta_function(els[i], results[i]), ) for i in range(len(results)) ]
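A hedged usage sketch for the loader above; the sitemap URL and the filter pattern are placeholders, and filter_urls entries are applied as regular expressions against each <loc> value.

from langchain.document_loaders.sitemap import SitemapLoader

# Placeholder sitemap URL and filter pattern; requires `pip install lxml`.
loader = SitemapLoader(
    web_path="https://example.com/sitemap.xml",
    filter_urls=["https://example.com/docs/.*"],
    blocksize=10,  # split the matched URLs into blocks of 10 ...
    blocknum=0,    # ... and scrape only the first block
)
docs = loader.load()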
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/sitemap.html
d42a6a02-ee35-4afb-987f-6c05f66db894
Source code for langchain.document_loaders.confluence """Load Data from a Confluence Space""" import logging from enum import Enum from io import BytesIO from typing import Any, Callable, Dict, List, Optional, Union from tenacity import ( before_sleep_log, retry, stop_after_attempt, wait_exponential, ) from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class ContentFormat(str, Enum): """Enumerator of the content formats of Confluence page.""" STORAGE = "body.storage" VIEW = "body.view" def get_content(self, page: dict) -> str: if self == ContentFormat.STORAGE: return page["body"]["storage"]["value"] elif self == ContentFormat.VIEW: return page["body"]["view"]["value"] raise ValueError("unknown content format") [docs]class ConfluenceLoader(BaseLoader): """ Load Confluence pages. Port of https://llamahub.ai/l/confluence This currently supports username/api_key, Oauth2 login or personal access token authentication. Specify a list page_ids and/or space_key to load in the corresponding pages into Document objects, if both are specified the union of both sets will be returned. You can also specify a boolean `include_attachments` to include attachments, this is set to False by default, if set to True all attachments will be downloaded and ConfluenceReader will extract the text from the attachments and add it to the Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG, SVG, Word and Excel. Confluence API supports difference format of page content. The storage format is the raw XML representation for storage. The view format is the HTML representation for viewing with macros are rendered as though it is viewed by users. You can pass a enum `content_format` argument to `load()` to specify the content format, this is set to `ContentFormat.STORAGE` by default. Hint: space_key and page_id can both be found in the URL of a page in Confluence - https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id> Example: .. code-block:: python from langchain.document_loaders import ConfluenceLoader loader = ConfluenceLoader( url="https://yoursite.atlassian.com/wiki", username="me", api_key="12345" ) documents = loader.load(space_key="SPACE",limit=50) :param url: _description_ :type url: str :param api_key: _description_, defaults to None :type api_key: str, optional :param username: _description_, defaults to None :type username: str, optional :param oauth2: _description_, defaults to {} :type oauth2: dict, optional :param token: _description_, defaults to None :type token: str, optional :param cloud: _description_, defaults to True :type cloud: bool, optional :param number_of_retries: How many times to retry, defaults to 3 :type number_of_retries: Optional[int], optional :param min_retry_seconds: defaults to 2 :type min_retry_seconds: Optional[int], optional :param max_retry_seconds: defaults to 10 :type max_retry_seconds: Optional[int], optional :param confluence_kwargs: additional kwargs to initialize confluence with :type confluence_kwargs: dict, optional :raises ValueError: Errors while validating input :raises ImportError: Required dependencies not installed. 
""" def __init__( self, url: str, api_key: Optional[str] = None, username: Optional[str] = None, oauth2: Optional[dict] = None, token: Optional[str] = None, cloud: Optional[bool] = True, number_of_retries: Optional[int] = 3, min_retry_seconds: Optional[int] = 2, max_retry_seconds: Optional[int] = 10, confluence_kwargs: Optional[dict] = None, ): confluence_kwargs = confluence_kwargs or {} errors = ConfluenceLoader.validate_init_args( url, api_key, username, oauth2, token ) if errors: raise ValueError(f"Error(s) while validating input: {errors}") self.base_url = url self.number_of_retries = number_of_retries self.min_retry_seconds = min_retry_seconds self.max_retry_seconds = max_retry_seconds try: from atlassian import Confluence # noqa: F401 except ImportError: raise ImportError( "`atlassian` package not found, please run " "`pip install atlassian-python-api`" ) if oauth2: self.confluence = Confluence( url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs ) elif token: self.confluence = Confluence( url=url, token=token, cloud=cloud, **confluence_kwargs ) else: self.confluence = Confluence( url=url, username=username, password=api_key, cloud=cloud, **confluence_kwargs, ) [docs] @staticmethod def validate_init_args( url: Optional[str] = None, api_key: Optional[str] = None, username: Optional[str] = None, oauth2: Optional[dict] = None, token: Optional[str] = None, ) -> Union[List, None]: """Validates proper combinations of init arguments""" errors = [] if url is None: errors.append("Must provide `base_url`") if (api_key and not username) or (username and not api_key): errors.append( "If one of `api_key` or `username` is provided, " "the other must be as well." ) if (api_key or username) and oauth2: errors.append( "Cannot provide a value for `api_key` and/or " "`username` and provide a value for `oauth2`" ) if oauth2 and oauth2.keys() != [ "access_token", "access_token_secret", "consumer_key", "key_cert", ]: errors.append( "You have either ommited require keys or added extra " "keys to the oauth2 dictionary. 
key values should be " "`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`" ) if token and (api_key or username or oauth2): errors.append( "Cannot provide a value for `token` and a value for `api_key`, " "`username` or `oauth2`" ) if errors: return errors return None [docs] def load( self, space_key: Optional[str] = None, page_ids: Optional[List[str]] = None, label: Optional[str] = None, cql: Optional[str] = None, include_restricted_content: bool = False, include_archived_content: bool = False, include_attachments: bool = False, include_comments: bool = False, content_format: ContentFormat = ContentFormat.STORAGE, limit: Optional[int] = 50, max_pages: Optional[int] = 1000, ocr_languages: Optional[str] = None, ) -> List[Document]: """ :param space_key: Space key retrieved from a confluence URL, defaults to None :type space_key: Optional[str], optional :param page_ids: List of specific page IDs to load, defaults to None :type page_ids: Optional[List[str]], optional :param label: Get all pages with this label, defaults to None :type label: Optional[str], optional :param cql: CQL Expression, defaults to None :type cql: Optional[str], optional :param include_restricted_content: defaults to False :type include_restricted_content: bool, optional :param include_archived_content: Whether to include archived content, defaults to False :type include_archived_content: bool, optional :param include_attachments: defaults to False :type include_attachments: bool, optional :param include_comments: defaults to False :type include_comments: bool, optional :param content_format: Specify content format, defaults to ContentFormat.STORAGE :type content_format: ContentFormat :param limit: Maximum number of pages to retrieve per request, defaults to 50 :type limit: int, optional :param max_pages: Maximum number of pages to retrieve in total, defaults 1000 :type max_pages: int, optional :param ocr_languages: The languages to use for the Tesseract agent. To use a language, you'll first need to install the appropriate Tesseract language pack. :type ocr_languages: str, optional :raises ValueError: _description_ :raises ImportError: _description_ :return: _description_ :rtype: List[Document] """ if not space_key and not page_ids and not label and not cql: raise ValueError( "Must specify at least one among `space_key`, `page_ids`, " "`label`, `cql` parameters." 
) docs = [] if space_key: pages = self.paginate_request( self.confluence.get_all_pages_from_space, space=space_key, limit=limit, max_pages=max_pages, status="any" if include_archived_content else "current", expand=content_format.value, ) docs += self.process_pages( pages, include_restricted_content, include_attachments, include_comments, content_format, ocr_languages, ) if label: pages = self.paginate_request( self.confluence.get_all_pages_by_label, label=label, limit=limit, max_pages=max_pages, ) ids_by_label = [page["id"] for page in pages] if page_ids: page_ids = list(set(page_ids + ids_by_label)) else: page_ids = list(set(ids_by_label)) if cql: pages = self.paginate_request( self._search_content_by_cql, cql=cql, limit=limit, max_pages=max_pages, include_archived_spaces=include_archived_content, expand=content_format.value, ) docs += self.process_pages( pages, include_restricted_content, include_attachments, include_comments, content_format, ocr_languages, ) if page_ids: for page_id in page_ids: get_page = retry( reraise=True, stop=stop_after_attempt( self.number_of_retries # type: ignore[arg-type] ), wait=wait_exponential( multiplier=1, # type: ignore[arg-type] min=self.min_retry_seconds, # type: ignore[arg-type] max=self.max_retry_seconds, # type: ignore[arg-type] ), before_sleep=before_sleep_log(logger, logging.WARNING), )(self.confluence.get_page_by_id) page = get_page(page_id=page_id, expand=content_format.value) if not include_restricted_content and not self.is_public_page(page): continue doc = self.process_page( page, include_attachments, include_comments, content_format, ocr_languages, ) docs.append(doc) return docs def _search_content_by_cql( self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any ) -> List[dict]: url = "rest/api/content/search" params: Dict[str, Any] = {"cql": cql} params.update(kwargs) if include_archived_spaces is not None: params["includeArchivedSpaces"] = include_archived_spaces response = self.confluence.get(url, params=params) return response.get("results", []) [docs] def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List: """Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" values from the "_links" key because they only return the value from the results key. So here, the pagination starts from 0 and goes until the max_pages, getting the `limit` number of pages with each request. 
We have to manually check if there are more docs based on the length of the returned list of pages, rather than just checking for the presence of a `next` key in the response like this page would have you do: https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/ :param retrieval_method: Function used to retrieve docs :type retrieval_method: callable :return: List of documents :rtype: List """ max_pages = kwargs.pop("max_pages") docs: List[dict] = [] while len(docs) < max_pages: get_pages = retry( reraise=True, stop=stop_after_attempt( self.number_of_retries # type: ignore[arg-type] ), wait=wait_exponential( multiplier=1, min=self.min_retry_seconds, # type: ignore[arg-type] max=self.max_retry_seconds, # type: ignore[arg-type] ), before_sleep=before_sleep_log(logger, logging.WARNING), )(retrieval_method) batch = get_pages(**kwargs, start=len(docs)) if not batch: break docs.extend(batch) return docs[:max_pages] [docs] def is_public_page(self, page: dict) -> bool: """Check if a page is publicly accessible.""" restrictions = self.confluence.get_all_restrictions_for_content(page["id"]) return ( page["status"] == "current" and not restrictions["read"]["restrictions"]["user"]["results"] and not restrictions["read"]["restrictions"]["group"]["results"] ) [docs] def process_pages( self, pages: List[dict], include_restricted_content: bool, include_attachments: bool, include_comments: bool, content_format: ContentFormat, ocr_languages: Optional[str] = None, ) -> List[Document]: """Process a list of pages into a list of documents.""" docs = [] for page in pages: if not include_restricted_content and not self.is_public_page(page): continue doc = self.process_page( page, include_attachments, include_comments, content_format, ocr_languages, ) docs.append(doc) return docs [docs] def process_page( self, page: dict, include_attachments: bool, include_comments: bool, content_format: ContentFormat, ocr_languages: Optional[str] = None, ) -> Document: try: from bs4 import BeautifulSoup # type: ignore except ImportError: raise ImportError( "`beautifulsoup4` package not found, please run " "`pip install beautifulsoup4`" ) if include_attachments: attachment_texts = self.process_attachment(page["id"], ocr_languages) else: attachment_texts = [] content = content_format.get_content(page) text = BeautifulSoup(content, "lxml").get_text(" ", strip=True) + "".join( attachment_texts ) if include_comments: comments = self.confluence.get_page_comments( page["id"], expand="body.view.value", depth="all" )["results"] comment_texts = [ BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text( " ", strip=True ) for comment in comments ] text = text + "".join(comment_texts) return Document( page_content=text, metadata={ "title": page["title"], "id": page["id"], "source": self.base_url.strip("/") + page["_links"]["webui"], }, ) [docs] def process_attachment( self, page_id: str, ocr_languages: Optional[str] = None, ) -> List[str]: try: from PIL import Image # noqa: F401 except ImportError: raise ImportError( "`Pillow` package not found, " "please run `pip install Pillow`" ) # depending on setup you may also need to set the correct path for # poppler and tesseract attachments = self.confluence.get_attachments_from_content(page_id)["results"] texts = [] for attachment in attachments: media_type = attachment["metadata"]["mediaType"] absolute_url = self.base_url + attachment["_links"]["download"] title = attachment["title"] if media_type == "application/pdf": text = title + self.process_pdf(absolute_url, 
ocr_languages) elif ( media_type == "image/png" or media_type == "image/jpg" or media_type == "image/jpeg" ): text = title + self.process_image(absolute_url, ocr_languages) elif ( media_type == "application/vnd.openxmlformats-officedocument" ".wordprocessingml.document" ): text = title + self.process_doc(absolute_url) elif media_type == "application/vnd.ms-excel": text = title + self.process_xls(absolute_url) elif media_type == "image/svg+xml": text = title + self.process_svg(absolute_url, ocr_languages) else: continue texts.append(text) return texts [docs] def process_pdf( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401 from pdf2image import convert_from_bytes # noqa: F401 except ImportError: raise ImportError( "`pytesseract` or `pdf2image` package not found, " "please run `pip install pytesseract pdf2image`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text try: images = convert_from_bytes(response.content) except ValueError: return text for i, image in enumerate(images): image_text = pytesseract.image_to_string(image, lang=ocr_languages) text += f"Page {i + 1}:\n{image_text}\n\n" return text [docs] def process_image( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 except ImportError: raise ImportError( "`pytesseract` or `Pillow` package not found, " "please run `pip install pytesseract Pillow`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text try: image = Image.open(BytesIO(response.content)) except OSError: return text return pytesseract.image_to_string(image, lang=ocr_languages) [docs] def process_doc(self, link: str) -> str: try: import docx2txt # noqa: F401 except ImportError: raise ImportError( "`docx2txt` package not found, please run `pip install docx2txt`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text file_data = BytesIO(response.content) return docx2txt.process(file_data) [docs] def process_xls(self, link: str) -> str: try: import xlrd # noqa: F401 except ImportError: raise ImportError("`xlrd` package not found, please run `pip install xlrd`") response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text workbook = xlrd.open_workbook(file_contents=response.content) for sheet in workbook.sheets(): text += f"{sheet.name}:\n" for row in range(sheet.nrows): for col in range(sheet.ncols): text += f"{sheet.cell_value(row, col)}\t" text += "\n" text += "\n" return text [docs] def process_svg( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 from reportlab.graphics import renderPM # noqa: F401 from svglib.svglib import svg2rlg # noqa: F401 except ImportError: raise ImportError( "`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, " "please run `pip install pytesseract Pillow reportlab svglib`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return 
text drawing = svg2rlg(BytesIO(response.content)) img_data = BytesIO() renderPM.drawToFile(drawing, img_data, fmt="PNG") img_data.seek(0) image = Image.open(img_data) return pytesseract.image_to_string(image, lang=ocr_languages)
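A hedged sketch of calling load() with explicit page IDs; the site URL, credentials, and page IDs are placeholders, and include_attachments additionally requires the extraction/OCR extras referenced above (Pillow, pytesseract, pdf2image, docx2txt, xlrd, svglib, reportlab).

from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat

loader = ConfluenceLoader(
    url="https://yoursite.atlassian.com/wiki",  # placeholder site
    username="me@example.com",                  # placeholder credentials
    api_key="<api token>",
)
docs = loader.load(
    page_ids=["123456", "654321"],              # placeholder page IDs
    content_format=ContentFormat.VIEW,          # rendered HTML instead of storage XML
    include_attachments=True,                   # needs the attachment/OCR extras
    include_comments=True,
)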
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html
de791a72-9bc1-4c42-8698-ca8770a7175a
Source code for langchain.document_loaders.text
import logging
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.helpers import detect_file_encodings

logger = logging.getLogger(__name__)


[docs]class TextLoader(BaseLoader):
    """Load text files.

    Args:
        file_path: Path to the file to load.
        encoding: File encoding to use. If `None`, the file will be loaded with
            the default system encoding.
        autodetect_encoding: Whether to try to autodetect the file encoding
            if the specified encoding fails.
    """

    def __init__(
        self,
        file_path: str,
        encoding: Optional[str] = None,
        autodetect_encoding: bool = False,
    ):
        """Initialize with file path."""
        self.file_path = file_path
        self.encoding = encoding
        self.autodetect_encoding = autodetect_encoding

    [docs] def load(self) -> List[Document]:
        """Load from file path."""
        text = ""
        try:
            with open(self.file_path, encoding=self.encoding) as f:
                text = f.read()
        except UnicodeDecodeError as e:
            if self.autodetect_encoding:
                detected_encodings = detect_file_encodings(self.file_path)
                for encoding in detected_encodings:
                    logger.debug("Trying encoding: %s", encoding.encoding)
                    try:
                        with open(self.file_path, encoding=encoding.encoding) as f:
                            text = f.read()
                        break
                    except UnicodeDecodeError:
                        continue
            else:
                raise RuntimeError(f"Error loading {self.file_path}") from e
        except Exception as e:
            raise RuntimeError(f"Error loading {self.file_path}") from e

        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
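A minimal usage sketch; the file path is a placeholder, and autodetect_encoding only takes effect when reading with the requested encoding raises a UnicodeDecodeError.

from langchain.document_loaders.text import TextLoader

# "notes.txt" is a placeholder path.
loader = TextLoader("notes.txt", encoding="utf-8", autodetect_encoding=True)
docs = loader.load()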
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/text.html
c0791e73-7587-4ec4-a480-24ce2bf1ef98
Source code for langchain.document_loaders.azlyrics """Loader that loads AZLyrics.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.web_base import WebBaseLoader [docs]class AZLyricsLoader(WebBaseLoader): """Loader that loads AZLyrics webpages.""" [docs] def load(self) -> List[Document]: """Load webpage.""" soup = self.scrape() title = soup.title.text lyrics = soup.find_all("div", {"class": ""})[2].text text = title + lyrics metadata = {"source": self.web_path} return [Document(page_content=text, metadata=metadata)]
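A minimal usage sketch, assuming the placeholder URL is replaced with a real AZLyrics song page.

from langchain.document_loaders.azlyrics import AZLyricsLoader

# The URL is a placeholder for an AZLyrics song page.
loader = AZLyricsLoader("https://www.azlyrics.com/lyrics/<artist>/<song>.html")
docs = loader.load()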
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/azlyrics.html
d435d172-b0a3-44c5-961e-7c13c80e603a
Source code for langchain.document_loaders.weather
"""Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations

from datetime import datetime
from typing import Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper


[docs]class WeatherDataLoader(BaseLoader):
    """Weather Reader.

    Reads the forecast & current weather of any location using OpenWeatherMap's
    free API. Check out 'https://openweathermap.org/appid' for more on how to
    generate a free OpenWeatherMap API key.
    """

    def __init__(
        self,
        client: OpenWeatherMapAPIWrapper,
        places: Sequence[str],
    ) -> None:
        """Initialize with parameters."""
        super().__init__()
        self.client = client
        self.places = places

    [docs] @classmethod
    def from_params(
        cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
    ) -> WeatherDataLoader:
        client = OpenWeatherMapAPIWrapper(
            openweathermap_api_key=openweathermap_api_key
        )
        return cls(client, places)

    [docs] def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Lazily load weather data for the given locations."""
        for place in self.places:
            metadata = {"queried_at": datetime.now()}
            content = self.client.run(place)
            yield Document(page_content=content, metadata=metadata)

    [docs] def load(
        self,
    ) -> List[Document]:
        """Load weather data for the given locations."""
        return list(self.lazy_load())
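A minimal usage sketch using the from_params constructor; the API key and place names are placeholders, and the underlying OpenWeatherMapAPIWrapper is assumed to rely on the pyowm package.

from langchain.document_loaders.weather import WeatherDataLoader

# Placeholder API key and place names; the wrapper is assumed to need `pyowm`.
loader = WeatherDataLoader.from_params(
    places=["chennai", "vellore"],
    openweathermap_api_key="<your openweathermap api key>",
)
docs = loader.load()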
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/weather.html
4ac54a00-768a-4181-823d-b940b8fe73be
Source code for langchain.document_loaders.email """Loader that loads email files.""" import os from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, satisfies_min_unstructured_version, ) [docs]class UnstructuredEmailLoader(UnstructuredFileLoader): """Loader that uses unstructured to load email files.""" def _get_elements(self) -> List: from unstructured.file_utils.filetype import FileType, detect_filetype filetype = detect_filetype(self.file_path) if filetype == FileType.EML: from unstructured.partition.email import partition_email return partition_email(filename=self.file_path, **self.unstructured_kwargs) elif satisfies_min_unstructured_version("0.5.8") and filetype == FileType.MSG: from unstructured.partition.msg import partition_msg return partition_msg(filename=self.file_path, **self.unstructured_kwargs) else: raise ValueError( f"Filetype {filetype} is not supported in UnstructuredEmailLoader." ) [docs]class OutlookMessageLoader(BaseLoader): """ Loader that loads Outlook Message files using extract_msg. https://github.com/TeamMsgExtractor/msg-extractor """ def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path if not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file" % self.file_path) try: import extract_msg # noqa:F401 except ImportError: raise ImportError( "extract_msg is not installed. Please install it with " "`pip install extract_msg`" ) [docs] def load(self) -> List[Document]: """Load data into document objects.""" import extract_msg msg = extract_msg.Message(self.file_path) return [ Document( page_content=msg.body, metadata={ "subject": msg.subject, "sender": msg.sender, "date": msg.date, }, ) ]
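A short usage sketch for both loaders above; the file names are placeholders. .eml files go through unstructured, while .msg files can go through either unstructured>=0.5.8 or extract_msg.

from langchain.document_loaders.email import (
    OutlookMessageLoader,
    UnstructuredEmailLoader,
)

# Placeholder file names.
eml_docs = UnstructuredEmailLoader("message.eml").load()
msg_docs = OutlookMessageLoader("message.msg").load()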
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/email.html
6d8d5293-d3e4-40c2-b3ba-474c71667b25
Source code for langchain.document_loaders.odt """Loader that loads Open Office ODT files.""" from typing import Any, List from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) [docs]class UnstructuredODTLoader(UnstructuredFileLoader): """Loader that uses unstructured to load open office ODT files.""" def __init__( self, file_path: str, mode: str = "single", **unstructured_kwargs: Any ): validate_unstructured_version(min_unstructured_version="0.6.3") super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.odt import partition_odt return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
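A minimal usage sketch; the file name is a placeholder, and unstructured>=0.6.3 is required, as validated in the constructor.

from langchain.document_loaders.odt import UnstructuredODTLoader

# "example.odt" is a placeholder path; requires unstructured>=0.6.3.
loader = UnstructuredODTLoader("example.odt", mode="elements")
docs = loader.load()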
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/odt.html
19010837-4027-408c-8704-d2fed20dc90c
Source code for langchain.document_loaders.blackboard """Loader that loads all documents from a blackboard course.""" import contextlib import re from pathlib import Path from typing import Any, List, Optional, Tuple from urllib.parse import unquote from langchain.docstore.document import Document from langchain.document_loaders.directory import DirectoryLoader from langchain.document_loaders.pdf import PyPDFLoader from langchain.document_loaders.web_base import WebBaseLoader [docs]class BlackboardLoader(WebBaseLoader): """Loader that loads all documents from a Blackboard course. This loader is not compatible with all Blackboard courses. It is only compatible with courses that use the new Blackboard interface. To use this loader, you must have the BbRouter cookie. You can get this cookie by logging into the course and then copying the value of the BbRouter cookie from the browser's developer tools. Example: .. code-block:: python from langchain.document_loaders import BlackboardLoader loader = BlackboardLoader( blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1", bbrouter="expires:12345...", ) documents = loader.load() """ base_url: str folder_path: str load_all_recursively: bool def __init__( self, blackboard_course_url: str, bbrouter: str, load_all_recursively: bool = True, basic_auth: Optional[Tuple[str, str]] = None, cookies: Optional[dict] = None, ): """Initialize with blackboard course url. The BbRouter cookie is required for most blackboard courses. Args: blackboard_course_url: Blackboard course url. bbrouter: BbRouter cookie. load_all_recursively: If True, load all documents recursively. basic_auth: Basic auth credentials. cookies: Cookies. Raises: ValueError: If blackboard course url is invalid. """ super().__init__(blackboard_course_url) # Get base url try: self.base_url = blackboard_course_url.split("/webapps/blackboard")[0] except IndexError: raise ValueError( "Invalid blackboard course url. " "Please provide a url that starts with " "https://<blackboard_url>/webapps/blackboard" ) if basic_auth is not None: self.session.auth = basic_auth # Combine cookies if cookies is None: cookies = {} cookies.update({"BbRouter": bbrouter}) self.session.cookies.update(cookies) self.load_all_recursively = load_all_recursively self.check_bs4() [docs] def check_bs4(self) -> None: """Check if BeautifulSoup4 is installed. Raises: ImportError: If BeautifulSoup4 is not installed. """ try: import bs4 # noqa: F401 except ImportError: raise ImportError( "BeautifulSoup4 is required for BlackboardLoader. " "Please install it with `pip install beautifulsoup4`." ) [docs] def load(self) -> List[Document]: """Load data into document objects. Returns: List of documents. """ if self.load_all_recursively: soup_info = self.scrape() self.folder_path = self._get_folder_path(soup_info) relative_paths = self._get_paths(soup_info) documents = [] for path in relative_paths: url = self.base_url + path print(f"Fetching documents from {url}") soup_info = self._scrape(url) with contextlib.suppress(ValueError): documents.extend(self._get_documents(soup_info)) return documents else: print(f"Fetching documents from {self.web_path}") soup_info = self.scrape() self.folder_path = self._get_folder_path(soup_info) return self._get_documents(soup_info) def _get_folder_path(self, soup: Any) -> str: """Get the folder path to save the documents in. Args: soup: BeautifulSoup4 soup object. Returns: Folder path. 
""" # Get the course name course_name = soup.find("span", {"id": "crumb_1"}) if course_name is None: raise ValueError("No course name found.") course_name = course_name.text.strip() # Prepare the folder path course_name_clean = ( unquote(course_name) .replace(" ", "_") .replace("/", "_") .replace(":", "_") .replace(",", "_") .replace("?", "_") .replace("'", "_") .replace("!", "_") .replace('"', "_") ) # Get the folder path folder_path = Path(".") / course_name_clean return str(folder_path) def _get_documents(self, soup: Any) -> List[Document]: """Fetch content from page and return Documents. Args: soup: BeautifulSoup4 soup object. Returns: List of documents. """ attachments = self._get_attachments(soup) self._download_attachments(attachments) documents = self._load_documents() return documents def _get_attachments(self, soup: Any) -> List[str]: """Get all attachments from a page. Args: soup: BeautifulSoup4 soup object. Returns: List of attachments. """ from bs4 import BeautifulSoup, Tag # Get content list content_list = soup.find("ul", {"class": "contentList"}) if content_list is None: raise ValueError("No content list found.") content_list: BeautifulSoup # type: ignore # Get all attachments attachments = [] for attachment in content_list.find_all("ul", {"class": "attachments"}): attachment: Tag # type: ignore for link in attachment.find_all("a"): link: Tag # type: ignore href = link.get("href") # Only add if href is not None and does not start with # if href is not None and not href.startswith("#"): attachments.append(href) return attachments def _download_attachments(self, attachments: List[str]) -> None: """Download all attachments. Args: attachments: List of attachments. """ # Make sure the folder exists Path(self.folder_path).mkdir(parents=True, exist_ok=True) # Download all attachments for attachment in attachments: self.download(attachment) def _load_documents(self) -> List[Document]: """Load all documents in the folder. Returns: List of documents. """ # Create the document loader loader = DirectoryLoader( path=self.folder_path, glob="*.pdf", loader_cls=PyPDFLoader # type: ignore ) # Load the documents documents = loader.load() # Return all documents return documents def _get_paths(self, soup: Any) -> List[str]: """Get all relative paths in the navbar.""" relative_paths = [] course_menu = soup.find("ul", {"class": "courseMenu"}) if course_menu is None: raise ValueError("No course menu found.") for link in course_menu.find_all("a"): href = link.get("href") if href is not None and href.startswith("/"): relative_paths.append(href) return relative_paths [docs] def download(self, path: str) -> None: """Download a file from a url. Args: path: Path to the file. """ # Get the file content response = self.session.get(self.base_url + path, allow_redirects=True) # Get the filename filename = self.parse_filename(response.url) # Write the file to disk with open(Path(self.folder_path) / filename, "wb") as f: f.write(response.content) [docs] def parse_filename(self, url: str) -> str: """Parse the filename from a url. Args: url: Url to parse the filename from. Returns: The filename. """ if (url_path := Path(url)) and url_path.suffix == ".pdf": return url_path.name else: return self._parse_filename_from_url(url) def _parse_filename_from_url(self, url: str) -> str: """Parse the filename from a url. Args: url: Url to parse the filename from. Returns: The filename. Raises: ValueError: If the filename could not be parsed. 
""" filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url) if filename_matches: filename = filename_matches.group(1) else: raise ValueError(f"Could not parse filename from {url}") if ".pdf" not in filename: raise ValueError(f"Incorrect file type: {filename}") filename = filename.split(".pdf")[0] + ".pdf" filename = unquote(filename) filename = filename.replace("%20", " ") return filename if __name__ == "__main__": loader = BlackboardLoader( "https://<YOUR BLACKBOARD URL" " HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID" " HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset", "<YOUR BBROUTER COOKIE HERE>", load_all_recursively=True, ) documents = loader.load() print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
86df6e12-f33f-4b9d-8761-af22622c6090
Source code for langchain.document_loaders.telegram """Loader that loads Telegram chat json dump.""" from __future__ import annotations import asyncio import json from pathlib import Path from typing import TYPE_CHECKING, Dict, List, Optional, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.text_splitter import RecursiveCharacterTextSplitter if TYPE_CHECKING: import pandas as pd from telethon.hints import EntityLike def concatenate_rows(row: dict) -> str: """Combine message information in a readable format ready to be used.""" date = row["date"] sender = row["from"] text = row["text"] return f"{sender} on {date}: {text}\n\n" [docs]class TelegramChatFileLoader(BaseLoader): """Loader that loads Telegram chat json directory dump.""" def __init__(self, path: str): """Initialize with path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) text = "".join( concatenate_rows(message) for message in d["messages"] if message["type"] == "message" and isinstance(message["text"], str) ) metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)] def text_to_docs(text: Union[str, List[str]]) -> List[Document]: """Converts a string or list of strings to a list of Documents with metadata.""" if isinstance(text, str): # Take a single string as one page text = [text] page_docs = [Document(page_content=page) for page in text] # Add page numbers as metadata for i, doc in enumerate(page_docs): doc.metadata["page"] = i + 1 # Split pages into chunks doc_chunks = [] for doc in page_docs: text_splitter = RecursiveCharacterTextSplitter( chunk_size=800, separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""], chunk_overlap=20, ) chunks = text_splitter.split_text(doc.page_content) for i, chunk in enumerate(chunks): doc = Document( page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i} ) # Add sources a metadata doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}" doc_chunks.append(doc) return doc_chunks [docs]class TelegramChatApiLoader(BaseLoader): """Loader that loads Telegram chat json directory dump.""" def __init__( self, chat_entity: Optional[EntityLike] = None, api_id: Optional[int] = None, api_hash: Optional[str] = None, username: Optional[str] = None, file_path: str = "telegram_data.json", ): """Initialize with API parameters.""" self.chat_entity = chat_entity self.api_id = api_id self.api_hash = api_hash self.username = username self.file_path = file_path [docs] async def fetch_data_from_telegram(self) -> None: """Fetch data from Telegram API and save it as a JSON file.""" from telethon.sync import TelegramClient data = [] async with TelegramClient(self.username, self.api_id, self.api_hash) as client: async for message in client.iter_messages(self.chat_entity): is_reply = message.reply_to is not None reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None data.append( { "sender_id": message.sender_id, "text": message.text, "date": message.date.isoformat(), "message.id": message.id, "is_reply": is_reply, "reply_to_id": reply_to_id, } ) with open(self.file_path, "w", encoding="utf-8") as f: json.dump(data, f, ensure_ascii=False, indent=4) def _get_message_threads(self, data: pd.DataFrame) -> dict: """Create a dictionary of message threads from the given data. 
Args: data (pd.DataFrame): A DataFrame containing the conversation \ data with columns: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: dict: A dictionary where the key is the parent message ID and \ the value is a list of message IDs in ascending order. """ def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]: """ Recursively find all replies to a given parent message ID. Args: parent_id (int): The parent message ID. reply_data (pd.DataFrame): A DataFrame containing reply messages. Returns: list: A list of message IDs that are replies to the parent message ID. """ # Find direct replies to the parent message ID direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][ "message.id" ].tolist() # Recursively find replies to the direct replies all_replies = [] for reply_id in direct_replies: all_replies += [reply_id] + find_replies(reply_id, reply_data) return all_replies # Filter out parent messages parent_messages = data[~data["is_reply"]] # Filter out reply messages and drop rows with NaN in 'reply_to_id' reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"]) # Convert 'reply_to_id' to integer reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int) # Create a dictionary of message threads with parent message IDs as keys and \ # lists of reply message IDs as values message_threads = { parent_id: [parent_id] + find_replies(parent_id, reply_messages) for parent_id in parent_messages["message.id"] } return message_threads def _combine_message_texts( self, message_threads: Dict[int, List[int]], data: pd.DataFrame ) -> str: """ Combine the message texts for each parent message ID based \ on the list of message threads. Args: message_threads (dict): A dictionary where the key is the parent message \ ID and the value is a list of message IDs in ascending order. data (pd.DataFrame): A DataFrame containing the conversation data: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: str: A combined string of message texts sorted by date. """ combined_text = "" # Iterate through sorted parent message IDs for parent_id, message_ids in message_threads.items(): # Get the message texts for the message IDs and sort them by date message_texts = ( data[data["message.id"].isin(message_ids)] .sort_values(by="date")["text"] .tolist() ) message_texts = [str(elem) for elem in message_texts] # Combine the message texts combined_text += " ".join(message_texts) + ".\n" return combined_text.strip() [docs] def load(self) -> List[Document]: """Load documents.""" if self.chat_entity is not None: try: import nest_asyncio nest_asyncio.apply() asyncio.run(self.fetch_data_from_telegram()) except ImportError: raise ImportError( """`nest_asyncio` package not found. please install with `pip install nest_asyncio` """ ) p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) try: import pandas as pd except ImportError: raise ImportError( """`pandas` package not found. please install with `pip install pandas` """ ) normalized_messages = pd.json_normalize(d) df = pd.DataFrame(normalized_messages) message_threads = self._get_message_threads(df) combined_texts = self._combine_message_texts(message_threads, df) return text_to_docs(combined_texts)
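A minimal usage sketch for the file-based loader above; the path is a placeholder for a Telegram chat history exported as machine-readable JSON (an assumption about where such a file comes from, not part of the source itself).

from langchain.document_loaders.telegram import TelegramChatFileLoader

# Placeholder path to a Telegram chat export in JSON format.
loader = TelegramChatFileLoader("telegram_chat_export.json")
docs = loader.load()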
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
6bed60f3-a26f-481e-9a29-019e41535e79
Source code for langchain.document_loaders.embaas import base64 import warnings from typing import Any, Dict, Iterator, List, Optional import requests from pydantic import BaseModel, root_validator, validator from typing_extensions import NotRequired, TypedDict from langchain.docstore.document import Document from langchain.document_loaders.base import BaseBlobParser, BaseLoader from langchain.document_loaders.blob_loaders import Blob from langchain.text_splitter import TextSplitter from langchain.utils import get_from_dict_or_env EMBAAS_DOC_API_URL = "https://api.embaas.io/v1/document/extract-text/bytes/" class EmbaasDocumentExtractionParameters(TypedDict): """Parameters for the embaas document extraction API.""" mime_type: NotRequired[str] """The mime type of the document.""" file_extension: NotRequired[str] """The file extension of the document.""" file_name: NotRequired[str] """The file name of the document.""" should_chunk: NotRequired[bool] """Whether to chunk the document into pages.""" chunk_size: NotRequired[int] """The maximum size of the text chunks.""" chunk_overlap: NotRequired[int] """The maximum overlap allowed between chunks.""" chunk_splitter: NotRequired[str] """The text splitter class name for creating chunks.""" separators: NotRequired[List[str]] """The separators for chunks.""" should_embed: NotRequired[bool] """Whether to create embeddings for the document in the response.""" model: NotRequired[str] """The model to pass to the Embaas document extraction API.""" instruction: NotRequired[str] """The instruction to pass to the Embaas document extraction API.""" class EmbaasDocumentExtractionPayload(EmbaasDocumentExtractionParameters): """Payload for the Embaas document extraction API.""" bytes: str """The base64 encoded bytes of the document to extract text from.""" class BaseEmbaasLoader(BaseModel): embaas_api_key: Optional[str] = None api_url: str = EMBAAS_DOC_API_URL """The URL of the embaas document extraction API.""" params: EmbaasDocumentExtractionParameters = EmbaasDocumentExtractionParameters() """Additional parameters to pass to the embaas document extraction API.""" @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( values, "embaas_api_key", "EMBAAS_API_KEY" ) values["embaas_api_key"] = embaas_api_key return values [docs]class EmbaasBlobLoader(BaseEmbaasLoader, BaseBlobParser): """Wrapper around embaas's document byte loader service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. 
code-block:: python # Default parsing from langchain.document_loaders.embaas import EmbaasBlobLoader loader = EmbaasBlobLoader() blob = Blob.from_path(path="example.mp3") documents = loader.parse(blob=blob) # Custom api parameters (create embeddings automatically) from langchain.document_loaders.embaas import EmbaasBlobLoader loader = EmbaasBlobLoader( params={ "should_embed": True, "model": "e5-large-v2", "chunk_size": 256, "chunk_splitter": "CharacterTextSplitter" } ) blob = Blob.from_path(path="example.pdf") documents = loader.parse(blob=blob) """ [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: yield from self._get_documents(blob=blob) @staticmethod def _api_response_to_documents(chunks: List[Dict[str, Any]]) -> List[Document]: """Convert the API response to a list of documents.""" docs = [] for chunk in chunks: metadata = chunk["metadata"] if chunk.get("embedding", None) is not None: metadata["embedding"] = chunk["embedding"] doc = Document(page_content=chunk["text"], metadata=metadata) docs.append(doc) return docs def _generate_payload(self, blob: Blob) -> EmbaasDocumentExtractionPayload: """Generates payload for the API request.""" base64_byte_str = base64.b64encode(blob.as_bytes()).decode() payload: EmbaasDocumentExtractionPayload = EmbaasDocumentExtractionPayload( bytes=base64_byte_str, # Workaround for mypy issue: https://github.com/python/mypy/issues/9408 # type: ignore **self.params, ) if blob.mimetype is not None and payload.get("mime_type", None) is None: payload["mime_type"] = blob.mimetype return payload def _handle_request( self, payload: EmbaasDocumentExtractionPayload ) -> List[Document]: """Sends a request to the embaas API and handles the response.""" headers = { "Authorization": f"Bearer {self.embaas_api_key}", "Content-Type": "application/json", } response = requests.post(self.api_url, headers=headers, json=payload) response.raise_for_status() parsed_response = response.json() return EmbaasBlobLoader._api_response_to_documents( chunks=parsed_response["data"]["chunks"] ) def _get_documents(self, blob: Blob) -> Iterator[Document]: """Get the documents from the blob.""" payload = self._generate_payload(blob=blob) try: documents = self._handle_request(payload=payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError( f"Error raised by embaas document text extraction API: {e}" ) parsed_response = e.response.json() if "message" in parsed_response: raise ValueError( f"Validation Error raised by embaas document text extraction API:" f" {parsed_response['message']}" ) raise yield from documents [docs]class EmbaasLoader(BaseEmbaasLoader, BaseLoader): """Wrapper around embaas's document loader service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python # Default parsing from langchain.document_loaders.embaas import EmbaasLoader loader = EmbaasLoader(file_path="example.mp3") documents = loader.load() # Custom api parameters (create embeddings automatically) from langchain.document_loaders.embaas import EmbaasBlobLoader loader = EmbaasBlobLoader( file_path="example.pdf", params={ "should_embed": True, "model": "e5-large-v2", "chunk_size": 256, "chunk_splitter": "CharacterTextSplitter" } ) documents = loader.load() """ file_path: str """The path to the file to load.""" blob_loader: Optional[EmbaasBlobLoader] """The blob loader to use. 
If not provided, a default one will be created.""" @validator("blob_loader", always=True) def validate_blob_loader( cls, v: EmbaasBlobLoader, values: Dict ) -> EmbaasBlobLoader: return v or EmbaasBlobLoader( embaas_api_key=values["embaas_api_key"], api_url=values["api_url"], params=values["params"], ) [docs] def lazy_load(self) -> Iterator[Document]: """Load the documents from the file path lazily.""" blob = Blob.from_path(path=self.file_path) assert self.blob_loader is not None # Should never be None, but mypy doesn't know that. yield from self.blob_loader.lazy_parse(blob=blob) [docs] def load(self) -> List[Document]: return list(self.lazy_load()) [docs] def load_and_split( self, text_splitter: Optional[TextSplitter] = None ) -> List[Document]: if self.params.get("should_embed", False): warnings.warn( "Embeddings are not supported with load_and_split." " Use the API splitter to properly generate embeddings." " For more information see embaas.io docs." ) return super().load_and_split(text_splitter=text_splitter)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/embaas.html
dc87a8ca-15c0-4c4c-bb86-2de99e0a897a
Source code for langchain.document_loaders.airtable
from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class AirtableLoader(BaseLoader):
    """Loader for Airtable tables."""

    def __init__(self, api_token: str, table_id: str, base_id: str):
        """Initialize with the API token and the IDs of the table and base."""
        self.api_token = api_token
        self.table_id = table_id
        self.base_id = base_id

[docs]    def lazy_load(self) -> Iterator[Document]:
        """Lazily load records from the table."""
        from pyairtable import Table

        table = Table(self.api_token, self.base_id, self.table_id)
        records = table.all()
        for record in records:
            # Need to convert record from dict to str
            yield Document(
                page_content=str(record),
                metadata={
                    "source": self.base_id + "_" + self.table_id,
                    "base_id": self.base_id,
                    "table_id": self.table_id,
                },
            )

[docs]    def load(self) -> List[Document]:
        """Load the table."""
        return list(self.lazy_load())
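A minimal usage sketch for this loader; the API token, base ID, and table ID below are placeholders, and ``pyairtable`` must be installed:

.. code-block:: python

    from langchain.document_loaders.airtable import AirtableLoader

    loader = AirtableLoader(
        api_token="YOUR_AIRTABLE_API_TOKEN",  # placeholder
        table_id="tblXXXXXXXXXXXXXX",         # placeholder
        base_id="appXXXXXXXXXXXXXX",          # placeholder
    )

    # Stream records lazily, or materialize them all at once.
    for doc in loader.lazy_load():
        print(doc.metadata["source"])
    docs = loader.load()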
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airtable.html
3df45c52-b6f0-4fe0-907d-ed2d9829e398
Source code for langchain.document_loaders.pdf """Loader that loads PDF files.""" import json import logging import os import tempfile import time from abc import ABC from io import StringIO from pathlib import Path from typing import Any, Iterator, List, Mapping, Optional from urllib.parse import urlparse import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.blob_loaders import Blob from langchain.document_loaders.parsers.pdf import ( PDFMinerParser, PDFPlumberParser, PyMuPDFParser, PyPDFium2Parser, PyPDFParser, ) from langchain.document_loaders.unstructured import UnstructuredFileLoader from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__file__) [docs]class UnstructuredPDFLoader(UnstructuredFileLoader): """Loader that uses unstructured to load PDF files.""" def _get_elements(self) -> List: from unstructured.partition.pdf import partition_pdf return partition_pdf(filename=self.file_path, **self.unstructured_kwargs) class BasePDFLoader(BaseLoader, ABC): """Base loader class for PDF files. Defaults to check for local file, but if the file is a web path, it will download it to a temporary file, and use that, then clean up the temporary file after completion """ def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path self.web_path = None if "~" in self.file_path: self.file_path = os.path.expanduser(self.file_path) # If the file is a web path, download it to a temporary file, and use that if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path): r = requests.get(self.file_path) if r.status_code != 200: raise ValueError( "Check the url of your file; returned status code %s" % r.status_code ) self.web_path = self.file_path self.temp_dir = tempfile.TemporaryDirectory() temp_pdf = Path(self.temp_dir.name) / "tmp.pdf" with open(temp_pdf, mode="wb") as f: f.write(r.content) self.file_path = str(temp_pdf) elif not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file or url" % self.file_path) def __del__(self) -> None: if hasattr(self, "temp_dir"): self.temp_dir.cleanup() @staticmethod def _is_valid_url(url: str) -> bool: """Check if the url is valid.""" parsed = urlparse(url) return bool(parsed.netloc) and bool(parsed.scheme) @property def source(self) -> str: return self.web_path if self.web_path is not None else self.file_path [docs]class OnlinePDFLoader(BasePDFLoader): """Loader that loads online PDFs.""" [docs] def load(self) -> List[Document]: """Load documents.""" loader = UnstructuredPDFLoader(str(self.file_path)) return loader.load() [docs]class PyPDFLoader(BasePDFLoader): """Loads a PDF with pypdf and chunks at character level. Loader also stores page numbers in metadatas. 
""" def __init__(self, file_path: str) -> None: """Initialize with file path.""" try: import pypdf # noqa:F401 except ImportError: raise ImportError( "pypdf package not found, please install it with " "`pip install pypdf`" ) self.parser = PyPDFParser() super().__init__(file_path) [docs] def load(self) -> List[Document]: """Load given path as pages.""" return list(self.lazy_load()) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazy load given path as pages.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) [docs]class PyPDFium2Loader(BasePDFLoader): """Loads a PDF with pypdfium2 and chunks at character level.""" def __init__(self, file_path: str): """Initialize with file path.""" super().__init__(file_path) self.parser = PyPDFium2Parser() [docs] def load(self) -> List[Document]: """Load given path as pages.""" return list(self.lazy_load()) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazy load given path as pages.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) [docs]class PyPDFDirectoryLoader(BaseLoader): """Loads a directory with PDF files with pypdf and chunks at character level. Loader also stores page numbers in metadatas. """ def __init__( self, path: str, glob: str = "**/[!.]*.pdf", silent_errors: bool = False, load_hidden: bool = False, recursive: bool = False, ): self.path = path self.glob = glob self.load_hidden = load_hidden self.recursive = recursive self.silent_errors = silent_errors @staticmethod def _is_visible(path: Path) -> bool: return not any(part.startswith(".") for part in path.parts) [docs] def load(self) -> List[Document]: p = Path(self.path) docs = [] items = p.rglob(self.glob) if self.recursive else p.glob(self.glob) for i in items: if i.is_file(): if self._is_visible(i.relative_to(p)) or self.load_hidden: try: loader = PyPDFLoader(str(i)) sub_docs = loader.load() for doc in sub_docs: doc.metadata["source"] = str(i) docs.extend(sub_docs) except Exception as e: if self.silent_errors: logger.warning(e) else: raise e return docs [docs]class PDFMinerLoader(BasePDFLoader): """Loader that uses PDFMiner to load PDF files.""" def __init__(self, file_path: str) -> None: """Initialize with file path.""" try: from pdfminer.high_level import extract_text # noqa:F401 except ImportError: raise ImportError( "`pdfminer` package not found, please install it with " "`pip install pdfminer.six`" ) super().__init__(file_path) self.parser = PDFMinerParser() [docs] def load(self) -> List[Document]: """Eagerly load the content.""" return list(self.lazy_load()) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazily lod documents.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) [docs]class PDFMinerPDFasHTMLLoader(BasePDFLoader): """Loader that uses PDFMiner to load PDF files as HTML content.""" def __init__(self, file_path: str): """Initialize with file path.""" try: from pdfminer.high_level import extract_text_to_fp # noqa:F401 except ImportError: raise ImportError( "`pdfminer` package not found, please install it with " "`pip install pdfminer.six`" ) super().__init__(file_path) [docs] def load(self) -> List[Document]: """Load file.""" from pdfminer.high_level import extract_text_to_fp from pdfminer.layout import LAParams from pdfminer.utils import open_filename output_string = StringIO() with open_filename(self.file_path, "rb") as fp: extract_text_to_fp( fp, # type: ignore[arg-type] output_string, codec="", laparams=LAParams(), output_type="html", ) metadata = {"source": 
self.file_path} return [Document(page_content=output_string.getvalue(), metadata=metadata)] [docs]class PyMuPDFLoader(BasePDFLoader): """Loader that uses PyMuPDF to load PDF files.""" def __init__(self, file_path: str) -> None: """Initialize with file path.""" try: import fitz # noqa:F401 except ImportError: raise ImportError( "`PyMuPDF` package not found, please install it with " "`pip install pymupdf`" ) super().__init__(file_path) [docs] def load(self, **kwargs: Optional[Any]) -> List[Document]: """Load file.""" parser = PyMuPDFParser(text_kwargs=kwargs) blob = Blob.from_path(self.file_path) return parser.parse(blob) # MathpixPDFLoader implementation taken largely from Daniel Gross's: # https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21 [docs]class MathpixPDFLoader(BasePDFLoader): def __init__( self, file_path: str, processed_file_format: str = "mmd", max_wait_time_seconds: int = 500, should_clean_pdf: bool = False, **kwargs: Any, ) -> None: super().__init__(file_path) self.mathpix_api_key = get_from_dict_or_env( kwargs, "mathpix_api_key", "MATHPIX_API_KEY" ) self.mathpix_api_id = get_from_dict_or_env( kwargs, "mathpix_api_id", "MATHPIX_API_ID" ) self.processed_file_format = processed_file_format self.max_wait_time_seconds = max_wait_time_seconds self.should_clean_pdf = should_clean_pdf @property def headers(self) -> dict: return {"app_id": self.mathpix_api_id, "app_key": self.mathpix_api_key} @property def url(self) -> str: return "https://api.mathpix.com/v3/pdf" @property def data(self) -> dict: options = {"conversion_formats": {self.processed_file_format: True}} return {"options_json": json.dumps(options)} [docs] def send_pdf(self) -> str: with open(self.file_path, "rb") as f: files = {"file": f} response = requests.post( self.url, headers=self.headers, files=files, data=self.data ) response_data = response.json() if "pdf_id" in response_data: pdf_id = response_data["pdf_id"] return pdf_id else: raise ValueError("Unable to send PDF to Mathpix.") [docs] def wait_for_processing(self, pdf_id: str) -> None: url = self.url + "/" + pdf_id for _ in range(0, self.max_wait_time_seconds, 5): response = requests.get(url, headers=self.headers) response_data = response.json() status = response_data.get("status", None) if status == "completed": return elif status == "error": raise ValueError("Unable to retrieve PDF from Mathpix") else: print(f"Status: {status}, waiting for processing to complete") time.sleep(5) raise TimeoutError [docs] def get_processed_pdf(self, pdf_id: str) -> str: self.wait_for_processing(pdf_id) url = f"{self.url}/{pdf_id}.{self.processed_file_format}" response = requests.get(url, headers=self.headers) return response.content.decode("utf-8") [docs] def clean_pdf(self, contents: str) -> str: contents = "\n".join( [line for line in contents.split("\n") if not line.startswith("![]")] ) # replace \section{Title} with # Title contents = contents.replace("\\section{", "# ").replace("}", "") # replace the "\" slash that Mathpix adds to escape $, %, (, etc. 
contents = ( contents.replace(r"\$", "$") .replace(r"\%", "%") .replace(r"\(", "(") .replace(r"\)", ")") ) return contents [docs] def load(self) -> List[Document]: pdf_id = self.send_pdf() contents = self.get_processed_pdf(pdf_id) if self.should_clean_pdf: contents = self.clean_pdf(contents) metadata = {"source": self.source, "file_path": self.source} return [Document(page_content=contents, metadata=metadata)] [docs]class PDFPlumberLoader(BasePDFLoader): """Loader that uses pdfplumber to load PDF files.""" def __init__( self, file_path: str, text_kwargs: Optional[Mapping[str, Any]] = None ) -> None: """Initialize with file path.""" try: import pdfplumber # noqa:F401 except ImportError: raise ImportError( "pdfplumber package not found, please install it with " "`pip install pdfplumber`" ) super().__init__(file_path) self.text_kwargs = text_kwargs or {} [docs] def load(self) -> List[Document]: """Load file.""" parser = PDFPlumberParser(text_kwargs=self.text_kwargs) blob = Blob.from_path(self.file_path) return parser.parse(blob)
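As a quick orientation to the loaders above, here is a minimal sketch of the most common entry point, ``PyPDFLoader``; ``"example.pdf"`` is a placeholder path and ``pypdf`` must be installed:

.. code-block:: python

    from langchain.document_loaders.pdf import PyPDFLoader

    loader = PyPDFLoader("example.pdf")  # placeholder path

    # Eager load: one Document per page, with page numbers in metadata.
    pages = loader.load()

    # Or iterate lazily to avoid holding every page in memory at once.
    for page in loader.lazy_load():
        print(page.metadata)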
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pdf.html
67da7367-0f3b-4591-8943-5f7368932c5a
Source code for langchain.document_loaders.epub
"""Loader that loads EPub files."""
from typing import List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


[docs]class UnstructuredEPubLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load epub files."""

    def _get_elements(self) -> List:
        min_unstructured_version = "0.5.4"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning epub files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        from unstructured.partition.epub import partition_epub

        return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
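A minimal sketch of calling this loader, assuming ``unstructured>=0.5.4`` is installed; ``"example.epub"`` is a placeholder path:

.. code-block:: python

    from langchain.document_loaders.epub import UnstructuredEPubLoader

    loader = UnstructuredEPubLoader("example.epub", mode="elements")
    docs = loader.load()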
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/epub.html
e00f23d9-e217-41fa-a86e-388e0d11cb93
Source code for langchain.document_loaders.mastodon """Mastodon document loader.""" from __future__ import annotations import os from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import mastodon def _dependable_mastodon_import() -> mastodon: try: import mastodon except ImportError: raise ValueError( "Mastodon.py package not found, " "please install it with `pip install Mastodon.py`" ) return mastodon [docs]class MastodonTootsLoader(BaseLoader): """Mastodon toots loader.""" def __init__( self, mastodon_accounts: Sequence[str], number_toots: Optional[int] = 100, exclude_replies: bool = False, access_token: Optional[str] = None, api_base_url: str = "https://mastodon.social", ): """Instantiate Mastodon toots loader. Args: mastodon_accounts: The list of Mastodon accounts to query. number_toots: How many toots to pull for each account. exclude_replies: Whether to exclude reply toots from the load. access_token: An access token if toots are loaded as a Mastodon app. Can also be specified via the environment variables "MASTODON_ACCESS_TOKEN". api_base_url: A Mastodon API base URL to talk to, if not using the default. """ mastodon = _dependable_mastodon_import() access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN") self.api = mastodon.Mastodon( access_token=access_token, api_base_url=api_base_url ) self.mastodon_accounts = mastodon_accounts self.number_toots = number_toots self.exclude_replies = exclude_replies [docs] def load(self) -> List[Document]: """Load toots into documents.""" results: List[Document] = [] for account in self.mastodon_accounts: user = self.api.account_lookup(account) toots = self.api.account_statuses( user.id, only_media=False, pinned=False, exclude_replies=self.exclude_replies, exclude_reblogs=True, limit=self.number_toots, ) docs = self._format_toots(toots, user) results.extend(docs) return results def _format_toots( self, toots: List[Dict[str, Any]], user_info: dict ) -> Iterable[Document]: """Format toots into documents. Adding user info, and selected toot fields into the metadata. """ for toot in toots: metadata = { "created_at": toot["created_at"], "user_info": user_info, "is_reply": toot["in_reply_to_id"] is not None, } yield Document( page_content=toot["content"], metadata=metadata, )
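A minimal sketch of how this loader can be called; the account handle and toot count are illustrative placeholders, ``Mastodon.py`` must be installed, and the access token is assumed to come from the ``MASTODON_ACCESS_TOKEN`` environment variable as described in the constructor docstring:

.. code-block:: python

    from langchain.document_loaders.mastodon import MastodonTootsLoader

    loader = MastodonTootsLoader(
        mastodon_accounts=["@someaccount@mastodon.social"],  # placeholder handle
        number_toots=50,
        exclude_replies=True,
    )
    # Each toot becomes a Document; page_content holds the raw toot content.
    docs = loader.load()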
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mastodon.html
0eb7424f-f027-4f89-a3e4-d219aa6f8eb3
Source code for langchain.document_loaders.college_confidential
"""Loader that loads College Confidential."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


[docs]class CollegeConfidentialLoader(WebBaseLoader):
    """Loader that loads College Confidential webpages."""

[docs]    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        text = soup.select_one("main[class='skin-handler']").text
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]
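A minimal usage sketch; the URL below is a placeholder for any College Confidential page:

.. code-block:: python

    from langchain.document_loaders.college_confidential import (
        CollegeConfidentialLoader,
    )

    # Placeholder URL; any College Confidential page works the same way.
    loader = CollegeConfidentialLoader(
        "https://www.collegeconfidential.com/colleges/some-college/"
    )
    docs = loader.load()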
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/college_confidential.html
5a52fadd-fe05-4e66-88de-7e2441cba728
Source code for langchain.document_loaders.whatsapp_chat import re from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader def concatenate_rows(date: str, sender: str, text: str) -> str: """Combine message information in a readable format ready to be used.""" return f"{sender} on {date}: {text}\n\n" [docs]class WhatsAppChatLoader(BaseLoader): """Loader that loads WhatsApp messages text file.""" def __init__(self, path: str): """Initialize with path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.file_path) text_content = "" with open(p, encoding="utf8") as f: lines = f.readlines() message_line_regex = r""" \[? ( \d{1,4} [\/.] \d{1,2} [\/.] \d{1,4} ,\s \d{1,2} :\d{2} (?: :\d{2} )? (?:[\s_](?:AM|PM))? ) \]? [\s-]* ([~\w\s]+) [:]+ \s (.+) """ for line in lines: result = re.match( message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE ) if result: date, sender, text = result.groups() text_content += concatenate_rows(date, sender, text) metadata = {"source": str(p)} return [Document(page_content=text_content, metadata=metadata)]
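A minimal sketch of loading an exported chat; ``"whatsapp_chat.txt"`` is a placeholder for the text file produced by WhatsApp's export feature:

.. code-block:: python

    from langchain.document_loaders.whatsapp_chat import WhatsAppChatLoader

    loader = WhatsAppChatLoader("whatsapp_chat.txt")  # placeholder path
    docs = loader.load()  # a single Document containing the whole conversation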
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/whatsapp_chat.html
5ac2060b-778c-4806-943c-fbbd06e00b7e
Source code for langchain.document_loaders.spreedly """Loader that fetches data from Spreedly API.""" import json import urllib.request from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import stringify_dict SPREEDLY_ENDPOINTS = { "gateways_options": "https://core.spreedly.com/v1/gateways_options.json", "gateways": "https://core.spreedly.com/v1/gateways.json", "receivers_options": "https://core.spreedly.com/v1/receivers_options.json", "receivers": "https://core.spreedly.com/v1/receivers.json", "payment_methods": "https://core.spreedly.com/v1/payment_methods.json", "certificates": "https://core.spreedly.com/v1/certificates.json", "transactions": "https://core.spreedly.com/v1/transactions.json", "environments": "https://core.spreedly.com/v1/environments.json", } [docs]class SpreedlyLoader(BaseLoader): """Loader that fetches data from Spreedly API.""" def __init__(self, access_token: str, resource: str) -> None: self.access_token = access_token self.resource = resource self.headers = { "Authorization": f"Bearer {self.access_token}", "Accept": "application/json", } def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = SPREEDLY_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) [docs] def load(self) -> List[Document]: return self._get_resource()
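A minimal usage sketch; the environment variable name below is purely illustrative, and ``resource`` must be one of the keys of ``SPREEDLY_ENDPOINTS`` defined above:

.. code-block:: python

    import os

    from langchain.document_loaders.spreedly import SpreedlyLoader

    loader = SpreedlyLoader(
        access_token=os.environ["SPREEDLY_ACCESS_TOKEN"],  # placeholder variable name
        resource="gateways_options",  # any key of SPREEDLY_ENDPOINTS
    )
    docs = loader.load()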
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/spreedly.html
faa24760-813d-49ef-9ca7-f1ddb69f3c18
Source code for langchain.document_loaders.youtube """Loader that loads YouTube transcript.""" from __future__ import annotations import logging from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Union from urllib.parse import parse_qs, urlparse from pydantic import root_validator from pydantic.dataclasses import dataclass from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"] [docs]@dataclass class GoogleApiClient: """A Generic Google Api Client. To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google`` python package installed. As the google api expects credentials you need to set up a google account and register your Service. "https://developers.google.com/docs/api/quickstart/python" Example: .. code-block:: python from langchain.document_loaders import GoogleApiClient google_api_client = GoogleApiClient( service_account_path=Path("path_to_your_sec_file.json") ) """ credentials_path: Path = Path.home() / ".credentials" / "credentials.json" service_account_path: Path = Path.home() / ".credentials" / "credentials.json" token_path: Path = Path.home() / ".credentials" / "token.json" def __post_init__(self) -> None: self.creds = self._load_credentials() [docs] @root_validator def validate_channel_or_videoIds_is_set( cls, values: Dict[str, Any] ) -> Dict[str, Any]: """Validate that either folder_id or document_ids is set, but not both.""" if not values.get("credentials_path") and not values.get( "service_account_path" ): raise ValueError("Must specify either channel_name or video_ids") return values def _load_credentials(self) -> Any: """Load credentials.""" # Adapted from https://developers.google.com/drive/api/v3/quickstart/python try: from google.auth.transport.requests import Request from google.oauth2 import service_account from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401 except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "google-api-python-client google-auth-httplib2 " "google-auth-oauthlib " "youtube-transcript-api` " "to use the Google Drive loader" ) creds = None if self.service_account_path.exists(): return service_account.Credentials.from_service_account_file( str(self.service_account_path) ) if self.token_path.exists(): creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( str(self.credentials_path), SCOPES ) creds = flow.run_local_server(port=0) with open(self.token_path, "w") as token: token.write(creds.to_json()) return creds ALLOWED_SCHEMAS = {"http", "https"} ALLOWED_NETLOCK = { "youtu.be", "m.youtube.com", "youtube.com", "www.youtube.com", "www.youtube-nocookie.com", "vid.plus", } def _parse_video_id(url: str) -> Optional[str]: """Parse a youtube url and return the video id if valid, otherwise None.""" parsed_url = urlparse(url) if parsed_url.scheme not in ALLOWED_SCHEMAS: return None if parsed_url.netloc not in ALLOWED_NETLOCK: return None path = parsed_url.path if path.endswith("/watch"): query = parsed_url.query parsed_query = parse_qs(query) if "v" in parsed_query: ids = parsed_query["v"] video_id = ids if isinstance(ids, str) else 
ids[0] else: return None else: path = parsed_url.path.lstrip("/") video_id = path.split("/")[-1] if len(video_id) != 11: # Video IDs are 11 characters long return None return video_id [docs]class YoutubeLoader(BaseLoader): """Loader that loads Youtube transcripts.""" def __init__( self, video_id: str, add_video_info: bool = False, language: Union[str, Sequence[str]] = "en", translation: str = "en", continue_on_failure: bool = False, ): """Initialize with YouTube video ID.""" self.video_id = video_id self.add_video_info = add_video_info self.language = language if isinstance(language, str): self.language = [language] else: self.language = language self.translation = translation self.continue_on_failure = continue_on_failure [docs] @staticmethod def extract_video_id(youtube_url: str) -> str: """Extract video id from common YT urls.""" video_id = _parse_video_id(youtube_url) if not video_id: raise ValueError( f"Could not determine the video ID for the URL {youtube_url}" ) return video_id [docs] @classmethod def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader: """Given youtube URL, load video.""" video_id = cls.extract_video_id(youtube_url) return cls(video_id, **kwargs) [docs] def load(self) -> List[Document]: """Load documents.""" try: from youtube_transcript_api import ( NoTranscriptFound, TranscriptsDisabled, YouTubeTranscriptApi, ) except ImportError: raise ImportError( "Could not import youtube_transcript_api python package. " "Please install it with `pip install youtube-transcript-api`." ) metadata = {"source": self.video_id} if self.add_video_info: # Get more video meta info # Such as title, description, thumbnail url, publish_date video_info = self._get_video_info() metadata.update(video_info) try: transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id) except TranscriptsDisabled: return [] try: transcript = transcript_list.find_transcript(self.language) except NoTranscriptFound: en_transcript = transcript_list.find_transcript(["en"]) transcript = en_transcript.translate(self.translation) transcript_pieces = transcript.fetch() transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces]) return [Document(page_content=transcript, metadata=metadata)] def _get_video_info(self) -> dict: """Get important video information. Components are: - title - description - thumbnail url, - publish_date - channel_author - and more. """ try: from pytube import YouTube except ImportError: raise ImportError( "Could not import pytube python package. " "Please install it with `pip install pytube`." ) yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}") video_info = { "title": yt.title or "Unknown", "description": yt.description or "Unknown", "view_count": yt.views or 0, "thumbnail_url": yt.thumbnail_url or "Unknown", "publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S") if yt.publish_date else "Unknown", "length": yt.length or 0, "author": yt.author or "Unknown", } return video_info [docs]@dataclass class GoogleApiYoutubeLoader(BaseLoader): """Loader that loads all Videos from a Channel To use, you should have the ``googleapiclient,youtube_transcript_api`` python package installed. As the service needs a google_api_client, you first have to initialize the GoogleApiClient. Additionally you have to either provide a channel name or a list of videoids "https://developers.google.com/docs/api/quickstart/python" Example: .. 
code-block:: python from langchain.document_loaders import GoogleApiClient from langchain.document_loaders import GoogleApiYoutubeLoader google_api_client = GoogleApiClient( service_account_path=Path("path_to_your_sec_file.json") ) loader = GoogleApiYoutubeLoader( google_api_client=google_api_client, channel_name = "CodeAesthetic" ) load.load() """ google_api_client: GoogleApiClient channel_name: Optional[str] = None video_ids: Optional[List[str]] = None add_video_info: bool = True captions_language: str = "en" continue_on_failure: bool = False def __post_init__(self) -> None: self.youtube_client = self._build_youtube_client(self.google_api_client.creds) def _build_youtube_client(self, creds: Any) -> Any: try: from googleapiclient.discovery import build from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401 except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "google-api-python-client google-auth-httplib2 " "google-auth-oauthlib " "youtube-transcript-api` " "to use the Google Drive loader" ) return build("youtube", "v3", credentials=creds) [docs] @root_validator def validate_channel_or_videoIds_is_set( cls, values: Dict[str, Any] ) -> Dict[str, Any]: """Validate that either folder_id or document_ids is set, but not both.""" if not values.get("channel_name") and not values.get("video_ids"): raise ValueError("Must specify either channel_name or video_ids") return values def _get_transcripe_for_video_id(self, video_id: str) -> str: from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi transcript_list = YouTubeTranscriptApi.list_transcripts(video_id) try: transcript = transcript_list.find_transcript([self.captions_language]) except NoTranscriptFound: for available_transcript in transcript_list: transcript = available_transcript.translate(self.captions_language) continue transcript_pieces = transcript.fetch() return " ".join([t["text"].strip(" ") for t in transcript_pieces]) def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document: captions = self._get_transcripe_for_video_id(video_id) video_response = ( self.youtube_client.videos() .list( part="id,snippet", id=video_id, ) .execute() ) return Document( page_content=captions, metadata=video_response.get("items")[0], ) def _get_channel_id(self, channel_name: str) -> str: request = self.youtube_client.search().list( part="id", q=channel_name, type="channel", maxResults=1, # we only need one result since channel names are unique ) response = request.execute() channel_id = response["items"][0]["id"]["channelId"] return channel_id def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]: try: from youtube_transcript_api import ( NoTranscriptFound, TranscriptsDisabled, ) except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "youtube-transcript-api` " "to use the youtube loader" ) channel_id = self._get_channel_id(channel) request = self.youtube_client.search().list( part="id,snippet", channelId=channel_id, maxResults=50, # adjust this value to retrieve more or fewer videos ) video_ids = [] while request is not None: response = request.execute() # Add each video ID to the list for item in response["items"]: if not item["id"].get("videoId"): continue meta_data = {"videoId": item["id"]["videoId"]} if self.add_video_info: item["snippet"].pop("thumbnails") meta_data.update(item["snippet"]) try: page_content = self._get_transcripe_for_video_id( item["id"]["videoId"] ) video_ids.append( Document( page_content=page_content, 
metadata=meta_data, ) ) except (TranscriptsDisabled, NoTranscriptFound) as e: if self.continue_on_failure: logger.error( "Error fetching transscript " + f" {item['id']['videoId']}, exception: {e}" ) else: raise e pass request = self.youtube_client.search().list_next(request, response) return video_ids [docs] def load(self) -> List[Document]: """Load documents.""" document_list = [] if self.channel_name: document_list.extend(self._get_document_for_channel(self.channel_name)) elif self.video_ids: document_list.extend( [ self._get_document_for_video_id(video_id) for video_id in self.video_ids ] ) else: raise ValueError("Must specify either channel_name or video_ids") return document_list
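A minimal sketch of the simpler ``YoutubeLoader`` entry point defined above; the video URL is a placeholder (video IDs must be 11 characters long), and ``youtube-transcript-api`` must be installed. Per the source, ``load()`` returns an empty list when transcripts are disabled for the video:

.. code-block:: python

    from langchain.document_loaders.youtube import YoutubeLoader

    loader = YoutubeLoader.from_youtube_url(
        "https://www.youtube.com/watch?v=XXXXXXXXXXX",  # placeholder 11-char video ID
        add_video_info=False,
        language=["en"],
    )
    docs = loader.load()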
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
05613b52-9b5d-48f2-a2f7-4d5effcd5e0a
Source code for langchain.document_loaders.hugging_face_dataset """Loader that loads HuggingFace datasets.""" from typing import Iterator, List, Mapping, Optional, Sequence, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class HuggingFaceDatasetLoader(BaseLoader): """Loading logic for loading documents from the Hugging Face Hub.""" def __init__( self, path: str, page_content_column: str = "text", name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[ Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]] ] = None, cache_dir: Optional[str] = None, keep_in_memory: Optional[bool] = None, save_infos: bool = False, use_auth_token: Optional[Union[bool, str]] = None, num_proc: Optional[int] = None, ): """Initialize the HuggingFaceDatasetLoader. Args: path: Path or name of the dataset. page_content_column: Page content column name. name: Name of the dataset configuration. data_dir: Data directory of the dataset configuration. data_files: Path(s) to source data file(s). cache_dir: Directory to read/write data. keep_in_memory: Whether to copy the dataset in-memory. save_infos: Save the dataset information (checksums/size/splits/...). use_auth_token: Bearer token for remote files on the Datasets Hub. num_proc: Number of processes. """ self.path = path self.page_content_column = page_content_column self.name = name self.data_dir = data_dir self.data_files = data_files self.cache_dir = cache_dir self.keep_in_memory = keep_in_memory self.save_infos = save_infos self.use_auth_token = use_auth_token self.num_proc = num_proc [docs] def lazy_load( self, ) -> Iterator[Document]: """Load documents lazily.""" try: from datasets import load_dataset except ImportError: raise ImportError( "Could not import datasets python package. " "Please install it with `pip install datasets`." ) dataset = load_dataset( path=self.path, name=self.name, data_dir=self.data_dir, data_files=self.data_files, cache_dir=self.cache_dir, keep_in_memory=self.keep_in_memory, save_infos=self.save_infos, use_auth_token=self.use_auth_token, num_proc=self.num_proc, ) yield from ( Document( page_content=row.pop(self.page_content_column), metadata=row, ) for key in dataset.keys() for row in dataset[key] ) [docs] def load(self) -> List[Document]: """Load documents.""" return list(self.lazy_load())
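A minimal usage sketch; the dataset name and column below are illustrative, and the ``datasets`` package must be installed:

.. code-block:: python

    from langchain.document_loaders.hugging_face_dataset import (
        HuggingFaceDatasetLoader,
    )

    # "imdb" and "text" are illustrative; substitute your dataset and column.
    loader = HuggingFaceDatasetLoader(path="imdb", page_content_column="text")

    docs = loader.load()  # eager
    # or stream rows lazily; remaining columns land in metadata:
    for doc in loader.lazy_load():
        print(doc.page_content[:80])
        break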
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/hugging_face_dataset.html
318e14d4-1d3a-424b-9cf0-54f0839c8555
Source code for langchain.document_loaders.chatgpt
"""Load conversations from ChatGPT data export."""
import datetime
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(message: dict, title: str) -> str:
    """Combine message information in a readable format ready to be used.

    Args:
        message: Message to be concatenated.
        title: Title of the conversation.

    Returns:
        Concatenated message.
    """
    if not message:
        return ""

    sender = message["author"]["role"] if message["author"] else "unknown"
    text = message["content"]["parts"][0]
    date = datetime.datetime.fromtimestamp(message["create_time"]).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return f"{title} - {sender} on {date}: {text}\n\n"


[docs]class ChatGPTLoader(BaseLoader):
    """Loader that loads conversations from exported ChatGPT data."""

    def __init__(self, log_file: str, num_logs: int = -1):
        self.log_file = log_file
        self.num_logs = num_logs

[docs]    def load(self) -> List[Document]:
        with open(self.log_file, encoding="utf8") as f:
            data = json.load(f)
        # A non-positive num_logs (the default -1) means "load every conversation";
        # only truncate when a positive limit was requested.
        if self.num_logs > 0:
            data = data[: self.num_logs]

        documents = []
        for d in data:
            title = d["title"]
            messages = d["mapping"]
            text = "".join(
                [
                    concatenate_rows(messages[key]["message"], title)
                    for idx, key in enumerate(messages)
                    if not (
                        idx == 0
                        and messages[key]["message"]["author"]["role"] == "system"
                    )
                ]
            )
            metadata = {"source": str(self.log_file)}
            documents.append(Document(page_content=text, metadata=metadata))

        return documents
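A minimal usage sketch; the path is a placeholder for the ``conversations.json`` file typically found in a ChatGPT data export:

.. code-block:: python

    from langchain.document_loaders.chatgpt import ChatGPTLoader

    loader = ChatGPTLoader(log_file="conversations.json", num_logs=10)  # placeholder path
    docs = loader.load()  # one Document per conversation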
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/chatgpt.html
851c0b84-26ba-4483-8f4b-b057d839f334
Source code for langchain.document_loaders.html_bs """Loader that uses bs4 to load HTML files, enriching metadata with page title.""" import logging from typing import Dict, List, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class BSHTMLLoader(BaseLoader): """Loader that uses beautiful soup to parse HTML files.""" def __init__( self, file_path: str, open_encoding: Union[str, None] = None, bs_kwargs: Union[dict, None] = None, get_text_separator: str = "", ) -> None: """Initialise with path, and optionally, file encoding to use, and any kwargs to pass to the BeautifulSoup object.""" try: import bs4 # noqa:F401 except ImportError: raise ValueError( "beautifulsoup4 package not found, please install it with " "`pip install beautifulsoup4`" ) self.file_path = file_path self.open_encoding = open_encoding if bs_kwargs is None: bs_kwargs = {"features": "lxml"} self.bs_kwargs = bs_kwargs self.get_text_separator = get_text_separator [docs] def load(self) -> List[Document]: from bs4 import BeautifulSoup """Load HTML document into document objects.""" with open(self.file_path, "r", encoding=self.open_encoding) as f: soup = BeautifulSoup(f, **self.bs_kwargs) text = soup.get_text(self.get_text_separator) if soup.title: title = str(soup.title.string) else: title = "" metadata: Dict[str, Union[str, None]] = { "source": self.file_path, "title": title, } return [Document(page_content=text, metadata=metadata)]
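A minimal usage sketch; ``"example.html"`` is a placeholder path, and both ``beautifulsoup4`` and ``lxml`` (the default parser passed via ``bs_kwargs``) must be installed:

.. code-block:: python

    from langchain.document_loaders.html_bs import BSHTMLLoader

    loader = BSHTMLLoader("example.html", get_text_separator="\n")  # placeholder path
    docs = loader.load()
    print(docs[0].metadata["title"])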
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/html_bs.html
00219155-4974-43f0-af8d-6f191c5d1ecc
Source code for langchain.document_loaders.blob_loaders.file_system """Use to load blobs from the local file system.""" from pathlib import Path from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader T = TypeVar("T") def _make_iterator( length_func: Callable[[], int], show_progress: bool = False ) -> Callable[[Iterable[T]], Iterator[T]]: """Create a function that optionally wraps an iterable in tqdm.""" if show_progress: try: from tqdm.auto import tqdm except ImportError: raise ImportError( "You must install tqdm to use show_progress=True." "You can install tqdm with `pip install tqdm`." ) # Make sure to provide `total` here so that tqdm can show # a progress bar that takes into account the total number of files. def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]: """Wrap an iterable in a tqdm progress bar.""" return tqdm(iterable, total=length_func()) iterator = _with_tqdm else: iterator = iter # type: ignore return iterator # PUBLIC API [docs]class FileSystemBlobLoader(BlobLoader): """Blob loader for the local file system. Example: .. code-block:: python from langchain.document_loaders.blob_loaders import FileSystemBlobLoader loader = FileSystemBlobLoader("/path/to/directory") for blob in loader.yield_blobs(): print(blob) """ def __init__( self, path: Union[str, Path], *, glob: str = "**/[!.]*", suffixes: Optional[Sequence[str]] = None, show_progress: bool = False, ) -> None: """Initialize with path to directory and how to glob over it. Args: path: Path to directory to load from glob: Glob pattern relative to the specified path by default set to pick up all non-hidden files suffixes: Provide to keep only files with these suffixes Useful when wanting to keep files with different suffixes Suffixes must include the dot, e.g. ".txt" show_progress: If true, will show a progress bar as the files are loaded. This forces an iteration through all matching files to count them prior to loading them. Examples: ... code-block:: python # Recursively load all text files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt") # Recursively load all non-hidden files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*") # Load all files in a directory without recursion. loader = FileSystemBlobLoader("/path/to/directory", glob="*") """ if isinstance(path, Path): _path = path elif isinstance(path, str): _path = Path(path) else: raise TypeError(f"Expected str or Path, got {type(path)}") self.path = _path self.glob = glob self.suffixes = set(suffixes or []) self.show_progress = show_progress [docs] def yield_blobs( self, ) -> Iterable[Blob]: """Yield blobs that match the requested pattern.""" iterator = _make_iterator( length_func=self.count_matching_files, show_progress=self.show_progress ) for path in iterator(self._yield_paths()): yield Blob.from_path(path) def _yield_paths(self) -> Iterable[Path]: """Yield paths that match the requested pattern.""" paths = self.path.glob(self.glob) for path in paths: if path.is_file(): if self.suffixes and path.suffix not in self.suffixes: continue yield path [docs] def count_matching_files(self) -> int: """Count files that match the pattern without loading them.""" # Carry out a full iteration to count the files without # materializing anything expensive in memory. num = 0 for _ in self._yield_paths(): num += 1 return num
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/file_system.html
673c2b80-2ce7-4da1-8c64-df06b59737fc
Source code for langchain.document_loaders.blob_loaders.youtube_audio from typing import Iterable, List from langchain.document_loaders.blob_loaders import FileSystemBlobLoader from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader [docs]class YoutubeAudioLoader(BlobLoader): """Load YouTube urls as audio file(s).""" def __init__(self, urls: List[str], save_dir: str): if not isinstance(urls, list): raise TypeError("urls must be a list") self.urls = urls self.save_dir = save_dir [docs] def yield_blobs(self) -> Iterable[Blob]: """Yield audio blobs for each url.""" try: import yt_dlp except ImportError: raise ValueError( "yt_dlp package not found, please install it with " "`pip install yt_dlp`" ) # Use yt_dlp to download audio given a YouTube url ydl_opts = { "format": "m4a/bestaudio/best", "noplaylist": True, "outtmpl": self.save_dir + "/%(title)s.%(ext)s", "postprocessors": [ { "key": "FFmpegExtractAudio", "preferredcodec": "m4a", } ], } for url in self.urls: # Download file with yt_dlp.YoutubeDL(ydl_opts) as ydl: ydl.download(url) # Yield the written blobs loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a") for blob in loader.yield_blobs(): yield blob
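A minimal usage sketch; the URL and save directory are placeholders, and ``yt_dlp`` plus an ``ffmpeg`` binary (for the audio post-processor) are assumed to be available:

.. code-block:: python

    from langchain.document_loaders.blob_loaders.youtube_audio import (
        YoutubeAudioLoader,
    )

    loader = YoutubeAudioLoader(
        urls=["https://www.youtube.com/watch?v=XXXXXXXXXXX"],  # placeholder URL
        save_dir="./audio",                                    # placeholder directory
    )
    for blob in loader.yield_blobs():
        print(blob.source)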
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/youtube_audio.html
ab6694c8-1dd2-4984-b462-d0af5a3e1ace
Source code for langchain.document_loaders.blob_loaders.schema """Schema for Blobs and Blob Loaders. The goal is to facilitate decoupling of content loading from content parsing code. In addition, content loading code should provide a lazy loading interface by default. """ from __future__ import annotations import contextlib import mimetypes from abc import ABC, abstractmethod from io import BufferedReader, BytesIO from pathlib import PurePath from typing import Any, Generator, Iterable, Mapping, Optional, Union from pydantic import BaseModel, root_validator PathLike = Union[str, PurePath] [docs]class Blob(BaseModel): """A blob is used to represent raw data by either reference or value. Provides an interface to materialize the blob in different representations, and help to decouple the development of data loaders from the downstream parsing of the raw data. Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob """ data: Union[bytes, str, None] # Raw data mimetype: Optional[str] = None # Not to be confused with a file extension encoding: str = "utf-8" # Use utf-8 as default encoding, if decoding to string # Location where the original content was found # Represent location on the local file system # Useful for situations where downstream code assumes it must work with file paths # rather than in-memory content. path: Optional[PathLike] = None class Config: arbitrary_types_allowed = True frozen = True @property def source(self) -> Optional[str]: """The source location of the blob as string if known otherwise none.""" return str(self.path) if self.path else None @root_validator(pre=True) def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]: """Verify that either data or path is provided.""" if "data" not in values and "path" not in values: raise ValueError("Either data or path must be provided") return values [docs] def as_string(self) -> str: """Read data as a string.""" if self.data is None and self.path: with open(str(self.path), "r", encoding=self.encoding) as f: return f.read() elif isinstance(self.data, bytes): return self.data.decode(self.encoding) elif isinstance(self.data, str): return self.data else: raise ValueError(f"Unable to get string for blob {self}") [docs] def as_bytes(self) -> bytes: """Read data as bytes.""" if isinstance(self.data, bytes): return self.data elif isinstance(self.data, str): return self.data.encode(self.encoding) elif self.data is None and self.path: with open(str(self.path), "rb") as f: return f.read() else: raise ValueError(f"Unable to get bytes for blob {self}") [docs] @contextlib.contextmanager def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]: """Read data as a byte stream.""" if isinstance(self.data, bytes): yield BytesIO(self.data) elif self.data is None and self.path: with open(str(self.path), "rb") as f: yield f else: raise NotImplementedError(f"Unable to convert blob {self}") [docs] @classmethod def from_path( cls, path: PathLike, *, encoding: str = "utf-8", mime_type: Optional[str] = None, guess_type: bool = True, ) -> Blob: """Load the blob from a path like object. 
Args: path: path like object to file to be read encoding: Encoding to use if decoding the bytes into a string mime_type: if provided, will be set as the mime-type of the data guess_type: If True, the mimetype will be guessed from the file extension, if a mime-type was not provided Returns: Blob instance """ if mime_type is None and guess_type: _mimetype = mimetypes.guess_type(path)[0] if guess_type else None else: _mimetype = mime_type # We do not load the data immediately, instead we treat the blob as a # reference to the underlying data. return cls(data=None, mimetype=_mimetype, encoding=encoding, path=path) [docs] @classmethod def from_data( cls, data: Union[str, bytes], *, encoding: str = "utf-8", mime_type: Optional[str] = None, path: Optional[str] = None, ) -> Blob: """Initialize the blob from in-memory data. Args: data: the in-memory data associated with the blob encoding: Encoding to use if decoding the bytes into a string mime_type: if provided, will be set as the mime-type of the data path: if provided, will be set as the source from which the data came Returns: Blob instance """ return cls(data=data, mimetype=mime_type, encoding=encoding, path=path) def __repr__(self) -> str: """Define the blob representation.""" str_repr = f"Blob {id(self)}" if self.source: str_repr += f" {self.source}" return str_repr [docs]class BlobLoader(ABC): """Abstract interface for blob loaders implementation. Implementer should be able to load raw content from a storage system according to some criteria and return the raw content lazily as a stream of blobs. """ [docs] @abstractmethod def yield_blobs( self, ) -> Iterable[Blob]: """A lazy loader for raw data represented by LangChain's Blob object. Returns: A generator over blobs """
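A minimal sketch of working with ``Blob`` directly; ``"example.txt"`` is a placeholder path:

.. code-block:: python

    from langchain.document_loaders.blob_loaders.schema import Blob

    # From a path: the data stays on disk until one of the as_* methods is called.
    blob = Blob.from_path("example.txt")  # placeholder path

    # From in-memory data:
    blob = Blob.from_data("hello world", mime_type="text/plain")
    print(blob.as_string())
    print(blob.as_bytes())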
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/schema.html
79fc91ed-90d1-4610-8297-7efb290b363b
Source code for langchain.embeddings.bedrock import json import os from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings [docs]class BedrockEmbeddings(BaseModel, Embeddings): """Embeddings provider to invoke Bedrock embedding models. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. """ """ Example: .. code-block:: python from langchain.bedrock_embeddings import BedrockEmbeddings region_name ="us-east-1" credentials_profile_name = "default" model_id = "amazon.titan-e1t-medium" be = BedrockEmbeddings( credentials_profile_name=credentials_profile_name, region_name=region_name, model_id=model_id ) """ client: Any #: :meta private: region_name: Optional[str] = None """The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here. """ credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ model_id: str = "amazon.titan-e1t-medium" """Id of the model to call, e.g., amazon.titan-e1t-medium, this is equivalent to the modelId property in the list-foundation-models api""" model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" if values["client"] is not None: return values try: import boto3 if values["credentials_profile_name"] is not None: session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() client_params = {} if values["region_name"]: client_params["region_name"] = values["region_name"] values["client"] = session.client("bedrock", **client_params) except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e return values def _embedding_func(self, text: str) -> List[float]: """Call out to Bedrock embedding endpoint.""" # replace newlines, which can negatively affect performance. 
text = text.replace(os.linesep, " ") _model_kwargs = self.model_kwargs or {} input_body = {**_model_kwargs} input_body["inputText"] = text body = json.dumps(input_body) content_type = "application/json" accepts = "application/json" embeddings = [] try: response = self.client.invoke_model( body=body, modelId=self.model_id, accept=accepts, contentType=content_type, ) response_body = json.loads(response.get("body").read()) embeddings = response_body.get("embedding") except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return embeddings [docs] def embed_documents( self, texts: List[str], chunk_size: int = 1 ) -> List[List[float]]: """Compute doc embeddings using a Bedrock model. Args: texts: The list of texts to embed. chunk_size: Bedrock currently only allows single string inputs, so chunk size is always 1. This input is here only for compatibility with the embeddings interface. Returns: List of embeddings, one for each text. """ results = [] for text in texts: response = self._embedding_func(text) results.append(response) return results [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a Bedrock model. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func(text)
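A minimal usage sketch mirroring the values in the class docstring above; the region, profile name, and model id are placeholders, and ``boto3`` plus valid AWS credentials with Bedrock access are assumed:

.. code-block:: python

    from langchain.embeddings.bedrock import BedrockEmbeddings

    embeddings = BedrockEmbeddings(
        credentials_profile_name="default",     # placeholder profile
        region_name="us-east-1",                # placeholder region
        model_id="amazon.titan-e1t-medium",     # placeholder model id
    )
    query_vector = embeddings.embed_query("What is the content of the document?")
    doc_vectors = embeddings.embed_documents(["first document", "second document"])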
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
5374b6aa-fe78-4c0e-91cf-a7c042b1fccc
Source code for langchain.embeddings.self_hosted """Running custom embedding models on self-hosted remote hardware.""" from typing import Any, Callable, List from pydantic import Extra from langchain.embeddings.base import Embeddings from langchain.llms import SelfHostedPipeline def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return pipeline(*args, **kwargs) [docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings): """Runs custom embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example using a model load function: .. code-block:: python from langchain.embeddings import SelfHostedEmbeddings from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") def get_pipeline(): model_id = "facebook/bart-large" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) return pipeline("feature-extraction", model=model, tokenizer=tokenizer) embeddings = SelfHostedEmbeddings( model_load_fn=get_pipeline, hardware=gpu model_reqs=["./", "torch", "transformers"], ) Example passing in a pipeline path: .. code-block:: python from langchain.embeddings import SelfHostedHFEmbeddings import runhouse as rh from transformers import pipeline gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") pipeline = pipeline(model="bert-base-uncased", task="feature-extraction") rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(gpu, path="models") embeddings = SelfHostedHFEmbeddings.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) """ inference_fn: Callable = _embed_documents """Inference function to extract the embeddings on the remote hardware.""" inference_kwargs: Any = None """Any kwargs to pass to the model's inference function.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed.s Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client(self.pipeline_ref, texts) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embeddings = self.client(self.pipeline_ref, text) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
8c216911-f374-496f-a09a-45eee54e278f
Source code for langchain.embeddings.aleph_alpha
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


[docs]class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
    """Wrapper for Aleph Alpha's Asymmetric Embeddings.

    AA provides you with an endpoint to embed a document and a query.
    The models were optimized to make the embeddings of documents and
    the query for a document as similar as possible.
    To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/

    Example:
        .. code-block:: python

            from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding

            embeddings = AlephAlphaAsymmetricSemanticEmbedding()

            document = "This is the content of the document"
            query = "What is the content of the document?"

            doc_result = embeddings.embed_documents([document])
            query_result = embeddings.embed_query(query)
    """

    client: Any  #: :meta private:
    model: Optional[str] = "luminous-base"
    """Model name to use."""
    hosting: Optional[str] = "https://api.aleph-alpha.com"
    """Optional parameter that specifies which datacenters may process the request."""
    normalize: Optional[bool] = True
    """Should returned embeddings be normalized"""
    compress_to_size: Optional[int] = 128
    """Should the returned embeddings come back as an original 5120-dim vector,
    or should they be compressed to 128-dim."""
    contextual_control_threshold: Optional[int] = None
    """Attention control parameters only apply to those tokens that have
    explicitly been set in the request."""
    control_log_additive: Optional[bool] = True
    """Apply controls on prompt items by adding the log(control_factor)
    to attention scores."""
    aleph_alpha_api_key: Optional[str] = None
    """API key for Aleph Alpha API."""

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        aleph_alpha_api_key = get_from_dict_or_env(
            values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
        )
        try:
            from aleph_alpha_client import Client
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        values["client"] = Client(token=aleph_alpha_api_key)
        return values

[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's asymmetric Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        document_embeddings = []
        for text in texts:
            document_params = {
                "prompt": Prompt.from_text(text),
                "representation": SemanticRepresentation.Document,
                "compress_to_size": self.compress_to_size,
                "normalize": self.normalize,
                "contextual_control_threshold": self.contextual_control_threshold,
                "control_log_additive": self.control_log_additive,
            }
            document_request = SemanticEmbeddingRequest(**document_params)
            document_response = self.client.semantic_embed(
                request=document_request, model=self.model
            )
            document_embeddings.append(document_response.embedding)
        return document_embeddings

[docs] def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's asymmetric, query embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        symmetric_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Query,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }
        symmetric_request = SemanticEmbeddingRequest(**symmetric_params)
        symmetric_response = self.client.semantic_embed(
            request=symmetric_request, model=self.model
        )
        return symmetric_response.embedding


[docs]class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
    """The symmetric version of Aleph Alpha's semantic embeddings.

    The main difference is that here, both the documents and
    queries are embedded with SemanticRepresentation.Symmetric.

    Example:
        .. code-block:: python

            from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding

            embeddings = AlephAlphaSymmetricSemanticEmbedding()

            text = "This is a test text"

            doc_result = embeddings.embed_documents([text])
            query_result = embeddings.embed_query(text)
    """

    def _embed(self, text: str) -> List[float]:
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        query_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Symmetric,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }
        query_request = SemanticEmbeddingRequest(**query_params)
        query_response = self.client.semantic_embed(
            request=query_request, model=self.model
        )
        return query_response.embedding

[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        document_embeddings = []
        for text in texts:
            document_embeddings.append(self._embed(text))
        return document_embeddings

[docs] def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's symmetric, query embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)
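A minimal usage sketch for the two classes above, assuming the aleph_alpha_client
package is installed and a valid key is exported as ALEPH_ALPHA_API_KEY; the sample
texts and the explicit compress_to_size value are illustrative and not part of the
source itself.

# Sketch only: requires `pip install aleph_alpha_client` and a real
# ALEPH_ALPHA_API_KEY in the environment (picked up by validate_environment above).
from langchain.embeddings import (
    AlephAlphaAsymmetricSemanticEmbedding,
    AlephAlphaSymmetricSemanticEmbedding,
)

# Asymmetric: documents and queries are embedded with different representations.
asymmetric = AlephAlphaAsymmetricSemanticEmbedding(compress_to_size=128)
doc_vectors = asymmetric.embed_documents(
    ["Aleph Alpha exposes a semantic_embed endpoint."]
)
query_vector = asymmetric.embed_query("What endpoint does Aleph Alpha expose?")

# Symmetric: both sides use SemanticRepresentation.Symmetric.
symmetric = AlephAlphaSymmetricSemanticEmbedding()
same_space_vector = symmetric.embed_query("This is a test text")

# Embeddings come back as plain lists of floats (128-dim here due to compression).
print(len(doc_vectors[0]), len(query_vector), len(same_space_vector))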
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
59fcd04b-4d2d-4074-90ca-b78e21fcac4f
Source code for langchain.embeddings.modelscope_hub
"""Wrapper around ModelScopeHub embedding models."""
from typing import Any, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings


[docs]class ModelScopeEmbeddings(BaseModel, Embeddings):
    """Wrapper around modelscope_hub embedding models.

    To use, you should have the ``modelscope`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import ModelScopeEmbeddings

            model_id = "damo/nlp_corom_sentence-embedding_english-base"
            embed = ModelScopeEmbeddings(model_id=model_id)
    """

    embed: Any
    model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the modelscope pipeline."""
        super().__init__(**kwargs)
        try:
            from modelscope.pipelines import pipeline
            from modelscope.utils.constant import Tasks

            self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id)
        except ImportError as e:
            raise ImportError(
                "Could not import the modelscope python package. "
                "Please install it with `pip install modelscope`."
            ) from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a modelscope embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        inputs = {"source_sentence": texts}
        embeddings = self.embed(input=inputs)["text_embedding"]
        return embeddings.tolist()

[docs] def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a modelscope embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        inputs = {"source_sentence": [text]}
        embedding = self.embed(input=inputs)["text_embedding"][0]
        return embedding.tolist()
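A brief usage sketch for ModelScopeEmbeddings, assuming the modelscope package is
installed and the default CoROM sentence-embedding model can be downloaded from the
ModelScope hub on first use; the sample sentences are illustrative only.

# Sketch only: requires `pip install modelscope`; the constructor builds a
# sentence_embedding pipeline and downloads the model weights if needed.
from langchain.embeddings import ModelScopeEmbeddings

embed = ModelScopeEmbeddings(
    model_id="damo/nlp_corom_sentence-embedding_english-base"
)

doc_vectors = embed.embed_documents(
    ["ModelScope hosts sentence-embedding pipelines.", "LangChain wraps them."]
)
query_vector = embed.embed_query("Which pipelines does ModelScope host?")

# Newlines are replaced with spaces before embedding; results are plain lists.
print(len(doc_vectors), len(doc_vectors[0]), len(query_vector))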
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/modelscope_hub.html