Dataset columns: id (string, 14-16 chars), text (string, 29-2.73k chars), source (string, 49-117 chars).
74f1c3d3e9b2-3
else: # Directly use the primitive type pass else: raise NotImplementedError(f"Unsupported type: {schema_type}") return schema_type @staticmethod def _validate_location(location: APIPropertyLocation, name: str) -> None: if location not in SUPPORTED_LOCATIONS: raise NotImplementedError( INVALID_LOCATION_TEMPL.format(location=location, name=name) ) @staticmethod def _validate_content(content: Optional[Dict[str, MediaType]]) -> None: if content: raise ValueError( "API Properties with media content not supported. " "Media content only supported within APIRequestBodyProperty's" ) @staticmethod def _get_schema(parameter: Parameter, spec: OpenAPISpec) -> Optional[Schema]: schema = parameter.param_schema if isinstance(schema, Reference): schema = spec.get_referenced_schema(schema) elif schema is None: return None elif not isinstance(schema, Schema): raise ValueError(f"Error dereferencing schema: {schema}") return schema @staticmethod def is_supported_location(location: str) -> bool: """Return whether the provided location is supported.""" try: return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS except ValueError: return False @classmethod def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> "APIProperty": """Instantiate from an OpenAPI Parameter.""" location = APIPropertyLocation.from_str(parameter.param_in) cls._validate_location( location, parameter.name, ) cls._validate_content(parameter.content) schema = cls._get_schema(parameter, spec)
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-4
schema = cls._get_schema(parameter, spec) schema_type = cls._get_schema_type(parameter, schema) default_val = schema.default if schema is not None else None return cls( name=parameter.name, location=location, default=default_val, description=parameter.description, required=parameter.required, type=schema_type, ) class APIRequestBodyProperty(APIPropertyBase): """A model for a request body property.""" properties: List["APIRequestBodyProperty"] = Field(alias="properties") """The sub-properties of the property.""" # This is useful for handling nested property cycles. # We can define separate types in that case. references_used: List[str] = Field(alias="references_used") """The references used by the property.""" @classmethod def _process_object_schema( cls, schema: Schema, spec: OpenAPISpec, references_used: List[str] ) -> Tuple[Union[str, List[str], None], List["APIRequestBodyProperty"]]: properties = [] required_props = schema.required or [] if schema.properties is None: raise ValueError( f"No properties found when processing object schema: {schema}" ) for prop_name, prop_schema in schema.properties.items(): if isinstance(prop_schema, Reference): ref_name = prop_schema.ref.split("/")[-1] if ref_name not in references_used: references_used.append(ref_name) prop_schema = spec.get_referenced_schema(prop_schema) else: continue properties.append( cls.from_schema( schema=prop_schema, name=prop_name, required=prop_name in required_props, spec=spec,
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-5
required=prop_name in required_props, spec=spec, references_used=references_used, ) ) return schema.type, properties @classmethod def _process_array_schema( cls, schema: Schema, name: str, spec: OpenAPISpec, references_used: List[str] ) -> str: items = schema.items if items is not None: if isinstance(items, Reference): ref_name = items.ref.split("/")[-1] if ref_name not in references_used: references_used.append(ref_name) items = spec.get_referenced_schema(items) else: pass return f"Array<{ref_name}>" else: pass if isinstance(items, Schema): array_type = cls.from_schema( schema=items, name=f"{name}Item", required=True, # TODO: Add required spec=spec, references_used=references_used, ) return f"Array<{array_type.type}>" return "array" @classmethod def from_schema( cls, schema: Schema, name: str, required: bool, spec: OpenAPISpec, references_used: Optional[List[str]] = None, ) -> "APIRequestBodyProperty": """Recursively populate from an OpenAPI Schema.""" if references_used is None: references_used = [] schema_type = schema.type properties: List[APIRequestBodyProperty] = [] if schema_type == "object" and schema.properties: schema_type, properties = cls._process_object_schema( schema, spec, references_used ) elif schema_type == "array":
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-6
schema, spec, references_used ) elif schema_type == "array": schema_type = cls._process_array_schema(schema, name, spec, references_used) elif schema_type in PRIMITIVE_TYPES: # Use the primitive type directly pass elif schema_type is None: # No typing specified/parsed. WIll map to 'any' pass else: raise ValueError(f"Unsupported type: {schema_type}") return cls( name=name, required=required, type=schema_type, default=schema.default, description=schema.description, properties=properties, references_used=references_used, ) class APIRequestBody(BaseModel): """A model for a request body.""" description: Optional[str] = Field(alias="description") """The description of the request body.""" properties: List[APIRequestBodyProperty] = Field(alias="properties") # E.g., application/json - we only support JSON at the moment. media_type: str = Field(alias="media_type") """The media type of the request body.""" @classmethod def _process_supported_media_type( cls, media_type_obj: MediaType, spec: OpenAPISpec, ) -> List[APIRequestBodyProperty]: """Process the media type of the request body.""" references_used = [] schema = media_type_obj.media_type_schema if isinstance(schema, Reference): references_used.append(schema.ref.split("/")[-1]) schema = spec.get_referenced_schema(schema) if schema is None: raise ValueError( f"Could not resolve schema for media type: {media_type_obj}" ) api_request_body_properties = []
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-7
) api_request_body_properties = [] required_properties = schema.required or [] if schema.type == "object" and schema.properties: for prop_name, prop_schema in schema.properties.items(): if isinstance(prop_schema, Reference): prop_schema = spec.get_referenced_schema(prop_schema) api_request_body_properties.append( APIRequestBodyProperty.from_schema( schema=prop_schema, name=prop_name, required=prop_name in required_properties, spec=spec, ) ) else: api_request_body_properties.append( APIRequestBodyProperty( name="body", required=True, type=schema.type, default=schema.default, description=schema.description, properties=[], references_used=references_used, ) ) return api_request_body_properties @classmethod def from_request_body( cls, request_body: RequestBody, spec: OpenAPISpec ) -> "APIRequestBody": """Instantiate from an OpenAPI RequestBody.""" properties = [] for media_type, media_type_obj in request_body.content.items(): if media_type not in _SUPPORTED_MEDIA_TYPES: continue api_request_body_properties = cls._process_supported_media_type( media_type_obj, spec, ) properties.extend(api_request_body_properties) return cls( description=request_body.description, properties=properties, media_type=media_type, ) [docs]class APIOperation(BaseModel): """A model for a single API operation.""" operation_id: str = Field(alias="operation_id") """The unique identifier of the operation.""" description: Optional[str] = Field(alias="description")
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-8
description: Optional[str] = Field(alias="description") """The description of the operation.""" base_url: str = Field(alias="base_url") """The base URL of the operation.""" path: str = Field(alias="path") """The path of the operation.""" method: HTTPVerb = Field(alias="method") """The HTTP method of the operation.""" properties: Sequence[APIProperty] = Field(alias="properties") # TODO: Add parse in used components to be able to specify what type of # referenced object it is. # """The properties of the operation.""" # components: Dict[str, BaseModel] = Field(alias="components") request_body: Optional[APIRequestBody] = Field(alias="request_body") """The request body of the operation.""" @staticmethod def _get_properties_from_parameters( parameters: List[Parameter], spec: OpenAPISpec ) -> List[APIProperty]: """Get the properties of the operation.""" properties = [] for param in parameters: if APIProperty.is_supported_location(param.param_in): properties.append(APIProperty.from_parameter(param, spec)) elif param.required: raise ValueError( INVALID_LOCATION_TEMPL.format( location=param.param_in, name=param.name ) ) else: logger.warning( INVALID_LOCATION_TEMPL.format( location=param.param_in, name=param.name ) + " Ignoring optional parameter" ) pass return properties [docs] @classmethod def from_openapi_url( cls, spec_url: str, path: str, method: str, ) -> "APIOperation":
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-9
path: str, method: str, ) -> "APIOperation": """Create an APIOperation from an OpenAPI URL.""" spec = OpenAPISpec.from_url(spec_url) return cls.from_openapi_spec(spec, path, method) [docs] @classmethod def from_openapi_spec( cls, spec: OpenAPISpec, path: str, method: str, ) -> "APIOperation": """Create an APIOperation from an OpenAPI spec.""" operation = spec.get_operation(path, method) parameters = spec.get_parameters_for_operation(operation) properties = cls._get_properties_from_parameters(parameters, spec) operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method) request_body = spec.get_request_body_for_operation(operation) api_request_body = ( APIRequestBody.from_request_body(request_body, spec) if request_body is not None else None ) description = operation.description or operation.summary if not description and spec.paths is not None: description = spec.paths[path].description or spec.paths[path].summary return cls( operation_id=operation_id, description=description, base_url=spec.base_url, path=path, method=method, properties=properties, request_body=api_request_body, ) [docs] @staticmethod def ts_type_from_python(type_: SCHEMA_TYPE) -> str: if type_ is None: # TODO: Handle Nones better. These often result when # parsing specs that are < v3 return "any" elif isinstance(type_, str): return { "str": "string",
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-10
elif isinstance(type_, str): return { "str": "string", "integer": "number", "float": "number", "date-time": "string", }.get(type_, type_) elif isinstance(type_, tuple): return f"Array<{APIOperation.ts_type_from_python(type_[0])}>" elif isinstance(type_, type) and issubclass(type_, Enum): return " | ".join([f"'{e.value}'" for e in type_]) else: return str(type_) def _format_nested_properties( self, properties: List[APIRequestBodyProperty], indent: int = 2 ) -> str: """Format nested properties.""" formatted_props = [] for prop in properties: prop_name = prop.name prop_type = self.ts_type_from_python(prop.type) prop_required = "" if prop.required else "?" prop_desc = f"/* {prop.description} */" if prop.description else "" if prop.properties: nested_props = self._format_nested_properties( prop.properties, indent + 2 ) prop_type = f"{{\n{nested_props}\n{' ' * indent}}}" formatted_props.append( f"{prop_desc}\n{' ' * indent}{prop_name}{prop_required}: {prop_type}," ) return "\n".join(formatted_props) [docs] def to_typescript(self) -> str: """Get typescript string representation of the operation.""" operation_name = self.operation_id params = [] if self.request_body: formatted_request_body_props = self._format_nested_properties( self.request_body.properties ) params.append(formatted_request_body_props)
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
74f1c3d3e9b2-11
self.request_body.properties ) params.append(formatted_request_body_props) for prop in self.properties: prop_name = prop.name prop_type = self.ts_type_from_python(prop.type) prop_required = "" if prop.required else "?" prop_desc = f"/* {prop.description} */" if prop.description else "" params.append(f"{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},") formatted_params = "\n".join(params).strip() description_str = f"/* {self.description} */" if self.description else "" typescript_definition = f""" {description_str} type {operation_name} = (_: {{ {formatted_params} }}) => any; """ return typescript_definition.strip() @property def query_params(self) -> List[str]: return [ property.name for property in self.properties if property.location == APIPropertyLocation.QUERY ] @property def path_params(self) -> List[str]: return [ property.name for property in self.properties if property.location == APIPropertyLocation.PATH ] @property def body_params(self) -> List[str]: if self.request_body is None: return [] return [prop.name for prop in self.request_body.properties] By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
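The APIOperation model assembled across the chunks above can be built straight from an OpenAPI document and rendered as the TypeScript-style signature used for prompting. A minimal sketch; the spec URL, path, and method below are placeholders:

from langchain.tools.openapi.utils.api_models import APIOperation

operation = APIOperation.from_openapi_url(
    "https://example.com/openapi.json",  # placeholder OpenAPI (v3) spec
    path="/pets",
    method="get",
)
print(operation.operation_id)     # cleaned operation id
print(operation.query_params)     # names of query-string parameters
print(operation.to_typescript())  # TypeScript-style type definition used in prompts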
fc0e46fa5d83-0
Source code for langchain.tools.scenexplain.tool """Tool for the SceneXplain API.""" from typing import Optional from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utilities.scenexplain import SceneXplainAPIWrapper class SceneXplainInput(BaseModel): """Input for SceneXplain.""" query: str = Field(..., description="The link to the image to explain") [docs]class SceneXplainTool(BaseTool): """Tool that adds the capability to explain images.""" name = "Image Explainer" description = ( "An Image Captioning Tool: Use this tool to generate a detailed caption " "for an image. The input can be an image file of any format, and " "the output will be a text description that covers every detail of the image." ) api_wrapper: SceneXplainAPIWrapper = Field(default_factory=SceneXplainAPIWrapper) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the tool.""" return self.api_wrapper.run(query) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("SceneXplainTool does not support async") By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/scenexplain/tool.html
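A usage sketch for SceneXplainTool; it assumes SceneXplain credentials are already configured for SceneXplainAPIWrapper (the exact environment variable is not shown in this chunk), and the image URL is a placeholder:

from langchain.tools.scenexplain.tool import SceneXplainTool

tool = SceneXplainTool()  # api_wrapper is built by its default_factory
caption = tool.run("https://example.com/photo.jpg")  # link to the image to explain
print(caption)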
a287b68e23a2-0
Source code for langchain.tools.metaphor_search.tool """Tool for the Metaphor search API.""" from typing import Dict, List, Optional, Union from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper [docs]class MetaphorSearchResults(BaseTool): """Tool that has capability to query the Metaphor Search API and get back json.""" name = "Metaphor Search Results JSON" description = ( "A wrapper around Metaphor Search. " "Input should be a Metaphor-optimized query. " "Output is a JSON array of the query results" ) api_wrapper: MetaphorSearchAPIWrapper def _run( self, query: str, num_results: int, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> Union[List[Dict], str]: """Use the tool.""" try: return self.api_wrapper.results(query, num_results) except Exception as e: return repr(e) async def _arun( self, query: str, num_results: int, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> Union[List[Dict], str]: """Use the tool asynchronously.""" try: return await self.api_wrapper.results_async(query, num_results) except Exception as e: return repr(e) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/metaphor_search/tool.html
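Since _run takes both a query and num_results, MetaphorSearchResults is invoked with a mapping of its arguments. A sketch, assuming the Metaphor API key is already available to MetaphorSearchAPIWrapper:

from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper

tool = MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper())
results = tool.run({"query": "state of solid-state battery research", "num_results": 5})
print(results)  # list of result dicts, or the repr of the exception on failure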
bb12e9735f43-0
Source code for langchain.tools.brave_search.tool from __future__ import annotations from typing import Any, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utilities.brave_search import BraveSearchWrapper [docs]class BraveSearch(BaseTool): name = "brave-search" description = ( "a search engine. " "useful for when you need to answer questions about current events." " input should be a search query." ) search_wrapper: BraveSearchWrapper [docs] @classmethod def from_api_key( cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any ) -> BraveSearch: wrapper = BraveSearchWrapper(api_key=api_key, search_kwargs=search_kwargs or {}) return cls(search_wrapper=wrapper, **kwargs) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return self.search_wrapper.run(query) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("BraveSearch does not support async") By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/brave_search/tool.html
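The from_api_key constructor above wires up BraveSearchWrapper directly. A sketch with a placeholder key; the search_kwargs keys are an assumption about what the wrapper forwards to the Brave API:

from langchain.tools.brave_search.tool import BraveSearch

tool = BraveSearch.from_api_key(api_key="YOUR-BRAVE-API-KEY", search_kwargs={"count": 3})
print(tool.run("latest LangChain release"))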
02a8fb261ac9-0
Source code for langchain.tools.youtube.search """ Adapted from https://github.com/venuv/langchain_yt_tools CustomYTSearchTool searches YouTube videos related to a person and returns a specified number of video URLs. Input to this tool should be a comma separated list, - the first part contains a person name - and the second(optional) a number that is the maximum number of video results to return """ import json from typing import Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools import BaseTool [docs]class YouTubeSearchTool(BaseTool): name = "YouTubeSearch" description = ( "search for youtube videos associated with a person. " "the input to this tool should be a comma separated list, " "the first part contains a person name and the second a " "number that is the maximum number of video results " "to return aka num_results. the second part is optional" ) def _search(self, person: str, num_results: int) -> str: from youtube_search import YoutubeSearch results = YoutubeSearch(person, num_results).to_json() data = json.loads(results) url_suffix_list = [video["url_suffix"] for video in data["videos"]] return str(url_suffix_list) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" values = query.split(",") person = values[0] if len(values) > 1: num_results = int(values[1]) else:
https://python.langchain.com/en/latest/_modules/langchain/tools/youtube/search.html
02a8fb261ac9-1
num_results = int(values[1]) else: num_results = 2 return self._search(person, num_results) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("YouTubeSearchTool does not yet support async") By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/youtube/search.html
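YouTubeSearchTool parses its single string input itself: a person name, optionally followed by a comma and the maximum number of results (defaulting to 2). A sketch assuming the youtube_search package is installed:

from langchain.tools.youtube.search import YouTubeSearchTool

tool = YouTubeSearchTool()
print(tool.run("Lex Fridman,3"))  # up to 3 video URL suffixes
print(tool.run("Lex Fridman"))    # count omitted -> defaults to 2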
ac3799ac2a1f-0
Source code for langchain.tools.playwright.current_page from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page [docs]class CurrentWebPageTool(BaseBrowserTool): name: str = "current_webpage" description: str = "Returns the URL of the current page" args_schema: Type[BaseModel] = BaseModel def _run( self, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) return str(page.url) async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) return str(page.url) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/current_page.html
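The browser tools in this and the following chunks all need a Playwright browser attached before use. A minimal synchronous sketch; create_sync_playwright_browser is an assumed helper from langchain.tools.playwright.utils (only get_current_page/aget_current_page appear in these chunks), and playwright plus its browser binaries must be installed:

from langchain.tools.playwright.current_page import CurrentWebPageTool
from langchain.tools.playwright.utils import create_sync_playwright_browser  # assumed helper, not shown above

browser = create_sync_playwright_browser()
tool = CurrentWebPageTool(sync_browser=browser)
print(tool.run({}))  # no arguments: returns the URL of the currently open page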
5f54d08f550e-0
Source code for langchain.tools.playwright.extract_text from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page [docs]class ExtractTextTool(BaseBrowserTool): name: str = "extract_text" description: str = "Extract all the text on the current webpage" args_schema: Type[BaseModel] = BaseModel @root_validator def check_bs_import(cls, values: dict) -> dict: """Check that the arguments are valid.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "The 'beautifulsoup4' package is required to use this tool." " Please install it with 'pip install beautifulsoup4'." ) return values def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: """Use the tool.""" # Use Beautiful Soup since it's faster than looping through the elements from bs4 import BeautifulSoup if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) html_content = page.content() # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") return " ".join(text for text in soup.stripped_strings) async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_text.html
5f54d08f550e-1
self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") # Use Beautiful Soup since it's faster than looping through the elements from bs4 import BeautifulSoup page = await aget_current_page(self.async_browser) html_content = await page.content() # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") return " ".join(text for text in soup.stripped_strings) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_text.html
f294f79d795e-0
Source code for langchain.tools.playwright.click from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class ClickToolInput(BaseModel): """Input for ClickTool.""" selector: str = Field(..., description="CSS selector for the element to click") [docs]class ClickTool(BaseBrowserTool): name: str = "click_element" description: str = "Click on an element with the given CSS selector" args_schema: Type[BaseModel] = ClickToolInput visible_only: bool = True """Whether to consider only visible elements.""" playwright_strict: bool = False """Whether to employ Playwright's strict mode when clicking on elements.""" playwright_timeout: float = 1_000 """Timeout (in ms) for Playwright to wait for element to be ready.""" def _selector_effective(self, selector: str) -> str: if not self.visible_only: return selector return f"{selector} >> visible=1" def _run( self, selector: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/click.html
f294f79d795e-1
# Navigate to the desired webpage before using this tool selector_effective = self._selector_effective(selector=selector) from playwright.sync_api import TimeoutError as PlaywrightTimeoutError try: page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'" async def _arun( self, selector: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool selector_effective = self._selector_effective(selector=selector) from playwright.async_api import TimeoutError as PlaywrightTimeoutError try: await page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/click.html
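Continuing with the synchronous browser, a sketch for ClickTool; the CSS selector is a placeholder and, as the comment in _run notes, a page must already be open (for example via the NavigateTool shown further down):

from langchain.tools.playwright.click import ClickTool
from langchain.tools.playwright.utils import create_sync_playwright_browser  # assumed helper

browser = create_sync_playwright_browser()
click = ClickTool(sync_browser=browser, playwright_timeout=2_000)
# Returns "Clicked element '...'" on success, or an "Unable to click ..." message on timeout.
print(click.run({"selector": "button#submit"}))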
00fe7eb60890-0
Source code for langchain.tools.playwright.navigate_back from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) [docs]class NavigateBackTool(BaseBrowserTool): """Navigate back to the previous page in the browser history.""" name: str = "previous_webpage" description: str = "Navigate back to the previous page in the browser history" args_schema: Type[BaseModel] = BaseModel def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.go_back() if response: return ( f"Navigated back to the previous page with URL '{response.url}'." f" Status code {response.status}" ) else: return "Unable to navigate back; no previous page in the history" async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.go_back() if response: return (
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate_back.html
00fe7eb60890-1
response = await page.go_back() if response: return ( f"Navigated back to the previous page with URL '{response.url}'." f" Status code {response.status}" ) else: return "Unable to navigate back; no previous page in the history" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate_back.html
759aeeba628e-0
Source code for langchain.tools.playwright.extract_hyperlinks from __future__ import annotations import json from typing import TYPE_CHECKING, Any, Optional, Type from pydantic import BaseModel, Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page if TYPE_CHECKING: pass class ExtractHyperlinksToolInput(BaseModel): """Input for ExtractHyperlinksTool.""" absolute_urls: bool = Field( default=False, description="Return absolute URLs instead of relative URLs", ) [docs]class ExtractHyperlinksTool(BaseBrowserTool): """Extract all hyperlinks on the page.""" name: str = "extract_hyperlinks" description: str = "Extract all hyperlinks on the current webpage" args_schema: Type[BaseModel] = ExtractHyperlinksToolInput @root_validator def check_bs_import(cls, values: dict) -> dict: """Check that the arguments are valid.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "The 'beautifulsoup4' package is required to use this tool." " Please install it with 'pip install beautifulsoup4'." ) return values [docs] @staticmethod def scrape_page(page: Any, html_content: str, absolute_urls: bool) -> str: from urllib.parse import urljoin from bs4 import BeautifulSoup # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") # Find all the anchor elements and extract their href attributes
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_hyperlinks.html
759aeeba628e-1
# Find all the anchor elements and extract their href attributes anchors = soup.find_all("a") if absolute_urls: base_url = page.url links = [urljoin(base_url, anchor.get("href", "")) for anchor in anchors] else: links = [anchor.get("href", "") for anchor in anchors] # Return the list of links as a JSON string return json.dumps(links) def _run( self, absolute_urls: bool = False, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) html_content = page.content() return self.scrape_page(page, html_content, absolute_urls) async def _arun( self, absolute_urls: bool = False, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) html_content = await page.content() return self.scrape_page(page, html_content, absolute_urls) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_hyperlinks.html
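Because scrape_page is a plain staticmethod and only touches page.url when absolute_urls is True, the link-extraction logic can be tried without any browser at all. A small sketch, assuming beautifulsoup4 and lxml are installed:

from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool

html = '<a href="/docs">Docs</a> <a href="https://example.com/">Home</a>'
links_json = ExtractHyperlinksTool.scrape_page(None, html, absolute_urls=False)
print(links_json)  # '["/docs", "https://example.com/"]'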
e034e63f8e19-0
Source code for langchain.tools.playwright.get_elements from __future__ import annotations import json from typing import TYPE_CHECKING, List, Optional, Sequence, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page if TYPE_CHECKING: from playwright.async_api import Page as AsyncPage from playwright.sync_api import Page as SyncPage class GetElementsToolInput(BaseModel): """Input for GetElementsTool.""" selector: str = Field( ..., description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname", ) attributes: List[str] = Field( default_factory=lambda: ["innerText"], description="Set of attributes to retrieve for each element", ) async def _aget_elements( page: AsyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]: """Get elements matching the given CSS selector.""" elements = await page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = await element.inner_text() else: val = await element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results def _get_elements( page: SyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]:
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
e034e63f8e19-1
) -> List[dict]: """Get elements matching the given CSS selector.""" elements = page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = element.inner_text() else: val = element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results [docs]class GetElementsTool(BaseBrowserTool): name: str = "get_elements" description: str = ( "Retrieve elements in the current web page matching the given CSS selector" ) args_schema: Type[BaseModel] = GetElementsToolInput def _run( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool results = _get_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False) async def _arun( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}")
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
e034e63f8e19-2
raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool results = await _aget_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
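A sketch for GetElementsTool, mirroring the fields of GetElementsToolInput; the selector and attribute names are placeholders, and a page should already be open in the attached browser:

from langchain.tools.playwright.get_elements import GetElementsTool
from langchain.tools.playwright.utils import create_sync_playwright_browser  # assumed helper

browser = create_sync_playwright_browser()
get_elements = GetElementsTool(sync_browser=browser)
# JSON array of matching elements; empty attribute values are dropped by _get_elements.
print(get_elements.run({"selector": "a.result", "attributes": ["innerText", "href"]}))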
fec1bca5ed87-0
Source code for langchain.tools.playwright.navigate from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class NavigateToolInput(BaseModel): """Input for NavigateToolInput.""" url: str = Field(..., description="url to navigate to") [docs]class NavigateTool(BaseBrowserTool): name: str = "navigate_browser" description: str = "Navigate a browser to the specified URL" args_schema: Type[BaseModel] = NavigateToolInput def _run( self, url: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.goto(url) status = response.status if response else "unknown" return f"Navigating to {url} returned status code {status}" async def _arun( self, url: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.goto(url)
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate.html
fec1bca5ed87-1
response = await page.goto(url) status = response.status if response else "unknown" return f"Navigating to {url} returned status code {status}" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate.html
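NavigateTool is normally the first browser tool invoked, since the others expect a page to be open already. A sketch with a placeholder URL:

from langchain.tools.playwright.navigate import NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser  # assumed helper

browser = create_sync_playwright_browser()
navigate = NavigateTool(sync_browser=browser)
print(navigate.run({"url": "https://example.com"}))
# -> "Navigating to https://example.com returned status code 200" (status varies by site)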
30ae2ee48b7d-0
Source code for langchain.llms.beam """Wrapper around Beam API.""" import base64 import json import logging import subprocess import textwrap import time from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) DEFAULT_NUM_TRIES = 10 DEFAULT_SLEEP_TIME = 4 [docs]class Beam(LLM): """Wrapper around Beam API for gpt2 large language model. To use, you should have the ``beam-sdk`` python package installed, and the environment variable ``BEAM_CLIENT_ID`` set with your client id and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how to get these is available here: https://docs.beam.cloud/account/api-keys. The wrapper can then be called as follows, where the name, cpu, memory, gpu, python version, and python packages can be updated accordingly. Once deployed, the instance can be called. Example: .. code-block:: python llm = Beam(model_name="gpt2", name="langchain-gpt2", cpu=8, memory="32Gi", gpu="A10G", python_version="python3.8", python_packages=[ "diffusers[torch]>=0.10", "transformers", "torch", "pillow", "accelerate", "safetensors", "xformers",], max_length=50) llm._deploy()
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
30ae2ee48b7d-1
max_length=50) llm._deploy() call_result = llm._call(input) """ model_name: str = "" name: str = "" cpu: str = "" memory: str = "" gpu: str = "" python_version: str = "" python_packages: List[str] = [] max_length: str = "" url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" beam_client_id: str = "" beam_client_secret: str = "" app_id: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
30ae2ee48b7d-2
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" beam_client_id = get_from_dict_or_env( values, "beam_client_id", "BEAM_CLIENT_ID" ) beam_client_secret = get_from_dict_or_env( values, "beam_client_secret", "BEAM_CLIENT_SECRET" ) values["beam_client_id"] = beam_client_id values["beam_client_secret"] = beam_client_secret return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model_name, "name": self.name, "cpu": self.cpu, "memory": self.memory, "gpu": self.gpu, "python_version": self.python_version, "python_packages": self.python_packages, "max_length": self.max_length, "model_kwargs": self.model_kwargs, } @property def _llm_type(self) -> str: """Return type of llm.""" return "beam" [docs] def app_creation(self) -> None: """Creates a Python file which will contain your Beam app definition.""" script = textwrap.dedent( """\ import beam # The environment your code will run on app = beam.App( name="{name}", cpu={cpu}, memory="{memory}", gpu="{gpu}", python_version="{python_version}", python_packages={python_packages}, ) app.Trigger.RestAPI(
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
30ae2ee48b7d-3
python_packages={python_packages}, ) app.Trigger.RestAPI( inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}}, outputs={{"text": beam.Types.String()}}, handler="run.py:beam_langchain", ) """ ) script_name = "app.py" with open(script_name, "w") as file: file.write( script.format( name=self.name, cpu=self.cpu, memory=self.memory, gpu=self.gpu, python_version=self.python_version, python_packages=self.python_packages, ) ) [docs] def run_creation(self) -> None: """Creates a Python file which will be deployed on beam.""" script = textwrap.dedent( """ import os import transformers from transformers import GPT2LMHeadModel, GPT2Tokenizer model_name = "{model_name}" def beam_langchain(**inputs): prompt = inputs["prompt"] length = inputs["max_length"] tokenizer = GPT2Tokenizer.from_pretrained(model_name) model = GPT2LMHeadModel.from_pretrained(model_name) encodedPrompt = tokenizer.encode(prompt, return_tensors='pt') outputs = model.generate(encodedPrompt, max_length=int(length), do_sample=True, pad_token_id=tokenizer.eos_token_id) output = tokenizer.decode(outputs[0], skip_special_tokens=True) print(output) return {{"text": output}} """ ) script_name = "run.py" with open(script_name, "w") as file: file.write(script.format(model_name=self.model_name))
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
30ae2ee48b7d-4
file.write(script.format(model_name=self.model_name)) def _deploy(self) -> str: """Call to Beam.""" try: import beam # type: ignore if beam.__path__ == "": raise ImportError except ImportError: raise ImportError( "Could not import beam python package. " "Please install it with `curl " "https://raw.githubusercontent.com/slai-labs" "/get-beam/main/get-beam.sh -sSfL | sh`." ) self.app_creation() self.run_creation() process = subprocess.run( "beam deploy app.py", shell=True, capture_output=True, text=True ) if process.returncode == 0: output = process.stdout logger.info(output) lines = output.split("\n") for line in lines: if line.startswith(" i Send requests to: https://apps.beam.cloud/"): self.app_id = line.split("/")[-1] self.url = line.split(":")[1].strip() return self.app_id raise ValueError( f"""Failed to retrieve the appID from the deployment output. Deployment output: {output}""" ) else: raise ValueError(f"Deployment failed. Error: {process.stderr}") @property def authorization(self) -> str: if self.beam_client_id: credential_str = self.beam_client_id + ":" + self.beam_client_secret else: credential_str = self.beam_client_secret return base64.b64encode(credential_str.encode()).decode() def _call( self, prompt: str, stop: Optional[list] = None,
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
30ae2ee48b7d-5
self, prompt: str, stop: Optional[list] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to Beam.""" url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url payload = {"prompt": prompt, "max_length": self.max_length} headers = { "Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Authorization": "Basic " + self.authorization, "Connection": "keep-alive", "Content-Type": "application/json", } for _ in range(DEFAULT_NUM_TRIES): request = requests.post(url, headers=headers, data=json.dumps(payload)) if request.status_code == 200: return request.json()["text"] time.sleep(DEFAULT_SLEEP_TIME) logger.warning("Unable to successfully call model.") return "" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
04f6fc484234-0
Source code for langchain.llms.openai """Wrapper around OpenAI APIs.""" from __future__ import annotations import logging import sys import warnings from typing import ( AbstractSet, Any, Callable, Collection, Dict, Generator, List, Literal, Mapping, Optional, Set, Tuple, Union, ) from pydantic import Extra, Field, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import BaseLLM from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def update_token_usage( keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any] ) -> None: """Update token usage.""" _keys_to_use = keys.intersection(response["usage"]) for _key in _keys_to_use: if _key not in token_usage: token_usage[_key] = response["usage"][_key] else: token_usage[_key] += response["usage"][_key] def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: """Update response from the stream response.""" response["choices"][0]["text"] += stream_response["choices"][0]["text"] response["choices"][0]["finish_reason"] = stream_response["choices"][0][ "finish_reason" ]
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
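update_token_usage above simply accumulates usage counters across the batched responses handled by _generate. A tiny worked example with made-up numbers:

from langchain.llms.openai import update_token_usage

keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
token_usage: dict = {}

# Two hypothetical completion responses from successive sub-prompt batches.
resp_a = {"usage": {"prompt_tokens": 12, "completion_tokens": 30, "total_tokens": 42}}
resp_b = {"usage": {"prompt_tokens": 8, "completion_tokens": 20, "total_tokens": 28}}

update_token_usage(keys, resp_a, token_usage)
update_token_usage(keys, resp_b, token_usage)
print(token_usage)  # prompt_tokens: 20, completion_tokens: 50, total_tokens: 70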
04f6fc484234-1
"finish_reason" ] response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] def _streaming_response_template() -> Dict[str, Any]: return { "choices": [ { "text": "", "finish_reason": None, "logprobs": None, } ] } def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat]) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.create(**kwargs) return _completion_with_retry(**kwargs)
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-2
return llm.client.create(**kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any ) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) class BaseOpenAI(BaseLLM): """Wrapper around OpenAI large language models.""" client: Any #: :meta private: model_name: str = Field("text-davinci-003", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict)
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-3
model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None openai_api_base: Optional[str] = None openai_organization: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed.""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed.""" def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore """Initialize the OpenAI object.""" model_name = data.get("model_name", "") if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"): warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: "
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-4
"no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return OpenAIChat(**data) return super().__new__(cls) class Config: """Configuration for this pydantic object.""" extra = Extra.ignore allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = cls.all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" )
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-5
values, "openai_api_key", "OPENAI_API_KEY" ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai openai.api_key = openai_api_key if openai_api_base: openai.api_base = openai_api_base if openai_organization: openai.organization = openai_organization if openai_proxy: openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" normal_params = { "temperature": self.temperature,
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-6
normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "request_timeout": self.request_timeout, "logit_bias": self.logit_bias, } # Azure gpt-35-turbo doesn't support best_of # don't specify best_of if it is 1 if self.best_of > 1: normal_params["best_of"] = self.best_of return {**normal_params, **self.model_kwargs} def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python response = openai.generate(["Tell me a joke."]) """ # TODO: write a unit test for this params = self._invocation_params sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts:
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-7
for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() for stream_resp in completion_with_retry( self, prompt=_prompts, **params ): if run_manager: run_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = completion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Call out to OpenAI's endpoint async with k unique prompts.""" params = self._invocation_params sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts:
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-8
for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() async for stream_resp in await acompletion_with_retry( self, prompt=_prompts, **params ): if run_manager: await run_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = await acompletion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage) def get_sub_prompts( self, params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None, ) -> List[List[str]]: """Get the sub prompts for llm call.""" if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params["max_tokens"] == -1: if len(prompts) != 1: raise ValueError( "max_tokens set to -1 not supported for multiple inputs." ) params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-9
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0]) sub_prompts = [ prompts[i : i + self.batch_size] for i in range(0, len(prompts), self.batch_size) ] return sub_prompts def create_llm_result( self, choices: Any, prompts: List[str], token_usage: Dict[str, int] ) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] for i, _ in enumerate(prompts): sub_choices = choices[i * self.n : (i + 1) * self.n] generations.append( [ Generation( text=choice["text"], generation_info=dict( finish_reason=choice.get("finish_reason"), logprobs=choice.get("logprobs"), ), ) for choice in sub_choices ] ) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return LLMResult(generations=generations, llm_output=llm_output) def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator: """Call OpenAI with streaming flag and return the resulting generator. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens from OpenAI. Example: .. code-block:: python generator = openai.stream("Tell me a joke.")
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
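The slicing in ``get_sub_prompts`` and ``create_llm_result`` above is easier to see in isolation. A minimal standalone sketch of the same arithmetic (plain Python, no API calls; the values are illustrative only):

# How BaseOpenAI groups raw completion choices back into per-prompt generations:
# with n completions per prompt, the i-th prompt owns choices[i * n : (i + 1) * n].
prompts = ["a", "b", "c"]
n = 2  # completions requested per prompt
choices = [f"choice-{i}" for i in range(len(prompts) * n)]  # flat list, as returned by the API

generations = [choices[i * n : (i + 1) * n] for i in range(len(prompts))]
assert generations == [
    ["choice-0", "choice-1"],
    ["choice-2", "choice-3"],
    ["choice-4", "choice-5"],
]

# get_sub_prompts applies the same idea to split prompts into batches of batch_size.
batch_size = 2
sub_prompts = [prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)]
assert sub_prompts == [["a", "b"], ["c"]]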
04f6fc484234-10
.. code-block:: python generator = openai.stream("Tell me a joke.") for token in generator: yield token """ params = self.prep_streaming_params(stop) generator = self.client.create(prompt=prompt, **params) return generator def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """Prepare the params for streaming.""" params = self._invocation_params if "best_of" in params and params["best_of"] != 1: raise ValueError("OpenAI only supports best_of == 1 for streaming") if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop params["stream"] = True return params @property def _invocation_params(self) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" return self._default_params @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai" def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # tiktoken NOT supported for Python < 3.8 if sys.version_info[1] < 8: return super().get_token_ids(text) try: import tiktoken except ImportError: raise ImportError(
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-11
try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) enc = tiktoken.encoding_for_model(self.model_name) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) def modelname_to_contextsize(self, modelname: str) -> int: """Calculate the maximum number of tokens possible to generate for a model. Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("text-davinci-003") """ model_token_mapping = { "gpt-4": 8192, "gpt-4-0314": 8192, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "gpt-3.5-turbo": 4096, "gpt-3.5-turbo-0301": 4096, "text-ada-001": 2049, "ada": 2049, "text-babbage-001": 2049, "babbage": 2049, "text-curie-001": 2049, "curie": 2049, "davinci": 2049,
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-12
"davinci": 2049, "text-davinci-003": 4097, "text-davinci-002": 4097, "code-davinci-002": 8001, "code-davinci-001": 8001, "code-cushman-002": 2048, "code-cushman-001": 2048, } # handling finetuned models if "ft-" in modelname: modelname = modelname.split(":")[0] context_size = model_token_mapping.get(modelname, None) if context_size is None: raise ValueError( f"Unknown model: {modelname}. Please provide a valid OpenAI model name." "Known models are: " + ", ".join(model_token_mapping.keys()) ) return context_size def max_tokens_for_prompt(self, prompt: str) -> int: """Calculate the maximum number of tokens possible to generate for a prompt. Args: prompt: The prompt to pass into the model. Returns: The maximum number of tokens to generate for a prompt. Example: .. code-block:: python max_tokens = openai.max_token_for_prompt("Tell me a joke.") """ num_tokens = self.get_num_tokens(prompt) # get max context size for model by name max_size = self.modelname_to_contextsize(self.model_name) return max_size - num_tokens [docs]class OpenAI(BaseOpenAI): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key.
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
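A short usage sketch for the context-size helpers defined above. It assumes the ``openai`` and ``tiktoken`` packages are installed and ``OPENAI_API_KEY`` is set; the prompt is arbitrary:

from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003")

# Total context window for the configured model (4097 tokens for text-davinci-003).
print(llm.modelname_to_contextsize(llm.model_name))

# Tokens left for the completion once the prompt's tokens are subtracted.
print(llm.max_tokens_for_prompt("Tell me a joke."))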
04f6fc484234-13
environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAI openai = OpenAI(model_name="text-davinci-003") """ @property def _invocation_params(self) -> Dict[str, Any]: return {**{"model": self.model_name}, **super()._invocation_params} [docs]class AzureOpenAI(BaseOpenAI): """Wrapper around Azure-specific OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import AzureOpenAI openai = AzureOpenAI(model_name="text-davinci-003") """ deployment_name: str = "" """Deployment name to use.""" @property def _identifying_params(self) -> Mapping[str, Any]: return { **{"deployment_name": self.deployment_name}, **super()._identifying_params, } @property def _invocation_params(self) -> Dict[str, Any]: return {**{"engine": self.deployment_name}, **super()._invocation_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "azure"
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
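To make the `_invocation_params` difference concrete: ``OpenAI`` addresses the model via ``model``, while ``AzureOpenAI`` addresses your deployment via ``engine``. A brief sketch; the deployment name is a placeholder, and your Azure endpoint settings plus ``OPENAI_API_KEY`` must already be configured for the underlying ``openai`` client:

from langchain.llms import AzureOpenAI

# "my-davinci-deployment" stands in for whatever name you gave the deployment in Azure.
llm = AzureOpenAI(deployment_name="my-davinci-deployment", model_name="text-davinci-003")

# The Azure wrapper sends the deployment as ``engine`` instead of ``model``.
assert llm._invocation_params["engine"] == "my-davinci-deployment"
print(llm("Tell me a joke."))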
04f6fc484234-14
"""Return type of llm.""" return "azure" [docs]class OpenAIChat(BaseLLM): """Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAIChat openaichat = OpenAIChat(model_name="gpt-3.5-turbo") """ client: Any #: :meta private: model_name: str = "gpt-3.5-turbo" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None openai_api_base: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None max_retries: int = 6 """Maximum number of retries to make when generating.""" prefix_messages: List = Field(default_factory=list) """Series of messages for Chat input.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" class Config:
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-15
"""Set of special tokens that are not allowed。""" class Config: """Configuration for this pydantic object.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="" ) try: import openai openai.api_key = openai_api_key if openai_api_base: openai.api_base = openai_api_base if openai_organization:
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-16
openai.api_base = openai_api_base if openai_organization: openai.organization = openai_organization if openai_proxy: openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return self.model_kwargs def _get_chat_params( self, prompts: List[str], stop: Optional[List[str]] = None ) -> Tuple: if len(prompts) > 1: raise ValueError( f"OpenAIChat currently only supports single prompt, got {prompts}" ) messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}] params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
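A brief sketch of how ``prefix_messages`` feeds into the payload built by ``_get_chat_params``; it assumes ``OPENAI_API_KEY`` is set, and the system message is an arbitrary example:

from langchain.llms import OpenAIChat

llm = OpenAIChat(
    model_name="gpt-3.5-turbo",
    prefix_messages=[{"role": "system", "content": "You are a terse assistant."}],
)

# Each call appends exactly one user message after the prefix, so the request
# body is: [{"role": "system", ...}, {"role": "user", "content": "Tell me a joke."}]
print(llm("Tell me a joke."))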
04f6fc484234-17
if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params.get("max_tokens") == -1: # for ChatGPT api, omitting max_tokens is equivalent to having no limit del params["max_tokens"] return messages, params def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: messages, params = self._get_chat_params(prompts, stop) if self.streaming: response = "" params["stream"] = True for stream_resp in completion_with_retry(self, messages=messages, **params): token = stream_resp["choices"][0]["delta"].get("content", "") response += token if run_manager: run_manager.on_llm_new_token( token, ) return LLMResult( generations=[[Generation(text=response)]], ) else: full_response = completion_with_retry(self, messages=messages, **params) llm_output = { "token_usage": full_response["usage"], "model_name": self.model_name, } return LLMResult( generations=[ [Generation(text=full_response["choices"][0]["message"]["content"])] ], llm_output=llm_output, ) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None,
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-18
prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: messages, params = self._get_chat_params(prompts, stop) if self.streaming: response = "" params["stream"] = True async for stream_resp in await acompletion_with_retry( self, messages=messages, **params ): token = stream_resp["choices"][0]["delta"].get("content", "") response += token if run_manager: await run_manager.on_llm_new_token( token, ) return LLMResult( generations=[[Generation(text=response)]], ) else: full_response = await acompletion_with_retry( self, messages=messages, **params ) llm_output = { "token_usage": full_response["usage"], "model_name": self.model_name, } return LLMResult( generations=[ [Generation(text=full_response["choices"][0]["message"]["content"])] ], llm_output=llm_output, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai-chat" [docs] def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # tiktoken NOT supported for Python < 3.8
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
04f6fc484234-19
# tiktoken NOT supported for Python < 3.8 if sys.version_info[1] < 8: return super().get_token_ids(text) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) enc = tiktoken.encoding_for_model(self.model_name) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, )
https://python.langchain.com/en/latest/_modules/langchain/llms/openai.html
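As a usage sketch for the beta ``stream`` interface defined in this module (requires ``OPENAI_API_KEY``; ``best_of`` must be left at 1, and the prompt is arbitrary):

from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003", temperature=0, max_tokens=64)

# stream() returns the raw OpenAI streaming generator; each chunk mirrors the
# completion response format, so the new text lives under choices[0]["text"].
for chunk in llm.stream("Tell me a joke."):
    print(chunk["choices"][0]["text"], end="", flush=True)
print()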
0c1129e80be2-0
Source code for langchain.llms.predictionguard """Wrapper around Prediction Guard APIs.""" import logging from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class PredictionGuard(LLM): """Wrapper around Prediction Guard large language models. To use, you should have the ``predictionguard`` python package installed, and the environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass it as a named parameter to the constructor. To use Prediction Guard's API along with OpenAI models, set the environment variable ``OPENAI_API_KEY`` with your OpenAI API key as well. Example: .. code-block:: python pgllm = PredictionGuard(model="MPT-7B-Instruct", token="my-access-token", output={ "type": "boolean" }) """ client: Any #: :meta private: model: Optional[str] = "MPT-7B-Instruct" """Model name to use.""" output: Optional[Dict[str, Any]] = None """The output type or structure for controlling the LLM output.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" token: Optional[str] = None """Your Prediction Guard access token.""" stop: Optional[List[str]] = None
https://python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
0c1129e80be2-1
"""Your Prediction Guard access token.""" stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the access token and python package exists in environment.""" token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN") try: import predictionguard as pg values["client"] = pg.Client(token=token) except ImportError: raise ImportError( "Could not import predictionguard python package. " "Please install it with `pip install predictionguard`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Prediction Guard API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "predictionguard" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Prediction Guard's model API. Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example:
https://python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
0c1129e80be2-2
Returns: The string generated by the model. Example: .. code-block:: python response = pgllm("Tell me a joke.") """ import predictionguard as pg params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = pg.Completion.create( model=self.model, prompt=prompt, output=self.output, temperature=params["temperature"], max_tokens=params["max_tokens"], ) text = response["choices"][0]["text"] # If stop tokens are provided, Prediction Guard's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
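A slightly fuller usage sketch for the wrapper above. The token is a placeholder (or set ``PREDICTIONGUARD_TOKEN``), and the ``output`` structure follows the example already given in the class docstring:

from langchain.llms import PredictionGuard

pgllm = PredictionGuard(
    model="MPT-7B-Instruct",
    token="<your-access-token>",
    output={"type": "boolean"},   # constrain the completion, as in the docstring example
    max_tokens=64,
    temperature=0.1,
)

# Stop sequences may be set on the instance or per call, but not both at once.
print(pgllm("Is the following review positive? 'I love this product.'"))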
726ac71c4dcb-0
Source code for langchain.llms.databricks import os from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Optional import requests from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM __all__ = ["Databricks"] class _DatabricksClientBase(BaseModel, ABC): """A base JSON API client that talks to Databricks.""" api_url: str api_token: str def post_raw(self, request: Any) -> Any: headers = {"Authorization": f"Bearer {self.api_token}"} response = requests.post(self.api_url, headers=headers, json=request) # TODO: error handling and automatic retries if not response.ok: raise ValueError(f"HTTP {response.status_code} error: {response.text}") return response.json() @abstractmethod def post(self, request: Any) -> Any: ... class _DatabricksServingEndpointClient(_DatabricksClientBase): """An API client that talks to a Databricks serving endpoint.""" host: str endpoint_name: str @root_validator(pre=True) def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] endpoint_name = values["endpoint_name"] api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations" values["api_url"] = api_url return values def post(self, request: Any) -> Any:
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
726ac71c4dcb-1
return values def post(self, request: Any) -> Any: # See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html wrapped_request = {"dataframe_records": [request]} response = self.post_raw(wrapped_request)["predictions"] # For a single-record query, the result is not a list. if isinstance(response, list): response = response[0] return response class _DatabricksClusterDriverProxyClient(_DatabricksClientBase): """An API client that talks to a Databricks cluster driver proxy app.""" host: str cluster_id: str cluster_driver_port: str @root_validator(pre=True) def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] cluster_id = values["cluster_id"] port = values["cluster_driver_port"] api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}" values["api_url"] = api_url return values def post(self, request: Any) -> Any: return self.post_raw(request) def get_repl_context() -> Any: """Gets the notebook REPL context if running inside a Databricks notebook. Returns None otherwise. """ try: from dbruntime.databricks_repl_context import get_context return get_context() except ImportError: raise ValueError( "Cannot access dbruntime, not running inside a Databricks notebook." ) def get_default_host() -> str: """Gets the default Databricks workspace hostname.
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
726ac71c4dcb-2
"""Gets the default Databricks workspace hostname. Raises an error if the hostname cannot be automatically determined. """ host = os.getenv("DATABRICKS_HOST") if not host: try: host = get_repl_context().browserHostName if not host: raise ValueError("context doesn't contain browserHostName.") except Exception as e: raise ValueError( "host was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_HOST'. Received error: {e}" ) # TODO: support Databricks CLI profile host = host.lstrip("https://").lstrip("http://").rstrip("/") return host def get_default_api_token() -> str: """Gets the default Databricks personal access token. Raises an error if the token cannot be automatically determined. """ if api_token := os.getenv("DATABRICKS_API_TOKEN"): return api_token try: api_token = get_repl_context().apiToken if not api_token: raise ValueError("context doesn't contain apiToken.") except Exception as e: raise ValueError( "api_token was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_API_TOKEN'. Received error: {e}" ) # TODO: support Databricks CLI profile return api_token [docs]class Databricks(LLM): """LLM wrapper around a Databricks serving endpoint or a cluster driver proxy app. It supports two endpoint types: * **Serving endpoint** (recommended for both production and development).
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
726ac71c4dcb-3
* **Serving endpoint** (recommended for both production and development). We assume that an LLM was registered and deployed to a serving endpoint. To wrap it as an LLM you must have "Can Query" permission to the endpoint. Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and ``cluster_driver_port``. The expected model signature is: * inputs:: [{"name": "prompt", "type": "string"}, {"name": "stop", "type": "list[string]"}] * outputs: ``[{"type": "string"}]`` * **Cluster driver proxy app** (recommended for interactive development). One can load an LLM on a Databricks interactive cluster and start a local HTTP server on the driver node to serve the model at ``/`` using HTTP POST method with JSON input/output. Please use a port number between ``[3000, 8000]`` and let the server listen to the driver IP address or simply ``0.0.0.0`` instead of localhost only. To wrap it as an LLM you must have "Can Attach To" permission to the cluster. Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``. The expected server schema (using JSON schema) is: * inputs:: {"type": "object", "properties": { "prompt": {"type": "string"}, "stop": {"type": "array", "items": {"type": "string"}}}, "required": ["prompt"]}` * outputs: ``{"type": "string"}`` If the endpoint model signature is different or you want to set extra params,
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
726ac71c4dcb-4
If the endpoint model signature is different or you want to set extra params, you can use `transform_input_fn` and `transform_output_fn` to apply necessary transformations before and after the query. """ host: str = Field(default_factory=get_default_host) """Databricks workspace hostname. If not provided, the default value is determined by * the ``DATABRICKS_HOST`` environment variable if present, or * the hostname of the current Databricks workspace if running inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode. """ api_token: str = Field(default_factory=get_default_api_token) """Databricks personal access token. If not provided, the default value is determined by * the ``DATABRICKS_API_TOKEN`` environment variable if present, or * an automatically generated temporary token if running inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode. """ endpoint_name: Optional[str] = None """Name of the model serving endpoint. You must specify the endpoint name to connect to a model serving endpoint. You must not set both ``endpoint_name`` and ``cluster_id``. """ cluster_id: Optional[str] = None """ID of the cluster if connecting to a cluster driver proxy app. If neither ``endpoint_name`` nor ``cluster_id`` is provided and the code runs inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode, the current cluster ID is used as default. You must not set both ``endpoint_name`` and ``cluster_id``.
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
726ac71c4dcb-5
You must not set both ``endpoint_name`` and ``cluster_id``. """ cluster_driver_port: Optional[str] = None """The port number used by the HTTP server running on the cluster driver node. The server should listen on the driver IP address or simply ``0.0.0.0`` to connect. We recommend the server using a port number between ``[3000, 8000]``. """ model_kwargs: Optional[Dict[str, Any]] = None """Extra parameters to pass to the endpoint.""" transform_input_fn: Optional[Callable] = None """A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible request object that the endpoint accepts. For example, you can apply a prompt template to the input prompt. """ transform_output_fn: Optional[Callable[..., str]] = None """A function that transforms the output from the endpoint to the generated text. """ _client: _DatabricksClientBase = PrivateAttr() class Config: extra = Extra.forbid underscore_attrs_are_private = True @validator("cluster_id", always=True) def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_id.") elif values["endpoint_name"]: return None elif v: return v else: try: if v := get_repl_context().clusterId: return v raise ValueError("Context doesn't contain clusterId.") except Exception as e: raise ValueError( "Neither endpoint_name nor cluster_id was set. "
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
726ac71c4dcb-6
raise ValueError( "Neither endpoint_name nor cluster_id was set. " "And the cluster_id cannot be automatically determined. Received" f" error: {e}" ) @validator("cluster_driver_port", always=True) def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_driver_port.") elif values["endpoint_name"]: return None elif v is None: raise ValueError( "Must set cluster_driver_port to connect to a cluster driver." ) elif int(v) <= 0: raise ValueError(f"Invalid cluster_driver_port: {v}") else: return v @validator("model_kwargs", always=True) def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: if v: assert "prompt" not in v, "model_kwargs must not contain key 'prompt'" assert "stop" not in v, "model_kwargs must not contain key 'stop'" return v def __init__(self, **data: Any): super().__init__(**data) if self.endpoint_name: self._client = _DatabricksServingEndpointClient( host=self.host, api_token=self.api_token, endpoint_name=self.endpoint_name, ) elif self.cluster_id and self.cluster_driver_port: self._client = _DatabricksClusterDriverProxyClient( host=self.host, api_token=self.api_token, cluster_id=self.cluster_id, cluster_driver_port=self.cluster_driver_port, ) else:
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
726ac71c4dcb-7
cluster_driver_port=self.cluster_driver_port, ) else: raise ValueError( "Must specify either endpoint_name or cluster_id/cluster_driver_port." ) @property def _llm_type(self) -> str: """Return type of llm.""" return "databricks" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Queries the LLM endpoint with the given prompt and stop sequence.""" # TODO: support callbacks request = {"prompt": prompt, "stop": stop} if self.model_kwargs: request.update(self.model_kwargs) if self.transform_input_fn: request = self.transform_input_fn(**request) response = self._client.post(request) if self.transform_output_fn: response = self.transform_output_fn(response) return response
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
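Two usage sketches matching the endpoint types described above. The endpoint name, cluster ID, and port are placeholders; host and token resolve automatically inside a Databricks notebook, or from ``DATABRICKS_HOST`` / ``DATABRICKS_API_TOKEN`` otherwise:

from langchain.llms import Databricks

# 1) Model serving endpoint (recommended): requires "Can Query" permission.
serving_llm = Databricks(endpoint_name="my-llm-endpoint")
print(serving_llm("How are you?"))

# 2) Cluster driver proxy app: requires "Can Attach To" permission. The optional
#    transform hooks adapt the request/response for a server with a different schema.
proxy_llm = Databricks(
    cluster_id="0123-456789-abcdefgh",
    cluster_driver_port="7777",
    transform_input_fn=lambda prompt, stop, **kwargs: {"query": prompt},
    transform_output_fn=str,
)
print(proxy_llm("How are you?"))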
a42cda925412-0
Source code for langchain.llms.cohere """Wrapper around Cohere APIs.""" import logging from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1."""
https://python.langchain.com/en/latest/_modules/langchain/llms/cohere.html
a42cda925412-1
"""Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str:
https://python.langchain.com/en/latest/_modules/langchain/llms/cohere.html
a42cda925412-2
@property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = self.client.generate(model=self.model, prompt=prompt, **params) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/cohere.html
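A short usage sketch for the wrapper above; the model name comes from the class docstring, and the API key is a placeholder (or set ``COHERE_API_KEY``):

from langchain.llms import Cohere

llm = Cohere(
    model="gptd-instruct-tft",
    cohere_api_key="<your-api-key>",
    max_tokens=100,
    temperature=0.3,
)

# Stop sequences may be given per call; Cohere returns them in the text, and the
# wrapper trims them with enforce_stop_tokens so behaviour matches other LLMs.
print(llm("Write a one-line haiku about the sea.", stop=["--"]))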
8fa0efd1bf97-0
Source code for langchain.llms.huggingface_text_gen_inference """Wrapper around Huggingface text generation inference API.""" from functools import partial from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM [docs]class HuggingFaceTextGenInference(LLM): """ HuggingFace text generation inference API. This class is a wrapper around the HuggingFace text generation inference API. It is used to generate text from a given prompt. Attributes: - max_new_tokens: The maximum number of tokens to generate. - top_k: The number of top-k tokens to consider when generating text. - top_p: The cumulative probability threshold for generating text. - typical_p: The typical probability threshold for generating text. - temperature: The temperature to use when generating text. - repetition_penalty: The repetition penalty to use when generating text. - stop_sequences: A list of stop sequences to use when generating text. - seed: The seed to use when generating text. - inference_server_url: The URL of the inference server to use. - timeout: The timeout value in seconds to use while connecting to inference server. - client: The client object used to communicate with the inference server. Methods: - _call: Generates text based on a given prompt and stop sequences. - _llm_type: Returns the type of LLM. """ """ Example: .. code-block:: python # Basic Example (no streaming) llm = HuggingFaceTextGenInference( inference_server_url = "http://localhost:8010/",
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
8fa0efd1bf97-1
inference_server_url = "http://localhost:8010/", max_new_tokens = 512, top_k = 10, top_p = 0.95, typical_p = 0.95, temperature = 0.01, repetition_penalty = 1.03, ) print(llm("What is Deep Learning?")) # Streaming response example from langchain.callbacks import streaming_stdout callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()] llm = HuggingFaceTextGenInference( inference_server_url = "http://localhost:8010/", max_new_tokens = 512, top_k = 10, top_p = 0.95, typical_p = 0.95, temperature = 0.01, repetition_penalty = 1.03, callbacks = callbacks, stream = True ) print(llm("What is Deep Learning?")) """ max_new_tokens: int = 512 top_k: Optional[int] = None top_p: Optional[float] = 0.95 typical_p: Optional[float] = 0.95 temperature: float = 0.8 repetition_penalty: Optional[float] = None stop_sequences: List[str] = Field(default_factory=list) seed: Optional[int] = None inference_server_url: str = "" timeout: int = 120 stream: bool = False client: Any class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
8fa0efd1bf97-2
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: import text_generation values["client"] = text_generation.Client( values["inference_server_url"], timeout=values["timeout"] ) except ImportError: raise ImportError( "Could not import text_generation python package. " "Please install it with `pip install text_generation`." ) return values @property def _llm_type(self) -> str: """Return type of llm.""" return "hf_textgen_inference" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: if stop is None: stop = self.stop_sequences else: stop += self.stop_sequences if not self.stream: res = self.client.generate( prompt, stop_sequences=stop, max_new_tokens=self.max_new_tokens, top_k=self.top_k, top_p=self.top_p, typical_p=self.typical_p, temperature=self.temperature, repetition_penalty=self.repetition_penalty, seed=self.seed, ) # remove stop sequences from the end of the generated text for stop_seq in stop: if stop_seq in res.generated_text: res.generated_text = res.generated_text[ : res.generated_text.index(stop_seq) ] text = res.generated_text else: text_callback = None if run_manager: text_callback = partial(
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
8fa0efd1bf97-3
text_callback = None if run_manager: text_callback = partial( run_manager.on_llm_new_token, verbose=self.verbose ) params = { "stop_sequences": stop, "max_new_tokens": self.max_new_tokens, "top_k": self.top_k, "top_p": self.top_p, "typical_p": self.typical_p, "temperature": self.temperature, "repetition_penalty": self.repetition_penalty, "seed": self.seed, } text = "" for res in self.client.generate_stream(prompt, **params): token = res.token is_stop = False for stop_seq in stop: if stop_seq in token.text: is_stop = True break if is_stop: break if not token.special: if text_callback: text_callback(token.text) # accumulate the streamed token so the full completion is returned text += token.text return text
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
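One detail of ``_call`` above worth highlighting: stop sequences passed at call time are merged with the instance-level ``stop_sequences``, and any matched sequence is trimmed from the end of the generation. A short sketch; the server URL is a placeholder for a running text-generation-inference instance:

from langchain.llms import HuggingFaceTextGenInference

llm = HuggingFaceTextGenInference(
    inference_server_url="http://localhost:8010/",
    max_new_tokens=128,
    temperature=0.2,
    stop_sequences=["\nHuman:"],   # always applied
)

# "Observation:" is added to the stop list for this call only; whichever
# sequence appears first is cut from the returned text.
print(llm("Answer briefly: what is deep learning?", stop=["Observation:"]))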
9ed309ba7207-0
Source code for langchain.llms.nlpcloud """Wrapper around NLPCloud APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env [docs]class NLPCloud(LLM): """Wrapper around NLPCloud large language models. To use, you should have the ``nlpcloud`` python package installed, and the environment variable ``NLPCLOUD_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import NLPCloud nlpcloud = NLPCloud(model_name="gpt-neox-20b") """ client: Any #: :meta private: model_name: str = "finetuned-gpt-neox-20b" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" min_length: int = 1 """The minimum number of tokens to generate in the completion.""" max_length: int = 256 """The maximum number of tokens to generate in the completion.""" length_no_input: bool = True """Whether min_length and max_length should include the length of the input.""" remove_input: bool = True """Remove input text from API response""" remove_end_sequence: bool = True """Whether or not to remove the end sequence token.""" bad_words: List[str] = [] """List of tokens not allowed to be generated.""" top_p: int = 1 """Total probability mass of tokens to consider at each step."""
https://python.langchain.com/en/latest/_modules/langchain/llms/nlpcloud.html
9ed309ba7207-1
"""Total probability mass of tokens to consider at each step.""" top_k: int = 50 """The number of highest probability tokens to keep for top-k filtering.""" repetition_penalty: float = 1.0 """Penalizes repeated tokens. 1.0 means no penalty.""" length_penalty: float = 1.0 """Exponential penalty to the length.""" do_sample: bool = True """Whether to use sampling (True) or greedy decoding.""" num_beams: int = 1 """Number of beams for beam search.""" early_stopping: bool = False """Whether to stop beam search at num_beams sentences.""" num_return_sequences: int = 1 """How many completions to generate for each prompt.""" nlpcloud_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nlpcloud_api_key = get_from_dict_or_env( values, "nlpcloud_api_key", "NLPCLOUD_API_KEY" ) try: import nlpcloud values["client"] = nlpcloud.Client( values["model_name"], nlpcloud_api_key, gpu=True, lang="en" ) except ImportError: raise ImportError( "Could not import nlpcloud python package. " "Please install it with `pip install nlpcloud`." ) return values @property def _default_params(self) -> Mapping[str, Any]:
https://python.langchain.com/en/latest/_modules/langchain/llms/nlpcloud.html
9ed309ba7207-2
@property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling NLPCloud API.""" return { "temperature": self.temperature, "min_length": self.min_length, "max_length": self.max_length, "length_no_input": self.length_no_input, "remove_input": self.remove_input, "remove_end_sequence": self.remove_end_sequence, "bad_words": self.bad_words, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, "length_penalty": self.length_penalty, "do_sample": self.do_sample, "num_beams": self.num_beams, "early_stopping": self.early_stopping, "num_return_sequences": self.num_return_sequences, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "nlpcloud" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to NLPCloud's create endpoint. Args: prompt: The prompt to pass into the model. stop: Not supported by this interface (pass in init method) Returns: The string generated by the model. Example: .. code-block:: python
https://python.langchain.com/en/latest/_modules/langchain/llms/nlpcloud.html
9ed309ba7207-3
The string generated by the model. Example: .. code-block:: python response = nlpcloud("Tell me a joke.") """ if stop and len(stop) > 1: raise ValueError( "NLPCloud only supports a single stop sequence per generation." " Pass in a list of length 1." ) elif stop and len(stop) == 1: end_sequence = stop[0] else: end_sequence = None response = self.client.generation( prompt, end_sequence=end_sequence, **self._default_params ) return response["generated_text"]
https://python.langchain.com/en/latest/_modules/langchain/llms/nlpcloud.html
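A short usage sketch for the wrapper above; it assumes ``NLPCLOUD_API_KEY`` is set. Unlike most wrappers in this collection, NLPCloud accepts at most one stop sequence per call, forwarded as ``end_sequence``:

from langchain.llms import NLPCloud

llm = NLPCloud(model_name="finetuned-gpt-neox-20b", max_length=128)

# A single stop sequence is allowed; passing more than one raises a ValueError.
print(llm("Tell me a joke.", stop=["\n\n"]))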
553431be1071-0
Source code for langchain.llms.huggingface_hub """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "gpt2" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") [docs]class HuggingFaceHub(LLM): """Wrapper around HuggingFaceHub models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceHub hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key") """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
553431be1071-1
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"repo_id": self.repo_id, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_hub" def _call( self, prompt: str, stop: Optional[List[str]] = None,
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
553431be1071-2
prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} response = self.client(inputs=prompt, params=_model_kwargs) if "error" in response: raise ValueError(f"Error raised by inference API: {response['error']}") if self.client.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.client.task == "text2text-generation": text = response[0]["generated_text"] elif self.client.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.client.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
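A short usage sketch for the wrapper above. It assumes ``HUGGINGFACEHUB_API_TOKEN`` is set; the repository is just an illustrative ``text2text-generation`` model, and any unsupported task raises a ValueError as shown in the validator:

from langchain.llms import HuggingFaceHub

hf = HuggingFaceHub(
    repo_id="google/flan-t5-xl",
    model_kwargs={"temperature": 0.5, "max_length": 64},
)

# For text2text-generation the full generated_text is returned; for
# text-generation the echoed prompt is stripped off first.
print(hf("Translate to German: I love programming."))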
66440fac175a-0
Source code for langchain.llms.ai21 """Wrapper around AI21 APIs.""" from typing import Any, Dict, List, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env class AI21PenaltyData(BaseModel): """Parameters for AI21 penalty data.""" scale: int = 0 applyToWhitespaces: bool = True applyToPunctuations: bool = True applyToNumbers: bool = True applyToStopwords: bool = True applyToEmojis: bool = True [docs]class AI21(LLM): """Wrapper around AI21 large language models. To use, you should have the environment variable ``AI21_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import AI21 ai21 = AI21(model="j2-jumbo-instruct") """ model: str = "j2-jumbo-instruct" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" maxTokens: int = 256 """The maximum number of tokens to generate in the completion.""" minTokens: int = 0 """The minimum number of tokens to generate in the completion.""" topP: float = 1.0 """Total probability mass of tokens to consider at each step.""" presencePenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens.""" countPenalty: AI21PenaltyData = AI21PenaltyData()
https://python.langchain.com/en/latest/_modules/langchain/llms/ai21.html
66440fac175a-1
countPenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens according to count.""" frequencyPenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens according to frequency.""" numResults: int = 1 """How many completions to generate for each prompt.""" logitBias: Optional[Dict[str, float]] = None """Adjust the probability of specific tokens being generated.""" ai21_api_key: Optional[str] = None stop: Optional[List[str]] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY") values["ai21_api_key"] = ai21_api_key return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling AI21 API.""" return { "temperature": self.temperature, "maxTokens": self.maxTokens, "minTokens": self.minTokens, "topP": self.topP, "presencePenalty": self.presencePenalty.dict(), "countPenalty": self.countPenalty.dict(), "frequencyPenalty": self.frequencyPenalty.dict(), "numResults": self.numResults, "logitBias": self.logitBias, } @property
https://python.langchain.com/en/latest/_modules/langchain/llms/ai21.html
66440fac175a-2
"logitBias": self.logitBias, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "ai21" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to AI21's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ai21("Tell me a joke.") """ if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: stop = self.stop elif stop is None: stop = [] if self.base_url is not None: base_url = self.base_url else: if self.model in ("j1-grande-instruct",): base_url = "https://api.ai21.com/studio/v1/experimental" else: base_url = "https://api.ai21.com/studio/v1" response = requests.post( url=f"{base_url}/{self.model}/complete", headers={"Authorization": f"Bearer {self.ai21_api_key}"},
https://python.langchain.com/en/latest/_modules/langchain/llms/ai21.html
66440fac175a-3
headers={"Authorization": f"Bearer {self.ai21_api_key}"}, json={"prompt": prompt, "stopSequences": stop, **self._default_params}, ) if response.status_code != 200: optional_detail = response.json().get("error") raise ValueError( f"AI21 /complete call failed with status code {response.status_code}." f" Details: {optional_detail}" ) response_json = response.json() return response_json["completions"][0]["data"]["text"] By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/ai21.html
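A short usage sketch for the wrapper above, including the penalty objects defined at the top of the module. It assumes ``AI21_API_KEY`` is set; the parameter values are arbitrary:

from langchain.llms import AI21
from langchain.llms.ai21 import AI21PenaltyData

llm = AI21(
    model="j2-jumbo-instruct",
    maxTokens=128,
    temperature=0.5,
    presencePenalty=AI21PenaltyData(scale=3, applyToEmojis=False),
)

# stop becomes the stopSequences field of the /complete request body.
print(llm("Write a tagline for an ice-cream shop.", stop=["##"]))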
01617c53761a-0
Source code for langchain.llms.llamacpp """Wrapper around llama.cpp.""" import logging from typing import Any, Dict, Generator, List, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM logger = logging.getLogger(__name__) [docs]class LlamaCpp(LLM): """Wrapper around the llama.cpp model. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/llama/model") """ client: Any #: :meta private: model_path: str """The path to the Llama model file.""" lora_base: Optional[str] = None """The path to the Llama LoRA base model.""" lora_path: Optional[str] = None """The path to the Llama LoRA. If None, no LoRA is loaded.""" n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(True, alias="f16_kv")
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
01617c53761a-1
f16_kv: bool = Field(True, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" suffix: Optional[str] = Field(None) """A suffix to append to the generated text. If None, no suffix is appended.""" max_tokens: Optional[int] = 256 """The maximum number of tokens to generate.""" temperature: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" logprobs: Optional[int] = Field(None) """The number of logprobs to return. If None, no logprobs are returned.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = []
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
01617c53761a-2
"""Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_tokens_size: Optional[int] = 64 """The number of tokens to look back when applying the repeat_penalty.""" use_mmap: Optional[bool] = True """Whether to keep the model loaded in RAM""" streaming: bool = True """Whether to stream the results, token by token.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] model_param_names = [ "lora_path", "lora_base", "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "use_mmap", "last_n_tokens_size", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] try: from llama_cpp import Llama values["client"] = Llama(model_path, **model_params) except ImportError: raise ModuleNotFoundError(
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
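validate_environment gathers the fields named in model_param_names and hands them to llama_cpp.Llama, with the model path passed positionally. A sketch of the roughly equivalent direct call, assuming llama-cpp-python is installed and the placeholder path is replaced with a real model file:

from llama_cpp import Llama

# Mirrors what the validator builds from model_param_names (sketch, not the exact code path).
client = Llama(
    "/path/to/llama/model.bin",  # model_path, passed positionally
    n_ctx=512,
    seed=-1,
    f16_kv=True,
    use_mlock=False,
)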
01617c53761a-3
except ImportError: raise ModuleNotFoundError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling llama_cpp.""" return { "suffix": self.suffix, "max_tokens": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, "logprobs": self.logprobs, "echo": self.echo, "stop_sequences": self.stop, # key here is convention among LLM classes "repeat_penalty": self.repeat_penalty, "top_k": self.top_k, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_path": self.model_path}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "llama.cpp" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in format needed by llama_cpp. Args: stop (Optional[List[str]]): List of stop sequences for llama_cpp. Returns: Dictionary containing the combined parameters. """
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
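Because _identifying_params merges the model path with _default_params, printing it is a quick way to confirm what will be sent on each call. A sketch (these are internal properties, so treat this as a debugging aid only):

from langchain.llms import LlamaCpp

llm = LlamaCpp(model_path="/path/to/llama/model.bin")  # placeholder path
print(llm._llm_type)            # "llama.cpp"
print(llm._identifying_params)  # model_path plus suffix, max_tokens, temperature, ...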
01617c53761a-4
Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params # llama_cpp expects the "stop" key not this, so we remove it: params.pop("stop_sequences") # then sets it as configured, or default to an empty list: params["stop"] = self.stop or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the Llama model and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") llm("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream # method that yields as they are generated # and return the combined strings from the first choices's text: combined_text_output = "" for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager): combined_text_output += token["choices"][0]["text"] return combined_text_output else: params = self._get_parameters(stop) result = self.client(prompt=prompt, **params)
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
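A sketch of a plain, non-streaming call. Passing stop at call time is allowed here because the instance-level stop list defaults to empty, and _get_parameters rejects the case where both are set:

from langchain.llms import LlamaCpp

llm = LlamaCpp(
    model_path="/path/to/local/llama/model.bin",  # placeholder path
    streaming=False,  # return the whole completion at once instead of accumulating tokens
)
text = llm("Q: Name the planets in the solar system. A:", stop=["\n"])
print(text)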
01617c53761a-5
result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] [docs] def stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> Generator[Dict, None, None]: """Yields results objects as they are generated in real time. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like objects containing a string token and metadata. See llama-cpp-python docs and below for more. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp( model_path="/path/to/local/model.bin", temperature = 0.5 ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] print(result["text"], end='', flush=True) """ params = self._get_parameters(stop) result = self.client(prompt=prompt, stream=True, **params) for chunk in result: token = chunk["choices"][0]["text"]
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
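Since stream() reports each token through on_llm_new_token, a small callback handler can observe the stream without touching the generator. A sketch, assuming the callbacks argument accepted by LangChain LLMs and a placeholder model path:

from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms import LlamaCpp

class PrintTokens(BaseCallbackHandler):
    """Print each new token as the model produces it."""
    def on_llm_new_token(self, token: str, **kwargs) -> None:
        print(token, end="", flush=True)

llm = LlamaCpp(
    model_path="/path/to/local/model.bin",  # placeholder path
    callbacks=[PrintTokens()],
)
llm("Write one sentence about the sea.")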
01617c53761a-6
for chunk in result: token = chunk["choices"][0]["text"] log_probs = chunk["choices"][0].get("logprobs", None) if run_manager: run_manager.on_llm_new_token( token=token, verbose=self.verbose, log_probs=log_probs ) yield chunk [docs] def get_num_tokens(self, text: str) -> int: tokenized_text = self.client.tokenize(text.encode("utf-8")) return len(tokenized_text)
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
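get_num_tokens delegates to the loaded client's own tokenizer, so the count matches what llama.cpp will actually see. A sketch, with a placeholder model path:

from langchain.llms import LlamaCpp

llm = LlamaCpp(model_path="/path/to/llama/model.bin")  # placeholder path
print(llm.get_num_tokens("How many llama.cpp tokens is this sentence?"))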
1bc0bd15a5fb-0
Source code for langchain.llms.modal """Wrapper around Modal API.""" import logging from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) [docs]class Modal(LLM): """Wrapper around Modal large language models. To use, you should have the ``modal-client`` python package installed. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import Modal modal = Modal(endpoint_url="") """ endpoint_url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs.
https://python.langchain.com/en/latest/_modules/langchain/llms/modal.html
1bc0bd15a5fb-1
logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "modal" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to Modal endpoint.""" params = self.model_kwargs or {} response = requests.post( url=self.endpoint_url, headers={ "Content-Type": "application/json", }, json={"prompt": prompt, **params}, ) try: if prompt in response.json()["prompt"]: response_json = response.json() except KeyError: raise ValueError("LangChain requires 'prompt' key in response.") text = response_json["prompt"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/modal.html
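Putting the Modal wrapper together: the endpoint URL below is hypothetical, the extra keyword arguments end up in model_kwargs and are sent in the JSON payload alongside the prompt, and _call expects the endpoint's JSON response to contain a "prompt" key with the generated text. A sketch:

from langchain.llms import Modal

llm = Modal(
    endpoint_url="https://example--my-llm.modal.run",  # hypothetical deployed web endpoint
    model_kwargs={"max_length": 200},  # illustrative; meaning depends on the deployed model
)
# The endpoint must return JSON with a "prompt" key holding the completion.
print(llm("Tell me a short story about a robot."))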
a61d84b42525-0
Source code for langchain.llms.aleph_alpha """Wrapper around Aleph Alpha APIs.""" from typing import Any, Dict, List, Optional, Sequence from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class AlephAlpha(LLM): """Wrapper around Aleph Alpha large language models. To use, you should have the ``aleph_alpha_client`` python package installed, and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Parameters are explained more in depth here: https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10 Example: .. code-block:: python from langchain.llms import AlephAlpha alpeh_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = "luminous-base" """Model name to use.""" maximum_tokens: int = 64 """The maximum number of tokens to be generated.""" temperature: float = 0.0 """A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 0.0 """Total probability mass of tokens to consider at each step."""
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
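A sketch of instantiating the Aleph Alpha wrapper; it assumes ALEPH_ALPHA_API_KEY is set in the environment (or the key is passed explicitly) and aleph_alpha_client is installed:

from langchain.llms import AlephAlpha

llm = AlephAlpha(
    model="luminous-base",
    maximum_tokens=64,
    temperature=0.0,
    top_k=0,
    top_p=0.0,
)
print(llm("Q: What is the capital of France?\nA:", stop=["\n"]))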
a61d84b42525-1
"""Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion."""
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
a61d84b42525-2
echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. """ control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment."""
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
a61d84b42525-3
"""Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: import aleph_alpha_client values["client"] = aleph_alpha_client.Client(token=aleph_alpha_api_key) except ImportError: raise ImportError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo,
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
a61d84b42525-4
"minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "alpeh_alpha" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model.
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
a61d84b42525-5
Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = alpeh_alpha("Tell me a joke.") """ from aleph_alpha_client import CompletionRequest, Prompt params = self._default_params if self.stop_sequences is not None and stop is not None: raise ValueError( "stop sequences found in both the input and default params." ) elif self.stop_sequences is not None: params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop request = CompletionRequest(prompt=Prompt.from_text(prompt), **params) response = self.client.complete(model=self.model, request=request) text = response.completions[0].completion # If stop tokens are provided, Aleph Alpha's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop_sequences is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
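The stop handling in _call means a stop list can come from the stop_sequences field or from the call, but not both at once. A sketch of the two styles, assuming ALEPH_ALPHA_API_KEY is set:

from langchain.llms import AlephAlpha

# Either configure stops on the instance...
llm = AlephAlpha(stop_sequences=["###"])
text = llm("Write a haiku, then ###")

# ...or pass them per call on an instance that has none configured.
llm2 = AlephAlpha()
text2 = llm2("Write a haiku, then ###", stop=["###"])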
34de076c5d57-0
Source code for langchain.llms.replicate """Wrapper around Replicate API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Replicate(LLM): """Wrapper around Replicate models. To use, you should have the ``replicate`` python package installed, and the environment variable ``REPLICATE_API_TOKEN`` set with your API token. You can find your token here: https://replicate.com/account The model param is required, but any other model parameters can also be passed in with the format input={model_param: value, ...} Example: .. code-block:: python from langchain.llms import Replicate replicate = Replicate(model="stability-ai/stable-diffusion: \ 27b93a2413e7f36cd83da926f365628\ 0b2931564ff050bf9575f1fdf9bcd7478", input={"image_dimensions": "512x512"}) """ model: str input: Dict[str, Any] = Field(default_factory=dict) model_kwargs: Dict[str, Any] = Field(default_factory=dict) replicate_api_token: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in."""
https://python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
34de076c5d57-1
"""Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" replicate_api_token = get_from_dict_or_env( values, "REPLICATE_API_TOKEN", "REPLICATE_API_TOKEN" ) values["replicate_api_token"] = replicate_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of model.""" return "replicate" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to replicate endpoint.""" try: import replicate as replicate_python except ImportError: raise ImportError( "Could not import replicate python package. "
https://python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
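build_extra runs before field validation, so unknown keyword arguments are folded into model_kwargs (with a warning) rather than rejected by Extra.forbid. A sketch, assuming REPLICATE_API_TOKEN is set; the model slug is a placeholder:

from langchain.llms import Replicate

# These two constructions end up equivalent once build_extra has run (sketch).
llm_a = Replicate(model="owner/model:versionhash", temperature=0.75)
llm_b = Replicate(model="owner/model:versionhash", model_kwargs={"temperature": 0.75})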