Dataset columns: id — string (14–16 chars); text — string (29–2.73k chars); source — string (49–117 chars)
34de076c5d57-2
raise ImportError( "Could not import replicate python package. " "Please install it with `pip install replicate`." ) # get the model and version model_str, version_str = self.model.split(":") model = replicate_python.models.get(model_str) version = model.versions.get(version_str) # sort through the openapi schema to get the name of the first input input_properties = sorted( version.openapi_schema["components"]["schemas"]["Input"][ "properties" ].items(), key=lambda item: item[1].get("x-order", 0), ) first_input_name = input_properties[0][0] inputs = {first_input_name: prompt, **self.input} iterator = replicate_python.run(self.model, input={**inputs}) return "".join([output for output in iterator])
https://python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
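A minimal usage sketch for the Replicate wrapper whose `_call` body appears above, assuming the `replicate` package is installed, `REPLICATE_API_TOKEN` is set, and `Replicate` is exported from `langchain.llms`; the model string (owner/name:version-hash) and the `input` keys are illustrative placeholders, not values taken from this page:

.. code-block:: python

    from langchain.llms import Replicate

    # The wrapper splits this string on ":" into model and version, as shown above.
    llm = Replicate(
        model="some-owner/some-model:0123456789abcdef",  # hypothetical model:version string
        input={"temperature": 0.75, "max_length": 200},  # forwarded alongside the prompt
    )
    print(llm("Explain what a language model is in one sentence."))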
bafbc7d0fe39-0
Source code for langchain.llms.bedrock import json from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens class LLMInputOutputAdapter: """Adapter class to prepare the inputs from Langchain to a format that LLM model expects. Also, provides helper function to extract the generated text from the model response.""" @classmethod def prepare_input( cls, provider: str, prompt: str, model_kwargs: Dict[str, Any] ) -> Dict[str, Any]: input_body = {**model_kwargs} if provider == "anthropic" or provider == "ai21": input_body["prompt"] = prompt else: input_body["inputText"] = prompt if provider == "anthropic" and "max_tokens_to_sample" not in input_body: input_body["max_tokens_to_sample"] = 50 return input_body @classmethod def prepare_output(cls, provider: str, response: Any) -> str: if provider == "anthropic": response_body = json.loads(response.get("body").read().decode()) return response_body.get("completion") else: response_body = json.loads(response.get("body").read()) if provider == "ai21": return response_body.get("completions")[0].get("data").get("text") else: return response_body.get("results")[0].get("outputText") [docs]class Bedrock(LLM): """LLM provider to invoke Bedrock models.
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
bafbc7d0fe39-1
"""LLM provider to invoke Bedrock models. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. """ """ Example: .. code-block:: python from bedrock_langchain.bedrock_llm import BedrockLLM llm = BedrockLLM( credentials_profile_name="default", model_id="amazon.titan-tg1-large" ) """ client: Any #: :meta private: region_name: Optional[str] = None """The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here. """ credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ model_id: str """Id of the model to call, e.g., amazon.titan-tg1-large, this is equivalent to the modelId property in the list-foundation-models api""" model_kwargs: Optional[Dict] = None
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
bafbc7d0fe39-2
model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" # Skip creating new client if passed in constructor if "client" in values: return values try: import boto3 if values["credentials_profile_name"] is not None: session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() client_params = {} if values["region_name"]: client_params["region_name"] = values["region_name"] values["client"] = session.client("bedrock", **client_params) except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "amazon_bedrock" def _call( self,
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
bafbc7d0fe39-3
return "amazon_bedrock" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Bedrock service model. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} provider = self.model_id.split(".")[0] input_body = LLMInputOutputAdapter.prepare_input( provider, prompt, _model_kwargs ) body = json.dumps(input_body) accept = "application/json" contentType = "application/json" try: response = self.client.invoke_model( body=body, modelId=self.model_id, accept=accept, contentType=contentType ) text = LLMInputOutputAdapter.prepare_output(provider, response) except Exception as e: raise ValueError(f"Error raised by bedrock service: {e}") if stop is not None: text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
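A hedged sketch of constructing and calling the Bedrock wrapper defined above; the profile name, region, model id, and model kwargs are placeholders, and the snippet assumes `boto3` is installed and the chosen profile can access the Bedrock service:

.. code-block:: python

    from langchain.llms.bedrock import Bedrock

    llm = Bedrock(
        credentials_profile_name="default",  # profile from ~/.aws/credentials
        region_name="us-west-2",             # otherwise AWS_DEFAULT_REGION / ~/.aws/config is used
        model_id="amazon.titan-tg1-large",   # provider is inferred from the prefix before "."
        model_kwargs={"temperature": 0.5},   # hypothetical provider-specific parameters
    )
    print(llm("Tell me a joke."))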
ada7dbdedc24-0
Source code for langchain.llms.self_hosted_hugging_face """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.""" import importlib.util import logging from typing import Any, Callable, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.self_hosted import SelfHostedPipeline from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") logger = logging.getLogger(__name__) def _generate_text( pipeline: Any, prompt: str, *args: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: """Inference function to send to the remote hardware. Accepts a Hugging Face pipeline (or more likely, a key pointing to such a pipeline on the cluster's object store) and returns generated text. """ response = pipeline(prompt, *args, **kwargs) if pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif pipeline.task == "text2text-generation": text = response[0]["generated_text"] elif pipeline.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
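`_generate_text` above, like most wrappers in this listing, delegates stop-word handling to `langchain.llms.utils.enforce_stop_tokens`, which is not reproduced in this dump. A minimal sketch of the presumed behaviour — truncating the completion at the first occurrence of any stop sequence — under the assumption of a simple regex split:

.. code-block:: python

    import re
    from typing import List

    def enforce_stop_tokens_sketch(text: str, stop: List[str]) -> str:
        # Keep only the text that precedes the first stop sequence.
        return re.split("|".join(re.escape(s) for s in stop), text)[0]

    print(enforce_stop_tokens_sketch("a joke. Human: another?", ["Human:"]))  # -> "a joke. "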
ada7dbdedc24-1
text = enforce_stop_tokens(text, stop) return text def _load_transformer( model_id: str = DEFAULT_MODEL_ID, task: str = DEFAULT_TASK, device: int = 0, model_kwargs: Optional[dict] = None, ) -> Any: """Inference function to send to the remote hardware. Accepts a huggingface model_id and returns a pipeline for the task. """ from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from transformers import pipeline as hf_pipeline _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task in ("text2text-generation", "summarization"): model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" )
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
ada7dbdedc24-2
) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return pipeline [docs]class SelfHostedHuggingFaceLLM(SelfHostedPipeline): """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example using from_model_id: .. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceLLM(
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
ada7dbdedc24-3
hf = SelfHostedHuggingFaceLLM( model_id="google/flan-t5-large", task="text2text-generation", hardware=gpu ) Example passing fn that generates a pipeline (bc the pipeline is not serializable): .. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def get_pipeline(): model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer ) return pipe hf = SelfHostedHuggingFaceLLM( model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu) """ model_id: str = DEFAULT_MODEL_ID """Hugging Face model_id to load the model.""" task: str = DEFAULT_TASK """Hugging Face task ("text-generation", "text2text-generation" or "summarization").""" device: int = 0 """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" hardware: Any """Remote hardware to send the inference function to.""" model_reqs: List[str] = ["./", "transformers", "torch"] """Requirements to install on hardware to inference the model.""" model_load_fn: Callable = _load_transformer """Function to load the model remotely on the server."""
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
ada7dbdedc24-4
"""Function to load the model remotely on the server.""" inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def __init__(self, **kwargs: Any): """Construct the pipeline remotely using an auxiliary function. The load function needs to be importable to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ load_fn_kwargs = { "model_id": kwargs.get("model_id", DEFAULT_MODEL_ID), "task": kwargs.get("task", DEFAULT_TASK), "device": kwargs.get("device", 0), "model_kwargs": kwargs.get("model_kwargs", None), } super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: return "selfhosted_huggingface_pipeline" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop) By Harrison Chase © Copyright 2023, Harrison Chase.
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
ce585433b30d-0
Source code for langchain.llms.huggingface_endpoint """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation", "summarization") [docs]class HuggingFaceEndpoint(LLM): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config:
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
ce585433b30d-1
huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) values["huggingfacehub_api_token"] = huggingfacehub_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call( self,
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
ce585433b30d-2
return "huggingface_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": # Text generation return includes the starter text. text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] elif self.task == "summarization":
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
ce585433b30d-3
elif self.task == "summarization": text = generated_text[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
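The `_call` method above posts a JSON payload of the form `{"inputs": ..., "parameters": ...}` with a bearer token. A hedged sketch of the equivalent raw request, using a placeholder endpoint URL and token:

.. code-block:: python

    import requests

    endpoint_url = "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"  # placeholder
    headers = {
        "Authorization": "Bearer <HUGGINGFACEHUB_API_TOKEN>",  # placeholder token
        "Content-Type": "application/json",
    }
    payload = {"inputs": "Tell me a joke.", "parameters": {"max_new_tokens": 64}}
    response = requests.post(endpoint_url, headers=headers, json=payload)
    # For a text-generation endpoint the wrapper expects [{"generated_text": "..."}]
    print(response.json())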
97f56ddeb833-0
Source code for langchain.llms.cerebriumai """Wrapper around CerebriumAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class CerebriumAI(LLM): """Wrapper around CerebriumAI large language models. To use, you should have the ``cerebrium`` python package installed, and the environment variable ``CEREBRIUMAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import CerebriumAI cerebrium = CerebriumAI(endpoint_url="") """ endpoint_url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" cerebriumai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()}
https://python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html
97f56ddeb833-1
all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cerebriumai_api_key = get_from_dict_or_env( values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY" ) values["cerebriumai_api_key"] = cerebriumai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "cerebriumai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to CerebriumAI endpoint.""" try: from cerebrium import model_api_request
https://python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html
97f56ddeb833-2
try: from cerebrium import model_api_request except ImportError: raise ValueError( "Could not import cerebrium python package. " "Please install it with `pip install cerebrium`." ) params = self.model_kwargs or {} response = model_api_request( self.endpoint_url, {"prompt": prompt, **params}, self.cerebriumai_api_key ) text = response["data"]["result"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html
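A usage sketch for the CerebriumAI wrapper above, assuming the `cerebrium` package is installed and `CEREBRIUMAI_API_KEY` is set; the endpoint URL is a placeholder, and the extra keyword argument illustrates how `build_extra` moves unknown fields into `model_kwargs` (logging a warning as it does so):

.. code-block:: python

    from langchain.llms import CerebriumAI

    llm = CerebriumAI(
        endpoint_url="https://run.cerebrium.ai/<deployment>/predict",  # placeholder endpoint
        max_length=100,  # not a declared field, so it is transferred into model_kwargs
    )
    print(llm("Tell me a joke."))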
82323e2b0f27-0
Source code for langchain.llms.stochasticai """Wrapper around StochasticAI APIs.""" import logging import time from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class StochasticAI(LLM): """Wrapper around StochasticAI large language models. To use, you should have the environment variable ``STOCHASTICAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import StochasticAI stochasticai = StochasticAI(api_url="") """ api_url: str = "" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" stochasticai_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.")
https://python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html
82323e2b0f27-1
raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" stochasticai_api_key = get_from_dict_or_env( values, "stochasticai_api_key", "STOCHASTICAI_API_KEY" ) values["stochasticai_api_key"] = stochasticai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.api_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "stochasticai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.") """ params = self.model_kwargs or {}
https://python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html
82323e2b0f27-2
""" params = self.model_kwargs or {} response_post = requests.post( url=self.api_url, json={"prompt": prompt, "params": params}, headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get( url=response_post_json["data"]["responseUrl"], headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_get.raise_for_status() response_get_json = response_get.json()["data"] text = response_get_json.get("completion") completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html
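A usage sketch for the StochasticAI wrapper above, which submits the prompt and then polls the returned `responseUrl` every 0.5 seconds until a completion appears; the API URL is a placeholder, the model kwargs are hypothetical, and `STOCHASTICAI_API_KEY` is assumed to be set:

.. code-block:: python

    from langchain.llms import StochasticAI

    llm = StochasticAI(
        api_url="https://api.stochastic.ai/v1/modelApi/submit/<model>",  # placeholder URL
        model_kwargs={"temperature": 0.7},  # hypothetical parameters sent as "params"
    )
    print(llm("Tell me a joke."))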
afa27ae0a4f8-0
Source code for langchain.llms.fake """Fake LLM wrapper for testing purposes.""" from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import LLM [docs]class FakeListLLM(LLM): """Fake LLM wrapper for testing purposes.""" responses: List i: int = 0 @property def _llm_type(self) -> str: """Return type of llm.""" return "fake-list" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Return next response""" response = self.responses[self.i] self.i += 1 return response async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> str: """Return next response""" response = self.responses[self.i] self.i += 1 return response @property def _identifying_params(self) -> Mapping[str, Any]: return {"responses": self.responses}
https://python.langchain.com/en/latest/_modules/langchain/llms/fake.html
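Because `FakeListLLM` simply replays its `responses` list in order, it is handy for deterministic unit tests of chains and agents. A small sketch:

.. code-block:: python

    from langchain.llms.fake import FakeListLLM

    llm = FakeListLLM(responses=["first canned answer", "second canned answer"])
    assert llm("any prompt") == "first canned answer"       # i == 0
    assert llm("another prompt") == "second canned answer"  # i == 1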
013a01484bb4-0
Source code for langchain.llms.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from abc import abstractmethod from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar, Union from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens INPUT_TYPE = TypeVar("INPUT_TYPE", bound=Union[str, List[str]]) OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]]]) class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]): """A handler class to transform input from LLM to a format that SageMaker endpoint expects. Similarily, the class also handles transforming output from the SageMaker endpoint to a format that LLM class expects. """ """ Example: .. code-block:: python class ContentHandler(ContentHandlerBase): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({prompt: prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ content_type: Optional[str] = "text/plain" """The MIME type of the input data passed to endpoint""" accepts: Optional[str] = "text/plain" """The MIME type of the response data returned from endpoint""" @abstractmethod
https://python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
013a01484bb4-1
"""The MIME type of the response data returned from endpoint""" @abstractmethod def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes: """Transforms the input to a format that model can accept as the request Body. Should return bytes or seekable file like object in the format specified in the content_type request header. """ @abstractmethod def transform_output(self, output: bytes) -> OUTPUT_TYPE: """Transforms the output from the model to string that the LLM class expects. """ class LLMContentHandler(ContentHandlerBase[str, str]): """Content handler for LLM class.""" [docs]class SagemakerEndpoint(LLM): """Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain import SagemakerEndpoint endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" )
https://python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
013a01484bb4-2
) credentials_profile_name = ( "default" ) se = SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: LLMContentHandler """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python from langchain.llms.sagemaker_endpoint import LLMContentHandler class ContentHandler(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({prompt: prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str:
https://python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
013a01484bb4-3
def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]:
https://python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
013a01484bb4-4
@property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_name": self.endpoint_name}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "sagemaker_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Sagemaker inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(prompt, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") text = self.content_handler.transform_output(response["Body"]) if stop is not None:
https://python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
013a01484bb4-5
if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to the sagemaker endpoint. text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
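Pulling the pieces above together, a hedged end-to-end sketch: the endpoint name, region, and profile are placeholders, and the JSON schema in the content handler is illustrative — the real schema depends on the container serving the model (note the docstring's own example builds the JSON key from the prompt value, which looks unintended):

.. code-block:: python

    import json
    from typing import Dict

    from langchain.llms.sagemaker_endpoint import LLMContentHandler, SagemakerEndpoint

    class ContentHandler(LLMContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # Hypothetical request schema; adjust to whatever your container expects.
            return json.dumps({"inputs": prompt, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    llm = SagemakerEndpoint(
        endpoint_name="my-endpoint-name",  # placeholder
        region_name="us-west-2",
        credentials_profile_name="default",
        content_handler=ContentHandler(),
    )
    print(llm("Tell me a joke."))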
dac4dc7c2960-0
Source code for langchain.llms.google_palm """Wrapper arround Google's PaLM Text APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms import BaseLLM from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" try: import google.api_core.exceptions except ImportError: raise ImportError( "Could not import google-api-core python package. " "Please install it with `pip install google-api-core`." ) multiplier = 2 min_seconds = 1 max_seconds = 60 max_retries = 10 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(google.api_core.exceptions.ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError) ), before_sleep=before_sleep_log(logger, logging.WARNING), )
https://python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
dac4dc7c2960-1
), before_sleep=before_sleep_log(logger, logging.WARNING), ) def generate_with_retry(llm: GooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _generate_with_retry(**kwargs: Any) -> Any: return llm.client.generate_text(**kwargs) return _generate_with_retry(**kwargs) def _strip_erroneous_leading_spaces(text: str) -> str: """Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:]) if has_leading_space: return text.replace("\n ", "\n") else: return text [docs]class GooglePalm(BaseLLM, BaseModel): client: Any #: :meta private: google_api_key: Optional[str] model_name: str = "models/text-bison-001" """Model name to use.""" temperature: float = 0.7 """Run inference with this temperature. Must by in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive."""
https://python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
dac4dc7c2960-2
Must be positive.""" max_output_tokens: Optional[int] = None """Maximum number of tokens to include in a candidate. Must be greater than zero. If unset, will default to 64.""" n: int = 1 """Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) try: import google.generativeai as genai genai.configure(api_key=google_api_key) except ImportError: raise ImportError( "Could not import google-generativeai python package. " "Please install it with `pip install google-generativeai`." ) values["client"] = genai if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0: raise ValueError("max_output_tokens must be greater than zero") return values def _generate(
https://python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
dac4dc7c2960-3
return values def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: generations = [] for prompt in prompts: completion = generate_with_retry( self, model=self.model_name, prompt=prompt, stop_sequences=stop, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, max_output_tokens=self.max_output_tokens, candidate_count=self.n, ) prompt_generations = [] for candidate in completion.candidates: raw_text = candidate["output"] stripped_text = _strip_erroneous_leading_spaces(raw_text) prompt_generations.append(Generation(text=stripped_text)) generations.append(prompt_generations) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: raise NotImplementedError() @property def _llm_type(self) -> str: """Return type of llm.""" return "google_palm"
https://python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
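A usage sketch for the PaLM wrapper above, assuming the `google-generativeai` package is installed, a valid API key is available (passed explicitly here; the `GOOGLE_API_KEY` env var also works), and `GooglePalm` is exported from `langchain.llms` in this version — otherwise import it from `langchain.llms.google_palm`:

.. code-block:: python

    from langchain.llms import GooglePalm

    llm = GooglePalm(
        google_api_key="<GOOGLE_API_KEY>",  # placeholder
        temperature=0.2,
        max_output_tokens=128,
    )
    result = llm.generate(["Write a two-line poem about the ocean."])
    print(result.generations[0][0].text)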
2ee1cd014917-0
Source code for langchain.llms.mosaicml """Wrapper around MosaicML APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env INSTRUCTION_KEY = "### Instruction:" RESPONSE_KEY = "### Response:" INTRO_BLURB = ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request." ) PROMPT_FOR_GENERATION_FORMAT = """{intro} {instruction_key} {instruction} {response_key} """.format( intro=INTRO_BLURB, instruction_key=INSTRUCTION_KEY, instruction="{instruction}", response_key=RESPONSE_KEY, ) [docs]class MosaicML(LLM): """Wrapper around MosaicML's LLM inference service. To use, you should have the environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import MosaicML endpoint_url = ( "https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict" ) mosaic_llm = MosaicML( endpoint_url=endpoint_url, mosaicml_api_token="my-api-key" ) """ endpoint_url: str = (
https://python.langchain.com/en/latest/_modules/langchain/llms/mosaicml.html
2ee1cd014917-1
) """ endpoint_url: str = ( "https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict" ) """Endpoint URL to use.""" inject_instruction_format: bool = False """Whether to inject the instruction format into the prompt.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" retry_sleep: float = 1.0 """How long to try sleeping for if a rate limit is encountered""" mosaicml_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( values, "mosaicml_api_token", "MOSAICML_API_TOKEN" ) values["mosaicml_api_token"] = mosaicml_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "mosaicml" def _transform_prompt(self, prompt: str) -> str: """Transform prompt.""" if self.inject_instruction_format: prompt = PROMPT_FOR_GENERATION_FORMAT.format( instruction=prompt, )
https://python.langchain.com/en/latest/_modules/langchain/llms/mosaicml.html
2ee1cd014917-2
instruction=prompt, ) return prompt def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, is_retry: bool = False, ) -> str: """Call out to a MosaicML LLM inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = mosaic_llm("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} prompt = self._transform_prompt(prompt) payload = {"input_strings": [prompt]} payload.update(_model_kwargs) # HTTP headers for authorization headers = { "Authorization": f"{self.mosaicml_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") try: parsed_response = response.json() if "error" in parsed_response: # if we get rate limited, try sleeping for 1 second if ( not is_retry and "rate limit exceeded" in parsed_response["error"].lower() ): import time time.sleep(self.retry_sleep) return self._call(prompt, stop, run_manager, is_retry=True) raise ValueError(
https://python.langchain.com/en/latest/_modules/langchain/llms/mosaicml.html
2ee1cd014917-3
raise ValueError( f"Error raised by inference API: {parsed_response['error']}" ) if "data" not in parsed_response: raise ValueError( f"Error raised by inference API, no key data: {parsed_response}" ) generated_text = parsed_response["data"] except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {response.text}" ) text = generated_text[0][len(prompt) :] # TODO: replace when MosaicML supports custom stop tokens natively if stop is not None: text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/mosaicml.html
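A usage sketch for the MosaicML wrapper above; the token is a placeholder and the model kwargs are hypothetical, while the endpoint URL matches the default shown in the source:

.. code-block:: python

    from langchain.llms import MosaicML

    llm = MosaicML(
        endpoint_url="https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict",
        inject_instruction_format=True,             # wrap the prompt in the ### Instruction / ### Response template
        mosaicml_api_token="<MOSAICML_API_TOKEN>",  # placeholder; the env var also works
        model_kwargs={"temperature": 0.1},          # hypothetical, forwarded in the request payload
    )
    print(llm("Write three bullet points about databases."))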
b565efba966a-0
Source code for langchain.llms.promptlayer_openai """PromptLayer wrapper.""" import datetime from typing import List, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms import OpenAI, OpenAIChat from langchain.schema import LLMResult [docs]class PromptLayerOpenAI(OpenAI): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAI LLM can also be passed here. The PromptLayerOpenAI LLM adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAI openai = PromptLayerOpenAI(model_name="text-davinci-003") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request."""
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
b565efba966a-1
"""Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAI", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)):
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
b565efba966a-2
for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAI.async", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses [docs]class PromptLayerOpenAIChat(OpenAIChat): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAIChat LLM can also be passed here. The PromptLayerOpenAIChat adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
b565efba966a-3
``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAIChat openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAIChat", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {}
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
b565efba966a-4
generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAIChat.async", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
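A usage sketch for the PromptLayer wrappers above, assuming `openai` and `promptlayer` are installed and both `OPENAI_API_KEY` and `PROMPTLAYER_API_KEY` are set; with `return_pl_id=True` the request id lands in `generation_info`, as shown in `_generate`:

.. code-block:: python

    from langchain.llms import PromptLayerOpenAI

    llm = PromptLayerOpenAI(
        model_name="text-davinci-003",
        pl_tags=["langchain", "demo"],
        return_pl_id=True,
    )
    result = llm.generate(["Tell me a joke."])
    generation = result.generations[0][0]
    print(generation.text)
    print(generation.generation_info["pl_request_id"])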
3a9a9dda96e5-0
Source code for langchain.llms.writer """Wrapper around Writer APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class Writer(LLM): """Wrapper around Writer large language models. To use, you should have the environment variable ``WRITER_API_KEY`` and ``WRITER_ORG_ID`` set with your API key and organization ID respectively. Example: .. code-block:: python from langchain import Writer writer = Writer(model_id="palmyra-base") """ writer_org_id: Optional[str] = None """Writer organization ID.""" model_id: str = "palmyra-instruct" """Model name to use.""" min_tokens: Optional[int] = None """Minimum number of tokens to generate.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" temperature: Optional[float] = None """What sampling temperature to use.""" top_p: Optional[float] = None """Total probability mass of tokens to consider at each step.""" stop: Optional[List[str]] = None """Sequences when completion generation will stop.""" presence_penalty: Optional[float] = None """Penalizes repeated tokens regardless of frequency.""" repetition_penalty: Optional[float] = None """Penalizes repeated tokens according to frequency.""" best_of: Optional[int] = None """Generates this many completions server-side and returns the "best".""" logprobs: bool = False
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
3a9a9dda96e5-1
logprobs: bool = False """Whether to return log probabilities.""" n: Optional[int] = None """How many completions to generate.""" writer_api_key: Optional[str] = None """Writer API key.""" base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and organization id exist in environment.""" writer_api_key = get_from_dict_or_env( values, "writer_api_key", "WRITER_API_KEY" ) values["writer_api_key"] = writer_api_key writer_org_id = get_from_dict_or_env(values, "writer_org_id", "WRITER_ORG_ID") values["writer_org_id"] = writer_org_id return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Writer API.""" return { "minTokens": self.min_tokens, "maxTokens": self.max_tokens, "temperature": self.temperature, "topP": self.top_p, "stop": self.stop, "presencePenalty": self.presence_penalty, "repetitionPenalty": self.repetition_penalty, "bestOf": self.best_of, "logprobs": self.logprobs, "n": self.n, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
3a9a9dda96e5-2
"""Get the identifying parameters.""" return { **{"model_id": self.model_id, "writer_org_id": self.writer_org_id}, **self._default_params, } @property def _llm_type(self) -> str: """Return type of llm.""" return "writer" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Writer's completions endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = Writer("Tell me a joke.") """ if self.base_url is not None: base_url = self.base_url else: base_url = ( "https://enterprise-api.writer.com/llm" f"/organization/{self.writer_org_id}" f"/model/{self.model_id}/completions" ) response = requests.post( url=base_url, headers={ "Authorization": f"{self.writer_api_key}", "Content-Type": "application/json", "Accept": "application/json", }, json={"prompt": prompt, **self._default_params}, ) text = response.text if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
3a9a9dda96e5-3
return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
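A minimal usage sketch for the Writer wrapper above, assuming the ``WRITER_API_KEY`` and ``WRITER_ORG_ID`` environment variables are set (validate_environment reads them); the model name and sampling values below are illustrative placeholders, not defaults taken from the module.

.. code-block:: python

    from langchain import Writer

    # Credentials are picked up from the environment by validate_environment.
    writer = Writer(
        model_id="palmyra-instruct",
        temperature=0.7,
        max_tokens=100,
    )
    # __call__ posts the prompt plus _default_params to the completions endpoint.
    print(writer("Write a tagline for an ice cream shop."))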
cd05e9768170-0
Source code for langchain.llms.anthropic """Wrapper around Anthropic APIs.""" import re import warnings from typing import Any, Callable, Dict, Generator, List, Mapping, Optional, Tuple, Union from pydantic import BaseModel, Extra, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env class _AnthropicCommon(BaseModel): client: Any = None #: :meta private: model: str = "claude-v1" """Model name to use.""" max_tokens_to_sample: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: Optional[float] = None """A non-negative float that tunes the degree of randomness in generation.""" top_k: Optional[int] = None """Number of most likely tokens to consider at each step.""" top_p: Optional[float] = None """Total probability mass of tokens to consider at each step.""" streaming: bool = False """Whether to stream the results.""" default_request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to Anthropic Completion API. Default is 600 seconds.""" anthropic_api_key: Optional[str] = None HUMAN_PROMPT: Optional[str] = None AI_PROMPT: Optional[str] = None count_tokens: Optional[Callable[[str], int]] = None @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anthropic_api_key = get_from_dict_or_env(
https://python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
cd05e9768170-1
anthropic_api_key = get_from_dict_or_env( values, "anthropic_api_key", "ANTHROPIC_API_KEY" ) try: import anthropic values["client"] = anthropic.Client( api_key=anthropic_api_key, default_request_timeout=values["default_request_timeout"], ) values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT values["AI_PROMPT"] = anthropic.AI_PROMPT values["count_tokens"] = anthropic.count_tokens except ImportError: raise ImportError( "Could not import anthropic python package. " "Please install it with `pip install anthropic`." ) return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Anthropic API.""" d = { "max_tokens_to_sample": self.max_tokens_to_sample, "model": self.model, } if self.temperature is not None: d["temperature"] = self.temperature if self.top_k is not None: d["top_k"] = self.top_k if self.top_p is not None: d["top_p"] = self.top_p return d @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{}, **self._default_params} def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if stop is None: stop = []
https://python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
cd05e9768170-2
if stop is None: stop = [] # Never want model to invent new turns of Human / Assistant dialog. stop.extend([self.HUMAN_PROMPT]) return stop [docs]class Anthropic(LLM, _AnthropicCommon): r"""Wrapper around Anthropic's large language models. To use, you should have the ``anthropic`` python package installed, and the environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python import anthropic from langchain.llms import Anthropic model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key") # Simplest invocation, automatically wrapped with HUMAN_PROMPT # and AI_PROMPT. response = model("What are the biggest risks facing humanity?") # Or if you want to use the chat mode, build a few-shot-prompt, or # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT: raw_prompt = "What are the biggest risks facing humanity?" prompt = f"{anthropic.HUMAN_PROMPT} {raw_prompt}{anthropic.AI_PROMPT}" response = model(prompt) """ @root_validator() def raise_warning(cls, values: Dict) -> Dict: """Raise warning that this class is deprecated.""" warnings.warn( "This Anthropic LLM is deprecated. " "Please use `from langchain.chat_models import ChatAnthropic` instead" ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @property
https://python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
cd05e9768170-3
extra = Extra.forbid @property def _llm_type(self) -> str: """Return type of llm.""" return "anthropic-llm" def _wrap_prompt(self, prompt: str) -> str: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if prompt.startswith(self.HUMAN_PROMPT): return prompt # Already wrapped. # Guard against common errors in specifying wrong number of newlines. corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt) if n_subs == 1: return corrected_prompt # As a last resort, wrap the prompt ourselves to emulate instruct-style. return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: r"""Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python prompt = "What are the biggest risks facing humanity?" prompt = f"\n\nHuman: {prompt}\n\nAssistant:" response = model(prompt) """ stop = self._get_anthropic_stop(stop) if self.streaming: stream_resp = self.client.completion_stream(
https://python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
cd05e9768170-4
if self.streaming: stream_resp = self.client.completion_stream( prompt=self._wrap_prompt(prompt), stop_sequences=stop, **self._default_params, ) current_completion = "" for data in stream_resp: delta = data["completion"][len(current_completion) :] current_completion = data["completion"] if run_manager: run_manager.on_llm_new_token(delta, **data) return current_completion response = self.client.completion( prompt=self._wrap_prompt(prompt), stop_sequences=stop, **self._default_params, ) return response["completion"] async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> str: """Call out to Anthropic's completion endpoint asynchronously.""" stop = self._get_anthropic_stop(stop) if self.streaming: stream_resp = await self.client.acompletion_stream( prompt=self._wrap_prompt(prompt), stop_sequences=stop, **self._default_params, ) current_completion = "" async for data in stream_resp: delta = data["completion"][len(current_completion) :] current_completion = data["completion"] if run_manager: await run_manager.on_llm_new_token(delta, **data) return current_completion response = await self.client.acompletion( prompt=self._wrap_prompt(prompt), stop_sequences=stop, **self._default_params, ) return response["completion"]
https://python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
cd05e9768170-5
**self._default_params, ) return response["completion"] [docs] def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator: r"""Call Anthropic completion_stream and return the resulting generator. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens from Anthropic. Example: .. code-block:: python prompt = "Write a poem about a stream." prompt = f"\n\nHuman: {prompt}\n\nAssistant:" generator = anthropic.stream(prompt) for token in generator: yield token """ stop = self._get_anthropic_stop(stop) return self.client.completion_stream( prompt=self._wrap_prompt(prompt), stop_sequences=stop, **self._default_params, ) [docs] def get_num_tokens(self, text: str) -> int: """Calculate number of tokens.""" if not self.count_tokens: raise NameError("Please ensure the anthropic package is loaded") return self.count_tokens(text) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
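A short usage sketch for the Anthropic wrapper above, assuming the ``anthropic`` package is installed and ``ANTHROPIC_API_KEY`` is set; note the deprecation warning in the class itself, which points to ChatAnthropic as the preferred interface.

.. code-block:: python

    from langchain.llms import Anthropic

    model = Anthropic(model="claude-v1", max_tokens_to_sample=128, temperature=0.3)
    # _wrap_prompt adds HUMAN_PROMPT / AI_PROMPT automatically if they are missing.
    print(model("Name three uses for a paperclip."))
    # get_num_tokens delegates to anthropic.count_tokens.
    print(model.get_num_tokens("Name three uses for a paperclip."))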
e8b2554ac54e-0
Source code for langchain.llms.deepinfra """Wrapper around DeepInfra APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_MODEL_ID = "google/flan-t5-xl" [docs]class DeepInfra(LLM): """Wrapper around DeepInfra deployed models. To use, you should have the ``requests`` python package installed, and the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import DeepInfra di = DeepInfra(model_id="google/flan-t5-xl", deepinfra_api_token="my-api-key") """ model_id: str = DEFAULT_MODEL_ID model_kwargs: Optional[dict] = None deepinfra_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN" ) values["deepinfra_api_token"] = deepinfra_api_token return values @property
https://python.langchain.com/en/latest/_modules/langchain/llms/deepinfra.html
e8b2554ac54e-1
return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "deepinfra" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to DeepInfra's inference API endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = di("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} res = requests.post( f"https://api.deepinfra.com/v1/inference/{self.model_id}", headers={ "Authorization": f"bearer {self.deepinfra_api_token}", "Content-Type": "application/json", }, json={"input": prompt, **_model_kwargs}, ) if res.status_code != 200: raise ValueError("Error raised by inference API") t = res.json() text = t["results"][0]["generated_text"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase
https://python.langchain.com/en/latest/_modules/langchain/llms/deepinfra.html
e8b2554ac54e-2
return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/deepinfra.html
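A minimal sketch of calling the DeepInfra wrapper above; the token can also come from the ``DEEPINFRA_API_TOKEN`` environment variable, and the ``model_kwargs`` key shown is only an assumed example of a parameter the deployed model might accept.

.. code-block:: python

    from langchain.llms import DeepInfra

    di = DeepInfra(
        model_id="google/flan-t5-xl",
        deepinfra_api_token="my-api-token",      # placeholder; or set the env variable
        model_kwargs={"max_new_tokens": 64},     # forwarded in the request body
    )
    # _call posts {"input": prompt, **model_kwargs} to the inference endpoint.
    print(di("Translate to German: Good morning!"))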
e5d668a15059-0
Source code for langchain.llms.gpt4all """Wrapper for the GPT4All model.""" from functools import partial from typing import Any, Dict, List, Mapping, Optional, Set from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens [docs]class GPT4All(LLM): r"""Wrapper around GPT4All language models. To use, you should have the ``gpt4all`` python package installed, the pre-trained model file, and the model's config information. Example: .. code-block:: python from langchain.llms import GPT4All model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8) # Simplest invocation response = model("Once upon a time, ") """ model: str """Path to the pre-trained GPT4All model file.""" backend: Optional[str] = Field(None, alias="backend") n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(0, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(False, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all")
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
e5d668a15059-1
logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" embedding: bool = Field(False, alias="embedding") """Use embedding mode only.""" n_threads: Optional[int] = Field(4, alias="n_threads") """Number of threads to use.""" n_predict: Optional[int] = 256 """The maximum number of tokens to generate.""" temp: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_last_n: Optional[int] = 64 "Last n tokens to penalize" repeat_penalty: Optional[float] = 1.3 """The penalty to apply to repeated tokens.""" n_batch: int = Field(1, alias="n_batch") """Batch size for prompt processing.""" streaming: bool = False """Whether to stream the results or not.""" context_erase: float = 0.5 """Leave (n_ctx * context_erase) tokens starting from beginning if the context has run out.""" allow_download: bool = False
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
e5d668a15059-2
starting from beginning if the context has run out.""" allow_download: bool = False """If model does not exist in ~/.cache/gpt4all/, download it.""" client: Any = None #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @staticmethod def _model_param_names() -> Set[str]: return { "n_ctx", "n_predict", "top_k", "top_p", "temp", "n_batch", "repeat_penalty", "repeat_last_n", "context_erase", } def _default_params(self) -> Dict[str, Any]: return { "n_ctx": self.n_ctx, "n_predict": self.n_predict, "top_k": self.top_k, "top_p": self.top_p, "temp": self.temp, "n_batch": self.n_batch, "repeat_penalty": self.repeat_penalty, "repeat_last_n": self.repeat_last_n, "context_erase": self.context_erase, } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: from gpt4all import GPT4All as GPT4AllModel except ImportError: raise ImportError( "Could not import gpt4all python package. " "Please install it with `pip install gpt4all`." ) full_path = values["model"] model_path, delimiter, model_name = full_path.rpartition("/") model_path += delimiter
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
e5d668a15059-3
model_path += delimiter values["client"] = GPT4AllModel( model_name, model_path=model_path or None, model_type=values["backend"], allow_download=values["allow_download"], ) if values["n_threads"] is not None: # set n_threads values["client"].model.set_thread_count(values["n_threads"]) values["backend"] = values["client"].model.model_type return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model": self.model, **self._default_params(), **{ k: v for k, v in self.__dict__.items() if k in self._model_param_names() }, } @property def _llm_type(self) -> str: """Return the type of llm.""" return "gpt4all" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: r"""Call out to GPT4All's generate method. Args: prompt: The prompt to pass into the model. stop: A list of strings to stop generation when encountered. Returns: The string generated by the model. Example: .. code-block:: python prompt = "Once upon a time, " response = model(prompt, n_predict=55) """ text_callback = None if run_manager: text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
e5d668a15059-4
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose) text = "" for token in self.client.generate(prompt, **self._default_params()): if text_callback: text_callback(token) text += token if stop is not None: text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
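A usage sketch for the GPT4All wrapper above with token-by-token streaming to stdout; the model path is a placeholder for a locally downloaded weights file, and the callback handler is assumed to be available from langchain's callbacks package.

.. code-block:: python

    from langchain.llms import GPT4All
    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

    llm = GPT4All(
        model="./models/gpt4all-model.bin",   # local model file (placeholder path)
        n_threads=8,
        callbacks=[StreamingStdOutCallbackHandler()],
    )
    # _call forwards each generated token to run_manager.on_llm_new_token.
    print(llm("Once upon a time, "))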
b9a5d3949e86-0
Source code for langchain.llms.human from typing import Any, Callable, List, Mapping, Optional from pydantic import Field from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens def _display_prompt(prompt: str) -> None: """Displays the given prompt to the user.""" print(f"\n{prompt}") def _collect_user_input( separator: Optional[str] = None, stop: Optional[List[str]] = None ) -> str: """Collects and returns user input as a single string.""" separator = separator or "\n" lines = [] while True: line = input() if not line: break lines.append(line) if stop and any(seq in line for seq in stop): break # Combine all lines into a single string multi_line_input = separator.join(lines) return multi_line_input [docs]class HumanInputLLM(LLM): """ A LLM wrapper which returns user input as the response. """ input_func: Callable = Field(default_factory=lambda: _collect_user_input) prompt_func: Callable[[str], None] = Field(default_factory=lambda: _display_prompt) separator: str = "\n" input_kwargs: Mapping[str, Any] = {} prompt_kwargs: Mapping[str, Any] = {} @property def _identifying_params(self) -> Mapping[str, Any]: """ Returns an empty dictionary as there are no identifying parameters. """ return {} @property def _llm_type(self) -> str: """Returns the type of LLM.""" return "human-input"
https://python.langchain.com/en/latest/_modules/langchain/llms/human.html
b9a5d3949e86-1
"""Returns the type of LLM.""" return "human-input" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """ Displays the prompt to the user and returns their input as a response. Args: prompt (str): The prompt to be displayed to the user. stop (Optional[List[str]]): A list of stop strings. run_manager (Optional[CallbackManagerForLLMRun]): Currently not used. Returns: str: The user's input as a response. """ self.prompt_func(prompt, **self.prompt_kwargs) user_input = self.input_func( separator=self.separator, stop=stop, **self.input_kwargs ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the human themselves user_input = enforce_stop_tokens(user_input, stop) return user_input By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/human.html
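A small sketch of the HumanInputLLM above; the custom ``prompt_func`` and the marker strings are arbitrary choices for illustration.

.. code-block:: python

    from langchain.llms import HumanInputLLM

    llm = HumanInputLLM(
        prompt_func=lambda prompt: print(f"\n===PROMPT===\n{prompt}\n===END===")
    )
    # The prompt is displayed, then whatever you type (finish with a blank line)
    # is returned as the "model" output, with any stop sequences enforced.
    response = llm("What is the capital of France?")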
2c72b706a043-0
Source code for langchain.llms.vertexai """Wrapper around Google VertexAI models.""" from typing import TYPE_CHECKING, Any, Dict, List, Optional from pydantic import BaseModel, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utilities.vertexai import ( init_vertexai, raise_vertex_import_error, ) if TYPE_CHECKING: from vertexai.language_models._language_models import _LanguageModel class _VertexAICommon(BaseModel): client: "_LanguageModel" = None #: :meta private: model_name: str "Model name to use." temperature: float = 0.0 "Sampling temperature, it controls the degree of randomness in token selection." max_output_tokens: int = 128 "Token limit determines the maximum amount of text output from one prompt." top_p: float = 0.95 "Tokens are selected from most probable to least until the sum of their " "probabilities equals the top-p value." top_k: int = 40 "How the model selects tokens for output, the next token is selected from " "among the top-k most probable tokens." project: Optional[str] = None "The default GCP project to use when making Vertex API calls." location: str = "us-central1" "The default location to use when making API calls." credentials: Any = None "The default custom credentials (google.auth.credentials.Credentials) to use " "when making API calls. If not provided, credentials will be ascertained from " "the environment." @property
https://python.langchain.com/en/latest/_modules/langchain/llms/vertexai.html
2c72b706a043-1
"the environment." @property def _default_params(self) -> Dict[str, Any]: base_params = { "temperature": self.temperature, "max_output_tokens": self.max_output_tokens, "top_k": self.top_p, "top_p": self.top_k, } return {**base_params} def _predict(self, prompt: str, stop: Optional[List[str]]) -> str: res = self.client.predict(prompt, **self._default_params) return self._enforce_stop_words(res.text, stop) def _enforce_stop_words(self, text: str, stop: Optional[List[str]]) -> str: if stop: return enforce_stop_tokens(text, stop) return text @property def _llm_type(self) -> str: return "vertexai" @classmethod def _try_init_vertexai(cls, values: Dict) -> None: allowed_params = ["project", "location", "credentials"] params = {k: v for k, v in values.items() if v in allowed_params} init_vertexai(**params) return None [docs]class VertexAI(_VertexAICommon, LLM): """Wrapper around Google Vertex AI large language models.""" model_name: str = "text-bison" tuned_model_name: Optional[str] = None "The name of a tuned model, if it's provided, model_name is ignored." @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" cls._try_init_vertexai(values) try: from vertexai.preview.language_models import TextGenerationModel except ImportError:
https://python.langchain.com/en/latest/_modules/langchain/llms/vertexai.html
2c72b706a043-2
from vertexai.preview.language_models import TextGenerationModel except ImportError: raise_vertex_import_error() tuned_model_name = values.get("tuned_model_name") if tuned_model_name: values["client"] = TextGenerationModel.get_tuned_model(tuned_model_name) else: values["client"] = TextGenerationModel.from_pretrained(values["model_name"]) return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call Vertex model to get predictions based on the prompt. Args: prompt: The prompt to pass into the model. stop: A list of stop words (optional). run_manager: A Callbackmanager for LLM run, optional. Returns: The string generated by the model. """ return self._predict(prompt, stop) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/vertexai.html
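A minimal sketch of the VertexAI wrapper above, assuming Google Cloud application-default credentials are configured and the Vertex AI API is enabled; the project name and parameter values are placeholders.

.. code-block:: python

    from langchain.llms import VertexAI

    llm = VertexAI(
        model_name="text-bison",
        project="my-gcp-project",     # placeholder GCP project id
        temperature=0.2,
        max_output_tokens=256,
    )
    # _predict calls client.predict with the parameters from _default_params.
    print(llm("Suggest three names for a bakery."))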
869ff8f531cc-0
Source code for langchain.llms.bananadev """Wrapper around Banana API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Banana(LLM): """Wrapper around Banana large language models. To use, you should have the ``banana-dev`` python package installed, and the environment variable ``BANANA_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import Banana banana = Banana(model_key="") """ model_key: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" banana_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names:
https://python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
869ff8f531cc-1
if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" banana_api_key = get_from_dict_or_env( values, "banana_api_key", "BANANA_API_KEY" ) values["banana_api_key"] = banana_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_key": self.model_key}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "banana" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to Banana endpoint.""" try: import banana_dev as banana except ImportError: raise ImportError( "Could not import banana-dev python package. " "Please install it with `pip install banana-dev`." ) params = self.model_kwargs or {} api_key = self.banana_api_key model_key = self.model_key
https://python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
869ff8f531cc-2
api_key = self.banana_api_key model_key = self.model_key model_inputs = { # a json specific to your model. "prompt": prompt, **params, } response = banana.run(api_key, model_key, model_inputs) try: text = response["modelOutputs"][0]["output"] except (KeyError, TypeError): returned = response["modelOutputs"][0] raise ValueError( "Response should be of schema: {'output': 'text'}." f"\nResponse was: {returned}" "\nTo fix this:" "\n- fork the source repo of the Banana model" "\n- modify app.py to return the above schema" "\n- deploy that as a custom repo" ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
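A usage sketch for the Banana wrapper above; the key and model key are placeholders, and the deployed app must return the ``{"output": "text"}`` schema described in the error message in ``_call``. Extra keyword arguments are collected into ``model_kwargs`` by ``build_extra`` and sent as part of ``model_inputs``.

.. code-block:: python

    from langchain.llms import Banana

    banana = Banana(
        banana_api_key="my-api-key",        # or set BANANA_API_KEY
        model_key="my-model-key",           # key of the deployed Banana model
        model_kwargs={"max_length": 64},    # assumed example of a model parameter
    )
    print(banana("Tell me a joke."))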
726a69ec9379-0
Source code for langchain.llms.anyscale """Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://python.langchain.com/en/latest/_modules/langchain/llms/anyscale.html
726a69ec9379-1
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint.
https://python.langchain.com/en/latest/_modules/langchain/llms/anyscale.html
726a69ec9379-2
) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/anyscale.html
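A minimal sketch of the Anyscale wrapper above; all three values are placeholders and can instead be supplied via the ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` environment variables, which ``validate_environment`` checks at construction time.

.. code-block:: python

    from langchain.llms import Anyscale

    llm = Anyscale(
        anyscale_service_url="https://my-service.example.com",   # placeholder
        anyscale_service_route="predict",                        # placeholder route
        anyscale_service_token="my-service-token",               # placeholder token
    )
    # _call posts {"prompt": ...} to f"{service_url}/{service_route}".
    print(llm("Tell me a joke."))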
4b2fad263517-0
Source code for langchain.llms.forefrontai """Wrapper around ForefrontAI APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class ForefrontAI(LLM): """Wrapper around ForefrontAI large language models. To use, you should have the environment variable ``FOREFRONTAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import ForefrontAI forefrontai = ForefrontAI(endpoint_url="") """ endpoint_url: str = "" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" length: int = 256 """The maximum number of tokens to generate in the completion.""" top_p: float = 1.0 """Total probability mass of tokens to consider at each step.""" top_k: int = 40 """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" repetition_penalty: int = 1 """Penalizes repeated tokens according to frequency.""" forefrontai_api_key: Optional[str] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
4b2fad263517-1
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" forefrontai_api_key = get_from_dict_or_env( values, "forefrontai_api_key", "FOREFRONTAI_API_KEY" ) values["forefrontai_api_key"] = forefrontai_api_key return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling ForefrontAI API.""" return { "temperature": self.temperature, "length": self.length, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"endpoint_url": self.endpoint_url}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "forefrontai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to ForefrontAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ForefrontAI("Tell me a joke.") """ response = requests.post( url=self.endpoint_url,
https://python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
4b2fad263517-2
""" response = requests.post( url=self.endpoint_url, headers={ "Authorization": f"Bearer {self.forefrontai_api_key}", "Content-Type": "application/json", }, json={"text": prompt, **self._default_params}, ) response_json = response.json() text = response_json["result"][0]["completion"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
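A minimal sketch of the ForefrontAI wrapper above; ``endpoint_url`` points at your deployed model and is left as a placeholder, and the API key may also be provided via ``FOREFRONTAI_API_KEY``.

.. code-block:: python

    from langchain.llms import ForefrontAI

    llm = ForefrontAI(
        endpoint_url="YOUR_ENDPOINT_URL",    # deployment-specific URL (placeholder)
        forefrontai_api_key="my-api-key",    # or set FOREFRONTAI_API_KEY
        temperature=0.7,
        length=128,
    )
    # _call posts {"text": prompt, **_default_params} to the endpoint.
    print(llm("Tell me a joke."))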
b420600881cb-0
Source code for langchain.llms.gooseai """Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class GooseAI(LLM): """Wrapper around GooseAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens."""
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
b420600881cb-1
presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not a default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try:
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
b420600881cb-2
) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params:
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
b420600881cb-3
if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
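A minimal sketch of the GooseAI wrapper above, assuming the ``openai`` package is installed; the key may also be set via ``GOOSEAI_API_KEY``, and any unknown keyword arguments are folded into ``model_kwargs`` by ``build_extra``.

.. code-block:: python

    from langchain.llms import GooseAI

    llm = GooseAI(
        model_name="gpt-neo-20b",
        gooseai_api_key="my-api-key",   # or set GOOSEAI_API_KEY
        temperature=0.7,
        max_tokens=128,
    )
    # _call routes through openai.Completion with api_base set to api.goose.ai.
    print(llm("Tell me a joke."))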
b4793aa3a2ac-0
Source code for langchain.llms.pipelineai """Wrapper around Pipeline Cloud API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class PipelineAI(LLM, BaseModel): """Wrapper around PipelineAI large language models. To use, you should have the ``pipeline-ai`` python package installed, and the environment variable ``PIPELINE_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain import PipelineAI pipeline = PipelineAI(pipeline_key="") """ pipeline_key: str = "" """The id or tag of the target pipeline""" pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any pipeline parameters valid for `create` call not explicitly specified.""" pipeline_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("pipeline_kwargs", {}) for field_name in list(values):
https://python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
b4793aa3a2ac-1
extra = values.get("pipeline_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to pipeline_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["pipeline_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" pipeline_api_key = get_from_dict_or_env( values, "pipeline_api_key", "PIPELINE_API_KEY" ) values["pipeline_api_key"] = pipeline_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"pipeline_key": self.pipeline_key}, **{"pipeline_kwargs": self.pipeline_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "pipeline_ai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to Pipeline Cloud endpoint.""" try: from pipeline import PipelineCloud except ImportError: raise ValueError( "Could not import pipeline-ai python package. " "Please install it with `pip install pipeline-ai`." )
https://python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
b4793aa3a2ac-2
"Please install it with `pip install pipeline-ai`." ) client = PipelineCloud(token=self.pipeline_api_key) params = self.pipeline_kwargs or {} run = client.run_pipeline(self.pipeline_key, [prompt, params]) try: text = run.result_preview[0][0] except AttributeError: raise AttributeError( f"A pipeline run should have a `result_preview` attribute." f"Run was: {run}" ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the pipeline parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
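A minimal sketch of the PipelineAI wrapper above; the pipeline key and extra kwarg are placeholders, and the API key may instead come from ``PIPELINE_API_KEY``.

.. code-block:: python

    from langchain import PipelineAI

    llm = PipelineAI(
        pipeline_key="my-pipeline-id-or-tag",   # id or tag of a deployed pipeline
        pipeline_api_key="my-api-key",          # or set PIPELINE_API_KEY
        pipeline_kwargs={"max_length": 64},     # assumed example of a pipeline parameter
    )
    # _call runs the pipeline via PipelineCloud.run_pipeline and reads result_preview.
    print(llm("Tell me a joke."))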
bc7f41b2811c-0
Source code for langchain.llms.huggingface_pipeline """Wrapper around HuggingFace Pipeline APIs.""" import importlib.util import logging from typing import Any, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") logger = logging.getLogger(__name__) [docs]class HuggingFacePipeline(LLM): """Wrapper around HuggingFace Pipeline API. To use, you should have the ``transformers`` python package installed. Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example using from_model_id: .. code-block:: python from langchain.llms import HuggingFacePipeline hf = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}, ) Example passing pipeline in directly: .. code-block:: python from langchain.llms import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) hf = HuggingFacePipeline(pipeline=pipe) """ pipeline: Any #: :meta private:
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
bc7f41b2811c-1
""" pipeline: Any #: :meta private: model_id: str = DEFAULT_MODEL_ID """Model name to use.""" model_kwargs: Optional[dict] = None """Key word arguments passed to the model.""" pipeline_kwargs: Optional[dict] = None """Key word arguments passed to the pipeline.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @classmethod def from_model_id( cls, model_id: str, task: str, device: int = -1, model_kwargs: Optional[dict] = None, pipeline_kwargs: Optional[dict] = None, **kwargs: Any, ) -> LLM: """Construct the pipeline object from model_id and task.""" try: from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, ) from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( "Could not import transformers python package. " "Please install it with `pip install transformers`." ) _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task in ("text2text-generation", "summarization"): model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, "
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
bc7f41b2811c-2
else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 (default) for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) if "trust_remote_code" in _model_kwargs: _model_kwargs = { k: v for k, v in _model_kwargs.items() if k != "trust_remote_code" } _pipeline_kwargs = pipeline_kwargs or {} pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, **_pipeline_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return cls(
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
bc7f41b2811c-3
) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, pipeline_kwargs=_pipeline_kwargs, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_id": self.model_id, "model_kwargs": self.model_kwargs, "pipeline_kwargs": self.pipeline_kwargs, } @property def _llm_type(self) -> str: return "huggingface_pipeline" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: response = self.pipeline(prompt) if self.pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.pipeline.task == "text2text-generation": text = response[0]["generated_text"] elif self.pipeline.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase.
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
bc7f41b2811c-4
By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
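A short sketch of using the HuggingFacePipeline wrapper above inside a simple chain; the LLMChain and PromptTemplate usage is an assumed illustration of how the wrapper plugs into the rest of langchain rather than part of this module.

.. code-block:: python

    from langchain import LLMChain, PromptTemplate
    from langchain.llms import HuggingFacePipeline

    hf = HuggingFacePipeline.from_model_id(
        model_id="gpt2",
        task="text-generation",
        device=-1,                               # -1 = CPU, or a CUDA device id
        pipeline_kwargs={"max_new_tokens": 20},
    )
    prompt = PromptTemplate.from_template("Question: {question}\nAnswer:")
    chain = LLMChain(llm=hf, prompt=prompt)
    print(chain.run(question="What is electroencephalography?"))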
e4588129be46-0
Source code for langchain.llms.self_hosted """Run model inference on self-hosted remote hardware.""" import importlib.util import logging import pickle from typing import Any, Callable, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) def _generate_text( pipeline: Any, prompt: str, *args: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: """Inference function to send to the remote hardware. Accepts a pipeline callable (or, more likely, a key pointing to the model on the cluster's object store) and returns text predictions for each document in the batch. """ text = pipeline(prompt, *args, **kwargs) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _send_pipeline_to_device(pipeline: Any, device: int) -> Any: """Send a pipeline to a device on the cluster.""" if isinstance(pipeline, str): with open(pipeline, "rb") as f: pipeline = pickle.load(f) if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0:
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
e4588129be46-1
) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline.device = torch.device(device) pipeline.model = pipeline.model.to(pipeline.device) return pipeline [docs]class SelfHostedPipeline(LLM): """Run model inference on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example for custom pipeline and inference functions: .. code-block:: python from langchain.llms import SelfHostedPipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def load_pipeline(): tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") return pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) def inference_fn(pipeline, prompt, stop = None): return pipeline(prompt)[0]["generated_text"] gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") llm = SelfHostedPipeline( model_load_fn=load_pipeline,
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
e4588129be46-2
llm = SelfHostedPipeline( model_load_fn=load_pipeline, hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn ) Example for <2GB model (can be serialized and sent directly to the server): .. code-block:: python from langchain.llms import SelfHostedPipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") my_model = ... llm = SelfHostedPipeline.from_pipeline( pipeline=my_model, hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Example passing model path for larger models: .. code-block:: python from langchain.llms import SelfHostedPipeline import runhouse as rh import pickle from transformers import pipeline generator = pipeline(model="gpt2") rh.blob(pickle.dumps(generator), path="models/pipeline.pkl" ).save().to(gpu, path="models") llm = SelfHostedPipeline.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) """ pipeline_ref: Any #: :meta private: client: Any #: :meta private: inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" hardware: Any """Remote hardware to send the inference function to.""" model_load_fn: Callable """Function to load the model remotely on the server.""" load_fn_kwargs: Optional[dict] = None
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
e4588129be46-3
load_fn_kwargs: Optional[dict] = None """Key word arguments to pass to the model load function.""" model_reqs: List[str] = ["./", "torch"] """Requirements to install on hardware to inference the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def __init__(self, **kwargs: Any): """Init the pipeline with an auxiliary function. The load function must be in global scope to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ super().__init__(**kwargs) try: import runhouse as rh except ImportError: raise ImportError( "Could not import runhouse python package. " "Please install it with `pip install runhouse`." ) remote_load_fn = rh.function(fn=self.model_load_fn).to( self.hardware, reqs=self.model_reqs ) _load_fn_kwargs = self.load_fn_kwargs or {} self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs) self.client = rh.function(fn=self.inference_fn).to( self.hardware, reqs=self.model_reqs ) [docs] @classmethod def from_pipeline( cls, pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any, ) -> LLM: """Init the SelfHostedPipeline from a pipeline object or string.""" if not isinstance(pipeline, str): logger.warning(
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
e4588129be46-4
if not isinstance(pipeline, str): logger.warning( "Serializing pipeline to send to remote hardware. " "Note, it can be quite slow" "to serialize and send large models with each execution. " "Consider sending the pipeline" "to the cluster and passing the path to the pipeline instead." ) load_fn_kwargs = {"pipeline": pipeline, "device": device} return cls( load_fn_kwargs=load_fn_kwargs, model_load_fn=_send_pipeline_to_device, hardware=hardware, model_reqs=["transformers", "torch"] + (model_reqs or []), **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"hardware": self.hardware}, } @property def _llm_type(self) -> str: return "self_hosted_llm" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
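A usage sketch of the ``from_pipeline`` path described above for small (<2GB) pipelines, assuming a runhouse cluster is available; the cluster name and instance type are placeholders for your own setup.

.. code-block:: python

    import runhouse as rh
    from langchain.llms import SelfHostedPipeline
    from transformers import pipeline

    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # placeholder cluster

    # Small pipelines can be pickled and shipped directly to the remote hardware.
    generator = pipeline("text-generation", model="gpt2", max_new_tokens=10)
    llm = SelfHostedPipeline.from_pipeline(
        pipeline=generator,
        hardware=gpu,
        model_reqs=["./", "torch", "transformers"],
    )
    print(llm("Once upon a time, "))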
8cd16f1d0856-0
Source code for langchain.llms.openlm

from typing import Any, Dict

from pydantic import root_validator

from langchain.llms.openai import BaseOpenAI


[docs]class OpenLM(BaseOpenAI):
    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        try:
            import openlm

            values["client"] = openlm.Completion
        except ImportError:
            raise ValueError(
                "Could not import openlm python package. "
                "Please install it with `pip install openlm`."
            )
        if values["streaming"]:
            raise ValueError("Streaming not supported with openlm")
        return values
https://python.langchain.com/en/latest/_modules/langchain/llms/openlm.html
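``OpenLM`` inherits everything except the client wiring from ``BaseOpenAI``, so it should accept the usual OpenAI-style parameters such as ``model_name`` and ``temperature``. A minimal usage sketch, assuming the package is installed, ``OpenLM`` is exported from ``langchain.llms`` in this version, and whatever provider API keys openlm routes to are set in the environment; the model name is illustrative.

.. code-block:: python

    from langchain.llms import OpenLM

    # Requires `pip install openlm`; streaming is rejected by validate_environment above.
    llm = OpenLM(model_name="text-davinci-003", temperature=0.0)
    print(llm("Say hello in five words."))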
8a503fc0171c-0
Source code for langchain.llms.rwkv

"""Wrapper for the RWKV model.

Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
         https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
"""
from typing import Any, Dict, List, Mapping, Optional, Set

from pydantic import BaseModel, Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens


[docs]class RWKV(LLM, BaseModel):
    r"""Wrapper around RWKV language models.

    To use, you should have the ``rwkv`` python package installed, the
    pre-trained model file, and the model's config information.

    Example:
        .. code-block:: python

            from langchain.llms import RWKV
            model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")

            # Simplest invocation
            response = model("Once upon a time, ")
    """

    model: str
    """Path to the pre-trained RWKV model file."""

    tokens_path: str
    """Path to the RWKV tokens file."""

    strategy: str = "cpu fp32"
    """The rwkv strategy to run the model with, e.g. "cpu fp32"."""

    rwkv_verbose: bool = True
    """Print debug information."""

    temperature: float = 1.0
    """The temperature to use for sampling."""

    top_p: float = 0.5
    """The top-p value to use for sampling."""

    penalty_alpha_frequency: float = 0.4
    """Positive values penalize new tokens based on their existing frequency
https://python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
8a503fc0171c-1
"""Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim..""" penalty_alpha_presence: float = 0.4 """Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics..""" CHUNK_LEN: int = 256 """Batch size for prompt processing.""" max_tokens_per_generation: int = 256 """Maximum number of tokens to generate.""" client: Any = None #: :meta private: tokenizer: Any = None #: :meta private: pipeline: Any = None #: :meta private: model_tokens: Any = None #: :meta private: model_state: Any = None #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @property def _default_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "verbose": self.verbose, "top_p": self.top_p, "temperature": self.temperature, "penalty_alpha_frequency": self.penalty_alpha_frequency, "penalty_alpha_presence": self.penalty_alpha_presence, "CHUNK_LEN": self.CHUNK_LEN, "max_tokens_per_generation": self.max_tokens_per_generation, } @staticmethod def _rwkv_param_names() -> Set[str]: """Get the identifying parameters.""" return { "verbose", } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment."""
https://python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
8a503fc0171c-2
"""Validate that the python package exists in the environment.""" try: import tokenizers except ImportError: raise ImportError( "Could not import tokenizers python package. " "Please install it with `pip install tokenizers`." ) try: from rwkv.model import RWKV as RWKVMODEL from rwkv.utils import PIPELINE values["tokenizer"] = tokenizers.Tokenizer.from_file(values["tokens_path"]) rwkv_keys = cls._rwkv_param_names() model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys} model_kwargs["verbose"] = values["rwkv_verbose"] values["client"] = RWKVMODEL( values["model"], strategy=values["strategy"], **model_kwargs ) values["pipeline"] = PIPELINE(values["client"], values["tokens_path"]) except ImportError: raise ValueError( "Could not import rwkv python package. " "Please install it with `pip install rwkv`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model": self.model, **self._default_params, **{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()}, } @property def _llm_type(self) -> str: """Return the type of llm.""" return "rwkv-4" def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any: AVOID_REPEAT_TOKENS = []
https://python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
8a503fc0171c-3
        AVOID_REPEAT_TOKENS = []
        AVOID_REPEAT = ",:?!"
        for i in AVOID_REPEAT:
            dd = self.pipeline.encode(i)
            assert len(dd) == 1
            AVOID_REPEAT_TOKENS += dd

        tokens = [int(x) for x in _tokens]
        self.model_tokens += tokens

        out: Any = None

        while len(tokens) > 0:
            out, self.model_state = self.client.forward(
                tokens[: self.CHUNK_LEN], self.model_state
            )
            tokens = tokens[self.CHUNK_LEN :]

        END_OF_LINE = 187
        out[END_OF_LINE] += newline_adj  # adjust \n probability

        if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
            out[self.model_tokens[-1]] = -999999999
        return out

    def rwkv_generate(self, prompt: str) -> str:
        self.model_state = None
        self.model_tokens = []
        logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
        begin = len(self.model_tokens)
        out_last = begin

        occurrence: Dict = {}

        decoded = ""
        for i in range(self.max_tokens_per_generation):
            for n in occurrence:
                logits[n] -= (
                    self.penalty_alpha_presence
                    + occurrence[n] * self.penalty_alpha_frequency
                )
            token = self.pipeline.sample_logits(
                logits, temperature=self.temperature, top_p=self.top_p
            )

            END_OF_TEXT = 0
            if token == END_OF_TEXT:
                break
            if token not in occurrence:
                occurrence[token] = 1
            else:
                occurrence[token] += 1

            logits = self.run_rnn([token])
https://python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
8a503fc0171c-4
                occurrence[token] += 1

            logits = self.run_rnn([token])
            xxx = self.tokenizer.decode(self.model_tokens[out_last:])
            if "\ufffd" not in xxx:  # avoid utf-8 display issues
                decoded += xxx
                out_last = begin + i + 1

                if i >= self.max_tokens_per_generation - 100:
                    break

        return decoded

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        r"""RWKV generation

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "Once upon a time, "
                response = model(prompt, n_predict=55)
        """
        text = self.rwkv_generate(prompt)

        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text
https://python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
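A usage sketch tying together the fields declared above; the model and tokenizer paths are placeholders for files you have downloaded separately, and the ``stop`` list is applied after generation via ``enforce_stop_tokens`` in ``_call``.

.. code-block:: python

    from langchain.llms import RWKV

    model = RWKV(
        model="./models/rwkv-model.bin",            # placeholder path to a model file
        tokens_path="./models/20B_tokenizer.json",  # placeholder path to the tokens file
        strategy="cpu fp32",
        temperature=1.0,
        top_p=0.5,
        max_tokens_per_generation=128,
    )

    # Generation itself only stops early at the END_OF_TEXT token; the stop
    # strings are trimmed from the returned text afterwards.
    response = model("Once upon a time, ", stop=["\n\n"])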
bdc5d3131f6c-0
Source code for langchain.llms.ctransformers

"""Wrapper around the C Transformers library."""
from typing import Any, Dict, Optional, Sequence

from pydantic import root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


[docs]class CTransformers(LLM):
    """Wrapper around the C Transformers LLM interface.

    To use, you should have the ``ctransformers`` python package installed.
    See https://github.com/marella/ctransformers

    Example:
        .. code-block:: python

            from langchain.llms import CTransformers

            llm = CTransformers(model="/path/to/ggml-gpt-2.bin", model_type="gpt2")
    """

    client: Any  #: :meta private:

    model: str
    """The path to a model file or directory or the name of a Hugging Face Hub
    model repo."""

    model_type: Optional[str] = None
    """The model type."""

    model_file: Optional[str] = None
    """The name of the model file in repo or directory."""

    config: Optional[Dict[str, Any]] = None
    """The config parameters.
    See https://github.com/marella/ctransformers#config"""

    lib: Optional[str] = None
    """The path to a shared library or one of `avx2`, `avx`, `basic`."""

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            "model_type": self.model_type,
            "model_file": self.model_file,
            "config": self.config,
        }

    @property
https://python.langchain.com/en/latest/_modules/langchain/llms/ctransformers.html
bdc5d3131f6c-1
"config": self.config, } @property def _llm_type(self) -> str: """Return type of llm.""" return "ctransformers" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that ``ctransformers`` package is installed.""" try: from ctransformers import AutoModelForCausalLM except ImportError: raise ImportError( "Could not import `ctransformers` package. " "Please install it with `pip install ctransformers`" ) config = values["config"] or {} values["client"] = AutoModelForCausalLM.from_pretrained( values["model"], model_type=values["model_type"], model_file=values["model_file"], lib=values["lib"], **config, ) return values def _call( self, prompt: str, stop: Optional[Sequence[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of sequences to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python response = llm("Tell me a joke.") """ text = [] _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager() for chunk in self.client(prompt, stop=stop, stream=True): text.append(chunk)
https://python.langchain.com/en/latest/_modules/langchain/llms/ctransformers.html
bdc5d3131f6c-2
            text.append(chunk)
            _run_manager.on_llm_new_token(chunk, verbose=self.verbose)
        return "".join(text)
https://python.langchain.com/en/latest/_modules/langchain/llms/ctransformers.html
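Because ``_call`` streams tokens and forwards each chunk to ``on_llm_new_token``, a streaming callback handler can print tokens as they arrive. The sketch below assumes the standard ``StreamingStdOutCallbackHandler`` is available in this version of langchain; the model repo name is illustrative, and the config keys are taken from the ctransformers config table linked above.

.. code-block:: python

    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    from langchain.llms import CTransformers

    # Generation parameters passed through to ctransformers (see its config docs).
    config = {"max_new_tokens": 256, "temperature": 0.8}

    llm = CTransformers(
        model="marella/gpt-2-ggml",  # illustrative Hugging Face Hub repo
        model_type="gpt2",
        config=config,
        callbacks=[StreamingStdOutCallbackHandler()],
    )

    print(llm("AI is going to"))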
d04aca0fd0ca-0
Source code for langchain.llms.petals

"""Wrapper around Petals API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


[docs]class Petals(LLM):
    """Wrapper around Petals Bloom models.

    To use, you should have the ``petals`` python package installed, and the
    environment variable ``HUGGINGFACE_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import petals
            petals = Petals()

    """

    client: Any
    """The client to use for the API calls."""

    tokenizer: Any
    """The tokenizer to use for the API calls."""

    model_name: str = "bigscience/bloom-petals"
    """The model to use."""

    temperature: float = 0.7
    """What sampling temperature to use"""

    max_new_tokens: int = 256
    """The maximum number of new tokens to generate in the completion."""

    top_p: float = 0.9
    """The cumulative probability for top-p sampling."""

    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens
    to keep for top-k-filtering."""

    do_sample: bool = True
    """Whether or not to use sampling; use greedy decoding otherwise."""
https://python.langchain.com/en/latest/_modules/langchain/llms/petals.html
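A construction sketch using the fields declared above. The wrapper reads ``HUGGINGFACE_API_KEY`` from the environment, per the docstring; the token below is a placeholder, and the capitalised ``Petals`` class import is assumed here (the lowercase ``petals`` in the docstring example appears to be an upstream typo).

.. code-block:: python

    import os

    from langchain.llms import Petals

    os.environ["HUGGINGFACE_API_KEY"] = "hf_..."  # placeholder token

    llm = Petals(
        model_name="bigscience/bloom-petals",
        temperature=0.7,
        max_new_tokens=128,
    )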