Columns: id (string, 14-16 chars), text (string, 45-2.05k chars), source (string, 53-111 chars)
52aa80b12a88-3
# Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) elif file_path.suffix == ".py": spec = importlib.util.spec_from_loader( "prompt", loader=None, origin=str(file_path) ) if spec is None: raise ValueError("could not load spec") helper = importlib.util.module_from_spec(spec) with open(file_path, "rb") as f: exec(f.read(), helper.__dict__) if not isinstance(helper.PROMPT, BasePromptTemplate): raise ValueError("Did not get object of type BasePromptTemplate.") return helper.PROMPT else: raise ValueError(f"Got unsupported file type {file_path.suffix}") # Load the prompt from the config now. return load_prompt_from_config(config) type_to_loader_dict = { "prompt": _load_prompt, "few_shot": _load_few_shot_prompt, # "few_shot_with_templates": _load_few_shot_with_templates_prompt, }
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/loading.html
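A minimal sketch of exercising the suffix-based loader above by round-tripping a prompt through JSON; the file name and template text are illustrative, not from the source:

.. code-block:: python

    from langchain.prompts import PromptTemplate, load_prompt

    # Save a simple prompt, then load it back; load_prompt dispatches on the
    # ".json" suffix and finishes via load_prompt_from_config.
    prompt = PromptTemplate(
        input_variables=["adjective"], template="Tell me a {adjective} joke."
    )
    prompt.save("joke_prompt.json")
    reloaded = load_prompt("joke_prompt.json")
    assert reloaded == prompt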
346cb8d61943-0
Source code for langchain.prompts.few_shot_with_templates """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, ) from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate [docs]class FewShotPromptWithTemplates(StringPromptTemplate, BaseModel): """Prompt template that contains few shot examples.""" examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: StringPromptTemplate """A PromptTemplate to put after the examples.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: Optional[StringPromptTemplate] = None """A PromptTemplate to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
346cb8d61943-1
"""Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix and input variables are consistent.""" if values["validate_template"]: input_variables = values["input_variables"] expected_input_variables = set(values["suffix"].input_variables) expected_input_variables |= set(values["partial_variables"]) if values["prefix"] is not None: expected_input_variables |= set(values["prefix"].input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: raise ValueError( f"Got input_variables={input_variables}, but based on " f"prefix/suffix expected {expected_input_variables}" ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args:
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
346cb8d61943-2
"""Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. examples = self._get_examples(**kwargs) # Format the examples. example_strings = [ self.example_prompt.format(**example) for example in examples ] # Create the overall prefix. if self.prefix is None: prefix = "" else: prefix_kwargs = { k: v for k, v in kwargs.items() if k in self.prefix.input_variables } for k in prefix_kwargs.keys(): kwargs.pop(k) prefix = self.prefix.format(**prefix_kwargs) # Create the overall suffix suffix_kwargs = { k: v for k, v in kwargs.items() if k in self.suffix.input_variables } for k in suffix_kwargs.keys(): kwargs.pop(k) suffix = self.suffix.format( **suffix_kwargs, ) pieces = [prefix, *example_strings, suffix] template = self.example_separator.join([piece for piece in pieces if piece]) # Format the template with the input variables. return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "few_shot_with_templates" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the prompt.""" if self.example_selector:
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
346cb8d61943-3
"""Return a dictionary of the prompt.""" if self.example_selector: raise ValueError("Saving an example selector is not currently supported") return super().dict(**kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
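A usage sketch for FewShotPromptWithTemplates built only from pieces shown above; the example data is illustrative:

.. code-block:: python

    from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates
    from langchain.prompts.prompt import PromptTemplate

    example_prompt = PromptTemplate(
        input_variables=["word", "antonym"], template="Word: {word}\nAntonym: {antonym}"
    )
    prompt = FewShotPromptWithTemplates(
        examples=[{"word": "happy", "antonym": "sad"}],
        example_prompt=example_prompt,
        prefix=PromptTemplate(input_variables=[], template="Give the antonym of every input."),
        suffix=PromptTemplate(input_variables=["input"], template="Word: {input}\nAntonym:"),
        input_variables=["input"],
    )
    # Prefix, formatted examples, and suffix are joined by example_separator ("\n\n").
    print(prompt.format(input="big"))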
bb0688444125-0
Source code for langchain.prompts.chat """Chat prompt template.""" from __future__ import annotations from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, List, Sequence, Tuple, Type, Union from pydantic import BaseModel, Field from langchain.memory.buffer import get_buffer_string from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( AIMessage, BaseMessage, ChatMessage, HumanMessage, PromptValue, SystemMessage, ) class BaseMessagePromptTemplate(BaseModel, ABC): @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To messages.""" @property @abstractmethod def input_variables(self) -> List[str]: """Input variables for this prompt template.""" [docs]class MessagesPlaceholder(BaseMessagePromptTemplate): """Prompt template that assumes variable is already list of messages.""" variable_name: str [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To a BaseMessage.""" value = kwargs[self.variable_name] if not isinstance(value, list): raise ValueError( f"variable {self.variable_name} should be a list of base messages, " f"got {value}" ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f"variable {self.variable_name} should be a list of base messages," f" got {value}" ) return value @property def input_variables(self) -> List[str]: """Input variables for this prompt template."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/chat.html
bb0688444125-1
def input_variables(self) -> List[str]: """Input variables for this prompt template.""" return [self.variable_name] class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC): prompt: StringPromptTemplate additional_kwargs: dict = Field(default_factory=dict) @classmethod def from_template(cls, template: str, **kwargs: Any) -> BaseMessagePromptTemplate: prompt = PromptTemplate.from_template(template) return cls(prompt=prompt, **kwargs) @abstractmethod def format(self, **kwargs: Any) -> BaseMessage: """To a BaseMessage.""" def format_messages(self, **kwargs: Any) -> List[BaseMessage]: return [self.format(**kwargs)] @property def input_variables(self) -> List[str]: return self.prompt.input_variables class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate): role: str def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs) return ChatMessage( content=text, role=self.role, additional_kwargs=self.additional_kwargs ) class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate): def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs) return HumanMessage(content=text, additional_kwargs=self.additional_kwargs) class AIMessagePromptTemplate(BaseStringMessagePromptTemplate): def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs) return AIMessage(content=text, additional_kwargs=self.additional_kwargs) class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate): def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs)
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/chat.html
bb0688444125-2
text = self.prompt.format(**kwargs) return SystemMessage(content=text, additional_kwargs=self.additional_kwargs) class ChatPromptValue(PromptValue): messages: List[BaseMessage] def to_string(self) -> str: """Return prompt as string.""" return get_buffer_string(self.messages) def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" return self.messages [docs]class ChatPromptTemplate(BasePromptTemplate, ABC): input_variables: List[str] messages: List[Union[BaseMessagePromptTemplate, BaseMessage]] @classmethod def from_role_strings( cls, string_messages: List[Tuple[str, str]] ) -> ChatPromptTemplate: messages = [ ChatMessagePromptTemplate( content=PromptTemplate.from_template(template), role=role ) for role, template in string_messages ] return cls.from_messages(messages) @classmethod def from_strings( cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]] ) -> ChatPromptTemplate: messages = [ role(content=PromptTemplate.from_template(template)) for role, template in string_messages ] return cls.from_messages(messages) @classmethod def from_messages( cls, messages: Sequence[Union[BaseMessagePromptTemplate, BaseMessage]] ) -> ChatPromptTemplate: input_vars = set() for message in messages: if isinstance(message, BaseMessagePromptTemplate): input_vars.update(message.input_variables) return cls(input_variables=list(input_vars), messages=messages) [docs] def format(self, **kwargs: Any) -> str: return self.format_prompt(**kwargs).to_string()
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/chat.html
bb0688444125-3
return self.format_prompt(**kwargs).to_string() [docs] def format_prompt(self, **kwargs: Any) -> PromptValue: kwargs = self._merge_partial_and_user_variables(**kwargs) result = [] for message_template in self.messages: if isinstance(message_template, BaseMessage): result.extend([message_template]) elif isinstance(message_template, BaseMessagePromptTemplate): rel_params = { k: v for k, v in kwargs.items() if k in message_template.input_variables } message = message_template.format_messages(**rel_params) result.extend(message) else: raise ValueError(f"Unexpected input: {message_template}") return ChatPromptValue(messages=result) [docs] def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate: raise NotImplementedError @property def _prompt_type(self) -> str: raise NotImplementedError [docs] def save(self, file_path: Union[Path, str]) -> None: raise NotImplementedError
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/chat.html
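The chat template classes above compose per-message templates into a full message list; a brief sketch (the wording of the templates is illustrative):

.. code-block:: python

    from langchain.prompts.chat import (
        ChatPromptTemplate,
        HumanMessagePromptTemplate,
        SystemMessagePromptTemplate,
    )

    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(
            "You translate {input_language} to {output_language}."
        ),
        HumanMessagePromptTemplate.from_template("{text}"),
    ])
    prompt_value = chat_prompt.format_prompt(
        input_language="English", output_language="French", text="I love programming."
    )
    messages = prompt_value.to_messages()  # [SystemMessage(...), HumanMessage(...)]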
61a389170bbe-0
Source code for langchain.prompts.example_selector.semantic_similarity """Example selector that selects examples based on SemanticSimilarity.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Type from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.vectorstores.base import VectorStore def sorted_values(values: Dict[str, str]) -> List[Any]: """Return a list of values in dict sorted by key.""" return [values[val] for val in sorted(values)] [docs]class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel): """Example selector that selects examples based on SemanticSimilarity.""" vectorstore: VectorStore """VectorStore that contains information about examples.""" k: int = 4 """Number of examples to select.""" example_keys: Optional[List[str]] = None """Optional keys to filter examples to.""" input_keys: Optional[List[str]] = None """Optional keys to filter input to. If provided, the search is based on the input variables instead of all variables.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] def add_example(self, example: Dict[str, str]) -> str: """Add new example to vectorstore.""" if self.input_keys: string_example = " ".join( sorted_values({key: example[key] for key in self.input_keys}) ) else: string_example = " ".join(sorted_values(example)) ids = self.vectorstore.add_texts([string_example], metadatas=[example]) return ids[0]
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
61a389170bbe-1
return ids[0] [docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on semantic similarity.""" # Get the docs with the highest similarity. if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} query = " ".join(sorted_values(input_variables)) example_docs = self.vectorstore.similarity_search(query, k=self.k) # Get the examples from the metadata. # This assumes that examples are stored in metadata. examples = [dict(e.metadata) for e in example_docs] # If example keys are provided, filter examples to those keys. if self.example_keys: examples = [{k: eg[k] for k in self.example_keys} for eg in examples] return examples [docs] @classmethod def from_examples( cls, examples: List[dict], embeddings: Embeddings, vectorstore_cls: Type[VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, **vectorstore_cls_kwargs: Any, ) -> SemanticSimilarityExampleSelector: """Create k-shot example selector using example list and embeddings. Reshuffles examples dynamically based on query similarity. Args: examples: List of examples to use in the prompt. embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). vectorstore_cls: A vector store DB interface class, e.g. FAISS. k: Number of examples to select input_keys: If provided, the search is based on the input variables instead of all variables.
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
61a389170bbe-2
instead of all variables. vectorstore_cls_kwargs: optional kwargs containing url for vector store Returns: The ExampleSelector instantiated, backed by a vector store. """ if input_keys: string_examples = [ " ".join(sorted_values({k: eg[k] for k in input_keys})) for eg in examples ] else: string_examples = [" ".join(sorted_values(eg)) for eg in examples] vectorstore = vectorstore_cls.from_texts( string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs ) return cls(vectorstore=vectorstore, k=k, input_keys=input_keys) [docs]class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector, BaseModel): """ExampleSelector that selects examples based on Max Marginal Relevance. This was shown to improve performance in this paper: https://arxiv.org/pdf/2211.13892.pdf """ fetch_k: int = 20 """Number of examples to fetch to rerank.""" [docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on semantic similarity.""" # Get the docs with the highest similarity. if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} query = " ".join(sorted_values(input_variables)) example_docs = self.vectorstore.max_marginal_relevance_search( query, k=self.k, fetch_k=self.fetch_k ) # Get the examples from the metadata. # This assumes that examples are stored in metadata. examples = [dict(e.metadata) for e in example_docs]
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
61a389170bbe-3
examples = [dict(e.metadata) for e in example_docs] # If example keys are provided, filter examples to those keys. if self.example_keys: examples = [{k: eg[k] for k in self.example_keys} for eg in examples] return examples [docs] @classmethod def from_examples( cls, examples: List[dict], embeddings: Embeddings, vectorstore_cls: Type[VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, fetch_k: int = 20, **vectorstore_cls_kwargs: Any, ) -> MaxMarginalRelevanceExampleSelector: """Create k-shot example selector using example list and embeddings. Reshuffles examples dynamically based on query similarity. Args: examples: List of examples to use in the prompt. embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). vectorstore_cls: A vector store DB interface class, e.g. FAISS. k: Number of examples to select input_keys: If provided, the search is based on the input variables instead of all variables. vectorstore_cls_kwargs: optional kwargs containing url for vector store Returns: The ExampleSelector instantiated, backed by a vector store. """ if input_keys: string_examples = [ " ".join(sorted_values({k: eg[k] for k in input_keys})) for eg in examples ] else: string_examples = [" ".join(sorted_values(eg)) for eg in examples] vectorstore = vectorstore_cls.from_texts( string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs )
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
61a389170bbe-4
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs ) return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
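A sketch of building the selector above from a list of examples; it assumes the optional openai and faiss dependencies (and an OpenAI API key) are available, which is not stated in the source:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.prompts.example_selector.semantic_similarity import (
        SemanticSimilarityExampleSelector,
    )
    from langchain.vectorstores import FAISS

    examples = [
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
        {"input": "sunny", "output": "gloomy"},
    ]
    selector = SemanticSimilarityExampleSelector.from_examples(
        examples, OpenAIEmbeddings(), FAISS, k=2
    )
    # Each example is embedded as its sorted values joined by spaces and kept as
    # vector-store metadata; selection runs a similarity search on the input.
    selector.select_examples({"input": "enthusiastic"})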
99094701ee88-0
Source code for langchain.prompts.example_selector.length_based """Select examples based on length.""" import re from typing import Callable, Dict, List from pydantic import BaseModel, validator from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate def _get_length_based(text: str) -> int: return len(re.split("\n| ", text)) [docs]class LengthBasedExampleSelector(BaseExampleSelector, BaseModel): """Select examples based on length.""" examples: List[dict] """A list of the examples that the prompt template expects.""" example_prompt: PromptTemplate """Prompt template used to format the examples.""" get_text_length: Callable[[str], int] = _get_length_based """Function to measure prompt length. Defaults to word count.""" max_length: int = 2048 """Max length for the prompt, beyond which examples are cut.""" example_text_lengths: List[int] = [] #: :meta private: [docs] def add_example(self, example: Dict[str, str]) -> None: """Add new example to list.""" self.examples.append(example) string_example = self.example_prompt.format(**example) self.example_text_lengths.append(self.get_text_length(string_example)) @validator("example_text_lengths", always=True) def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]: """Calculate text lengths if they don't exist.""" # Check if text lengths were passed in if v: return v # If they were not, calculate them example_prompt = values["example_prompt"] get_text_length = values["get_text_length"]
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/example_selector/length_based.html
99094701ee88-1
get_text_length = values["get_text_length"] string_examples = [example_prompt.format(**eg) for eg in values["examples"]] return [get_text_length(eg) for eg in string_examples] [docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on the input lengths.""" inputs = " ".join(input_variables.values()) remaining_length = self.max_length - self.get_text_length(inputs) i = 0 examples = [] while remaining_length > 0 and i < len(self.examples): new_length = remaining_length - self.example_text_lengths[i] if new_length < 0: break else: examples.append(self.examples[i]) remaining_length = new_length i += 1 return examples
https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/example_selector/length_based.html
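A short sketch of the length-based selector above; the example data and max_length value are illustrative:

.. code-block:: python

    from langchain.prompts.example_selector.length_based import LengthBasedExampleSelector
    from langchain.prompts.prompt import PromptTemplate

    example_prompt = PromptTemplate(
        input_variables=["input", "output"], template="Input: {input}\nOutput: {output}"
    )
    selector = LengthBasedExampleSelector(
        examples=[
            {"input": "happy", "output": "sad"},
            {"input": "tall", "output": "short"},
        ],
        example_prompt=example_prompt,
        max_length=25,  # budget in whitespace-separated words (see _get_length_based)
    )
    # Examples are added in order until the remaining budget would go negative.
    selector.select_examples({"adjective": "big"})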
8dff7613211d-0
Source code for langchain.llms.huggingface_hub """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "gpt2" VALID_TASKS = ("text2text-generation", "text-generation") [docs]class HuggingFaceHub(LLM, BaseModel): """Wrapper around HuggingFaceHub models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceHub hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key") """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_hub.html
8dff7613211d-1
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exist in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"repo_id": self.repo_id, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_hub" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_hub.html
8dff7613211d-2
Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} response = self.client(inputs=prompt, params=_model_kwargs) if "error" in response: raise ValueError(f"Error raised by inference API: {response['error']}") if self.client.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.client.task == "text2text-generation": text = response[0]["generated_text"] else: raise ValueError( f"Got invalid task {self.client.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_hub.html
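A usage sketch for the HuggingFaceHub wrapper; the repo_id and model_kwargs below are illustrative and assume a valid HUGGINGFACEHUB_API_TOKEN plus `pip install huggingface_hub`:

.. code-block:: python

    from langchain.llms import HuggingFaceHub

    hf = HuggingFaceHub(
        repo_id="google/flan-t5-small",  # assumed text2text-generation model
        model_kwargs={"temperature": 0.5, "max_length": 64},
    )
    # stop sequences are applied client-side via enforce_stop_tokens.
    print(hf("Translate to German: How are you?", stop=["\n"]))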
802aa6894444-0
Source code for langchain.llms.self_hosted """Run model inference on self-hosted remote hardware.""" import importlib.util import logging import pickle from typing import Any, Callable, List, Mapping, Optional from pydantic import BaseModel, Extra from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens logger = logging.getLogger() def _generate_text( pipeline: Any, prompt: str, *args: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: """Inference function to send to the remote hardware. Accepts a pipeline callable (or, more likely, a key pointing to the model on the cluster's object store) and returns text predictions for each document in the batch. """ text = pipeline(prompt, *args, **kwargs) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _send_pipeline_to_device(pipeline: Any, device: int) -> Any: """Send a pipeline to a device on the cluster.""" if isinstance(pipeline, str): with open(pipeline, "rb") as f: pipeline = pickle.load(f) if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning(
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted.html
802aa6894444-1
if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline.device = torch.device(device) pipeline.model = pipeline.model.to(pipeline.device) return pipeline [docs]class SelfHostedPipeline(LLM, BaseModel): """Run model inference on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example for custom pipeline and inference functions: .. code-block:: python from langchain.llms import SelfHostedPipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def load_pipeline(): tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") return pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) def inference_fn(pipeline, prompt, stop = None): return pipeline(prompt)[0]["generated_text"] gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") llm = SelfHostedPipeline( model_load_fn=load_pipeline,
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted.html
802aa6894444-2
llm = SelfHostedPipeline( model_load_fn=load_pipeline, hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn ) Example for <2GB model (can be serialized and sent directly to the server): .. code-block:: python from langchain.llms import SelfHostedPipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") my_model = ... llm = SelfHostedPipeline.from_pipeline( pipeline=my_model, hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Example passing model path for larger models: .. code-block:: python from langchain.llms import SelfHostedPipeline import runhouse as rh import pickle from transformers import pipeline generator = pipeline(model="gpt2") rh.blob(pickle.dumps(generator), path="models/pipeline.pkl" ).save().to(gpu, path="models") llm = SelfHostedPipeline.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) """ pipeline_ref: Any #: :meta private: client: Any #: :meta private: inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" hardware: Any """Remote hardware to send the inference function to.""" model_load_fn: Callable """Function to load the model remotely on the server.""" load_fn_kwargs: Optional[dict] = None
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted.html
802aa6894444-3
load_fn_kwargs: Optional[dict] = None """Key word arguments to pass to the model load function.""" model_reqs: List[str] = ["./", "torch"] """Requirements to install on hardware to inference the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def __init__(self, **kwargs: Any): """Init the pipeline with an auxiliary function. The load function must be in global scope to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ super().__init__(**kwargs) try: import runhouse as rh except ImportError: raise ValueError( "Could not import runhouse python package. " "Please install it with `pip install runhouse`." ) remote_load_fn = rh.function(fn=self.model_load_fn).to( self.hardware, reqs=self.model_reqs ) _load_fn_kwargs = self.load_fn_kwargs or {} self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs) self.client = rh.function(fn=self.inference_fn).to( self.hardware, reqs=self.model_reqs ) [docs] @classmethod def from_pipeline( cls, pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any, ) -> LLM: """Init the SelfHostedPipeline from a pipeline object or string.""" if not isinstance(pipeline, str): logger.warning(
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted.html
802aa6894444-4
if not isinstance(pipeline, str): logger.warning( "Serializing pipeline to send to remote hardware. " "Note, it can be quite slow" "to serialize and send large models with each execution. " "Consider sending the pipeline" "to the cluster and passing the path to the pipeline instead." ) load_fn_kwargs = {"pipeline": pipeline, "device": device} return cls( load_fn_kwargs=load_fn_kwargs, model_load_fn=_send_pipeline_to_device, hardware=hardware, model_reqs=["transformers", "torch"] + (model_reqs or []), **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"hardware": self.hardware}, } @property def _llm_type(self) -> str: return "self_hosted_llm" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop)
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted.html
0520fbcec953-0
Source code for langchain.llms.writer """Wrapper around Writer APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class Writer(LLM, BaseModel): """Wrapper around Writer large language models. To use, you should have the environment variable ``WRITER_API_KEY`` set with your API key. Example: .. code-block:: python from langchain import Writer writer = Writer(model_id="palmyra-base") """ model_id: str = "palmyra-base" """Model name to use.""" tokens_to_generate: int = 24 """Max number of tokens to generate.""" logprobs: bool = False """Whether to return log probabilities.""" temperature: float = 1.0 """What sampling temperature to use.""" length: int = 256 """The maximum number of tokens to generate in the completion.""" top_p: float = 1.0 """Total probability mass of tokens to consider at each step.""" top_k: int = 1 """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" repetition_penalty: float = 1.0 """Penalizes repeated tokens according to frequency.""" random_seed: int = 0 """The model generates random results. Changing the random seed alone will produce a different response with similar characteristics. It is possible to reproduce results by fixing the random seed (assuming all other hyperparameters are also fixed)"""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/writer.html
0520fbcec953-1
by fixing the random seed (assuming all other hyperparameters are also fixed)""" beam_search_diversity_rate: float = 1.0 """Only applies to beam search, i.e. when the beam width is >1. A higher value encourages beam search to return a more diverse set of candidates""" beam_width: Optional[int] = None """The number of concurrent candidates to keep track of during beam search""" length_pentaly: float = 1.0 """Only applies to beam search, i.e. when the beam width is >1. Larger values penalize long candidates more heavily, thus preferring shorter candidates""" writer_api_key: Optional[str] = None stop: Optional[List[str]] = None """Sequences when completion generation will stop""" base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" writer_api_key = get_from_dict_or_env( values, "writer_api_key", "WRITER_API_KEY" ) values["writer_api_key"] = writer_api_key return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Writer API.""" return { "tokens_to_generate": self.tokens_to_generate, "stop": self.stop, "logprobs": self.logprobs, "temperature": self.temperature, "top_p": self.top_p,
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/writer.html
0520fbcec953-2
"temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, "random_seed": self.random_seed, "beam_search_diversity_rate": self.beam_search_diversity_rate, "beam_width": self.beam_width, "length_pentaly": self.length_pentaly, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_id": self.model_id}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "writer" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to Writer's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = Writer("Tell me a joke.") """ if self.base_url is not None: base_url = self.base_url else: base_url = ( "https://api.llm.writer.com/v1/models/{self.model_id}/completions" ) response = requests.post( url=base_url, headers={ "Authorization": f"Bearer {self.writer_api_key}", "Content-Type": "application/json", "Accept": "application/json", }, json={"prompt": prompt, **self._default_params}, )
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/writer.html
0520fbcec953-3
}, json={"prompt": prompt, **self._default_params}, ) text = response.text if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/writer.html
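A brief sketch of calling the Writer wrapper above; it assumes WRITER_API_KEY is set and that palmyra-base is an available model, as in the class docstring:

.. code-block:: python

    from langchain import Writer

    writer = Writer(
        model_id="palmyra-base",
        tokens_to_generate=64,
        temperature=0.7,
    )
    print(writer("Write a tagline for an ice cream shop."))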
b8326de33634-0
Source code for langchain.llms.forefrontai """Wrapper around ForefrontAI APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class ForefrontAI(LLM, BaseModel): """Wrapper around ForefrontAI large language models. To use, you should have the environment variable ``FOREFRONTAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import ForefrontAI forefrontai = ForefrontAI(endpoint_url="") """ endpoint_url: str = "" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" length: int = 256 """The maximum number of tokens to generate in the completion.""" top_p: float = 1.0 """Total probability mass of tokens to consider at each step.""" top_k: int = 40 """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" repetition_penalty: int = 1 """Penalizes repeated tokens according to frequency.""" forefrontai_api_key: Optional[str] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/forefrontai.html
b8326de33634-1
"""Validate that api key exists in environment.""" forefrontai_api_key = get_from_dict_or_env( values, "forefrontai_api_key", "FOREFRONTAI_API_KEY" ) values["forefrontai_api_key"] = forefrontai_api_key return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling ForefrontAI API.""" return { "temperature": self.temperature, "length": self.length, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"endpoint_url": self.endpoint_url}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "forefrontai" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to ForefrontAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ForefrontAI("Tell me a joke.") """ response = requests.post( url=self.endpoint_url, headers={ "Authorization": f"Bearer {self.forefrontai_api_key}", "Content-Type": "application/json", }, json={"text": prompt, **self._default_params},
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/forefrontai.html
b8326de33634-2
}, json={"text": prompt, **self._default_params}, ) response_json = response.json() text = response_json["result"][0]["completion"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/forefrontai.html
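A usage sketch for the ForefrontAI wrapper; the endpoint URL is a placeholder for your own deployment and FOREFRONTAI_API_KEY is assumed to be set:

.. code-block:: python

    from langchain.llms import ForefrontAI

    llm = ForefrontAI(
        endpoint_url="https://your-forefront-endpoint-url",  # placeholder
        length=128,
        temperature=0.7,
    )
    print(llm("Explain gravity to a five-year-old."))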
e86c6cdc42d6-0
Source code for langchain.llms.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from abc import ABC, abstractmethod from typing import Any, Dict, List, Mapping, Optional, Union from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens class ContentHandlerBase(ABC): """A handler class to transform input from LLM to a format that SageMaker endpoint expects. Similarly, the class also handles transforming output from the SageMaker endpoint to a format that LLM class expects. """ """ Example: .. code-block:: python class ContentHandler(ContentHandlerBase): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({prompt: prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ content_type: Optional[str] = "text/plain" """The MIME type of the input data passed to endpoint""" accepts: Optional[str] = "text/plain" """The MIME type of the response data returned from endpoint""" @abstractmethod def transform_input( self, prompt: Union[str, List[str]], model_kwargs: Dict ) -> bytes: """Transforms the input to a format that model can accept as the request Body. Should return bytes or seekable file like object in the format specified in the content_type
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
e86c6cdc42d6-1
like object in the format specified in the content_type request header. """ @abstractmethod def transform_output(self, output: bytes) -> Any: """Transforms the output from the model to string that the LLM class expects. """ [docs]class SagemakerEndpoint(LLM, BaseModel): """Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain import SagemakerEndpoint endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" ) se = SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = ""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
e86c6cdc42d6-2
Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: ContentHandlerBase """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python class ContentHandler(ContentHandlerBase): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({prompt: prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
e86c6cdc42d6-3
""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ValueError( "Could not import boto3 python package. " "Please it install it with `pip install boto3`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_name": self.endpoint_name}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "sagemaker_endpoint" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to Sagemaker inference endpoint. Args:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
e86c6cdc42d6-4
"""Call out to Sagemaker inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(prompt, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") text = self.content_handler.transform_output(response["Body"]) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to the sagemaker endpoint. text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
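Pulling the docstring fragments above together, a sketch of a concrete content handler plus endpoint wrapper; the endpoint name, region, and the "inputs"/"generated_text" JSON shape are assumptions about the deployed model, not part of the source:

.. code-block:: python

    import json
    from typing import Dict

    from langchain.llms.sagemaker_endpoint import ContentHandlerBase, SagemakerEndpoint

    class ContentHandler(ContentHandlerBase):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # Payload shape assumed for a HuggingFace-style SageMaker container.
            return json.dumps({"inputs": prompt, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    se = SagemakerEndpoint(
        endpoint_name="my-endpoint-name",   # placeholder for your deployed endpoint
        region_name="us-west-2",
        credentials_profile_name="default",
        content_handler=ContentHandler(),
    )
    print(se("Tell me a joke."))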
edfec6e7205b-0
Source code for langchain.llms.huggingface_pipeline """Wrapper around HuggingFace Pipeline APIs.""" import importlib.util import logging from typing import Any, List, Mapping, Optional from pydantic import BaseModel, Extra from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation") logger = logging.getLogger() [docs]class HuggingFacePipeline(LLM, BaseModel): """Wrapper around HuggingFace Pipeline API. To use, you should have the ``transformers`` python package installed. Only supports `text-generation` and `text2text-generation` for now. Example using from_model_id: .. code-block:: python from langchain.llms import HuggingFacePipeline hf = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation" ) Example passing pipeline in directly: .. code-block:: python from langchain.llms import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) hf = HuggingFacePipeline(pipeline=pipe) """ pipeline: Any #: :meta private: model_id: str = DEFAULT_MODEL_ID """Model name to use.""" model_kwargs: Optional[dict] = None
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_pipeline.html
edfec6e7205b-1
"""Model name to use.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @classmethod def from_model_id( cls, model_id: str, task: str, device: int = -1, model_kwargs: Optional[dict] = None, **kwargs: Any, ) -> LLM: """Construct the pipeline object from model_id and task.""" try: from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, ) from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( "Could not import transformers python package. " "Please it install it with `pip install transformers`." ) _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task == "text2text-generation": model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_pipeline.html
edfec6e7205b-2
if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 (default) for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: return "huggingface_pipeline" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_pipeline.html
edfec6e7205b-3
response = self.pipeline(prompt) if self.pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.pipeline.task == "text2text-generation": text = response[0]["generated_text"] else: raise ValueError( f"Got invalid task {self.pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_pipeline.html
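A short call-side sketch for the pipeline wrapper above; it downloads gpt2 on first use and requires transformers plus torch:

.. code-block:: python

    from langchain.llms import HuggingFacePipeline

    hf = HuggingFacePipeline.from_model_id(model_id="gpt2", task="text-generation")
    # For text-generation the prompt is stripped from the output, and stop
    # sequences are applied client-side via enforce_stop_tokens.
    print(hf("Once upon a time", stop=["\n"]))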
c6e5118a7794-0
Source code for langchain.llms.stochasticai """Wrapper around StochasticAI APIs.""" import logging import time from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class StochasticAI(LLM, BaseModel): """Wrapper around StochasticAI large language models. To use, you should have the environment variable ``STOCHASTICAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import StochasticAI stochasticai = StochasticAI(api_url="") """ api_url: str = "" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" stochasticai_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning(
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/stochasticai.html
c6e5118a7794-1
raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" stochasticai_api_key = get_from_dict_or_env( values, "stochasticai_api_key", "STOCHASTICAI_API_KEY" ) values["stochasticai_api_key"] = stochasticai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.api_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "stochasticai" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.") """ params = self.model_kwargs or {} response_post = requests.post( url=self.api_url, json={"prompt": prompt, "params": params}, headers={
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/stochasticai.html
c6e5118a7794-2
json={"prompt": prompt, "params": params}, headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get( url=response_post_json["data"]["responseUrl"], headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_get.raise_for_status() response_get_json = response_get.json()["data"] text = response_get_json.get("completion") completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/stochasticai.html
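A minimal usage sketch for the StochasticAI wrapper above. It assumes ``STOCHASTICAI_API_KEY`` is exported in the environment; the ``api_url`` value and the keys inside ``model_kwargs`` are placeholders for whatever your deployed model actually accepts.

.. code-block:: python

    from langchain.llms import StochasticAI

    # api_url is a placeholder; point it at the submit URL of your own deployment.
    llm = StochasticAI(
        api_url="https://api.stochastic.ai/v1/modelApi/submit/example-model",
        model_kwargs={"temperature": 0.7},  # forwarded verbatim as "params" in the request body
    )

    # Stop sequences are enforced client-side via enforce_stop_tokens.
    print(llm("Tell me a joke.", stop=["\n\n"]))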
8292bc146ea3-0
Source code for langchain.llms.self_hosted_hugging_face """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.""" import importlib.util import logging from typing import Any, Callable, List, Mapping, Optional from pydantic import BaseModel, Extra from langchain.llms.self_hosted import SelfHostedPipeline from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation") logger = logging.getLogger() def _generate_text( pipeline: Any, prompt: str, *args: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: """Inference function to send to the remote hardware. Accepts a Hugging Face pipeline (or more likely, a key pointing to such a pipeline on the cluster's object store) and returns generated text. """ response = pipeline(prompt, *args, **kwargs) if pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif pipeline.task == "text2text-generation": text = response[0]["generated_text"] else: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _load_transformer( model_id: str = DEFAULT_MODEL_ID, task: str = DEFAULT_TASK, device: int = 0,
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
8292bc146ea3-1
task: str = DEFAULT_TASK, device: int = 0, model_kwargs: Optional[dict] = None, ) -> Any: """Inference function to send to the remote hardware. Accepts a huggingface model_id and returns a pipeline for the task. """ from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from transformers import pipeline as hf_pipeline _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task == "text2text-generation": model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available"
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
8292bc146ea3-2
"Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return pipeline [docs]class SelfHostedHuggingFaceLLM(SelfHostedPipeline, BaseModel): """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Only supports `text-generation` and `text2text-generation` for now. Example using from_model_id: .. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceLLM( model_id="google/flan-t5-large", task="text2text-generation", hardware=gpu ) Example passing fn that generates a pipeline (bc the pipeline is not serializable): .. code-block:: python
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
8292bc146ea3-3
.. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def get_pipeline(): model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer ) return pipe hf = SelfHostedHuggingFaceLLM( model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu) """ model_id: str = DEFAULT_MODEL_ID """Hugging Face model_id to load the model.""" task: str = DEFAULT_TASK """Hugging Face task (either "text-generation" or "text2text-generation").""" device: int = 0 """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" hardware: Any """Remote hardware to send the inference function to.""" model_reqs: List[str] = ["./", "transformers", "torch"] """Requirements to install on hardware to inference the model.""" model_load_fn: Callable = _load_transformer """Function to load the model remotely on the server.""" inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def __init__(self, **kwargs: Any):
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
8292bc146ea3-4
extra = Extra.forbid def __init__(self, **kwargs: Any): """Construct the pipeline remotely using an auxiliary function. The load function needs to be importable to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ load_fn_kwargs = { "model_id": kwargs.get("model_id", DEFAULT_MODEL_ID), "task": kwargs.get("task", DEFAULT_TASK), "device": kwargs.get("device", 0), "model_kwargs": kwargs.get("model_kwargs", None), } super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: return "selfhosted_huggingface_pipeline" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
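The docstring above already shows both construction styles; the sketch below simply puts one of them end to end, assuming the ``runhouse`` package is installed and that the named cluster is a placeholder for hardware you actually have access to.

.. code-block:: python

    import runhouse as rh
    from langchain.llms import SelfHostedHuggingFaceLLM

    # Placeholder cluster spec taken from the example above; reuse any runhouse cluster.
    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")

    llm = SelfHostedHuggingFaceLLM(
        model_id="gpt2",
        task="text-generation",
        hardware=gpu,
        device=0,  # -1 selects CPU; 0 the first CUDA device on the remote box
    )

    # Stop tokens are applied client-side by _generate_text via enforce_stop_tokens.
    print(llm("Once upon a time", stop=["\n"]))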
10fb7608917a-0
Source code for langchain.llms.cohere """Wrapper around Cohere APIs.""" import logging from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Cohere(LLM, BaseModel): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/cohere.html
10fb7608917a-1
truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ValueError( "Could not import cohere python package. " "Please it install it with `pip install cohere`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere"
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/cohere.html
10fb7608917a-2
"""Return type of llm.""" return "cohere" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = self.client.generate(model=self.model, prompt=prompt, **params) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/cohere.html
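A short sketch of the Cohere wrapper in use. The API key is a dummy value (it can also come from ``COHERE_API_KEY``) and the model name is only illustrative. Note that ``stop`` may be set either on the instance or per call, but not both, since ``_call`` above raises otherwise.

.. code-block:: python

    from langchain.llms import Cohere

    llm = Cohere(
        model="command",            # illustrative model name
        cohere_api_key="my-api-key",
        temperature=0.3,
        max_tokens=128,
        stop=["\n\n"],              # default stop sequences used for every call
    )

    # Because stop is set on the instance, do not pass stop= again here.
    print(llm("Write a one-line haiku about the sea."))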
65a7fec7dfac-0
Source code for langchain.llms.aleph_alpha """Wrapper around Aleph Alpha APIs.""" from typing import Any, Dict, List, Optional, Sequence from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class AlephAlpha(LLM, BaseModel): """Wrapper around Aleph Alpha large language models. To use, you should have the ``aleph_alpha_client`` python package installed, and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Parameters are explained more in depth here: https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10 Example: .. code-block:: python from langchain.llms import AlephAlpha alpeh_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = "luminous-base" """Model name to use.""" maximum_tokens: int = 64 """The maximum number of tokens to be generated.""" temperature: float = 0.0 """A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 0.0 """Total probability mass of tokens to consider at each step."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/aleph_alpha.html
65a7fec7dfac-1
"""Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/aleph_alpha.html
65a7fec7dfac-2
"""Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. """ control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/aleph_alpha.html
65a7fec7dfac-3
"""Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: import aleph_alpha_client values["client"] = aleph_alpha_client.Client(token=aleph_alpha_api_key) except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please it install it with `pip install aleph_alpha_client`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo,
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/aleph_alpha.html
65a7fec7dfac-4
"minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "alpeh_alpha" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/aleph_alpha.html
65a7fec7dfac-5
Returns: The string generated by the model. Example: .. code-block:: python response = alpeh_alpha("Tell me a joke.") """ from aleph_alpha_client import CompletionRequest, Prompt params = self._default_params if self.stop_sequences is not None and stop is not None: raise ValueError( "stop sequences found in both the input and default params." ) elif self.stop_sequences is not None: params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop request = CompletionRequest(prompt=Prompt.from_text(prompt), **params) response = self.client.complete(model=self.model, request=request) text = response.completions[0].completion # If stop tokens are provided, Aleph Alpha's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop_sequences is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/aleph_alpha.html
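A compact sketch of the Aleph Alpha wrapper, assuming the ``aleph_alpha_client`` package is installed; the API key is a placeholder and the many penalty knobs are left at their defaults.

.. code-block:: python

    from langchain.llms import AlephAlpha

    llm = AlephAlpha(
        model="luminous-base",
        maximum_tokens=64,
        temperature=0.0,
        stop_sequences=["\n"],              # stripped from the output via enforce_stop_tokens
        aleph_alpha_api_key="my-api-key",   # or rely on ALEPH_ALPHA_API_KEY
    )

    print(llm("Q: What is the capital of France?\nA:"))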
1c5640157a1d-0
Source code for langchain.llms.deepinfra """Wrapper around DeepInfra APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_MODEL_ID = "google/flan-t5-xl" [docs]class DeepInfra(LLM, BaseModel): """Wrapper around DeepInfra deployed models. To use, you should have the ``requests`` python package installed, and the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import DeepInfra di = DeepInfra(model_id="google/flan-t5-xl", deepinfra_api_token="my-api-key") """ model_id: str = DEFAULT_MODEL_ID model_kwargs: Optional[dict] = None deepinfra_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN" ) values["deepinfra_api_token"] = deepinfra_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/deepinfra.html
1c5640157a1d-1
@property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "deepinfra" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to DeepInfra's inference API endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = di("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} res = requests.post( f"https://api.deepinfra.com/v1/inference/{self.model_id}", headers={ "Authorization": f"bearer {self.deepinfra_api_token}", "Content-Type": "application/json", }, json={"input": prompt, **_model_kwargs}, ) if res.status_code != 200: raise ValueError("Error raised by inference API") text = res.json()[0]["generated_text"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/deepinfra.html
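A small sketch of the DeepInfra wrapper. The token is a placeholder (``DEEPINFRA_API_TOKEN`` works as well), and the ``model_kwargs`` key shown is an assumption about what the deployed inference endpoint accepts, since the wrapper forwards it verbatim in the JSON body.

.. code-block:: python

    from langchain.llms import DeepInfra

    llm = DeepInfra(
        model_id="google/flan-t5-xl",
        deepinfra_api_token="my-api-token",
        model_kwargs={"max_new_tokens": 64},  # assumed parameter name, passed through unchanged
    )

    print(llm("What is 2 + 2?", stop=["\n"]))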
6a40acda07e2-0
Source code for langchain.llms.ai21 """Wrapper around AI21 APIs.""" from typing import Any, Dict, List, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env class AI21PenaltyData(BaseModel): """Parameters for AI21 penalty data.""" scale: int = 0 applyToWhitespaces: bool = True applyToPunctuations: bool = True applyToNumbers: bool = True applyToStopwords: bool = True applyToEmojis: bool = True [docs]class AI21(LLM, BaseModel): """Wrapper around AI21 large language models. To use, you should have the environment variable ``AI21_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import AI21 ai21 = AI21(model="j1-jumbo") """ model: str = "j1-jumbo" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" maxTokens: int = 256 """The maximum number of tokens to generate in the completion.""" minTokens: int = 0 """The minimum number of tokens to generate in the completion.""" topP: float = 1.0 """Total probability mass of tokens to consider at each step.""" presencePenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens.""" countPenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens according to count."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/ai21.html
6a40acda07e2-1
"""Penalizes repeated tokens according to count.""" frequencyPenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens according to frequency.""" numResults: int = 1 """How many completions to generate for each prompt.""" logitBias: Optional[Dict[str, float]] = None """Adjust the probability of specific tokens being generated.""" ai21_api_key: Optional[str] = None stop: Optional[List[str]] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY") values["ai21_api_key"] = ai21_api_key return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling AI21 API.""" return { "temperature": self.temperature, "maxTokens": self.maxTokens, "minTokens": self.minTokens, "topP": self.topP, "presencePenalty": self.presencePenalty.dict(), "countPenalty": self.countPenalty.dict(), "frequencyPenalty": self.frequencyPenalty.dict(), "numResults": self.numResults, "logitBias": self.logitBias, } @property def _identifying_params(self) -> Dict[str, Any]:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/ai21.html
6a40acda07e2-2
@property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "ai21" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to AI21's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ai21("Tell me a joke.") """ if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: stop = self.stop elif stop is None: stop = [] if self.base_url is not None: base_url = self.base_url else: if self.model in ("j1-grande-instruct",): base_url = "https://api.ai21.com/studio/v1/experimental" else: base_url = "https://api.ai21.com/studio/v1" response = requests.post( url=f"{base_url}/{self.model}/complete", headers={"Authorization": f"Bearer {self.ai21_api_key}"}, json={"prompt": prompt, "stopSequences": stop, **self._default_params}, ) if response.status_code != 200: optional_detail = response.json().get("error")
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/ai21.html
6a40acda07e2-3
optional_detail = response.json().get("error") raise ValueError( f"AI21 /complete call failed with status code {response.status_code}." f" Details: {optional_detail}" ) response_json = response.json() return response_json["completions"][0]["data"]["text"]

https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/ai21.html
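A short sketch of the AI21 wrapper, including one of the ``AI21PenaltyData`` objects that the default params serialize with ``.dict()``. The API key is read from ``AI21_API_KEY``; the prompt and the penalty scale are illustrative.

.. code-block:: python

    from langchain.llms import AI21
    from langchain.llms.ai21 import AI21PenaltyData

    llm = AI21(
        model="j1-jumbo",
        maxTokens=128,
        temperature=0.7,
        presencePenalty=AI21PenaltyData(scale=1),  # serialized into the request as a dict
        stop=["##"],                               # default stopSequences for every call
    )

    print(llm("Write a tagline for an ice cream shop."))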
638cf12e99da-0
Source code for langchain.llms.nlpcloud """Wrapper around NLPCloud APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env [docs]class NLPCloud(LLM, BaseModel): """Wrapper around NLPCloud large language models. To use, you should have the ``nlpcloud`` python package installed, and the environment variable ``NLPCLOUD_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import NLPCloud nlpcloud = NLPCloud(model="gpt-neox-20b") """ client: Any #: :meta private: model_name: str = "finetuned-gpt-neox-20b" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" min_length: int = 1 """The minimum number of tokens to generate in the completion.""" max_length: int = 256 """The maximum number of tokens to generate in the completion.""" length_no_input: bool = True """Whether min_length and max_length should include the length of the input.""" remove_input: bool = True """Remove input text from API response""" remove_end_sequence: bool = True """Whether or not to remove the end sequence token.""" bad_words: List[str] = [] """List of tokens not allowed to be generated.""" top_p: int = 1 """Total probability mass of tokens to consider at each step.""" top_k: int = 50
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/nlpcloud.html
638cf12e99da-1
top_k: int = 50 """The number of highest probability tokens to keep for top-k filtering.""" repetition_penalty: float = 1.0 """Penalizes repeated tokens. 1.0 means no penalty.""" length_penalty: float = 1.0 """Exponential penalty to the length.""" do_sample: bool = True """Whether to use sampling (True) or greedy decoding.""" num_beams: int = 1 """Number of beams for beam search.""" early_stopping: bool = False """Whether to stop beam search at num_beams sentences.""" num_return_sequences: int = 1 """How many completions to generate for each prompt.""" nlpcloud_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nlpcloud_api_key = get_from_dict_or_env( values, "nlpcloud_api_key", "NLPCLOUD_API_KEY" ) try: import nlpcloud values["client"] = nlpcloud.Client( values["model_name"], nlpcloud_api_key, gpu=True, lang="en" ) except ImportError: raise ValueError( "Could not import nlpcloud python package. " "Please it install it with `pip install nlpcloud`." ) return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling NLPCloud API.""" return {
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/nlpcloud.html
638cf12e99da-2
"""Get the default parameters for calling NLPCloud API.""" return { "temperature": self.temperature, "min_length": self.min_length, "max_length": self.max_length, "length_no_input": self.length_no_input, "remove_input": self.remove_input, "remove_end_sequence": self.remove_end_sequence, "bad_words": self.bad_words, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, "length_penalty": self.length_penalty, "do_sample": self.do_sample, "num_beams": self.num_beams, "early_stopping": self.early_stopping, "num_return_sequences": self.num_return_sequences, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "nlpcloud" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to NLPCloud's create endpoint. Args: prompt: The prompt to pass into the model. stop: Not supported by this interface (pass in init method) Returns: The string generated by the model. Example: .. code-block:: python response = nlpcloud("Tell me a joke.") """ if stop and len(stop) > 1: raise ValueError( "NLPCloud only supports a single stop sequence per generation."
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/nlpcloud.html
638cf12e99da-3
raise ValueError( "NLPCloud only supports a single stop sequence per generation." "Pass in a list of length 1." ) elif stop and len(stop) == 1: end_sequence = stop[0] else: end_sequence = None response = self.client.generation( prompt, end_sequence=end_sequence, **self._default_params ) return response["generated_text"]
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/nlpcloud.html
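A minimal sketch of the NLPCloud wrapper, assuming ``NLPCLOUD_API_KEY`` is set and the ``nlpcloud`` package is installed. As ``_call`` above enforces, at most one stop sequence can be supplied per call.

.. code-block:: python

    from langchain.llms import NLPCloud

    llm = NLPCloud(
        model_name="finetuned-gpt-neox-20b",
        max_length=128,
        temperature=0.7,
    )

    # Only a single stop sequence is accepted; it is forwarded to the API as end_sequence.
    print(llm("Tell me a joke.", stop=["\n"]))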
17fc36c238ed-0
Source code for langchain.llms.bananadev """Wrapper around Banana API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Banana(LLM, BaseModel): """Wrapper around Banana large language models. To use, you should have the ``banana-dev`` python package installed, and the environment variable ``BANANA_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import Banana banana = Banana(model_key="") """ model_key: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" banana_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/bananadev.html
17fc36c238ed-1
if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" banana_api_key = get_from_dict_or_env( values, "banana_api_key", "BANANA_API_KEY" ) values["banana_api_key"] = banana_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_key": self.model_key}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "banana" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call to Banana endpoint.""" try: import banana_dev as banana except ImportError: raise ValueError( "Could not import banana-dev python package. " "Please install it with `pip install banana-dev`." ) params = self.model_kwargs or {} api_key = self.banana_api_key model_key = self.model_key model_inputs = { # a json specific to your model. "prompt": prompt, **params,
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/bananadev.html
17fc36c238ed-2
"prompt": prompt, **params, } response = banana.run(api_key, model_key, model_inputs) try: text = response["modelOutputs"][0]["output"] except (KeyError, TypeError): returned = response["modelOutputs"][0] raise ValueError( "Response should be of schema: {'output': 'text'}." f"\nResponse was: {returned}" "\nTo fix this:" "\n- fork the source repo of the Banana model" "\n- modify app.py to return the above schema" "\n- deploy that as a custom repo" ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/bananadev.html
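A brief sketch of the Banana wrapper, assuming ``BANANA_API_KEY`` is set and ``banana-dev`` is installed. The ``model_key`` is a placeholder for your deployed model, and the ``model_kwargs`` keys depend entirely on what your model's ``app.py`` expects.

.. code-block:: python

    from langchain.llms import Banana

    llm = Banana(
        model_key="your-banana-model-key",   # placeholder deployment key
        model_kwargs={"temperature": 0.7},   # merged into model_inputs alongside the prompt
    )

    print(llm("Tell me a joke.", stop=["\n"]))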
55be16b62b7b-0
Source code for langchain.llms.cerebriumai """Wrapper around CerebriumAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class CerebriumAI(LLM, BaseModel): """Wrapper around CerebriumAI large language models. To use, you should have the ``cerebrium`` python package installed, and the environment variable ``CEREBRIUMAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import CerebriumAI cerebrium = CerebriumAI(endpoint_url="") """ endpoint_url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" cerebriumai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {})
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/cerebriumai.html
55be16b62b7b-1
extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cerebriumai_api_key = get_from_dict_or_env( values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY" ) values["cerebriumai_api_key"] = cerebriumai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "cerebriumai" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call to CerebriumAI endpoint.""" try: from cerebrium import model_api_request except ImportError: raise ValueError( "Could not import cerebrium python package. " "Please install it with `pip install cerebrium`." )
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/cerebriumai.html
55be16b62b7b-2
"Please install it with `pip install cerebrium`." ) params = self.model_kwargs or {} response = model_api_request( self.endpoint_url, {"prompt": prompt, **params}, self.cerebriumai_api_key ) text = response["data"]["result"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 22, 2023.
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/cerebriumai.html
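A minimal sketch of the CerebriumAI wrapper, assuming the ``cerebrium`` package is installed and ``CEREBRIUMAI_API_KEY`` is set. Both the endpoint URL and the ``model_kwargs`` key are placeholders for your own deployment.

.. code-block:: python

    from langchain.llms import CerebriumAI

    llm = CerebriumAI(
        endpoint_url="https://run.cerebrium.ai/your-endpoint",  # placeholder endpoint
        model_kwargs={"max_length": 100},                        # assumed parameter name
    )

    print(llm("Tell me a joke.", stop=["\n"]))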
3cbe3e937a97-0
Source code for langchain.llms.petals """Wrapper around Petals API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Petals(LLM, BaseModel): """Wrapper around Petals Bloom models. To use, you should have the ``petals`` python package installed, and the environment variable ``HUGGINGFACE_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import petals petals = Petals() """ client: Any """The client to use for the API calls.""" tokenizer: Any """The tokenizer to use for the API calls.""" model_name: str = "bigscience/bloom-petals" """The model to use.""" temperature: float = 0.7 """What sampling temperature to use""" max_new_tokens: int = 256 """The maximum number of new tokens to generate in the completion.""" top_p: float = 0.9 """The cumulative probability for top-p sampling.""" top_k: Optional[int] = None """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" do_sample: bool = True """Whether or not to use sampling; use greedy decoding otherwise.""" max_length: Optional[int] = None
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/petals.html
3cbe3e937a97-1
max_length: Optional[int] = None """The maximum length of the sequence to be generated.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" huggingface_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingface_api_key = get_from_dict_or_env( values, "huggingface_api_key", "HUGGINGFACE_API_KEY" ) try: from petals import DistributedBloomForCausalLM from transformers import BloomTokenizerFast model_name = values["model_name"]
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/petals.html
3cbe3e937a97-2
from transformers import BloomTokenizerFast model_name = values["model_name"] values["tokenizer"] = BloomTokenizerFast.from_pretrained(model_name) values["client"] = DistributedBloomForCausalLM.from_pretrained(model_name) values["huggingface_api_key"] = huggingface_api_key except ImportError: raise ValueError( "Could not import transformers or petals python package." "Please install with `pip install -U transformers petals`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Petals API.""" normal_params = { "temperature": self.temperature, "max_new_tokens": self.max_new_tokens, "top_p": self.top_p, "top_k": self.top_k, "do_sample": self.do_sample, "max_length": self.max_length, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "petals" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call the Petals API.""" params = self._default_params inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"] outputs = self.client.generate(inputs, **params) text = self.tokenizer.decode(outputs[0]) if stop is not None:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/petals.html
3cbe3e937a97-3
text = self.tokenizer.decode(outputs[0]) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/petals.html
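A small sketch of the Petals wrapper. Construction is heavier than for the hosted APIs: the validator above downloads the Bloom tokenizer, loads the distributed model, and requires ``HUGGINGFACE_API_KEY`` to be available.

.. code-block:: python

    from langchain.llms import Petals

    # Loading the tokenizer and connecting to the public swarm can take a while.
    llm = Petals(
        model_name="bigscience/bloom-petals",
        max_new_tokens=64,
        temperature=0.7,
    )

    print(llm("The capital of France is", stop=["\n"]))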
d28c2dcdc63c-0
Source code for langchain.llms.gooseai """Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class GooseAI(LLM, BaseModel): """Wrapper around GooseAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/gooseai.html
d28c2dcdc63c-1
"""Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/gooseai.html
d28c2dcdc63c-2
) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/gooseai.html
d28c2dcdc63c-3
params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/gooseai.html
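A short sketch of the GooseAI wrapper, which drives the ``openai`` client against ``https://api.goose.ai/v1``; it assumes ``GOOSEAI_API_KEY`` is set in the environment.

.. code-block:: python

    from langchain.llms import GooseAI

    llm = GooseAI(
        model_name="gpt-neo-20b",
        max_tokens=128,
        temperature=0.7,
    )

    # stop is forwarded to the completion call rather than stripped client-side.
    print(llm("Tell me a joke.", stop=["\n"]))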
c2e2159d3a53-0
Source code for langchain.llms.modal """Wrapper around Modal API.""" import logging from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) [docs]class Modal(LLM, BaseModel): """Wrapper around Modal large language models. To use, you should have the ``modal-client`` python package installed. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import Modal modal = Modal(endpoint_url="") """ endpoint_url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/modal.html
c2e2159d3a53-1
Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "modal" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call to Modal endpoint.""" params = self.model_kwargs or {} response = requests.post( url=self.endpoint_url, headers={ "Content-Type": "application/json", }, json={"prompt": prompt, **params}, ) try: response_json = response.json() text = response_json["prompt"] except KeyError: raise ValueError("LangChain requires 'prompt' key in response.") if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/modal.html
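A minimal sketch of the Modal wrapper. The endpoint URL is a placeholder for your own Modal web endpoint, which, per ``_call`` above, must return a JSON body whose ``prompt`` key holds the generated text; the ``model_kwargs`` key is likewise an assumption about your app.

.. code-block:: python

    from langchain.llms import Modal

    llm = Modal(
        endpoint_url="https://your-org--your-app.modal.run",  # placeholder web endpoint
        model_kwargs={"max_length": 200},                      # assumed key, sent in the JSON body
    )

    print(llm("Tell me a joke.", stop=["\n"]))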
83068d778ee1-0
Source code for langchain.llms.promptlayer_openai """PromptLayer wrapper.""" import datetime from typing import List, Optional from pydantic import BaseModel from langchain.llms import OpenAI, OpenAIChat from langchain.schema import LLMResult [docs]class PromptLayerOpenAI(OpenAI, BaseModel): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAI LLM can also be passed here. The PromptLayerOpenAI LLM adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAI openai = PromptLayerOpenAI(model_name="text-davinci-003") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop) request_end_time = datetime.datetime.now().timestamp()
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/promptlayer_openai.html
83068d778ee1-1
request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAI", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAI.async", "langchain", [prompt], self._identifying_params,
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/promptlayer_openai.html
83068d778ee1-2
"langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses [docs]class PromptLayerOpenAIChat(OpenAIChat, BaseModel): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAIChat LLM can also be passed here. The PromptLayerOpenAIChat adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAIChat openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/promptlayer_openai.html
83068d778ee1-3
self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAIChat", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i]
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/promptlayer_openai.html
83068d778ee1-4
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/promptlayer_openai.html
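A companion sketch for the chat wrapper, again hedged: it assumes the same environment variables as above and exercises only the two extra parameters documented in the class docstring.

from langchain.llms import PromptLayerOpenAIChat

chat_llm = PromptLayerOpenAIChat(
    model_name="gpt-3.5-turbo",
    pl_tags=["langchain", "demo"],  # free-form tags sent with every logged request
    return_pl_id=True,
)
print(chat_llm("Summarize PromptLayer in one sentence."))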
4ea867d608b5-0
Source code for langchain.llms.anthropic
"""Wrapper around Anthropic APIs."""
import re
from typing import Any, Dict, Generator, List, Mapping, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env


[docs]class Anthropic(LLM, BaseModel):
    r"""Wrapper around Anthropic large language models.

    To use, you should have the ``anthropic`` python package installed, and the
    environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            import anthropic
            from langchain.llms import Anthropic
            model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")

            # Simplest invocation, automatically wrapped with HUMAN_PROMPT
            # and AI_PROMPT.
            response = model("What are the biggest risks facing humanity?")

            # Or if you want to use the chat mode, build a few-shot-prompt, or
            # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
            raw_prompt = "What are the biggest risks facing humanity?"
            prompt = f"{anthropic.HUMAN_PROMPT} {raw_prompt}{anthropic.AI_PROMPT}"
            response = model(prompt)
    """

    client: Any  #: :meta private:
    model: str = "claude-v1"
    """Model name to use."""

    max_tokens_to_sample: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 1.0
    """A non-negative float that tunes the degree of randomness in generation."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/anthropic.html
4ea867d608b5-1
"""A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" anthropic_api_key: Optional[str] = None HUMAN_PROMPT: Optional[str] = None AI_PROMPT: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anthropic_api_key = get_from_dict_or_env( values, "anthropic_api_key", "ANTHROPIC_API_KEY" ) try: import anthropic values["client"] = anthropic.Client(anthropic_api_key) values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT values["AI_PROMPT"] = anthropic.AI_PROMPT except ImportError: raise ValueError( "Could not import anthropic python package. " "Please it install it with `pip install anthropic`." ) return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Anthropic API.""" return { "max_tokens_to_sample": self.max_tokens_to_sample, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/anthropic.html
4ea867d608b5-2
"""Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "anthropic" def _wrap_prompt(self, prompt: str) -> str: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if prompt.startswith(self.HUMAN_PROMPT): return prompt # Already wrapped. # Guard against common errors in specifying wrong number of newlines. corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt) if n_subs == 1: return corrected_prompt # As a last resort, wrap the prompt ourselves to emulate instruct-style. return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n" def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if stop is None: stop = [] # Never want model to invent new turns of Human / Assistant dialog. stop.extend([self.HUMAN_PROMPT, self.AI_PROMPT]) return stop def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: r"""Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/anthropic.html
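To make the prompt-correction logic in ``_wrap_prompt`` easier to follow, here is a standalone sketch of the same regex handling; the ``HUMAN_PROMPT``/``AI_PROMPT`` values are hard-coded assumptions standing in for the constants normally pulled from the ``anthropic`` package at validation time.

import re

HUMAN_PROMPT = "\n\nHuman:"   # assumed stand-in for anthropic.HUMAN_PROMPT
AI_PROMPT = "\n\nAssistant:"  # assumed stand-in for anthropic.AI_PROMPT


def wrap_prompt(prompt: str) -> str:
    # Mirrors Anthropic._wrap_prompt above, outside the class for illustration.
    if prompt.startswith(HUMAN_PROMPT):
        return prompt  # already wrapped
    # Fix prompts that start with "Human:" but the wrong number of newlines.
    corrected, n_subs = re.subn(r"^\n*Human:", HUMAN_PROMPT, prompt)
    if n_subs == 1:
        return corrected
    # Otherwise wrap the bare prompt in a full Human/Assistant turn.
    return f"{HUMAN_PROMPT} {prompt}{AI_PROMPT} Sure, here you go:\n"


print(repr(wrap_prompt("Human: What is 2 + 2?")))  # leading newlines corrected
print(repr(wrap_prompt("What is 2 + 2?")))         # fully wrapped as a last resort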
4ea867d608b5-3
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "What are the biggest risks facing humanity?"
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                response = model(prompt)
        """
        stop = self._get_anthropic_stop(stop)
        response = self.client.completion(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
        )
        text = response["completion"]
        return text

[docs]    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
        r"""Call Anthropic completion_stream and return the resulting generator.

        BETA: this is a beta feature while we figure out the right abstraction.
        Once that happens, this interface could change.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

                prompt = "Write a poem about a stream."
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                generator = anthropic.stream(prompt)
                for token in generator:
                    yield token
        """
        stop = self._get_anthropic_stop(stop)
        return self.client.completion_stream(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
        )
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/anthropic.html
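A hedged sketch of consuming the ``stream`` generator above; it assumes ``ANTHROPIC_API_KEY`` is set, and because the exact shape of each streamed item comes from the ``anthropic`` client, it only prints the raw items rather than assuming specific keys.

from langchain.llms import Anthropic

model = Anthropic(model="claude-v1", max_tokens_to_sample=128)

prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"

for item in model.stream(prompt):
    print(item)  # one streamed chunk from the underlying completion_stream call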
10b6eaed6327-0
Source code for langchain.llms.openai
"""Wrapper around OpenAI APIs."""
from __future__ import annotations

import logging
import sys
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    Generator,
    List,
    Mapping,
    Optional,
    Set,
    Tuple,
    Union,
)

from pydantic import BaseModel, Extra, Field, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def update_token_usage(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    """Update token usage."""
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]


def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
    """Update response from the stream response."""
    response["choices"][0]["text"] += stream_response["choices"][0]["text"]
    response["choices"][0]["finish_reason"] = stream_response["choices"][0][
        "finish_reason"
    ]
    response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]


def _streaming_response_template() -> Dict[str, Any]:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/openai.html
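The three helpers above exist to stitch streamed chunks back into a single completion-shaped response. A self-contained sketch of the same merging, using invented chunk payloads purely for illustration:

# Start from the same empty template shape as _streaming_response_template().
response = {"choices": [{"text": "", "finish_reason": None, "logprobs": None}]}

# Invented stand-ins for chunks coming back from the OpenAI streaming API.
stream_chunks = [
    {"choices": [{"text": "Hello", "finish_reason": None, "logprobs": None}]},
    {"choices": [{"text": " world", "finish_reason": "stop", "logprobs": None}]},
]

# Same merging rule as _update_response(): concatenate the text, keep the most
# recent finish_reason and logprobs.
for chunk in stream_chunks:
    response["choices"][0]["text"] += chunk["choices"][0]["text"]
    response["choices"][0]["finish_reason"] = chunk["choices"][0]["finish_reason"]
    response["choices"][0]["logprobs"] = chunk["choices"][0]["logprobs"]

assert response["choices"][0]["text"] == "Hello world"
assert response["choices"][0]["finish_reason"] == "stop"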
10b6eaed6327-1
def _streaming_response_template() -> Dict[str, Any]:
    return {
        "choices": [
            {
                "text": "",
                "finish_reason": None,
                "logprobs": None,
            }
        ]
    }


def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat]) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    def _completion_with_retry(**kwargs: Any) -> Any:
        return llm.client.create(**kwargs)

    return _completion_with_retry(**kwargs)


async def acompletion_with_retry(
    llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any
) -> Any:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/openai.html
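The retry decorator above is plain tenacity configuration. The toy sketch below applies an equivalent policy to a made-up flaky function so the backoff behaviour is visible in isolation; ``FlakyError`` and the function body are invented.

import logging

from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)


class FlakyError(Exception):
    """Stand-in for the openai.error types retried above."""


@retry(
    reraise=True,
    stop=stop_after_attempt(6),  # mirrors the default max_retries=6
    wait=wait_exponential(multiplier=1, min=4, max=10),  # 4s at first, capped at 10s
    retry=retry_if_exception_type(FlakyError),
    before_sleep=before_sleep_log(logger, logging.WARNING),
)
def flaky_call() -> str:
    # In real use this would sometimes succeed; here it always raises so the
    # retry/backoff path is exercised and the final error is re-raised.
    raise FlakyError("transient failure")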
10b6eaed6327-2
) -> Any:
    """Use tenacity to retry the async completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    async def _completion_with_retry(**kwargs: Any) -> Any:
        # Use OpenAI's async api https://github.com/openai/openai-python#async-api
        return await llm.client.acreate(**kwargs)

    return await _completion_with_retry(**kwargs)


class BaseOpenAI(BaseLLM, BaseModel):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            openai = OpenAI(model_name="text-davinci-003")
    """

    client: Any  #: :meta private:
    model_name: str = "text-davinci-003"
    """Model name to use."""

    temperature: float = 0.7
    """What sampling temperature to use."""

    max_tokens: int = 256
    """The maximum number of tokens to generate in the completion.
    -1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""

    top_p: float = 1
    """Total probability mass of tokens to consider at each step."""

    frequency_penalty: float = 0
    """Penalizes repeated tokens according to frequency."""

    presence_penalty: float = 0
    """Penalizes repeated tokens."""

    n: int = 1
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/openai.html
10b6eaed6327-3
"""Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore """Initialize the OpenAI object.""" model_name = data.get("model_name", "") if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"): warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return OpenAIChat(**data) return super().__new__(cls) class Config: """Configuration for this pydantic object."""
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/openai.html
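A hedged sketch of the ``__new__`` redirect above: constructing ``OpenAI`` with a chat model name should emit the deprecation warning and hand back an ``OpenAIChat`` instance instead (assumes ``OPENAI_API_KEY`` is set in the environment).

import warnings

from langchain.llms import OpenAI
from langchain.llms.openai import OpenAIChat

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    llm = OpenAI(model_name="gpt-3.5-turbo")

print(isinstance(llm, OpenAIChat))  # expected: True
print(caught[0].message)            # points at langchain.chat_models.ChatOpenAI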
10b6eaed6327-4
    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.ignore

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""WARNING! {field_name} is not a default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        try:
            import openai

            openai.api_key = openai_api_key
            values["client"] = openai.Completion
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if values["streaming"] and values["n"] > 1:
            raise ValueError("Cannot stream results when n > 1.")
        if values["streaming"] and values["best_of"] > 1:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/openai.html
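The ``build_extra`` validator above is what lets arbitrary OpenAI ``create`` parameters pass through the wrapper. A small sketch, assuming ``OPENAI_API_KEY`` is set; ``logprobs`` is used only because it is a valid ``create`` argument that is not a declared field on this class.

from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003", logprobs=1)  # logs a warning
print(llm.model_kwargs)  # expected: {'logprobs': 1}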
10b6eaed6327-5
if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "best_of": self.best_of, "request_timeout": self.request_timeout, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python response = openai.generate(["Tell me a joke."]) """ # TODO: write a unit test for this params = self._invocation_params sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts:
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/openai.html
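A usage sketch for the batched ``_generate`` path above, hedged on the exact contents of ``llm_output``: the aggregated token counts are collected into ``token_usage`` as shown and surfaced by ``create_llm_result``, which is defined later in this module (assumes ``OPENAI_API_KEY`` is set).

from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003", batch_size=20)
result = llm.generate(["Tell me a joke.", "Tell me a limerick."])

print(result.generations[0][0].text)  # first completion for the first prompt
print(result.llm_output)              # e.g. {"token_usage": {"total_tokens": ...}}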
10b6eaed6327-6
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")
                params["stream"] = True
                response = _streaming_response_template()
                for stream_resp in completion_with_retry(
                    self, prompt=_prompts, **params
                ):
                    self.callback_manager.on_llm_new_token(
                        stream_resp["choices"][0]["text"],
                        verbose=self.verbose,
                        logprobs=stream_resp["choices"][0]["logprobs"],
                    )
                    _update_response(response, stream_resp)
                choices.extend(response["choices"])
            else:
                response = completion_with_retry(self, prompt=_prompts, **params)
                choices.extend(response["choices"])
            if not self.streaming:
                # Can't update token usage if streaming
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)

    async def _agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Call out to OpenAI's endpoint async with k unique prompts."""
        params = self._invocation_params
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
        choices = []
        token_usage: Dict[str, int] = {}
        # Get the token usage from the response.
        # Includes prompt, completion, and total tokens used.
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")
https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/openai.html
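When ``streaming=True``, each token is pushed through the callback manager rather than returned all at once, and only a single prompt is allowed per call. A hedged sketch of wiring that up with the stdout streaming handler langchain shipped around this time; the callback import paths are assumptions based on that era's API rather than something shown in this module.

from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import OpenAI

llm = OpenAI(
    streaming=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    verbose=True,
    temperature=0,
)
llm("Write me a haiku about streaming tokens.")  # tokens print as they arrive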