id | text | source |
|---|---|---|
a49b5cd9c1f6-5 | suffix_kwargs = {
k: v for k, v in kwargs.items() if k in self.suffix.input_variables
}
for k in suffix_kwargs.keys():
kwargs.pop(k)
suffix = self.suffix.format(
**suffix_kwargs,
)
pieces = [prefix, *example_strings, suffix]
template = ... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html |
a49b5cd9c1f6-6 | if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().dict(**kwargs) | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html |
235cc75a2941-0 | Source code for langchain.prompts.example_selector.ngram_overlap
"""Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from typing import Dict, List
import numpy as np
from pydantic import B... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/ngram_overlap.html |
235cc75a2941-1 | https://aclanthology.org/P02-1040.pdf
"""
from nltk.translate.bleu_score import (
SmoothingFunction, # type: ignore
sentence_bleu,
)
hypotheses = source[0].split()
references = [s.split() for s in example]
return float(
sentence_bleu(
references,
... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/ngram_overlap.html |
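The two chunks above show the core of the overlap scorer. A minimal self-contained sketch of the same idea, assuming `nltk` is installed (the keyword arguments follow nltk's `sentence_bleu` signature as shown in the chunk):

```python
# Sketch of a BLEU-based ngram overlap score, assuming nltk is installed.
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu

def ngram_overlap_score(source: list, example: list) -> float:
    """Score ngram overlap between the source sentence and an example."""
    hypotheses = source[0].split()
    references = [s.split() for s in example]
    # method1 smoothing keeps the score non-zero when higher-order ngrams
    # are missing; auto_reweigh adjusts the weights for short inputs.
    return float(
        sentence_bleu(
            references,
            hypotheses,
            smoothing_function=SmoothingFunction().method1,
            auto_reweigh=True,
        )
    )
```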
235cc75a2941-2 | examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
threshold: float = -1.0
"""Threshold at which algorithm stops. Set to -1.0 by default.
For negative threshold:
select_examples... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/ngram_overlap.html |
235cc75a2941-3 | try:
from nltk.translate.bleu_score import ( # noqa: disable=F401
SmoothingFunction,
sentence_bleu,
)
except ImportError as e:
raise ValueError(
"Not all the correct dependencies for this ExampleSelect exist"
) from... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/ngram_overlap.html |
235cc75a2941-4 | k = len(self.examples)
score = [0.0] * k
first_prompt_template_key = self.example_prompt.input_variables[0]
for i in range(k):
score[i] = ngram_overlap_score(
inputs, [self.examples[i][first_prompt_template_key]]
)
while True:
arg_max =... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/ngram_overlap.html |
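The chunk cuts off at the selection loop. A hedged standalone reconstruction of the greedy pass it appears to begin, with the threshold semantics described by the `threshold` field above:

```python
import numpy as np

# Hypothetical standalone version of the greedy selection loop: repeatedly
# take the highest-scoring example until the threshold (or exhaustion) stops it.
def select_by_score(examples: list, score: list, threshold: float) -> list:
    selected = []
    while True:
        arg_max = int(np.argmax(score))
        if score[arg_max] < threshold or score[arg_max] == -1.0:
            break
        selected.append(examples[arg_max])
        score[arg_max] = -1.0  # mark this example as consumed
    return selected
```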
fbc05febd17c-0 | Source code for langchain.prompts.example_selector.semantic_similarity
"""Example selector that selects examples based on SemanticSimilarity."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
fr... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-1 | """Number of examples to select."""
example_keys: Optional[List[str]] = None
"""Optional keys to filter examples to."""
input_keys: Optional[List[str]] = None
"""Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables."""
class Config:
... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-2 | return ids[0]
[docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
if self.input_keys:
input_variables = {key: input_variables[key] for key in s... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-3 | return examples
[docs] @classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
k: int = 4,
input_keys: Optional[List[str]] = None,
**vectorstore_cls_kwargs: Any,
) -> SemanticSimilarityExamp... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-4 | instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-5 | This was shown to improve performance in this paper:
https://arxiv.org/pdf/2211.13892.pdf
"""
fetch_k: int = 20
"""Number of examples to fetch to rerank."""
[docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic simil... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-6 | examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
[docs] @classmethod
def from_examples(
cls,
... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-7 | Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the sear... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
fbc05febd17c-8 | string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys) | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html |
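A hedged usage sketch of the `from_examples` classmethod above; it assumes `faiss-cpu` and an OpenAI API key are available, and the `input`/`output` example keys are illustrative:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.vectorstores import FAISS

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]
# Embeds the examples, stores them in FAISS, and selects the k most
# semantically similar examples at prompt time.
selector = SemanticSimilarityExampleSelector.from_examples(
    examples, OpenAIEmbeddings(), FAISS, k=1
)
print(selector.select_examples({"input": "joyful"}))  # e.g. [{'input': 'happy', ...}]
```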
6e346a77ee53-0 | Source code for langchain.prompts.example_selector.length_based
"""Select examples based on length."""
import re
from typing import Callable, Dict, List
from pydantic import BaseModel, validator
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
d... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/length_based.html |
6e346a77ee53-1 | max_length: int = 2048
"""Max length for the prompt, beyond which examples are cut."""
example_text_lengths: List[int] = [] #: :meta private:
[docs] def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
string_example = s... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/length_based.html |
6e346a77ee53-2 | example_prompt = values["example_prompt"]
get_text_length = values["get_text_length"]
string_examples = [example_prompt.format(**eg) for eg in values["examples"]]
return [get_text_length(eg) for eg in string_examples]
[docs] def select_examples(self, input_variables: Dict[str, str]) -> List[d... | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/length_based.html |
6e346a77ee53-3 | i += 1
return examples | https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/length_based.html |
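A usage sketch for the length-based selector above, assuming only `langchain` itself (no external services); the examples and `max_length` value are illustrative:

```python
from langchain.prompts import PromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector

example_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="Input: {input}\nOutput: {output}",
)
selector = LengthBasedExampleSelector(
    examples=[
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
    ],
    example_prompt=example_prompt,
    max_length=25,  # longer inputs leave room for fewer examples
)
print(selector.select_examples({"input": "big"}))
```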
3c98b72bf039-0 | Source code for langchain.chat_models.azure_openai
"""Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping
from pydantic import root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import ChatResult
from langchain.utils... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
3c98b72bf039-1 | - ``OPENAI_API_KEY``
- ``OPENAI_API_BASE``
- ``OPENAI_API_VERSION``
- ``OPENAI_PROXY``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
deployment_name="35-turb... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
3c98b72bf039-2 | openai_api_base: str = ""
openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
openai_proxy: str = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
3c98b72bf039-3 | )
values["openai_api_type"] = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
defa... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
3c98b72bf039-4 | except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
3c98b72bf039-5 | """Get the identifying parameters."""
return {**self._default_params}
@property
def _invocation_params(self) -> Mapping[str, Any]:
openai_creds = {
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
return {**openai_creds, **super(... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
d76a6ac8b2cf-0 | Source code for langchain.chat_models.fake
"""Fake ChatModel for testing purposes."""
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import BaseMessage
[docs]class FakeListChatM... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/fake.html |
d76a6ac8b2cf-1 | ) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
response = self.responses[self.i]
self.i += 1
return response
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"responses": self.responses} | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/fake.html |
d104e029b59f-0 | Source code for langchain.chat_models.openai
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from pydantic import Field, root_validator
from tenac... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-1 | ChatResult,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
def _import_tiktoken() -> Any:
try:
import tiktoken
except ImportError:
raise ValueError(
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-2 | # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| ... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
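A standalone sketch of the tenacity policy this chunk shows, assuming a pre-1.0 `openai` package (which exposes `openai.error`) and `tenacity` are installed; the attempt count and wait bounds stand in for the instance's `max_retries`, `min_seconds`, and `max_seconds`:

```python
import openai
from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

@retry(
    reraise=True,
    stop=stop_after_attempt(6),  # stands in for llm.max_retries
    wait=wait_exponential(multiplier=1, min=1, max=60),
    retry=(
        retry_if_exception_type(openai.error.Timeout)
        | retry_if_exception_type(openai.error.APIError)
    ),
)
def completion_with_retry(**kwargs):
    # Each failed call waits exponentially longer before the next attempt.
    return openai.ChatCompletion.create(**kwargs)
```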
d104e029b59f-3 | retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _conv... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-4 | else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-5 | message_dict["function_call"] = message.additional_kwargs["function_call"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": mes... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-6 | Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
@property
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-7 | """What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service e... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-8 | n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of to... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-9 | when tiktoken is called, you can specify a model name to use here."""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-10 | )
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead t... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-11 | default="",
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_P... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-12 | "with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-13 | min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, starting at
# min_seconds (1s) and capping at max_seconds (60s)
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multipli... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-14 | )
[docs] def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-15 | overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Ch... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-16 | token = stream_resp["choices"][0]["delta"].get("content") or ""
inner_completion += token
_function_call = stream_resp["choices"][0]["delta"].get("function_call")
if _function_call:
if function_call is None:
function_call = _fun... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-17 | def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the in... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-18 | generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_mana... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-19 | self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token or ""
_function_call = stream_resp["choices"][0]["delt... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-20 | else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"mo... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-21 | import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "openai-chat"
def _... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-22 | # Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used b... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-23 | """Get the tokens present in the text with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
[docs] def get... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-24 | if model.startswith("gpt-3.5-turbo"):
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model.startswith("gpt-4"):
tokens_per_message = 3
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
d104e029b59f-25 | for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
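A self-contained version of the per-message accounting sketched in the chunks above, assuming `tiktoken` is installed; the constants match the gpt-3.5-turbo-0301 scheme shown:

```python
import tiktoken

def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
    encoding = tiktoken.encoding_for_model(model)
    tokens_per_message = 4  # <im_start>{role/name}\n{content}<im_end>\n
    tokens_per_name = -1    # if a name is present, the role is omitted
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <im_start>assistant
    return num_tokens

print(num_tokens_from_messages([{"role": "user", "content": "hello"}]))
```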
74055a3aa969-0 | Source code for langchain.chat_models.anthropic
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langch... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
74055a3aa969-1 | Example:
.. code-block:: python
import anthropic
from langchain.llms import Anthropic
model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "ant... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
74055a3aa969-2 | elif isinstance(message, SystemMessage):
message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
"""Format... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
74055a3aa969-3 | Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
messages = messages.copy() # don't mutate the original list
if not self.AI_PROMPT:
raise NameError("P... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
74055a3aa969-4 | **kwargs: Any,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_res... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
74055a3aa969-5 | self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._de... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
74055a3aa969-6 | )
else:
response = await self.client.acompletion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
[docs] def get_num_tokens(self, text: str) -> int:
"""Calcula... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
9967f0849c53-0 | Source code for langchain.chat_models.google_palm
"""Wrapper around Google's PaLM Chat API."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional
from pydantic import BaseModel, root_validator
from tenacity import (
before_sleep_log,
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-1 | if TYPE_CHECKING:
import google.generativeai as genai
logger = logging.getLogger(__name__)
class ChatGooglePalmError(Exception):
"""Error raised when there is an issue with the Google PaLM API."""
pass
def _truncate_at_stop_tokens(
text: str,
stop: Optional[List[str]],
) -> str:
"""Truncates tex... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-2 | if not response.candidates:
raise ChatGooglePalmError("ChatResponse must have at least one candidate.")
generations: List[ChatGeneration] = []
for candidate in response.candidates:
author = candidate.get("author")
if author is None:
raise ChatGooglePalmError(f"ChatResponse mu... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-3 | )
)
else:
generations.append(
ChatGeneration(
text=content,
message=ChatMessage(role=author, content=content),
)
)
return ChatResult(generations=generations)
def _messages_to_prompt_dict(
input_me... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-4 | if index != 0:
raise ChatGooglePalmError("System message must be first input message.")
context = input_message.content
elif isinstance(input_message, HumanMessage) and input_message.example:
if messages:
raise ChatGooglePalmError(
"Mes... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-5 | " AI example response."
)
elif isinstance(input_message, AIMessage) and input_message.example:
raise ChatGooglePalmError(
"AI example message must be immediately preceded by a Human "
"example message."
)
elif isinstance(input_messa... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-6 | )
return genai.types.MessagePromptDict(
context=context,
examples=examples,
messages=messages,
)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multipli... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-7 | ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _chat_with_retry(**kwargs: Any) -> Any:
retur... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-8 | return await llm.client.chat_async(**kwargs)
return await _achat_with_retry(**kwargs)
[docs]class ChatGooglePalm(BaseChatModel, BaseModel):
"""Wrapper around Google's PaLM Chat API.
To use you must have the google.generativeai Python package installed and
either:
1. The ``GOOGLE_API_KEY`` envir... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-9 | temperature: Optional[float] = None
"""Run inference with this temperature. Must by in the closed
interval [0.0, 1.0]."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval ... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-10 | """Validate api key, python package exists, temperature, top_p, and top_k."""
google_api_key = get_from_dict_or_env(
values, "google_api_key", "GOOGLE_API_KEY"
)
try:
import google.generativeai as genai
genai.configure(api_key=google_api_key)
except Im... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-11 | if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
return values
def _generate(
self,
m... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-12 | top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
**kwargs,
)
return _response_to_result(response, stop)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[Asyn... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
9967f0849c53-13 | @property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
ff8869b9ce2d-0 | Source code for langchain.chat_models.promptlayer_openai
"""PromptLayer wrapper."""
import datetime
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models import ChatOpenAI
from langchain.sch... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
ff8869b9ce2d-1 | be passed here. The PromptLayerChatOpenAI adds to optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
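A hedged usage sketch for the two optional parameters described above; assumes the `promptlayer` package is installed and both `PROMPTLAYER_API_KEY` and `OPENAI_API_KEY` are set:

```python
from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

chat = PromptLayerChatOpenAI(pl_tags=["langchain"], return_pl_id=True)
result = chat.generate([[HumanMessage(content="What is 2 + 2?")]])
for generation in result.generations[0]:
    # pl_request_id is attached because return_pl_id=True.
    print(generation.generation_info["pl_request_id"])
```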
ff8869b9ce2d-2 | **kwargs: Any
) -> ChatResult:
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
ff8869b9ce2d-3 | response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.gene... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
ff8869b9ce2d-4 | request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(messages, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
ff8869b9ce2d-5 | if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
db3245ca1398-0 | Source code for langchain.chat_models.vertexai
"""Wrapper around Google VertexAI chat-based models."""
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManage... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
db3245ca1398-1 | answer: AIMessage
@dataclass
class _ChatHistory:
"""InputOutputTextPair represents a pair of input and output texts."""
history: List[_MessagePair] = field(default_factory=list)
system_message: Optional[SystemMessage] = None
def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory:
"""Parse a... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
db3245ca1398-2 | by a message from AI (e.g., Human, Human, AI or AI, AI, Human).
"""
if not history:
return _ChatHistory()
first_message = history[0]
system_message = first_message if isinstance(first_message, SystemMessage) else None
chat_history = _ChatHistory(system_message=system_message)
messages_le... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
db3245ca1398-3 | f"got {question.type}, {answer.type}."
)
chat_history.history.append(_MessagePair(question=question, answer=answer))
return chat_history
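A hypothetical standalone version of the pairing rule `_parse_chat_history` enforces above: an optional leading system message, followed by strictly alternating human/AI turns:

```python
def parse_chat_history(history):
    """history: list of (role, content) tuples; returns (system, pairs)."""
    system = None
    if history and history[0][0] == "system":
        system = history[0][1]
        history = history[1:]
    if len(history) % 2 != 0:
        raise ValueError("Each human message needs a matching AI answer.")
    pairs = []
    for (q_role, q), (a_role, a) in zip(history[0::2], history[1::2]):
        if (q_role, a_role) != ("human", "ai"):
            raise ValueError(
                f"A human message should be followed by an AI answer, "
                f"got {q_role}, {a_role}."
            )
        pairs.append((q, a))
    return system, pairs
```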
[docs]class ChatVertexAI(_VertexAICommon, BaseChatModel):
"""Wrapper around Vertex AI large language models."""
model_name: str = "chat-bison"
@roo... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
db3245ca1398-4 | except ImportError:
raise_vertex_import_error()
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Generat... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
db3245ca1398-5 | raise ValueError(
"You should provide at least one message to start the chat!"
)
question = messages[-1]
if not isinstance(question, HumanMessage):
raise ValueError(
f"Last message in the list should be from human, got {question.type}."
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
db3245ca1398-6 | return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
c125cfb3b5c4-0 | Source code for langchain.tools.base
"""Base implementation for tools or skills."""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Type, Union
from pydantic import (
BaseModel,
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-1 | class ToolMetaclass(ModelMetaclass):
"""Metaclass for BaseTool to ensure the provided args_schema
isn't silently ignored."""
def __new__(
cls: Type[ToolMetaclass], name: str, bases: Tuple[Type, ...], dct: dict
) -> ToolMetaclass:
"""Create the definition of the new tool class."""
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-2 | class ChildTool(BaseTool):
...
args_schema: Type[BaseModel] = SchemaClass
..."""
raise SchemaAnnotationError(
f"Tool definition for {name} must include valid type annotations"
f" for argument 'args_schema' to behave as expected.\n"
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-3 | fields = {}
for field_name in field_names:
field = model.__fields__[field_name]
fields[field_name] = (field.type_, field.field_info)
return create_model(name, **fields) # type: ignore
def _get_filtered_args(
inferred_model: Type[BaseModel],
func: Callable,
) -> dict:
"""Get the argu... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-4 | ) -> Type[BaseModel]:
"""Create a pydantic schema from a function's signature.
Args:
model_name: Name to assign to the generated pydantic schema
func: Function to generate the schema from
Returns:
A pydantic model with the same arguments as the function
"""
# https://docs.pyd... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-5 | return _create_subset_model(
f"{model_name}Schema", inferred_model, list(valid_properties)
)
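The helpers above infer a pydantic schema from a function's signature. A minimal sketch of the same idea, using pydantic v1's `create_model` (the helper name is hypothetical):

```python
from inspect import Parameter, signature
from pydantic import create_model

def make_args_schema(name, func):
    """Build a pydantic model whose fields mirror func's annotated arguments."""
    fields = {}
    for param in signature(func).parameters.values():
        # Ellipsis marks the field as required when there is no default.
        default = ... if param.default is Parameter.empty else param.default
        fields[param.name] = (param.annotation, default)
    return create_model(name, **fields)

def multiply(a: int, b: int = 2) -> int:
    return a * b

MultiplySchema = make_args_schema("MultiplySchema", multiply)
print(MultiplySchema(a=3).dict())  # {'a': 3, 'b': 2}
```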
class ToolException(Exception):
"""An optional exception that tool throws when execution error occurs.
When this exception is thrown, the agent will not stop working,
but will handle the exception accord... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-6 | """
args_schema: Optional[Type[BaseModel]] = None
"""Pydantic model class to validate and parse the tool's input arguments."""
return_direct: bool = False
"""Whether to return the tool's output directly. Setting this to True means
that after the tool is called, the AgentExecutor will stop loopi... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-7 | class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
keys = {k for k in self.args if k != "kwargs"}
return l... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-8 | input_args = self.args_schema
if isinstance(tool_input, str):
if input_args is not None:
key_ = next(iter(input_args.__fields__.keys()))
input_args.validate({key_: tool_input})
return tool_input
else:
if input_args is not None:
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-9 | return values
@abstractmethod
def _run(
self,
*args: Any,
**kwargs: Any,
) -> Any:
"""Use the tool.
Add run_manager: Optional[CallbackManagerForToolRun] = None
to child implementations to enable tracing,
"""
@abstractmethod
async def _arun(
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-10 | # pass as a positional argument.
if isinstance(tool_input, str):
return (tool_input,), {}
else:
return (), tool_input
[docs] def run(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |
c125cfb3b5c4-11 | )
# TODO: maybe also pass through run_manager if _run supports kwargs
new_arg_supported = signature(self._run).parameters.get("run_manager")
run_manager = callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_i... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html |