text: string (lengths 3 – 1.68M)
id: string (lengths 13 – 169)
metadata: dict
__index_level_0__: int64 (0 – 2.21k)
"""In memory store that is not thread safe and has no eviction policy. This is a simple implementation of the BaseStore using a dictionary that is useful primarily for unit testing purposes. """ from typing import ( Any, AsyncIterator, Dict, Generic, Iterator, List, Optional, Sequence, Tuple, TypeVar, ) from langchain_core.stores import BaseStore V = TypeVar("V") class InMemoryBaseStore(BaseStore[str, V], Generic[V]): """In-memory implementation of the BaseStore using a dictionary. Attributes: store (Dict[str, Any]): The underlying dictionary that stores the key-value pairs. Examples: .. code-block:: python from langchain.storage import InMemoryStore store = InMemoryStore() store.mset([('key1', 'value1'), ('key2', 'value2')]) store.mget(['key1', 'key2']) # ['value1', 'value2'] store.mdelete(['key1']) list(store.yield_keys()) # ['key2'] list(store.yield_keys(prefix='k')) # ['key2'] """ def __init__(self) -> None: """Initialize an empty store.""" self.store: Dict[str, V] = {} def mget(self, keys: Sequence[str]) -> List[Optional[V]]: """Get the values associated with the given keys. Args: keys (Sequence[str]): A sequence of keys. Returns: A sequence of optional values associated with the keys. If a key is not found, the corresponding value will be None. """ return [self.store.get(key) for key in keys] async def amget(self, keys: Sequence[str]) -> List[Optional[V]]: """Get the values associated with the given keys. Args: keys (Sequence[str]): A sequence of keys. Returns: A sequence of optional values associated with the keys. If a key is not found, the corresponding value will be None. """ return self.mget(keys) def mset(self, key_value_pairs: Sequence[Tuple[str, V]]) -> None: """Set the values for the given keys. Args: key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs. Returns: None """ for key, value in key_value_pairs: self.store[key] = value async def amset(self, key_value_pairs: Sequence[Tuple[str, V]]) -> None: """Set the values for the given keys. Args: key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs. Returns: None """ return self.mset(key_value_pairs) def mdelete(self, keys: Sequence[str]) -> None: """Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. """ for key in keys: if key in self.store: del self.store[key] async def amdelete(self, keys: Sequence[str]) -> None: """Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. """ self.mdelete(keys) def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]: """Get an iterator over keys that match the given prefix. Args: prefix (str, optional): The prefix to match. Defaults to None. Returns: Iterator[str]: An iterator over keys that match the given prefix. """ if prefix is None: yield from self.store.keys() else: for key in self.store.keys(): if key.startswith(prefix): yield key async def ayield_keys(self, prefix: Optional[str] = None) -> AsyncIterator[str]: """Get an async iterator over keys that match the given prefix. Args: prefix (str, optional): The prefix to match. Defaults to None. Returns: AsyncIterator[str]: An async iterator over keys that match the given prefix. """ if prefix is None: for key in self.store.keys(): yield key else: for key in self.store.keys(): if key.startswith(prefix): yield key InMemoryStore = InMemoryBaseStore[Any] InMemoryByteStore = InMemoryBaseStore[bytes]
langchain/libs/langchain/langchain/storage/in_memory.py/0
{ "file_path": "langchain/libs/langchain/langchain/storage/in_memory.py", "repo_id": "langchain", "token_count": 1942 }
543
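The row above is the full source of LangChain's in-memory key-value store. As a quick illustration of how its sync and async APIs mirror each other, here is a minimal sketch that exercises both; it assumes the `langchain` package is installed and that `InMemoryStore` is importable from `langchain.storage`, as the module's own docstring shows.

```python
import asyncio

from langchain.storage import InMemoryStore  # InMemoryBaseStore[Any], per the source above

store = InMemoryStore()

# Synchronous batch API: set, get, enumerate, delete.
store.mset([("user:1", {"name": "Ada"}), ("user:2", {"name": "Grace"})])
print(store.mget(["user:1", "missing"]))        # [{'name': 'Ada'}, None]
print(sorted(store.yield_keys(prefix="user")))  # ['user:1', 'user:2']
store.mdelete(["user:1"])


# The async methods simply delegate to the sync ones, so they are safe to call
# from an event loop even though no real I/O happens.
async def main() -> None:
    await store.amset([("session:abc", b"opaque-bytes")])
    values = await store.amget(["session:abc", "user:2"])
    print(values)
    keys = [key async for key in store.ayield_keys()]
    print(sorted(keys))


asyncio.run(main())
```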
---
sidebar_class_name: node-only
---

# Llama CPP

:::tip Compatibility
Only available on Node.js.
:::

This module is based on the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) Node.js bindings for [llama.cpp](https://github.com/ggerganov/llama.cpp), allowing you to work with a locally running LLM. This allows you to work with a much smaller quantized model capable of running on a laptop environment, ideal for testing and scratch padding ideas without running up a bill!

## Setup

You'll need to install the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) module to communicate with your local model.

```bash npm2yarn
npm install -S node-llama-cpp
```

import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";

<IntegrationInstallTooltip></IntegrationInstallTooltip>

```bash npm2yarn
npm install @langchain/community
```

You will also need a local Llama 2 model (or a model supported by [node-llama-cpp](https://github.com/withcatai/node-llama-cpp)). You will need to pass the path to this model to the LlamaCpp module as a part of the parameters (see example).

Out of the box, `node-llama-cpp` is tuned for running on a macOS platform with support for the Metal GPU of Apple M-series processors. If you need to turn this off, or need support for the CUDA architecture, refer to the documentation at [node-llama-cpp](https://withcatai.github.io/node-llama-cpp/).

For advice on getting and preparing `llama2`, see the documentation for the LLM version of this module.

A note to LangChain.js contributors: if you want to run the tests associated with this module, you will need to put the path to your local model in the environment variable `LLAMA_PATH`.

## Usage

### Basic use

We need to provide a path to our local Llama2 model; also note that the `embeddings` property is always set to `true` in this module.

import CodeBlock from "@theme/CodeBlock";
import BasicExample from "@examples/embeddings/llama_cpp_basic.ts";

<CodeBlock language="typescript">{BasicExample}</CodeBlock>

### Document embedding

import DocsExample from "@examples/embeddings/llama_cpp_docs.ts";

<CodeBlock language="typescript">{DocsExample}</CodeBlock>
langchainjs/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx/0
{ "file_path": "langchainjs/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx", "repo_id": "langchainjs", "token_count": 649 }
743
python_test_utils(
    name="test_utils",
)

python_tests(
    name="tests",
)

python_sources()
llama_index/llama-index-core/tests/indices/tree/BUILD/0
{ "file_path": "llama_index/llama-index-core/tests/indices/tree/BUILD", "repo_id": "llama_index", "token_count": 42 }
1,159
from llama_index.packs.gradio_react_agent_chatbot.base import GradioReActAgentPack

__all__ = ["GradioReActAgentPack"]
llama_index/llama-index-packs/llama-index-packs-gradio-react-agent-chatbot/llama_index/packs/gradio_react_agent_chatbot/__init__.py/0
{ "file_path": "llama_index/llama-index-packs/llama-index-packs-gradio-react-agent-chatbot/llama_index/packs/gradio_react_agent_chatbot/__init__.py", "repo_id": "llama_index", "token_count": 41 }
1,673
import base64 import json import logging import subprocess import textwrap import time from typing import Any, Dict, List, Mapping, Optional import requests from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models.llms import LLM from langchain_core.pydantic_v1 import Extra, Field, root_validator from langchain_core.utils import get_from_dict_or_env logger = logging.getLogger(__name__) DEFAULT_NUM_TRIES = 10 DEFAULT_SLEEP_TIME = 4 class Beam(LLM): """Beam API for gpt2 large language model. To use, you should have the ``beam-sdk`` python package installed, and the environment variable ``BEAM_CLIENT_ID`` set with your client id and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how to get this is available here: https://docs.beam.cloud/account/api-keys. The wrapper can then be called as follows, where the name, cpu, memory, gpu, python version, and python packages can be updated accordingly. Once deployed, the instance can be called. Example: .. code-block:: python llm = Beam(model_name="gpt2", name="langchain-gpt2", cpu=8, memory="32Gi", gpu="A10G", python_version="python3.8", python_packages=[ "diffusers[torch]>=0.10", "transformers", "torch", "pillow", "accelerate", "safetensors", "xformers",], max_length=50) llm._deploy() call_result = llm._call(input) """ model_name: str = "" name: str = "" cpu: str = "" memory: str = "" gpu: str = "" python_version: str = "" python_packages: List[str] = [] max_length: str = "" url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" beam_client_id: str = "" beam_client_secret: str = "" app_id: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. 
Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" beam_client_id = get_from_dict_or_env( values, "beam_client_id", "BEAM_CLIENT_ID" ) beam_client_secret = get_from_dict_or_env( values, "beam_client_secret", "BEAM_CLIENT_SECRET" ) values["beam_client_id"] = beam_client_id values["beam_client_secret"] = beam_client_secret return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model_name, "name": self.name, "cpu": self.cpu, "memory": self.memory, "gpu": self.gpu, "python_version": self.python_version, "python_packages": self.python_packages, "max_length": self.max_length, "model_kwargs": self.model_kwargs, } @property def _llm_type(self) -> str: """Return type of llm.""" return "beam" def app_creation(self) -> None: """Creates a Python file which will contain your Beam app definition.""" script = textwrap.dedent( """\ import beam # The environment your code will run on app = beam.App( name="{name}", cpu={cpu}, memory="{memory}", gpu="{gpu}", python_version="{python_version}", python_packages={python_packages}, ) app.Trigger.RestAPI( inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}}, outputs={{"text": beam.Types.String()}}, handler="run.py:beam_langchain", ) """ ) script_name = "app.py" with open(script_name, "w") as file: file.write( script.format( name=self.name, cpu=self.cpu, memory=self.memory, gpu=self.gpu, python_version=self.python_version, python_packages=self.python_packages, ) ) def run_creation(self) -> None: """Creates a Python file which will be deployed on beam.""" script = textwrap.dedent( """ import os import transformers from transformers import GPT2LMHeadModel, GPT2Tokenizer model_name = "{model_name}" def beam_langchain(**inputs): prompt = inputs["prompt"] length = inputs["max_length"] tokenizer = GPT2Tokenizer.from_pretrained(model_name) model = GPT2LMHeadModel.from_pretrained(model_name) encodedPrompt = tokenizer.encode(prompt, return_tensors='pt') outputs = model.generate(encodedPrompt, max_length=int(length), do_sample=True, pad_token_id=tokenizer.eos_token_id) output = tokenizer.decode(outputs[0], skip_special_tokens=True) print(output) # noqa: T201 return {{"text": output}} """ ) script_name = "run.py" with open(script_name, "w") as file: file.write(script.format(model_name=self.model_name)) def _deploy(self) -> str: """Call to Beam.""" try: import beam # type: ignore if beam.__path__ == "": raise ImportError except ImportError: raise ImportError( "Could not import beam python package. " "Please install it with `curl " "https://raw.githubusercontent.com/slai-labs" "/get-beam/main/get-beam.sh -sSfL | sh`." ) self.app_creation() self.run_creation() process = subprocess.run( "beam deploy app.py", shell=True, capture_output=True, text=True ) if process.returncode == 0: output = process.stdout logger.info(output) lines = output.split("\n") for line in lines: if line.startswith(" i Send requests to: https://apps.beam.cloud/"): self.app_id = line.split("/")[-1] self.url = line.split(":")[1].strip() return self.app_id raise ValueError( f"""Failed to retrieve the appID from the deployment output. Deployment output: {output}""" ) else: raise ValueError(f"Deployment failed. 
Error: {process.stderr}") @property def authorization(self) -> str: if self.beam_client_id: credential_str = self.beam_client_id + ":" + self.beam_client_secret else: credential_str = self.beam_client_secret return base64.b64encode(credential_str.encode()).decode() def _call( self, prompt: str, stop: Optional[list] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call to Beam.""" url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url payload = {"prompt": prompt, "max_length": self.max_length} payload.update(kwargs) headers = { "Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Authorization": "Basic " + self.authorization, "Connection": "keep-alive", "Content-Type": "application/json", } for _ in range(DEFAULT_NUM_TRIES): request = requests.post(url, headers=headers, data=json.dumps(payload)) if request.status_code == 200: return request.json()["text"] time.sleep(DEFAULT_SLEEP_TIME) logger.warning("Unable to successfully call model.") return ""
langchain/libs/community/langchain_community/llms/beam.py/0
{ "file_path": "langchain/libs/community/langchain_community/llms/beam.py", "repo_id": "langchain", "token_count": 4328 }
275
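The `_call` method in the Beam wrapper above reduces to a small pattern: build a Basic-auth header from the client id and secret, then POST the payload up to `DEFAULT_NUM_TRIES` times with a fixed sleep between attempts. Here is a stripped-down sketch of that pattern using only `requests`; the URL and credentials in the usage comment are placeholders, not a real Beam endpoint.

```python
import base64
import json
import time
from typing import Optional

import requests

MAX_TRIES = 10     # mirrors DEFAULT_NUM_TRIES above
SLEEP_SECONDS = 4  # mirrors DEFAULT_SLEEP_TIME above


def call_with_retries(url: str, client_id: str, client_secret: str, prompt: str) -> Optional[str]:
    """POST a prompt to an HTTP endpoint, retrying on non-200 responses."""
    credentials = base64.b64encode(f"{client_id}:{client_secret}".encode()).decode()
    headers = {
        "Authorization": "Basic " + credentials,
        "Content-Type": "application/json",
    }
    payload = {"prompt": prompt, "max_length": "50"}

    for _ in range(MAX_TRIES):
        response = requests.post(url, headers=headers, data=json.dumps(payload))
        if response.status_code == 200:
            return response.json()["text"]
        time.sleep(SLEEP_SECONDS)
    return None  # the original logs a warning and returns "" here


# Hypothetical usage -- the URL and credentials are placeholders:
# text = call_with_retries("https://apps.beam.cloud/<app-id>", "my-id", "my-secret", "Hello")
```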
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 调试 ## 多GPU网络问题调试 当使用`DistributedDataParallel`和多个GPU进行训练或推理时,如果遇到进程和(或)节点之间的互联问题,您可以使用以下脚本来诊断网络问题。 ```bash wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py ``` 例如,要测试两个GPU之间的互联,请执行以下操作: ```bash python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` 如果两个进程能够相互通信并分配GPU内存,它们各自将打印出 "OK" 状态。 对于更多的GPU或节点,可以根据脚本中的参数进行调整。 在诊断脚本内部,您将找到更多详细信息,甚至有关如何在SLURM环境中运行它的说明。 另一种级别的调试是添加 `NCCL_DEBUG=INFO` 环境变量,如下所示: ```bash NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` 这将产生大量与NCCL相关的调试信息,如果发现有问题报告,您可以在线搜索以获取相关信息。或者,如果您不确定如何解释输出,可以在`issue`中分享日志文件。 ## 下溢和上溢检测 <Tip> 目前,此功能仅适用于PyTorch。 </Tip> <Tip> 对于多GPU训练,它需要使用DDP(`torch.distributed.launch`)。 </Tip> <Tip> 此功能可以与任何基于`nn.Module`的模型一起使用。 </Tip> 如果您开始发现`loss=NaN`或模型因激活值或权重中的`inf`或`nan`而出现一些异常行为,就需要发现第一个下溢或上溢发生的地方以及导致它的原因。幸运的是,您可以通过激活一个特殊模块来自动进行检测。 如果您正在使用[`Trainer`],只需把以下内容: ```bash --debug underflow_overflow ``` 添加到常规命令行参数中,或在创建[`TrainingArguments`]对象时传递 `debug="underflow_overflow"`。 如果您正在使用自己的训练循环或其他Trainer,您可以通过以下方式实现相同的功能: ```python from transformers.debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model) ``` [`debug_utils.DebugUnderflowOverflow`] 将`hooks`插入模型,紧跟在每次前向调用之后,进而测试输入和输出变量,以及相应模块的权重。一旦在激活值或权重的至少一个元素中检测到`inf`或`nan`,程序将执行`assert`并打印报告,就像这样(这是在`google/mt5-small`下使用fp16混合精度捕获的): ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata encoder.block.1.layer.1.DenseReluDense.dropout Dropout 0.00e+00 2.57e+02 input[0] 0.00e+00 2.85e+02 output [...] 
encoder.block.2.layer.0 T5LayerSelfAttention 6.78e-04 3.15e+03 input[0] 2.65e-04 3.42e+03 output[0] None output[1] 2.25e-01 1.00e+04 output[2] encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.dropout Dropout 0.00e+00 8.76e+03 input[0] 0.00e+00 9.74e+03 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` 由于篇幅原因,示例输出中间的部分已经被缩减。 第二列显示了绝对最大元素的值,因此,如果您仔细查看最后`frame`,输入和输出都在`1e4`的范围内。因此,在使用fp16混合精度进行训练时,最后一步发生了溢出(因为在`fp16`下,在`inf`之前的最大数字是`64e3`)。为了避免在`fp16`下发生溢出,激活值必须保持低于`1e4`,因为`1e4 * 1e4 = 1e8`,因此任何具有大激活值的矩阵乘法都会导致数值溢出。 在跟踪的开始处,您可以发现问题发生在哪个批次(这里的`Detected inf/nan during batch_number=0`表示问题发生在第一个批次)。 每个报告的`frame`都以声明相应模块的层信息为开头,说明这一`frame`是为哪个模块报告的。如果只看这个`frame`: ``` encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output ``` 在这里,`encoder.block.2.layer.1.layer_norm` 表示它是编码器的第二个块中第一层的`layer norm`。而 `forward` 的具体调用是 `T5LayerNorm`。 让我们看看该报告的最后几个`frame`: ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` 最后一个`frame`报告了`Dropout.forward`函数,第一个条目是唯一的输入,第二个条目是唯一的输出。您可以看到,它是从`DenseReluDense`类内的属性`dropout`中调用的。我们可以看到它发生在第2个块的第1层,也就是在第一个批次期间。最后,绝对最大的输入元素值为`6.27e+04`,输出也是`inf`。 您可以在这里看到,`T5DenseGatedGeluDense.forward`产生了输出激活值,其绝对最大值约为62.7K,非常接近fp16的上限64K。在下一个`frame`中,我们有`Dropout`对权重进行重新归一化,之后将某些元素归零,将绝对最大值推到了64K以上,导致溢出(`inf`)。 正如你所看到的,我们需要查看前面的`frame`, 从那里fp16数字开始变得非常大。 让我们将报告与`models/t5/modeling_t5.py`中的代码匹配: ```python class T5DenseGatedGeluDense(nn.Module): def __init__(self, config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states ``` 现在很容易看到`dropout`调用,以及所有之前的调用。 由于检测是在前向`hook`中进行的,这些报告将立即在每个`forward`返回后打印出来。 回到完整的报告,要采取措施并解决问题,我们需要往回看几个`frame`,在那里数字开始上升,并且最有可能切换到fp32模式以便在乘法或求和时数字不会溢出。当然,可能还有其他解决方案。例如,如果启用了`amp`,我们可以在将原始`forward`移到`helper wrapper`中后,暂时关闭它,如下所示: 
```python def _forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states import torch def forward(self, hidden_states): if torch.is_autocast_enabled(): with torch.cuda.amp.autocast(enabled=False): return self._forward(hidden_states) else: return self._forward(hidden_states) ``` 由于自动检测器仅报告完整`frame`的输入和输出,一旦知道在哪里查找,您可能还希望分析特定`forward`函数的中间阶段。在这种情况下,您可以使用`detect_overflow`辅助函数将检测器放到希望的位置,例如: ```python from debug_utils import detect_overflow class T5LayerFF(nn.Module): [...] def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) detect_overflow(forwarded_states, "after layer_norm") forwarded_states = self.DenseReluDense(forwarded_states) detect_overflow(forwarded_states, "after DenseReluDense") return hidden_states + self.dropout(forwarded_states) ``` 可以看到,我们添加了2个检测器,现在我们可以跟踪是否在`forwarded_states`中间的某个地方检测到了`inf`或`nan`。 实际上,检测器已经报告了这些,因为上面示例中的每个调用都是一个`nn.Module`,但假设如果您有一些本地的直接计算,这就是您将如何执行的方式。 此外,如果您在自己的代码中实例化调试器,您可以调整从其默认打印的`frame`数,例如: ```python from transformers.debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` ### 特定批次的绝对最小值和最大值跟踪 当关闭下溢/上溢检测功能, 同样的调试类可以用于批处理跟踪。 假设您想要监视给定批次的每个`forward`调用的所有成分的绝对最小值和最大值,并且仅对批次1和3执行此操作,您可以这样实例化这个类: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) ``` 现在,完整的批次1和3将以与下溢/上溢检测器相同的格式进行跟踪。 批次从0开始计数。 如果您知道程序在某个批次编号之后开始出现问题,那么您可以直接快进到该区域。以下是一个截取的配置示例输出: ``` *** Starting batch number=1 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.47e+04 input[0] 5.36e-05 7.92e+02 output [...] decoder.dropout Dropout 1.60e-07 2.27e+01 input[0] 0.00e+00 2.52e+01 output decoder T5Stack not a tensor output lm_head Linear 1.01e-06 7.92e+02 weight 0.00e+00 1.11e+00 input[0] 6.06e-02 8.39e+01 output T5ForConditionalGeneration not a tensor output *** Starting batch number=3 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.78e+04 input[0] 5.36e-05 7.92e+02 output [...] ``` 在这里,您将获得大量的`frame`被`dump` - 与您的模型中的前向调用一样多,它有可能符合也可能不符合您的要求,但有时对于调试目的来说,它可能比正常的调试器更容易使用。例如,如果问题开始发生在批次号150上,您可以`dump`批次149和150的跟踪,并比较数字开始发散的地方。 你还可以使用以下命令指定停止训练的批次号: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3) ```
transformers/docs/source/zh/debugging.md/0
{ "file_path": "transformers/docs/source/zh/debugging.md", "repo_id": "transformers", "token_count": 7278 }
561
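The row above is the Chinese translation of the Transformers debugging guide; the core of its underflow/overflow detector is a forward hook that inspects each module's inputs and outputs for `inf`/`nan` and records their absolute min/max, producing report lines like the ones quoted in the doc. Below is a minimal sketch of that idea in plain PyTorch — not the actual `DebugUnderflowOverflow` implementation, just the underlying mechanism.

```python
import torch
from torch import nn


def report_tensor(name: str, tensor: torch.Tensor) -> None:
    """Print abs min/max for a tensor and flag inf/nan, one line per tensor."""
    abs_vals = tensor.detach().float().abs()
    flag = "" if bool(torch.isfinite(tensor).all()) else "  <-- inf/nan detected"
    print(f"{abs_vals.min().item():.2e} {abs_vals.max().item():.2e} {name}{flag}")


def attach_overflow_hooks(model: nn.Module) -> None:
    """Register a forward hook on every submodule to inspect inputs and outputs."""

    def hook(module: nn.Module, inputs, output) -> None:
        tensors = [t for t in inputs if isinstance(t, torch.Tensor)]
        for i, tensor in enumerate(tensors):
            report_tensor(f"{module.__class__.__name__} input[{i}]", tensor)
        if isinstance(output, torch.Tensor):
            report_tensor(f"{module.__class__.__name__} output", output)

    for submodule in model.modules():
        submodule.register_forward_hook(hook)


# Toy usage: every submodule (and the Sequential itself) prints one report line per tensor.
model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
attach_overflow_hooks(model)
model(torch.randn(2, 4))
```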
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPTextConfig, CLIPTextModel, CLIPTokenizer, DPTConfig, DPTFeatureExtractor, DPTForDepthEstimation, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionDepth2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils import is_accelerate_available, is_accelerate_version from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionDepth2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionDepth2ImgPipeline test_save_load_optional_components = False params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"depth_mask"}) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=5, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } depth_estimator_config = DPTConfig( image_size=32, 
patch_size=16, num_channels=3, hidden_size=32, num_hidden_layers=4, backbone_out_indices=(0, 1, 2, 3), num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, is_decoder=False, initializer_range=0.02, is_hybrid=True, backbone_config=backbone_config, backbone_featmap_shape=[1, 384, 24, 24], ) depth_estimator = DPTForDepthEstimation(depth_estimator_config).eval() feature_extractor = DPTFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-DPTForDepthEstimation" ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "depth_estimator": depth_estimator, "feature_extractor": feature_extractor, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_save_load_local(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) for name, module in components.items(): if hasattr(module, "half"): components[name] = module.half() pipe_fp16 = self.pipeline_class(**components) pipe_fp16.to(torch_device) 
pipe_fp16.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0] max_diff = np.abs(output - output_fp16).max() self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher", ) def test_cpu_offload_forward_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-4, "CPU offloading should not affect the inference results") def test_dict_tuple_outputs_equivalent(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0] max_diff = np.abs(output - output_tuple).max() self.assertLess(max_diff, 1e-4) def test_progress_bar(self): super().test_progress_bar() def test_stable_diffusion_depth2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6071, 0.5035, 0.4378, 0.5776, 0.5753, 0.4316, 0.4513, 0.5263, 0.4546]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6296, 0.5125, 0.3890, 0.4456, 0.5955, 0.4621, 0.3810, 0.5310, 0.4626]) else: expected_slice = np.array([0.6012, 0.4507, 0.3769, 0.4121, 0.5566, 0.4585, 0.3803, 0.5045, 0.4631]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = 2 * [inputs["image"]] image = pipe(**inputs).images 
image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6501, 0.5150, 0.4939, 0.6688, 0.5437, 0.5758, 0.5115, 0.4406, 0.4551]) else: expected_slice = np.array([0.6557, 0.6214, 0.6254, 0.5775, 0.4785, 0.5949, 0.5904, 0.4785, 0.4730]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_pil(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] if torch_device == "mps": expected_slice = np.array([0.53232, 0.47015, 0.40868, 0.45651, 0.4891, 0.4668, 0.4287, 0.48822, 0.47439]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) @slow @require_torch_gpu class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_depth2img_pipeline_default(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(expected_slice - image_slice).max() < 6e-1 def test_stable_diffusion_depth2img_pipeline_k_lms(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.6363, 0.6274, 0.6309, 0.6370, 0.6226, 0.6286, 0.6213, 0.6453, 0.6306]) assert np.abs(expected_slice - image_slice).max() < 8e-4 def test_stable_diffusion_depth2img_pipeline_ddim(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = 
self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.6424, 0.6524, 0.6249, 0.6041, 0.6634, 0.6420, 0.6522, 0.6555, 0.6436]) assert np.abs(expected_slice - image_slice).max() < 5e-4 def test_stable_diffusion_depth2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 60, 80) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.7168, -1.5137, -0.1418, -2.9219, -2.7266, -2.4414, -2.1035, -3.0078, -1.7051] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 60, 80) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.7109, -1.5068, -0.1403, -2.9160, -2.7207, -2.4414, -2.1035, -3.0059, -1.7090] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 2 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9 @nightly @require_torch_gpu class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_depth2img_pndm(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_depth2img_ddim(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = 
DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_lms(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_dpm(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() inputs["num_inference_steps"] = 30 image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py", "repo_id": "diffusers", "token_count": 11024 }
283
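Most assertions in the depth2img test file above follow one pattern: slice a few pixels (or latents) out of the generated array and compare them against a hard-coded reference slice with a max-absolute-difference tolerance. A tiny self-contained sketch of that check, using made-up numbers rather than real pipeline output:

```python
import numpy as np

# Pretend this came out of a pipeline: batch of 1 image, 32x32 pixels, 3 channels.
image = np.random.RandomState(0).rand(1, 32, 32, 3).astype(np.float32)

# Take the bottom-right 3x3 patch of the last channel, as the tests above do.
image_slice = image[0, -3:, -3:, -1]

# In the real tests this is a recorded reference; here we reuse the slice and
# perturb it slightly so the assertion has something to measure.
expected_slice = image_slice + 1e-4

max_diff = np.abs(image_slice.flatten() - expected_slice.flatten()).max()
assert max_diff < 1e-3, f"slice drifted by {max_diff:.2e}"
```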
"""Video audio parser. Contains parsers for mp3, mp4 files. """ from pathlib import Path from typing import Any, Dict, List, Optional, cast from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class VideoAudioReader(BaseReader): """Video audio parser. Extract text from transcript of video/audio files. """ def __init__(self, *args: Any, model_version: str = "base", **kwargs: Any) -> None: """Init parser.""" super().__init__(*args, **kwargs) self._model_version = model_version try: import whisper except ImportError: raise ImportError( "Please install OpenAI whisper model " "'pip install git+https://github.com/openai/whisper.git' " "to use the model" ) model = whisper.load_model(self._model_version) self.parser_config = {"model": model} def load_data( self, file: Path, extra_info: Optional[Dict] = None ) -> List[Document]: """Parse file.""" import whisper if file.name.endswith("mp4"): try: from pydub import AudioSegment except ImportError: raise ImportError("Please install pydub 'pip install pydub' ") # open file video = AudioSegment.from_file(file, format="mp4") # Extract audio from video audio = video.split_to_mono()[0] file_str = str(file)[:-4] + ".mp3" # export file audio.export(file_str, format="mp3") model = cast(whisper.Whisper, self.parser_config["model"]) result = model.transcribe(str(file)) transcript = result["text"] return [Document(text=transcript, metadata=extra_info or {})]
llama_index/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/video_audio/base.py/0
{ "file_path": "llama_index/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/video_audio/base.py", "repo_id": "llama_index", "token_count": 806 }
1,371
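A minimal usage sketch for the reader above, assuming `openai-whisper` (and `pydub` for mp4 input) are installed and that a local `talk.mp4` exists — the filename is a placeholder, and the import path is the public export of the `llama-index-readers-file` package named in the row's metadata.

```python
from pathlib import Path

from llama_index.readers.file import VideoAudioReader

# Constructing the reader loads the chosen Whisper checkpoint ("base" here).
reader = VideoAudioReader(model_version="base")

# For .mp4 input the reader extracts an .mp3 track with pydub, then transcribes with Whisper.
documents = reader.load_data(Path("talk.mp4"))

print(documents[0].text[:200])  # first 200 characters of the transcript
print(documents[0].metadata)    # {} unless extra_info was supplied
```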
<jupyter_start><jupyter_text>Custom String Evaluator
[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/custom.ipynb)

You can make your own custom string evaluators by inheriting from the `StringEvaluator` class and implementing the `_evaluate_strings` (and `_aevaluate_strings` for async support) methods.

In this example, you will create a perplexity evaluator using the HuggingFace [evaluate](https://huggingface.co/docs/evaluate/index) library. [Perplexity](https://en.wikipedia.org/wiki/Perplexity) is a measure of how well the generated text would be predicted by the model used to compute the metric.<jupyter_code>%pip install --upgrade --quiet evaluate > /dev/null

from typing import Any, Optional

from evaluate import load
from langchain.evaluation import StringEvaluator


class PerplexityEvaluator(StringEvaluator):
    """Evaluate the perplexity of a predicted string."""

    def __init__(self, model_id: str = "gpt2"):
        self.model_id = model_id
        self.metric_fn = load(
            "perplexity", module_type="metric", model_id=self.model_id, pad_token=0
        )

    def _evaluate_strings(
        self,
        *,
        prediction: str,
        reference: Optional[str] = None,
        input: Optional[str] = None,
        **kwargs: Any,
    ) -> dict:
        results = self.metric_fn.compute(
            predictions=[prediction], model_id=self.model_id
        )
        ppl = results["perplexities"][0]
        return {"score": ppl}

evaluator = PerplexityEvaluator()

evaluator.evaluate_strings(prediction="The rains in Spain fall mainly on the plain.")

# The perplexity is much higher since LangChain was introduced after 'gpt-2' was released
# and because it is never used in the following context.
evaluator.evaluate_strings(prediction="The rains in Spain fall mainly on LangChain.")<jupyter_output>Using pad_token, but it is not set yet.
langchain/docs/docs/guides/evaluation/string/custom.ipynb/0
{ "file_path": "langchain/docs/docs/guides/evaluation/string/custom.ipynb", "repo_id": "langchain", "token_count": 676 }
86
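The notebook above shows the general recipe: subclass `StringEvaluator` and implement `_evaluate_strings`. As a second, dependency-free illustration of the same interface, here is a hypothetical exact-match evaluator (not part of LangChain) built the same way.

```python
from typing import Any, Optional

from langchain.evaluation import StringEvaluator


class ExactMatchEvaluator(StringEvaluator):
    """Score 1.0 when the prediction matches the reference exactly, else 0.0."""

    @property
    def requires_reference(self) -> bool:
        # This evaluator cannot score anything without a reference string.
        return True

    def _evaluate_strings(
        self,
        *,
        prediction: str,
        reference: Optional[str] = None,
        input: Optional[str] = None,
        **kwargs: Any,
    ) -> dict:
        score = float(prediction.strip() == (reference or "").strip())
        return {"score": score}


evaluator = ExactMatchEvaluator()
print(evaluator.evaluate_strings(prediction="Paris", reference="Paris"))   # {'score': 1.0}
print(evaluator.evaluate_strings(prediction="paris!", reference="Paris"))  # {'score': 0.0}
```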
from llama_index.core.llama_pack import BaseLlamaPack
from llama_index.packs.agent_search_retriever import AgentSearchRetrieverPack


def test_class():
    names_of_base_classes = [b.__name__ for b in AgentSearchRetrieverPack.__mro__]
    assert BaseLlamaPack.__name__ in names_of_base_classes
llama_index/llama-index-packs/llama-index-packs-agent-search-retriever/tests/test_packs_agent_search_retriever.py/0
{ "file_path": "llama_index/llama-index-packs/llama-index-packs-agent-search-retriever/tests/test_packs_agent_search_retriever.py", "repo_id": "llama_index", "token_count": 104 }
1,657
# coding=utf-8 # Copyright 2022 UW-Madison The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Nystromformer model.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_nystromformer import NystromformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "uw-madison/nystromformer-512" _CONFIG_FOR_DOC = "NystromformerConfig" NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "uw-madison/nystromformer-512", # See all Nyströmformer models at https://huggingface.co/models?filter=nystromformer ] class NystromformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, 
:seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class NystromformerSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.num_landmarks = config.num_landmarks self.seq_len = config.segment_means_seq_len self.conv_kernel_size = config.conv_kernel_size if config.inv_coeff_init_option: self.init_option = config["inv_init_coeff_option"] else: self.init_option = "original" self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.conv_kernel_size is not None: self.conv = nn.Conv2d( in_channels=self.num_attention_heads, out_channels=self.num_attention_heads, kernel_size=(self.conv_kernel_size, 1), padding=(self.conv_kernel_size // 2, 0), bias=False, groups=self.num_attention_heads, ) # Function to approximate Moore-Penrose inverse via the iterative method def iterative_inv(self, mat, n_iter=6): identity = torch.eye(mat.size(-1), device=mat.device) key = mat # The entries of key are positive and ||key||_{\infty} = 1 due to softmax if self.init_option == "original": # This original implementation is more conservative to compute coefficient of Z_0. value = 1 / torch.max(torch.sum(key, dim=-2)) * key.transpose(-1, -2) else: # This is the exact coefficient computation, 1 / ||key||_1, of initialization of Z_0, leading to faster convergence. 
value = 1 / torch.max(torch.sum(key, dim=-2), dim=-1).values[:, :, None, None] * key.transpose(-1, -2) for _ in range(n_iter): key_value = torch.matmul(key, value) value = torch.matmul( 0.25 * value, 13 * identity - torch.matmul(key_value, 15 * identity - torch.matmul(key_value, 7 * identity - key_value)), ) return value def transpose_for_scores(self, layer): new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size) layer = layer.view(*new_layer_shape) return layer.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) query_layer = query_layer / math.sqrt(math.sqrt(self.attention_head_size)) key_layer = key_layer / math.sqrt(math.sqrt(self.attention_head_size)) if self.num_landmarks == self.seq_len: attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in NystromformerModel forward() function) attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) context_layer = torch.matmul(attention_probs, value_layer) else: q_landmarks = query_layer.reshape( -1, self.num_attention_heads, self.num_landmarks, self.seq_len // self.num_landmarks, self.attention_head_size, ).mean(dim=-2) k_landmarks = key_layer.reshape( -1, self.num_attention_heads, self.num_landmarks, self.seq_len // self.num_landmarks, self.attention_head_size, ).mean(dim=-2) kernel_1 = torch.nn.functional.softmax(torch.matmul(query_layer, k_landmarks.transpose(-1, -2)), dim=-1) kernel_2 = torch.nn.functional.softmax(torch.matmul(q_landmarks, k_landmarks.transpose(-1, -2)), dim=-1) attention_scores = torch.matmul(q_landmarks, key_layer.transpose(-1, -2)) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in NystromformerModel forward() function) attention_scores = attention_scores + attention_mask kernel_3 = nn.functional.softmax(attention_scores, dim=-1) attention_probs = torch.matmul(kernel_1, self.iterative_inv(kernel_2)) new_value_layer = torch.matmul(kernel_3, value_layer) context_layer = torch.matmul(attention_probs, new_value_layer) if self.conv_kernel_size is not None: context_layer += self.conv(value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class NystromformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class NystromformerAttention(nn.Module): def __init__(self, config, position_embedding_type=None): 
super().__init__() self.self = NystromformerSelfAttention(config, position_embedding_type=position_embedding_type) self.output = NystromformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Nystromformer class NystromformerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Nystromformer class NystromformerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class NystromformerLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = NystromformerAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = NystromformerIntermediate(config) self.output = NystromformerOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class NystromformerEncoder(nn.Module): def 
__init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([NystromformerLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Nystromformer class NystromformerPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Nystromformer class NystromformerLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = NystromformerPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Nystromformer class NystromformerOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = NystromformerLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class NystromformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = NystromformerConfig base_model_prefix = "nystromformer" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) NYSTROMFORMER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`NystromformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ NYSTROMFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Nyströmformer Model transformer outputting raw hidden-states without any specific head on top.", NYSTROMFORMER_START_DOCSTRING, ) class NystromformerModel(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = NystromformerEmbeddings(config) self.encoder = NystromformerEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NYSTROMFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""Nyströmformer Model with a `language modeling` head on top.""", NYSTROMFORMER_START_DOCSTRING) class NystromformerForMaskedLM(NystromformerPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder"] def __init__(self, config): super().__init__(config) self.nystromformer = NystromformerModel(config) self.cls = NystromformerOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(NYSTROMFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class NystromformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ Nyströmformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, NYSTROMFORMER_START_DOCSTRING, ) class NystromformerForSequenceClassification(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.classifier = NystromformerClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(NYSTROMFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Nyströmformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, NYSTROMFORMER_START_DOCSTRING, ) class NystromformerForMultipleChoice(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.nystromformer = NystromformerModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( NYSTROMFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Nyströmformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, NYSTROMFORMER_START_DOCSTRING, ) class NystromformerForTokenClassification(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(NYSTROMFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Nyströmformer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, NYSTROMFORMER_START_DOCSTRING, ) class NystromformerForQuestionAnswering(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(NYSTROMFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/nystromformer/modeling_nystromformer.py/0
{ "file_path": "transformers/src/transformers/models/nystromformer/modeling_nystromformer.py", "repo_id": "transformers", "token_count": 20641 }
671
"""Index registry.""" from typing import Dict, Type from llama_index.legacy.data_structs.struct_type import IndexStructType from llama_index.legacy.indices.base import BaseIndex from llama_index.legacy.indices.document_summary.base import DocumentSummaryIndex from llama_index.legacy.indices.empty.base import EmptyIndex from llama_index.legacy.indices.keyword_table.base import KeywordTableIndex from llama_index.legacy.indices.knowledge_graph.base import KnowledgeGraphIndex from llama_index.legacy.indices.list.base import SummaryIndex from llama_index.legacy.indices.multi_modal import MultiModalVectorStoreIndex from llama_index.legacy.indices.struct_store.pandas import PandasIndex from llama_index.legacy.indices.struct_store.sql import SQLStructStoreIndex from llama_index.legacy.indices.tree.base import TreeIndex from llama_index.legacy.indices.vector_store.base import VectorStoreIndex INDEX_STRUCT_TYPE_TO_INDEX_CLASS: Dict[IndexStructType, Type[BaseIndex]] = { IndexStructType.TREE: TreeIndex, IndexStructType.LIST: SummaryIndex, IndexStructType.KEYWORD_TABLE: KeywordTableIndex, IndexStructType.VECTOR_STORE: VectorStoreIndex, IndexStructType.SQL: SQLStructStoreIndex, IndexStructType.PANDAS: PandasIndex, IndexStructType.KG: KnowledgeGraphIndex, IndexStructType.EMPTY: EmptyIndex, IndexStructType.DOCUMENT_SUMMARY: DocumentSummaryIndex, IndexStructType.MULTIMODAL_VECTOR_STORE: MultiModalVectorStoreIndex, }
llama_index/llama-index-legacy/llama_index/legacy/indices/registry.py/0
{ "file_path": "llama_index/llama-index-legacy/llama_index/legacy/indices/registry.py", "repo_id": "llama_index", "token_count": 479 }
1576
from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain_core.utils import get_from_env if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient from langchain_core.embeddings import Embeddings class ElasticsearchEmbeddings(Embeddings): """Elasticsearch embedding models. This class provides an interface to generate embeddings using a model deployed in an Elasticsearch cluster. It requires an Elasticsearch connection object and the model_id of the model deployed in the cluster. In Elasticsearch you need to have an embedding model loaded and deployed. - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, client: MlClient, model_id: str, *, input_field: str = "text_field", ): """ Initialize the ElasticsearchEmbeddings instance. Args: client (MlClient): An Elasticsearch ML client object. model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. """ self.client = client self.model_id = model_id self.input_field = input_field @classmethod def from_credentials( cls, model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to. es_user: (str, optional): Elasticsearch username. es_password: (str, optional): Elasticsearch password. Example: .. code-block:: python from langchain_community.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Credentials can be passed in two ways. Either set the env vars # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically # pulled in, or pass them in directly as kwargs. 
embeddings = ElasticsearchEmbeddings.from_credentials( model_id, input_field=input_field, # es_cloud_id="foo", # es_user="bar", # es_password="baz", ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ try: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install " "elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID") es_user = es_user or get_from_env("es_user", "ES_USER") es_password = es_password or get_from_env("es_password", "ES_PASSWORD") # Connect to Elasticsearch es_connection = Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) client = MlClient(es_connection) return cls(client, model_id, input_field=input_field) @classmethod def from_es_connection( cls, model_id: str, es_connection: Elasticsearch, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """ Instantiate embeddings from an existing Elasticsearch connection. This method provides a way to create an instance of the ElasticsearchEmbeddings class using an existing Elasticsearch connection. The connection object is used to create an MlClient, which is then used to initialize the ElasticsearchEmbeddings instance. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch connection object. input_field (str, optional): The name of the key for the input text field in the document. Defaults to 'text_field'. Returns: ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class. Example: .. code-block:: python from elasticsearch import Elasticsearch from langchain_community.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Create Elasticsearch connection es_connection = Elasticsearch( hosts=["localhost:9200"], http_auth=("user", "password") ) # Instantiate ElasticsearchEmbeddings using the existing connection embeddings = ElasticsearchEmbeddings.from_es_connection( model_id, es_connection, input_field=input_field, ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ # Importing MlClient from elasticsearch.client within the method to # avoid unnecessary import if the method is not used from elasticsearch.client import MlClient # Create an MlClient from the given Elasticsearch connection client = MlClient(es_connection) # Return a new instance of the ElasticsearchEmbeddings class with # the MlClient, model_id, and input_field return cls(client, model_id, input_field=input_field) def _embedding_func(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for the given texts using the Elasticsearch model. Args: texts (List[str]): A list of text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each text in the input list. 
""" response = self.client.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) embeddings = [doc["predicted_value"] for doc in response["inference_results"]] return embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for a list of documents. Args: texts (List[str]): A list of document text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each document in the input list. """ return self._embedding_func(texts) def embed_query(self, text: str) -> List[float]: """ Generate an embedding for a single query text. Args: text (str): The query text to generate an embedding for. Returns: List[float]: The embedding for the input query text. """ return self._embedding_func([text])[0]
langchain/libs/community/langchain_community/embeddings/elasticsearch.py/0
{ "file_path": "langchain/libs/community/langchain_community/embeddings/elasticsearch.py", "repo_id": "langchain", "token_count": 3606 }
275
[package]
name = "text-generation-launcher"
description = "Text Generation Launcher"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[dependencies]
clap = { version = "4.4.5", features = ["derive", "env"] }
ctrlc = { version = "3.4.1", features = ["termination"] }
nix = "0.27.1"
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.107"
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }

[dev-dependencies]
float_eq = "1.0.1"
reqwest = { version = "0.11.20", features = ["blocking", "json"] }

[build-dependencies]
vergen = { version = "8.2.5", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
text-generation-inference/launcher/Cargo.toml/0
{ "file_path": "text-generation-inference/launcher/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 277 }
429
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AwqConfig, OPTForCausalLM from transformers.testing_utils import ( require_accelerate, require_auto_awq, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class AwqConfigTest(unittest.TestCase): def test_wrong_backend(self): """ Simple test that checks if a user passes a wrong backend an error is raised """ # This should work fine _ = AwqConfig(bits=4) with self.assertRaises(ValueError): AwqConfig(bits=4, backend="") # These should work fine _ = AwqConfig(bits=4, version="GEMM") _ = AwqConfig(bits=4, version="gemm") with self.assertRaises(ValueError): AwqConfig(bits=4, backend="unexisting-backend") compute_capability = torch.cuda.get_device_capability() major, minor = compute_capability if major < 8: # LLMAWQ does not work on a T4 with self.assertRaises(ValueError): AwqConfig(bits=4, backend="llm-awq") else: # LLMAWQ should work on an A100 AwqConfig(bits=4, backend="llm-awq") def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = AwqConfig(bits=4) config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"bits": 2, "zero_point": False, "backend": "autoawq"} quantization_config = AwqConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) self.assertEqual(dict["zero_point"], quantization_config.zero_point) self.assertEqual(dict["backend"], quantization_config.backend) @slow @require_torch_gpu @require_auto_awq @require_accelerate class AwqTest(unittest.TestCase): model_name = "TheBloke/Mistral-7B-v0.1-AWQ" dummy_transformers_model_name = "bigscience/bloom-560m" model_with_no_k_proj_quantized = "hf-internal-testing/opt-125m-awq-no-k-proj" input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish" EXPECTED_OUTPUT_BF16 = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. 
I am a junior and I am majoring in Exercise and Sport Science with a" device_map = "cuda" # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV from transformers.integrations.awq import replace_with_awq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AwqConfig(bits=4) with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_awq_linear(model, quantization_config=quantization_config) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears, nb_awq_linear) # Try with `modules_not_to_convert` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_awq_linear( model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears - 1, nb_awq_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_non_quantized(self): model_id = "facebook/opt-125m" quantization_config = AwqConfig(bits=4) with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) def test_quantized_model_bf16(self): """ Simple test that checks if the quantized model is working properly with bf16 """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.bfloat16).to( torch_device ) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16) def test_quantized_model_no_device_map(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = 
self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1, 2, 3}) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_quantized_model_no_k_proj_quantized(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ dummy_input = torch.LongTensor([[0, 1, 0]]).to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_with_no_k_proj_quantized).to(torch_device) self.assertTrue(isinstance(quantized_model.model.decoder.layers[0].self_attn.k_proj, torch.nn.Linear)) self.assertFalse(isinstance(quantized_model.model.decoder.layers[0].self_attn.v_proj, torch.nn.Linear)) EXPECTED_OUTPUT = torch.LongTensor([[0, 1, 0, 50118, 50118, 133, 248, 12, 134, 16, 10, 372, 2031]]).to( torch_device ) output = quantized_model.generate(dummy_input, max_new_tokens=10) self.assertTrue((EXPECTED_OUTPUT == output).all()) @slow @require_torch_gpu @require_auto_awq @require_accelerate class AwqFusedTest(unittest.TestCase): model_name = "TheBloke/Mistral-7B-OpenOrca-AWQ" model_revision = "7048b2af77d0dd1c81b000b19d73f9cc8950b510" custom_mapping_model_id = "TheBloke/Yi-34B-AWQ" custom_model_revision = "f1b2cd1b7459ceecfdc1fac5bb8725f13707c589" mixtral_model_name = "casperhansen/mixtral-instruct-awq" mixtral_model_revision = "87dd4ec502dde74fb3a624835c776b000d190c3b" multi_modal_model_name = "ybelkada/llava-1.5-7b-hf-awq" multi_modal_model_code_revision = "ad108a50f5b9e681bdd7378409f57b7fa59a7442" prompt = ( "You're standing on the surface of the Earth. " "You walk one mile south, one mile west and one mile north. " "You end up exactly where you started. Where are you?" 
) EXPECTED_GENERATION = prompt + "\n\nThis is a classic puzzle that has been around for" EXPECTED_GENERATION_CUSTOM_MODEL = "HelloWorld.java:11)\r\n\tat org" EXPECTED_GENERATION_MIXTRAL = prompt + " You're on the North Pole.\n\nThe" def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def _check_fused_modules(self, model): has_fused_modules = False fused_modules_name = ["QuantAttentionFused", "QuantFusedMLP", "FasterTransformerRMSNorm"] for _, module in model.named_modules(): if module.__class__.__name__ in fused_modules_name: has_fused_modules = True break self.assertTrue(has_fused_modules, "Modules fusing not performed correctly!") def test_raise_save_pretrained(self): """ Test that `save_pretrained` is effectively blocked for fused models """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) def test_fused_modules_to_not_convert(self): """ Test if fused + modules to_not_covnert work as expected """ model_id = "hf-internal-testing/Mixtral-tiny-AWQ" quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( model_id, quantization_config=quantization_config, low_cpu_mem_usage=True, ).to(torch_device) # Check if model has been correctly fused self._check_fused_modules(model) # Checks if the modules_to_not_convert (here gate layer) is a Linear self.assertTrue(isinstance(model.model.layers[0].block_sparse_moe.gate, torch.nn.Linear)) def test_generation_fused(self): """ Test generation quality for fused models - single batch case """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision) inputs = tokenizer(self.prompt, return_tensors="pt").to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION) def test_generation_fused_batched(self): """ Test generation quality for fused models - multi batch case """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision) tokenizer.pad_token_id = tokenizer.eos_token_id inputs = tokenizer([self.prompt, self.prompt], return_tensors="pt", padding=True).to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION) def test_generation_llava_fused(self): from transformers import pipeline quantization_config = AwqConfig(do_fuse=True, fuse_max_seq_len=2048) pipe = pipeline( "image-to-text", model=self.multi_modal_model_name, device=0, model_kwargs={ "quantization_config": quantization_config, }, 
revision=self.multi_modal_model_code_revision, ) url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png" prompt = "USER: <image>\nCan you please describe this image?\nASSISTANT:" outputs = pipe(url, prompt=prompt, generate_kwargs={"max_new_tokens": 100}) EXPECTED_OUTPUT = "USER: \nCan you please describe this image?\nASSISTANT: The image features a brown and white cat sitting on a green surface, possibly a carpet or a grassy area. The cat is holding a red ball in its paws, seemingly playing with it. The cat appears to be focused on the ball, possibly preparing to play or just enjoying the toy." self.assertEqual(outputs[0]["generated_text"], EXPECTED_OUTPUT) @require_torch_multi_gpu def test_generation_custom_model(self): """ Test generation quality for fused models using custom fused map. """ quantization_config = AwqConfig( bits=4, fuse_max_seq_len=512, modules_to_fuse={ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "layernorm": ["ln1", "ln2", "norm"], "mlp": ["gate_proj", "up_proj", "down_proj"], "use_alibi": False, "num_attention_heads": 56, "num_key_value_heads": 8, "hidden_size": 7168, }, ) model = AutoModelForCausalLM.from_pretrained( self.custom_mapping_model_id, quantization_config=quantization_config, trust_remote_code=True, device_map="balanced", revision=self.custom_model_revision, ) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained( self.custom_mapping_model_id, revision=self.custom_model_revision, trust_remote_code=True ) prompt = "Hello" inputs = tokenizer(prompt, return_tensors="pt").to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION_CUSTOM_MODEL) @require_torch_multi_gpu def test_generation_mixtral_fused(self): """ Text generation test for Mixtral + AWQ + fused """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=1024, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.mixtral_model_name, quantization_config=quantization_config, device_map="auto", revision=self.mixtral_model_revision, ) tokenizer = AutoTokenizer.from_pretrained(self.mixtral_model_name) tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer([self.prompt, self.prompt], return_tensors="pt", padding=True).to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION_MIXTRAL)
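# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite): loading an AWQ-quantized
# checkpoint with fused modules, mirroring what `AwqFusedTest` exercises.
# It assumes a CUDA GPU plus the `autoawq` and `accelerate` packages; the
# checkpoint name is the one already used by the tests above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    quantization_config = AwqConfig(bits=4, fuse_max_seq_len=512, do_fuse=True)
    model = AutoModelForCausalLM.from_pretrained(
        "TheBloke/Mistral-7B-OpenOrca-AWQ",
        quantization_config=quantization_config,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-OpenOrca-AWQ")
    inputs = tokenizer(
        "You're standing on the surface of the Earth.", return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))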
transformers/tests/quantization/autoawq/test_awq.py/0
{ "file_path": "transformers/tests/quantization/autoawq/test_awq.py", "repo_id": "transformers", "token_count": 7755 }
763
---
hide_table_of_contents: true
sidebar_class_name: node-only
---

# S3 File

:::tip Compatibility
Only available on Node.js.
:::

This covers how to load document objects from a file stored in an Amazon S3 bucket.

## Setup

To run this loader you'll need to have Unstructured already set up and ready to use at an available URL endpoint. It can also be configured to run locally. See the docs [here](https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/unstructured) for information on how to do that.

You'll also need to install the official AWS SDK:

```bash npm2yarn
npm install @aws-sdk/client-s3
```

## Usage

Once Unstructured is configured, you can use the S3 loader to load files and then convert them into a Document.

You can optionally provide an `s3Config` parameter to specify your bucket region, access key, and secret access key. If these are not provided, you will need to have them in your environment (e.g., by running `aws configure`).

import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/document_loaders/s3.ts";

<CodeBlock language="typescript">{Example}</CodeBlock>
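If you'd like an inline reference, the snippet below is a minimal sketch of such an example — the bucket name, object key, credentials, and Unstructured endpoint are placeholders, and option names can vary slightly between versions, so treat the imported example above as canonical:

```typescript
import { S3Loader } from "langchain/document_loaders/web/s3";

const loader = new S3Loader({
  bucket: "my-document-bucket",
  key: "AccountingOverview.pdf",
  s3Config: {
    region: "us-east-1",
    credentials: {
      accessKeyId: "<YOUR_ACCESS_KEY_ID>",
      secretAccessKey: "<YOUR_SECRET_ACCESS_KEY>",
    },
  },
  unstructuredAPIURL: "http://localhost:8000/general/v0/general",
  unstructuredAPIKey: "", // set this if your Unstructured deployment requires a key
});

const docs = await loader.load();
console.log(docs);
```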
langchainjs/docs/core_docs/docs/integrations/document_loaders/web_loaders/s3.mdx/0
{ "file_path": "langchainjs/docs/core_docs/docs/integrations/document_loaders/web_loaders/s3.mdx", "repo_id": "langchainjs", "token_count": 324 }
735
# coding=utf-8 # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for XGLM.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xglm import XGLMTokenizer else: XGLMTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/xglm-564M": 2048, } class XGLMTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" XGLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. 
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = XGLMTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", **kwargs, ): # Compatibility with the original tokenizer self.num_madeup_words = 7 madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)] kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or [] kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.sep_token_id] + token_ids_0 sep = [self.sep_token_id] return sep + token_ids_0 + sep + sep + token_ids_1 def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] if token_ids_1 is None: return len(sep + token_ids_0) * [0] return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
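# ---------------------------------------------------------------------------
# Usage sketch (appended for illustration; not part of the original module).
# It shows how the fast tokenizer defined above is typically loaded and used.
# The checkpoint name and example sentences are assumptions, and loading the
# checkpoint requires network access; any XGLM checkpoint that ships a
# tokenizer.json works the same way.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = XGLMTokenizerFast.from_pretrained("facebook/xglm-564M")

    # Special tokens are added automatically through
    # `build_inputs_with_special_tokens` for both single sequences and pairs.
    single = tokenizer("Hello world")
    pair = tokenizer("Hello world", "How are you?")
    print(single["input_ids"])
    print(pair["input_ids"])

    # Save the fast tokenizer files; the sentencepiece model is copied as well
    # when `can_save_slow_tokenizer` is True.
    tokenizer.save_pretrained("./xglm-tokenizer-copy")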
transformers/src/transformers/models/xglm/tokenization_xglm_fast.py/0
{ "file_path": "transformers/src/transformers/models/xglm/tokenization_xglm_fast.py", "repo_id": "transformers", "token_count": 3356 }
719
from llama_index.core.vector_stores.types import VectorStore
from llama_index.vector_stores.rocksetdb import RocksetVectorStore


def test_class():
    names_of_base_classes = [b.__name__ for b in RocksetVectorStore.__mro__]
    assert VectorStore.__name__ in names_of_base_classes
llama_index/llama-index-integrations/vector_stores/llama-index-vector-stores-rocksetdb/tests/test_vector_stores_rocksetdb.py/0
{ "file_path": "llama_index/llama-index-integrations/vector_stores/llama-index-vector-stores-rocksetdb/tests/test_vector_stores_rocksetdb.py", "repo_id": "llama_index", "token_count": 94 }
1475
import logging from typing import Callable, List, Optional, Sequence from llama_index.core.async_utils import run_async_tasks from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.base_selector import BaseSelector from llama_index.core.base.response.schema import ( RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, ) from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.callbacks.base import CallbackManager from llama_index.core.callbacks.schema import CBEventType, EventPayload from llama_index.core.llms.llm import LLM from llama_index.core.objects.base import ObjectRetriever from llama_index.core.prompts.default_prompt_selectors import ( DEFAULT_TREE_SUMMARIZE_PROMPT_SEL, ) from llama_index.core.prompts.mixin import PromptMixinType from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.schema import BaseNode, QueryBundle from llama_index.core.selectors.utils import get_selector_from_llm from llama_index.core.service_context import ServiceContext from llama_index.core.settings import ( Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context, ) from llama_index.core.tools.query_engine import QueryEngineTool from llama_index.core.tools.types import ToolMetadata from llama_index.core.utils import print_text logger = logging.getLogger(__name__) def combine_responses( summarizer: TreeSummarize, responses: List[RESPONSE_TYPE], query_bundle: QueryBundle ) -> RESPONSE_TYPE: """Combine multiple response from sub-engines.""" logger.info("Combining responses from multiple query engines.") response_strs = [] source_nodes = [] for response in responses: if isinstance(response, (StreamingResponse, PydanticResponse)): response_obj = response.get_response() else: response_obj = response source_nodes.extend(response_obj.source_nodes) response_strs.append(str(response)) summary = summarizer.get_response(query_bundle.query_str, response_strs) if isinstance(summary, str): return Response(response=summary, source_nodes=source_nodes) elif isinstance(summary, BaseModel): return PydanticResponse(response=summary, source_nodes=source_nodes) else: return StreamingResponse(response_gen=summary, source_nodes=source_nodes) async def acombine_responses( summarizer: TreeSummarize, responses: List[RESPONSE_TYPE], query_bundle: QueryBundle ) -> RESPONSE_TYPE: """Async combine multiple response from sub-engines.""" logger.info("Combining responses from multiple query engines.") response_strs = [] source_nodes = [] for response in responses: if isinstance(response, (StreamingResponse, PydanticResponse)): response_obj = response.get_response() else: response_obj = response source_nodes.extend(response_obj.source_nodes) response_strs.append(str(response)) summary = await summarizer.aget_response(query_bundle.query_str, response_strs) if isinstance(summary, str): return Response(response=summary, source_nodes=source_nodes) elif isinstance(summary, BaseModel): return PydanticResponse(response=summary, source_nodes=source_nodes) else: return StreamingResponse(response_gen=summary, source_nodes=source_nodes) class RouterQueryEngine(BaseQueryEngine): """Router query engine. Selects one out of several candidate query engines to execute a query. Args: selector (BaseSelector): A selector that chooses one out of many options based on each candidate's metadata and query. 
query_engine_tools (Sequence[QueryEngineTool]): A sequence of candidate query engines. They must be wrapped as tools to expose metadata to the selector. service_context (Optional[ServiceContext]): A service context. summarizer (Optional[TreeSummarize]): Tree summarizer to summarize sub-results. """ def __init__( self, selector: BaseSelector, query_engine_tools: Sequence[QueryEngineTool], llm: Optional[LLM] = None, summarizer: Optional[TreeSummarize] = None, verbose: bool = False, # deprecated service_context: Optional[ServiceContext] = None, ) -> None: self._llm = llm or llm_from_settings_or_context(Settings, llm) self._selector = selector self._query_engines = [x.query_engine for x in query_engine_tools] self._metadatas = [x.metadata for x in query_engine_tools] self._summarizer = summarizer or TreeSummarize( llm=self._llm, service_context=service_context, summary_template=DEFAULT_TREE_SUMMARIZE_PROMPT_SEL, ) self._verbose = verbose super().__init__( callback_manager=callback_manager_from_settings_or_context( Settings, service_context ) ) def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" # NOTE: don't include tools for now return {"summarizer": self._summarizer, "selector": self._selector} @classmethod def from_defaults( cls, query_engine_tools: Sequence[QueryEngineTool], llm: Optional[LLM] = None, selector: Optional[BaseSelector] = None, summarizer: Optional[TreeSummarize] = None, select_multi: bool = False, # deprecated service_context: Optional[ServiceContext] = None, ) -> "RouterQueryEngine": llm = llm or llm_from_settings_or_context(Settings, llm) selector = selector or get_selector_from_llm(llm, is_multi=select_multi) assert selector is not None return cls( selector, query_engine_tools, service_context=service_context, summarizer=summarizer, ) def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} ) as query_event: result = self._selector.select(self._metadatas, query_bundle) if len(result.inds) > 1: responses = [] for i, engine_ind in enumerate(result.inds): log_str = ( f"Selecting query engine {engine_ind}: " f"{result.reasons[i]}." ) logger.info(log_str) if self._verbose: print_text(log_str + "\n", color="pink") selected_query_engine = self._query_engines[engine_ind] responses.append(selected_query_engine.query(query_bundle)) if len(responses) > 1: final_response = combine_responses( self._summarizer, responses, query_bundle ) else: final_response = responses[0] else: try: selected_query_engine = self._query_engines[result.ind] log_str = f"Selecting query engine {result.ind}: {result.reason}." 
logger.info(log_str) if self._verbose: print_text(log_str + "\n", color="pink") except ValueError as e: raise ValueError("Failed to select query engine") from e final_response = selected_query_engine.query(query_bundle) # add selected result final_response.metadata = final_response.metadata or {} final_response.metadata["selector_result"] = result query_event.on_end(payload={EventPayload.RESPONSE: final_response}) return final_response async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} ) as query_event: result = await self._selector.aselect(self._metadatas, query_bundle) if len(result.inds) > 1: tasks = [] for i, engine_ind in enumerate(result.inds): log_str = ( f"Selecting query engine {engine_ind}: " f"{result.reasons[i]}." ) logger.info(log_str) if self._verbose: print_text(log_str + "\n", color="pink") selected_query_engine = self._query_engines[engine_ind] tasks.append(selected_query_engine.aquery(query_bundle)) responses = run_async_tasks(tasks) if len(responses) > 1: final_response = await acombine_responses( self._summarizer, responses, query_bundle ) else: final_response = responses[0] else: try: selected_query_engine = self._query_engines[result.ind] log_str = f"Selecting query engine {result.ind}: {result.reason}." logger.info(log_str) if self._verbose: print_text(log_str + "\n", color="pink") except ValueError as e: raise ValueError("Failed to select query engine") from e final_response = await selected_query_engine.aquery(query_bundle) # add selected result final_response.metadata = final_response.metadata or {} final_response.metadata["selector_result"] = result query_event.on_end(payload={EventPayload.RESPONSE: final_response}) return final_response def default_node_to_metadata_fn(node: BaseNode) -> ToolMetadata: """Default node to metadata function. We use the node's text as the Tool description. """ metadata = node.metadata or {} if "tool_name" not in metadata: raise ValueError("Node must have a tool_name in metadata.") return ToolMetadata(name=metadata["tool_name"], description=node.get_content()) class RetrieverRouterQueryEngine(BaseQueryEngine): """Retriever-based router query engine. NOTE: this is deprecated, please use our new ToolRetrieverRouterQueryEngine Use a retriever to select a set of Nodes. Each node will be converted into a ToolMetadata object, and also used to retrieve a query engine, to form a QueryEngineTool. NOTE: this is a beta feature. We are figuring out the right interface between the retriever and query engine. Args: selector (BaseSelector): A selector that chooses one out of many options based on each candidate's metadata and query. query_engine_tools (Sequence[QueryEngineTool]): A sequence of candidate query engines. They must be wrapped as tools to expose metadata to the selector. callback_manager (Optional[CallbackManager]): A callback manager. 
""" def __init__( self, retriever: BaseRetriever, node_to_query_engine_fn: Callable, callback_manager: Optional[CallbackManager] = None, ) -> None: self._retriever = retriever self._node_to_query_engine_fn = node_to_query_engine_fn super().__init__(callback_manager) def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" # NOTE: don't include tools for now return {"retriever": self._retriever} def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: nodes_with_score = self._retriever.retrieve(query_bundle) # TODO: for now we only support retrieving one node if len(nodes_with_score) > 1: raise ValueError("Retrieved more than one node.") node = nodes_with_score[0].node query_engine = self._node_to_query_engine_fn(node) return query_engine.query(query_bundle) async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: return self._query(query_bundle) class ToolRetrieverRouterQueryEngine(BaseQueryEngine): """Tool Retriever router query engine. Selects a set of candidate query engines to execute a query. Args: retriever (ObjectRetriever): A retriever that retrieves a set of query engine tools. service_context (Optional[ServiceContext]): A service context. summarizer (Optional[TreeSummarize]): Tree summarizer to summarize sub-results. """ def __init__( self, retriever: ObjectRetriever[QueryEngineTool], service_context: Optional[ServiceContext] = None, summarizer: Optional[TreeSummarize] = None, ) -> None: self.service_context = service_context or ServiceContext.from_defaults() self._summarizer = summarizer or TreeSummarize( service_context=self.service_context, summary_template=DEFAULT_TREE_SUMMARIZE_PROMPT_SEL, ) self._retriever = retriever super().__init__(self.service_context.callback_manager) def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" # NOTE: don't include tools for now return {"summarizer": self._summarizer} def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} ) as query_event: query_engine_tools = self._retriever.retrieve(query_bundle) responses = [] for query_engine_tool in query_engine_tools: query_engine = query_engine_tool.query_engine responses.append(query_engine.query(query_bundle)) if len(responses) > 1: final_response = combine_responses( self._summarizer, responses, query_bundle ) else: final_response = responses[0] # add selected result final_response.metadata = final_response.metadata or {} final_response.metadata["retrieved_tools"] = query_engine_tools query_event.on_end(payload={EventPayload.RESPONSE: final_response}) return final_response async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} ) as query_event: query_engine_tools = self._retriever.retrieve(query_bundle) tasks = [] for query_engine_tool in query_engine_tools: query_engine = query_engine_tool.query_engine tasks.append(query_engine.aquery(query_bundle)) responses = run_async_tasks(tasks) if len(responses) > 1: final_response = await acombine_responses( self._summarizer, responses, query_bundle ) else: final_response = responses[0] # add selected result final_response.metadata = final_response.metadata or {} final_response.metadata["retrieved_tools"] = query_engine_tools query_event.on_end(payload={EventPayload.RESPONSE: final_response}) return final_response
llama_index/llama-index-core/llama_index/core/query_engine/router_query_engine.py/0
{ "file_path": "llama_index/llama-index-core/llama_index/core/query_engine/router_query_engine.py", "repo_id": "llama_index", "token_count": 6714 }
1241
import pytest import torch from copy import copy from transformers import AutoTokenizer from text_generation_server.pb import generate_pb2 from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch @pytest.fixture(scope="session") def mt0_small_tokenizer(): tokenizer = AutoTokenizer.from_pretrained( "bigscience/mt0-small", padding_side="left" ) tokenizer.bos_token_id = 0 return tokenizer @pytest.fixture(scope="session") def default_seq2seq_lm(): return Seq2SeqLM("bigscience/mt0-small") @pytest.fixture def default_pb_request(default_pb_parameters, default_pb_stop_parameters): return generate_pb2.Request( id=0, inputs="Test", prefill_logprobs=True, truncate=100, parameters=default_pb_parameters, stopping_parameters=default_pb_stop_parameters, ) @pytest.fixture def default_pb_batch(default_pb_request): return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1) @pytest.fixture def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer): return Seq2SeqLMBatch.from_pb( default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu") ) @pytest.fixture def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer): req_0 = copy(default_pb_request) req_0.id = 1 req_1 = default_pb_request req_1.id = 2 req_1.stopping_parameters.max_new_tokens = 5 batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2) return Seq2SeqLMBatch.from_pb( batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu") ) def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch): batch = default_seq2seq_lm_batch sequence_length = len(default_seq2seq_lm_batch.input_ids[0]) assert batch.batch_id == default_pb_batch.id assert batch.requests == default_pb_batch.requests assert batch.input_ids.shape == (default_pb_batch.size, sequence_length) assert batch.input_ids[0][-2] == 4268 assert batch.input_ids[0][-1] == 1 assert torch.all(batch.input_ids[0][:-2] == 0) assert torch.all(batch.attention_mask[0][-2:] == 1) assert torch.all(batch.attention_mask[0][:-2] == 0) assert len(batch.decoder_input_ids) == default_pb_batch.size assert batch.decoder_attention_mask is None assert batch.encoder_last_hidden_state is None assert batch.past_key_values is None assert batch.input_lengths == [2] assert batch.decoder_input_lengths == [1] assert len(batch) == default_pb_batch.size assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch) assert batch.max_input_length == batch.input_lengths[0] assert batch.max_decoder_input_length == batch.decoder_input_lengths[0] def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch): with pytest.raises(ValueError): Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch]) def test_seq2seq_lm_batch_type(default_seq2seq_lm): assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch): sequence_length = len(default_seq2seq_lm_batch.input_ids[0]) generations, next_batch, _ = default_seq2seq_lm.generate_token( default_seq2seq_lm_batch ) assert len(generations) == len(next_batch) assert isinstance(next_batch, Seq2SeqLMBatch) assert next_batch.input_ids is None assert torch.equal( next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask ) assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length assert ( next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers ) assert 
next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias assert len(next_batch.decoder_input_ids) == len(next_batch) assert next_batch.all_decoder_input_ids[0][0] == 0 assert next_batch.all_decoder_input_ids[0][1] == 259 assert next_batch.decoder_attention_mask is None assert next_batch.encoder_last_hidden_state.shape == (1, sequence_length, 512) assert next_batch.decoder_input_lengths == [2] assert next_batch.max_decoder_input_length == 2 assert next_batch.past_key_values is not None assert all( [p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values] ) assert all( [p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values] ) assert all( [ p[2].shape == (len(next_batch), 6, sequence_length, 64) for p in next_batch.past_key_values ] ) assert all( [ p[3].shape == (len(next_batch), 6, sequence_length, 64) for p in next_batch.past_key_values ] ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) assert all( [ token_id.item() == 259 for generation in generations for token_id in generation.tokens.token_ids ] ) assert all( [ token_text == " " for generation in generations for token_text in generation.tokens.texts ] ) assert generations[0].request_id == 0 def test_seq2seq_lm_generate_token_completion( default_seq2seq_lm, default_seq2seq_lm_batch ): next_batch = default_seq2seq_lm_batch for _ in range(6): generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == "a few weeks" assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id assert generations[0].generated_text.generated_tokens == 7 def test_seq2seq_lm_generate_token_completion_multi( default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch ): next_batch = default_multi_requests_seq2seq_lm_batch for i in range(4): generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 assert generations[1].generated_text.text == "a few " assert ( generations[1].request_id == default_multi_requests_seq2seq_lm_batch.requests[1].id ) assert generations[1].generated_text.generated_tokens == 5 next_batch = next_batch.filter([next_batch.requests[0].id]) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == "a few weeks" assert ( generations[0].request_id == default_multi_requests_seq2seq_lm_batch.requests[0].id ) assert generations[0].generated_text.generated_tokens == 7 def test_batch_concatenate( default_seq2seq_lm, default_seq2seq_lm_batch, default_multi_requests_seq2seq_lm_batch, ): next_batch_0 = default_seq2seq_lm_batch _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0) _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0) next_batch_1 = default_multi_requests_seq2seq_lm_batch _, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1) # Copy hidden state because it is 
removed from the concatenated branches next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state # Clone past_key_values before concatenating to compare after, # because they are removed from the concatenated batches next_batch_0_past_key_values = [ [t.clone() for t in layer] for layer in next_batch_0.past_key_values ] next_batch_1_past_key_values = [ [t.clone() for t in layer] for layer in next_batch_1.past_key_values ] next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1]) assert next_batch.batch_id == 0 assert torch.equal( next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0] ) assert next_batch.all_decoder_input_ids[1][0] == 0 assert next_batch.all_decoder_input_ids[2][0] == 0 assert torch.equal( next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids ) assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1) assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0) assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0) assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1) assert torch.equal( next_batch.encoder_last_hidden_state[0], next_batch_0_encoder_last_hidden_state[0, -2:], ) assert torch.equal( next_batch.encoder_last_hidden_state[1:], next_batch_1_encoder_last_hidden_state[:, -2:], ) assert next_batch.input_lengths == [2, 2, 2] assert next_batch.decoder_input_lengths == [3, 2, 2] assert next_batch.max_input_length == 2 assert next_batch.max_decoder_input_length == 3 assert next_batch.requests[0] == next_batch_0.requests[0] assert next_batch.requests[1:] == next_batch_1.requests assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0] assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0] assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias assert next_batch.past_key_values is not None assert all( [p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) assert all( [p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) assert all( [p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) assert all( [p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) for i, past in enumerate(next_batch.past_key_values): assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0]) assert torch.equal( next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :] ) assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0]) assert torch.equal( next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :] ) assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0]) assert torch.equal( next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:] ) assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0]) assert torch.equal( next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:] ) for _ in range(3): generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 3 assert generations[2].generated_text.text == "a few " assert ( generations[2].request_id == 
default_multi_requests_seq2seq_lm_batch.requests[1].id ) assert generations[2].generated_text.generated_tokens == 5 next_batch = next_batch.filter( [next_batch.requests[0].id, next_batch.requests[1].id] ) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 assert generations[0].generated_text.text == "a few weeks" assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id assert generations[0].generated_text.generated_tokens == 7 next_batch = next_batch.filter([next_batch.requests[1].id]) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == "a few weeks" assert ( generations[0].request_id == default_multi_requests_seq2seq_lm_batch.requests[0].id ) assert generations[0].generated_text.generated_tokens == 7
text-generation-inference/server/tests/models/test_seq2seq_lm.py/0
{ "file_path": "text-generation-inference/server/tests/models/test_seq2seq_lm.py", "repo_id": "text-generation-inference", "token_count": 5483 }
393
[package]
name = "candle-metal-kernels"
version = "0.4.0"
edition = "2021"
description = "Metal kernels for Candle"
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"

[dependencies]
metal = { version = "0.27.0", features = ["mps"] }
once_cell = "1.18.0"
thiserror = "1"
tracing = "0.1.37"

[dev-dependencies]
half = { version = "2.3.1", features = [
    "num-traits",
    "use-intrinsics",
    "rand_distr",
] }
rand = "0.8.5"
candle/candle-metal-kernels/Cargo.toml/0
{ "file_path": "candle/candle-metal-kernels/Cargo.toml", "repo_id": "candle", "token_count": 218 }
54
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rootcoord

import (
    "context"

    "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
    "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"

    "github.com/milvus-io/milvus/pkg/util/merr"
)

// hasCollectionTask has collection request task
type hasCollectionTask struct {
    baseTask
    Req *milvuspb.HasCollectionRequest
    Rsp *milvuspb.BoolResponse
}

func (t *hasCollectionTask) Prepare(ctx context.Context) error {
    if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_HasCollection); err != nil {
        return err
    }
    return nil
}

// Execute task execution
func (t *hasCollectionTask) Execute(ctx context.Context) error {
    t.Rsp.Status = merr.Success()
    ts := getTravelTs(t.Req)
    // TODO: what if err != nil && common.IsCollectionNotExistError == false, should we consider this RPC as failure?
    _, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetDbName(), t.Req.GetCollectionName(), ts)
    t.Rsp.Value = err == nil
    return nil
}
milvus/internal/rootcoord/has_collection_task.go/0
{ "file_path": "milvus/internal/rootcoord/has_collection_task.go", "repo_id": "milvus", "token_count": 543 }
1862
# What are the policy-based methods?

The main goal of Reinforcement Learning is to **find the optimal policy \\(\pi^{*}\\) that will maximize the expected cumulative reward**.
Because Reinforcement Learning is based on the *reward hypothesis*: **all goals can be described as the maximization of the expected cumulative reward.**

For instance, in a soccer game (where you're going to train the agents in two units), the goal is to win the game. We can describe this goal in reinforcement learning as **maximizing the number of goals scored** (when the ball crosses the goal line) into your opponent's soccer goals, and **minimizing the number of goals in your soccer goals**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/soccer.jpg" alt="Soccer" />

## Value-based, Policy-based, and Actor-critic methods

In the first unit, we saw two methods to find (or, most of the time, approximate) this optimal policy \\(\pi^{*}\\).

- In *value-based methods*, we learn a value function.
  - The idea is that an optimal value function leads to an optimal policy \\(\pi^{*}\\).
  - Our objective is to **minimize the loss between the predicted and target value** to approximate the true action-value function.
  - We have a policy, but it's implicit since it **is generated directly from the value function**. For instance, in Q-Learning, we used an (epsilon-)greedy policy.

- On the other hand, in *policy-based methods*, we directly learn to approximate \\(\pi^{*}\\) without having to learn a value function.
  - The idea is **to parameterize the policy**. For instance, using a neural network \\(\pi_\theta\\), this policy will output a probability distribution over actions (stochastic policy).
  - <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/stochastic_policy.png" alt="stochastic policy" />
  - Our objective then is **to maximize the performance of the parameterized policy using gradient ascent**.
  - To do that, we control the parameter \\(\theta\\) that will affect the distribution of actions over a state.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/policy_based.png" alt="Policy based" />

- Next time, we'll study the *actor-critic* method, which is a combination of value-based and policy-based methods.

Consequently, thanks to policy-based methods, we can directly optimize our policy \\(\pi_\theta\\) to output a probability distribution over actions \\(\pi_\theta(a|s)\\) that leads to the best cumulative return.
To do that, we define an objective function \\(J(\theta)\\), that is, the expected cumulative reward, and we **want to find the value \\(\theta\\) that maximizes this objective function**.

## The difference between policy-based and policy-gradient methods

Policy-gradient methods, which we're going to study in this unit, are a subclass of policy-based methods. In policy-based methods, the optimization is most of the time *on-policy*, since for each update we only use data (trajectories) collected **by our most recent version of** \\(\pi_\theta\\).

The difference between these two methods **lies in how we optimize the parameter** \\(\theta\\):

- In *policy-based methods*, we search directly for the optimal policy. We can optimize the parameter \\(\theta\\) **indirectly** by maximizing the local approximation of the objective function with techniques like hill climbing, simulated annealing, or evolution strategies.
- In *policy-gradient methods*, because they are a subclass of policy-based methods, we search directly for the optimal policy. But we optimize the parameter \\(\theta\\) **directly** by performing gradient ascent on the performance of the objective function \\(J(\theta)\\) (a minimal code sketch of this idea is given at the end of this page).

Before diving more into how policy-gradient methods work (the objective function, policy gradient theorem, gradient ascent, etc.), let's study the advantages and disadvantages of policy-based methods.
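To make the idea of a parameterized stochastic policy \\(\pi_\theta(a|s)\\) and gradient ascent on \\(J(\theta)\\) concrete, here is a minimal sketch (it is not the course's reference implementation). It assumes PyTorch, a discrete action space, and a Gymnasium-style environment such as CartPole-v1; the network width, the discount factor, and the return normalization are illustrative choices.

```python
import torch
import torch.nn as nn
from torch.distributions import Categorical


class PolicyNetwork(nn.Module):
    """pi_theta(a|s): maps a state to a probability distribution over actions."""

    def __init__(self, state_dim: int, n_actions: int, hidden: int = 64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_actions),
            nn.Softmax(dim=-1),
        )

    def forward(self, state: torch.Tensor) -> Categorical:
        return Categorical(probs=self.net(state))


def run_episode(env, policy):
    """Collect one episode with the current policy (on-policy data)."""
    log_probs, rewards = [], []
    state, _ = env.reset()  # Gymnasium-style reset/step API assumed
    done = False
    while not done:
        dist = policy(torch.as_tensor(state, dtype=torch.float32))
        action = dist.sample()
        log_probs.append(dist.log_prob(action))
        state, reward, terminated, truncated, _ = env.step(action.item())
        rewards.append(reward)
        done = terminated or truncated
    return log_probs, rewards


def reinforce_update(policy, optimizer, log_probs, rewards, gamma: float = 0.99):
    """One gradient-ascent step on a sampled estimate of J(theta)."""
    returns, g = [], 0.0
    for r in reversed(rewards):  # discounted return G_t for every timestep
        g = r + gamma * g
        returns.insert(0, g)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)  # variance reduction

    # Maximizing J(theta) == minimizing -sum_t log pi_theta(a_t|s_t) * G_t
    loss = -(torch.stack(log_probs) * returns).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()


# Typical wiring (requires `gymnasium` to be installed):
# env = gymnasium.make("CartPole-v1")
# policy = PolicyNetwork(state_dim=4, n_actions=2)
# optimizer = torch.optim.Adam(policy.parameters(), lr=1e-3)
# for _ in range(500):
#     log_probs, rewards = run_episode(env, policy)
#     reinforce_update(policy, optimizer, log_probs, rewards)
```

Note that each update only uses trajectories collected with the most recent version of \\(\pi_\theta\\), which is exactly the on-policy property mentioned above.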
deep-rl-class/units/en/unit4/what-are-policy-based-methods.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/what-are-policy-based-methods.mdx", "repo_id": "deep-rl-class", "token_count": 1034 }
180