Dataset columns:
repo_name: string (length 7 to 71)
file_path: string (length 5 to 118)
context: list
import_statement: string (length 45 to 12.5k)
token_num: int64 (641 to 99.4k)
cropped_code: string (length 44 to 17k)
all_code: string (length 43 to 754k)
next_line: string (length 2 to 330)
gold_snippet_index: int64 (0 to 68)
created_at: string (length 25)
level: string (9 classes)
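The columns above appear to describe records for a repository-level next-line code-completion task: each row pairs retrieved cross-file snippets (context) with a file prefix (cropped_code) and the line to be predicted (next_line). As a rough sketch of how such a record might be consumed (the prompt layout, the exact-match scoring, and the assumption that a row is parsed into a Python dict with these keys are all illustrative, not prescribed by the dataset):

def build_prompt(record: dict) -> str:
    # Join the retrieved cross-file snippets; gold_snippet_index presumably
    # marks which of these contains the ground-truth definition.
    context_blocks = "\n\n".join(
        f"# {c['path']}\n{c['snippet']}" for c in record["context"]
    )
    # The model sees the retrieved context, the file's imports, and the
    # cropped file body, and must produce the record's next_line.
    return f"{context_blocks}\n\n{record['import_statement']}\n{record['cropped_code']}\n"


def is_correct(record: dict, completion: str) -> bool:
    # One simple scoring rule: exact match against the gold next line.
    return completion.strip() == record["next_line"].strip()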

repo_name: thuml/iTransformer
file_path: experiments/exp_basic.py
context:
[ { "identifier": "Transformer", "path": "model/Transformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):" }, { "identifier": "Informer", "path": "model/Informer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def long_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):" }, { "identifier": "Reformer", "path": "model/Reformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs, bucket_size=4, n_hashes=4):\n def long_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):" }, { "identifier": "Flowformer", "path": "model/Flowformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):" }, { "identifier": "Flashformer", "path": "model/Flashformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):" }, { "identifier": "iTransformer", "path": "model/iTransformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n _, _, N = x_enc.shape # B L N" }, { "identifier": "iInformer", "path": "model/iInformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n _, _, N = x_enc.shape" }, { "identifier": "iReformer", "path": "model/iReformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n _, _, N = x_enc.shape" }, { "identifier": "iFlowformer", "path": "model/iFlowformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n _, _, N = x_enc.shape" }, { "identifier": "iFlashformer", "path": "model/iFlashformer.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n _, _, N = x_enc.shape" } ]
import_statement:
import os
import torch
from model import Transformer, Informer, Reformer, Flowformer, Flashformer, \
    iTransformer, iInformer, iReformer, iFlowformer, iFlashformer
token_num: 901
cropped_code / all_code:
class Exp_Basic(object):
    def __init__(self, args):
        self.args = args
        self.model_dict = {
            'Transformer': Transformer,
            'Informer': Informer,
            'Reformer': Reformer,
            'Flowformer': Flowformer,
            'Flashformer': Flashformer,
            'iTransformer': iTransformer,
            'iInformer': iInformer,
            'iReformer': iReformer,
            'iFlowformer': iFlowformer,
next_line: 'iFlashformer': iFlashformer,
gold_snippet_index: 9
created_at: 2023-10-19 03:23:15+00:00
level: 2k

repo_name: kylesargent/ZeroNVS
file_path: threestudio/utils/GAN/vae.py
context:
[ { "identifier": "LinearAttention", "path": "threestudio/utils/GAN/attention.py", "snippet": "class LinearAttention(nn.Module):\n def __init__(self, dim, heads=4, dim_head=32):\n super().__init__()\n self.heads = heads\n hidden_dim = dim_head * heads\n self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)\n self.to_out = nn.Conv2d(hidden_dim, dim, 1)\n\n def forward(self, x):\n b, c, h, w = x.shape\n qkv = self.to_qkv(x)\n q, k, v = rearrange(\n qkv, \"b (qkv heads c) h w -> qkv b heads c (h w)\", heads=self.heads, qkv=3\n )\n k = k.softmax(dim=-1)\n context = torch.einsum(\"bhdn,bhen->bhde\", k, v)\n out = torch.einsum(\"bhde,bhdn->bhen\", context, q)\n out = rearrange(\n out, \"b heads c (h w) -> b (heads c) h w\", heads=self.heads, h=h, w=w\n )\n return self.to_out(out)" }, { "identifier": "instantiate_from_config", "path": "threestudio/utils/GAN/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" } ]
import_statement:
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from threestudio.utils.GAN.attention import LinearAttention
from threestudio.utils.GAN.util import instantiate_from_config
token_num: 1,458
cropped_code: crop of all_code below, beginning mid-docstring at the "Attention Is All You Need" line.
all_code:
# pytorch_diffusion + derived encoder decoder
def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models:
    From Fairseq.
    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
    emb = emb.to(device=timesteps.device)
    emb = timesteps.float()[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb


def nonlinearity(x):
    # swish
    return x * torch.sigmoid(x)


def Normalize(in_channels, num_groups=32):
    return torch.nn.BatchNorm2d(num_features=in_channels)


class Upsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=1, padding=1
            )

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=2, padding=0
            )

    def forward(self, x):
        if self.with_conv:
            pad = (0, 1, 0, 1)
            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


class ResnetBlock(nn.Module):
    def __init__(
        self,
        *,
        in_channels,
        out_channels=None,
        conv_shortcut=False,
        dropout,
        temb_channels=512,
    ):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=1, padding=1
        )
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1
        )
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size=3, stride=1, padding=1
                )
            else:
                self.nin_shortcut = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size=1, stride=1, padding=0
                )

    def forward(self, x, temb):
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x + h
next_line: class LinAttnBlock(LinearAttention):
gold_snippet_index: 0
created_at: 2023-10-24 19:02:44+00:00
level: 2k

repo_name: princeton-nlp/LLM-Shearing
file_path: llmshearing/datasets/load_text_dataloader.py
context:
[ { "identifier": "TextDynamicStreamingDataset", "path": "llmshearing/datasets/streaming_dataset.py", "snippet": "class TextDynamicStreamingDataset(DynamicStreamingDataset):\n \"\"\" \n A dataset to load data dynamically from different domains\n Adapted from https://github.com/mosaicml/llm-foundry/blob/main/llmfoundry/data/text_data.py#L21\n \"\"\"\n\n def __init__(self,\n local: str,\n max_seq_len: int,\n shuffle: bool = False,\n shuffle_seed: int = 9176,\n num_canonical_nodes: Optional[int] = 128,\n batch_size: Optional[int] = None,\n set_names: List[str] = None,\n proportion: List = None,\n is_uint16: bool = False):\n\n # Build Dataset\n super().__init__(local=local,\n shuffle=shuffle,\n shuffle_seed=shuffle_seed,\n num_canonical_nodes=num_canonical_nodes,\n batch_size=batch_size,\n set_names=set_names,\n proportion=proportion)\n \n # Token ids are in a uint16 format to save memory\n self.is_uint16 = is_uint16\n self.max_seq_len = max_seq_len\n\n def _read_binary_tokenized_sample(self, sample):\n if self.is_uint16:\n a = np.frombuffer(sample['tokens'], dtype=\"B\").view(\n dtype=np.uint16).astype(np.int64)\n tokens = torch.from_numpy(a[:self.max_seq_len].copy())\n else:\n tokens = torch.from_numpy(np.frombuffer(sample['tokens'], dtype=np.int64)[:self.max_seq_len].copy())\n return tokens\n\n def get_sample(self, idx: int) -> Dict[str, Any]:\n sample = super().__getitem__(idx)\n return sample\n\n # updated\n def __getitem__(self, idx: Union[int, Tuple]) -> Dict[str, Any]:\n sample = super().__getitem__(idx)\n token_sample = self._read_binary_tokenized_sample(sample)\n return {\"input_ids\": token_sample, \"set\": sample[\"set\"], \"idx\": idx} " }, { "identifier": "TextStreamingDataset", "path": "llmshearing/datasets/streaming_dataset.py", "snippet": "class TextStreamingDataset(StreamingDataset):\n \"\"\" \n A dataset to load fixed data, a simplied version of \n Adapted from https://github.com/mosaicml/llm-foundry/blob/main/llmfoundry/data/text_data.py#L21\n \"\"\"\n def __init__(self,\n local: str,\n split: str,\n max_seq_len: int,\n shuffle: bool = False,\n shuffle_seed: int = 9176,\n num_canonical_nodes: Optional[int] = 128,\n batch_size: Optional[int] = None,\n is_uint16: bool = False):\n\n # Build Dataset\n super().__init__(local=local,\n split=split,\n shuffle=shuffle,\n shuffle_seed=shuffle_seed,\n num_canonical_nodes=num_canonical_nodes,\n batch_size=batch_size)\n \n # Token ids are in a uint16 format to save memory\n self.is_uint16 = is_uint16\n self.max_seq_len = max_seq_len\n\n def _read_binary_tokenized_sample(self, sample):\n if self.is_uint16:\n a = np.frombuffer(sample['tokens'], dtype=\"B\").view(\n dtype=np.uint16).astype(np.int64)\n tokens = torch.from_numpy(a[:self.max_seq_len].copy())\n else:\n tokens = torch.from_numpy(np.frombuffer(sample['tokens'], dtype=np.int64)[:self.max_seq_len].copy())\n return tokens\n\n def get_sample(self, idx: int) -> Dict[str, Any]:\n sample = super().__getitem__(idx)\n return sample\n\n # updated\n def __getitem__(self, idx: Union[int, Tuple]) -> Dict[str, Any]:\n sample = super().__getitem__(idx)\n token_sample = self._read_binary_tokenized_sample(sample)\n return {\"input_ids\": token_sample, \"set\": sample[\"set\"], \"idx\": idx} " } ]
import_statement:
from collections import defaultdict
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
from omegaconf import DictConfig
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.data.data_collator import _torch_collate_batch
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from llmshearing.datasets.streaming_dataset import (
    TextDynamicStreamingDataset, TextStreamingDataset)
import torch
import transformers
token_num: 1,359
""" Load text dataloader for training and evaluation. """ def build_text_dataloader(cfg: DictConfig, device_batch_size: int, dynamic: bool = False, set_names: str = None, proportion: List[float] = None) -> DataLoader: """Builds a text dataloader. Args: cfg (DictConfig): Configuration dictionary. device_batch_size (int): Batch size for one single device. dynamic (bool, optional): Whether to use dynamic streaming dataset to load data from each domain dynamically. Defaults to False. set_names (str, optional): Name of the dataset. Defaults to None. proportion (List[float], optional): Initial proportion of each domain in the dataset. Defaults to None. Returns: DataLoader: A PyTorch DataLoader object. """ if dynamic: dataset = TextDynamicStreamingDataset(local=cfg.dataset.local, max_seq_len=cfg.dataset.max_seq_len, batch_size=device_batch_size, shuffle=cfg.dataset.get( 'shuffle', False), shuffle_seed=cfg.dataset.get( 'shuffle_seed', 9176), num_canonical_nodes=cfg.dataset.get( 'num_canonical_nodes', 128), proportion=proportion, set_names=set_names, is_uint16=cfg.dataset.get("is_uint16", False)) else:
""" Load text dataloader for training and evaluation. """ def build_text_dataloader(cfg: DictConfig, device_batch_size: int, dynamic: bool = False, set_names: str = None, proportion: List[float] = None) -> DataLoader: """Builds a text dataloader. Args: cfg (DictConfig): Configuration dictionary. device_batch_size (int): Batch size for one single device. dynamic (bool, optional): Whether to use dynamic streaming dataset to load data from each domain dynamically. Defaults to False. set_names (str, optional): Name of the dataset. Defaults to None. proportion (List[float], optional): Initial proportion of each domain in the dataset. Defaults to None. Returns: DataLoader: A PyTorch DataLoader object. """ if dynamic: dataset = TextDynamicStreamingDataset(local=cfg.dataset.local, max_seq_len=cfg.dataset.max_seq_len, batch_size=device_batch_size, shuffle=cfg.dataset.get( 'shuffle', False), shuffle_seed=cfg.dataset.get( 'shuffle_seed', 9176), num_canonical_nodes=cfg.dataset.get( 'num_canonical_nodes', 128), proportion=proportion, set_names=set_names, is_uint16=cfg.dataset.get("is_uint16", False)) else:
next_line: dataset = TextStreamingDataset(
gold_snippet_index: 1
created_at: 2023-10-16 12:26:08+00:00
level: 2k

repo_name: hugoycj/Instant-angelo
file_path: models/neus.py
context:
[ { "identifier": "BaseModel", "path": "models/base.py", "snippet": "class BaseModel(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.rank = get_rank()\n self.setup()\n if self.config.get('weights', None):\n self.load_state_dict(torch.load(self.config.weights))\n \n def setup(self):\n raise NotImplementedError\n \n def update_step(self, epoch, global_step):\n pass\n \n def train(self, mode=True):\n return super().train(mode=mode)\n \n def eval(self):\n return super().eval()\n \n def regularizations(self, out):\n return {}\n \n @torch.no_grad()\n def export(self, export_config):\n return {}" }, { "identifier": "chunk_batch", "path": "models/utils.py", "snippet": "def chunk_batch(func, chunk_size, move_to_cpu, *args, **kwargs):\n B = None\n for arg in args:\n if isinstance(arg, torch.Tensor):\n B = arg.shape[0]\n break\n out = defaultdict(list)\n out_type = None\n for i in range(0, B, chunk_size):\n out_chunk = func(*[arg[i:i+chunk_size] if isinstance(arg, torch.Tensor) else arg for arg in args], **kwargs)\n if out_chunk is None:\n continue\n out_type = type(out_chunk)\n if isinstance(out_chunk, torch.Tensor):\n out_chunk = {0: out_chunk}\n elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):\n chunk_length = len(out_chunk)\n out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}\n elif isinstance(out_chunk, dict):\n pass\n else:\n print(f'Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.')\n exit(1)\n for k, v in out_chunk.items():\n v = v if torch.is_grad_enabled() else v.detach()\n v = v.cpu() if move_to_cpu else v\n out[k].append(v)\n \n if out_type is None:\n return\n\n out = {k: torch.cat(v, dim=0) for k, v in out.items()}\n if out_type is torch.Tensor:\n return out[0]\n elif out_type in [tuple, list]:\n return out_type([out[i] for i in range(chunk_length)])\n elif out_type is dict:\n return out" }, { "identifier": "update_module_step", "path": "systems/utils.py", "snippet": "def update_module_step(m, epoch, global_step):\n if hasattr(m, 'update_step'):\n m.update_step(epoch, global_step)" } ]
import_statement:
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import models
from models.base import BaseModel
from models.utils import chunk_batch
from systems.utils import update_module_step
from nerfacc import ContractionType, OccupancyGrid, ray_marching, render_weight_from_density, render_weight_from_alpha, accumulate_along_rays
from nerfacc.intersection import ray_aabb_intersect
token_num: 1,393
cropped_code / all_code:
class VarianceNetwork(nn.Module):
    def __init__(self, config):
        super(VarianceNetwork, self).__init__()
        self.config = config
        self.init_val = self.config.init_val
        self.register_parameter('variance', nn.Parameter(torch.tensor(self.config.init_val)))
        self.modulate = self.config.get('modulate', False)
        if self.modulate:
            self.mod_start_steps = self.config.mod_start_steps
            self.reach_max_steps = self.config.reach_max_steps
            self.max_inv_s = self.config.max_inv_s

    @property
    def inv_s(self):
        val = torch.exp(self.variance * 10.0)
        if self.modulate and self.do_mod:
            val = val.clamp_max(self.mod_val)
        return val

    def forward(self, x):
        return torch.ones([len(x), 1], device=self.variance.device) * self.inv_s

    def update_step(self, epoch, global_step):
        if self.modulate:
            self.do_mod = global_step > self.mod_start_steps
            if not self.do_mod:
                self.prev_inv_s = self.inv_s.item()
            else:
                self.mod_val = min((global_step / self.reach_max_steps) * (self.max_inv_s - self.prev_inv_s) + self.prev_inv_s, self.max_inv_s)


@models.register('neus')
class NeuSModel(BaseModel):
    def setup(self):
        self.geometry = models.make(self.config.geometry.name, self.config.geometry)
        self.texture = models.make(self.config.texture.name, self.config.texture)
        self.geometry.contraction_type = ContractionType.AABB

        if self.config.learned_background:
            self.geometry_bg = models.make(self.config.geometry_bg.name, self.config.geometry_bg)
            self.texture_bg = models.make(self.config.texture_bg.name, self.config.texture_bg)
            self.geometry_bg.contraction_type = ContractionType.UN_BOUNDED_SPHERE
            self.near_plane_bg, self.far_plane_bg = 0.1, 1e3
            self.cone_angle_bg = 10**(math.log10(self.far_plane_bg) / self.config.num_samples_per_ray_bg) - 1.
            self.render_step_size_bg = 0.01

        self.variance = VarianceNetwork(self.config.variance)
        self.register_buffer('scene_aabb', torch.as_tensor([-self.config.radius, -self.config.radius, -self.config.radius, self.config.radius, self.config.radius, self.config.radius], dtype=torch.float32))
        if self.config.grid_prune:
            self.occupancy_grid = OccupancyGrid(
                roi_aabb=self.scene_aabb,
                resolution=128,
                contraction_type=ContractionType.AABB
            )
            if self.config.learned_background:
                self.occupancy_grid_bg = OccupancyGrid(
                    roi_aabb=self.scene_aabb,
                    resolution=256,
                    contraction_type=ContractionType.UN_BOUNDED_SPHERE
                )
        self.randomized = self.config.randomized
        self.background_color = None
        self.render_step_size = 1.732 * 2 * self.config.radius / self.config.num_samples_per_ray

    def update_step(self, epoch, global_step):
next_line: update_module_step(self.geometry, epoch, global_step)
gold_snippet_index: 2
created_at: 2023-10-22 02:53:17+00:00
level: 2k

repo_name: HKUDS/GraphGPT
file_path: graphgpt/serve/gradio_web_server_graph.py
context:
[ { "identifier": "default_conversation", "path": "graphgpt/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "LOGDIR", "path": "graphgpt/constants.py", "snippet": "LOGDIR = \".\"" }, { "identifier": "build_logger", "path": "graphgpt/utils.py", "snippet": "def build_logger(logger_name, logger_filename):\n def __init__(self, logger, log_level=logging.INFO):\n def __getattr__(self, attr):\n def write(self, buf):\n def flush(self):\ndef disable_torch_init():\ndef get_gpu_memory(max_gpus=None):\ndef violates_moderation(text):\ndef clean_flant5_ckpt(ckpt_path):\ndef pretty_print_semaphore(semaphore):\ndef iter_over_async(\n async_gen: AsyncGenerator, event_loop: AbstractEventLoop\n) -> Generator:\n async def get_next():\ndef detect_language(text: str) -> str:\nclass StreamToLogger(object):" } ]
import_statement:
import argparse
import datetime
import json
import os
import time
import gradio as gr
import requests
import hashlib
from graphgpt.conversation import (default_conversation, conv_templates,
                                   SeparatorStyle)
from graphgpt.constants import LOGDIR
from graphgpt.utils import (build_logger, server_error_msg,
                            violates_moderation, moderation_msg)
token_num: 772
cropped_code / all_code:
logger = build_logger("gradio_web_server", "gradio_web_server.log")

headers = {"User-Agent": "GraphGPT Client"}

no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)

priority = {
    "vicuna-13b": "aaaaaaa",
    "koala-13b": "aaaaaab",
}


def get_conv_log_filename():
    t = datetime.datetime.now()
    name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
    return name


def get_model_list():
    ret = requests.post(args.controller_url + "/refresh_all_workers")
    assert ret.status_code == 200
    ret = requests.post(args.controller_url + "/list_models")
    models = ret.json()["models"]
    models.sort(key=lambda x: priority.get(x, x))
    logger.info(f"Models: {models}")
    return models


get_window_url_params = """
function() {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    console.log(url_params);
    return url_params;
}
"""


def load_demo(url_params, request: gr.Request):
    logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")

    dropdown_update = gr.Dropdown.update(visible=True)
    if "model" in url_params:
        model = url_params["model"]
        if model in models:
            dropdown_update = gr.Dropdown.update(
                value=model, visible=True)
next_line: state = default_conversation.copy()
gold_snippet_index: 0
created_at: 2023-10-15 05:13:24+00:00
level: 2k

repo_name: hkchengrex/Cutie
file_path: gui/ritm/inference/predictors/brs_functors.py
context:
[ { "identifier": "_compute_iou", "path": "gui/ritm/model/metrics.py", "snippet": "def _compute_iou(pred_mask, gt_mask, ignore_mask=None, keep_ignore=False):\n if ignore_mask is not None:\n pred_mask = torch.where(ignore_mask, torch.zeros_like(pred_mask), pred_mask)\n\n reduction_dims = misc.get_dims_with_exclusion(gt_mask.dim(), 0)\n union = torch.mean((pred_mask | gt_mask).float(), dim=reduction_dims).detach().cpu().numpy()\n intersection = torch.mean((pred_mask & gt_mask).float(), dim=reduction_dims).detach().cpu().numpy()\n nonzero = union > 0\n\n iou = intersection[nonzero] / union[nonzero]\n if not keep_ignore:\n return iou\n else:\n result = np.full_like(intersection, -1)\n result[nonzero] = iou\n return result" }, { "identifier": "BRSMaskLoss", "path": "gui/ritm/inference/predictors/brs_losses.py", "snippet": "class BRSMaskLoss(torch.nn.Module):\n def __init__(self, eps=1e-5):\n super().__init__()\n self._eps = eps\n\n def forward(self, result, pos_mask, neg_mask):\n pos_diff = (1 - result) * pos_mask\n pos_target = torch.sum(pos_diff ** 2)\n pos_target = pos_target / (torch.sum(pos_mask) + self._eps)\n\n neg_diff = result * neg_mask\n neg_target = torch.sum(neg_diff ** 2)\n neg_target = neg_target / (torch.sum(neg_mask) + self._eps)\n \n loss = pos_target + neg_target\n\n with torch.no_grad():\n f_max_pos = torch.max(torch.abs(pos_diff)).item()\n f_max_neg = torch.max(torch.abs(neg_diff)).item()\n\n return loss, f_max_pos, f_max_neg" } ]
import_statement:
import torch
import numpy as np
from ...model.metrics import _compute_iou
from .brs_losses import BRSMaskLoss
token_num: 1,041
cropped_code / all_code:
class BaseOptimizer:
    def __init__(self, optimizer_params,
                 prob_thresh=0.49,
                 reg_weight=1e-3,
                 min_iou_diff=0.01,
                 brs_loss=BRSMaskLoss(),
                 with_flip=False,
                 flip_average=False,
                 **kwargs):
        self.brs_loss = brs_loss
        self.optimizer_params = optimizer_params
        self.prob_thresh = prob_thresh
        self.reg_weight = reg_weight
        self.min_iou_diff = min_iou_diff
        self.with_flip = with_flip
        self.flip_average = flip_average

        self.best_prediction = None
        self._get_prediction_logits = None
        self._opt_shape = None
        self._best_loss = None
        self._click_masks = None
        self._last_mask = None
        self.device = None

    def init_click(self, get_prediction_logits, pos_mask, neg_mask, device, shape=None):
        self.best_prediction = None
        self._get_prediction_logits = get_prediction_logits
        self._click_masks = (pos_mask, neg_mask)
        self._opt_shape = shape
        self._last_mask = None
        self.device = device

    def __call__(self, x):
        opt_params = torch.from_numpy(x).float().to(self.device)
        opt_params.requires_grad_(True)

        with torch.enable_grad():
            opt_vars, reg_loss = self.unpack_opt_params(opt_params)
            result_before_sigmoid = self._get_prediction_logits(*opt_vars)
            result = torch.sigmoid(result_before_sigmoid)

            pos_mask, neg_mask = self._click_masks
            if self.with_flip and self.flip_average:
                result, result_flipped = torch.chunk(result, 2, dim=0)
                result = 0.5 * (result + torch.flip(result_flipped, dims=[3]))
                pos_mask, neg_mask = pos_mask[:result.shape[0]], neg_mask[:result.shape[0]]

            loss, f_max_pos, f_max_neg = self.brs_loss(result, pos_mask, neg_mask)
            loss = loss + reg_loss

        f_val = loss.detach().cpu().numpy()
        if self.best_prediction is None or f_val < self._best_loss:
            self.best_prediction = result_before_sigmoid.detach()
            self._best_loss = f_val

        if f_max_pos < (1 - self.prob_thresh) and f_max_neg < self.prob_thresh:
            return [f_val, np.zeros_like(x)]

        current_mask = result > self.prob_thresh
        if self._last_mask is not None and self.min_iou_diff > 0:
next_line: diff_iou = _compute_iou(current_mask, self._last_mask)
gold_snippet_index: 0
created_at: 2023-10-19 17:49:24+00:00
level: 2k

repo_name: DeepGraphLearning/ULTRA
file_path: script/pretrain.py
context:
[ { "identifier": "tasks", "path": "ultra/tasks.py", "snippet": "def edge_match(edge_index, query_index):\ndef negative_sampling(data, batch, num_negative, strict=True):\ndef all_negative(data, batch):\ndef strict_negative_mask(data, batch):\ndef compute_ranking(pred, target, mask=None):\ndef build_relation_graph(graph):" }, { "identifier": "util", "path": "ultra/util.py", "snippet": "def detect_variables(cfg_file):\ndef load_config(cfg_file, context=None):\ndef literal_eval(string):\ndef parse_args():\ndef get_root_logger(file=True):\ndef get_rank():\ndef get_world_size():\ndef synchronize():\ndef get_device(cfg):\ndef create_working_directory(cfg):\ndef build_dataset(cfg):" }, { "identifier": "Ultra", "path": "ultra/models.py", "snippet": "class Ultra(nn.Module):\n\n def __init__(self, rel_model_cfg, entity_model_cfg):\n # kept that because super Ultra sounds cool\n super(Ultra, self).__init__()\n\n self.relation_model = RelNBFNet(**rel_model_cfg)\n self.entity_model = EntityNBFNet(**entity_model_cfg)\n\n \n def forward(self, data, batch):\n \n # batch shape: (bs, 1+num_negs, 3)\n # relations are the same all positive and negative triples, so we can extract only one from the first triple among 1+nug_negs\n query_rels = batch[:, 0, 2]\n relation_representations = self.relation_model(data.relation_graph, query=query_rels)\n score = self.entity_model(data, relation_representations, batch)\n \n return score" } ]
import_statement:
import os
import sys
import copy
import math
import pprint
import torch
from itertools import islice
from functools import partial
from torch import optim
from torch import nn
from torch.nn import functional as F
from torch import distributed as dist
from torch.utils import data as torch_data
from torch_geometric.data import Data
from ultra import tasks, util
from ultra.models import Ultra
token_num: 1,017
cropped_code / all_code:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))

separator = ">" * 30
line = "-" * 30


def multigraph_collator(batch, train_graphs):
    num_graphs = len(train_graphs)
    probs = torch.tensor([graph.edge_index.shape[1] for graph in train_graphs]).float()
    probs /= probs.sum()
    graph_id = torch.multinomial(probs, 1, replacement=False).item()

    graph = train_graphs[graph_id]
    bs = len(batch)
    edge_mask = torch.randperm(graph.target_edge_index.shape[1])[:bs]

    batch = torch.cat([graph.target_edge_index[:, edge_mask], graph.target_edge_type[edge_mask].unsqueeze(0)]).t()
    return graph, batch


# here we assume that train_data and valid_data are tuples of datasets
def train_and_validate(cfg, model, train_data, valid_data, filtered_data=None, batch_per_epoch=None):
    if cfg.train.num_epoch == 0:
        return

    world_size = util.get_world_size()
    rank = util.get_rank()

    train_triplets = torch.cat([
        torch.cat([g.target_edge_index, g.target_edge_type.unsqueeze(0)]).t()
        for g in train_data
    ])
    sampler = torch_data.DistributedSampler(train_triplets, world_size, rank)
    train_loader = torch_data.DataLoader(train_triplets, cfg.train.batch_size, sampler=sampler, collate_fn=partial(multigraph_collator, train_graphs=train_data))

    batch_per_epoch = batch_per_epoch or len(train_loader)

    cls = cfg.optimizer.pop("class")
    optimizer = getattr(optim, cls)(model.parameters(), **cfg.optimizer)
    num_params = sum(p.numel() for p in model.parameters())
    logger.warning(line)
    logger.warning(f"Number of parameters: {num_params}")

    if world_size > 1:
        parallel_model = nn.parallel.DistributedDataParallel(model, device_ids=[device])
    else:
        parallel_model = model

    step = math.ceil(cfg.train.num_epoch / 10)
    best_result = float("-inf")
    best_epoch = -1

    batch_id = 0
    for i in range(0, cfg.train.num_epoch, step):
        parallel_model.train()
        for epoch in range(i, min(cfg.train.num_epoch, i + step)):
            if util.get_rank() == 0:
                logger.warning(separator)
                logger.warning("Epoch %d begin" % epoch)

            losses = []
            sampler.set_epoch(epoch)
            for batch in islice(train_loader, batch_per_epoch):
                # now at each step we sample a new graph and edges from it
                train_graph, batch = batch
next_line: batch = tasks.negative_sampling(train_graph, batch, cfg.task.num_negative,
gold_snippet_index: 0
created_at: 2023-10-23 17:06:10+00:00
level: 2k

repo_name: ZhengyiLuo/PerpetualHumanoidControl
file_path: uhc/khrylib/models/erd_net.py
context:
[ { "identifier": "RNN", "path": "uhc/khrylib/models/rnn.py", "snippet": "class RNN(nn.Module):\n def __init__(self, input_dim, out_dim, cell_type='lstm', bi_dir=False):\n super().__init__()\n self.input_dim = input_dim\n self.out_dim = out_dim\n self.cell_type = cell_type\n self.bi_dir = bi_dir\n self.mode = 'batch'\n rnn_cls = nn.LSTMCell if cell_type == 'lstm' else nn.GRUCell\n hidden_dim = out_dim // 2 if bi_dir else out_dim\n self.rnn_f = rnn_cls(self.input_dim, hidden_dim)\n if bi_dir:\n self.rnn_b = rnn_cls(self.input_dim, hidden_dim)\n self.hx, self.cx = None, None\n\n def set_mode(self, mode):\n self.mode = mode\n\n def initialize(self, batch_size=1, hx=None, cx=None):\n if self.mode == 'step':\n self.hx = zeros((batch_size, self.rnn_f.hidden_size)) if hx is None else hx\n if self.cell_type == 'lstm':\n self.cx = zeros((batch_size, self.rnn_f.hidden_size)) if cx is None else cx\n\n def forward(self, x):\n if self.mode == 'step':\n self.hx, self.cx = batch_to(x.device, self.hx, self.cx)\n if self.cell_type == 'lstm':\n self.hx, self.cx = self.rnn_f(x, (self.hx, self.cx))\n else:\n self.hx = self.rnn_f(x, self.hx)\n rnn_out = self.hx\n else:\n rnn_out_f = self.batch_forward(x)\n if not self.bi_dir:\n return rnn_out_f\n rnn_out_b = self.batch_forward(x, reverse=True)\n rnn_out = torch.cat((rnn_out_f, rnn_out_b), 2)\n return rnn_out\n\n def batch_forward(self, x, reverse=False):\n rnn = self.rnn_b if reverse else self.rnn_f\n rnn_out = []\n hx = zeros((x.size(1), rnn.hidden_size), device=x.device)\n if self.cell_type == 'lstm':\n cx = zeros((x.size(1), rnn.hidden_size), device=x.device)\n ind = reversed(range(x.size(0))) if reverse else range(x.size(0))\n for t in ind:\n if self.cell_type == 'lstm':\n hx, cx = rnn(x[t, ...], (hx, cx))\n else:\n hx = rnn(x[t, ...], hx)\n rnn_out.append(hx.unsqueeze(0))\n if reverse:\n rnn_out.reverse()\n rnn_out = torch.cat(rnn_out, 0)\n return rnn_out" }, { "identifier": "MLP", "path": "uhc/khrylib/models/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(self, input_dim, hidden_dims=(128, 128), activation='tanh'):\n super().__init__()\n if activation == 'tanh':\n self.activation = torch.tanh\n elif activation == 'relu':\n self.activation = torch.relu\n elif activation == 'sigmoid':\n self.activation = torch.sigmoid\n elif activation == 'gelu':\n self.activation = torch.nn.GELU()\n\n self.out_dim = hidden_dims[-1]\n self.affine_layers = nn.ModuleList()\n last_dim = input_dim\n for nh in hidden_dims:\n self.affine_layers.append(nn.Linear(last_dim, nh))\n last_dim = nh\n\n def forward(self, x):\n for affine in self.affine_layers:\n x = self.activation(affine(x))\n return x" } ]
import_statement:
from uhc.khrylib.utils.torch import *
from torch import nn
from uhc.khrylib.models.rnn import RNN
from uhc.khrylib.models.mlp import MLP
token_num: 962
cropped_code / all_code:
class ERDNet(nn.Module):

    def __init__(self, state_dim):
        super().__init__()
        self.state_dim = state_dim
next_line: self.encoder_mlp = MLP(state_dim, (500,), 'relu')
gold_snippet_index: 1
created_at: 2023-10-15 19:05:47+00:00
level: 2k

repo_name: laike9m/Python-Type-Challenges
file_path: views/views.py
context:
[ { "identifier": "ChallengeKey", "path": "views/challenge.py", "snippet": "ROOT_DIR = Path(__file__).parent.parent\n BASIC = \"basic\"\n INTERMEDIATE = \"intermediate\"\n ADVANCED = \"advanced\"\n EXTREME = \"extreme\"\n CODE_SPLITTER: ClassVar[str] = \"\\n## End of your code ##\\n\"\n EXPECT_ERROR_COMMENT = \"expect-type-error\"\n PYRIGHT_MESSAGE_REGEX = r\"^(?:.+?):(\\d+):[\\s\\-\\d]+(error:.+)$\"\nclass Level(StrEnum):\nclass ChallengeKey:\nclass Challenge:\nclass TypeCheckResult:\nclass ChallengeManager:\n def is_valid_level(cls, level: str):\n def from_str(cls, key: str):\n def __post_init__(self):\n def parse_code(self):\n def __init__(self, root_dir: Optional[Path] = None):\n def has_challenge(self, key: ChallengeKey) -> bool:\n def get_challenge(self, key: ChallengeKey) -> Challenge:\n def challenge_count(self) -> int:\n def run_challenge(self, key: ChallengeKey, user_code: str) -> TypeCheckResult:\n def get_random_challenge(self) -> dict[str, str]:\n def _load_challenges(root_dir: Path) -> dict[ChallengeKey, Challenge]:\n def _get_challenges_groupby_level(self) -> dict[Level, list[ChallengeName]]:\n def _type_check_with_pyright(\n cls, user_code: str, test_code: str\n ) -> TypeCheckResult:" }, { "identifier": "sitemapper", "path": "views/sitemap.py", "snippet": "" }, { "identifier": "render_hints", "path": "views/utils/text.py", "snippet": "def render_hints(hints: str) -> str:\n \"\"\"Render the hints messages to HTML format.\"\"\"\n return markdown.markdown(hints)" } ]
import_statement:
import ast
import platform
from functools import wraps
from flask import (
    abort,
    Blueprint,
    jsonify,
    redirect,
    render_template,
    request,
)
from flask_htmx import HTMX
from .challenge import ChallengeKey, Level, challenge_manager
from .sitemap import sitemapper
from .utils.text import render_hints
token_num: 801
cropped_code / all_code:
app_views = Blueprint("app_views", __name__)
htmx = HTMX(app_views)


def validate_challenge(view_func):
    @wraps(view_func)
    def wrapper(level, name, *args, **kwargs):
        if Level.is_valid_level(level) and challenge_manager.has_challenge(
            ChallengeKey(Level(level), name)
        ):
            return view_func(level, name, *args, **kwargs)  # valid challenge
        abort(404)

    return wrapper


@sitemapper.include(changefreq="daily", priority=1.0)
@app_views.route("/")
def index():
    return render_template(
        "index.html",
        challenges_groupby_level=challenge_manager.challenges_groupby_level,
    )


@sitemapper.include(
    changefreq="daily",
    priority=0.5,
    # https://github.com/h-janes/flask-sitemapper/wiki/Usage#dynamic-routes
    url_variables={
        "level": [c.level for c in challenge_manager.challenges.keys()],
        "name": [c.name for c in challenge_manager.challenges.keys()],
    },
)
@app_views.route("/<level>/<name>", methods=["GET"])
@validate_challenge
def get_challenge(level: str, name: str):
    challenge = challenge_manager.get_challenge(ChallengeKey(Level(level), name))
    params = {
        "name": name,
        "level": challenge.level,
        "challenges_groupby_level": challenge_manager.challenges_groupby_level,
        "code_under_test": challenge.user_code,
        "test_code": challenge.test_code,
"hints_for_display": render_hints(challenge.hints) if challenge.hints else None,
2
2023-10-23 05:11:41+00:00
2k

repo_name: uni-medical/SAM-Med3D
file_path: segment_anything/modeling/image_encoder.py
context:
[ { "identifier": "LayerNorm2d", "path": "segment_anything/modeling/common.py", "snippet": "class LayerNorm2d(nn.Module):\r\n def __init__(self, num_channels: int, eps: float = 1e-6) -> None:\r\n super().__init__()\r\n self.weight = nn.Parameter(torch.ones(num_channels))\r\n self.bias = nn.Parameter(torch.zeros(num_channels))\r\n self.eps = eps\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n u = x.mean(1, keepdim=True)\r\n s = (x - u).pow(2).mean(1, keepdim=True)\r\n x = (x - u) / torch.sqrt(s + self.eps)\r\n y = self.weight[:, None, None] * x\r\n # y = torch.mul(self.weight[:, None, None], x)\r\n x = y + self.bias[:, None, None]\r\n return x\r" }, { "identifier": "MLPBlock", "path": "segment_anything/modeling/common.py", "snippet": "class MLPBlock(nn.Module):\r\n def __init__(\r\n self,\r\n embedding_dim: int,\r\n mlp_dim: int,\r\n act: Type[nn.Module] = nn.GELU,\r\n ) -> None:\r\n super().__init__()\r\n self.lin1 = nn.Linear(embedding_dim, mlp_dim)\r\n self.lin2 = nn.Linear(mlp_dim, embedding_dim)\r\n self.act = act()\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n return self.lin2(self.act(self.lin1(x)))\r" } ]
import_statement:
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
token_num: 1,164
cropped_code / all_code:
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
    def __init__(
        self,
        img_size: int = 1024,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        depth: int = 12,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        out_chans: int = 256,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_abs_pos: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        global_attn_indexes: Tuple[int, ...] = (),
    ) -> None:
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            global_attn_indexes (list): Indexes for blocks using global attention.
        """
        super().__init__()
        self.img_size = img_size

        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )

        self.pos_embed: Optional[nn.Parameter] = None
        if use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
            )

        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                window_size=window_size if i not in global_attn_indexes else 0,
                input_size=(img_size // patch_size, img_size // patch_size),
            )
            self.blocks.append(block)

        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dim,
                out_chans,
                kernel_size=1,
                bias=False,
            ),
next_line: LayerNorm2d(out_chans),
gold_snippet_index: 0
created_at: 2023-10-23 15:41:07+00:00
level: 2k

repo_name: VikParuchuri/libgen_to_txt
file_path: libgen_to_txt/marker/convert.py
context:
[ { "identifier": "settings", "path": "libgen_to_txt/settings.py", "snippet": "class Settings(BaseSettings):\n class Config:\n BASE_STORAGE_FOLDER: str = \"libgen\" # temp storage for downloaded chunks\n BASE_PROCESSED_FOLDER: str = \"processed\" # After a chunk is processed, an empty file is created here\n BASE_TXT_FOLDER: str = \"txt\" # Where the final text is stored\n BASE_METADATA_FOLDER: str = \"metadata\" # Where to store metadata for processing\n LIBGEN_DB_NAME: str = \"libgen\"\n LIBGEN_DB_USER: str = \"libgen\"\n LIBGEN_DB_PASS: str = \"password\"\n CONVERSION_WORKERS: int = 18 # Number of workers to use to convert pdfs for each libgen chunk\n DOWNLOAD_WORKERS: int = 8 # Number of download workers (bandwidth-bound)\n MAX_TIME_TO_WAIT: int = 60 * 60 * 6 # 6 hours to wait for a download to finish\n RCLONE_ADAPTER_NAME: str = \"putio\"\n TEXT_FLAGS: int = pymupdf.TEXTFLAGS_TEXT & ~pymupdf.TEXT_PRESERVE_LIGATURES\n CONVERSION_METHOD: str = \"naive\" # Either naive or marker. Naive is faster, but marker is more accurate.\n GPU_COUNT: int = 0 # Number of GPUs to use for marker. 0 means to use CPU only\n MARKER_FOLDER: str = \"../marker\"\n MARKER_GPU_TIMEOUT: int = 60 * 60 * 8 # Time to wait for marker gpu to finish\n MARKER_CPU_TIMEOUT: int = 60 * 60 * 24 # Time to wait for marker to finish\n MARKER_SUPPORTED_LANGUAGES: List = [\"English\", \"Spanish\", \"Portuguese\", \"French\", \"German\", \"Russian\"]\n MARKER_SUPPORTED_EXTENSIONS: List = [\"pdf\", \"epub\", \"mobi\", \"xps\", \"fb2\"]\n MARKER_MIN_LENGTH: int = 10000 # Min amount of text to extract from file naively before using marker\n MARKER_DEBUG_DATA_FOLDER: Optional[str] = None # Folder to store debug data in\n POETRY_DIR: str = \"~/.local/bin\" # Poetry directory, used to activate marker venv\n PUTIO_TOKEN: str = \"\"\n PUTIO_FOLDER: str = \"libgen\"" }, { "identifier": "query_metadata", "path": "libgen_to_txt/metadata.py", "snippet": "def query_metadata(fmd5):\n connection = pymysql.connect(host='localhost',\n user=settings.LIBGEN_DB_USER,\n password=settings.LIBGEN_DB_PASS,\n database=settings.LIBGEN_DB_NAME,\n cursorclass=pymysql.cursors.DictCursor)\n\n with connection:\n with connection.cursor() as cursor:\n # Read a single record\n sql = \"SELECT ue.ID, ue.Title, ue.Author, ue.Year, ue.Language, ue.Publisher, ue.Topic, ue.Extension, ue.Cleaned, ue.Scanned, ue.Pages, de.descr, de.toc from updated_edited ue left outer join description_edited de on de.md5 = ue.MD5 where ue.MD5=%s order by ue.TimeLastModified desc limit 1;\"\n cursor.execute(sql, (fmd5,))\n metadata = cursor.fetchone()\n\n return metadata" } ]
import subprocess import os import psutil import json from libgen_to_txt.settings import settings from libgen_to_txt.metadata import query_metadata
857
def filter_invalid(folder_name): files = os.listdir(folder_name) all_metadata = {} for fname in files: if fname.startswith("."): continue fpath = os.path.join(folder_name, fname) metadata = query_metadata(fname) if not metadata: os.unlink(fpath) continue
def filter_invalid(folder_name): files = os.listdir(folder_name) all_metadata = {} for fname in files: if fname.startswith("."): continue fpath = os.path.join(folder_name, fname) metadata = query_metadata(fname) if not metadata: os.unlink(fpath) continue
if metadata["Language"].strip() not in settings.MARKER_SUPPORTED_LANGUAGES:
0
2023-10-16 17:56:36+00:00
2k
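The filter_invalid pass above keys each downloaded file by its MD5 filename and deletes anything the libgen database does not recognize or support. A minimal self-contained sketch of that pattern; query_metadata is stubbed with a hypothetical in-memory dict, whereas the real helper runs the SQL query shown in the context field above:

import os

def query_metadata(fmd5: str):
    # Hypothetical stub: the real helper queries the libgen MySQL database
    # by MD5 and returns a metadata dict or None.
    fake_db = {"d41d8cd98f00b204e9800998ecf8427e": {"Language": "English"}}
    return fake_db.get(fmd5)

def filter_invalid(folder_name: str, supported_languages=("English",)) -> None:
    for fname in os.listdir(folder_name):
        if fname.startswith("."):
            continue
        fpath = os.path.join(folder_name, fname)
        metadata = query_metadata(fname)
        # Drop files with no metadata row or an unsupported language.
        if not metadata or metadata["Language"].strip() not in supported_languages:
            os.unlink(fpath)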
senran101604/sagemode
sagemode.py
[ { "identifier": "Notify", "path": "accessories.py", "snippet": "class Notify:\n \"A helper class for notifications of Sagemode process\"\n\n @staticmethod\n def start(username: str, number_of_sites) -> str:\n start(ascii_art, delay=0.1)\n if username or sites is not None:\n return f\"[yellow][[bright_red]*[yellow][yellow]] [bright_blue]Searching {number_of_sites} sites for target: [bright_yellow]{username}\"\n\n # notify the user how many sites the username has been found\n @staticmethod\n def positive_res(username: str, count) -> str:\n return f\"\\n[yellow][[bright_red]+[yellow]][bright_green] Found [bright_red]{username} [bright_green]on [bright_magenta]{count}[bright_green] sites\"\n\n # notify the user where the result is stored\n @staticmethod\n def stored_result(result_file: str) -> str:\n return f\"[bright_green][[yellow]@[bright_green]] [orange3]Results stored in: [bright_green]{result_file}\\n\"\n\n @staticmethod\n def not_found(site: str, status_code=\"\") -> str:\n if status_code:\n return f\"[black][[red]-[black]] [blue]{site}: [yellow]Not Found! {status_code}\"\n return f\"[black][[red]-[black]] [blue]{site}: [yellow]Not Found!\"\n\n @staticmethod\n def found(site: str, url: str) -> str:\n return f\"[red][[green]+[red]] [green]{site}: [blue]{url}\"\n\n @staticmethod\n def update(local_version: str, remote_version: str) -> str:\n return (\n \"[red][[bright_red]![red]] [yellow]Update Available!\\n[/yellow]\"\n + f\"[red][[yellow]![red]] [bright_yellow]You are running Version: [bright_green]{local_version}\\n\"\n + f\"[red][[/red][yellow]![red]][bright_yellow] New Version Available: [bright_green]{remote_version}\"\n )\n\n @staticmethod\n def update_error(error: str) -> str:\n return f\"[bright_red][[bright_red]![bright_red]] [bright_yellow]A problem occured while checking for an update: [bright_red]{error}\"\n\n @staticmethod\n def version(version: str) -> str:\n return f\"[bright_yellow]Sagemode [bright_red]{version}\"\n\n def exception(site, error):\n return f\"[black][[red]![black]] [blue]{site}: [bright_red]{error}...\"" }, { "identifier": "sites", "path": "sites.py", "snippet": "" } ]
import os import re import datetime import subprocess import threading import random import requests from argparse import ArgumentParser from rich.console import Console from bs4 import BeautifulSoup from accessories import Notify from sites import sites, soft404_indicators, user_agents
1326
#! /usr/bin/env python3

"""
Sagemode: Track and Unveil Online identities across social media platforms.
"""

__version__ = "1.1.3"


class Sagemode:
    def __init__(self, username: str, found_only=False):
        self.console = Console()
        self.notify = Notify
        self.positive_count = 0
        self.username = username
        self.result_file = os.path.join("data", f"{self.username}.txt")
        self.found_only = found_only
        # shared lock so threads don't race on the positive count
        self.lock = threading.Lock()

    # check whether the response is a soft 404 (a false positive); returns True if it is
    def is_soft404(self, html_response: str) -> bool:
        # this is for checking the title bar of the page
        soup = BeautifulSoup(html_response, "html.parser")
        page_title = soup.title.string.strip() if soup.title and soup.title.string else ""
        # this is a somewhat messy solution, but it currently reduces the number
        # of soft-404 responses (false positives)
        for error_indicator in soft404_indicators:
            if (
                # check if the error indicator is in the html string response
                error_indicator.lower() in html_response.lower()
                # check the title bar of the page for any error indicator
                or error_indicator.lower() in page_title.lower()
                # site-specific checks, since a positive result will have the username in the title bar
                or page_title.lower() == "instagram"
                # patreon's removed user
                or page_title.lower() == "patreon logo"
                or "sign in" in page_title.lower()
            ):
                return True
        return False

    def check_site(self, site: str, url: str, headers):
        url = url.format(self.username)
        # we need headers to avoid being blocked (403 errors) when requesting the website
        try:
            with requests.Session() as session:
                response = session.get(url, headers=headers)
            # further checks to reduce false positive results
            if (
                response.status_code == 200
                and self.username.lower() in response.text.lower()
                and not self.is_soft404(response.text)
            ):
                # prevent multiple threads from modifying the positive count
                # simultaneously (avoids race conditions)
                with self.lock:
                    self.positive_count += 1
                self.console.print(self.notify.found(site, url))
                with open(self.result_file, "a") as f:
                    f.write(f"{url}\n")
            # the site returned 404 (user not found)
            else:
                if not self.found_only:
                    self.console.print(self.notify.not_found(site))
        except Exception as e:
            self.console.print(self.notify.exception(site, e))

    def start(self):
        """
        Start the search.
        """
        self.console.print(self.notify.start(self.username, len(sites)))

        current_datetime = datetime.datetime.now()
        date = current_datetime.strftime("%m/%d/%Y")
        time = current_datetime.strftime("%I:%M %p")
#! /usr/bin/env python3

"""
Sagemode: Track and Unveil Online identities across social media platforms.
"""

__version__ = "1.1.3"


class Sagemode:
    def __init__(self, username: str, found_only=False):
        self.console = Console()
        self.notify = Notify
        self.positive_count = 0
        self.username = username
        self.result_file = os.path.join("data", f"{self.username}.txt")
        self.found_only = found_only
        # shared lock so threads don't race on the positive count
        self.lock = threading.Lock()

    # check whether the response is a soft 404 (a false positive); returns True if it is
    def is_soft404(self, html_response: str) -> bool:
        # this is for checking the title bar of the page
        soup = BeautifulSoup(html_response, "html.parser")
        page_title = soup.title.string.strip() if soup.title and soup.title.string else ""
        # this is a somewhat messy solution, but it currently reduces the number
        # of soft-404 responses (false positives)
        for error_indicator in soft404_indicators:
            if (
                # check if the error indicator is in the html string response
                error_indicator.lower() in html_response.lower()
                # check the title bar of the page for any error indicator
                or error_indicator.lower() in page_title.lower()
                # site-specific checks, since a positive result will have the username in the title bar
                or page_title.lower() == "instagram"
                # patreon's removed user
                or page_title.lower() == "patreon logo"
                or "sign in" in page_title.lower()
            ):
                return True
        return False

    def check_site(self, site: str, url: str, headers):
        url = url.format(self.username)
        # we need headers to avoid being blocked (403 errors) when requesting the website
        try:
            with requests.Session() as session:
                response = session.get(url, headers=headers)
            # further checks to reduce false positive results
            if (
                response.status_code == 200
                and self.username.lower() in response.text.lower()
                and not self.is_soft404(response.text)
            ):
                # prevent multiple threads from modifying the positive count
                # simultaneously (avoids race conditions)
                with self.lock:
                    self.positive_count += 1
                self.console.print(self.notify.found(site, url))
                with open(self.result_file, "a") as f:
                    f.write(f"{url}\n")
            # the site returned 404 (user not found)
            else:
                if not self.found_only:
                    self.console.print(self.notify.not_found(site))
        except Exception as e:
            self.console.print(self.notify.exception(site, e))

    def start(self):
        """
        Start the search.
        """
        self.console.print(self.notify.start(self.username, len(sites)))

        current_datetime = datetime.datetime.now()
        date = current_datetime.strftime("%m/%d/%Y")
        time = current_datetime.strftime("%I:%M %p")
headers = {"User-Agent": random.choice(user_agents)}
1
2023-10-15 15:19:24+00:00
2k
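The is_soft404 heuristic in the record above keys off page content and the title bar rather than the HTTP status code, since many sites answer 200 for missing profiles. A reduced, runnable sketch of the same idea; the indicator list is shortened for illustration, and beautifulsoup4 must be installed:

from bs4 import BeautifulSoup  # pip install beautifulsoup4

SOFT404_INDICATORS = ["page not found", "not available", "doesn't exist"]

def is_soft404(html_response: str) -> bool:
    soup = BeautifulSoup(html_response, "html.parser")
    page_title = soup.title.string.strip() if soup.title and soup.title.string else ""
    return any(
        indicator in html_response.lower() or indicator in page_title.lower()
        for indicator in SOFT404_INDICATORS
    )

print(is_soft404("<html><title>Page Not Found</title></html>"))  # True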
NVIDIA/GenerativeAIExamples
RetrievalAugmentedGeneration/common/server.py
[ { "identifier": "utils", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "DEFAULT_MAX_CONTEXT = 1500\nDEFAULT_NUM_TOKENS = 150\nTEXT_SPLITTER_EMBEDDING_MODEL = \"intfloat/e5-large-v2\"\nclass LimitRetrievedNodesLength(BaseNodePostprocessor):\n def _postprocess_nodes(\n self, nodes: List[\"NodeWithScore\"] = [], query_bundle: Optional[\"QueryBundle\"] = None\n ) -> List[\"NodeWithScore\"]:\ndef set_service_context() -> None:\ndef get_config() -> \"ConfigWizard\":\ndef get_vector_index() -> VectorStoreIndex:\ndef get_doc_retriever(num_nodes: int = 4) -> \"BaseRetriever\":\ndef get_llm() -> LangChainLLM:\ndef get_embedding_model() -> LangchainEmbedding:\ndef is_base64_encoded(s: str) -> bool:\ndef get_text_splitter() -> SentenceTransformersTokenTextSplitter:" }, { "identifier": "chains", "path": "RetrievalAugmentedGeneration/examples/developer_rag/chains.py", "snippet": "def llm_chain(\n context: str, question: str, num_tokens: int\n) -> Generator[str, None, None]:\ndef rag_chain(prompt: str, num_tokens: int) -> Generator[str, None, None]:\ndef ingest_docs(data_dir: str, filename: str) -> None:" } ]
import base64 import os import shutil import logging from pathlib import Path from typing import Any, Dict, List from fastapi import FastAPI, File, UploadFile from fastapi.responses import JSONResponse, StreamingResponse from pydantic import BaseModel, Field from pymilvus.exceptions import MilvusException, MilvusUnavailableException from RetrievalAugmentedGeneration.common import utils from RetrievalAugmentedGeneration.examples.developer_rag import chains
925
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The definition of the Llama Index chain server.""" logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # create the FastAPI server app = FastAPI() # prestage the embedding model _ = utils.get_embedding_model() # set the global service context for Llama Index utils.set_service_context() class Prompt(BaseModel): """Definition of the Prompt API data type.""" question: str = Field(description="The input query/prompt to the pipeline.") context: str = Field(description="Additional context for the question (optional)") use_knowledge_base: bool = Field(description="Whether to use a knowledge base", default=True) num_tokens: int = Field(description="The maximum number of tokens in the response.", default=50) class DocumentSearch(BaseModel): """Definition of the DocumentSearch API data type.""" content: str = Field(description="The content or keywords to search for within documents.") num_docs: int = Field(description="The maximum number of documents to return in the response.", default=4) @app.post("/uploadDocument") async def upload_document(file: UploadFile = File(...)) -> JSONResponse: """Upload a document to the vector store.""" if not file.filename: return JSONResponse(content={"message": "No files provided"}, status_code=200) try: upload_folder = "uploaded_files" upload_file = os.path.basename(file.filename) if not upload_file: raise RuntimeError("Error parsing uploaded filename.") file_path = os.path.join(upload_folder, upload_file) uploads_dir = Path(upload_folder) uploads_dir.mkdir(parents=True, exist_ok=True) with open(file_path, "wb") as f: shutil.copyfileobj(file.file, f)
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The definition of the Llama Index chain server.""" logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # create the FastAPI server app = FastAPI() # prestage the embedding model _ = utils.get_embedding_model() # set the global service context for Llama Index utils.set_service_context() class Prompt(BaseModel): """Definition of the Prompt API data type.""" question: str = Field(description="The input query/prompt to the pipeline.") context: str = Field(description="Additional context for the question (optional)") use_knowledge_base: bool = Field(description="Whether to use a knowledge base", default=True) num_tokens: int = Field(description="The maximum number of tokens in the response.", default=50) class DocumentSearch(BaseModel): """Definition of the DocumentSearch API data type.""" content: str = Field(description="The content or keywords to search for within documents.") num_docs: int = Field(description="The maximum number of documents to return in the response.", default=4) @app.post("/uploadDocument") async def upload_document(file: UploadFile = File(...)) -> JSONResponse: """Upload a document to the vector store.""" if not file.filename: return JSONResponse(content={"message": "No files provided"}, status_code=200) try: upload_folder = "uploaded_files" upload_file = os.path.basename(file.filename) if not upload_file: raise RuntimeError("Error parsing uploaded filename.") file_path = os.path.join(upload_folder, upload_file) uploads_dir = Path(upload_folder) uploads_dir.mkdir(parents=True, exist_ok=True) with open(file_path, "wb") as f: shutil.copyfileobj(file.file, f)
chains.ingest_docs(file_path, upload_file)
1
2023-10-19 13:46:31+00:00
2k
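The upload handler above guards against path traversal by reducing the client-supplied filename to its basename before joining it to the uploads directory. The same pattern in isolation (the function name save_upload is mine, not part of the server's API):

import io
import os
import shutil
from pathlib import Path

def save_upload(filename: str, fileobj, upload_folder: str = "uploaded_files") -> str:
    # basename strips any directory components a client might smuggle in,
    # e.g. "../../etc/passwd" becomes "passwd".
    upload_file = os.path.basename(filename)
    if not upload_file:
        raise RuntimeError("Error parsing uploaded filename.")
    Path(upload_folder).mkdir(parents=True, exist_ok=True)
    file_path = os.path.join(upload_folder, upload_file)
    with open(file_path, "wb") as f:
        shutil.copyfileobj(fileobj, f)
    return file_path

print(save_upload("../../etc/passwd", io.BytesIO(b"demo")))  # uploaded_files/passwd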
Hackl0us/apple-spyder
airpods_update_detection.py
[ { "identifier": "DatabaseUtil", "path": "classes/database.py", "snippet": "class DatabaseUtil:\n def __init__(self):\n import sqlite3\n self.conn = sqlite3.connect('res/apple-spyder.db')\n\n def db_select(self, sql):\n try:\n c = self.conn.execute(sql)\n return c.fetchall()\n except ValueError as err:\n print(err)\n\n def db_operate(self, *sql):\n try:\n self.conn.execute(*sql)\n self.conn.commit()\n except ValueError as err:\n print(err)\n\n def close(self):\n self.conn.close()" }, { "identifier": "covert_to_local_timezone", "path": "classes/datetime.py", "snippet": "def covert_to_local_timezone(datetime):\n return parser.parse(datetime).astimezone(tz=None)" }, { "identifier": "is_a_previous_time", "path": "classes/datetime.py", "snippet": "def is_a_previous_time(last_update_time, current_time):\n if parser.parse(last_update_time) < parser.parse(current_time):\n return True\n else:\n return False" }, { "identifier": "Telegram", "path": "classes/telegram.py", "snippet": "class Telegram:\n def __init__(self):\n config = _get_bot_config()\n self.enable = config['enable']\n self.bot_token = config['bot-token']\n self.chat_id = config['chat-id']\n\n def send_message(self, message, chat_id=None, parse_in_markdown=False):\n if not self.enable:\n logging.warning(\"Telegram posting feature is DISABLED.\")\n return\n\n if chat_id is None:\n chat_id = self.chat_id\n\n send_message_url = f'https://api.telegram.org/bot{self.bot_token}/sendMessage?chat_id={chat_id}&text={message}'\n\n if parse_in_markdown:\n send_message_url += '&parse_mode=markdown'\n\n requests.get(send_message_url)" }, { "identifier": "Weibo", "path": "classes/weibo.py", "snippet": "class Weibo:\n def __init__(self):\n config = _get_weibo_config()\n self.enable = config['enable']\n self.access_token = config['access-token']\n self.redirect_uri = config['redirect-uri']\n self.rip = config['rip']\n\n def post_weibo(self, message):\n if not self.enable:\n logging.warning(\"Weibo posting feature is DISABLED.\")\n return\n\n url = \"https://api.weibo.com/2/statuses/share.json\"\n\n params = {\"access_token\": self.access_token, \"status\": str(message) + self.redirect_uri, \"rip\": self.rip}\n\n res = requests.post(url, data=params)\n print(res.text)" } ]
import logging import plistlib import urllib.request from classes.database import DatabaseUtil from classes.datetime import covert_to_local_timezone from classes.datetime import is_a_previous_time from classes.telegram import Telegram from classes.weibo import Weibo
733
def main(): ota_update_url = "https://mesu.apple.com/assets/com_apple_MobileAsset_UARP_A2618/com_apple_MobileAsset_UARP_A2618.xml" with urllib.request.urlopen(ota_update_url) as response: firmware_release_date = response.headers['last-modified'] plist_content = plistlib.loads(response.read()) # Get last OTA update time from db
def main(): ota_update_url = "https://mesu.apple.com/assets/com_apple_MobileAsset_UARP_A2618/com_apple_MobileAsset_UARP_A2618.xml" with urllib.request.urlopen(ota_update_url) as response: firmware_release_date = response.headers['last-modified'] plist_content = plistlib.loads(response.read()) # Get last OTA update time from db
db = DatabaseUtil()
0
2023-10-17 09:00:39+00:00
2k
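The detector above hinges on two parts of the mesu.apple.com response: the Last-Modified header (taken as the firmware release date) and the plist body. A standalone sketch of that fetch-and-parse step; it needs network access, and the exact plist schema is treated as unverified:

import plistlib
import urllib.request

OTA_UPDATE_URL = (
    "https://mesu.apple.com/assets/com_apple_MobileAsset_UARP_A2618/"
    "com_apple_MobileAsset_UARP_A2618.xml"
)

with urllib.request.urlopen(OTA_UPDATE_URL) as response:
    firmware_release_date = response.headers["last-modified"]
    plist_content = plistlib.loads(response.read())

print(firmware_release_date)
# Apple's asset plists usually carry an "Assets" list with build/version
# info, but treat the exact key layout as an assumption.
print(list(plist_content))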
lm-sys/llm-decontaminator
main.py
[ { "identifier": "datatype_to_instruct", "path": "detect_instruct.py", "snippet": "def datatype_to_instruct(data_type):\n if data_type == \"code\":\n return code_instruct\n elif data_type == \"number_substitution\":\n return strong_math_instruct\n elif data_type == \"math\":\n return math_instruct\n elif data_type == \"knowledge\":\n return knowledge_instruct\n else:\n raise Exception(\"Invalid data type: {}\".format(data_type))" }, { "identifier": "llm_detect", "path": "llm_detect.py", "snippet": "def llm_detect(model, database, output_path, instruct, max_workers=32):\n \n results = []\n futures = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n for i, pairs in enumerate(database):\n test_case = pairs[\"test\"]\n case_results = []\n for train_case in pairs[\"train\"]:\n future = executor.submit(detect_contamination, model, test_case, train_case, instruct)\n case_results.append(future)\n futures.append(case_results)\n\n for case_results in futures:\n results.append([future.result() for future in case_results])\n\n for i in range(len(database)):\n database[i][\"results\"] = results[i]\n\n with open(output_path, \"w\") as fout:\n for each in database:\n fout.write(json.dumps(each) + \"\\n\")\n\n return database" }, { "identifier": "check_openai_key", "path": "llm_detect.py", "snippet": "def check_openai_key():\n if not \"OPENAI_API_KEY\" in os.environ:\n raise Exception(\"Please set your OPENAI_API_KEY environment variable.\")" }, { "identifier": "build_database", "path": "vector_db.py", "snippet": "def build_database(model, train_path, test_path, output_path, top_k=1, batch_size=32, device=None):\n\n train_cases = read_dataset(train_path)\n test_cases = read_dataset(test_path)\n train_embs = bert_encode(model, train_cases, batch_size=batch_size, device=device)\n test_embs = bert_encode(model, test_cases, batch_size=batch_size, device=device)\n top_k_indices = top_k_similarity(train_embs, test_embs, top_k)\n\n db = []\n\n for i, test_case in enumerate(test_cases):\n top_k_cases = [train_cases[index] for index in top_k_indices[i]]\n db.append({\"test\": test_case, \"train\": top_k_cases}) \n\n with open(output_path, \"w\") as f:\n for each in db:\n f.write(json.dumps(each) + \"\\n\")\n\n return db" }, { "identifier": "show", "path": "show_samples.py", "snippet": "def show(database, mode=\"all\"):\n\n for each in database:\n test_case = each[\"test\"]\n for i, train_case in enumerate(each[\"train\"]):\n if each[\"results\"][i]:\n print(f\"Test case:\\n{test_case}\\n\")\n print(f\"Train case:\\n{train_case}\\n\")\n\n rephrase_num = sum([1 if True in each[\"results\"] else 0 for each in database])\n print(f\"Rephrase num: {rephrase_num}\")" } ]
import argparse from sentence_transformers import SentenceTransformer from detect_instruct import datatype_to_instruct from llm_detect import llm_detect, check_openai_key from vector_db import build_database from show_samples import show
1096
if __name__ == "__main__": parser = argparse.ArgumentParser(description='Build database of top-k similar cases') parser.add_argument('--train_path', type=str, required=True, help='Path to train cases') parser.add_argument('--test_path', type=str, required=True, help='Path to test cases') parser.add_argument('--output_path', type=str, required=True, help='Path to output database') parser.add_argument('--bert-model', type=str, default='multi-qa-MiniLM-L6-cos-v1', help='Path to sentence transformer model') parser.add_argument('--top_k', type=int, default=1, help='Number of top-k similar cases to retrieve') parser.add_argument('--batch_size', type=int, default=32, help='Batch size for encoding') parser.add_argument('--device', type=str, default=None, help='Device to use for encoding (e.g. "cuda:0")') parser.add_argument("--model", type=str, default="gpt-4", help="The name of the OpenAI model to use") parser.add_argument("--data-type", type=str, default="code", help="The name of the instruction function to use") parser.add_argument("--max-workers", type=int, default=2, help="The maximum number of worker threads to use") args = parser.parse_args() check_openai_key() bert_model = SentenceTransformer(args.bert_model) database = build_database(bert_model, args.train_path, args.test_path, args.output_path, args.top_k, args.batch_size, args.device)
if __name__ == "__main__": parser = argparse.ArgumentParser(description='Build database of top-k similar cases') parser.add_argument('--train_path', type=str, required=True, help='Path to train cases') parser.add_argument('--test_path', type=str, required=True, help='Path to test cases') parser.add_argument('--output_path', type=str, required=True, help='Path to output database') parser.add_argument('--bert-model', type=str, default='multi-qa-MiniLM-L6-cos-v1', help='Path to sentence transformer model') parser.add_argument('--top_k', type=int, default=1, help='Number of top-k similar cases to retrieve') parser.add_argument('--batch_size', type=int, default=32, help='Batch size for encoding') parser.add_argument('--device', type=str, default=None, help='Device to use for encoding (e.g. "cuda:0")') parser.add_argument("--model", type=str, default="gpt-4", help="The name of the OpenAI model to use") parser.add_argument("--data-type", type=str, default="code", help="The name of the instruction function to use") parser.add_argument("--max-workers", type=int, default=2, help="The maximum number of worker threads to use") args = parser.parse_args() check_openai_key() bert_model = SentenceTransformer(args.bert_model) database = build_database(bert_model, args.train_path, args.test_path, args.output_path, args.top_k, args.batch_size, args.device)
instruct = datatype_to_instruct(args.data_type)
0
2023-10-17 04:06:33+00:00
2k
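build_database above pairs each test case with its top_k most similar training cases by sentence-embedding similarity. A dependency-light sketch of that retrieval step, with NumPy arrays standing in for the SentenceTransformer encodings; top_k_similarity here is my re-implementation (cosine similarity via unit-normalized rows), not the repo's code:

import numpy as np

def top_k_similarity(train_embs: np.ndarray, test_embs: np.ndarray, top_k: int) -> np.ndarray:
    # Normalize rows so the dot product equals cosine similarity.
    train = train_embs / np.linalg.norm(train_embs, axis=1, keepdims=True)
    test = test_embs / np.linalg.norm(test_embs, axis=1, keepdims=True)
    sims = test @ train.T                      # (n_test, n_train)
    # Indices of the top_k most similar training rows per test row.
    return np.argsort(-sims, axis=1)[:, :top_k]

rng = np.random.default_rng(0)
idx = top_k_similarity(rng.normal(size=(100, 384)), rng.normal(size=(5, 384)), top_k=1)
print(idx.shape)  # (5, 1)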
MolecularAI/REINVENT4
reinvent_plugins/components/comp_mmp.py
[ { "identifier": "ComponentResults", "path": "reinvent_plugins/components/component_results.py", "snippet": "class ComponentResults:\n \"\"\"Container for the scores, uncertainties and meta data\n\n At the minimum the scores must be provided. The order of the score array\n must be the same as the order of SMILES passed to each component. Failure\n of computation of score must be indicated by NaN. Do not use zero for this!\n scores_properties can be used to pass on meta data on the scores\n uncertainty_type is currently assumed to be the same for all values\n failure_properties can be used to provide details on the failure of a component\n meta_data is a general facility to pass on meta data\n \"\"\"\n\n scores: List[np.ndarray]\n scores_properties: Optional[List[Dict]] = None\n uncertainty: Optional[List[np.ndarray]] = None\n uncertainty_type: Optional[str] = None\n uncertainty_properties: Optional[List[Dict]] = None\n failures_properties: Optional[List[Dict]] = None\n metadata: Optional[Dict] = None" }, { "identifier": "run_command", "path": "reinvent_plugins/components/run_program.py", "snippet": "def run_command(command: List[str], env: dict = None, input=None) -> sp.CompletedProcess:\n \"\"\"Run an external command in a subprocess.\n\n :params command: array of command line arguments\n :returns: output object from the subprocess\n \"\"\"\n\n args = dict(capture_output=True, text=True, check=True, shell=False)\n\n if env:\n args.update({\"env\": env})\n\n if input:\n args.update({\"input\": input})\n\n try:\n result = sp.run(command, **args)\n except sp.CalledProcessError as error:\n ret = error.returncode\n out = error.stdout\n err = error.stderr\n\n raise RuntimeError(\n f\"{__name__}: {' '.join(command)} has failed with exit \"\n f\"code {ret}: stdout={out}, stderr={err}\"\n )\n\n return result" }, { "identifier": "add_tag", "path": "reinvent_plugins/components/add_tag.py", "snippet": "def add_tag(label: str, text: str = \"True\"):\n \"\"\"A simple decorator to tag a class\"\"\"\n\n def wrapper(cls):\n setattr(cls, label, text)\n return cls\n\n return wrapper" }, { "identifier": "normalize_smiles", "path": "reinvent_plugins/normalize.py", "snippet": "def normalize_smiles(func: Callable):\n def wrapper(self, smilies: List[str]):\n normalizer = getattr(normalizers, self.smiles_type)\n\n cleaned_smilies = normalizer.normalize(smilies)\n\n return func(self, cleaned_smilies)\n\n return wrapper" } ]
import logging import shlex import numpy as np import pandas as pd from io import StringIO from dataclasses import dataclass, field from typing import List from rdkit import Chem from .component_results import ComponentResults from .run_program import run_command from .add_tag import add_tag from ..normalize import normalize_smiles
1223
"""Matched molecular pairs""" from __future__ import annotations __all__ = ["MMP"] logger = logging.getLogger('reinvent') @add_tag("__parameters") @dataclass class Parameters: """Parameters for the scoring component Note that all parameters are always lists because components can have multiple endpoints and so all the parameters from each endpoint is collected into a list. This is also true in cases where there is only one endpoint. """ reference_smiles: List[List[str]] num_of_cuts: List[int] = field(default_factory=lambda: [1]) max_variable_heavies: List[int] = field(default_factory=lambda: [40]) max_variable_ratio: List[float] = field(default_factory=lambda: [0.33]) FRAG_CMD = "mmpdb --quiet fragment --num-cuts {ncuts}" IDX_CMD = ( "mmpdb --quiet index --out csv --symmetric --max-variable-heavies {heavy} " "--max-variable-ratio {ratio}" ) @add_tag("__component") class MMP: def __init__(self, params: Parameters): self.ref_smilies = params.reference_smiles self.num_of_cuts = params.num_of_cuts self.max_variable_heavies = params.max_variable_heavies self.max_variable_ratio = params.max_variable_ratio # needed in the normalize_smiles decorator # FIXME: really needs to be configurable for each model separately self.smiles_type = 'rdkit_smiles' @normalize_smiles def __call__(self, smilies: List[str]) -> np.array: scores = [] self.ref_smilies = [[Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=False) for smi in self.ref_smilies[0] if Chem.MolFromSmiles(smi)]] smilies = [Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=False) for smi in smilies if Chem.MolFromSmiles(smi)] for ref_smilies, ncuts, max_heavy, max_ratio in zip( self.ref_smilies, self.num_of_cuts, self.max_variable_heavies, self.max_variable_ratio ): smiles_csv = format_smilies(smilies, ref_smilies) frag_cmd = FRAG_CMD.format(ncuts=ncuts)
"""Matched molecular pairs""" from __future__ import annotations __all__ = ["MMP"] logger = logging.getLogger('reinvent') @add_tag("__parameters") @dataclass class Parameters: """Parameters for the scoring component Note that all parameters are always lists because components can have multiple endpoints and so all the parameters from each endpoint is collected into a list. This is also true in cases where there is only one endpoint. """ reference_smiles: List[List[str]] num_of_cuts: List[int] = field(default_factory=lambda: [1]) max_variable_heavies: List[int] = field(default_factory=lambda: [40]) max_variable_ratio: List[float] = field(default_factory=lambda: [0.33]) FRAG_CMD = "mmpdb --quiet fragment --num-cuts {ncuts}" IDX_CMD = ( "mmpdb --quiet index --out csv --symmetric --max-variable-heavies {heavy} " "--max-variable-ratio {ratio}" ) @add_tag("__component") class MMP: def __init__(self, params: Parameters): self.ref_smilies = params.reference_smiles self.num_of_cuts = params.num_of_cuts self.max_variable_heavies = params.max_variable_heavies self.max_variable_ratio = params.max_variable_ratio # needed in the normalize_smiles decorator # FIXME: really needs to be configurable for each model separately self.smiles_type = 'rdkit_smiles' @normalize_smiles def __call__(self, smilies: List[str]) -> np.array: scores = [] self.ref_smilies = [[Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=False) for smi in self.ref_smilies[0] if Chem.MolFromSmiles(smi)]] smilies = [Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=False) for smi in smilies if Chem.MolFromSmiles(smi)] for ref_smilies, ncuts, max_heavy, max_ratio in zip( self.ref_smilies, self.num_of_cuts, self.max_variable_heavies, self.max_variable_ratio ): smiles_csv = format_smilies(smilies, ref_smilies) frag_cmd = FRAG_CMD.format(ncuts=ncuts)
result1 = run_command(shlex.split(frag_cmd), input=smiles_csv)
1
2023-10-20 06:43:16+00:00
2k
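The component above shells out to mmpdb twice, piping a SMILES CSV through stdin; run_command (see the context snippet) wraps subprocess.run with capture_output and check=True. The piping pattern on its own, with cat substituted for mmpdb so the sketch runs on any POSIX machine without RDKit or mmpdb installed:

import shlex
import subprocess as sp

def run_command(command, input=None) -> sp.CompletedProcess:
    # Mirrors the run_command helper above: capture output, raise on failure.
    return sp.run(command, capture_output=True, text=True, check=True, input=input)

# "cat" stands in for "mmpdb --quiet fragment --num-cuts 1"; the piping
# mechanics (text input through stdin, captured stdout) are identical.
smiles_csv = "SMILES,ID\nCCO,mol1\n"
result = run_command(shlex.split("cat"), input=smiles_csv)
print(result.stdout, end="")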
lion-agi/lionagi
lionagi/schema/base_node.py
[ { "identifier": "create_copy", "path": "lionagi/utils/sys_util.py", "snippet": "def create_copy(input: Any, n: int) -> Any:\n \"\"\"\n Creates a deep copy of the input object a specified number of times.\n\n This function makes deep copies of the provided input. If the number of copies ('n') \n is greater than 1, a list of deep copies is returned. For a single copy, it returns \n the copy directly.\n\n Parameters:\n input (Any): The object to be copied.\n\n n (int): The number of deep copies to create.\n\n Raises:\n ValueError: If 'n' is not a positive integer.\n\n Returns:\n Any: A deep copy of 'input' or a list of deep copies if 'n' > 1.\n\n Example:\n >>> sample_dict = {'key': 'value'}\n >>> make_copy(sample_dict, 2)\n [{'key': 'value'}, {'key': 'value'}]\n \"\"\"\n if not isinstance(n, int) or n < 1:\n raise ValueError(f\"'n' must be a positive integer: {n}\")\n return copy.deepcopy(input) if n == 1 else [copy.deepcopy(input) for _ in range(n)]" }, { "identifier": "create_id", "path": "lionagi/utils/sys_util.py", "snippet": "def create_id(n=32) -> str:\n \"\"\"\n Generates a unique ID based on the current time and random bytes.\n\n This function combines the current time in ISO 8601 format with 16 random bytes\n to create a unique identifier. The result is hashed using SHA-256 and the first\n 16 characters of the hexadecimal digest are returned.\n\n Returns:\n str: A 16-character unique identifier.\n\n Example:\n >>> create_id() # Doctest: +ELLIPSIS\n '...'\n \"\"\"\n current_time = datetime.now().isoformat().encode('utf-8')\n random_bytes = os.urandom(2048)\n return hashlib.sha256(current_time + random_bytes).hexdigest()[:n]" }, { "identifier": "change_dict_key", "path": "lionagi/utils/sys_util.py", "snippet": "def change_dict_key(dict_, old_key, new_key):\n dict_[new_key] = dict_.pop(old_key)" }, { "identifier": "is_schema", "path": "lionagi/utils/sys_util.py", "snippet": "def is_schema(dict_: Dict, schema: Dict):\n for key, expected_type in schema.items():\n if not isinstance(dict_[key], expected_type):\n return False\n return True" }, { "identifier": "encrypt", "path": "lionagi/utils/encrypt_util.py", "snippet": "def encrypt(data: str, key: str) -> str:\n \"\"\"Encrypts data using the provided key.\"\"\"\n fernet = Fernet(key.encode())\n return fernet.encrypt(data.encode()).decode()" }, { "identifier": "decrypt", "path": "lionagi/utils/encrypt_util.py", "snippet": "def decrypt(data: str, key: str) -> str:\n \"\"\"Decrypts data using the provided key.\"\"\"\n fernet = Fernet(key.encode())\n return fernet.decrypt(data.encode()).decode()" }, { "identifier": "dict_to_xml", "path": "lionagi/utils/convert_util.py", "snippet": "def dict_to_xml(data: Dict[str, Any], root_tag: str = 'node') -> str:\n \"\"\"\n Helper method to convert a dictionary to an XML string.\n\n Parameters:\n data (Dict[str, Any]): The dictionary to convert to XML.\n root_tag (str): The root tag name for the XML.\n\n Returns:\n str: An XML string representation of the dictionary.\n \"\"\"\n root = ET.Element(root_tag)\n _build_xml(root, data)\n return ET.tostring(root, encoding='unicode')" } ]
import json import xml.etree.ElementTree as ET from typing import Any, Dict, Optional, TypeVar, Type, List, Callable, Union from pydantic import BaseModel, Field, AliasChoices from lionagi.utils import ( create_id, is_schema, change_dict_key, create_copy, encrypt, decrypt, dict_to_xml )
1050
# uses utils T = TypeVar('T', bound='BaseNode') class BaseNode(BaseModel): """ A foundational building block for representing a node in a graph-like structure. This class includes functionalities for serialization, metadata manipulation, content encryption/decryption, and utility methods. Attributes: id_ (str): Unique identifier for the node, aliased as 'node_id'. metadata (Dict[str, Any]): Dictionary of metadata related to the node. label (Optional[str]): Label categorizing or identifying the node. related_nodes (List[str]): Identifiers for nodes related to this node. content (Union[str, Dict[str, Any], None, Any]): Content of the node. """
# uses utils T = TypeVar('T', bound='BaseNode') class BaseNode(BaseModel): """ A foundational building block for representing a node in a graph-like structure. This class includes functionalities for serialization, metadata manipulation, content encryption/decryption, and utility methods. Attributes: id_ (str): Unique identifier for the node, aliased as 'node_id'. metadata (Dict[str, Any]): Dictionary of metadata related to the node. label (Optional[str]): Label categorizing or identifying the node. related_nodes (List[str]): Identifiers for nodes related to this node. content (Union[str, Dict[str, Any], None, Any]): Content of the node. """
id_: str = Field(default_factory=lambda: str(create_id()), alias="node_id")
1
2023-10-17 03:10:02+00:00
2k
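The default node_id above comes from create_id (context snippet): a SHA-256 over the current ISO timestamp plus random bytes, truncated to n hex characters. Reproduced as a standalone function:

import hashlib
import os
from datetime import datetime

def create_id(n: int = 32) -> str:
    # Mix wall-clock time with OS randomness, then truncate the hex digest.
    current_time = datetime.now().isoformat().encode("utf-8")
    random_bytes = os.urandom(2048)
    return hashlib.sha256(current_time + random_bytes).hexdigest()[:n]

print(create_id())    # 32 hex chars
print(create_id(16))  # 16 hex chars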
stanford-oval/WikiChat
ColBERT/colbert/search/strided_tensor.py
[ { "identifier": "StridedTensorCore", "path": "ColBERT/colbert/search/strided_tensor_core.py", "snippet": "class StridedTensorCore:\n # # @profile\n def __init__(self, packed_tensor, lengths, dim=None, use_gpu=True):\n self.dim = dim\n self.tensor = packed_tensor\n self.inner_dims = self.tensor.size()[1:]\n self.use_gpu = use_gpu\n\n self.lengths = lengths.long() if torch.is_tensor(lengths) else torch.LongTensor(lengths)\n\n self.strides = _select_strides(self.lengths, [.5, .75, .9, .95]) + [self.lengths.max().item()]\n self.max_stride = self.strides[-1]\n\n zero = torch.zeros(1, dtype=torch.long, device=self.lengths.device)\n self.offsets = torch.cat((zero, torch.cumsum(self.lengths, dim=0)))\n\n if self.offsets[-2] + self.max_stride > self.tensor.size(0):\n # if self.tensor.size(0) > 10_000_000:\n # print(\"#> WARNING: StridedTensor has to add padding, internally, to a large tensor.\")\n # print(\"#> WARNING: Consider doing this padding in advance to save memory!\")\n\n padding = torch.zeros(self.max_stride, *self.inner_dims, dtype=self.tensor.dtype, device=self.tensor.device)\n self.tensor = torch.cat((self.tensor, padding))\n\n self.views = {stride: _create_view(self.tensor, stride, self.inner_dims) for stride in self.strides}\n\n @classmethod\n def from_packed_tensor(cls, tensor, lengths):\n return cls(tensor, lengths)\n\n @classmethod\n def from_padded_tensor(cls, tensor, mask):\n pass\n\n @classmethod\n def from_nested_list(cls, lst):\n flat_lst = flatten(lst)\n\n tensor = torch.Tensor(flat_lst)\n lengths = [len(sublst) for sublst in lst]\n\n return cls(tensor, lengths, dim=0)\n\n @classmethod\n def from_tensors_list(cls, tensors):\n # torch.cat(tensors)\n # lengths.\n # cls(tensor, lengths)\n raise NotImplementedError()\n\n def as_packed_tensor(self, return_offsets=False):\n unpadded_packed_tensor = self.tensor # [:self.offsets[-1]]\n\n return_vals = [unpadded_packed_tensor, self.lengths]\n\n if return_offsets:\n return_vals.append(self.offsets)\n\n return tuple(return_vals)\n\n # # @profile\n def as_padded_tensor(self):\n if self.use_gpu:\n view = _create_view(self.tensor.cuda(), self.max_stride, self.inner_dims)[self.offsets[:-1]]\n mask = _create_mask(self.lengths.cuda(), self.max_stride, like=view, use_gpu=self.use_gpu)\n else:\n #import pdb\n #pdb.set_trace()\n view = _create_view(self.tensor, self.max_stride, self.inner_dims)\n view = view[self.offsets[:-1]]\n mask = _create_mask(self.lengths, self.max_stride, like=view, use_gpu=self.use_gpu)\n\n return view, mask\n\n def as_tensors_list(self):\n raise NotImplementedError()" }, { "identifier": "_create_mask", "path": "ColBERT/colbert/search/strided_tensor_core.py", "snippet": "def _create_mask(lengths, stride, like=None, use_gpu=True):\n if use_gpu:\n mask = torch.arange(stride).cuda() + 1\n mask = mask.unsqueeze(0) <= lengths.cuda().unsqueeze(-1)\n else:\n mask = torch.arange(stride) + 1\n mask = mask.unsqueeze(0) <= lengths.unsqueeze(-1)\n\n if like is not None:\n for _ in range(like.dim() - mask.dim()):\n mask = mask.unsqueeze(-1)\n\n return mask" }, { "identifier": "_create_view", "path": "ColBERT/colbert/search/strided_tensor_core.py", "snippet": "def _create_view(tensor, stride, inner_dims):\n outdim = tensor.size(0) - stride + 1\n size = (outdim, stride, *inner_dims)\n\n inner_dim_prod = int(np.prod(inner_dims))\n multidim_stride = [inner_dim_prod, inner_dim_prod] + [1] * len(inner_dims)\n\n return torch.as_strided(tensor, size=size, stride=multidim_stride)" } ]
from struct import pack from torch._C import device from colbert.utils.utils import flatten, print_message from .strided_tensor_core import StridedTensorCore, _create_mask, _create_view from torch.utils.cpp_extension import load import torch import os import pathlib import os import pickle import time
1454
class StridedTensor(StridedTensorCore): def __init__(self, packed_tensor, lengths, dim=None, use_gpu=True): super().__init__(packed_tensor, lengths, dim=dim, use_gpu=use_gpu) StridedTensor.try_load_torch_extensions(use_gpu) @classmethod def try_load_torch_extensions(cls, use_gpu): if hasattr(cls, "loaded_extensions") or use_gpu: return print_message(f"Loading segmented_lookup_cpp extension (set COLBERT_LOAD_TORCH_EXTENSION_VERBOSE=True for more info)...") segmented_lookup_cpp = load( name="segmented_lookup_cpp", sources=[ os.path.join( pathlib.Path(__file__).parent.resolve(), "segmented_lookup.cpp" ), ], extra_cflags=["-O3"], verbose=os.getenv("COLBERT_LOAD_TORCH_EXTENSION_VERBOSE", "False") == "True", ) cls.segmented_lookup = segmented_lookup_cpp.segmented_lookup_cpp cls.loaded_extensions = True @classmethod def pad_packed(cls, packed_tensor, lengths): assert False, "This seems to be incorrect but I can't see why. Is it the inner_dims in the views?" packed_tensor, lengths = packed_tensor.cuda().contiguous(), lengths.cuda() inner_dims = packed_tensor.size()[1:] stride = lengths.max().item() offsets = torch.cumsum(lengths, dim=0) - lengths[0] padding = torch.zeros(stride, *inner_dims, device=packed_tensor.device, dtype=packed_tensor.dtype) packed_tensor = torch.cat((packed_tensor, padding))
class StridedTensor(StridedTensorCore): def __init__(self, packed_tensor, lengths, dim=None, use_gpu=True): super().__init__(packed_tensor, lengths, dim=dim, use_gpu=use_gpu) StridedTensor.try_load_torch_extensions(use_gpu) @classmethod def try_load_torch_extensions(cls, use_gpu): if hasattr(cls, "loaded_extensions") or use_gpu: return print_message(f"Loading segmented_lookup_cpp extension (set COLBERT_LOAD_TORCH_EXTENSION_VERBOSE=True for more info)...") segmented_lookup_cpp = load( name="segmented_lookup_cpp", sources=[ os.path.join( pathlib.Path(__file__).parent.resolve(), "segmented_lookup.cpp" ), ], extra_cflags=["-O3"], verbose=os.getenv("COLBERT_LOAD_TORCH_EXTENSION_VERBOSE", "False") == "True", ) cls.segmented_lookup = segmented_lookup_cpp.segmented_lookup_cpp cls.loaded_extensions = True @classmethod def pad_packed(cls, packed_tensor, lengths): assert False, "This seems to be incorrect but I can't see why. Is it the inner_dims in the views?" packed_tensor, lengths = packed_tensor.cuda().contiguous(), lengths.cuda() inner_dims = packed_tensor.size()[1:] stride = lengths.max().item() offsets = torch.cumsum(lengths, dim=0) - lengths[0] padding = torch.zeros(stride, *inner_dims, device=packed_tensor.device, dtype=packed_tensor.dtype) packed_tensor = torch.cat((packed_tensor, padding))
view = _create_view(packed_tensor, stride, inner_dims)[offsets]
2
2023-10-19 18:17:25+00:00
2k
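The core trick in StridedTensor is _create_view (context snippet): torch.as_strided exposes every length-stride window of the packed tensor, with step 1 between starting positions, so indexing the view with the segment offsets gathers all segments without copying. A small demonstration for a flat packed tensor; positions past each segment's true length are junk and are masked out separately by _create_mask:

import torch

def _create_view(tensor: torch.Tensor, stride: int) -> torch.Tensor:
    # One row per possible starting position; rows overlap and share storage.
    outdim = tensor.size(0) - stride + 1
    return torch.as_strided(tensor, size=(outdim, stride), stride=(1, 1))

packed = torch.arange(10)            # segments [0..3], [4..6], [7..9]
lengths = torch.tensor([4, 3, 3])
offsets = torch.cat((torch.zeros(1, dtype=torch.long), lengths.cumsum(0)))[:-1]
stride = int(lengths.max())          # 4; pad the tail so every view row is in bounds
packed = torch.cat((packed, torch.zeros(stride, dtype=packed.dtype)))

print(_create_view(packed, stride)[offsets])
# tensor([[0, 1, 2, 3],
#         [4, 5, 6, 7],   <- entries past a segment's true length (the 7 here,
#         [7, 8, 9, 0]])     the trailing 0 below) are masked out downstream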
kyegomez/BitNet
tests/tests.py
[ { "identifier": "BitLinear", "path": "bitnet/bitlinear.py", "snippet": "class BitLinear(nn.Module):\n def __init__(self, in_features, out_features, bias=True):\n def forward(self, input):" }, { "identifier": "BitNetTransformer", "path": "bitnet/transformer.py", "snippet": "class Transformer(nn.Module):\nclass BitNetTransformer(nn.Module):\n def __init__(self, dim: int, heads: int, depth: int, ff_mult=2, *args, **kwargs):\n def forward(self, x):\n def __init__(\n self,\n dim: int,\n depth: int,\n num_tokens: int,\n heads=8,\n ff_mult=4,\n ):\n def forward(self, x):" } ]
import pytest import torch from torch.nn import functional as F from bitnet.bitlinear import BitLinear, absmax_quantize from bitnet.transformer import BitNetTransformer, ParallelTransformerBlock, Transformer
1443
) def test_bitlinear_shapes(in_features, out_features): layer = BitLinear(in_features, out_features) assert layer.weight.shape == (out_features, in_features) @pytest.mark.parametrize("groups", [1, 2, 5]) def test_bitlinear_groups(groups): layer = BitLinear(10, 20, groups=groups) assert layer.groups == groups def test_bitlinear_reset_parameters(): layer = BitLinear(10, 20) original_weights = layer.weight.clone() layer.reset_parameters() assert not torch.equal(original_weights, layer.weight) @pytest.mark.parametrize("groups", [1, 2, 5]) def test_bitlinear_forward_with_groups(random_tensor, groups): layer = BitLinear(10, 20, groups=groups) output = layer(random_tensor) assert output.shape == (5, 20) def test_bitlinear_zero_input(): layer = BitLinear(10, 20) input_tensor = torch.zeros(5, 10) output = layer(input_tensor) assert torch.allclose(output, torch.zeros(5, 20), atol=1e-2) def test_bitlinear_weight_sign(): layer = BitLinear(10, 20) input_tensor = torch.randn(5, 10) output_before = layer(input_tensor) layer.weight.data = torch.abs(layer.weight.data) output_after = layer(input_tensor) assert not torch.allclose(output_before, output_after) @pytest.mark.parametrize("groups", [1, 2, 5]) def test_bitlinear_weight_group_normalization(groups): layer = BitLinear(10, 20, groups=groups) weight = layer.weight.view(groups, -1) mean = weight.mean(dim=1, keepdim=True) assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-2) def test_bitlinear_weight_group_scaling(): layer = BitLinear(10, 20, groups=5) weight = layer.weight.view(layer.groups, -1) beta = torch.abs(weight).sum(dim=1, keepdim=True) / ( weight.shape[0] * weight.shape[1] ) scaled_weight = weight * beta assert torch.allclose(scaled_weight, layer.weight.view(20, 10)) def test_bitlinear_input_quantization(random_tensor): layer = BitLinear(10, 20) quant_input, _ = absmax_quantize(random_tensor) output = layer(quant_input.float()) assert output.shape == (5, 20) # ... Continue adding more tests ... # - Test the forward pass with extreme input values. # - Test with different types of input tensors (e.g., int8, float16). # - Test the forward pass with batch sizes other than 5. # - Verify that using different initializations produces different results. # - Test the weight and input interactions during the forward pass. # - And many more... # ================================ Transformer with bitlinear ================================ @pytest.fixture def random_tensor(): """A fixture to generate a random tensor""" return torch.randn(32, 512) @pytest.fixture def bitnet_model(): """A fixture to create an instance of BitNetTransformer model""" return BitNetTransformer( num_tokens=20000, dim=512, depth=6, dim_head=64, heads=8, ff_mult=4, ) @pytest.mark.parametrize( "dim, dim_head, heads, ff_mult", [ (512, 64, 8, 4), (256, 32, 4, 2), (128, 16, 2, 1), ], ) def test_parallel_transformer_block(dim, dim_head, heads, ff_mult, random_tensor): block = ParallelTransformerBlock(dim, dim_head, heads, ff_mult) output = block(random_tensor) assert output.shape == random_tensor.shape @pytest.mark.parametrize( "dim, depth, heads, dim_head, ff_mult", [ (512, 6, 8, 64, 4), (256, 3, 4, 32, 2), (128, 2, 2, 16, 1), ], ) def test_transformer(dim, depth, heads, dim_head, ff_mult, random_tensor):
# Basic Tests: def test_absmax_quantize(): tensor = torch.tensor([1.5, -2.0, 3.0, -4.0]) quant, dequant = absmax_quantize(tensor) assert quant.dtype == torch.int8 assert torch.allclose(dequant, tensor, atol=1e-2) def test_bitlinear_initialization(): layer = BitLinear(10, 20) assert layer.in_features == 10 assert layer.out_features == 20 assert layer.weight.shape == (20, 10) def test_bitlinear_forward(): layer = BitLinear(10, 20) input_tensor = torch.randn(5, 10) output = layer(input_tensor) assert output.shape == (5, 20) # Fixtures: @pytest.fixture def random_tensor(): return torch.randn(5, 10) # Parameterized Testing: @pytest.mark.parametrize("bits", [4, 8, 12, 16]) def test_absmax_quantize_bits(random_tensor, bits): quant, dequant = absmax_quantize(random_tensor, bits=bits) assert quant.dtype == torch.int8 assert torch.allclose(dequant, random_tensor, atol=1e-2) # More Tests for BitLinear: @pytest.mark.parametrize( "in_features,out_features", [(10, 20), (20, 40), (5, 10), (15, 10)] ) def test_bitlinear_shapes(in_features, out_features): layer = BitLinear(in_features, out_features) assert layer.weight.shape == (out_features, in_features) @pytest.mark.parametrize("groups", [1, 2, 5]) def test_bitlinear_groups(groups): layer = BitLinear(10, 20, groups=groups) assert layer.groups == groups def test_bitlinear_reset_parameters(): layer = BitLinear(10, 20) original_weights = layer.weight.clone() layer.reset_parameters() assert not torch.equal(original_weights, layer.weight) @pytest.mark.parametrize("groups", [1, 2, 5]) def test_bitlinear_forward_with_groups(random_tensor, groups): layer = BitLinear(10, 20, groups=groups) output = layer(random_tensor) assert output.shape == (5, 20) def test_bitlinear_zero_input(): layer = BitLinear(10, 20) input_tensor = torch.zeros(5, 10) output = layer(input_tensor) assert torch.allclose(output, torch.zeros(5, 20), atol=1e-2) def test_bitlinear_weight_sign(): layer = BitLinear(10, 20) input_tensor = torch.randn(5, 10) output_before = layer(input_tensor) layer.weight.data = torch.abs(layer.weight.data) output_after = layer(input_tensor) assert not torch.allclose(output_before, output_after) @pytest.mark.parametrize("groups", [1, 2, 5]) def test_bitlinear_weight_group_normalization(groups): layer = BitLinear(10, 20, groups=groups) weight = layer.weight.view(groups, -1) mean = weight.mean(dim=1, keepdim=True) assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-2) def test_bitlinear_weight_group_scaling(): layer = BitLinear(10, 20, groups=5) weight = layer.weight.view(layer.groups, -1) beta = torch.abs(weight).sum(dim=1, keepdim=True) / ( weight.shape[0] * weight.shape[1] ) scaled_weight = weight * beta assert torch.allclose(scaled_weight, layer.weight.view(20, 10)) def test_bitlinear_input_quantization(random_tensor): layer = BitLinear(10, 20) quant_input, _ = absmax_quantize(random_tensor) output = layer(quant_input.float()) assert output.shape == (5, 20) # ... Continue adding more tests ... # - Test the forward pass with extreme input values. # - Test with different types of input tensors (e.g., int8, float16). # - Test the forward pass with batch sizes other than 5. # - Verify that using different initializations produces different results. # - Test the weight and input interactions during the forward pass. # - And many more... 
# ================================ Transformer with bitlinear ================================ @pytest.fixture def random_tensor(): """A fixture to generate a random tensor""" return torch.randn(32, 512) @pytest.fixture def bitnet_model(): """A fixture to create an instance of BitNetTransformer model""" return BitNetTransformer( num_tokens=20000, dim=512, depth=6, dim_head=64, heads=8, ff_mult=4, ) @pytest.mark.parametrize( "dim, dim_head, heads, ff_mult", [ (512, 64, 8, 4), (256, 32, 4, 2), (128, 16, 2, 1), ], ) def test_parallel_transformer_block(dim, dim_head, heads, ff_mult, random_tensor): block = ParallelTransformerBlock(dim, dim_head, heads, ff_mult) output = block(random_tensor) assert output.shape == random_tensor.shape @pytest.mark.parametrize( "dim, depth, heads, dim_head, ff_mult", [ (512, 6, 8, 64, 4), (256, 3, 4, 32, 2), (128, 2, 2, 16, 1), ], ) def test_transformer(dim, depth, heads, dim_head, ff_mult, random_tensor):
transformer = Transformer(dim, depth, heads, dim_head, ff_mult)
1
2023-10-18 16:19:06+00:00
2k
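The tests above pin down absmax_quantize only through its contract: int8 output plus a dequantized tensor close to the input. Below is a plausible reference implementation consistent with that contract, scaling by the absolute maximum into the signed integer range; this is inferred from the tests, not taken from bitnet's actual code:

import torch

def absmax_quantize(x: torch.Tensor, bits: int = 8):
    # For bits <= 8 the quantized values fit in int8, matching the tests.
    qmax = 2 ** (bits - 1) - 1                    # 127 for 8 bits
    scale = qmax / x.abs().max().clamp(min=1e-8)  # guard against all-zero input
    quant = (x * scale).round().to(torch.int8)
    dequant = quant.to(x.dtype) / scale
    return quant, dequant

x = torch.tensor([1.5, -2.0, 3.0, -4.0])
quant, dequant = absmax_quantize(x)
print(quant.dtype)                # torch.int8
print((dequant - x).abs().max())  # quantization error, bounded by 0.5/scale (~0.016 here)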
TonicAI/tvalmetrics
tonic_validate/metrics/augmentation_precision_metric.py
[ { "identifier": "LLMResponse", "path": "tonic_validate/classes/llm_response.py", "snippet": "class LLMResponse(BaseModel):\n llm_answer: str\n llm_context_list: list[str]\n benchmark_item: BenchmarkItem" }, { "identifier": "AugmentationAccuracyMetric", "path": "tonic_validate/metrics/augmentation_accuracy_metric.py", "snippet": "class AugmentationAccuracyMetric(Metric):\n name = \"augmentation_accuracy\"\n\n def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:\n return self.calculate_metric(llm_response, openai_service)[0]\n\n def calculate_metric(\n self, llm_response: LLMResponse, openai_service: OpenAIService\n ) -> Tuple[float, List[bool]]:\n contains_context_list = []\n for context in llm_response.llm_context_list:\n contains_context_response = answer_contains_context_call(\n llm_response.llm_answer, context, openai_service\n )\n contains_context_list.append(\n parse_boolean_response(contains_context_response)\n )\n\n score = sum(contains_context_list) / len(contains_context_list)\n return (score, contains_context_list)" }, { "identifier": "Metric", "path": "tonic_validate/metrics/metric.py", "snippet": "class Metric(ABC):\n @property\n @abstractmethod\n def name(self) -> str:\n pass\n\n @abstractmethod\n def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:\n pass" }, { "identifier": "RetrievalPrecisionMetric", "path": "tonic_validate/metrics/retrieval_precision_metric.py", "snippet": "class RetrievalPrecisionMetric(Metric):\n name = \"retrieval_precision\"\n\n def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:\n return self.calculate_metric(llm_response, openai_service)[0]\n\n def calculate_metric(\n self, llm_response: LLMResponse, openai_service: OpenAIService\n ) -> Tuple[float, List[bool]]:\n context_relevant_list = []\n for context in llm_response.llm_context_list:\n relevance_response = context_relevancy_call(\n llm_response.benchmark_item.question, context, openai_service\n )\n context_relevant_list.append(parse_boolean_response(relevance_response))\n\n score = sum(context_relevant_list) / len(context_relevant_list)\n return (score, context_relevant_list)" }, { "identifier": "OpenAIService", "path": "tonic_validate/services/openai_service.py", "snippet": "class OpenAIService:\n def __init__(self, model: str = \"gpt-4-1106-preview\") -> None:\n self.client = OpenAI()\n self.model = model\n self.cache: Dict[str, str] = {}\n\n def get_response(\n self,\n prompt: str,\n max_retries: int = 5,\n ) -> str:\n if prompt in self.cache:\n return self.cache[prompt]\n while max_retries > 0:\n try:\n completion = self.client.chat.completions.create(\n model=self.model,\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant. Respond using markdown.\",\n },\n {\"role\": \"user\", \"content\": prompt},\n ],\n temperature=0.0,\n )\n response = completion.choices[0].message.content\n if response is None:\n raise Exception(\n f\"Failed to get message response from {self.model}, message does not exist\"\n )\n self.cache[prompt] = response\n return response\n except Exception as e:\n print(e)\n max_retries -= 1\n raise Exception(\n f\"Failed to get completion response from {self.model}, max retires hit\"\n )" } ]
import logging from typing import List from tonic_validate.classes.llm_response import LLMResponse from tonic_validate.metrics.augmentation_accuracy_metric import ( AugmentationAccuracyMetric ) from tonic_validate.metrics.metric import Metric from tonic_validate.metrics.retrieval_precision_metric import RetrievalPrecisionMetric from tonic_validate.services.openai_service import OpenAIService
1008
logger = logging.getLogger() class AugmentationPrecisionMetric(Metric): name = "augmentation_precision" def __init__(self) -> None: self.augmentation_accuracy = AugmentationAccuracyMetric() self.retrieval_precision = RetrievalPrecisionMetric()
logger = logging.getLogger() class AugmentationPrecisionMetric(Metric): name = "augmentation_precision" def __init__(self) -> None: self.augmentation_accuracy = AugmentationAccuracyMetric() self.retrieval_precision = RetrievalPrecisionMetric()
def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:
0
2023-10-23 21:38:11+00:00
2k
jhejna/cpl
scripts/render_metaworld_dataset.py
[ { "identifier": "storage", "path": "research/datasets/replay_buffer/storage.py", "snippet": "def load_data(path: str, exclude_keys: Optional[List[str]]) -> Dict:\ndef save_data(data: Dict, path: str) -> None:\ndef get_bytes(buffer: Union[Dict, np.ndarray]) -> int:\n def capacity(self):\n def size(self):\n def starts(self):\n def ends(self):\n def lengths(self):\n def bytes(self):\n def save(self, path):\n def __getitem__(self, key):\n def __getattr__(self, name):\n def __contains__(self, key):\n def add(self, data):\n def extend(self, data):\n def __init__(self, buffers: Dict) -> None:\n def add(self, data):\n def extend(self, data):\n def __init__(self, initial_capacity: int = 100, dtype=np.int64):\n def _reset(self):\n def append(self, value):\n def pop(self):\n def popleft(self):\n def view(self):\n def __len__(self):\n def first(self):\n def last(self):\n def __str__(self):\n def __init__(self, buffer_space: Union[Dict, gym.spaces.Dict], capacity: Optional[int] = None) -> None:\n def _update_markers(self, new_ends: Iterable = ()):\n def add(self, data):\n def extend(self, data):\nclass Storage(abc.ABC):\nclass FixedStorage(Storage):\nclass NPQueue(object):\nclass CircularStorage(Storage):" }, { "identifier": "MetaWorldSawyerImageWrapper", "path": "research/envs/metaworld.py", "snippet": "class MetaWorldSawyerImageWrapper(gym.Wrapper):\n def __init__(self, env, width=64, height=64, camera=\"corner2\", show_goal=False):\n assert isinstance(\n env.unwrapped, MetaWorldSawyerEnv\n ), \"MetaWorld Wrapper must be used with a MetaWorldSawyerEnv class\"\n super().__init__(env)\n self._width = width\n self._height = height\n self._camera = camera\n self._show_goal = show_goal\n shape = (3, self._height, self._width)\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)\n\n def _get_image(self):\n if not self._show_goal:\n try:\n self.env.unwrapped._set_pos_site(\"goal\", np.inf * self.env.unwrapped._target_pos)\n except ValueError:\n pass # If we don't have the goal site, just continue.\n img = self.env.render(mode=\"rgb_array\", camera_name=self._camera, width=self._width, height=self._height)\n return img.transpose(2, 0, 1)\n\n def step(self, action):\n state_obs, reward, done, info = self.env.step(action)\n # Throw away the state-based observation.\n info[\"state\"] = state_obs\n return self._get_image().copy(), reward, done, info\n\n def reset(self):\n # Zoom in camera corner2 to make it better for control\n # I found this view to work well across a lot of the tasks.\n camera_name = \"corner2\"\n # Original XYZ is 1.3 -0.2 1.1\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 20.0 # FOV\n self.model.cam_pos[index][0] = 1.5 # X\n self.model.cam_pos[index][1] = -0.35 # Y\n self.model.cam_pos[index][2] = 1.1 # Z\n\n self.env.reset()\n return self._get_image().copy() # Return the image observation" } ]
import argparse
import io
import gym
import numpy as np
from research.datasets.replay_buffer import storage
from research.envs.metaworld import MetaWorldSawyerImageWrapper
1,061
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, required=True, help="Path to the dataset")
    parser.add_argument("--output", type=str, required=True, help="Path to output the new dataset")
    parser.add_argument("--resolution", type=int, default=64, help="Resolution to render")
    parser.add_argument("--env", type=str, required=True)
    args = parser.parse_args()

    data = storage.load_data(args.path, exclude_keys=["mask"])
    assert "state" in data

    env = gym.make(args.env)
env = MetaWorldSawyerImageWrapper(env, width=args.resolution, height=args.resolution)
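The rendering loop itself falls outside this record. Purely as a hedged sketch of the kind of continuation the setup implies (the "action" and "obs" keys, and replaying actions rather than restoring sim states, are all assumptions, not the repo's confirmed approach):

    # Hedged sketch: replay stored actions through the wrapped env and
    # collect the rendered image observations.
    obs = env.reset()
    images = [obs]
    for action in data["action"][:-1]:  # assumes the buffer stores an "action" key
        obs, reward, done, info = env.step(action)
        images.append(obs)
    data["obs"] = np.stack(images)  # hypothetical output key
    storage.save_data(data, args.output)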
1
2023-10-19 17:25:45+00:00
2k
nbasyl/LLM-FP4
lm_eval/base.py
[ { "identifier": "mean", "path": "lm_eval/metrics.py", "snippet": "def mean(arr):\n return sum(arr) / len(arr)" }, { "identifier": "weighted_perplexity", "path": "lm_eval/metrics.py", "snippet": "def weighted_perplexity(items):\n return math.exp(-weighted_mean(items))" }, { "identifier": "weighted_mean", "path": "lm_eval/metrics.py", "snippet": "def weighted_mean(items):\n a, b = zip(*items)\n return sum(a) / sum(b)" }, { "identifier": "bits_per_byte", "path": "lm_eval/metrics.py", "snippet": "def bits_per_byte(items):\n return -weighted_mean(items) / math.log(2)" }, { "identifier": "utils", "path": "lm_eval/utils.py", "snippet": "class ExitCodeError(Exception):\nclass MultiChoice:\nclass Reorderer:\ndef sh(x):\ndef escaped_split(text, sep_char, maxsplit=-1):\ndef simple_parse_args_string(args_string):\ndef join_iters(iters):\ndef chunks(iter, n=0, fn=None):\ndef group(arr, fn):\ndef _is_json_task(task_name):\n def __init__(self, choices):\n def __contains__(self, values):\n def __iter__(self):\ndef pattern_match(patterns, source_list):\ndef general_detokenize(string):\ndef get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):\ndef make_disjoint_window(pair):\ndef select_continuation_from_batch_left_padding(\n generations: Union[List[List[int]], torch.Tensor], max_context_size: int\n):\n def __init__(self, arr, fn):\n def get_reordered(self):\n def get_original(self, newarr):\ndef positional_deprecated(fn):\n def _wrapper(*args, **kwargs):\ndef find_test_root(start_path: pathlib.Path) -> pathlib.Path:\ndef run_task_tests(task_list: List[str]):\ndef clear_torch_cache():" } ]
import abc
import numpy as np
import random
import re
import os
import json
import hashlib
import datasets
import torch
import torch.nn.functional as F
import warnings
from typing import Iterable
from sqlitedict import SqliteDict
from tqdm import tqdm
from accelerate import find_executable_batch_size
from lm_eval.metrics import mean, weighted_perplexity, weighted_mean, bits_per_byte
from lm_eval import utils
from abc import abstractmethod
1,290
class LM(abc.ABC):
    def __init__(self):
        self.cache_hook = CacheHook(None)

    @abstractmethod
    def loglikelihood(self, requests):
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.

        :param requests: list
            A list of pairs (context, continuation)
            context: str
                Context string. Implementations of LM must be able to handle an
                empty context string.
            continuation: str
                The continuation over which log likelihood will be calculated. If
                there is a word boundary, the space should be in the continuation.
                For example, context="hello" continuation=" world" is correct.
        :return: list
            A list of pairs (logprob, isgreedy)
            logprob: float
                The log probability of `continuation`
            isgreedy:
                Whether `continuation` would be generated by greedy sampling from `context`
        """
        pass

    @abstractmethod
    def loglikelihood_rolling(self, requests):
        """Compute full log-likelihood of a string, with no truncation, for perplexity computation
        - We will use the full max context length of the model.
        - For inputs that exceed the max context length, we divide the tokenized
          string into chunks of up to the max context length.
        - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*,
          unlike other implementations which may simply concatenate multiple documents together.
        - IMPORTANT: We maximize the amount of context for each prediction. Specifically,
          for inputs that we break into multiple chunks, the last input will still have
          a full-sized context.
          Example:
            Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
            Prefix: EOT
            Max context length: 4
            Resulting input/prediction pairs:
                INPUT:  EOT   0   1   2
                PRED:     0   1   2   3
                INPUT:    3   4   5   6
                PRED:     4   5   6   7
                INPUT:    5   6   7   8
                PRED:             8   9
          Observe that:
            1. Each token is predicted exactly once
            2. For the last pair, we provide the full context, but only score the last two tokens

        :param requests: list
            A list of strings
            string: str
                String for which we are computing per-token loglikelihood
        :return: list
            A list of pairs (logprob, isgreedy)
            logprob: float
                The log probability of `continuation`
            isgreedy:
                Whether `continuation` would be generated by greedy sampling from `context`
        """
        pass

    # TODO: Add an optional max length
    @abstractmethod
    def greedy_until(self, requests):
        """Generate greedily until a stopping sequence

        :param requests: list
            A list of pairs (context, until)
            context: str
                Context string
            until: [str]
                The string sequences to generate until. These string sequences
                may each span across multiple tokens, or may be part of one token.
        :return: list
            A list of strings continuation
            continuation: str
                The generated continuation.
        """
        pass

    @classmethod
    def create_from_arg_string(cls, arg_string, additional_config=None):
        additional_config = {} if additional_config is None else additional_config
args = utils.simple_parse_args_string(arg_string)
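Because the rolling-window contract is the subtle part of this class, here is a minimal self-contained sketch of the chunking the docstring describes — written fresh for illustration, not lm_eval's own `get_rolling_token_windows`:

    def make_rolling_pairs(tokens, prefix_token, max_len):
        # First window is seeded with the prefix token; every later window keeps a
        # full-sized context and scores only the not-yet-predicted tokens.
        pairs = [([prefix_token] + tokens[:max_len - 1], tokens[:max_len])]
        done = max_len
        while done < len(tokens):
            n_new = min(max_len, len(tokens) - done)
            inp = tokens[done + n_new - max_len - 1 : done + n_new - 1]
            pairs.append((inp, tokens[done:done + n_new]))
            done += n_new
        return pairs

    # Reproduces the docstring example:
    # [(['EOT', 0, 1, 2], [0, 1, 2, 3]), ([3, 4, 5, 6], [4, 5, 6, 7]), ([5, 6, 7, 8], [8, 9])]
    print(make_rolling_pairs(list(range(10)), "EOT", 4))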
4
2023-10-15 06:05:13+00:00
2k
alextamkin/generative-elicitation
base_active_learning_agent.py
[ { "identifier": "query_api", "path": "utils.py", "snippet": "@retry(wait=wait_random_exponential(min=1, max=60))\ndef query_api(messages, engine, openai_cache=None, openai_cache_file=None, **kwargs):\n '''Queries the OpenAI API with the given messages.\n \n NOTE: This function mutates the messages list to add the new_message and the response from the API.\n \n Args:\n messages (list): A list of past messages to send to the API.\n openai_cache (dict, optional): The openai cache dict. Stores the API responses to avoid duplicate queries. Defaults to None.\n openai_cache_file (str, optional): The path to write the cache entries to. Defaults to None.\n \n Returns:\n str: The response from the API.\n '''\n messages_cache_key = json.dumps(messages)\n if openai_cache and messages_cache_key in openai_cache:\n response = openai_cache[messages_cache_key]\n else:\n if \"temperature\" not in kwargs:\n kwargs[\"temperature\"] = 0.0\n if engine == \"gpt-4\" or engine == \"gpt-3.5-turbo\":\n response = openai.ChatCompletion.create(\n model=engine,\n messages=messages,\n **kwargs\n )\n else:\n response = openai.Completion.create(\n engine=engine,\n prompt=messages[0],\n **kwargs\n )\n save_openai_cache({messages_cache_key: response}, openai_cache, openai_cache_file)\n if engine == \"gpt-4\" or engine == \"gpt-3.5-turbo\":\n response_text = response['choices'][0]['message']['content']\n messages.append({'role': 'assistant', 'content': response_text})\n else:\n response_text = response['choices'][0]['text']\n return response_text, response" }, { "identifier": "load_openai_cache", "path": "utils.py", "snippet": "def load_openai_cache(openai_cache_file):\n '''Loads the openai cache file into a dict.\n \n Args:\n openai_cache_file (str): The path to the openai cache file.\n \n Returns:\n dict: The openai cache dict.\n '''\n if not openai_cache_file:\n return None\n openai_cache = {}\n if os.path.exists(openai_cache_file):\n with open(openai_cache_file) as f:\n for line in f:\n openai_cache.update(json.loads(line))\n return openai_cache" } ]
import json
import re
import textwrap
from abc import ABC, abstractmethod
from utils import query_api, load_openai_cache
from sklearn.metrics import roc_auc_score
1,097
class BaseActiveLearningAgent(ABC):
    def __init__(self, target_specification_file, engine, openai_cache_file=None, **kwargs):
        self.get_gold_domain_info(target_specification_file)
        self.engine = engine
        self.openai_cache_file = openai_cache_file
        self.openai_cache = load_openai_cache(openai_cache_file)
        self.temperature = kwargs.get("temperature", 0.0)
        self.interaction_history = []

    def get_gold_domain_info(self, target_specification_file):
        '''Gets the gold domain specification that the model should try to learn
        and other associated information.
        '''
        gold_task = json.load(open(target_specification_file))  # "sample_tests.json"
        for key in gold_task:
            setattr(self, key, gold_task[key])
            if key == "regex":
                self.gold_regex_text = self.regex
                self.gold_regex = re.compile(self.gold_regex_text)
        self.persona_text = self.persona

    def get_task_description(self):
        return "validate an email address adheres to a specific format"

    @staticmethod
    def format_questions_and_answers(questions_and_answers):
        '''Formats the questions and answers into a string.
        Looks like:
        - Should the system allow numbers in the domain? -> Yes

        Args:
            questions_and_answers (list): A list of tuples of the form (question, answer).

        Returns:
            str: The formatted questions and answers.
        '''
        return '\n'.join([f"- {question} -> {answer}" for question, answer in questions_and_answers])

    def get_test_case_prompt(self, interaction_history, test_case):
        hypothesis_prompt = textwrap.dedent('''\
            {single_instance_prompt1}
            {previous_examples}
            {single_instance_prompt2}
            {test_case}
            '''
        ).format(
            single_instance_prompt1=self.test_case_prompt[0],
            previous_examples=self.format_questions_and_answers(interaction_history),
            single_instance_prompt2=self.test_case_prompt[1],
            test_case=test_case,
        )
        return [{"role": "user", "content": hypothesis_prompt}]

    def generate_test_case_answer(self, test_case):
        test_case_messages = self.get_test_case_prompt(self.interaction_history, test_case)
test_case_answer, _ = query_api(test_case_messages, self.engine, self.openai_cache, self.openai_cache_file)
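For concreteness, the static formatting helper behaves as below (a usage sketch that follows directly from its one-line implementation; the example questions are invented):

    history = [("Should the domain allow numbers?", "Yes"),
               ("Is a top-level domain required?", "No")]
    print(BaseActiveLearningAgent.format_questions_and_answers(history))
    # - Should the domain allow numbers? -> Yes
    # - Is a top-level domain required? -> No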
0
2023-10-16 18:43:47+00:00
2k
bcmi/libcom
libcom/harmony_score/harmony_score_prediction.py
[ { "identifier": "download_pretrained_model", "path": "libcom/utils/model_download.py", "snippet": "def download_pretrained_model(weight_path):\n if os.path.exists(weight_path):\n assert os.path.isfile(weight_path), weight_path\n return weight_path\n else:\n weight_path= os.path.abspath(weight_path)\n model_name = os.path.basename(weight_path)\n save_dir = os.path.dirname(weight_path)\n download_file_from_network(model_name, save_dir)\n print('Pretrained model has been stored to ', weight_path)\n return weight_path" }, { "identifier": "StyleEncoder", "path": "libcom/harmony_score/source/bargainnet.py", "snippet": "class StyleEncoder(nn.Module):\n def __init__(self, style_dim, norm_layer=nn.BatchNorm2d):\n super(StyleEncoder, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n ndf=64\n n_layers=6\n kw = 3\n padw = 0\n self.conv1f = PartialConv2d(3, ndf, kernel_size=kw, stride=2, padding=padw)\n self.relu1 = nn.ReLU(True)\n nf_mult = 1\n nf_mult_prev = 1\n\n n = 1\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv2f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)\n self.norm2f = norm_layer(ndf * nf_mult)\n self.relu2 = nn.ReLU(True)\n\n n = 2\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv3f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)\n self.norm3f = norm_layer(ndf * nf_mult)\n self.relu3 = nn.ReLU(True)\n\n n = 3\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv4f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)\n self.norm4f = norm_layer(ndf * nf_mult)\n self.relu4 = nn.ReLU(True)\n\n n = 4\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv5f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)\n self.avg_pooling = nn.AdaptiveAvgPool2d(1)\n self.convs = nn.Conv2d(ndf * nf_mult, style_dim, kernel_size=1, stride=1)\n\n def forward(self, input, mask):\n \"\"\"Standard forward.\"\"\"\n xb = input\n mb = mask\n\n xb, mb = self.conv1f(xb, mb)\n xb = self.relu1(xb)\n xb, mb = self.conv2f(xb, mb)\n xb = self.norm2f(xb)\n xb = self.relu2(xb)\n xb, mb = self.conv3f(xb, mb)\n xb = self.norm3f(xb)\n xb = self.relu3(xb)\n xb, mb = self.conv4f(xb, mb)\n xb = self.norm4f(xb)\n xb = self.relu4(xb)\n xb, mb = self.conv5f(xb, mb)\n xb = self.avg_pooling(xb)\n s = self.convs(xb)\n s = torch.squeeze(s)\n return s" } ]
import torch
import torchvision
import os
import torchvision.transforms as transforms
import math
from libcom.utils.model_download import download_pretrained_model
from libcom.utils.process_image import *
from libcom.utils.environment import *
from libcom.harmony_score.source.bargainnet import StyleEncoder
1,462
cur_dir = os.path.dirname(os.path.abspath(__file__))
model_set = ['BargainNet']


class HarmonyScoreModel:
    """
    Harmony score prediction model.

    Args:
        device (str | torch.device): gpu id
        model_type (str): predefined model type.
        kwargs (dict): other parameters for building model

    Examples:
        >>> from libcom import HarmonyScoreModel
        >>> from libcom.utils.process_image import make_image_grid
        >>> import cv2
        >>> net = HarmonyScoreModel(device=0, model_type='BargainNet')
        >>> test_dir = '../tests/harmony_score_prediction/'
        >>> img_names = ['vaulted-cellar-247391_inharm.jpg', 'ameland-5651866_harm.jpg']
        >>> vis_list, scores = [], []
        >>> for img_name in img_names:
        >>>     comp_img = test_dir + 'composite/' + img_name
        >>>     comp_mask = test_dir + 'composite_mask/' + img_name
        >>>     score = net(comp_img, comp_mask)
        >>>     vis_list += [comp_img, comp_mask]
        >>>     scores.append(score)
        >>> grid_img = make_image_grid(vis_list, text_list=[f'harmony_score:{scores[0]:.2f}', 'composite-mask', f'harmony_score:{scores[1]:.2f}', 'composite-mask'])
        >>> cv2.imwrite('../docs/_static/image/harmonyscore_result1.jpg', grid_img)

    Expected result:

    .. image:: _static/image/harmonyscore_result1.jpg
        :scale: 38 %
    """

    def __init__(self, device=0, model_type='BargainNet', **kwargs):
        assert model_type in model_set, f'No implementation for {model_type}'
        self.model_type = model_type
        self.option = kwargs

        weight_path = os.path.join(cur_dir, 'pretrained_models', 'BargainNet.pth')
download_pretrained_model(weight_path)
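The StyleEncoder in this record embeds an image region (via partial convolutions over a mask) into a style code. One illustrative way a harmony score could be derived from such codes — an assumption for exposition, not BargainNet's confirmed rule — is:

    import math
    import torch

    def harmony_from_style_codes(fg_code: torch.Tensor, bg_code: torch.Tensor) -> float:
        # Hypothetical mapping: smaller distance between foreground and background
        # style codes reads as more harmonious; exp squashes it into (0, 1].
        distance = torch.norm(fg_code - bg_code, p=2).item()
        return math.exp(-distance)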
0
2023-10-19 05:08:12+00:00
2k
pgorecki/lato
tests/test_dependency_provider.py
[ { "identifier": "SimpleDependencyProvider", "path": "lato/dependency_provider.py", "snippet": "class SimpleDependencyProvider(DependencyProvider):\n \"\"\"\n A dependency provider that manages dependencies and helps in automatic\n dependency injection based on type or parameter name.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialize the DependencyProvider.\n\n :param args: Class instances to be registered by types\n :param kwargs: Dependencies to be registered by types and with explicit names\n \"\"\"\n self._dependencies = {}\n self.update(*args, **kwargs)\n\n def register_dependency(self, identifier: str | type, dependency: Any):\n \"\"\"\n Register a dependency with a given identifier (name or type).\n\n :param identifier: The name or type to be used as an identifier for the dependency\n :param dependency: The actual dependency\n \"\"\"\n if isinstance(identifier, type):\n self._dependencies[identifier] = dependency\n\n self._dependencies[identifier] = dependency\n\n def has_dependency(self, identifier: str | type) -> bool:\n \"\"\"\n Check if a dependency with the given identifier exists.\n\n :param identifier: Identifier for the dependency\n :return: True if the dependency exists, otherwise False\n \"\"\"\n return identifier in self._dependencies\n\n def get_dependency(self, identifier: str | type) -> Any:\n \"\"\"\n Retrieve a dependency using its identifier (name or type).\n\n :param identifier: Identifier for the dependency\n :return: The associated dependency\n \"\"\"\n try:\n return self._dependencies[identifier]\n except KeyError as e:\n raise UnknownDependencyError(identifier)\n\n def copy(self, *args, **kwargs) -> DependencyProvider:\n \"\"\"\n Create a copy of self with updated dependencies.\n :param args: typed overrides\n :param kwargs: named overrides\n :return: A copy of the dependency provider\n \"\"\"\n dp = SimpleDependencyProvider()\n dp._dependencies.update(self._dependencies)\n dp.update(*args, **kwargs)\n return dp" }, { "identifier": "as_type", "path": "lato/dependency_provider.py", "snippet": "def as_type(obj: Any, cls: type) -> TypedDependency:\n return TypedDependency(obj, cls)" }, { "identifier": "get_function_parameters", "path": "lato/dependency_provider.py", "snippet": "def get_function_parameters(func) -> OrderedDict:\n \"\"\"\n Retrieve the function's parameters and their annotations.\n\n :param func: The function to inspect\n :return: An ordered dictionary of parameter names to their annotations\n \"\"\"\n handler_signature = inspect.signature(func)\n kwargs_iterator = iter(handler_signature.parameters.items())\n parameters = OrderedDict()\n for name, param in kwargs_iterator:\n parameters[name] = param.annotation\n return parameters" } ]
import abc
from lato.dependency_provider import (
    SimpleDependencyProvider,
    as_type,
    get_function_parameters,
)
963
class FooService:
    ...


def foo(a: int, b: str, c: FooService):
    ...


def test_create_provider_with_types():
    foo_service = FooService()
    dp = SimpleDependencyProvider(foo_service=foo_service)
    assert dp[FooService] is foo_service
    assert dp["foo_service"] is foo_service


def test_create_provider_with_primitive_kwarg():
    dp = SimpleDependencyProvider(x=1)
    assert dp["x"] == 1


def test_create_provider_with_class_instance_arg():
    service = FooService()
    dp = SimpleDependencyProvider(service)
    assert dp[FooService] is service


def test_create_provider_with_class_instance_kwarg():
    service = FooService()
    dp = SimpleDependencyProvider(service=service)
    assert dp[FooService] is service
    assert dp["service"] is service


def test_create_provider_with_class_instance_arg_and_kwarg_gets_overridden():
    service1 = FooService()
    service2 = FooService()
    dp = SimpleDependencyProvider(service1, service=service2)
    assert dp[FooService] is service2
    assert dp["service"] is service2


def test_resolve_custom_primitive_type():
    class Email(str):
        ...

    email = Email("john@example.com")
    dp = SimpleDependencyProvider(email=email)
    assert dp[Email] == email


def test_get_function_parameters():
params = get_function_parameters(foo)
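Since `get_function_parameters` returns an OrderedDict of parameter names to annotations (per the context snippet) and `foo` is annotated `(a: int, b: str, c: FooService)`, the assertions that presumably follow are along these lines (a sketch, not the repo's verbatim test):

    assert list(params.keys()) == ["a", "b", "c"]
    assert params["a"] is int
    assert params["b"] is str
    assert params["c"] is FooService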
2
2023-10-21 11:33:05+00:00
2k
instadeepai/flashbax
flashbax/buffers/flat_buffer_test.py
[ { "identifier": "flat_buffer", "path": "flashbax/buffers/flat_buffer.py", "snippet": "class ExperiencePair(NamedTuple, Generic[Experience]):\nclass TransitionSample(Generic[Experience]):\ndef validate_sample_batch_size(sample_batch_size: int, max_length: int):\ndef validate_min_length(min_length: int, add_batch_size: int, max_length: int):\ndef validate_max_length_add_batch_size(max_length: int, add_batch_size: int):\ndef validate_flat_buffer_args(\n max_length: int,\n min_length: int,\n sample_batch_size: int,\n add_batch_size: int,\n):\ndef create_flat_buffer(\n max_length: int,\n min_length: int,\n sample_batch_size: int,\n add_sequences: bool,\n add_batch_size: Optional[int],\n) -> TrajectoryBuffer:\n def sample_fn(state: TrajectoryBufferState, rng_key: PRNGKey) -> TransitionSample:\ndef make_flat_buffer(\n max_length: int,\n min_length: int,\n sample_batch_size: int,\n add_sequences: bool = False,\n add_batch_size: Optional[int] = None,\n) -> TrajectoryBuffer:" }, { "identifier": "get_fake_batch", "path": "flashbax/buffers/conftest.py", "snippet": "def get_fake_batch(fake_transition: chex.ArrayTree, batch_size) -> chex.ArrayTree:\n \"\"\"Create a fake batch with differing values for each transition.\"\"\"\n return jax.tree_map(\n lambda x: jnp.stack([x + i for i in range(batch_size)]), fake_transition\n )" }, { "identifier": "_DEVICE_COUNT_MOCK", "path": "flashbax/conftest.py", "snippet": "_DEVICE_COUNT_MOCK = 2" } ]
from copy import deepcopy
from flashbax.buffers import flat_buffer
from flashbax.buffers.conftest import get_fake_batch
from flashbax.conftest import _DEVICE_COUNT_MOCK
import chex
import jax
import jax.numpy as jnp
import pytest
645
# Copyright 2023 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def test_add_and_can_sample(
    fake_transition: chex.ArrayTree,
    min_length: int,
    max_length: int,
    add_batch_size: int,
) -> None:
    """Check the `add` function by filling the buffer all the way to the
    max_length and checking that it produces the expected behaviour.
    """
fake_batch = get_fake_batch(fake_transition, add_batch_size)
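A plausible body for the test, sketched from the `make_flat_buffer` signature shown in this record's context; the `init`/`add`/`can_sample` method names on the returned `TrajectoryBuffer`, and the sample batch size chosen below, are assumptions, and the real test's assertions may differ:

    buffer = flat_buffer.make_flat_buffer(
        max_length=max_length,
        min_length=min_length,
        sample_batch_size=min_length,  # hypothetical choice for the sketch
        add_sequences=False,
        add_batch_size=add_batch_size,
    )
    state = buffer.init(fake_transition)
    assert not buffer.can_sample(state)   # empty buffer cannot be sampled
    while not buffer.can_sample(state):   # fill until sampling becomes legal
        state = buffer.add(state, fake_batch)
    assert buffer.can_sample(state)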
1
2023-10-17 10:57:14+00:00
2k
TheDuckAI/DuckTrack
ducktrack/playback.py
[ { "identifier": "KeyCombinationListener", "path": "ducktrack/keycomb.py", "snippet": "class KeyCombinationListener:\n \"\"\"\n Simple and bad key combination listener.\n \"\"\"\n \n def __init__(self):\n self.current_keys = set()\n self.callbacks = {}\n self.listener = Listener(on_press=self.on_key_press, on_release=self.on_key_release)\n\n def add_comb(self, keys, callback):\n self.callbacks[tuple([name_to_key(key_name) for key_name in sorted(keys)])] = callback\n\n def on_key_press(self, key):\n self.current_keys.add(key)\n for comb, callback in self.callbacks.items():\n if all(k in self.current_keys for k in comb):\n return callback()\n\n def on_key_release(self, key):\n if key in self.current_keys:\n self.current_keys.remove(key)\n\n def start(self):\n self.listener.start()\n\n def stop(self):\n self.listener.stop()" }, { "identifier": "fix_windows_dpi_scaling", "path": "ducktrack/util.py", "snippet": "def fix_windows_dpi_scaling():\n \"\"\"\n Fixes DPI scaling issues with legacy windows applications\n Reference: https://pynput.readthedocs.io/en/latest/mouse.html#ensuring-consistent-coordinates-between-listener-and-controller-on-windows\n \"\"\"\n import ctypes\n PROCESS_PER_MONITOR_DPI_AWARE = 2\n ctypes.windll.shcore.SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE)" }, { "identifier": "get_recordings_dir", "path": "ducktrack/util.py", "snippet": "def get_recordings_dir() -> str:\n documents_folder = Path.home() / 'Documents' / 'DuckTrack_Recordings'\n return str(documents_folder)" }, { "identifier": "name_to_button", "path": "ducktrack/util.py", "snippet": "def name_to_button(name: str) -> Button:\n return getattr(Button, name)" }, { "identifier": "name_to_key", "path": "ducktrack/util.py", "snippet": "def name_to_key(name: str) -> Key | KeyCode:\n try:\n return getattr(Key, name)\n except AttributeError:\n return KeyCode.from_char(name)" } ]
import json
import math
import os
import sys
import time
import pyautogui
from pynput.keyboard import Controller as KeyboardController
from pynput.keyboard import Key
from pynput.mouse import Button
from pynput.mouse import Controller as MouseController
from .keycomb import KeyCombinationListener
from .util import (fix_windows_dpi_scaling, get_recordings_dir, name_to_button,
                   name_to_key)
1,457
pyautogui.PAUSE = 0
pyautogui.DARWIN_CATCH_UP_TIME = 0


class Player:
    """
    Plays back recordings.
    """

    def __init__(self):
        self.stop_playback = False
        self.listener = KeyCombinationListener()

        def stop_comb_pressed():
            self.stop_playback = True
            return False

        self.listener.add_comb(("shift", "esc"), stop_comb_pressed)
        self.listener.start()

    def play(self, recording_path: str):
        with open(os.path.join(recording_path, "events.jsonl"), "r") as f:
            events = [json.loads(line) for line in f.readlines()]

        with open(os.path.join(recording_path, "metadata.json"), "r") as f:
            metadata = json.load(f)

        self.playback(events, metadata)

    def playback(self, events: list[dict], metadata: dict):
        if metadata["system"] == "Windows":
            fix_windows_dpi_scaling()

        mouse_controller = MouseController()
        keyboard_controller = KeyboardController()

        if not events:
            self.listener.stop()
            return

        presses_to_skip = 0
        releases_to_skip = 0

        in_click_sequence = False

        for i, event in enumerate(events):
            start_time = time.perf_counter()

            if self.stop_playback:
                return

            def do_mouse_press(button):
                for j, second_event in enumerate(events[i + 1:]):
                    # make sure the time between mouse clicks is less than 500ms
                    if second_event["time_stamp"] - event["time_stamp"] > 0.5:
                        break

                    if "x" in second_event and "y" in second_event:
                        # if the mouse moves out of the click radius/rectangle, it is not a click sequence
                        if math.sqrt((second_event["y"] - event["y"]) ** 2 +
                                     (second_event["x"] - event["x"]) ** 2) > 4:
                            break

                    if second_event["action"] == "click" and second_event["pressed"]:
                        for k, third_event in enumerate(events[i + j + 2:]):
                            if third_event["time_stamp"] - second_event["time_stamp"] > 0.5:
                                break

                            if "x" in third_event and "y" in third_event:
                                if math.sqrt((third_event["y"] - event["y"]) ** 2 +
                                             (third_event["x"] - event["x"]) ** 2) > 5:
                                    break

                            if third_event["action"] == "click" and third_event["pressed"]:
                                mouse_controller.click(button, 3)
                                return 2, 2

                        mouse_controller.click(button, 2)
                        return 1, 1

                mouse_controller.press(button)
                return 0, 0

            if event["action"] == "move":
                mouse_controller.position = (event["x"], event["y"])

            elif event["action"] == "click":
                button = name_to_button(event["button"])

                if event["pressed"]:
                    if presses_to_skip == 0:
                        presses, releases = do_mouse_press(button)
                        presses_to_skip += presses
                        releases_to_skip += releases

                        if presses > 0:
                            in_click_sequence = True
                    else:
                        presses_to_skip -= 1
                else:
                    if releases_to_skip == 0:
                        mouse_controller.release(button)

                        if in_click_sequence:
                            keyboard_controller.press(Key.shift)
                            mouse_controller.click(Button.left)
                            keyboard_controller.release(Key.shift)
                            in_click_sequence = False
                    else:
                        releases_to_skip -= 1

            elif event["action"] == "scroll":
                if metadata["system"] == "Windows":
                    # for some reason on windows, pynput scroll is correct but pyautogui is not
                    mouse_controller.scroll(metadata["scroll_direction"] * event["dx"],
                                            metadata["scroll_direction"] * event["dy"])
                else:
                    pyautogui.hscroll(clicks=metadata["scroll_direction"] * event["dx"])
                    pyautogui.vscroll(clicks=metadata["scroll_direction"] * event["dy"])

            elif event["action"] in ["press", "release"]:
key = name_to_key(event["name"])
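The record stops at the key lookup; the natural continuation (a sketch — the repo's exact follow-up, including any sleep to honor the recorded timestamps, is not shown here) would be:

            if event["action"] == "press":
                keyboard_controller.press(key)
            else:
                keyboard_controller.release(key)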
4
2023-10-18 19:34:19+00:00
2k
e4s2023/E4S2023
swap_face_fine/face_vid2vid/modules/model.py
[ { "identifier": "AntiAliasInterpolation2d", "path": "swap_face_fine/face_vid2vid/modules/util.py", "snippet": "class AntiAliasInterpolation2d(nn.Module):\n \"\"\"\n Band-limited downsampling, for better preservation of the input signal.\n \"\"\"\n def __init__(self, channels, scale):\n super(AntiAliasInterpolation2d, self).__init__()\n sigma = (1 / scale - 1) / 2\n kernel_size = 2 * round(sigma * 4) + 1\n self.ka = kernel_size // 2\n self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka\n\n kernel_size = [kernel_size, kernel_size]\n sigma = [sigma, sigma]\n # The gaussian kernel is the product of the\n # gaussian function of each dimension.\n kernel = 1\n meshgrids = torch.meshgrid(\n [\n torch.arange(size, dtype=torch.float32)\n for size in kernel_size\n ]\n )\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))\n\n # Make sure sum of values in gaussian kernel equals 1.\n kernel = kernel / torch.sum(kernel)\n # Reshape to depthwise convolutional weight\n kernel = kernel.view(1, 1, *kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n\n self.register_buffer('weight', kernel)\n self.groups = channels\n self.scale = scale\n inv_scale = 1 / scale\n self.int_inv_scale = int(inv_scale)\n\n def forward(self, input):\n if self.scale == 1.0:\n return input\n\n out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))\n out = F.conv2d(out, weight=self.weight, groups=self.groups)\n out = out[:, :, ::self.int_inv_scale, ::self.int_inv_scale]\n\n return out" }, { "identifier": "make_coordinate_grid_2d", "path": "swap_face_fine/face_vid2vid/modules/util.py", "snippet": "def make_coordinate_grid_2d(spatial_size, type):\n \"\"\"\n Create a meshgrid [-1,1] x [-1,1] of given spatial_size.\n \"\"\"\n h, w = spatial_size\n x = torch.arange(w).type(type)\n y = torch.arange(h).type(type)\n\n x = (2 * (x / (w - 1)) - 1)\n y = (2 * (y / (h - 1)) - 1)\n\n yy = y.view(-1, 1).repeat(1, w)\n xx = x.view(1, -1).repeat(h, 1)\n\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n\n return meshed" } ]
from torch import nn
from swap_face_fine.face_vid2vid.modules.util import AntiAliasInterpolation2d, make_coordinate_grid_2d
from torchvision import models
from torch.autograd import grad
from torchvision import transforms
import torch
import torch.nn.functional as F
import numpy as np
import swap_face_fine.face_vid2vid.modules.hopenet as hopenet
1,327
class Vgg19(torch.nn.Module):
    """
    Vgg19 network for perceptual loss.
    """

    def __init__(self, requires_grad=False):
        super(Vgg19, self).__init__()
        vgg_pretrained_features = models.vgg19(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])

        self.mean = torch.nn.Parameter(data=torch.Tensor(np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))),
                                       requires_grad=False)
        self.std = torch.nn.Parameter(data=torch.Tensor(np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))),
                                      requires_grad=False)

        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        X = (X - self.mean) / self.std
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out


class ImagePyramide(torch.nn.Module):
    """
    Create image pyramide for computing pyramide perceptual loss.
    """

    def __init__(self, scales, num_channels):
        super(ImagePyramide, self).__init__()
        downs = {}
        for scale in scales:
downs[str(scale).replace('.', '-')] = AntiAliasInterpolation2d(num_channels, scale)
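In the first-order-motion lineage this pyramid class typically finishes by registering the dict as a ModuleDict and returning one anti-aliased downsample per scale; a sketch under that assumption (not verified against this repo):

        self.downs = nn.ModuleDict(downs)

    def forward(self, x):
        out_dict = {}
        for scale, down_module in self.downs.items():
            # keys were stored with '.' replaced by '-', so map them back
            out_dict['prediction_' + str(scale).replace('-', '.')] = down_module(x)
        return out_dict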
0
2023-10-15 12:15:01+00:00
2k
riverscn/epghub
main.py
[ { "identifier": "utils", "path": "epg/utils.py", "snippet": "def load_config(path: str) -> list[Channel]:\ndef scrap_channel(\n channel: Channel, channels_config, date: date = datetime.today().date()\n) -> bool:\ndef copy_channels(\n channels: list[Channel], new_channels: list[Channel]\n) -> tuple[int, set]:\ndef update_preview(channel: Channel) -> int:\ndef update_recap(channel: Channel) -> int:\ndef update_channel_full(channel, num_refresh_channels):\n def _update_recap(channel):\n def _update_preview(channel):" }, { "identifier": "xmltv", "path": "epg/generator/xmltv.py", "snippet": "def write(filepath: str, channels: list[Channel], info: str = \"\") -> bool:" }, { "identifier": "diyp", "path": "epg/generator/diyp.py", "snippet": "def write(dir: str, channels: list[Channel]) -> bool:" }, { "identifier": "__xmltv", "path": "epg/scraper/__xmltv.py", "snippet": "def get_channels(xmltv_url: str, dtd: etree.DTD | None = None) -> list[Channel]:" } ]
from jinja2 import Environment, FileSystemLoader
from epg import utils
from epg.generator import xmltv
from epg.generator import diyp
from epg.scraper import __xmltv
from lxml import etree
from datetime import datetime, timezone
from croniter import croniter
import os
import shutil
641
CF_PAGES = os.getenv("CF_PAGES")
CF_PAGES_URL = os.getenv("CF_PAGES_URL")
DEPLOY_HOOK = os.getenv("DEPLOY_HOOK")
CLOUDFLARE_API_TOKEN = os.getenv("CLOUDFLARE_API_TOKEN")
XMLTV_URL = os.getenv("XMLTV_URL", "")
TZ = os.getenv("TZ")
if TZ is None:
    print(
        "!!!Please set the TZ environment variable to define a timezone, or the system timezone will be used by default!!!"
    )
CRON_TRIGGER = os.getenv("CRON_TRIGGER", "0 0 * * *")
next_cron_time = (
    croniter(CRON_TRIGGER, datetime.now(timezone.utc))
    .get_next(datetime)
    .replace(tzinfo=timezone.utc)
    .astimezone()
)

dtd = etree.DTD(open("xmltv.dtd", "r"))

now = datetime.now()
current_timezone = now.astimezone().tzinfo
timezone_name = current_timezone.tzname(now)
timezone_offset = now.astimezone().strftime("%z")
print("use timezone:", timezone_name, f"UTC{timezone_offset}", flush=True)

config_path = os.path.join(os.getcwd(), "config", "channels.yaml")
epg_path = os.path.join(os.getcwd(), "web", "epg.xml")
if not os.path.exists(os.path.join(os.getcwd(), "web")):
    os.mkdir(os.path.join(os.getcwd(), "web"))

channels = utils.load_config(config_path)

if XMLTV_URL == "":
    xml_channels = []
    print("!!!Please set the XMLTV_URL environment variable to reuse XML!!!")
else:
    print("reuse XML:", XMLTV_URL, flush=True)
xml_channels = __xmltv.get_channels(XMLTV_URL, dtd)
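From the helper signatures in this record's context (`utils.copy_channels`, `utils.scrap_channel`, `xmltv.write`, `diyp.write`), the script plausibly continues along these lines — a rough sketch, not the repo's verbatim flow; the output directory, `info` string, and config argument are guesses:

    num_reused, _ = utils.copy_channels(channels, xml_channels)  # reuse overlapping programmes
    for channel in channels:
        utils.scrap_channel(channel, channels, datetime.today().date())  # assumed config argument
    xmltv.write(epg_path, channels, info="epghub")                       # "epghub" is a placeholder
    diyp.write(os.path.join(os.getcwd(), "web", "diyp"), channels)       # hypothetical target dir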
3
2023-10-20 04:35:12+00:00
2k
lancopku/label-words-are-anchors
icl/util_classes/context_solver.py
[ { "identifier": "format_s_dict", "path": "icl/utils/data_wrapper.py", "snippet": "def sst2_wrap_data(demonstrations, input_sample, label_dict):\ndef trec_wrap_data(demonstrations, input_sample, label_dict):\ndef emo_wrap_data(demonstrations, input_sample, label_dict):\ndef agnews_wrap_data(demonstrations, input_sample, label_dict):\ndef wrap_data(demonstrations, input_sample, label_dict, task_name):\ndef instruct_wrapper(instruct: str, input_sample, label_dict, task_name):\ndef wrap_dataset(dataset: datasets.arrow_dataset.Dataset, demonstration, label_dict, task_name):\n def wrap(example):\ndef wrap_dataset_with_instruct(dataset: datasets.arrow_dataset.Dataset, instruct, label_dict,\n task_name):\n def wrap(example):\ndef get_max_length(tokenizer):\ndef tokenize_dataset(dataset, tokenizer):\n def tokenize_function(examples):\ndef remove_str_columns(dataset):" }, { "identifier": "TensorStrFinder", "path": "icl/utils/other.py", "snippet": "class TensorStrFinder:\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n def find_tensor_in_tensor(self, a_tensor: Union[torch.Tensor, list], b_tensor: torch.Tensor,\n return_mask=True, match_before: Optional[int] = None):\n if len(b_tensor.shape) == 2:\n assert b_tensor.shape[0] == 1\n b_tensor = b_tensor[0]\n if isinstance(a_tensor, list):\n a_tensor = torch.tensor(a_tensor)\n if a_tensor.device != b_tensor.device:\n a_tensor = a_tensor.to(b_tensor.device)\n\n window_size = len(a_tensor)\n b_windows = b_tensor.unfold(0, window_size, 1)\n\n matches = torch.all(b_windows == a_tensor, dim=1)\n\n positions = torch.nonzero(matches, as_tuple=True)[0]\n\n if return_mask:\n mask = torch.zeros_like(b_tensor, dtype=torch.bool)\n for pos in positions:\n if match_before is None or pos + window_size <= match_before:\n mask[pos:pos + window_size] = True\n return mask\n\n return positions\n\n def find_str_in_tensor(self, s: str, t: torch.Tensor, return_mask=True, match_before=None):\n s_tokens = self.tokenizer.encode(s, add_special_tokens=False)\n s_tensor = torch.LongTensor(s_tokens)\n return self.find_tensor_in_tensor(s_tensor, t, return_mask=return_mask,\n match_before=match_before)\n\n def get_strs_mask_in_tensor(self, list_s: List[str], t: torch.Tensor, match_before=None):\n list_s_tokens = [self.tokenizer.encode(s, add_special_tokens=False) for s in list_s]\n list_s_tensor = [torch.LongTensor(s_tokens) for s_tokens in list_s_tokens]\n mask_tensor_list = [\n self.find_tensor_in_tensor(s_tensor, t, return_mask=True, match_before=match_before) for\n s_tensor in list_s_tensor]\n mask_tensor = functools.reduce(torch.logical_or, mask_tensor_list)\n return mask_tensor" } ]
import warnings
import torch
from copy import deepcopy
from ..utils.data_wrapper import format_s_dict
from ..utils.other import TensorStrFinder
1,276
class ContextSolver:
    def __init__(self, task_name, tokenizer=None):
        assert task_name in ['sst2', 'trec', 'agnews', 'emo']
        self.task_name = task_name
        self.tokenizer = tokenizer
        self.format_s = format_s_dict[task_name]
        self.parse_format_s()

    def parse_format_s(self):
        self.X_prefix = self.format_s.split('\n')[0].split(':')[0] + ':'
        self.Y_prefix = self.format_s.split('\n')[1].split(':')[0] + ':'

    def get_empty_demo_context(self, context: str, only_demo_part=True):
        context = context.split('\n')
        for i, line in enumerate(context[:-2]):
            if self.X_prefix in line:
                line = self.X_prefix
            elif self.Y_prefix in line:
                line = line
            else:
                # note: the original `raise warnings.warn(...)` would raise a
                # TypeError, since warnings.warn returns None; warn instead
                warnings.warn('Global prefix or other str exists!')
            context[i] = line
        if only_demo_part:
            context = context[:-2]
        context = '\n'.join(context)
        return context

    def get_mask_strings_and_match_before(self, context, input_ids, tokenizer=None):
        if tokenizer is None:
            tokenizer = self.tokenizer
        poss = torch.where(input_ids == tokenizer.encode('\n', add_special_tokens=False)[0])[0]
        if len(poss) >= 2:
            match_before = poss[-2] + 1
        else:
            match_before = None
        list_s = []
        list_s.append(self.X_prefix)
        list_s.append('\n' + self.X_prefix)
        context = context.split('\n')
        for i, line in enumerate(context[:-2]):
            if self.X_prefix in line:
                pass
            elif self.Y_prefix in line:
                list_s.append('\n' + line)
                list_s.append('\n' + line + '\n')
            else:
                warnings.warn('Global prefix or other str exists!')
        return list_s, match_before

    def get_mask(self, input_ids, tokenizer=None):
        if isinstance(input_ids, list):
            input_ids = torch.tensor(input_ids)
        if len(input_ids.shape) == 2:
            assert input_ids.shape[0] == 1
            input_ids = input_ids[0]
        if tokenizer is None:
            tokenizer = self.tokenizer
        context = tokenizer.decode(input_ids)
        list_s, match_before = self.get_mask_strings_and_match_before(context, input_ids=input_ids,
                                                                      tokenizer=tokenizer)
tensor_str_finder = TensorStrFinder(tokenizer=tokenizer)
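Per the `TensorStrFinder.get_strs_mask_in_tensor` signature shown in this record's context (it ORs together the per-string masks), the method presumably finishes along these lines (a sketch):

        mask = tensor_str_finder.get_strs_mask_in_tensor(
            list_s, input_ids, match_before=match_before
        )
        return mask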
1
2023-10-17 11:40:03+00:00
2k
Aggify/aggify
tests/test_q.py
[ { "identifier": "Aggify", "path": "aggify/aggify.py", "snippet": "def last_out_stage_check(method: AggifyType) -> AggifyType:\n def decorator(*args, **kwargs):\n def __init__(self, base_model: Type[Document]):\n def __iter__(self):\n def project(self, **kwargs: QueryParams) -> \"Aggify\":\n def group(self, expression: Union[str, Dict, List, None] = \"id\") -> \"Aggify\":\n def order_by(self, *order_fields: Union[str, List[str]]) -> \"Aggify\":\n def raw(self, raw_query: dict) -> \"Aggify\":\n def add_fields(self, **fields) -> \"Aggify\": # noqa\n def filter(\n self, arg: Union[Q, None] = None, **kwargs: Union[QueryParams, F, list]\n ) -> \"Aggify\":\n def out(self, coll: str, db: Union[str, None] = None) -> \"Aggify\":\n def __to_aggregate(self, query: Dict[str, Any]) -> None:\n def __getitem__(self, index: Union[slice, int]) -> \"Aggify\":\n def unwind(\n self,\n path: str,\n include_array_index: Union[str, None] = None,\n preserve: bool = False,\n ) -> \"Aggify\":\n def annotate(\n self,\n annotate_name: Union[str, None] = None,\n accumulator: Union[str, None] = None,\n f: Union[Union[str, Dict], F, int, None] = None,\n **kwargs,\n ) -> \"Aggify\":\n def _get_field_type_and_accumulator(\n accumulator: str,\n ) -> Tuple[Type, str]:\n def _get_annotate_value(self, f: Union[F, str]) -> Union[Dict, str]:\n def _do_annotate_with_expression(\n annotate: Dict[str, Dict[str, Any]], base_model_fields: Dict[str, Any]\n ) -> Tuple[Dict[str, Dict[str, Any]], List[str]]:\n def __match(self, matches: Dict[str, Any]):\n def __lookup(\n from_collection: str, local_field: str, as_name: str, foreign_field: str = \"_id\"\n ) -> Dict[str, Dict[str, str]]:\n def __combine_sequential_matches(self) -> List[Dict[str, Union[dict, Any]]]:\n def get_field_name_recursively(\n self, field: str, base: Union[CollectionType, None] = None\n ) -> str:\n def lookup(\n self,\n from_collection: CollectionType,\n as_name: str,\n query: Union[List[Q], Union[Q, None], List[\"Aggify\"]] = None,\n let: Union[List[str], None] = None,\n local_field: Union[str, None] = None,\n foreign_field: Union[str, None] = None,\n raw_let: Union[Dict, None] = None,\n ) -> \"Aggify\":\n def get_model_field(model: Type[Document], field: str) -> mongoengine_fields:\n def _replace_base(self, embedded_field) -> str:\n def replace_root(\n self, *, embedded_field: str, merge: Union[Dict, None] = None\n ) -> \"Aggify\":\n def replace_with(\n self, *, embedded_field: str, merge: Union[Dict, None] = None\n ) -> \"Aggify\":\n def redact(self, value1, condition, value2, then_value, else_value):\n def clean_then_else(_then_value, _else_value):\nclass Aggify:" }, { "identifier": "InvalidOperator", "path": "aggify/exceptions.py", "snippet": "class InvalidOperator(AggifyBaseException):\n def __init__(self, operator: str):\n self.message = f\"Operator {operator} does not exists, please refer to documentation to see all supported operators.\"\n super().__init__(self.message)" }, { "identifier": "BaseModel", "path": "tests/test_aggify.py", "snippet": "class BaseModel(Document):\n # Define your fields here\n name = StringField(max_length=100)\n age = IntField()\n\n meta = {\"allow_inheritance\": True, \"abstract\": True}" } ]
import pytest
from aggify import Q, F, Aggify
from aggify.exceptions import InvalidOperator
from tests.test_aggify import BaseModel
1,583
class TestQ:
    # Test OR operator with multiple conditions
    def test_or_operator_with_multiple_conditions(self):
        q1 = Q(name="John")
        q2 = Q(name="Alice")
        q_combined = q1 | q2
        assert dict(q_combined) == {
            "$match": {"$or": [dict(q1)["$match"], dict(q2)["$match"]]}
        }

    def test_or_operator_with_multiple_conditions_more_than_two(self):
        q1 = Q(name="John")
        q2 = Q(name="Alice")
        q3 = Q(name="Bob")
        q_combined = q1 | q2 | q3
        assert dict(q_combined) == {
            "$match": {
                "$or": [dict(q1)["$match"], dict(q2)["$match"], dict(q3)["$match"]]
            }
        }

    def test_and(self):
        q1 = Q(name="Mahdi")
        q2 = Q(age__gt=20)
        q = q1 & q2
        assert dict(q) == {"$match": {"$and": [dict(q1)["$match"], dict(q2)["$match"]]}}

    def test_multiple_and(self):
        q1 = Q(name="Mahdi")
        q2 = Q(age__gt=20)
        q3 = Q(age__lt=30)
        q = q1 & q2 & q3
        assert dict(q) == {
            "$match": {
                "$and": [dict(q1)["$match"], dict(q2)["$match"], dict(q3)["$match"]]
            }
        }

    # Test combining NOT operators with AND
    def test_combine_not_operators_with_and(self):
        q1 = Q(name="John")
        q2 = Q(age__lt=30)
        q_combined = ~q1 & ~q2
        assert dict(q_combined) == {
            "$match": {
                "$and": [{"$not": [dict(q1)["$match"]]}, {"$not": [dict(q2)["$match"]]}]
            }
        }

    # Test combining NOT operators with OR
    def test_combine_not_operators_with_or(self):
        q1 = Q(name="John")
        q2 = Q(age__lt=30)
        q_combined = ~q1 | ~q2  # Changed | to combine OR
        assert dict(q_combined) == {
            "$match": {
                "$or": [{"$not": [dict(q1)["$match"]]}, {"$not": [dict(q2)["$match"]]}]
            }
        }

    def test_unsuitable_key_for_f(self):
class TestQ:
    # Test OR operator with multiple conditions
    def test_or_operator_with_multiple_conditions(self):
        q1 = Q(name="John")
        q2 = Q(name="Alice")
        q_combined = q1 | q2
        assert dict(q_combined) == {
            "$match": {"$or": [dict(q1)["$match"], dict(q2)["$match"]]}
        }

    def test_or_operator_with_multiple_conditions_more_than_two(self):
        q1 = Q(name="John")
        q2 = Q(name="Alice")
        q3 = Q(name="Bob")
        q_combined = q1 | q2 | q3
        assert dict(q_combined) == {
            "$match": {
                "$or": [dict(q1)["$match"], dict(q2)["$match"], dict(q3)["$match"]]
            }
        }

    def test_and(self):
        q1 = Q(name="Mahdi")
        q2 = Q(age__gt=20)
        q = q1 & q2
        assert dict(q) == {"$match": {"$and": [dict(q1)["$match"], dict(q2)["$match"]]}}

    def test_multiple_and(self):
        q1 = Q(name="Mahdi")
        q2 = Q(age__gt=20)
        q3 = Q(age__lt=30)
        q = q1 & q2 & q3
        assert dict(q) == {
            "$match": {
                "$and": [dict(q1)["$match"], dict(q2)["$match"], dict(q3)["$match"]]
            }
        }

    # Test combining NOT operators with AND
    def test_combine_not_operators_with_and(self):
        q1 = Q(name="John")
        q2 = Q(age__lt=30)
        q_combined = ~q1 & ~q2
        assert dict(q_combined) == {
            "$match": {
                "$and": [{"$not": [dict(q1)["$match"]]}, {"$not": [dict(q2)["$match"]]}]
            }
        }

    # Test combining NOT operators with OR
    def test_combine_not_operators_with_or(self):
        q1 = Q(name="John")
        q2 = Q(age__lt=30)
        q_combined = ~q1 | ~q2  # Changed | to combine OR
        assert dict(q_combined) == {
            "$match": {
                "$or": [{"$not": [dict(q1)["$match"]]}, {"$not": [dict(q2)["$match"]]}]
            }
        }

    def test_unsuitable_key_for_f(self):
with pytest.raises(InvalidOperator):
1
2023-10-22 07:53:28+00:00
2k
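A hedged usage sketch distilled from the tests above: Q objects compose with |, & and ~ into a single $match stage. That keys like age__gt translate to MongoDB operators such as $gt is an assumption here; the record only shows the composed structure.

from aggify import Q

q = (Q(name="John") | Q(name="Alice")) & ~Q(age__lt=18)
stage = dict(q)  # {"$match": {...}}, usable as one aggregation pipeline stage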
sotopia-lab/sotopia
tests/envs/test_get_bio.py
[ { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "RelationshipType", "path": "sotopia/database/persistent_profile.py", "snippet": "class RelationshipType(IntEnum):\n stranger = 0\n know_by_name = 1\n acquaintance = 2\n friend = 3\n romantic_relationship = 4\n family_member = 5" }, { "identifier": "get_bio", "path": "sotopia/envs/parallel.py", "snippet": "def get_bio(\n relationship: RelationshipType, profile: AgentProfile, agent_id: int\n) -> str:\n match relationship:\n case RelationshipType.stranger:\n return _agent_profile_to_stranger_self(profile, agent_id=agent_id)\n case RelationshipType.know_by_name:\n return _agent_profile_to_name_self(profile, agent_id=agent_id)\n case RelationshipType.acquaintance:\n return _agent_profile_to_aquaintance_self(\n profile, agent_id=agent_id\n )\n case RelationshipType.friend | RelationshipType.romantic_relationship | RelationshipType.family_member:\n return _agent_profile_to_friendabove_self(\n profile, agent_id=agent_id\n )\n case _:\n raise ValueError(f\"Unknown relationship {relationship}\")" }, { "identifier": "render_text_for_agent", "path": "sotopia/envs/parallel.py", "snippet": "@configurable\ndef render_text_for_agent(\n raw_text: str,\n agent_id: int,\n tags_to_render: list[str] = [\n \"extra_info\",\n \"clarification_hint\",\n \"strategy_hint\",\n ],\n) -> str:\n return XMLRenderer()(\n raw_text,\n RenderContext(\n viewer=f\"agent_{agent_id}\", tags_to_render=tags_to_render\n ),\n )" } ]
from typing import Any
from sotopia.database.persistent_profile import (
    AgentProfile,
    RelationshipType,
)
from sotopia.envs.parallel import get_bio, render_text_for_agent
import pytest
763
@pytest.fixture
def _get_john_profile() -> AgentProfile:
    return AgentProfile(
        first_name="John",
        last_name="Doe",
        personality_and_values="I am a big five",
        public_info="I am a public info",
        secret="I am a secret",
    )


def test_get_bio(_get_john_profile: Any) -> None:
    john_profile = _get_john_profile

    background = get_bio(
@pytest.fixture
def _get_john_profile() -> AgentProfile:
    return AgentProfile(
        first_name="John",
        last_name="Doe",
        personality_and_values="I am a big five",
        public_info="I am a public info",
        secret="I am a secret",
    )


def test_get_bio(_get_john_profile: Any) -> None:
    john_profile = _get_john_profile

    background = get_bio(
RelationshipType.stranger,
1
2023-10-23 19:47:26+00:00
2k
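A sketch of the call the test above builds toward. That constructing an AgentProfile needs no live Redis connection (redis-om models only touch the store on save) is an assumption about redis-om, not something the record states.

profile = AgentProfile(first_name="John", last_name="Doe", secret="I am a secret")
bio = get_bio(RelationshipType.stranger, profile, agent_id=0)  # dispatches to the stranger renderer per get_bio's match statement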
Zai-Kun/reverse-engineered-chatgpt
re_gpt/async_chatgpt.py
[ { "identifier": "BackendError", "path": "re_gpt/errors.py", "snippet": "class BackendError(Exception):\n def __init__(self, error_code):\n self.error_code = error_code\n self.message = (\n f\"An error occurred on the backend. Error code: {self.error_code}\"\n )\n super().__init__(self.message)" }, { "identifier": "InvalidSessionToken", "path": "re_gpt/errors.py", "snippet": "class InvalidSessionToken(Exception):\n def __init__(self):\n self.message = \"Invalid session token provided.\"\n super().__init__(self.message)" }, { "identifier": "RetryError", "path": "re_gpt/errors.py", "snippet": "class RetryError(Exception):\n def __init__(self, website, message=\"Exceeded maximum retries\"):\n self.website = website\n self.message = f\"{message} for website: {website}\"\n super().__init__(self.message)" }, { "identifier": "TokenNotProvided", "path": "re_gpt/errors.py", "snippet": "class TokenNotProvided(Exception):\n def __init__(self):\n self.message = \"Token not provided. Please pass your '__Secure-next-auth.session-token' as an argument (e.g., ChatGPT.init(session_token=YOUR_TOKEN)).\"\n super().__init__(self.message)" }, { "identifier": "UnexpectedResponseError", "path": "re_gpt/errors.py", "snippet": "class UnexpectedResponseError(Exception):\n def __init__(self, original_exception, server_response):\n self.original_exception = original_exception\n self.server_response = server_response\n self.message = f\"An unexpected error occurred. Error message: {self.original_exception}.\\nThis is what the server returned: {self.server_response}.\"\n super().__init__(self.message)" }, { "identifier": "InvalidModelName", "path": "re_gpt/errors.py", "snippet": "class InvalidModelName(Exception):\n def __init__(self, model, avalible_models):\n self.model = model\n self.avalible_models = avalible_models\n self.message = f'\"{model}\" is not a valid model. Avalible models: {[model for model in avalible_models]}'\n super().__init__(self.message)" }, { "identifier": "async_get_binary_path", "path": "re_gpt/utils.py", "snippet": "async def async_get_binary_path(session):\n if binary_path is None:\n return None\n\n if not os.path.exists(funcaptcha_bin_folder_path) or not os.path.isdir(\n funcaptcha_bin_folder_path\n ):\n os.mkdir(funcaptcha_bin_folder_path)\n\n if os.path.isfile(binary_path):\n try:\n local_binary_hash = calculate_file_md5(binary_path)\n response = await session.get(latest_release_url)\n json_data = response.json()\n\n for line in json_data[\"body\"].splitlines():\n if line.startswith(current_os):\n latest_binary_hash = line.split(\"=\")[-1]\n break\n\n if local_binary_hash != latest_binary_hash:\n file_url = get_file_url(json_data)\n\n await async_download_binary(session, binary_path, file_url)\n except:\n return binary_path\n else:\n response = await session.get(latest_release_url)\n json_data = response.json()\n file_url = get_file_url(json_data)\n\n await async_download_binary(session, binary_path, file_url)\n\n return binary_path" }, { "identifier": "get_model_slug", "path": "re_gpt/utils.py", "snippet": "def get_model_slug(chat):\n for _, message in chat.get(\"mapping\", {}).items():\n if \"message\" in message:\n role = message[\"message\"][\"author\"][\"role\"]\n if role == \"assistant\":\n return message[\"message\"][\"metadata\"][\"model_slug\"]" } ]
import asyncio
import ctypes
import inspect
import json
import uuid
from typing import AsyncGenerator, Callable, Optional
from curl_cffi.requests import AsyncSession
from .errors import (
    BackendError,
    InvalidSessionToken,
    RetryError,
    TokenNotProvided,
    UnexpectedResponseError,
    InvalidModelName,
)
from .utils import async_get_binary_path, get_model_slug
1,300
# Constants
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
CHATGPT_API = "https://chat.openai.com/backend-api/{}"
BACKUP_ARKOSE_TOKEN_GENERATOR = "https://arkose-token-generator.zaieem.repl.co/token"

MODELS = {
    "gpt-4": {"slug": "gpt-4", "needs_arkose_token": True},
    "gpt-3.5": {"slug": "text-davinci-002-render-sha", "needs_arkose_token": False},
}


class AsyncConversation:
    def __init__(self, chatgpt, conversation_id=None, model=None):
        self.chatgpt = chatgpt
        self.conversation_id = conversation_id
        self.parent_id = None
        self.model = model

    async def fetch_chat(self) -> dict:
        """
        Fetches the chat of the conversation from the API.

        Returns:
            dict: The JSON response from the API containing the chat if the conversation_id is not None, else returns an empty dict.

        Raises:
            UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format
        """
        if not self.conversation_id:
            return {}

        url = CHATGPT_API.format(f"conversation/{self.conversation_id}")
        response = await self.chatgpt.session.get(
            url=url, headers=self.chatgpt.build_request_headers()
        )

        error = None
        try:
            chat = response.json()
            self.parent_id = list(chat.get("mapping", {}))[-1]
            model_slug = get_model_slug(chat)
            self.model = [
                key for key, value in MODELS.items() if value["slug"] == model_slug
            ][0]
        except Exception as e:
            error = e
        if error is not None:
# Constants
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
CHATGPT_API = "https://chat.openai.com/backend-api/{}"
BACKUP_ARKOSE_TOKEN_GENERATOR = "https://arkose-token-generator.zaieem.repl.co/token"

MODELS = {
    "gpt-4": {"slug": "gpt-4", "needs_arkose_token": True},
    "gpt-3.5": {"slug": "text-davinci-002-render-sha", "needs_arkose_token": False},
}


class AsyncConversation:
    def __init__(self, chatgpt, conversation_id=None, model=None):
        self.chatgpt = chatgpt
        self.conversation_id = conversation_id
        self.parent_id = None
        self.model = model

    async def fetch_chat(self) -> dict:
        """
        Fetches the chat of the conversation from the API.

        Returns:
            dict: The JSON response from the API containing the chat if the conversation_id is not None, else returns an empty dict.

        Raises:
            UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format
        """
        if not self.conversation_id:
            return {}

        url = CHATGPT_API.format(f"conversation/{self.conversation_id}")
        response = await self.chatgpt.session.get(
            url=url, headers=self.chatgpt.build_request_headers()
        )

        error = None
        try:
            chat = response.json()
            self.parent_id = list(chat.get("mapping", {}))[-1]
            model_slug = get_model_slug(chat)
            self.model = [
                key for key, value in MODELS.items() if value["slug"] == model_slug
            ][0]
        except Exception as e:
            error = e
        if error is not None:
raise UnexpectedResponseError(error, response.text)
4
2023-10-17 08:34:04+00:00
2k
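The list comprehension in fetch_chat above is a reverse lookup from a wire-format slug to a model name; isolated, it behaves like this:

MODELS = {
    "gpt-4": {"slug": "gpt-4", "needs_arkose_token": True},
    "gpt-3.5": {"slug": "text-davinci-002-render-sha", "needs_arkose_token": False},
}
model = [k for k, v in MODELS.items() if v["slug"] == "text-davinci-002-render-sha"][0]
assert model == "gpt-3.5"  # an unknown slug would raise IndexError instead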
qualabs/video-headline
api/tests/bills.py
[ { "identifier": "MinBillSerializer", "path": "api/serializers/bills.py", "snippet": "class MinBillSerializer(serializers.ModelSerializer):\n plan = serializers.CharField(source='plan.name')\n\n class Meta:\n model = Bill\n fields = (\n 'id',\n 'plan',\n 'date'\n )" }, { "identifier": "BillSerializer", "path": "api/serializers/bills.py", "snippet": "class BillSerializer(serializers.ModelSerializer):\n plan = PlanSerializer()\n extras = serializers.JSONField()\n\n class Meta:\n model = Bill\n fields = (\n 'id',\n 'organization',\n 'plan',\n 'date',\n 'last_modified',\n 'video_transcoding',\n 'audio_transcoding',\n 'storage',\n 'data_transfer',\n 'extras'\n )" }, { "identifier": "Bill", "path": "organization/models/bill.py", "snippet": "class Bill(models.Model):\n organization = models.ForeignKey(Organization,\n models.CASCADE,\n related_name='bills',\n verbose_name='Organization')\n plan = models.ForeignKey(Plan,\n models.PROTECT,\n null=True,\n default=None,\n related_name='bills',\n verbose_name='Plan')\n date = models.DateField(verbose_name='Creation Date')\n last_modified = models.DateTimeField(auto_now=True,\n verbose_name='Updated Date')\n video_transcoding = models.FloatField(default=0,\n verbose_name='Video Transcoding Minutes')\n audio_transcoding = models.FloatField(default=0,\n verbose_name='Audio Transcoding Minutes')\n storage = models.FloatField(default=0,\n verbose_name='Storage (GB)')\n data_transfer = models.FloatField(default=0,\n verbose_name='Traffic (GB)')\n extras = JSONField(blank=True,\n default=dict,\n verbose_name='Extra information')\n\n def __str__(self):\n date = self.date.strftime('%b-%Y')\n return f'{self.organization.name} - {date}'\n\n class Meta:\n verbose_name = 'Usage Report'\n verbose_name_plural = 'Usage Reports'\n unique_together = ('date', 'organization')\n\n def save(self, *args, **kwargs):\n self.date = self.date.replace(day=1)\n super(Bill, self).save(*args, **kwargs)\n\n def is_current_bill(self):\n today = timezone.now()\n return (today.year == self.date.year) and (today.month == self.date.month)" }, { "identifier": "create_user", "path": "test_utils.py", "snippet": "def create_user(username, password, organization):\n user = Account.objects.create_user(\n username=username,\n password=password,\n organization=organization,\n email=f'{username}@admin.com'\n )\n\n return user" }, { "identifier": "create_superuser", "path": "test_utils.py", "snippet": "def create_superuser(username, password, organization):\n su = Account.objects.create_superuser(\n username=username,\n password=password,\n organization=organization,\n email='admin@admin.com'\n )\n\n return su" }, { "identifier": "create_key", "path": "test_utils.py", "snippet": "def create_key(name, user):\n key = APIKey.objects.create(\n name=f'{name}',\n account=user,\n )\n\n return key" }, { "identifier": "create_organizations", "path": "test_utils.py", "snippet": "def create_organizations(name, org_quantity, bucket_name='', contact_email='', cf_id='',\n cf_domain='', plan=None, config=None):\n organizations = []\n for number in range(1, org_quantity + 1):\n org = Organization.objects.create(\n name=f'{name} {number}',\n bucket_name=bucket_name,\n contact_email=contact_email,\n cf_id=cf_id,\n cf_domain=cf_domain,\n plan=plan,\n config=config if config else {}\n )\n\n organizations.append(org)\n\n return organizations" }, { "identifier": "create_plans", "path": "test_utils.py", "snippet": "def create_plans(name, quantity, description='', storage=0, video_transcoding=0, audio_transcoding=0, 
data_transfer=0):\n plans = []\n for number in range(1, quantity + 1):\n plan = Plan.objects.create(\n name=f'{name} {number}',\n description=description,\n storage=storage,\n video_transcoding=video_transcoding,\n audio_transcoding=audio_transcoding,\n data_transfer=data_transfer\n )\n\n plans.append(plan)\n\n return plans" }, { "identifier": "create_bill", "path": "test_utils.py", "snippet": "def create_bill(organization, plan, month=date.today().replace(day=1), storage=0, video_transcoding=0,\n data_transfer=0):\n bill = Bill.objects.create(\n organization=organization,\n plan=plan,\n date=month,\n storage=storage,\n video_transcoding=video_transcoding,\n data_transfer=data_transfer\n )\n\n return bill" } ]
import logging
from datetime import date
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from unittest import mock
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from api.serializers import MinBillSerializer, BillSerializer
from organization.models import Bill
from test_utils import create_user, create_superuser, create_key, create_organizations, \
    create_plans, create_bill
1,415
class BillTests(APITestCase):
    @classmethod
    def setUpClass(cls):
        logging.disable(logging.WARNING)

        cls.org1, cls.org2 = create_organizations('Organization', 2)
        cls.user1 = create_user('user1', '12345678', cls.org1)
        cls.user2 = create_user('user2', '12345678', cls.org2)
        cls.su = create_superuser('admin', '12345678', cls.org1)
        cls.key = create_key('key', cls.user1)
        cls.plan1, cls.plan2 = create_plans('Plan', 2)

    def setUp(self):
        self.bill1 = create_bill(self.org1, self.plan1, date.today().replace(day=1))
        self.bill2 = create_bill(self.org2, self.plan1, date.today().replace(day=1))
        self.bill3 = create_bill(self.org1, self.plan1,
                                 date.today().replace(day=1) - relativedelta(months=1))

    def tearDown(self):
class BillTests(APITestCase):
    @classmethod
    def setUpClass(cls):
        logging.disable(logging.WARNING)

        cls.org1, cls.org2 = create_organizations('Organization', 2)
        cls.user1 = create_user('user1', '12345678', cls.org1)
        cls.user2 = create_user('user2', '12345678', cls.org2)
        cls.su = create_superuser('admin', '12345678', cls.org1)
        cls.key = create_key('key', cls.user1)
        cls.plan1, cls.plan2 = create_plans('Plan', 2)

    def setUp(self):
        self.bill1 = create_bill(self.org1, self.plan1, date.today().replace(day=1))
        self.bill2 = create_bill(self.org2, self.plan1, date.today().replace(day=1))
        self.bill3 = create_bill(self.org1, self.plan1,
                                 date.today().replace(day=1) - relativedelta(months=1))

    def tearDown(self):
Bill.objects.all().delete()
2
2023-10-17 19:44:32+00:00
2k
LAION-AI/Text-to-speech
modules/audio_superres.py
[ { "identifier": "Base", "path": "modules/common.py", "snippet": "class Base:\n MODEL_CHOICES = {}\n\n def __init__(\n self,\n model_choice: str,\n sampling_rate: int = 16000,\n padding: Union[bool, str] = True,\n max_length: Optional[int] = None,\n pad_to_multiple_of: Optional[int] = None,\n max_audio_len: int = 5,\n **kwargs,\n ) -> None:\n self.model_choice = model_choice.lower()\n assert (\n self.model_choice in self.MODEL_CHOICES\n ), f\"Unrecognized model choice {self.model_choice}\"\n model = self.MODEL_CHOICES[self.model_choice]\n if isinstance(model, dict):\n self.model = {}\n for key, value in model.items():\n if key in [\"target\"]:\n continue\n self.model[key] = value(**kwargs)\n elif isinstance(model, partial):\n self.model = model(**kwargs)\n else:\n raise NotImplementedError(\"Not sure how to handle this model choice\")\n\n self.sampling_rate = sampling_rate\n self.padding = padding\n self.max_length = max_length\n self.pad_to_multiple_of = pad_to_multiple_of\n self.max_audio_len = max_audio_len\n\n self.__post__init__()\n\n def __post__init__(self):\n for key, value in self.MODEL_CHOICES.items():\n if (\n isinstance(value, dict)\n and \"target\" in value\n and isinstance(value[\"target\"], str)\n ):\n self.MODEL_CHOICES[key][\"target\"] = getattr(self, value[\"target\"])\n\n @abstractmethod\n def predict(self, **kwargs):\n self.model(**kwargs)\n\n def __call__(\n self, audio_path: str = None, audio: torch.Tensor = None, **kwargs\n ) -> Any:\n assert exists(audio_path) or exists(\n audio\n ), \"Either audio_path or audio tensor is required\"\n if isinstance(self.model, dict):\n prediction = self.MODEL_CHOICES[self.model_choice][\"target\"](\n audio_path=audio_path, audio=audio, **kwargs\n )\n else:\n prediction = self.predict(audio_path=audio_path, audio=audio, **kwargs)\n return prediction\n\n def save_to_file(self, audio, sr, save_dir, start_dur=None, stop_dur=None):\n # Handling audio with more than 2 dimensions\n if audio.ndim > 2:\n print(f\"Warning: Audio has {audio.ndim} dimensions, averaging over channels for simplicity.\")\n audio = torch.mean(audio, dim=-1)\n\n if exists(start_dur):\n start_sample = round(start_dur * sr)\n audio = audio[start_sample:]\n \n if exists(stop_dur):\n stop_sample = round(stop_dur * sr)\n audio = audio[:stop_sample]\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n if audio.ndim == 1:\n audio = audio.unsqueeze(0)\n\n save_path = (\n os.path.join(save_dir, f\"{str(uuid4())}.wav\")\n if not os.path.splitext(save_dir)[-1]\n else save_dir\n )\n audio_ops.save_audio(wav=audio, path=save_path, sr=sr)\n return save_path" }, { "identifier": "load_audiosr", "path": "modules/audio_superres_utils.py", "snippet": "def load_audiosr(args):\n return build_model(args.model_name, device=args.device)" }, { "identifier": "settings", "path": "config/conf.py", "snippet": "DIR_PATH = osp.dirname(osp.realpath(__file__))\nROOT_PATH = osp.abspath(osp.join(osp.dirname(__file__), \"..\" + osp.sep))" } ]
from os import path as osp
from pathlib import Path
from audiosr import super_resolution
from functools import partial
from .common import Base
from modules.audio_superres_utils import load_audiosr
from voicefixer import VoiceFixer
from config import settings
import os
import argparse
958
cache_dir = osp.join(settings.CACHE_DIR, "weights", "enhancement")


class SuperResAudio(Base):
    MODEL_CHOICES = {
        "audiosr": {
            "model": partial(
cache_dir = osp.join(settings.CACHE_DIR, "weights", "enhancement")


class SuperResAudio(Base):
    MODEL_CHOICES = {
        "audiosr": {
            "model": partial(
load_audiosr,
1
2023-10-18 06:09:40+00:00
2k
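Base.__init__ in the record's context instantiates MODEL_CHOICES entries by calling value(**kwargs), so wrapping load_audiosr in partial defers the expensive model load until then; a toy equivalent of that deferral (names here are illustrative only):

from functools import partial

def load_model(device="cpu"):
    return f"model on {device}"

entry = partial(load_model)   # stored in a registry without running
model = entry(device="cuda")  # constructed later, with runtime kwargs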
Qualcomm-AI-research/geometric-algebra-transformer
gatr/primitives/invariants.py
[ { "identifier": "_load_bilinear_basis", "path": "gatr/primitives/bilinear.py", "snippet": "@lru_cache()\ndef _load_bilinear_basis(\n kind: str, device=torch.device(\"cpu\"), dtype=torch.float32\n) -> torch.Tensor:\n \"\"\"Loads basis elements for Pin-equivariant bilinear maps between multivectors.\n\n This function is cached.\n\n Parameters\n ----------\n kind : {\"gp\", \"outer\"}\n Filename of the basis file, assumed to be found in __file__ / data\n device : torch.Device or str\n Device\n dtype : torch.Dtype\n Data type\n\n Returns\n -------\n basis : torch.Tensor with shape (num_basis_elements, 16, 16, 16)\n Basis elements for bilinear equivariant maps between multivectors.\n \"\"\"\n\n # To avoid duplicate loading, base everything on float32 CPU version\n if device not in [torch.device(\"cpu\"), \"cpu\"] and dtype != torch.float32:\n basis = _load_bilinear_basis(kind)\n else:\n filename = Path(__file__).parent.resolve() / \"data\" / _FILENAMES[kind]\n sparse_basis = torch.load(filename).to(torch.float32)\n # Convert to dense tensor\n # The reason we do that is that einsum is not defined for sparse tensors\n basis = sparse_basis.to_dense()\n\n return basis.to(device=device, dtype=dtype)" }, { "identifier": "_compute_reversal", "path": "gatr/primitives/linear.py", "snippet": "@lru_cache()\ndef _compute_reversal(device=torch.device(\"cpu\"), dtype=torch.float32) -> torch.Tensor:\n \"\"\"Constructs a matrix that computes multivector reversal.\n\n Parameters\n ----------\n device : torch.device\n Device\n dtype : torch.dtype\n Dtype\n\n Returns\n -------\n reversal_diag : torch.Tensor with shape (16,)\n The diagonal of the reversal matrix, consisting of +1 and -1 entries.\n \"\"\"\n reversal_flat = torch.ones(16, device=device, dtype=dtype)\n reversal_flat[5:15] = -1\n return reversal_flat" }, { "identifier": "grade_project", "path": "gatr/primitives/linear.py", "snippet": "def grade_project(x: torch.Tensor) -> torch.Tensor:\n \"\"\"Projects an input tensor to the individual grades.\n\n The return value is a single tensor with a new grade dimension.\n\n NOTE: this primitive is not used widely in our architectures.\n\n Parameters\n ----------\n x : torch.Tensor with shape (..., 16)\n Input multivector.\n\n Returns\n -------\n outputs : torch.Tensor with shape (..., 5, 16)\n Output multivector. The second-to-last dimension indexes the grades.\n \"\"\"\n\n # Select kernel on correct device\n basis = _compute_pin_equi_linear_basis(device=x.device, dtype=x.dtype, normalize=False)\n\n # First five basis elements are grade projections\n basis = basis[:5]\n\n # Project to grades\n projections = cached_einsum(\"g i j, ... j -> ... g i\", basis, x)\n\n return projections" }, { "identifier": "cached_einsum", "path": "gatr/utils/einsum.py", "snippet": "def cached_einsum(equation: str, *operands: torch.Tensor) -> torch.Tensor:\n \"\"\"Computes einsum with a cached optimal contraction.\n\n Inspired by upstream\n https://github.com/pytorch/pytorch/blob/v1.13.0/torch/functional.py#L381.\n \"\"\"\n op_shape = tuple(op.shape for op in operands)\n path = _get_cached_path_for_equation_and_shapes(equation=equation, op_shape=op_shape)\n\n return custom_einsum(equation, *operands, path=path)" } ]
from functools import lru_cache
from gatr.primitives.bilinear import _load_bilinear_basis
from gatr.primitives.linear import _compute_reversal, grade_project
from gatr.utils.einsum import cached_einsum
import torch
import torch.linalg
1,238
# Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.


@lru_cache()
def compute_inner_product_mask(device=torch.device("cpu")) -> torch.Tensor:
    """Constructs a bool array for the inner product calculation.

    The inner product of MVs is <~x y>_0, i.e. take the grade-0 component of the
    geometric product of the reverse of x with y.

    Both the scalar component of the GP, and the reversal matrix, are diagonal.
    Their product is 0 for basis elements involving e0, and 1 elsewhere, i.e.
    IP = [1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0]
    for dim order
    '', 'e0', 'e1', 'e2', 'e3', 'e01', 'e02', 'e03', 'e12', 'e13', 'e23',
    'e012', 'e013', 'e023', 'e123', 'e0123'

    Parameters
    ----------
    device : torch.device
        Device

    Returns
    -------
    ip_mask : torch.Tensor with shape (16,)
        Inner product mask
    """
    gp = _load_bilinear_basis("gp", device=device, dtype=torch.float32)
# Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.


@lru_cache()
def compute_inner_product_mask(device=torch.device("cpu")) -> torch.Tensor:
    """Constructs a bool array for the inner product calculation.

    The inner product of MVs is <~x y>_0, i.e. take the grade-0 component of the
    geometric product of the reverse of x with y.

    Both the scalar component of the GP, and the reversal matrix, are diagonal.
    Their product is 0 for basis elements involving e0, and 1 elsewhere, i.e.
    IP = [1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0]
    for dim order
    '', 'e0', 'e1', 'e2', 'e3', 'e01', 'e02', 'e03', 'e12', 'e13', 'e23',
    'e012', 'e013', 'e023', 'e123', 'e0123'

    Parameters
    ----------
    device : torch.device
        Device

    Returns
    -------
    ip_mask : torch.Tensor with shape (16,)
        Inner product mask
    """
    gp = _load_bilinear_basis("gp", device=device, dtype=torch.float32)
inner_product_mask = torch.diag(gp[0]) * _compute_reversal(device=device, dtype=torch.float32)
1
2023-10-23 15:58:36+00:00
2k
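The docstring above pins down the expected mask exactly, so a direct check is possible; this hypothetical assertion assumes the function returns a bool tensor, as the docstring says:

import torch
from gatr.primitives.invariants import compute_inner_product_mask

expected = torch.tensor([1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0], dtype=torch.bool)
assert torch.equal(compute_inner_product_mask(), expected)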
StanislavPetrovV/Wolfenstein-3D-Clone
game_objects/weapon.py
[ { "identifier": "GameObject", "path": "game_objects/game_object.py", "snippet": "class GameObject:\n def __init__(self, level_map, tex_id, x, z):\n self.eng = level_map.eng\n self.app = self.eng.app\n self.tex_id = tex_id\n #\n self.pos = glm.vec3(x + H_WALL_SIZE, 0, z + H_WALL_SIZE) # center of the tile\n self.rot = 0\n self.scale = glm.vec3(1)\n #\n self.m_model: glm.mat4 = None\n\n def get_model_matrix(self):\n m_model = glm.translate(glm.mat4(), self.pos)\n m_model = glm.rotate(m_model, self.rot, glm.vec3(0, 1, 0))\n m_model = glm.scale(m_model, self.scale)\n return m_model" }, { "identifier": "QuadMesh", "path": "meshes/quad_mesh.py", "snippet": "class QuadMesh:\n def __init__(self, eng, shader_program):\n self.eng = eng\n self.ctx = eng.ctx\n self.program = shader_program\n\n self.vbo_format = '4f 2f'\n self.vbo_attrs = ('in_position', 'in_uv')\n self.vao = self.get_vao()\n\n def get_vao(self):\n vertex_data = self.get_vertex_data()\n vbo = self.ctx.buffer(vertex_data)\n vao = self.ctx.vertex_array(\n self.program,\n [\n (vbo, self.vbo_format, *self.vbo_attrs)\n ],\n skip_errors=True\n )\n return vao\n\n def render(self):\n self.vao.render()\n\n def get_vertex_data(self):\n vert_position = (\n [-0.5, 0.0, 0.0, 1.0], [-0.5, 1.0, 0.0, 1.0],\n [ 0.5, 1.0, 0.0, 1.0], [ 0.5, 0.0, 0.0, 1.0]\n )\n\n uv_coords = (\n [1, 1], [1, 0], [0, 0], [0, 1]\n )\n\n vert_indices = [\n 0, 2, 1, 0, 3, 2\n ]\n\n vert_data = []\n for vert_index in vert_indices:\n vert_data += vert_position[vert_index]\n vert_data += uv_coords[vert_index]\n\n vert_data = np.array(vert_data, dtype='float32')\n return vert_data" } ]
from game_objects.game_object import GameObject
from meshes.quad_mesh import QuadMesh
from settings import *
748
class Weapon:
    def __init__(self, eng):
        self.eng = eng
        self.app = eng.app
        # refer to the player
        self.player = self.eng.player
        self.weapon_id = self.player.weapon_id
        self.player.weapon_instance = self
        #
        self.pos = WEAPON_POS
        self.rot = 0
        self.scale = glm.vec3(WEAPON_SCALE / ASPECT_RATIO, WEAPON_SCALE, 0)
class Weapon:
    def __init__(self, eng):
        self.eng = eng
        self.app = eng.app
        # refer to the player
        self.player = self.eng.player
        self.weapon_id = self.player.weapon_id
        self.player.weapon_instance = self
        #
        self.pos = WEAPON_POS
        self.rot = 0
        self.scale = glm.vec3(WEAPON_SCALE / ASPECT_RATIO, WEAPON_SCALE, 0)
self.m_model = GameObject.get_model_matrix(self)
0
2023-10-22 08:41:55+00:00
2k
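The record's next_line borrows GameObject.get_model_matrix by passing self explicitly instead of inheriting; a toy illustration of that unbound-call pattern:

class A:
    def describe(self):
        return f"pos={self.pos}"

class B:
    def __init__(self):
        self.pos = (1, 2)
        self.label = A.describe(self)  # works: B supplies the attribute A.describe reads

assert B().label == "pos=(1, 2)"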
tomguluson92/cloth2tex
lib/deformation_graph.py
[ { "identifier": "generate_transform_matrices", "path": "lib/mesh_sampling.py", "snippet": "def generate_transform_matrices(mesh, factors):\n \"\"\"Generates len(factors) meshes, each of them is scaled by factors[i] and\n computes the transformations between them.\n Returns:\n M: a set of meshes downsampled from mesh by a factor specified in factors.\n A: Adjacency matrix for each of the meshes\n D: Downsampling transforms between each of the meshes\n U: Upsampling transforms between each of the meshes\n \"\"\"\n\n factors = map(lambda x: 1.0 / x, factors)\n M, A, D = [], [], []\n # M, A, D, U = [], [], [], []\n A.append(get_vert_connectivity(mesh.v, mesh.f).tocoo())\n M.append(mesh)\n\n for i,factor in enumerate(factors):\n ds_f, ds_D = qslim_decimator_transformer(M[-1], factor=factor)\n D.append(ds_D.tocoo())\n new_mesh_v = ds_D.dot(M[-1].v)\n new_mesh = Mesh(v=new_mesh_v, f=ds_f)\n M.append(new_mesh)\n A.append(get_vert_connectivity(new_mesh.v, new_mesh.f).tocoo())\n return M, A, D\n # return M, A, D, U" }, { "identifier": "generate_transform_matrices_coma", "path": "lib/mesh_sampling.py", "snippet": "def generate_transform_matrices_coma(mesh, factors):\n \"\"\"Generates len(factors) meshes, each of them is scaled by factors[i] and\n computes the transformations between them.\n Returns:\n M: a set of meshes downsampled from mesh by a factor specified in factors.\n A: Adjacency matrix for each of the meshes\n D: csc_matrix Downsampling transforms between each of the meshes\n U: Upsampling transforms between each of the meshes\n F: a list of faces\n \"\"\"\n\n factors = map(lambda x: 1.0 / x, factors)\n M, A, D, U, F = [], [], [], [], []\n F.append(mesh.f) # F[0]\n A.append(get_vert_connectivity(mesh.v, mesh.f).astype('float32')) # A[0]\n M.append(mesh) # M[0]\n\n for factor in factors:\n ds_f, ds_D = qslim_decimator_transformer(M[-1], factor=factor)\n D.append(ds_D.astype('float32'))\n new_mesh_v = ds_D.dot(M[-1].v)\n new_mesh = Mesh(v=new_mesh_v, f=ds_f)\n F.append(new_mesh.f)\n M.append(new_mesh)\n A.append(\n get_vert_connectivity(new_mesh.v, new_mesh.f).tocoo())\n U.append(setup_deformation_transfer(M[-1], M[-2]).astype('float32'))\n\n return M, A, D, U, F" }, { "identifier": "col", "path": "lib/utils_dg.py", "snippet": "def col(A):\n return A.reshape((-1, 1))" }, { "identifier": "batch_rodrigues", "path": "lib/utils_dg.py", "snippet": "def batch_rodrigues(axisang):\n # This function is borrowed from https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py#L37\n # axisang N x 3\n axisang_norm = torch.norm(axisang + 1e-8, p=2, dim=1)\n angle = torch.unsqueeze(axisang_norm, -1)\n axisang_normalized = torch.div(axisang, angle)\n angle = angle * 0.5\n v_cos = torch.cos(angle)\n v_sin = torch.sin(angle)\n quat = torch.cat([v_cos, v_sin * axisang_normalized], dim=1)\n rot_mat = quat2mat(quat)\n # rot_mat = rot_mat.view(rot_mat.shape[0], 9)\n return rot_mat" } ]
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.autograd.functional as F
import pickle
from scipy.spatial import KDTree
from psbody.mesh import Mesh
from .mesh_sampling import generate_transform_matrices, generate_transform_matrices_coma
from .utils_dg import col, batch_rodrigues
from pytorch3d.io import load_obj, load_objs_as_meshes, save_obj
1,273
# coding: UTF-8

"""
    @date:  2023.02.21-28 week8-9
    @func:  deformation graph.
"""

eps = sys.float_info.epsilon  # 2.220446049250313e-16


class DeformationGraph(nn.Module):

    def __init__(self, vert_number=9648, radius=0.015, k=9, sampling_strategy='qslim'):
        super().__init__()

        self.radius = radius
        self.k = k
        self.max_neigh_num = 40
        self.sampling_strategy = sampling_strategy
        self.one_ring_neigh = []
        self.nodes_idx = None
        self.weights = None
        self.influence_nodes_idx = []
        self.dists = []
        self.vert_number = vert_number

    def construct_graph(self, category_name, vertices=None, faces=None):
        transform_fp = "transform_{}.pkl".format(category_name)
        if self.sampling_strategy == 'qslim':
            m = Mesh(v=vertices, f=faces)
            if os.path.exists(transform_fp):
                with open(transform_fp, 'rb') as f:
                    tmp = pickle.load(f, encoding='latin1')
                    M, A, D = tmp['M'], tmp['A'], tmp['D']
            else:
# coding: UTF-8

"""
    @date:  2023.02.21-28 week8-9
    @func:  deformation graph.
"""

eps = sys.float_info.epsilon  # 2.220446049250313e-16


class DeformationGraph(nn.Module):

    def __init__(self, vert_number=9648, radius=0.015, k=9, sampling_strategy='qslim'):
        super().__init__()

        self.radius = radius
        self.k = k
        self.max_neigh_num = 40
        self.sampling_strategy = sampling_strategy
        self.one_ring_neigh = []
        self.nodes_idx = None
        self.weights = None
        self.influence_nodes_idx = []
        self.dists = []
        self.vert_number = vert_number

    def construct_graph(self, category_name, vertices=None, faces=None):
        transform_fp = "transform_{}.pkl".format(category_name)
        if self.sampling_strategy == 'qslim':
            m = Mesh(v=vertices, f=faces)
            if os.path.exists(transform_fp):
                with open(transform_fp, 'rb') as f:
                    tmp = pickle.load(f, encoding='latin1')
                    M, A, D = tmp['M'], tmp['A'], tmp['D']
            else:
M, A, D = generate_transform_matrices(m, [20, 20])
0
2023-10-17 11:30:53+00:00
2k
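construct_graph above caches the qslim transform matrices in transform_<category>.pkl; the compute-once-then-load pattern it uses, in minimal form (names illustrative only):

import os
import pickle

def cached(path, compute):
    if os.path.exists(path):
        with open(path, "rb") as f:
            return pickle.load(f)  # reuse the previously pickled result
    value = compute()
    with open(path, "wb") as f:
        pickle.dump(value, f)  # pay the cost once, cache for the next run
    return value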
amazon-science/cceval
eval_metric.py
[ { "identifier": "postprocess_code_lines", "path": "eval_utils.py", "snippet": "def postprocess_code_lines(prompt, completion, parser, lang):\n try:\n if lang in [\"java\", \"csharp\", \"typescript\"]:\n return get_bracket_lang_statement(completion)\n elif lang == \"python\":\n return get_python_one_statement(prompt, completion, parser)\n except Exception as e:\n return completion" }, { "identifier": "extract_identifiers", "path": "eval_utils.py", "snippet": "def extract_identifiers(source_code, lang):\n # the main idea is to remove String from a source code\n # then, tokenize the code to get all words and match with identifier regular expression\n # check if it is a language specific keyword, it not, then it is an identifier\n source_code_without_strings = re.sub(string_pattern, '', source_code)\n _ids = [t for t in code_tokenizer.tokenize(source_code_without_strings) if is_identifier(t, lang)]\n return _ids" }, { "identifier": "cal_edit_sim", "path": "eval_utils.py", "snippet": "def cal_edit_sim(references, hypotheses):\n total = len(references)\n edit_sim = 0.0\n for pred, gt in zip(hypotheses, references):\n pred = pred.strip()\n gt = gt.strip()\n edit_sim += fuzz.ratio(pred, gt)\n return edit_sim / total" }, { "identifier": "remove_comments", "path": "eval_utils.py", "snippet": "def remove_comments(code):\n code = re.sub(r'#.*', '', code)\n code = re.sub(r'//.*', '', code)\n return code" } ]
import json
import torch.multiprocessing as mp
from functools import partial
from tqdm import tqdm
from tree_sitter import Language, Parser
from eval_utils import (
    postprocess_code_lines,
    extract_identifiers,
    cal_edit_sim,
    remove_comments
)
658
parser = None


def compute_id_match(pred_ids, target_ids):
    pred_ids = list(set(pred_ids))
    target_ids = list(set(target_ids))
    tp = 0
    fp = 0
    fn = 0
    for pid in pred_ids:
        if pid in target_ids:
            tp += 1
        else:
            fp += 1
    for tid in target_ids:
        if tid not in pred_ids:
            fn += 1
    return tp, fp, fn


def compute_edit_sim(samples):
    refs, hyps = [], []
    for s in samples:
        refs.append(s["target"])
        hyps.append(s["pred"])
    return cal_edit_sim(refs, hyps)


def process_examples(lang, args):
    sample, ex = args
    global parser
    prediction = postprocess_code_lines(ex["prompt"], sample["pred"], parser, lang)
    prediction = remove_comments(prediction)
    target = ex["groundtruth"]
    target = remove_comments(target)

    pred_lines = [l.strip() for l in prediction.split("\n") if l.strip()]
    gt_lines = [l.strip() for l in target.split("\n") if l.strip()]
    em_label = int(pred_lines == gt_lines)
parser = None


def compute_id_match(pred_ids, target_ids):
    pred_ids = list(set(pred_ids))
    target_ids = list(set(target_ids))
    tp = 0
    fp = 0
    fn = 0
    for pid in pred_ids:
        if pid in target_ids:
            tp += 1
        else:
            fp += 1
    for tid in target_ids:
        if tid not in pred_ids:
            fn += 1
    return tp, fp, fn


def compute_edit_sim(samples):
    refs, hyps = [], []
    for s in samples:
        refs.append(s["target"])
        hyps.append(s["pred"])
    return cal_edit_sim(refs, hyps)


def process_examples(lang, args):
    sample, ex = args
    global parser
    prediction = postprocess_code_lines(ex["prompt"], sample["pred"], parser, lang)
    prediction = remove_comments(prediction)
    target = ex["groundtruth"]
    target = remove_comments(target)

    pred_lines = [l.strip() for l in prediction.split("\n") if l.strip()]
    gt_lines = [l.strip() for l in target.split("\n") if l.strip()]
    em_label = int(pred_lines == gt_lines)
pred_ids = extract_identifiers(prediction, lang)
1
2023-10-16 04:23:03+00:00
2k
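compute_id_match above returns raw counts; the aggregation into precision/recall/F1 happens outside the cropped window, but from the counts it is the standard arithmetic:

tp, fp, fn = compute_id_match(["a", "b"], ["b", "c"])  # -> (1, 1, 1)
precision = tp / (tp + fp)  # 0.5
recall = tp / (tp + fn)  # 0.5
f1 = 2 * precision * recall / (precision + recall)  # 0.5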
uukuguy/multi_loras
multi_loras/__main__.py
[ { "identifier": "do_extract_lora", "path": "multi_loras/extract_lora.py", "snippet": "def do_extract_lora(args):\n # Load base model and tuned model\n model_kwargs = prepare_model_kwargs(args)\n base_model = load_model_and_init_lora(args, args.base_model_name_or_path, model_kwargs)\n tuned_model = load_model_and_init_lora(args, args.tuned_model_name_or_path, model_kwargs)\n\n bits = args.bits\n num_base_lora_modules = get_lora_modules_count(base_model, bits)\n num_tuned_lora_modules = get_lora_modules_count(tuned_model, bits)\n assert num_base_lora_modules == num_tuned_lora_modules, f\"{num_base_lora_modules=}, {num_tuned_lora_modules=}\"\n pbar = tqdm(zip(_iter_lora(base_model, bits), _iter_lora(tuned_model, bits)), \n total=num_base_lora_modules, ncols=120, desc=\"SVD\")\n\n rank = args.lora_r\n clamp_quantile = args.clamp_quantile\n device = base_model.device\n dtype = base_model.dtype\n\n for (name_base, lora_base), (name_tuned, lora_tune) in pbar:\n assert name_base == name_tuned, f\"name_base={name_base} != name_tuned={name_tuned}\"\n\n residual = lora_tune.weight.data - lora_base.weight.data\n pbar.set_postfix({\"layer\": name_base.replace(\"base_model.model.\", \"\"), \"shape\": residual.shape})\n\n # SVD on residual\n U, Vh = svd_distill(residual, rank=rank, clamp_quantile=clamp_quantile)\n\n assert lora_base.lora_A.default.weight.shape == Vh.shape, f\"{lora_base=}\"\n assert lora_base.lora_B.default.weight.shape == U.shape, f\"{lora_base=}\"\n\n lora_base.lora_A.default.weight.data = Vh.to(device=device, dtype=dtype)\n lora_base.lora_B.default.weight.data = U.to(device=device, dtype=dtype)\n\n # Save the distilled model\n print(f\"Saving peft model to {args.save_path} ...\")\n base_model.save_pretrained(args.save_path)\n print(f\"Save done.\")" }, { "identifier": "do_merge_lora", "path": "multi_loras/merge_peft_adapters.py", "snippet": "def merge_peft_adapters(base_model_name_or_path, peft_model_path, merged_model_name_or_path=None, push_to_hub=False):\ndef main():" }, { "identifier": "do_dare", "path": "multi_loras/dare.py", "snippet": "def do_dare(args):\n \"\"\"\n This function is used to do drop and rescale for the tuned model\n \"\"\"\n print(f\"Loading base model from {args.base_model_name_or_path} ...\")\n base_model = AutoModelForCausalLM.from_pretrained(\n args.base_model_name_or_path, device_map=args.device_map, trust_remote_code=True\n ).half()\n print(f\"Loading tuned model from {args.tuned_model_name_or_path} ...\")\n tuned_model = AutoModelForCausalLM.from_pretrained(\n args.tuned_model_name_or_path,\n device_map=args.device_map,\n trust_remote_code=True,\n ).half()\n tokenizer = AutoTokenizer.from_pretrained(args.tuned_model_name_or_path, trust_remote_code=True)\n\n dare_kwargs = {\n \"weight_mask_rate\": args.dare_weight_mask_rate,\n \"use_weight_rescale\": args.dare_use_weight_rescale,\n \"mask_strategy\": args.dare_mask_strategy,\n \"scaling_coefficient\": args.dare_scaling_coefficient,\n }\n print(\n f\"Do drop and rescale with {dare_kwargs=} with {args.tuned_model_name_or_path} ...\"\n )\n model_weights = drop_and_rescale_model(\n tuned_model=tuned_model,\n base_model=base_model,\n **dare_kwargs,\n )\n copy_params_to_model(model_weights, base_model)\n print(f\"Saving model to {args.save_path} ...\")\n tokenizer.save_pretrained(args.save_path)\n base_model.save_pretrained(args.save_path)\n\n print(f\"Saved model to {args.save_path}\")" }, { "identifier": "do_delta_weights", "path": "multi_loras/delta_weights.py", "snippet": "def do_delta_weights(args):\n 
\"\"\"\n Compute the delta weights between two models and save the delta weights to a file\n \"\"\"\n base_model, tuned_model = load_models(args)\n\n delta_weights = DeltaWeights(base_model=base_model, tuned_model=tuned_model)\n print(f\"Saving delta weights to {args.save_path} ...\")\n torch.save(delta_weights.params_dict, args.save_path)\n\n print(f\"Succesfully saved delta weights to {args.save_path}\")" }, { "identifier": "do_orthogonal", "path": "multi_loras/delta_weights.py", "snippet": "def do_orthogonal(args):\n base_model, tuned_model = load_models(args)\n\n print(f\"Calculating orthogonal component ...\")\n base_params = get_model_params(base_model)\n tuned_params = get_model_params(tuned_model)\n\n orthogonal_params = {}\n for key, tuned_weights in tqdm(tuned_params.items(), ncols=100, desc=f\"Orthogonal\"):\n base_weights = base_params[key]\n tuned_weights = tuned_weights.detach().cpu().numpy()\n base_weights = base_weights.detach().cpu().numpy()\n orthogonal_weights =calculate_orthogonal_component(base_weights, tuned_weights, scaling_factor=args.orthogonal_scaling_factor)\n orthogonal_params[key] = torch.tensor(orthogonal_weights)\n\n print(f\"Combining orthogonal component with pretrained model ...\")\n delta_weights = DeltaWeights(params_dict=orthogonal_params)\n new_model_weights = delta_weights.combine_with_pretrained_model(base_model)\n copy_params_to_model(new_model_weights, base_model)\n\n print(f\"Saving model to {args.save_path} ...\")\n tokenizer = AutoTokenizer.from_pretrained(args.tuned_model_name_or_path, trust_remote_code=True)\n tokenizer.save_pretrained(args.save_path)\n base_model.save_pretrained(args.save_path)\n\n print(f\"Saved model to {args.save_path}\")\n\n # delta_weights = DeltaWeights(base_model=base_model, tuned_model=tuned_model)\n # print(f\"Saving delta weights layer params to {args.save_path} ...\")\n # delta_weights.save(args.save_path)\n\n # print(f\"Succesfully saved delta weights layer params to {args.save_path}\")" } ]
from .extract_lora import do_extract_lora
from .merge_peft_adapters import do_merge_lora
from .dare import do_dare
from .delta_weights import do_delta_weights, do_orthogonal
from argparse import ArgumentParser
1,569
#!/usr/bin/env python

cmd_functions = {
    "extract_lora": do_extract_lora,
    "merge_lora": do_merge_lora,
    "drop_and_rescale": do_dare,
#!/usr/bin/env python

cmd_functions = {
    "extract_lora": do_extract_lora,
    "merge_lora": do_merge_lora,
    "drop_and_rescale": do_dare,
"delta_weights": do_delta_weights,
3
2023-10-16 02:39:47+00:00
2k
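do_extract_lora above delegates to svd_distill, whose body is not shown; a minimal sketch of the low-rank step it presumably performs (shapes, rank, and the omitted quantile clamping are assumptions):

import torch

residual = torch.randn(768, 768)  # tuned weights minus base weights
U, S, Vh = torch.linalg.svd(residual, full_matrices=False)
rank = 16
U_r = U[:, :rank] * S[:rank]  # plays the role of lora_B
Vh_r = Vh[:rank, :]  # plays the role of lora_A
approx = U_r @ Vh_r  # best rank-16 approximation of the residual (Eckart-Young)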
myshell-ai/AIlice
ailice/prompts/APromptSearchEngine.py
[ { "identifier": "config", "path": "ailice/common/AConfig.py", "snippet": "class AConfig():\n def __init__(self):\n def Initialize(self, needOpenaiGPTKey = False):\n def Load(self, configFile: str) -> dict:\n def Store(self, configFile: str):" }, { "identifier": "GenerateRE4FunctionCalling", "path": "ailice/prompts/ARegex.py", "snippet": "def GenerateRE4FunctionCalling(signature: str, faultTolerance: bool = False) -> str:\n #signature: \"FUNC<!|ARG1: ARG1_TYPE, ARG2: ARG2_TYPE...|!> -> RETURN_TYPE\"\n pattern = r\"(\\w+)<!\\|((?:\\w+[ ]*:[ ]*[\\w, ]+)*)\\|!>(?:[ ]*->[ ]*)(\\w+)\"\n matches = re.search(pattern, signature)\n if matches is None:\n print(\"signature invalid. exit. \", signature)\n exit()\n funcName, args, retType = matches[1], matches[2], matches[3]\n \n pattern = r\"(\\w+)[ ]*:[ ]*(\\w+)\"\n typePairs = re.findall(pattern, args)\n \n reMap = {k: v for k,v in ARegexMap.items()}\n reMap[\"str\"] = r\"(?:.*(?=\\|!>))\" if faultTolerance else ARegexMap['str']\n patternArgs = '[ ]*,[ ]*'.join([f\"(?:({arg}|\\\"{arg}\\\"|\\'{arg}\\')[ ]*[:=][ ]*)?(?P<{arg}>({reMap[tp]}))\" for arg,tp in typePairs])\n return rf\"!{funcName}<!\\|[ ]*{patternArgs}[ ]*\\|!>\"" }, { "identifier": "ConstructOptPrompt", "path": "ailice/prompts/ATools.py", "snippet": "def ConstructOptPrompt(func, low:int, high: int, maxLen: int) -> str:\n prompt = None\n n = None\n while low <= high:\n mid = (low + high) // 2\n p, length = func(mid)\n if length < maxLen:\n n = mid\n prompt = p\n low = mid + 1\n else:\n high = mid - 1\n return prompt, n" } ]
from importlib.resources import read_text
from ailice.common.AConfig import config
from ailice.prompts.ARegex import GenerateRE4FunctionCalling
from ailice.prompts.ATools import ConstructOptPrompt
1,185
class APromptSearchEngine():
    PROMPT_NAME = "search-engine"

    def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):
        self.processor = processor
        self.conversations = conversations
        self.formatter = formatter
        self.outputCB = outputCB
        self.prompt0 = read_text("ailice.prompts", "prompt_searchengine.txt")
        self.PATTERNS = {"QUERY": [{"re": GenerateRE4FunctionCalling("QUERY<!|request: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "ARXIV": [{"re": GenerateRE4FunctionCalling("ARXIV<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWNARXIV": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNARXIV<!||!> -> str", faultTolerance = True), "isEntry": True}],
                         "GOOGLE": [{"re": GenerateRE4FunctionCalling("GOOGLE<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWNGOOGLE": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNGOOGLE<!||!> -> str", faultTolerance = True), "isEntry": True}],
                         "DUCKDUCKGO": [{"re": GenerateRE4FunctionCalling("DUCKDUCKGO<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWNDUCKDUCKGO": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNDUCKDUCKGO<!||!> -> str", faultTolerance = True), "isEntry": True}],
                         "BROWSE": [{"re": GenerateRE4FunctionCalling("BROWSE<!|url: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWN": [{"re": GenerateRE4FunctionCalling("SCROLLDOWN<!||!> -> str"), "isEntry": True}],
                         "RESPOND": [{"re": GenerateRE4FunctionCalling("RESPOND<!|message: str|!> -> None", faultTolerance = True), "isEntry": True}]}
        self.ACTIONS = {}
        return

    def Reset(self):
        return

    def GetPatterns(self):
        return self.PATTERNS

    def GetActions(self):
        return self.ACTIONS

    def ParameterizedBuildPrompt(self, n: int):
        prompt = f"""
{self.prompt0}

End of general instructions.

"""
        #prompt += "Conversations:"
        ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))
        return ret, self.formatter.Len(ret)

    def BuildPrompt(self):
class APromptSearchEngine():
    PROMPT_NAME = "search-engine"

    def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):
        self.processor = processor
        self.conversations = conversations
        self.formatter = formatter
        self.outputCB = outputCB
        self.prompt0 = read_text("ailice.prompts", "prompt_searchengine.txt")
        self.PATTERNS = {"QUERY": [{"re": GenerateRE4FunctionCalling("QUERY<!|request: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "ARXIV": [{"re": GenerateRE4FunctionCalling("ARXIV<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWNARXIV": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNARXIV<!||!> -> str", faultTolerance = True), "isEntry": True}],
                         "GOOGLE": [{"re": GenerateRE4FunctionCalling("GOOGLE<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWNGOOGLE": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNGOOGLE<!||!> -> str", faultTolerance = True), "isEntry": True}],
                         "DUCKDUCKGO": [{"re": GenerateRE4FunctionCalling("DUCKDUCKGO<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWNDUCKDUCKGO": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNDUCKDUCKGO<!||!> -> str", faultTolerance = True), "isEntry": True}],
                         "BROWSE": [{"re": GenerateRE4FunctionCalling("BROWSE<!|url: str|!> -> str", faultTolerance = True), "isEntry": True}],
                         "SCROLLDOWN": [{"re": GenerateRE4FunctionCalling("SCROLLDOWN<!||!> -> str"), "isEntry": True}],
                         "RESPOND": [{"re": GenerateRE4FunctionCalling("RESPOND<!|message: str|!> -> None", faultTolerance = True), "isEntry": True}]}
        self.ACTIONS = {}
        return

    def Reset(self):
        return

    def GetPatterns(self):
        return self.PATTERNS

    def GetActions(self):
        return self.ACTIONS

    def ParameterizedBuildPrompt(self, n: int):
        prompt = f"""
{self.prompt0}

End of general instructions.

"""
        #prompt += "Conversations:"
        ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))
        return ret, self.formatter.Len(ret)

    def BuildPrompt(self):
prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))
2
2023-10-16 01:51:14+00:00
2k
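ConstructOptPrompt (quoted in the record's context) binary-searches for the largest n whose prompt still fits under maxLen; a self-contained toy run:

from ailice.prompts.ATools import ConstructOptPrompt

def build(n):
    p = "x" * n
    return p, len(p)

prompt, n = ConstructOptPrompt(build, low=1, high=100, maxLen=32)
assert n == 31  # largest n whose measured length stays strictly below maxLen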
Agora-X/Bing-Chat-API
src/bing_chat/request.py
[ { "identifier": "CONVERSATION_STYLE_TYPE", "path": "src/bing_chat/conversation_style.py", "snippet": "CONVERSATION_STYLE_TYPE = Optional[\n Union[ConversationStyle, Literal[\"creative\", \"balanced\", \"precise\"]]\n]" }, { "identifier": "ConversationStyle", "path": "src/bing_chat/conversation_style.py", "snippet": "class ConversationStyle(Enum):\n creative = [\n \"nlu_direct_response_filter\",\n \"deepleo\",\n \"disable_emoji_spoken_text\",\n \"responsible_ai_policy_235\",\n \"enablemm\",\n \"h3imaginative\",\n \"objopinion\",\n \"dsblhlthcrd\",\n \"dv3sugg\",\n \"autosave\",\n \"clgalileo\",\n \"gencontentv3\",\n ]\n balanced = [\n \"nlu_direct_response_filter\",\n \"deepleo\",\n \"disable_emoji_spoken_text\",\n \"responsible_ai_policy_235\",\n \"enablemm\",\n \"galileo\",\n \"saharagenconv5\",\n \"objopinion\",\n \"dsblhlthcrd\",\n \"dv3sugg\",\n \"autosave\",\n ]\n precise = [\n \"nlu_direct_response_filter\",\n \"deepleo\",\n \"disable_emoji_spoken_text\",\n \"responsible_ai_policy_235\",\n \"enablemm\",\n \"h3precise\",\n \"objopinion\",\n \"dsblhlthcrd\",\n \"dv3sugg\",\n \"autosave\",\n \"clgalileo\",\n \"gencontentv3\",\n ]" }, { "identifier": "get_location_hint_from_locale", "path": "src/bing_chat/utilities.py", "snippet": "def get_location_hint_from_locale(locale: str) -> Union[dict, None]:\n locale = locale.lower()\n if locale == \"en-gb\":\n hint = LocationHint.UK.value\n elif locale == \"en-ie\":\n hint = LocationHint.EU.value\n elif locale == \"zh-cn\":\n hint = LocationHint.CHINA.value\n else:\n hint = LocationHint.USA.value\n return hint.get(\"LocationHint\")" }, { "identifier": "get_ran_hex", "path": "src/bing_chat/utilities.py", "snippet": "def get_ran_hex(length: int = 32) -> str:\n return \"\".join(random.choice(\"0123456789abcdef\") for _ in range(length))" }, { "identifier": "guess_locale", "path": "src/bing_chat/utilities.py", "snippet": "def guess_locale() -> str:\n if sys.platform.startswith(\"win\"):\n return \"en-us\"\n loc, _ = locale.getlocale()\n return loc.replace(\"_\", \"-\") if loc else \"en-us\"" } ]
import uuid
from datetime import datetime
from typing import Union
from .conversation_style import CONVERSATION_STYLE_TYPE
from .conversation_style import ConversationStyle
from .utilities import get_location_hint_from_locale
from .utilities import get_ran_hex
from .utilities import guess_locale
817
class ChatHubRequest:
    def __init__(
        self,
        conversation_signature: str,
        encrypted_conversation_signature: str,
        client_id: str,
        conversation_id: str,
        invocation_id: int = 3,
    ) -> None:
        self.struct: dict = {}

        self.client_id: str = client_id
        self.conversation_id: str = conversation_id
        self.conversation_signature: str = conversation_signature
        self.encrypted_conversation_signature: str = encrypted_conversation_signature
        self.invocation_id: int = invocation_id

    def update(
        self,
        prompt: str,
        conversation_style: CONVERSATION_STYLE_TYPE,
        webpage_context: Union[str, None] = None,
        search_result: bool = False,
class ChatHubRequest:
    def __init__(
        self,
        conversation_signature: str,
        encrypted_conversation_signature: str,
        client_id: str,
        conversation_id: str,
        invocation_id: int = 3,
    ) -> None:
        self.struct: dict = {}

        self.client_id: str = client_id
        self.conversation_id: str = conversation_id
        self.conversation_signature: str = conversation_signature
        self.encrypted_conversation_signature: str = encrypted_conversation_signature
        self.invocation_id: int = invocation_id

    def update(
        self,
        prompt: str,
        conversation_style: CONVERSATION_STYLE_TYPE,
        webpage_context: Union[str, None] = None,
        search_result: bool = False,
locale: str = guess_locale(),
4
2023-10-19 19:17:05+00:00
2k
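The record's next_line gives update() the default locale: str = guess_locale(); Python evaluates such defaults once, when def executes, not per call, which is worth keeping in mind when reading that signature:

import time

def f(stamp=time.time()):  # evaluated once, at definition time
    return stamp

assert f() == f()  # the same value on every call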
f0uriest/interpax
interpax/_spline.py
[ { "identifier": "errorif", "path": "interpax/utils.py", "snippet": "def errorif(cond, err=ValueError, msg=\"\"):\n \"\"\"Raise an error if condition is met.\n\n Similar to assert but allows wider range of Error types, rather than\n just AssertionError.\n \"\"\"\n if cond:\n raise err(msg)" }, { "identifier": "isbool", "path": "interpax/utils.py", "snippet": "def isbool(x):\n \"\"\"Check if something is boolean or ndarray of bool type.\"\"\"\n return isinstance(x, bool) or (hasattr(x, \"dtype\") and (x.dtype == bool))" } ]
from collections import OrderedDict
from functools import partial
from typing import Union
from jax import jit
from .utils import errorif, isbool
import equinox as eqx
import jax
import jax.numpy as jnp
import numpy as np
891
"""Functions for interpolating splines that are JAX differentiable.""" CUBIC_METHODS = ("cubic", "cubic2", "cardinal", "catmull-rom") OTHER_METHODS = ("nearest", "linear") METHODS_1D = CUBIC_METHODS + OTHER_METHODS + ("monotonic", "monotonic-0") METHODS_2D = CUBIC_METHODS + OTHER_METHODS METHODS_3D = CUBIC_METHODS + OTHER_METHODS class Interpolator1D(eqx.Module): """Convenience class for representing a 1D interpolated function. Parameters ---------- x : ndarray, shape(Nx,) coordinates of known function values ("knots") f : ndarray, shape(Nx,...) function values to interpolate method : str method of interpolation - ``'nearest'``: nearest neighbor interpolation - ``'linear'``: linear interpolation - ``'cubic'``: C1 cubic splines (aka local splines) - ``'cubic2'``: C2 cubic splines (aka natural splines) - ``'catmull-rom'``: C1 cubic centripetal "tension" splines - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass keyword parameter ``c`` in float[0,1] to specify tension - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the data, and will not introduce new extrema in the interpolated points - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at both endpoints extrap : bool, float, array-like whether to extrapolate values beyond knots (True) or return nan (False), or a specified value to return for query points outside the bounds. Can also be passed as a 2 element array or tuple to specify different conditions for xq<x[0] and x[-1]<xq period : float > 0, None periodicity of the function. If given, function is assumed to be periodic on the interval [0,period]. None denotes no periodicity Notes ----- This class is registered as a PyTree in JAX (it is actually an equinox.Module) so should be compatible with standard JAX transformations (jit, grad, vmap, etc.) """ x: jax.Array f: jax.Array derivs: dict method: str extrap: Union[bool, float, tuple] period: Union[None, float] axis: int def __init__( self, x: jax.Array, f: jax.Array, method: str = "cubic", extrap: Union[bool, float, tuple] = False, period: Union[None, float] = None, **kwargs, ): x, f = map(jnp.asarray, (x, f)) axis = kwargs.get("axis", 0) fx = kwargs.pop("fx", None)
"""Functions for interpolating splines that are JAX differentiable.""" CUBIC_METHODS = ("cubic", "cubic2", "cardinal", "catmull-rom") OTHER_METHODS = ("nearest", "linear") METHODS_1D = CUBIC_METHODS + OTHER_METHODS + ("monotonic", "monotonic-0") METHODS_2D = CUBIC_METHODS + OTHER_METHODS METHODS_3D = CUBIC_METHODS + OTHER_METHODS class Interpolator1D(eqx.Module): """Convenience class for representing a 1D interpolated function. Parameters ---------- x : ndarray, shape(Nx,) coordinates of known function values ("knots") f : ndarray, shape(Nx,...) function values to interpolate method : str method of interpolation - ``'nearest'``: nearest neighbor interpolation - ``'linear'``: linear interpolation - ``'cubic'``: C1 cubic splines (aka local splines) - ``'cubic2'``: C2 cubic splines (aka natural splines) - ``'catmull-rom'``: C1 cubic centripetal "tension" splines - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass keyword parameter ``c`` in float[0,1] to specify tension - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the data, and will not introduce new extrema in the interpolated points - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at both endpoints extrap : bool, float, array-like whether to extrapolate values beyond knots (True) or return nan (False), or a specified value to return for query points outside the bounds. Can also be passed as a 2 element array or tuple to specify different conditions for xq<x[0] and x[-1]<xq period : float > 0, None periodicity of the function. If given, function is assumed to be periodic on the interval [0,period]. None denotes no periodicity Notes ----- This class is registered as a PyTree in JAX (it is actually an equinox.Module) so should be compatible with standard JAX transformations (jit, grad, vmap, etc.) """ x: jax.Array f: jax.Array derivs: dict method: str extrap: Union[bool, float, tuple] period: Union[None, float] axis: int def __init__( self, x: jax.Array, f: jax.Array, method: str = "cubic", extrap: Union[bool, float, tuple] = False, period: Union[None, float] = None, **kwargs, ): x, f = map(jnp.asarray, (x, f)) axis = kwargs.get("axis", 0) fx = kwargs.pop("fx", None)
errorif(
0
2023-10-18 13:12:20+00:00
2k
aszc-dev/ComfyUI-CoreMLSuite
coreml_suite/models.py
[ { "identifier": "get_model_config", "path": "coreml_suite/config.py", "snippet": "def get_model_config(model_version: ModelVersion):\n unet_config = convert_config(config_map[model_version])\n config = supported_models_base.BASE(unet_config)\n config.latent_format = latent_format_map[model_version]()\n return config" }, { "identifier": "ModelVersion", "path": "coreml_suite/config.py", "snippet": "class ModelVersion(Enum):\n SD15 = \"sd15\"\n SDXL = \"sdxl\"\n SDXL_REFINER = \"sdxl_refiner\"\n LCM = \"lcm\"" }, { "identifier": "extract_residual_kwargs", "path": "coreml_suite/controlnet.py", "snippet": "def extract_residual_kwargs(expected_inputs, control):\n if \"additional_residual_0\" not in expected_inputs.keys():\n return {}\n if control is None:\n return no_control(expected_inputs)\n\n residual_kwargs = {\n \"additional_residual_{}\".format(i): r.cpu().numpy().astype(np.float16)\n for i, r in enumerate(chain(control[\"output\"], control[\"middle\"]))\n }\n return residual_kwargs" }, { "identifier": "chunk_control", "path": "coreml_suite/controlnet.py", "snippet": "def chunk_control(cn, target_size):\n if cn is None:\n return [None] * target_size\n\n num_chunks = ceil(cn[\"output\"][0].shape[0] / target_size)\n\n out = [{\"output\": [], \"middle\": []} for _ in range(num_chunks)]\n\n for k, v in cn.items():\n for i, x in enumerate(v):\n chunks = chunk_batch(x, (target_size, *x.shape[1:]))\n for j, chunk in enumerate(chunks):\n out[j][k].append(chunk)\n\n return out" }, { "identifier": "chunk_batch", "path": "coreml_suite/latents.py", "snippet": "def chunk_batch(input_tensor, target_shape):\n if input_tensor.shape == target_shape:\n return [input_tensor]\n\n batch_size = input_tensor.shape[0]\n target_batch_size = target_shape[0]\n\n num_chunks = batch_size // target_batch_size\n if num_chunks == 0:\n padding = torch.zeros(target_batch_size - batch_size, *target_shape[1:]).to(\n input_tensor.device\n )\n return [torch.cat((input_tensor, padding), dim=0)]\n\n mod = batch_size % target_batch_size\n if mod != 0:\n chunks = list(torch.chunk(input_tensor[:-mod], num_chunks))\n padding = torch.zeros(target_batch_size - mod, *target_shape[1:]).to(\n input_tensor.device\n )\n padded = torch.cat((input_tensor[-mod:], padding), dim=0)\n chunks.append(padded)\n return chunks\n\n chunks = list(torch.chunk(input_tensor, num_chunks))\n return chunks" }, { "identifier": "merge_chunks", "path": "coreml_suite/latents.py", "snippet": "def merge_chunks(chunks, orig_shape):\n merged = torch.cat(chunks, dim=0)\n if merged.shape == orig_shape:\n return merged\n return merged[: orig_shape[0]]" }, { "identifier": "is_lcm", "path": "coreml_suite/lcm/utils.py", "snippet": "def is_lcm(coreml_model):\n return \"timestep_cond\" in coreml_model.expected_inputs" }, { "identifier": "logger", "path": "coreml_suite/logger.py", "snippet": "" } ]
import numpy as np
import torch
from comfy import model_base
from comfy.model_management import get_torch_device
from comfy.model_patcher import ModelPatcher
from coreml_suite.config import get_model_config, ModelVersion
from coreml_suite.controlnet import extract_residual_kwargs, chunk_control
from coreml_suite.latents import chunk_batch, merge_chunks
from coreml_suite.lcm.utils import is_lcm
from coreml_suite.logger import logger
1,387
class CoreMLModelWrapper:
    def __init__(self, coreml_model):
        self.coreml_model = coreml_model
        self.dtype = torch.float16

    def __call__(self, x, t, context, control, transformer_options=None, **kwargs):
        inputs = CoreMLInputs(x, t, context, control, **kwargs)
        input_list = inputs.chunks(self.expected_inputs)

        chunked_out = [
            self.get_torch_outputs(
                self.coreml_model(**input_kwargs.coreml_kwargs(self.expected_inputs)),
                x.device,
            )
            for input_kwargs in input_list
        ]
        merged_out = merge_chunks(chunked_out, x.shape)
        return merged_out

    @staticmethod
    def get_torch_outputs(model_output, device):
        return torch.from_numpy(model_output["noise_pred"]).to(device)

    @property
    def expected_inputs(self):
        return self.coreml_model.expected_inputs

    @property
    def is_lcm(self):
        return is_lcm(self.coreml_model)

    @property
    def is_sdxl_base(self):
        return is_sdxl_base(self.coreml_model)

    @property
    def is_sdxl_refiner(self):
        return is_sdxl_refiner(self.coreml_model)

    @property
    def config(self):
        if self.is_sdxl_base:
            return get_model_config(ModelVersion.SDXL)
        if self.is_sdxl_refiner:
            return get_model_config(ModelVersion.SDXL_REFINER)
        return get_model_config(ModelVersion.SD15)


class CoreMLModelWrapperLCM(CoreMLModelWrapper):
    def __init__(self, coreml_model):
        super().__init__(coreml_model)
        self.config = None


class CoreMLInputs:
    def __init__(self, x, t, context, control, **kwargs):
        self.x = x
        self.t = t
        self.context = context
        self.control = control
        self.time_ids = kwargs.get("time_ids")
        self.text_embeds = kwargs.get("text_embeds")
        self.ts_cond = kwargs.get("timestep_cond")

    def coreml_kwargs(self, expected_inputs):
        sample = self.x.cpu().numpy().astype(np.float16)
        context = self.context.cpu().numpy().astype(np.float16)
        context = context.transpose(0, 2, 1)[:, :, None, :]
        t = self.t.cpu().numpy().astype(np.float16)

        model_input_kwargs = {
            "sample": sample,
            "encoder_hidden_states": context,
            "timestep": t,
        }
class CoreMLModelWrapper:
    def __init__(self, coreml_model):
        self.coreml_model = coreml_model
        self.dtype = torch.float16

    def __call__(self, x, t, context, control, transformer_options=None, **kwargs):
        inputs = CoreMLInputs(x, t, context, control, **kwargs)
        input_list = inputs.chunks(self.expected_inputs)

        chunked_out = [
            self.get_torch_outputs(
                self.coreml_model(**input_kwargs.coreml_kwargs(self.expected_inputs)),
                x.device,
            )
            for input_kwargs in input_list
        ]
        merged_out = merge_chunks(chunked_out, x.shape)
        return merged_out

    @staticmethod
    def get_torch_outputs(model_output, device):
        return torch.from_numpy(model_output["noise_pred"]).to(device)

    @property
    def expected_inputs(self):
        return self.coreml_model.expected_inputs

    @property
    def is_lcm(self):
        return is_lcm(self.coreml_model)

    @property
    def is_sdxl_base(self):
        return is_sdxl_base(self.coreml_model)

    @property
    def is_sdxl_refiner(self):
        return is_sdxl_refiner(self.coreml_model)

    @property
    def config(self):
        if self.is_sdxl_base:
            return get_model_config(ModelVersion.SDXL)
        if self.is_sdxl_refiner:
            return get_model_config(ModelVersion.SDXL_REFINER)
        return get_model_config(ModelVersion.SD15)


class CoreMLModelWrapperLCM(CoreMLModelWrapper):
    def __init__(self, coreml_model):
        super().__init__(coreml_model)
        self.config = None


class CoreMLInputs:
    def __init__(self, x, t, context, control, **kwargs):
        self.x = x
        self.t = t
        self.context = context
        self.control = control
        self.time_ids = kwargs.get("time_ids")
        self.text_embeds = kwargs.get("text_embeds")
        self.ts_cond = kwargs.get("timestep_cond")

    def coreml_kwargs(self, expected_inputs):
        sample = self.x.cpu().numpy().astype(np.float16)
        context = self.context.cpu().numpy().astype(np.float16)
        context = context.transpose(0, 2, 1)[:, :, None, :]
        t = self.t.cpu().numpy().astype(np.float16)

        model_input_kwargs = {
            "sample": sample,
            "encoder_hidden_states": context,
            "timestep": t,
        }
residual_kwargs = extract_residual_kwargs(expected_inputs, self.control)
2
2023-10-23 13:08:00+00:00
2k
aikunyi/FreTS
layers/SelfAttention_Family.py
[ { "identifier": "TriangularCausalMask", "path": "utils/masking.py", "snippet": "class TriangularCausalMask():\n def __init__(self, B, L, device=\"cpu\"):\n mask_shape = [B, 1, L, L]\n with torch.no_grad():\n self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)\n\n @property\n def mask(self):\n return self._mask" }, { "identifier": "ProbMask", "path": "utils/masking.py", "snippet": "class ProbMask():\n def __init__(self, B, H, L, index, scores, device=\"cpu\"):\n _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)\n _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])\n indicator = _mask_ex[torch.arange(B)[:, None, None],\n torch.arange(H)[None, :, None],\n index, :].to(device)\n self._mask = indicator.view(scores.shape).to(device)\n\n @property\n def mask(self):\n return self._mask" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import math
import os
from math import sqrt
from utils.masking import TriangularCausalMask, ProbMask
1,141
class FullAttention(nn.Module):
    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
        super(FullAttention, self).__init__()
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, queries, keys, values, attn_mask):
        B, L, H, E = queries.shape
        _, S, _, D = values.shape
        scale = self.scale or 1. / sqrt(E)

        scores = torch.einsum("blhe,bshe->bhls", queries, keys)

        if self.mask_flag:
            if attn_mask is None:
                attn_mask = TriangularCausalMask(B, L, device=queries.device)

            scores.masked_fill_(attn_mask.mask, -np.inf)

        A = self.dropout(torch.softmax(scale * scores, dim=-1))
        V = torch.einsum("bhls,bshd->blhd", A, values)

        if self.output_attention:
            return (V.contiguous(), A)
        else:
            return (V.contiguous(), None)


class ProbAttention(nn.Module):
    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
        super(ProbAttention, self).__init__()
        self.factor = factor
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)
        # Q [B, H, L, D]
        B, H, L_K, E = K.shape
        _, _, L_Q, _ = Q.shape

        # calculate the sampled Q_K
        K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)
        index_sample = torch.randint(L_K, (L_Q, sample_k))  # real U = U_part(factor*ln(L_k))*L_q
        K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :]
        Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()

        # find the Top_k query with sparisty measurement
        M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)
        M_top = M.topk(n_top, sorted=False)[1]

        # use the reduced Q to calculate Q_K
        Q_reduce = Q[torch.arange(B)[:, None, None],
                     torch.arange(H)[None, :, None],
                     M_top, :]  # factor*ln(L_q)
        Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1))  # factor*ln(L_q)*L_k

        return Q_K, M_top

    def _get_initial_context(self, V, L_Q):
        B, H, L_V, D = V.shape
        if not self.mask_flag:
            # V_sum = V.sum(dim=-2)
            V_sum = V.mean(dim=-2)
            contex = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone()
        else:  # use mask
            assert (L_Q == L_V)  # requires that L_Q == L_V, i.e. for self-attention only
            contex = V.cumsum(dim=-2)
        return contex

    def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
        B, H, L_V, D = V.shape

        if self.mask_flag:
class FullAttention(nn.Module):
    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
        super(FullAttention, self).__init__()
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, queries, keys, values, attn_mask):
        B, L, H, E = queries.shape
        _, S, _, D = values.shape
        scale = self.scale or 1. / sqrt(E)

        scores = torch.einsum("blhe,bshe->bhls", queries, keys)

        if self.mask_flag:
            if attn_mask is None:
                attn_mask = TriangularCausalMask(B, L, device=queries.device)

            scores.masked_fill_(attn_mask.mask, -np.inf)

        A = self.dropout(torch.softmax(scale * scores, dim=-1))
        V = torch.einsum("bhls,bshd->blhd", A, values)

        if self.output_attention:
            return (V.contiguous(), A)
        else:
            return (V.contiguous(), None)


class ProbAttention(nn.Module):
    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
        super(ProbAttention, self).__init__()
        self.factor = factor
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)
        # Q [B, H, L, D]
        B, H, L_K, E = K.shape
        _, _, L_Q, _ = Q.shape

        # calculate the sampled Q_K
        K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)
        index_sample = torch.randint(L_K, (L_Q, sample_k))  # real U = U_part(factor*ln(L_k))*L_q
        K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :]
        Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()

        # find the Top_k query with sparisty measurement
        M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)
        M_top = M.topk(n_top, sorted=False)[1]

        # use the reduced Q to calculate Q_K
        Q_reduce = Q[torch.arange(B)[:, None, None],
                     torch.arange(H)[None, :, None],
                     M_top, :]  # factor*ln(L_q)
        Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1))  # factor*ln(L_q)*L_k

        return Q_K, M_top

    def _get_initial_context(self, V, L_Q):
        B, H, L_V, D = V.shape
        if not self.mask_flag:
            # V_sum = V.sum(dim=-2)
            V_sum = V.mean(dim=-2)
            contex = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone()
        else:  # use mask
            assert (L_Q == L_V)  # requires that L_Q == L_V, i.e. for self-attention only
            contex = V.cumsum(dim=-2)
        return contex

    def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
        B, H, L_V, D = V.shape

        if self.mask_flag:
attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)
1
2023-10-23 13:15:14+00:00
2k
lightly-ai/labelformat
src/labelformat/formats/yolov6.py
[ { "identifier": "YOLOv8ObjectDetectionInput", "path": "src/labelformat/formats/yolov8.py", "snippet": "class YOLOv8ObjectDetectionInput(_YOLOv8BaseInput, ObjectDetectionInput):\n def get_labels(self) -> Iterable[ImageObjectDetection]:\n category_id_to_category = {\n category.id: category for category in self.get_categories()\n }\n labels_dir = self._labels_dir()\n for image in self.get_images():\n label_path = (labels_dir / image.filename).with_suffix(\".txt\")\n if not label_path.exists():\n logger.warning(\n f\"Label file '{label_path}' for image '{image.filename}' does not exist.\"\n )\n with label_path.open() as file:\n label_data = [line.split() for line in file.readlines()]\n\n objects = []\n for category_id, rcx, rcy, rw, rh in label_data:\n cx = float(rcx) * image.width\n cy = float(rcy) * image.height\n w = float(rw) * image.width\n h = float(rh) * image.height\n objects.append(\n SingleObjectDetection(\n category=category_id_to_category[int(category_id)],\n box=BoundingBox.from_format(\n bbox=[cx, cy, w, h],\n format=BoundingBoxFormat.CXCYWH,\n ),\n )\n )\n yield ImageObjectDetection(\n image=image,\n objects=objects,\n )" }, { "identifier": "YOLOv8ObjectDetectionOutput", "path": "src/labelformat/formats/yolov8.py", "snippet": "class YOLOv8ObjectDetectionOutput(_YOLOv8BaseOutput, ObjectDetectionOutput):\n def save(self, label_input: ObjectDetectionInput) -> None:\n # Write config file.\n self._output_file.parent.mkdir(parents=True, exist_ok=True)\n _save_dataset_yaml(\n output_file=self._output_file,\n output_split=self._output_split,\n categories=list(label_input.get_categories()),\n )\n\n # Write label files.\n labels_dir = self._output_file.parent / \"labels\"\n for label in label_input.get_labels():\n label_path = (labels_dir / label.image.filename).with_suffix(\".txt\")\n label_path.parent.mkdir(parents=True, exist_ok=True)\n with label_path.open(\"w\") as file:\n for obj in label.objects:\n cx, cy, w, h = obj.box.to_format(format=BoundingBoxFormat.CXCYWH)\n rcx = cx / label.image.width\n rcy = cy / label.image.height\n rw = w / label.image.width\n rh = h / label.image.height\n file.write(f\"{obj.category.id} {rcx} {rcy} {rw} {rh}\\n\")" } ]
from labelformat.cli.registry import Task, cli_register

from .yolov8 import YOLOv8ObjectDetectionInput, YOLOv8ObjectDetectionOutput
750
""" YOLOv6 format follows the same specs as YOLOv8. """ @cli_register(format="yolov6", task=Task.OBJECT_DETECTION) class YOLOv6ObjectDetectionInput(YOLOv8ObjectDetectionInput): pass @cli_register(format="yolov6", task=Task.OBJECT_DETECTION)
""" YOLOv6 format follows the same specs as YOLOv8. """ @cli_register(format="yolov6", task=Task.OBJECT_DETECTION) class YOLOv6ObjectDetectionInput(YOLOv8ObjectDetectionInput): pass @cli_register(format="yolov6", task=Task.OBJECT_DETECTION)
class YOLOv6ObjectDetectionOutput(YOLOv8ObjectDetectionOutput):
1
2023-10-18 11:08:06+00:00
2k
amitfin/oref_alert
tests/test_binary_sensor.py
[ { "identifier": "ADD_SENSOR_SERVICE", "path": "custom_components/oref_alert/const.py", "snippet": "ADD_SENSOR_SERVICE: Final = \"add_sensor\"" }, { "identifier": "ATTR_COUNTRY_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_COUNTRY_ALERTS: Final = \"country_alerts\"" }, { "identifier": "ATTR_COUNTRY_ACTIVE_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_COUNTRY_ACTIVE_ALERTS: Final = \"country_active_alerts\"" }, { "identifier": "ATTR_SELECTED_AREAS_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_SELECTED_AREAS_ALERTS: Final = \"selected_areas_alerts\"" }, { "identifier": "ATTR_SELECTED_AREAS_ACTIVE_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_SELECTED_AREAS_ACTIVE_ALERTS: Final = \"selected_areas_active_alerts\"" }, { "identifier": "CONF_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ALERT_MAX_AGE: Final = \"alert_max_age\"" }, { "identifier": "CONF_AREAS", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_AREAS: Final = \"areas\"" }, { "identifier": "CONF_OFF_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_OFF_ICON: Final = \"off_icon\"" }, { "identifier": "CONF_ON_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ON_ICON: Final = \"on_icon\"" }, { "identifier": "CONF_POLL_INTERVAL", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_POLL_INTERVAL: Final = \"poll_interval\"" }, { "identifier": "DOMAIN", "path": "custom_components/oref_alert/const.py", "snippet": "DOMAIN: Final = \"oref_alert\"" }, { "identifier": "OREF_ALERT_UNIQUE_ID", "path": "custom_components/oref_alert/const.py", "snippet": "OREF_ALERT_UNIQUE_ID: Final = \"oref_alert\"" }, { "identifier": "ALL_AREAS_ID_SUFFIX", "path": "custom_components/oref_alert/const.py", "snippet": "ALL_AREAS_ID_SUFFIX: Final = \"all_areas\"" }, { "identifier": "load_json_fixture", "path": "tests/utils.py", "snippet": "def load_json_fixture(file_name: str) -> Any:\n \"\"\"Return a json object from a local fixture file.\"\"\"\n with open(\n fixture_path(file_name),\n encoding=\"utf-8\",\n ) as file:\n return json.load(file)" }, { "identifier": "mock_urls", "path": "tests/utils.py", "snippet": "def mock_urls(\n aioclient_mock: AiohttpClientMocker,\n real_time_fixture: str | None,\n history_fixture: str | None,\n **kwargs: Any,\n) -> None:\n \"\"\"Mock the URLs.\"\"\"\n aioclient_mock.clear_requests()\n aioclient_mock.get(\n OREF_ALERTS_URL,\n text=load_fixture(real_time_fixture) if real_time_fixture else \"\",\n **kwargs,\n )\n aioclient_mock.get(\n OREF_HISTORY_URL,\n text=load_fixture(history_fixture) if history_fixture else \"\",\n **kwargs,\n )" } ]
import datetime
import pytest
from typing import Any
from freezegun.api import FrozenDateTimeFactory
from homeassistant.const import CONF_NAME, Platform, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from pytest_homeassistant_custom_component.common import (
    MockConfigEntry,
    async_fire_time_changed,
)
from pytest_homeassistant_custom_component.test_util.aiohttp import AiohttpClientMocker
from custom_components.oref_alert.const import (
    ADD_SENSOR_SERVICE,
    ATTR_COUNTRY_ALERTS,
    ATTR_COUNTRY_ACTIVE_ALERTS,
    ATTR_SELECTED_AREAS_ALERTS,
    ATTR_SELECTED_AREAS_ACTIVE_ALERTS,
    CONF_ALERT_MAX_AGE,
    CONF_AREAS,
    CONF_OFF_ICON,
    CONF_ON_ICON,
    CONF_POLL_INTERVAL,
    DOMAIN,
    OREF_ALERT_UNIQUE_ID,
    ALL_AREAS_ID_SUFFIX,
)
from .utils import load_json_fixture, mock_urls
1,429
"""The tests for the binary_sensor file.""" from __future__ import annotations DEFAULT_OPTIONS = {CONF_AREAS: ["בארי"], CONF_ALERT_MAX_AGE: 10} ENTITY_ID = f"{Platform.BINARY_SENSOR}.{OREF_ALERT_UNIQUE_ID}" async def async_setup( hass: HomeAssistant, options: dict[str, Any] | None = None ) -> str: """Integration setup.""" options = options or {} config_entry = MockConfigEntry( domain=DOMAIN, options={**DEFAULT_OPTIONS, **options} ) config_entry.add_to_hass(hass) assert await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() return config_entry.entry_id async def async_shutdown(hass: HomeAssistant, config_id: str) -> None: """Shutdown by removing the integration.""" assert await hass.config_entries.async_remove(config_id) await hass.async_block_till_done() async def test_state( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, freezer: FrozenDateTimeFactory, ) -> None: """Test entity state.""" freezer.move_to("2023-10-07 06:30:00+03:00") mock_urls(aioclient_mock, None, "single_alert_history.json") config_id = await async_setup(hass) assert hass.states.get(ENTITY_ID).state == STATE_ON freezer.move_to("2023-10-07 06:39:50+03:00") async_fire_time_changed(hass) await hass.async_block_till_done() assert hass.states.get(ENTITY_ID).state == STATE_ON freezer.move_to("2023-10-07 06:40:01+03:00") async_fire_time_changed(hass) await hass.async_block_till_done() assert hass.states.get(ENTITY_ID).state == STATE_OFF await async_shutdown(hass, config_id) @pytest.mark.parametrize( ("areas",), ((["תל אביב - כל האזורים"],), (["מחוז דן"],)), ids=("City all areas", "District"), ) async def test_real_time_alert_area_expansion( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, areas: list[str] ) -> None: """Test real time alert and city expansion.""" mock_urls(aioclient_mock, "single_alert_real_time.json", None) config_id = await async_setup(hass, {CONF_AREAS: areas}) assert hass.states.get(ENTITY_ID).state == STATE_ON await async_shutdown(hass, config_id) async def test_state_attributes( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, freezer: FrozenDateTimeFactory, ) -> None: """Test state attributes.""" freezer.move_to("2023-10-07 06:30:00+03:00") mock_urls( aioclient_mock, "multi_alerts_real_time.json", "multi_alerts_history.json" ) config_id = await async_setup(hass) state = hass.states.get(ENTITY_ID)
"""The tests for the binary_sensor file.""" from __future__ import annotations DEFAULT_OPTIONS = {CONF_AREAS: ["בארי"], CONF_ALERT_MAX_AGE: 10} ENTITY_ID = f"{Platform.BINARY_SENSOR}.{OREF_ALERT_UNIQUE_ID}" async def async_setup( hass: HomeAssistant, options: dict[str, Any] | None = None ) -> str: """Integration setup.""" options = options or {} config_entry = MockConfigEntry( domain=DOMAIN, options={**DEFAULT_OPTIONS, **options} ) config_entry.add_to_hass(hass) assert await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() return config_entry.entry_id async def async_shutdown(hass: HomeAssistant, config_id: str) -> None: """Shutdown by removing the integration.""" assert await hass.config_entries.async_remove(config_id) await hass.async_block_till_done() async def test_state( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, freezer: FrozenDateTimeFactory, ) -> None: """Test entity state.""" freezer.move_to("2023-10-07 06:30:00+03:00") mock_urls(aioclient_mock, None, "single_alert_history.json") config_id = await async_setup(hass) assert hass.states.get(ENTITY_ID).state == STATE_ON freezer.move_to("2023-10-07 06:39:50+03:00") async_fire_time_changed(hass) await hass.async_block_till_done() assert hass.states.get(ENTITY_ID).state == STATE_ON freezer.move_to("2023-10-07 06:40:01+03:00") async_fire_time_changed(hass) await hass.async_block_till_done() assert hass.states.get(ENTITY_ID).state == STATE_OFF await async_shutdown(hass, config_id) @pytest.mark.parametrize( ("areas",), ((["תל אביב - כל האזורים"],), (["מחוז דן"],)), ids=("City all areas", "District"), ) async def test_real_time_alert_area_expansion( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, areas: list[str] ) -> None: """Test real time alert and city expansion.""" mock_urls(aioclient_mock, "single_alert_real_time.json", None) config_id = await async_setup(hass, {CONF_AREAS: areas}) assert hass.states.get(ENTITY_ID).state == STATE_ON await async_shutdown(hass, config_id) async def test_state_attributes( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, freezer: FrozenDateTimeFactory, ) -> None: """Test state attributes.""" freezer.move_to("2023-10-07 06:30:00+03:00") mock_urls( aioclient_mock, "multi_alerts_real_time.json", "multi_alerts_history.json" ) config_id = await async_setup(hass) state = hass.states.get(ENTITY_ID)
active_area_alert = load_json_fixture("single_alert_history.json")
13
2023-10-18 11:16:41+00:00
2k
apple/ml-nvas3d
soundspaces_nvas3d/rir_generation/generate_rir.py
[ { "identifier": "render_rir_parallel", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def render_rir_parallel(room_list: T.List[str],\n source_position_list: T.List[T.Tuple[float, float, float]],\n receiver_position_list: T.List[T.Tuple[float, float, float]],\n filename_list: T.List[str] = None,\n receiver_rotation_list: T.List[float] = None,\n batch_size: int = 64,\n sample_rate: float = 48000,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ) -> T.List[torch.Tensor]:\n \"\"\"\n Run render_ir parallely for all elements of zip(source_position_list, receiver_position_list).\n \"\"\"\n\n assert len(room_list) == len(source_position_list)\n assert len(source_position_list) == len(receiver_position_list)\n\n if filename_list is None:\n is_return = True\n else:\n is_return = False\n\n if receiver_rotation_list is None:\n receiver_rotation_list = [0] * len(receiver_position_list)\n\n # Note: Make sure all rooms are downloaded\n\n # Calculate the number of batches\n num_points = len(source_position_list)\n num_batches = (num_points + batch_size - 1) // batch_size\n\n # Use tqdm to display the progress bar\n progress_bar = tqdm(total=num_points)\n\n def update_progress(*_):\n progress_bar.update()\n\n ir_list = []\n # Process the tasks in batches\n for batch_idx in range(num_batches):\n # Calculate the start and end indices of the current batch\n start_idx = batch_idx * batch_size\n end_idx = min(start_idx + batch_size, num_points)\n if is_return:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], None, receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n else:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], filename_list[i], receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n\n # Create a multiprocessing Pool for the current batch\n with multiprocessing.Pool() as pool:\n tasks = []\n for room, source_position, receiver_position, filename, receiver_rotation in batch:\n # Apply async mapping of process_ir function\n task = pool.apply_async(render_ir, args=(room, source_position, receiver_position, filename, receiver_rotation, sample_rate, use_default_material, channel_type, channel_order), callback=update_progress)\n tasks.append(task)\n\n # Wait for all tasks in the batch to complete and collect results\n for task in tasks:\n if is_return:\n ir = task.get() # Block until the result is ready\n ir_list.append(ir) # Append the result to the list\n else:\n task.get()\n if is_return:\n return ir_list" }, { "identifier": "load_room_grid", "path": "soundspaces_nvas3d/utils/aihabitat_utils.py", "snippet": "def load_room_grid(\n room: str,\n grid_distance: float\n) -> T.Dict:\n \"\"\"\n Load grid data for a specified room. 
If the grid data does not exist, it generates one.\n\n Args:\n - room: Name of the room.\n - grid_distance: The spacing between grid points.\n\n Returns:\n - A dictionary containing grid information for the specified room.\n \"\"\"\n\n grid_distance_str = str(grid_distance).replace(\".\", \"_\")\n dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'\n filename_grid = f'{dirname_grid}/grid_{room}.npy'\n if not os.path.exists(filename_grid):\n os.makedirs(dirname_grid, exist_ok=True)\n print(f'Computing grid_{room}...')\n from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points\n grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)\n\n # load grid\n grid_info = np.load(filename_grid, allow_pickle=True).item()\n\n return grid_info" } ]
import os
import argparse
import itertools
from soundspaces_nvas3d.utils.ss_utils import render_rir_parallel
from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid
1,134
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#


def generate_rir(args: argparse.Namespace) -> None:
    """
    Generate Room Impulse Response (RIR) based on given room and grid distance.
    """

    grid_distance_str = str(args.grid_distance).replace(".", "_")
    dirname = os.path.join(args.dirname, f'rir_mp3d/grid_{grid_distance_str}', args.room)
    os.makedirs(dirname, exist_ok=True)
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#


def generate_rir(args: argparse.Namespace) -> None:
    """
    Generate Room Impulse Response (RIR) based on given room and grid distance.
    """

    grid_distance_str = str(args.grid_distance).replace(".", "_")
    dirname = os.path.join(args.dirname, f'rir_mp3d/grid_{grid_distance_str}', args.room)
    os.makedirs(dirname, exist_ok=True)
grid_data = load_room_grid(args.room, grid_distance=args.grid_distance)
1
2023-10-19 05:35:54+00:00
2k
kwonathan/language-models-trajectory-generators
api.py
[ { "identifier": "SUCCESS_DETECTION_PROMPT", "path": "prompts/success_detection_prompt.py", "snippet": "SUCCESS_DETECTION_PROMPT = \\\n\"\"\"You are tasked with determining whether a user command was completed successfully or not, based on how the positions and orientations of the relevant objects in the environment changed during the execution of the task.\n\nThe 3D coordinate system of the environment is as follows:\n 1. The x-axis is in the horizontal direction, increasing to the right.\n 2. The y-axis is in the depth direction, increasing away from you.\n 3. The z-axis is in the vertical direction, increasing upwards.\nThe position values are in metres.\n\nThe objects can rotate about the z-axis, from -pi to pi radians.\nNegative rotation values represent clockwise rotation, and positive rotation values represent anticlockwise rotation. The rotation values are in radians.\n\nThe user command is \"[INSERT TASK]\".\n\n1. Given the user command, describe how the object positions and orientations should have changed during the execution of the task.\n2. From the given positions and orientations of the relevant objects, output whether the task was completed successfully or not.\n3. If the task was completed successfully, output\n```python\ntask_completed()\n```.\n4. If the task was not completed successfully, output\n```python\ntask_failed()\n```.\nDo not define the task_completed and task_failed functions yourself.\n\nThe positions and orientations of the relevant objects in the environment are as follows:\n\"\"\"" }, { "identifier": "OK", "path": "config.py", "snippet": "OK = \"\\033[92m\"" }, { "identifier": "PROGRESS", "path": "config.py", "snippet": "PROGRESS = \"\\033[93m\"" }, { "identifier": "FAIL", "path": "config.py", "snippet": "FAIL = \"\\033[91m\"" }, { "identifier": "ENDC", "path": "config.py", "snippet": "ENDC = \"\\033[0m\"" }, { "identifier": "CAPTURE_IMAGES", "path": "config.py", "snippet": "CAPTURE_IMAGES = 1" }, { "identifier": "ADD_BOUNDING_CUBES", "path": "config.py", "snippet": "ADD_BOUNDING_CUBES = 2" }, { "identifier": "ADD_TRAJECTORY_POINTS", "path": "config.py", "snippet": "ADD_TRAJECTORY_POINTS = 3" }, { "identifier": "EXECUTE_TRAJECTORY", "path": "config.py", "snippet": "EXECUTE_TRAJECTORY = 4" }, { "identifier": "OPEN_GRIPPER", "path": "config.py", "snippet": "OPEN_GRIPPER = 5" }, { "identifier": "CLOSE_GRIPPER", "path": "config.py", "snippet": "CLOSE_GRIPPER = 6" }, { "identifier": "TASK_COMPLETED", "path": "config.py", "snippet": "TASK_COMPLETED = 7" }, { "identifier": "RESET_ENVIRONMENT", "path": "config.py", "snippet": "RESET_ENVIRONMENT = 8" } ]
import numpy as np
import sys
import torch
import math
import config
import models
import utils
from PIL import Image
from prompts.success_detection_prompt import SUCCESS_DETECTION_PROMPT
from config import OK, PROGRESS, FAIL, ENDC
from config import CAPTURE_IMAGES, ADD_BOUNDING_CUBES, ADD_TRAJECTORY_POINTS, EXECUTE_TRAJECTORY, OPEN_GRIPPER, CLOSE_GRIPPER, TASK_COMPLETED, RESET_ENVIRONMENT
1,042
class API:
    def __init__(self, args, main_connection, logger, langsam_model, xmem_model, device):
        self.args = args
        self.main_connection = main_connection
        self.logger = logger
        self.langsam_model = langsam_model
        self.xmem_model = xmem_model
        self.device = device
        self.segmentation_texts = []
        self.segmentation_count = 0
        self.trajectory_length = 0
        self.attempted_task = False
        self.completed_task = False
        self.failed_task = False
        self.head_camera_position = None
        self.head_camera_orientation_q = None
        self.wrist_camera_position = None
        self.wrist_camera_orientation_q = None
        self.command = None

    def detect_object(self, segmentation_text):
        self.logger.info(PROGRESS + "Capturing head and wrist camera images..." + ENDC)

        self.main_connection.send([CAPTURE_IMAGES])
        [head_camera_position, head_camera_orientation_q, wrist_camera_position, wrist_camera_orientation_q, env_connection_message] = self.main_connection.recv()
        self.logger.info(env_connection_message)

        self.head_camera_position = head_camera_position
        self.head_camera_orientation_q = head_camera_orientation_q
        self.wrist_camera_position = wrist_camera_position
        self.wrist_camera_orientation_q = wrist_camera_orientation_q

        rgb_image_head = Image.open(config.rgb_image_head_path).convert("RGB")
        depth_image_head = Image.open(config.depth_image_head_path).convert("L")
        depth_array = np.array(depth_image_head) / 255.

        if self.segmentation_count == 0:
            xmem_image = Image.fromarray(np.zeros_like(depth_array)).convert("L")
            xmem_image.save(config.xmem_input_path)

        segmentation_texts = [segmentation_text]

        self.logger.info(PROGRESS + "Segmenting head camera image..." + ENDC)
        model_predictions, boxes, segmentation_texts = models.get_langsam_output(rgb_image_head, self.langsam_model, segmentation_texts, self.segmentation_count)
class API:
    def __init__(self, args, main_connection, logger, langsam_model, xmem_model, device):
        self.args = args
        self.main_connection = main_connection
        self.logger = logger
        self.langsam_model = langsam_model
        self.xmem_model = xmem_model
        self.device = device
        self.segmentation_texts = []
        self.segmentation_count = 0
        self.trajectory_length = 0
        self.attempted_task = False
        self.completed_task = False
        self.failed_task = False
        self.head_camera_position = None
        self.head_camera_orientation_q = None
        self.wrist_camera_position = None
        self.wrist_camera_orientation_q = None
        self.command = None

    def detect_object(self, segmentation_text):
        self.logger.info(PROGRESS + "Capturing head and wrist camera images..." + ENDC)

        self.main_connection.send([CAPTURE_IMAGES])
        [head_camera_position, head_camera_orientation_q, wrist_camera_position, wrist_camera_orientation_q, env_connection_message] = self.main_connection.recv()
        self.logger.info(env_connection_message)

        self.head_camera_position = head_camera_position
        self.head_camera_orientation_q = head_camera_orientation_q
        self.wrist_camera_position = wrist_camera_position
        self.wrist_camera_orientation_q = wrist_camera_orientation_q

        rgb_image_head = Image.open(config.rgb_image_head_path).convert("RGB")
        depth_image_head = Image.open(config.depth_image_head_path).convert("L")
        depth_array = np.array(depth_image_head) / 255.

        if self.segmentation_count == 0:
            xmem_image = Image.fromarray(np.zeros_like(depth_array)).convert("L")
            xmem_image.save(config.xmem_input_path)

        segmentation_texts = [segmentation_text]

        self.logger.info(PROGRESS + "Segmenting head camera image..." + ENDC)
        model_predictions, boxes, segmentation_texts = models.get_langsam_output(rgb_image_head, self.langsam_model, segmentation_texts, self.segmentation_count)
self.logger.info(OK + "Finished segmenting head camera image!" + ENDC)
1
2023-10-18 16:38:09+00:00
2k
VikParuchuri/classified
app/labeler/raters/instruct.py
[ { "identifier": "Lens", "path": "app/labeler/lens.py", "snippet": "class Lens:\n def __init__(self, lens_type):\n self.lens_type = lens_type\n self.template_dir = os.path.join(settings.LENS_DIR, lens_type)\n self.function = self.get_function()\n self.system_prompt = self.get_system_template()\n self.config = self.get_config()\n self.input_fields = self.config[\"input_fields\"]\n\n def get_system_template(self):\n return render_template(\"system\", self.template_dir)\n\n def prompt_template(self, *args):\n if len(args) != len(self.input_fields):\n raise ValueError(f\"Missing one or more required fields {self.input_fields} for lens {self.lens_type}\")\n\n kwargs = dict(zip(self.input_fields, args))\n return render_template(\"prompt\", self.template_dir, **kwargs)\n\n def get_function(self):\n with open(f\"{self.template_dir}/function.json\") as f:\n functions = json.load(f)\n return functions\n\n def get_config(self):\n with open(f\"{self.template_dir}/config.json\") as f:\n config = json.load(f)\n return config\n\n def labels(self):\n return self.function[\"parameters\"][\"required\"]\n\n def score_labels(self):\n return [l for l in self.labels() if self.function[\"parameters\"][\"properties\"][l][\"type\"] in [\"integer\", \"float\", \"number\"]]\n\n def rationale_labels(self):\n return [l for l in self.labels() if self.function[\"parameters\"][\"properties\"][l][\"type\"] == \"string\"]\n\n def rater_type(self):\n return self.config[\"type\"]" }, { "identifier": "get_final_score", "path": "app/labeler/raters/common.py", "snippet": "def get_final_score(scores):\n final_score = 0\n if all([s >= 2.5 for s in scores]) and scores[-1] >= 2.75:\n final_score = 3\n elif all([s >= 1.5 for s in scores]) and scores[-1] >= 2:\n final_score = 2\n elif all([s >= 0.5 for s in scores]) and scores[-1] >= 1:\n final_score = 1\n return final_score" }, { "identifier": "chat_completion", "path": "app/llm/llm.py", "snippet": "def chat_completion(lens_type, messages, functions: None | List[Dict] = None, model=settings.CHAT_MODEL, max_tokens=settings.MAX_GENERATION_TOKENS, temperature=.2, version=1, cache=True):\n if cache:\n response = query_cached_response(lens_type, messages, functions, model, version)\n if response:\n return response.response\n\n response = _chat_completion(messages, functions, model, max_tokens, temperature)\n\n if cache and response:\n save_cached_response(lens_type, messages, functions, response, model, version)\n\n return response" } ]
import json
from typing import List
from app.labeler.lens import Lens
from app.labeler.raters.common import get_final_score
from app.llm.llm import chat_completion
798
def rate_data(resource: List[str], lens_type: str, version: int = 1):
    lens = Lens(lens_type)
    instruction, output = resource
    user_prompt = lens.prompt_template(instruction, output)

    messages = [
        {"role": "system", "content": lens.system_prompt},
        {"role": "user", "content": user_prompt},
    ]
def rate_data(resource: List[str], lens_type: str, version: int = 1):
    lens = Lens(lens_type)
    instruction, output = resource
    user_prompt = lens.prompt_template(instruction, output)

    messages = [
        {"role": "system", "content": lens.system_prompt},
        {"role": "user", "content": user_prompt},
    ]
chat_response = chat_completion(lens_type, messages, [lens.function], version=version)
2
2023-10-17 18:15:03+00:00
2k
tiejundong/FlexPose
FlexPose/preprocess/prepare_APOPDBbind.py
[ { "identifier": "print_args", "path": "FlexPose/utils/common.py", "snippet": "def print_args(args):\n print('=' * 30 + ' Current settings ' + '=' * 30)\n for k, v in args.__dict__.items():\n print(k.ljust(40, '.'), v)\n print('=' * (60 + len(' Current settings ')))" }, { "identifier": "delmkdir", "path": "FlexPose/utils/common.py", "snippet": "def delmkdir(path, remove_old=True):\n isexist = os.path.exists(path)\n if not isexist:\n os.makedirs(path)\n if isexist == True and remove_old:\n shutil.rmtree(path)\n os.makedirs(path)" }, { "identifier": "try_prepare_APOPDBbind", "path": "FlexPose/preprocess/prepare_for_training.py", "snippet": "def try_prepare_APOPDBbind(*args, **kwargs):\n try:\n save_APOPDBbind(*args, **kwargs)\n except:\n pass" }, { "identifier": "save_APOPDBbind", "path": "FlexPose/preprocess/prepare_for_training.py", "snippet": "def save_APOPDBbind(tup_in):\n dic_data = prepare(tup_in)\n save_path, pdb_id, df_apo_sub, apo_path, pdbbind_path, aff_path, MCaug_path, df_apo_sub, have_apo, max_len_pocket, max_len_ligand, tmp_path = tup_in\n # np.savez_compressed(npz_save_path + '/{}.npz'.format(pdb_id), **dic_data)\n pickle.dump(dic_data, open(save_path + '/{}.pkl'.format(pdb_id), 'wb'))" } ]
import os
import shutil
import sys
import argparse
import pandas as pd
from ray.util.multiprocessing import Pool
from tqdm import tqdm
from FlexPose.utils.common import print_args, delmkdir
from FlexPose.preprocess.prepare_for_training import try_prepare_APOPDBbind, save_APOPDBbind
799
sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2]))


if __name__ == '__main__':
    # main args
    parser = argparse.ArgumentParser()

    # data source
    parser.add_argument('--apobind_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/apobind',
                        help='APObind dataset path')
    parser.add_argument('--pdbbind_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/v2020-PL',
                        help='PDBbind dataset path')
    parser.add_argument('--apo_info_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/apobind_all.csv',
                        help='APObind apo-holo mapping csv path (provided by APObind)')
    parser.add_argument('--aff_info_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/index/INDEX_general_PL_data.2020',
                        help='PDBbind affinity data path')
    parser.add_argument('--aug_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/pdbbind_MC',
                        help='Rosetta decoys (pseudo apo structures)')

    # parameters
    parser.add_argument('--max_len_pocket', type=int,
                        default=50,
                        help='max number of protein pocket residues')
    parser.add_argument('--max_len_ligand', type=int,
                        default=50,
                        help='max number of ligand atoms')

    # other
    parser.add_argument('--tmp_path', type=str,
                        default='./tmp',
                        help='tmp file for temporary saving')

    # output
    parser.add_argument('--save_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/processed_data_maxp50_maxl50',
                        help='output path (preprocessed), npz or pkl')

    args = parser.parse_args()
sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2]))


if __name__ == '__main__':
    # main args
    parser = argparse.ArgumentParser()

    # data source
    parser.add_argument('--apobind_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/apobind',
                        help='APObind dataset path')
    parser.add_argument('--pdbbind_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/v2020-PL',
                        help='PDBbind dataset path')
    parser.add_argument('--apo_info_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/apobind_all.csv',
                        help='APObind apo-holo mapping csv path (provided by APObind)')
    parser.add_argument('--aff_info_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/index/INDEX_general_PL_data.2020',
                        help='PDBbind affinity data path')
    parser.add_argument('--aug_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/pdbbind_MC',
                        help='Rosetta decoys (pseudo apo structures)')

    # parameters
    parser.add_argument('--max_len_pocket', type=int,
                        default=50,
                        help='max number of protein pocket residues')
    parser.add_argument('--max_len_ligand', type=int,
                        default=50,
                        help='max number of ligand atoms')

    # other
    parser.add_argument('--tmp_path', type=str,
                        default='./tmp',
                        help='tmp file for temporary saving')

    # output
    parser.add_argument('--save_path', type=str,
                        default='/home/dtj/work_site/test/tmp/data/processed_data_maxp50_maxl50',
                        help='output path (preprocessed), npz or pkl')

    args = parser.parse_args()
print_args(args)
0
2023-10-19 22:03:51+00:00
2k
openvpi/SingingVocoders
modules/loss/vaeHiFiloss.py
[ { "identifier": "RSSLoss", "path": "modules/ddsp/loss.py", "snippet": "class RSSLoss(nn.Module):\n '''\n Random-scale Spectral Loss.\n '''\n \n def __init__(self, fft_min, fft_max, n_scale, alpha=1.0, overlap=0, eps=1e-7, device='cuda'):\n super().__init__()\n self.fft_min = fft_min\n self.fft_max = fft_max\n self.n_scale = n_scale\n self.lossdict = {}\n for n_fft in range(fft_min, fft_max):\n self.lossdict[n_fft] = SSSLoss(n_fft, alpha, overlap, eps).to(device)\n \n def forward(self, x_pred, x_true):\n value = 0.\n n_ffts = torch.randint(self.fft_min, self.fft_max, (self.n_scale,))\n for n_fft in n_ffts:\n loss_func = self.lossdict[int(n_fft)]\n value += loss_func(x_true, x_pred)\n return value / self.n_scale" }, { "identifier": "warp_stft", "path": "modules/loss/stft_loss.py", "snippet": "class warp_stft:\n def __init__(self,cfg={},divce='cuda'):\n self.stft=MultiResolutionSTFTLoss(**cfg).to(divce)\n\n\n\n def loss(self,x, y):\n return self.stft(x, y)" }, { "identifier": "PitchAdjustableMelSpectrogram", "path": "utils/wav2mel.py", "snippet": "class PitchAdjustableMelSpectrogram:\n def __init__(\n self,\n sample_rate=44100,\n n_fft=2048,\n win_length=2048,\n hop_length=512,\n f_min=40,\n f_max=16000,\n n_mels=128,\n center=False,\n ):\n self.sample_rate = sample_rate\n self.n_fft = n_fft\n self.win_size = win_length\n self.hop_length = hop_length\n self.f_min = f_min\n self.f_max = f_max\n self.n_mels = n_mels\n self.center = center\n\n self.mel_basis = {}\n self.hann_window = {}\n\n def __call__(self, y, key_shift=0, speed=1.0):\n factor = 2 ** (key_shift / 12)\n n_fft_new = int(np.round(self.n_fft * factor))\n win_size_new = int(np.round(self.win_size * factor))\n hop_length = int(np.round(self.hop_length * speed))\n\n # if torch.min(y) < -1.0:\n # logger.warning(f\"min value is {torch.min(y)}\")\n # if torch.max(y) > 1.0:\n # logger.warning(f\"max value is {torch.max(y)}\")\n\n mel_basis_key = f\"{self.f_max}_{y.device}\"\n if mel_basis_key not in self.mel_basis:\n mel = librosa_mel_fn(\n sr=self.sample_rate,\n n_fft=self.n_fft,\n n_mels=self.n_mels,\n fmin=self.f_min,\n fmax=self.f_max,\n )\n self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)\n\n hann_window_key = f\"{key_shift}_{y.device}\"\n if hann_window_key not in self.hann_window:\n self.hann_window[hann_window_key] = torch.hann_window(\n win_size_new, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (\n int((win_size_new - hop_length) // 2),\n int((win_size_new - hop_length+1) // 2),\n ),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft_new,\n hop_length=hop_length,\n win_length=win_size_new,\n window=self.hann_window[hann_window_key],\n center=self.center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=True,\n ).abs()\n # spec = torch.view_as_real(spec)\n # spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))\n\n if key_shift != 0:\n size = self.n_fft // 2 + 1\n resize = spec.size(1)\n if resize < size:\n spec = F.pad(spec, (0, 0, 0, size - resize))\n\n spec = spec[:, :size, :] * self.win_size / win_size_new\n\n spec = torch.matmul(self.mel_basis[mel_basis_key], spec)\n\n return spec\n\n def dynamic_range_compression_torch(self,x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.ddsp.loss import RSSLoss
from modules.loss.stft_loss import warp_stft
from utils.wav2mel import PitchAdjustableMelSpectrogram
1,287
def kl_loss(logs, m):
    kl = 0.5 * (m**2 + torch.exp(logs) - logs - 1).sum(dim=1)
    kl = torch.mean(kl)
    return kl


class HiFiloss(nn.Module):
    def __init__(self,config:dict):
        super().__init__()
def kl_loss(logs, m):
    kl = 0.5 * (m**2 + torch.exp(logs) - logs - 1).sum(dim=1)
    kl = torch.mean(kl)
    return kl


class HiFiloss(nn.Module):
    def __init__(self,config:dict):
        super().__init__()
self.mel=PitchAdjustableMelSpectrogram( sample_rate=config['audio_sample_rate'],
2
2023-10-17 13:45:09+00:00
2k
RobertCsordas/moe
tasks/simple/language_model/wikitext103_sp_transformer.py
[ { "identifier": "Enwik8Transformer", "path": "tasks/simple/language_model/enwik8_transformer.py", "snippet": "class Enwik8Transformer(TransformerLMMixin, SimpleTask):\n VALID_NUM_WORKERS = 1\n TRAIN_NUM_WORKERS = 2\n\n def create_state(self):\n self.helper.state.epoch = 0\n\n def create_model_interface(self):\n self.model_interface = LanguageModelInterface(\n self.model, drop_state_prob=self.helper.args.lm.state_drop_probability, dist_env=self.helper.dist_env)\n self.helper.saver[\"interface\"] = self.model_interface\n\n def validate_on(self, set: torch.utils.data.Dataset, loader: torch.utils.data.DataLoader) -> Tuple[Any, float]:\n state = self.model_interface.state\n self.model_interface.reset_state()\n res = super().validate_on(set, loader)\n self.model_interface.state = state\n return res\n\n def log_epoch(self):\n self.helper.log({\"epoch\": self.helper.state.epoch})\n\n def start_next_epoch(self):\n self.model_interface.reset_state()\n self.helper.state.epoch += 1\n self.log_epoch()\n\n def get_train_batch(self) -> Dict[str, Any]:\n try:\n return next(self.data_iter)\n except StopIteration:\n self.start_next_epoch()\n self.data_iter = iter(self.train_loader)\n return next(self.data_iter)\n\n def create_sampler(self, loader: torch.utils.data.Dataset, batch_size: int) -> \\\n framework.loader.sampler.MultibatchSequentialSampler:\n\n return framework.loader.sampler.MultibatchSequentialSampler(loader, batch_size,\n world_size=self.helper.dist_env.world_size, rank=self.helper.dist_env.rank)\n\n def create_valid_loader(self, vset: torch.utils.data.Dataset) -> torch.utils.data.DataLoader:\n return torch.utils.data.DataLoader(vset,\n batch_sampler=self.create_sampler(vset, self.test_batch_size),\n collate_fn=framework.loader.collate.VarLengthCollate(batch_dim=self.batch_dim),\n num_workers=self.VALID_NUM_WORKERS)\n\n def create_train_loader(self, loader: torch.utils.data.Dataset) -> torch.utils.data.DataLoader:\n sampler = self.create_sampler(loader, self.helper.args.batch_size)\n self.helper.saver.register(\"sampler\", sampler, replace=True)\n\n return torch.utils.data.DataLoader(loader, batch_sampler=sampler, num_workers=self.TRAIN_NUM_WORKERS,\n pin_memory=True, collate_fn=framework.loader.collate.VarLengthCollate(\n batch_dim=self.batch_dim))\n\n def create_datasets(self):\n self.batch_dim = 1\n self.train_set = dataset.Enwik8(\"train\", self.helper.args.lm.unroll)\n self.valid_sets.val = dataset.Enwik8(\"valid\", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll)\n self.valid_sets.test = dataset.Enwik8(\"test\", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll)\n\n def train(self):\n self.log_epoch()\n super().train()" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" } ]
import torch
import dataset
import framework
from .enwik8_transformer import Enwik8Transformer
from ... import task, args
884
@args
def a(parser: framework.helpers.ArgumentParser):
    parser.add_argument("-sentencepiece.n_pieces", default=8000)
@args
def a(parser: framework.helpers.ArgumentParser):
    parser.add_argument("-sentencepiece.n_pieces", default=8000)
@task()
1
2023-10-16 11:26:45+00:00
2k
yk/llmvm
parsing.py
[ { "identifier": "Arg", "path": "interface.py", "snippet": "class Arg(pydantic.BaseModel):\n vtype: str\n value: str" }, { "identifier": "Load", "path": "interface.py", "snippet": "class Load(Expr):\n kind: str = \"load\"\n vtype: str\n ptr: str" }, { "identifier": "Icmp", "path": "interface.py", "snippet": "class Icmp(Expr):\n kind: str = \"icmp\"\n vtype: str\n op: str\n lhs: str\n rhs: str" }, { "identifier": "Srem", "path": "interface.py", "snippet": "class Srem(Expr):\n kind: str = \"srem\"\n vtype: str\n lhs: str\n rhs: str" }, { "identifier": "Add", "path": "interface.py", "snippet": "class Add(Expr):\n kind: str = \"add\"\n vtype: str\n lhs: str\n rhs: str" }, { "identifier": "Mul", "path": "interface.py", "snippet": "class Mul(Expr):\n kind: str = \"mul\"\n vtype: str\n lhs: str\n rhs: str" }, { "identifier": "Call", "path": "interface.py", "snippet": "class Call(Expr):\n kind: str = \"call\"\n name: str\n args: list[Arg]" }, { "identifier": "Assign", "path": "interface.py", "snippet": "class Assign(Instruction):\n kind: str = \"assign\"\n reg: str\n expr: Expr" }, { "identifier": "Store", "path": "interface.py", "snippet": "class Store(Instruction):\n kind: str = \"store\"\n vtype: str\n value: str\n ptr: str" }, { "identifier": "Branch", "path": "interface.py", "snippet": "class Branch(Instruction):\n kind: str = \"branch\"\n label: str" }, { "identifier": "BranchCond", "path": "interface.py", "snippet": "class BranchCond(Instruction):\n kind: str = \"branch_cond\"\n cond_reg: str\n label_true: str\n label_false: str" }, { "identifier": "Return", "path": "interface.py", "snippet": "class Return(Instruction):\n kind: str = \"return\"\n vtype: str\n value: str" }, { "identifier": "Program", "path": "interface.py", "snippet": "class Program(pydantic.BaseModel):\n instructions: list[Instruction]\n labels: dict[str, int]\n constants: dict[str, Any]\n convert_numbers_to_chars: bool = False" }, { "identifier": "to_vtype", "path": "interface.py", "snippet": "def to_vtype(value, vtype):\n match vtype:\n case \"i32\":\n return int(value)\n case \"i8\":\n return str(value)\n case \"str\":\n return str(value)\n raise NotImplementedError(vtype)" }, { "identifier": "GetElementPtr", "path": "interface.py", "snippet": "class GetElementPtr(Expr):\n kind: str = \"get_element_ptr\"\n vtype: str\n ptr: str\n idx: str" }, { "identifier": "Copy", "path": "interface.py", "snippet": "class Copy(Expr):\n kind: str = \"copy\"\n ptr: str" }, { "identifier": "Switch", "path": "interface.py", "snippet": "class Switch(Instruction):\n kind: str = \"switch\"\n ptr: str\n default_label: str\n cases: dict[str, str]" }, { "identifier": "AllocArray", "path": "interface.py", "snippet": "class AllocArray(Expr):\n kind: str = \"alloc_array\"\n vtype: str\n size: int" }, { "identifier": "Alloc", "path": "interface.py", "snippet": "class Alloc(Expr):\n kind: str = \"alloc\"\n vtype: str" } ]
import re from loguru import logger from interface import Arg, Load, Icmp, Srem, Add, Mul, Call, Assign, Store, Branch, BranchCond, Return, Program, to_vtype, GetElementPtr, Copy, Switch, AllocArray, Alloc
1,000
def _line_stripper(in_f): for line in in_f: line = line.rstrip() if not line: continue yield line def parse_arg(arg): logger.debug(f"parse_arg({arg})") if m := re.match(r"ptr noundef (\S+)", arg): return Arg(vtype="str", value=m.group(1)) if m := re.match(r"i32 noundef (\S+)", arg): return Arg(vtype="i32", value=m.group(1)) raise NotImplementedError(arg) def parse_call(expr): logger.debug(f"parse_call({expr})") if m := re.match(r"\s*call \w+(?: \(.*\))? @(\w+)\((.*)\)", expr): name, args = m.groups() args = args.split(", ") args = [parse_arg(arg) for arg in args if arg]
def _line_stripper(in_f): for line in in_f: line = line.rstrip() if not line: continue yield line def parse_arg(arg): logger.debug(f"parse_arg({arg})") if m := re.match(r"ptr noundef (\S+)", arg): return Arg(vtype="str", value=m.group(1)) if m := re.match(r"i32 noundef (\S+)", arg): return Arg(vtype="i32", value=m.group(1)) raise NotImplementedError(arg) def parse_call(expr): logger.debug(f"parse_call({expr})") if m := re.match(r"\s*call \w+(?: \(.*\))? @(\w+)\((.*)\)", expr): name, args = m.groups() args = args.split(", ") args = [parse_arg(arg) for arg in args if arg]
return Call(name=name, args=args)
6
2023-10-23 21:29:14+00:00
2k
w-e-w/sd-webui-nudenet-nsfw-censor
scripts/nudenet_nsfw_censor_scripts/api.py
[ { "identifier": "pil_nude_detector", "path": "scripts/nudenet_nsfw_censor_scripts/pil_nude_detector.py", "snippet": "def draw_ellipse(draw, left_expanded, top_expanded, right_expanded, down_expanded, *args, **kwargs):\ndef draw_rectangle(draw, left_expanded, top_expanded, right_expanded, down_expanded, *args, **kwargs):\ndef rounded_rectangle(draw, left_expanded, top_expanded, right_expanded, down_expanded, width_expanded, height_expanded, rectangle_round_radius, *args, **kwargs):\n def __init__(self):\n def init_onnx(self):\n def change_onnx_provider(self):\n def refresh_label_configs(self):\n def pre_process_pil(self, pil_image):\n def calculate_censor_mask(self, detection_results, img_size, thresholds, expand_horizontal, expand_vertical, nms_threshold, nudenet_nsfw_censor_mask_shape, rectangle_round_radius):\n def get_censor_mask(self, pil_image, nms_threshold, nudenet_nsfw_censor_mask_shape, rectangle_round_radius, thresholds, expand_horizontal, expand_vertical):\nclass PilNudeDetector:" }, { "identifier": "apply_filter", "path": "scripts/nudenet_nsfw_censor_scripts/censor_image_filters.py", "snippet": "def combine_results(input_image, input_mask, processed):\ndef variable_blur(input_image: Image, control_mask: Image, blur_radius: float = 10, blur_strength_curve: float = 3, *args, **kwargs):\n def mask_array_to_img(i):\n def img_gaussian_blur(i):\n def combine_mask(index_1, index_2, pre_step_size):\n def combine(index_1, index_2, pre_step_size):\ndef gaussian_blur(input_image, input_mask, blur_radius, *args, **kwargs):\ndef pixelate(input_image, input_mask, pixelation_factor, *args, **kwargs):\ndef fill_color(input_image, input_mask, color, *args, **kwargs):\ndef do_nothing(input_image, *args, **kwargs):\ndef apply_filter(input_image, input_mask, filter_type, *args, **kwargs):" } ]
from scripts.nudenet_nsfw_censor_scripts.pil_nude_detector import pil_nude_detector, nudenet_labels_index, mask_shapes_func_dict from scripts.nudenet_nsfw_censor_scripts.censor_image_filters import apply_filter, filter_dict from modules.api.api import decode_base64_to_image, encode_pil_to_base64 from fastapi import FastAPI, Body from PIL import ImageFilter from modules import shared from math import sqrt import gradio as gr import numpy as np
682
def nudenet_censor_api(_: gr.Blocks, app: FastAPI): @app.post("/nudenet/censor") async def censor( input_image: str = Body(None, title="base64 input image"), input_mask: str = Body(None, title="base64 mask (optional)"), enable_nudenet: bool = Body(True, title="Enable NudeNet mask detection"), output_mask: bool = Body(None, title="return mask"),
def nudenet_censor_api(_: gr.Blocks, app: FastAPI): @app.post("/nudenet/censor") async def censor( input_image: str = Body(None, title="base64 input image"), input_mask: str = Body(None, title="base64 mask (optional)"), enable_nudenet: bool = Body(True, title="Enable NudeNet mask detection"), output_mask: bool = Body(None, title="return mask"),
filter_type: str = Body(None, title=f"Name of censor filter: {list(filter_dict)}"),
1
2023-10-16 16:44:07+00:00
2k
enkeejunior1/Diffusion-Pullback
src/models/improved_diffusion/unet.py
[ { "identifier": "convert_module_to_f16", "path": "src/models/improved_diffusion/fp16_util.py", "snippet": "def convert_module_to_f16(l):\n \"\"\"\n Convert primitive modules to float16.\n \"\"\"\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):\n l.weight.data = l.weight.data.half()\n l.bias.data = l.bias.data.half()" }, { "identifier": "convert_module_to_f32", "path": "src/models/improved_diffusion/fp16_util.py", "snippet": "def convert_module_to_f32(l):\n \"\"\"\n Convert primitive modules to float32, undoing convert_module_to_f16().\n \"\"\"\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):\n l.weight.data = l.weight.data.float()\n l.bias.data = l.bias.data.float()" }, { "identifier": "SiLU", "path": "src/models/improved_diffusion/nn.py", "snippet": "class SiLU(nn.Module):\n def forward(self, x):\n return x * th.sigmoid(x)" }, { "identifier": "conv_nd", "path": "src/models/improved_diffusion/nn.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "src/models/improved_diffusion/nn.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "avg_pool_nd", "path": "src/models/improved_diffusion/nn.py", "snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "zero_module", "path": "src/models/improved_diffusion/nn.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "normalization", "path": "src/models/improved_diffusion/nn.py", "snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)" }, { "identifier": "timestep_embedding", "path": "src/models/improved_diffusion/nn.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = th.exp(\n -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)\n if dim % 2:\n embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)\n return embedding" }, { "identifier": "checkpoint", "path": "src/models/improved_diffusion/nn.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense 
of extra compute in the backward pass.\n\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)" } ]
from abc import abstractmethod from einops import rearrange, reduce, repeat, einsum from .fp16_util import convert_module_to_f16, convert_module_to_f32 from .nn import ( SiLU, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding, checkpoint, ) import math import time import torchvision.utils as tvu import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F
1,450
class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2): super().__init__() self.channels = channels self.use_conv = use_conv self.dims = dims if use_conv:
class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2): super().__init__() self.channels = channels self.use_conv = use_conv self.dims = dims if use_conv:
self.conv = conv_nd(dims, channels, channels, 3, padding=1)
3
2023-10-21 04:08:44+00:00
2k
NVIDIA-Omniverse/IsaacSim-Automator
src/python/deployer.py
[ { "identifier": "colorize_error", "path": "src/python/utils.py", "snippet": "def colorize_error(text):\n return click.style(text, fg=\"bright_red\", italic=True)" }, { "identifier": "colorize_info", "path": "src/python/utils.py", "snippet": "def colorize_info(text):\n return click.style(text, fg=\"bright_magenta\", italic=True)" }, { "identifier": "colorize_prompt", "path": "src/python/utils.py", "snippet": "def colorize_prompt(text):\n return click.style(text, fg=\"bright_cyan\", italic=True)" }, { "identifier": "colorize_result", "path": "src/python/utils.py", "snippet": "def colorize_result(text):\n return click.style(text, fg=\"bright_green\", italic=True)" }, { "identifier": "read_meta", "path": "src/python/utils.py", "snippet": "def read_meta(deployment_name: str, verbose: bool = False):\n \"\"\"\n Read metadata from json file\n \"\"\"\n\n meta_file = f\"{config['state_dir']}/{deployment_name}/meta.json\"\n\n if os.path.isfile(meta_file):\n data = json.loads(Path(meta_file).read_text())\n if verbose:\n click.echo(colorize_info(f\"* Meta info loaded from '{meta_file}'\"))\n return data\n\n raise Exception(f\"Meta file '{meta_file}' not found\")" }, { "identifier": "shell_command", "path": "src/python/utils.py", "snippet": "def shell_command(\n command, verbose=False, cwd=None, exit_on_error=True, capture_output=False\n):\n \"\"\"\n Execute shell command, print it if debug is enabled\n \"\"\"\n\n if verbose:\n if cwd is not None:\n click.echo(colorize_info(f\"* Running `(cd {cwd} && {command})`...\"))\n else:\n click.echo(colorize_info(f\"* Running `{command}`...\"))\n\n res = subprocess.run(\n command,\n shell=True,\n cwd=cwd,\n capture_output=capture_output,\n )\n\n if res.returncode == 0:\n if verbose and res.stdout is not None:\n click.echo(res.stdout.decode())\n\n elif exit_on_error:\n if res.stderr is not None:\n click.echo(\n colorize_error(f\"Error: {res.stderr.decode()}\"),\n err=True,\n )\n exit(1)\n\n return res" }, { "identifier": "debug_break", "path": "src/python/debug.py", "snippet": "def debug_break():\n debug_start()\n debugpy.breakpoint()" }, { "identifier": "check_ngc_access", "path": "src/python/ngc.py", "snippet": "def check_ngc_access(ngc_api_key, org=\"\", team=\"\", verbose=False):\n \"\"\"\n Checks if NGC API key is valid and user has access to DRIVE Sim.\n\n Returns:\n\n - 0 - all is fine\n - 100 - invalid api key\n - 102 - user is not in the team\n \"\"\"\n\n proc = subprocess.run(\n [f\"{SELF_DIR}/ngc_check.expect\", ngc_api_key, org, team],\n capture_output=not verbose,\n timeout=60,\n )\n\n if proc.returncode not in [0, 100, 101, 102]:\n raise RuntimeError(\n f\"Error checking NGC API Key. Return code: {proc.returncode}\"\n )\n\n return proc.returncode" } ]
import json import os import re import shlex import sys import click from pathlib import Path from src.python.utils import ( colorize_error, colorize_info, colorize_prompt, colorize_result, read_meta, shell_command, ) from src.python.debug import debug_break # noqa from src.python.ngc import check_ngc_access
1,256
# region copyright # Copyright 2023 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # endregion class Deployer: def __init__(self, params, config): self.tf_outputs = {} self.params = params self.config = config self.existing_behavior = None # save original params so we can recreate command line self.input_params = params.copy() # convert "in_china" self.params["in_china"] = {"yes": True, "no": False, "auto": False}[ self.params["in_china"] ] # create state directory if it doesn't exist os.makedirs(self.config["state_dir"], exist_ok=True) # print complete command line if self.params["debug"]: click.echo(colorize_info("* Command:\n" + self.recreate_command_line())) def __del__(self): # update meta info self.save_meta() def save_meta(self): """ Save command parameters in json file, just in case """ meta_file = ( f"{self.config['state_dir']}/{self.params['deployment_name']}/meta.json" ) data = { "command": self.recreate_command_line(separator=" "), "input_params": self.input_params, "params": self.params, "config": self.config, } Path(meta_file).parent.mkdir(parents=True, exist_ok=True) Path(meta_file).write_text(json.dumps(data, indent=4)) if self.params["debug"]: click.echo(colorize_info(f"* Meta info saved to '{meta_file}'"))
# region copyright # Copyright 2023 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # endregion class Deployer: def __init__(self, params, config): self.tf_outputs = {} self.params = params self.config = config self.existing_behavior = None # save original params so we can recreate command line self.input_params = params.copy() # convert "in_china" self.params["in_china"] = {"yes": True, "no": False, "auto": False}[ self.params["in_china"] ] # create state directory if it doesn't exist os.makedirs(self.config["state_dir"], exist_ok=True) # print complete command line if self.params["debug"]: click.echo(colorize_info("* Command:\n" + self.recreate_command_line())) def __del__(self): # update meta info self.save_meta() def save_meta(self): """ Save command parameters in json file, just in case """ meta_file = ( f"{self.config['state_dir']}/{self.params['deployment_name']}/meta.json" ) data = { "command": self.recreate_command_line(separator=" "), "input_params": self.input_params, "params": self.params, "config": self.config, } Path(meta_file).parent.mkdir(parents=True, exist_ok=True) Path(meta_file).write_text(json.dumps(data, indent=4)) if self.params["debug"]: click.echo(colorize_info(f"* Meta info saved to '{meta_file}'"))
def read_meta(self):
4
2023-10-18 17:25:44+00:00
2k
blackgold3/SemanticBoost
mdm/model/clip/clip.py
[ { "identifier": "build_model", "path": "mdm/model/clip/model.py", "snippet": "def build_model(state_dict: dict):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len([k for k in state_dict.keys() if k.startswith(\"visual.\") and k.endswith(\".attn.in_proj_weight\")])\n vision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n grid_size = round((state_dict[\"visual.positional_embedding\"].shape[0] - 1) ** 0.5)\n image_resolution = vision_patch_size * grid_size\n else:\n counts: list = [len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"visual.layer{b}\"))) for b in [1, 2, 3, 4]]\n vision_layers = tuple(counts)\n vision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n output_width = round((state_dict[\"visual.attnpool.positional_embedding\"].shape[0] - 1) ** 0.5)\n vision_patch_size = None\n assert output_width ** 2 + 1 == state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n image_resolution = output_width * 32\n\n embed_dim = state_dict[\"text_projection\"].shape[1]\n context_length = state_dict[\"positional_embedding\"].shape[0]\n vocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = state_dict[\"ln_final.weight\"].shape[0]\n transformer_heads = transformer_width // 64\n transformer_layers = len(set(k.split(\".\")[2] for k in state_dict if k.startswith(\"transformer.resblocks\")))\n\n model = CLIP(\n embed_dim,\n image_resolution, vision_layers, vision_width, vision_patch_size,\n context_length, vocab_size, transformer_width, transformer_heads, transformer_layers\n )\n\n for key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n if key in state_dict:\n del state_dict[key]\n\n convert_weights(model)\n model.load_state_dict(state_dict)\n return model.eval()" }, { "identifier": "SimpleTokenizer", "path": "mdm/model/clip/simple_tokenizer.py", "snippet": "class SimpleTokenizer(object):\n def __init__(self, bpe_path: str = default_bpe()):\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split('\\n')\n merges = merges[1:49152-256-2+1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v+'</w>' for v in vocab]\n for merge in merges:\n vocab.append(''.join(merge))\n vocab.extend(['<|startoftext|>', '<|endoftext|>'])\n self.encoder = dict(zip(vocab, range(len(vocab))))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}\n self.pat = re.compile(r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\", re.IGNORECASE)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + ( token[-1] + '</w>',)\n pairs = get_pairs(word)\n\n if not pairs:\n return token+'</w>'\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n 
else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))\n bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def decode(self, tokens):\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=\"replace\").replace('</w>', ' ')\n return text" } ]
import hashlib import os import urllib import warnings import torch from typing import Any, Union, List from pkg_resources import packaging from PIL import Image from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize from tqdm import tqdm from .model import build_model from .simple_tokenizer import SimpleTokenizer as _Tokenizer from torchvision.transforms import InterpolationMode
1,527
try: BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): warnings.warn("PyTorch version 1.7.1 or higher is recommended") __all__ = ["available_models", "load", "tokenize"]
try: BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): warnings.warn("PyTorch version 1.7.1 or higher is recommended") __all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
0
2023-10-20 14:53:26+00:00
2k
justchenhao/SILI_CD
datasets/base_dataset.py
[ { "identifier": "get_transforms", "path": "datasets/transforms.py", "snippet": "def get_transforms(norm=False, img_size=256):\n basic_transform = []\n basic_transform.append(T.ToTensor()) # ndarray转为 torch.FloatTensor, 范围[0,1]\n if norm:\n basic_transform.append(T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))\n basic_transform.append(T.Resize(size=(img_size, img_size), interpolation=InterpolationMode.BILINEAR))\n return T.Compose(basic_transform)" }, { "identifier": "get_mask_transforms", "path": "datasets/transforms.py", "snippet": "def get_mask_transforms(img_size=256):\n basic_target_transform = T.Compose(\n [\n MaskToTensor(),\n T.Resize(size=(img_size, img_size), interpolation=InterpolationMode.NEAREST),\n ]\n )\n return basic_target_transform" }, { "identifier": "get_seg_augs", "path": "datasets/transforms.py", "snippet": "def get_seg_augs(imgz_size=256, data_keys=(\"input\", \"mask\")):\n default_seg_augs = K.AugmentationSequential(\n K.RandomHorizontalFlip(p=0.5),\n K.RandomVerticalFlip(p=0.5),\n K.RandomResizedCrop(\n size=(imgz_size, imgz_size), scale=(0.8, 1.0), resample=\"bilinear\", align_corners=False\n ),\n K.RandomGaussianBlur(kernel_size=(3, 3), sigma=(0.1, 2.0), p=0.5),\n K.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n data_keys=data_keys\n )\n return default_seg_augs" }, { "identifier": "visualize_tensors", "path": "misc/torchutils.py", "snippet": "def visualize_tensors(*tensors):\n \"\"\"\n 可视化tensor,支持单通道特征或3通道图像\n :param tensors: tensor: C*H*W, C=1/3\n :return:\n \"\"\"\n import matplotlib.pyplot as plt\n # from misc.torchutils import tensor2np\n images = []\n for tensor in tensors:\n assert tensor.ndim == 3 or tensor.ndim==2\n if tensor.ndim ==3:\n assert tensor.shape[0] == 1 or tensor.shape[0] == 3\n images.append(tensor2np(tensor))\n nums = len(images)\n if nums>1:\n fig, axs = plt.subplots(1, nums)\n for i, image in enumerate(images):\n axs[i].imshow(image, cmap='jet')\n plt.show()\n elif nums == 1:\n fig, ax = plt.subplots(1, nums)\n for i, image in enumerate(images):\n ax.imshow(image, cmap='jet')\n plt.show()" } ]
import os import numpy as np import torch from typing import Dict, Sequence, Tuple, Optional, Union from PIL import Image from torch.utils import data from datasets.transforms import get_transforms, get_mask_transforms from datasets.transforms import get_seg_augs from misc.imutils import pil_rescale, pil_resize from misc.torchutils import visualize_tensors
1,580
""" some basic data loader for example: Image loader, Segmentation loader, data root ├─A ├─label └─list """ def load_img_name_list(dataset_path): img_name_list = np.loadtxt(dataset_path, dtype=str) if img_name_list.ndim == 2: return img_name_list[:, 0] return img_name_list class ImageDataset(data.Dataset): """list dataloder""" def __init__(self, root_dir: str, split: str = 'train', img_size: int = 256, norm: bool = False, img_folder_name: Union[str, list, tuple] = 'A', list_folder_name: str = 'list', scale_ratios: Union[int, list] = 1): super(ImageDataset, self).__init__() self.root_dir = root_dir self.split = split # train | train_aug | val self.list_path = os.path.join(self.root_dir, list_folder_name, self.split+'.txt') self.img_name_list = load_img_name_list(self.list_path) if isinstance(img_folder_name, list) or isinstance(img_folder_name, tuple): # 此处为了兼容存在多个img_folder,内部文件共名字的情况,比如img_folder_name=['A','B'] self.img_folder_with_name_list = [img_folder_name_+'/'+name for name in self.img_name_list for img_folder_name_ in img_folder_name] elif isinstance(img_folder_name, str): self.img_folder_with_name_list = [img_folder_name+'/'+name for name in self.img_name_list] else: raise NotImplementedError self.A_size = len(self.img_folder_with_name_list) # get the size of dataset A self.img_folder_name = img_folder_name self.img_size = img_size self.norm = norm self.basic_transforms = get_transforms(norm=norm, img_size=img_size) self.scale_ratios = scale_ratios def __getitem__(self, index): folder_with_name = self.img_folder_with_name_list[index % self.A_size] img_folder_name = folder_with_name.split('/')[0] name = folder_with_name.split('/')[-1] A_path = os.path.join(self.root_dir, img_folder_name, name) img = np.asarray(Image.open(A_path).convert('RGB')) scales = self.scale_ratios if isinstance(scales, list): scale = scales[torch.randint(len(scales), (1,)).item()] else: scale = scales if scale != 1: h, w = img.shape[:2] img = pil_rescale(img, scale=scale, order=3) img = pil_resize(img, size=[h, w], order=3) if self.basic_transforms is not None: img = self.basic_transforms(img) return {'A': img, 'name': name} def __len__(self): """Return the total number of images in the dataset.""" return self.A_size class SegDataset(ImageDataset): ''' transforms: 表示同时对image 和 mask 做变换; ''' def __init__(self, root_dir: str, split: str = 'train', img_size: int = 256, norm: bool = False, img_folder_name: Union[str, list, tuple] = 'A', label_transform: str = 'norm', label_folder_name: str = 'label', scale_ratios: Union[int, list] = 1): super(SegDataset, self).__init__(root_dir, split=split, img_size=img_size, norm=norm, img_folder_name=img_folder_name, scale_ratios=scale_ratios)
""" some basic data loader for example: Image loader, Segmentation loader, data root ├─A ├─label └─list """ def load_img_name_list(dataset_path): img_name_list = np.loadtxt(dataset_path, dtype=str) if img_name_list.ndim == 2: return img_name_list[:, 0] return img_name_list class ImageDataset(data.Dataset): """list dataloder""" def __init__(self, root_dir: str, split: str = 'train', img_size: int = 256, norm: bool = False, img_folder_name: Union[str, list, tuple] = 'A', list_folder_name: str = 'list', scale_ratios: Union[int, list] = 1): super(ImageDataset, self).__init__() self.root_dir = root_dir self.split = split # train | train_aug | val self.list_path = os.path.join(self.root_dir, list_folder_name, self.split+'.txt') self.img_name_list = load_img_name_list(self.list_path) if isinstance(img_folder_name, list) or isinstance(img_folder_name, tuple): # 此处为了兼容存在多个img_folder,内部文件共名字的情况,比如img_folder_name=['A','B'] self.img_folder_with_name_list = [img_folder_name_+'/'+name for name in self.img_name_list for img_folder_name_ in img_folder_name] elif isinstance(img_folder_name, str): self.img_folder_with_name_list = [img_folder_name+'/'+name for name in self.img_name_list] else: raise NotImplementedError self.A_size = len(self.img_folder_with_name_list) # get the size of dataset A self.img_folder_name = img_folder_name self.img_size = img_size self.norm = norm self.basic_transforms = get_transforms(norm=norm, img_size=img_size) self.scale_ratios = scale_ratios def __getitem__(self, index): folder_with_name = self.img_folder_with_name_list[index % self.A_size] img_folder_name = folder_with_name.split('/')[0] name = folder_with_name.split('/')[-1] A_path = os.path.join(self.root_dir, img_folder_name, name) img = np.asarray(Image.open(A_path).convert('RGB')) scales = self.scale_ratios if isinstance(scales, list): scale = scales[torch.randint(len(scales), (1,)).item()] else: scale = scales if scale != 1: h, w = img.shape[:2] img = pil_rescale(img, scale=scale, order=3) img = pil_resize(img, size=[h, w], order=3) if self.basic_transforms is not None: img = self.basic_transforms(img) return {'A': img, 'name': name} def __len__(self): """Return the total number of images in the dataset.""" return self.A_size class SegDataset(ImageDataset): ''' transforms: 表示同时对image 和 mask 做变换; ''' def __init__(self, root_dir: str, split: str = 'train', img_size: int = 256, norm: bool = False, img_folder_name: Union[str, list, tuple] = 'A', label_transform: str = 'norm', label_folder_name: str = 'label', scale_ratios: Union[int, list] = 1): super(SegDataset, self).__init__(root_dir, split=split, img_size=img_size, norm=norm, img_folder_name=img_folder_name, scale_ratios=scale_ratios)
self.basic_mask_transforms = get_mask_transforms(img_size=img_size)
1
2023-10-21 09:09:57+00:00
2k
pythonlessons/FinRock
finrock/indicators.py
[ { "identifier": "RenderOptions", "path": "finrock/render.py", "snippet": "class RenderOptions:\n def __init__(\n self, \n name: str,\n color: tuple,\n window_type: WindowType,\n render_type: RenderType, \n min: float, \n max: float, \n value: float = None,\n ):\n self.name = name\n self.color = color\n self.window_type = window_type\n self.render_type = render_type\n self.min = min\n self.max = max\n self.value = value\n\n def copy(self):\n return RenderOptions(\n name=self.name,\n color=self.color,\n window_type=self.window_type,\n render_type=self.render_type,\n min=self.min,\n max=self.max,\n value=self.value\n )" }, { "identifier": "RenderType", "path": "finrock/render.py", "snippet": "class RenderType(Enum):\n LINE = 0\n DOT = 1" }, { "identifier": "WindowType", "path": "finrock/render.py", "snippet": "class WindowType(Enum):\n MAIN = 0\n SEPERATE = 1" } ]
import pandas as pd from .render import RenderOptions, RenderType, WindowType
1,008
class Indicator: """ Base class for indicators """ def __init__( self, data: pd.DataFrame, target_column: str='close', render_options: dict={} ) -> None: self._data = data.copy() self._target_column = target_column self._render_options = render_options self.values = {} assert isinstance(self._data, pd.DataFrame) == True, "data must be a pandas.DataFrame" assert self._target_column in self._data.columns, f"data must have '{self._target_column}' column" self.compute() if not self._render_options: self._render_options = self.default_render_options() @property def min(self): return self._data[self.target_column].min() @property def max(self): return self._data[self.target_column].max() @property def target_column(self): return self._target_column @property def name(self): return self.__class__.__name__ @property def names(self): return self._names def compute(self): raise NotImplementedError def default_render_options(self): return {} def render_options(self): return {name: option.copy() for name, option in self._render_options.items()} def __getitem__(self, index: int): row = self._data.iloc[index] for name in self.names: if pd.isna(row[name]): return None self.values[name] = row[name] if self._render_options.get(name): self._render_options[name].value = row[name] return self.serialise() def __call__(self, index: int): return self[index] def serialise(self): return { 'name': self.name, 'names': self.names, 'values': self.values.copy(), 'target_column': self.target_column, 'render_options': self.render_options(), 'min': self.min, 'max': self.max } class SMA(Indicator): """ Trend indicator A simple moving average (SMA) calculates the average of a selected range of prices, usually closing prices, by the number of periods in that range. The SMA is a technical indicator for determining if an asset price will continue or reverse a bull or bear trend. It is calculated by summing up the closing prices of a stock over time and then dividing that total by the number of time periods being examined. Short-term averages respond quickly to changes in the price of the underlying, while long-term averages are slow to react. https://www.investopedia.com/terms/s/sma.asp """ def __init__( self, data: pd.DataFrame, period: int=20, target_column: str='close', render_options: dict={} ): self._period = period self._names = [f'SMA{period}'] super().__init__(data, target_column, render_options) @property def min(self): return self._data[self.names[0]].min() @property def max(self): return self._data[self.names[0]].max() def default_render_options(self): return {name: RenderOptions( name=name, color=(100, 100, 255),
class Indicator: """ Base class for indicators """ def __init__( self, data: pd.DataFrame, target_column: str='close', render_options: dict={} ) -> None: self._data = data.copy() self._target_column = target_column self._render_options = render_options self.values = {} assert isinstance(self._data, pd.DataFrame) == True, "data must be a pandas.DataFrame" assert self._target_column in self._data.columns, f"data must have '{self._target_column}' column" self.compute() if not self._render_options: self._render_options = self.default_render_options() @property def min(self): return self._data[self.target_column].min() @property def max(self): return self._data[self.target_column].max() @property def target_column(self): return self._target_column @property def name(self): return self.__class__.__name__ @property def names(self): return self._names def compute(self): raise NotImplementedError def default_render_options(self): return {} def render_options(self): return {name: option.copy() for name, option in self._render_options.items()} def __getitem__(self, index: int): row = self._data.iloc[index] for name in self.names: if pd.isna(row[name]): return None self.values[name] = row[name] if self._render_options.get(name): self._render_options[name].value = row[name] return self.serialise() def __call__(self, index: int): return self[index] def serialise(self): return { 'name': self.name, 'names': self.names, 'values': self.values.copy(), 'target_column': self.target_column, 'render_options': self.render_options(), 'min': self.min, 'max': self.max } class SMA(Indicator): """ Trend indicator A simple moving average (SMA) calculates the average of a selected range of prices, usually closing prices, by the number of periods in that range. The SMA is a technical indicator for determining if an asset price will continue or reverse a bull or bear trend. It is calculated by summing up the closing prices of a stock over time and then dividing that total by the number of time periods being examined. Short-term averages respond quickly to changes in the price of the underlying, while long-term averages are slow to react. https://www.investopedia.com/terms/s/sma.asp """ def __init__( self, data: pd.DataFrame, period: int=20, target_column: str='close', render_options: dict={} ): self._period = period self._names = [f'SMA{period}'] super().__init__(data, target_column, render_options) @property def min(self): return self._data[self.names[0]].min() @property def max(self): return self._data[self.names[0]].max() def default_render_options(self): return {name: RenderOptions( name=name, color=(100, 100, 255),
window_type=WindowType.MAIN,
2
2023-10-23 07:44:54+00:00
2k
hitlic/deepepochs
deepepochs/metrics.py
[ { "identifier": "sum_dicts", "path": "deepepochs/loops.py", "snippet": "def sum_dicts(dicts, to_np=False):\r\n dicts = concat_dicts(dicts, to_np)\r\n return ddict({k: sum(v) for k, v in dicts.items()})\r" }, { "identifier": "ddict", "path": "deepepochs/loops.py", "snippet": "class ddict(dict):\r\n \"\"\"\r\n 可以通过“.”访问的字典。\r\n \"\"\"\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n for arg in args:\r\n if isinstance(arg, dict):\r\n for k, v in arg.items():\r\n if isinstance(v, dict):\r\n self[k] = ddict(v)\r\n else:\r\n self[k] = v\r\n if kwargs:\r\n for k, v in kwargs.items():\r\n if isinstance(v, dict):\r\n self[k] = ddict(v)\r\n else:\r\n self[k] = v\r\n\r\n def __getattr__(self, key):\r\n value = self[key]\r\n return value\r\n\r\n def __setattr__(self, key, value):\r\n self.__setitem__(key, value)\r\n\r\n def __setitem__(self, key, value):\r\n super().__setitem__(key, value)\r\n self.__dict__.update({key: value})\r\n\r\n def __delattr__(self, item):\r\n self.__delitem__(item)\r\n\r\n def __delitem__(self, key):\r\n super().__delitem__(key)\r\n del self.__dict__[key]\r\n\r\n def __deepcopy__(self, memo=None, _nil=[]): # pylint: disable=W0102\r\n dd = dict(self)\r\n return deepcopy(dd)\r" } ]
from functools import lru_cache from .loops import sum_dicts, ddict import torch
752
""" @author: liuchen """ @lru_cache(maxsize=1) def confusion_matrix(preds, targets, num_classes): """ Args: preds: 预测向量,可为binary或多维概率分布 targets: 标签向量,可为one-hot或非one-hot的 num_class: 类别数量 """ if (preds.dim()==1 or preds.shape[-1]==1) and num_classes==2: # 当预测为binary时 preds = preds.unsqueeze(-1) if preds.dim()==1 else preds preds = torch.concat([1-preds, preds], dim=-1) preds = preds.argmax(dim=-1).flatten().int() if targets.dim() > 1 and targets.shape[-1] > 1: # 当targets为one-hot时 targets = targets.argmax(dim=1).int() else: targets = targets.flatten().int() cm = torch.zeros([num_classes, num_classes], dtype=preds.dtype, device=preds.device) one = torch.tensor([1], dtype=preds.dtype, device=preds.device) return cm.index_put_((targets, preds), one, accumulate=True) @lru_cache(maxsize=1) def cmats_and_weights(c_mat): """获取各类别的混淆矩阵和权值""" if c_mat.shape[0] == 2:
""" @author: liuchen """ @lru_cache(maxsize=1) def confusion_matrix(preds, targets, num_classes): """ Args: preds: 预测向量,可为binary或多维概率分布 targets: 标签向量,可为one-hot或非one-hot的 num_class: 类别数量 """ if (preds.dim()==1 or preds.shape[-1]==1) and num_classes==2: # 当预测为binary时 preds = preds.unsqueeze(-1) if preds.dim()==1 else preds preds = torch.concat([1-preds, preds], dim=-1) preds = preds.argmax(dim=-1).flatten().int() if targets.dim() > 1 and targets.shape[-1] > 1: # 当targets为one-hot时 targets = targets.argmax(dim=1).int() else: targets = targets.flatten().int() cm = torch.zeros([num_classes, num_classes], dtype=preds.dtype, device=preds.device) one = torch.tensor([1], dtype=preds.dtype, device=preds.device) return cm.index_put_((targets, preds), one, accumulate=True) @lru_cache(maxsize=1) def cmats_and_weights(c_mat): """获取各类别的混淆矩阵和权值""" if c_mat.shape[0] == 2:
c_mat = ddict({
1
2023-10-19 05:41:48+00:00
2k
colour-science/colour-visuals
colour_visuals/axes.py
[ { "identifier": "DEFAULT_FLOAT_DTYPE_WGPU", "path": "colour_visuals/common.py", "snippet": "DEFAULT_FLOAT_DTYPE_WGPU = np.float32" }, { "identifier": "unlatexify", "path": "colour_visuals/common.py", "snippet": "def unlatexify(text: str) -> str:\n \"\"\"\n Unlatexify given string.\n\n\n Parameters\n ----------\n text\n String to remove the *LaTeX* character markup from.\n\n Returns\n -------\n :class:`str`\n Unlatexified string.\n \"\"\"\n\n return re.sub(r\"[$^_{}]\", \"\", text)" }, { "identifier": "MixinPropertyModel", "path": "colour_visuals/visual.py", "snippet": "class MixinPropertyModel:\n \"\"\"\n Define a mixin for a colourspace model.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyModel.model`\n \"\"\"\n\n def __init__(self):\n self._model = \"CIE xyY\"\n\n super().__init__()\n\n @visual_property\n def model(self) -> LiteralColourspaceModel | str:\n \"\"\"\n Getter and setter property for the colourspace model.\n\n Parameters\n ----------\n value\n Value to set the colourspace model with.\n\n Returns\n -------\n :class:`str`\n Colourspace model.\n \"\"\"\n\n return self._model\n\n @model.setter\n def model(self, value: LiteralColourspaceModel | str):\n \"\"\"Setter for the **self.model** property.\"\"\"\n\n self._model = validate_method(value, tuple(COLOURSPACE_MODELS))" }, { "identifier": "MixinPropertySize", "path": "colour_visuals/visual.py", "snippet": "class MixinPropertySize:\n \"\"\"\n Define a mixin for a size value.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertySize.size`\n \"\"\"\n\n def __init__(self):\n self._size = 1\n\n super().__init__()\n\n @visual_property\n def size(self) -> float:\n \"\"\"\n Getter and setter property for the size value.\n\n Parameters\n ----------\n value\n Value to set size value with.\n\n Returns\n -------\n :class:`int`\n Size value.\n \"\"\"\n\n return self._size\n\n @size.setter\n def size(self, value: float):\n \"\"\"Setter for the **self.size** property.\"\"\"\n\n self._size = value" }, { "identifier": "Visual", "path": "colour_visuals/visual.py", "snippet": "class Visual(gfx.Group, metaclass=ABCMeta):\n \"\"\"Define the base class for the visuals.\"\"\"\n\n def __init__(self):\n self._is_update_blocked = False\n\n super().__init__()\n\n @contextmanager\n def block_update(self) -> Generator:\n \"\"\"Define a context manager that blocks the visual updates.\"\"\"\n self._is_update_blocked = True\n\n yield\n\n self._is_update_blocked = False\n\n @abstractmethod\n def update(self):\n \"\"\"\n Update the visual.\n\n Notes\n -----\n - Must be reimplemented by sub-classes.\n \"\"\"" } ]
import numpy as np import pygfx as gfx from colour.hints import LiteralColourspaceModel from colour.models import COLOURSPACE_MODELS_AXIS_LABELS from colour.plotting import ( CONSTANTS_COLOUR_STYLE, colourspace_model_axis_reorder, ) from colour.utilities import as_int_array from colour_visuals.common import ( DEFAULT_FLOAT_DTYPE_WGPU, unlatexify, ) from colour_visuals.visual import ( MixinPropertyModel, MixinPropertySize, Visual, )
986
# !/usr/bin/env python """ Axes Visuals ============ Defines the axes visuals: - :class:`colour_visuals.VisualAxes` """ from __future__ import annotations __author__ = "Colour Developers" __copyright__ = "Copyright 2023 Colour Developers" __license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "colour-developers@colour-science.org" __status__ = "Production" __all__ = ["VisualAxes"]
# !/usr/bin/env python """ Axes Visuals ============ Defines the axes visuals: - :class:`colour_visuals.VisualAxes` """ from __future__ import annotations __author__ = "Colour Developers" __copyright__ = "Copyright 2023 Colour Developers" __license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "colour-developers@colour-science.org" __status__ = "Production" __all__ = ["VisualAxes"]
class VisualAxes(MixinPropertyModel, MixinPropertySize, Visual):
3
2023-10-15 04:30:47+00:00
2k
JiahuiLei/NAP
dataset/partnet_m_grouping.py
[ { "identifier": "cfg_with_default", "path": "core/models/utils/misc.py", "snippet": "def cfg_with_default(cfg, key_list, default):\n root = cfg\n for k in key_list:\n if k in root.keys():\n root = root[k]\n else:\n return default\n return root" }, { "identifier": "compact_pack", "path": "object_utils/arti_graph_utils_v3.py", "snippet": "def compact_pack(V, E, K=25, permute=True):\n if len(V) > K:\n print(f\"Warning, extend {K} to {len(V)}\")\n K = len(V)\n num_v = len(V)\n n_empty = K - num_v\n\n # Nodes\n v_mask = np.zeros(K, dtype=np.bool)\n v_mask[: len(V)] = True\n if permute:\n # in the origin index, first num_v are object\n v_map = np.random.permutation(K).tolist() # stores the original id\n else:\n v_map = np.arange(K).tolist()\n v_mask = [v_mask[i] for i in v_map]\n\n _v_bbox = [v[\"bbox_L\"] for v in V] + [np.zeros(3)] * n_empty\n v_bbox = [_v_bbox[i] for i in v_map]\n v_bbox = torch.from_numpy(np.stack(v_bbox, axis=0)).float()\n # p_global = T_gl @ p_local\n _v_t_gl = [v[\"abs_center\"] for v in V] + [np.zeros(3)] * n_empty\n v_t_gl = [_v_t_gl[i] for i in v_map]\n v_t_gl = torch.from_numpy(np.stack(v_t_gl, axis=0)).float()\n # ! Now assume partnet-M all init part R = I\n v_r_gl = torch.zeros(K, 3).float()\n ret_v = torch.cat([torch.LongTensor(v_mask)[..., None], v_bbox, v_r_gl, v_t_gl], -1)\n\n # Edges\n total_edges = int(K * (K - 1) / 2) # include invalid\n e_plucker = torch.zeros((total_edges, 6), dtype=torch.float32)\n e_lim = torch.zeros((total_edges, 4), dtype=torch.float32)\n e_type = torch.zeros((total_edges), dtype=torch.long) # [0,1,2] [empty, ij, ji]\n for e in E:\n # ! by default, the list of edges represent the upper triangle, i.e. row i, col j, then i < j\n _src_ind, _dst_ind = e[\"e0\"][\"src_ind\"], e[\"e0\"][\"dst_ind\"]\n src_ind, dst_ind = v_map.index(_src_ind), v_map.index(_dst_ind)\n plucker = e[\"e0\"][\"plucker\"]\n # transform the plucker to global frame\n _r_global = v_r_gl[src_ind]\n _t_global = v_t_gl[src_ind]\n plucker_global = torch.from_numpy(plucker.copy()).float()\n _R_global = axis_angle_to_matrix(_r_global)\n _lg = _R_global @ plucker_global[:3]\n _mg = _R_global @ plucker_global[3:] + torch.cross(_t_global, _lg)\n plucker_global = torch.cat([_lg, _mg], 0)\n flip = plucker_need_flip(plucker_global)\n if flip: # orient the global plucker to hemisphere\n plucker_global = -plucker_global\n\n if src_ind > dst_ind: # i = dst, j = src\n i, j = dst_ind, src_ind\n flip = not flip # when reverse the src and dst, the plucker should multiply by -1.0\n elif src_ind < dst_ind:\n i, j = src_ind, dst_ind\n else:\n raise ValueError(\"src_ind == dst_ind\")\n e_list_ind = map_upper_triangle_to_list(i, j, K)\n\n if flip: # 2 is flip plucker\n e_type[e_list_ind] = 2\n else: # 1 is not flip plucker\n e_type[e_list_ind] = 1\n\n e_lim[e_list_ind, :2] = torch.Tensor(e[\"r_limits\"])\n e_lim[e_list_ind, 2:] = torch.Tensor(e[\"p_limits\"])\n \n # # debug\n # print(e[\"r_limits\"], e[\"p_limits\"])\n \n # assert e[\"r_limits\"][0] <= e[\"r_limits\"][1]\n # assert e[\"p_limits\"][0] <= e[\"p_limits\"][1]\n\n e_plucker[e_list_ind] = plucker_global\n\n e_type = F.one_hot(e_type, num_classes=3).float()\n\n ret_e = torch.cat([e_type, e_plucker, e_lim], dim=1)\n # v: [mask_occ(1), bbox(3), r_gl(3), t_gl(3) | additional codes in the future]\n # e: [type(3), plucker(6), rlim(2), plim(2)]\n return ret_v, ret_e, v_map" }, { "identifier": "map_upper_triangle_to_list", "path": "object_utils/arti_graph_utils_v3.py", "snippet": "def map_upper_triangle_to_list(i, j, K):\n assert i < j, 
\"not upper triangle\"\n e_list_ind = i * (2 * K - i - 1) // 2 + j - i - 1\n return e_list_ind" } ]
from random import random from torch.utils.data import Dataset from os.path import join from core.models.utils.misc import cfg_with_default from tqdm import tqdm from object_utils.arti_graph_utils_v3 import compact_pack, map_upper_triangle_to_list from copy import deepcopy from torch.utils.data import WeightedRandomSampler import logging import json import os import os.path as osp import numpy as np import torch
1,561
# Load processed PartNet-Mobility graph # v5: from v4 use new full random permute, not first 1 v_mask class Dataset(Dataset): def __init__(self, cfg, mode) -> None: super().__init__() d_cfg = cfg["dataset"] self.mode = mode.lower() self.dataset_proportion = d_cfg["dataset_proportion"][cfg["modes"].index(self.mode)] self.data_root = join(cfg["root"], d_cfg["data_root"]) self.pad_nv = d_cfg["max_K"] self.pad_np = d_cfg["max_P"] self.n_pcl = d_cfg["n_pcl"] self.valid_obj_ind = self.load_split( d_cfg["split_path"], phase=self.mode, cates=d_cfg["cates"] )
# Load processed PartNet-Mobility graph # v5: from v4 use new full random permute, not first 1 v_mask class Dataset(Dataset): def __init__(self, cfg, mode) -> None: super().__init__() d_cfg = cfg["dataset"] self.mode = mode.lower() self.dataset_proportion = d_cfg["dataset_proportion"][cfg["modes"].index(self.mode)] self.data_root = join(cfg["root"], d_cfg["data_root"]) self.pad_nv = d_cfg["max_K"] self.pad_np = d_cfg["max_P"] self.n_pcl = d_cfg["n_pcl"] self.valid_obj_ind = self.load_split( d_cfg["split_path"], phase=self.mode, cates=d_cfg["cates"] )
self.balance_flag = cfg_with_default(d_cfg, ["balance_flag"], False)
0
2023-10-22 03:46:35+00:00
2k
yongliang-wu/ExploreCfg
open_flamingo/src/flamingo_lm.py
[ { "identifier": "GatedCrossAttentionBlock", "path": "open_flamingo/src/helpers.py", "snippet": "class GatedCrossAttentionBlock(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_visual,\n dim_head=64,\n heads=8,\n ff_mult=4,\n only_attend_immediate_media=True,\n ):\n super().__init__()\n self.attn = MaskedCrossAttention(\n dim=dim,\n dim_visual=dim_visual,\n dim_head=dim_head,\n heads=heads,\n only_attend_immediate_media=only_attend_immediate_media,\n )\n self.attn_gate = nn.Parameter(torch.tensor([0.0]))\n\n self.ff = FeedForward(dim, mult=ff_mult)\n self.ff_gate = nn.Parameter(torch.tensor([0.0]))\n\n def forward(\n self,\n x,\n media,\n media_locations=None,\n attend_previous=True,\n ):\n x = (\n self.attn(\n x,\n media,\n media_locations=media_locations,\n attend_previous=attend_previous,\n )\n * self.attn_gate.tanh()\n + x\n )\n x = self.ff(x) * self.ff_gate.tanh() + x\n\n return x" }, { "identifier": "getattr_recursive", "path": "open_flamingo/src/utils.py", "snippet": "def getattr_recursive(obj, att):\n \"\"\"\n Return nested attribute of obj\n Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c\n \"\"\"\n if att == \"\":\n return obj\n i = att.find(\".\")\n if i < 0:\n return getattr(obj, att)\n else:\n return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])" }, { "identifier": "setattr_recursive", "path": "open_flamingo/src/utils.py", "snippet": "def setattr_recursive(obj, att, val):\n \"\"\"\n Set nested attribute of obj\n Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val\n \"\"\"\n if \".\" in att:\n obj = getattr_recursive(obj, \".\".join(att.split(\".\")[:-1]))\n setattr(obj, att.split(\".\")[-1], val)" } ]
import random import torch.nn as nn from .helpers import GatedCrossAttentionBlock from .utils import getattr_recursive, setattr_recursive
1,082
class FlamingoLayer(nn.Module): def __init__(self, gated_cross_attn_layer, decoder_layer): super().__init__() self.gated_cross_attn_layer = gated_cross_attn_layer self.decoder_layer = decoder_layer self.vis_x = None self.media_locations = None def is_conditioned(self) -> bool: """Check whether the layer is conditioned.""" return self.vis_x is not None # Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/) def condition_vis_x(self, vis_x): self.vis_x = vis_x def condition_media_locations(self, media_locations): self.media_locations = media_locations def condition_attend_previous(self, attend_previous): self.attend_previous = attend_previous def forward( self, lang_x, attention_mask=None, **decoder_layer_kwargs, ): if self.gated_cross_attn_layer is None: return self.decoder_layer( lang_x, attention_mask=attention_mask, **decoder_layer_kwargs ) if self.vis_x is None: raise ValueError("vis_x must be conditioned before forward pass") if self.media_locations is None: raise ValueError("media_locations must be conditioned before forward pass") lang_x = self.gated_cross_attn_layer( lang_x, self.vis_x, media_locations=self.media_locations, attend_previous=self.attend_previous, ) lang_x = self.decoder_layer( lang_x, attention_mask=attention_mask, **decoder_layer_kwargs ) return lang_x class FlamingoLMMixin(nn.Module): """ Mixin to add cross-attention layers to a language model. """ def set_decoder_layers_attr_name(self, decoder_layers_attr_name): self.decoder_layers_attr_name = decoder_layers_attr_name def _get_decoder_layers(self): return getattr_recursive(self, self.decoder_layers_attr_name) def _set_decoder_layers(self, value): setattr_recursive(self, self.decoder_layers_attr_name, value) def init_flamingo( self, media_token_id, vis_hidden_size, cross_attn_every_n_layers, use_media_placement_augmentation, ): """ Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations. """ self.gated_cross_attn_layers = nn.ModuleList( [
class FlamingoLayer(nn.Module): def __init__(self, gated_cross_attn_layer, decoder_layer): super().__init__() self.gated_cross_attn_layer = gated_cross_attn_layer self.decoder_layer = decoder_layer self.vis_x = None self.media_locations = None def is_conditioned(self) -> bool: """Check whether the layer is conditioned.""" return self.vis_x is not None # Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/) def condition_vis_x(self, vis_x): self.vis_x = vis_x def condition_media_locations(self, media_locations): self.media_locations = media_locations def condition_attend_previous(self, attend_previous): self.attend_previous = attend_previous def forward( self, lang_x, attention_mask=None, **decoder_layer_kwargs, ): if self.gated_cross_attn_layer is None: return self.decoder_layer( lang_x, attention_mask=attention_mask, **decoder_layer_kwargs ) if self.vis_x is None: raise ValueError("vis_x must be conditioned before forward pass") if self.media_locations is None: raise ValueError("media_locations must be conditioned before forward pass") lang_x = self.gated_cross_attn_layer( lang_x, self.vis_x, media_locations=self.media_locations, attend_previous=self.attend_previous, ) lang_x = self.decoder_layer( lang_x, attention_mask=attention_mask, **decoder_layer_kwargs ) return lang_x class FlamingoLMMixin(nn.Module): """ Mixin to add cross-attention layers to a language model. """ def set_decoder_layers_attr_name(self, decoder_layers_attr_name): self.decoder_layers_attr_name = decoder_layers_attr_name def _get_decoder_layers(self): return getattr_recursive(self, self.decoder_layers_attr_name) def _set_decoder_layers(self, value): setattr_recursive(self, self.decoder_layers_attr_name, value) def init_flamingo( self, media_token_id, vis_hidden_size, cross_attn_every_n_layers, use_media_placement_augmentation, ): """ Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations. """ self.gated_cross_attn_layers = nn.ModuleList( [
GatedCrossAttentionBlock(
0
2023-10-18 02:38:00+00:00
2k
mimo-x/Code-Review-GPT-Gitlab
app/gitlab_utils.py
[ { "identifier": "log", "path": "utils/logger.py", "snippet": "CRITICAL = 50\nFATAL = CRITICAL\nERROR = 40\nWARNING = 30\nWARN = WARNING\nINFO = 20\nDEBUG = 10\nNOTSET = 0\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\nROOT_PATH = os.path.join(CURRENT_PATH, os.pardir)\nLOG_PATH = os.path.join(parent_dir, 'logs')\nclass LogHandler(logging.Logger):\n def __init__(self, name, level=INFO, stream=True, file=True):\n def __setFileHandler__(self, level=None):\n def __setStreamHandler__(self, level=None):\n def resetName(self, name):" }, { "identifier": "send_dingtalk_message_by_sign", "path": "utils/dingding.py", "snippet": "@message_error_handler\ndef send_dingtalk_message_by_sign(message_text):\n \"\"\"\n 使用签名方式发送消息通知到钉钉群\n\n Args:\n webhook_url (str): 钉钉群聊机器人的Webhook地址\n secret (str): 机器人的安全设置中的密钥\n message_text (str): 消息文本内容\n\n Returns:\n bool: 消息是否发送成功\n \"\"\"\n timestamp = str(round(time.time() * 1000))\n sign = get_sign(timestamp)\n webhookurl = f\"{dingding_bot_webhook}&timestamp={timestamp}&sign={sign}\"\n # 构建请求头\n headers = {\n \"Content-Type\": \"application/json\",\n }\n\n # 构建请求体\n message = {\n \"msgtype\": \"text\",\n \"text\": {\n \"content\": message_text\n },\n \"timestamp\": timestamp,\n \"sign\": sign\n }\n\n # 发送HTTP POST请求\n response = requests.post(\n webhookurl,\n headers=headers,\n data=json.dumps(message)\n )\n\n # 检查响应\n if response.status_code == 200:\n print(\"消息已发送成功。\")\n return True\n else:\n print(\"消息发送失败,HTTP状态码:\", response.status_code)\n return False" } ]
import requests from retrying import retry from config.config import * from utils.logger import log from utils.dingding import send_dingtalk_message_by_sign
815
@retry(stop_max_attempt_number=3, wait_fixed=2000) def get_merge_request_id(branch_name, project_id): """ 根据分支名,获取mr_id :param branch_name: 分支名 :param project_id: 项目id :return: 如果分支存在 mr 则返回mrid / 如果不存在mr 则返回 "" """ # 构建API请求URL url = f"{gitlab_server_url}/api/v4/projects/{project_id}/merge_requests" # 发送API请求,检查是否有与分支相关的Merge Request params = { "source_branch": branch_name, "state": "opened" # 可以根据需求选择合适的状态(opened、closed、merged等) } headers = {"Private-Token": gitlab_private_token} response = requests.get(url, params=params, headers=headers) # 解析JSON响应并检查是否有相关的Merge Request if response.status_code == 200: merge_requests = response.json() if len(merge_requests) > 0:
@retry(stop_max_attempt_number=3, wait_fixed=2000) def get_merge_request_id(branch_name, project_id): """ 根据分支名,获取mr_id :param branch_name: 分支名 :param project_id: 项目id :return: 如果分支存在 mr 则返回mrid / 如果不存在mr 则返回 "" """ # 构建API请求URL url = f"{gitlab_server_url}/api/v4/projects/{project_id}/merge_requests" # 发送API请求,检查是否有与分支相关的Merge Request params = { "source_branch": branch_name, "state": "opened" # 可以根据需求选择合适的状态(opened、closed、merged等) } headers = {"Private-Token": gitlab_private_token} response = requests.get(url, params=params, headers=headers) # 解析JSON响应并检查是否有相关的Merge Request if response.status_code == 200: merge_requests = response.json() if len(merge_requests) > 0:
log.info(f"分支 '{branch_name}' 存在mr记录.{merge_requests}")
0
2023-10-19 14:10:10+00:00
2k
AI-Application-and-Integration-Lab/DGUA_FAS
util/get_loader.py
[ { "identifier": "YunpeiDataset", "path": "util/dataset.py", "snippet": "class YunpeiDataset(Dataset):\n def __init__(self, data_pd, transforms=None, train=True):\n self.train = train\n self.photo_path = data_pd['photo_path'].tolist()\n self.photo_label = data_pd['photo_label'].tolist()\n self.photo_belong_to_video_ID = data_pd['photo_belong_to_video_ID'].tolist()\n if transforms is None:\n if not train:\n self.transforms = T.Compose([\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n else:\n self.transforms = T.Compose([\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n else:\n self.transforms = transforms\n\n def __len__(self):\n return len(self.photo_path)\n\n def __getitem__(self, item):\n if self.train:\n img_path = self.photo_path[item]\n label = self.photo_label[item]\n img = Image.open(img_path).resize((256, 256))\n img = self.transforms(img)\n return img, label\n else:\n img_path = self.photo_path[item]\n label = self.photo_label[item]\n videoID = self.photo_belong_to_video_ID[item]\n img = Image.open(img_path).resize((256, 256))\n img = self.transforms(img)\n return img, label, videoID" }, { "identifier": "sample_frames", "path": "util/utils.py", "snippet": "def sample_frames(flag, num_frames, dataset_name):\n '''\n from every video (frames) to sample num_frames to test\n return: the choosen frames' path and label\n '''\n # The process is a litter cumbersome, you can change to your way for convenience\n root_path = '../../data_label/' + dataset_name\n if(flag == 0): # select the fake images\n label_path = root_path + '/fake_label.json'\n save_label_path = root_path + '/choose_fake_label.json'\n elif(flag == 1): # select the real images\n label_path = root_path + '/real_label.json'\n save_label_path = root_path + '/choose_real_label.json'\n else: # select all the real and fake images\n label_path = root_path + '/all_label.json'\n save_label_path = root_path + '/choose_all_label.json'\n\n all_label_json = json.load(open(label_path, 'r'))\n f_sample = open(save_label_path, 'w')\n length = len(all_label_json)\n # three componets: frame_prefix, frame_num, png\n saved_frame_prefix = '/'.join(all_label_json[0]['photo_path'].split('/')[:-1])\n final_json = []\n video_number = 0\n single_video_frame_list = []\n single_video_frame_num = 0\n single_video_label = 0\n for i in range(length):\n photo_path = all_label_json[i]['photo_path']\n photo_label = all_label_json[i]['photo_label']\n frame_prefix = '/'.join(photo_path.split('/')[:-1])\n # the last frame\n if (i == length - 1):\n photo_frame = int(photo_path.split('/')[-1].split('.')[0])\n single_video_frame_list.append(photo_frame)\n single_video_frame_num += 1\n single_video_label = photo_label\n # a new video, so process the saved one\n if (frame_prefix != saved_frame_prefix or i == length - 1):\n # [1, 2, 3, 4,.....]\n single_video_frame_list.sort()\n frame_interval = math.floor(single_video_frame_num / num_frames)\n for j in range(num_frames):\n dict = {}\n # dict['photo_path'] = saved_frame_prefix + '/' + str(\n # single_video_frame_list[6 + j * frame_interval]) + '.png'\n if dataset_name not in {'cefa', 'wmca_train', 'wmca_test'}:\n dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[ j * frame_interval]):03d}' + '.png'\n elif dataset_name == 'cefa':\n print(single_video_frame_list)\n print(saved_frame_prefix)\n dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[6 + j * 
frame_interval]):04d}' + '.jpg'\n else:\n dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[j * frame_interval]):03d}' + '.jpg'\n dict['photo_label'] = single_video_label\n dict['photo_belong_to_video_ID'] = video_number\n final_json.append(dict)\n video_number += 1\n saved_frame_prefix = frame_prefix\n single_video_frame_list.clear()\n single_video_frame_num = 0\n # get every frame information\n photo_frame = int(photo_path.split('/')[-1].split('.')[0])\n single_video_frame_list.append(photo_frame)\n single_video_frame_num += 1\n single_video_label = photo_label\n if(flag == 0):\n print(\"Total video number(fake): \", video_number, dataset_name)\n elif(flag == 1):\n print(\"Total video number(real): \", video_number, dataset_name)\n else:\n print(\"Total video number(target): \", video_number, dataset_name)\n json.dump(final_json, f_sample, indent=4)\n f_sample.close()\n\n f_json = open(save_label_path)\n sample_data_pd = pd.read_json(f_json)\n return sample_data_pd" } ]
import os import random import numpy as np import pandas as pd import torch from sklearn.model_selection import train_test_split from torch.utils.data import DataLoader from util.dataset import YunpeiDataset from util.utils import sample_frames
1,472
def get_dataset(src1_data, src1_train_num_frames, src2_data, src2_train_num_frames, src3_data, src3_train_num_frames, tgt1_data, tgt_test_num_frames, batch_size): print('Load Source Data') print('Source Data: ', src1_data)
def get_dataset(src1_data, src1_train_num_frames, src2_data, src2_train_num_frames, src3_data, src3_train_num_frames, tgt1_data, tgt_test_num_frames, batch_size): print('Load Source Data') print('Source Data: ', src1_data)
src1_train_data_fake = sample_frames(flag=0, num_frames=src1_train_num_frames, dataset_name=src1_data)
1
2023-10-17 15:35:33+00:00
2k
jianlanluo/SAQ
vqn/conservative_sac.py
[ { "identifier": "next_rng", "path": "vqn/jax_utils.py", "snippet": "def next_rng(*args, **kwargs):\n global jax_utils_rng\n return jax_utils_rng(*args, **kwargs)" }, { "identifier": "value_and_multi_grad", "path": "vqn/jax_utils.py", "snippet": "def value_and_multi_grad(fun, n_outputs, argnums=0, has_aux=False):\n def select_output(index):\n def wrapped(*args, **kwargs):\n if has_aux:\n x, *aux = fun(*args, **kwargs)\n return (x[index], *aux)\n else:\n x = fun(*args, **kwargs)\n return x[index]\n return wrapped\n\n grad_fns = tuple(\n jax.value_and_grad(select_output(i), argnums=argnums, has_aux=has_aux)\n for i in range(n_outputs)\n )\n def multi_grad_fn(*args, **kwargs):\n grads = []\n values = []\n for grad_fn in grad_fns:\n (value, *aux), grad = grad_fn(*args, **kwargs)\n values.append(value)\n grads.append(grad)\n return (tuple(values), *aux), tuple(grads)\n return multi_grad_fn" }, { "identifier": "mse_loss", "path": "vqn/jax_utils.py", "snippet": "def mse_loss(val, target):\n return jnp.mean(jnp.square(val - target))" }, { "identifier": "JaxRNG", "path": "vqn/jax_utils.py", "snippet": "class JaxRNG(object):\n \"\"\" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside\n pure function.\n \"\"\"\n\n @classmethod\n def from_seed(cls, seed):\n return cls(jax.random.PRNGKey(seed))\n\n def __init__(self, rng):\n self.rng = rng\n\n def __call__(self, keys=None):\n if keys is None:\n self.rng, split_rng = jax.random.split(self.rng)\n return split_rng\n elif isinstance(keys, int):\n split_rngs = jax.random.split(self.rng, num=keys + 1)\n self.rng = split_rngs[0]\n return tuple(split_rngs[1:])\n else:\n split_rngs = jax.random.split(self.rng, num=len(keys) + 1)\n self.rng = split_rngs[0]\n return {key: val for key, val in zip(keys, split_rngs[1:])}" }, { "identifier": "wrap_function_with_rng", "path": "vqn/jax_utils.py", "snippet": "def wrap_function_with_rng(rng):\n \"\"\" To be used as decorator, automatically bookkeep a RNG for the wrapped function. \"\"\"\n def wrap_function(function):\n def wrapped(*args, **kwargs):\n nonlocal rng\n rng, split_rng = jax.random.split(rng)\n return function(split_rng, *args, **kwargs)\n return wrapped\n return wrap_function" }, { "identifier": "collect_jax_metrics", "path": "vqn/jax_utils.py", "snippet": "def collect_jax_metrics(metrics, names, prefix=None):\n collected = {}\n for name in names:\n if name in metrics:\n collected[name] = jnp.mean(metrics[name])\n if prefix is not None:\n collected = {\n '{}/{}'.format(prefix, key): value for key, value in collected.items()\n }\n return collected" }, { "identifier": "Scalar", "path": "vqn/model.py", "snippet": "class Scalar(nn.Module):\n init_value: float\n\n def setup(self):\n self.value = self.param('value', lambda x:self.init_value)\n\n def __call__(self):\n return self.value" }, { "identifier": "update_target_network", "path": "vqn/model.py", "snippet": "def update_target_network(main_params, target_params, tau):\n return jax.tree_util.tree_map(\n lambda x, y: tau * x + (1.0 - tau) * y,\n main_params, target_params\n )" }, { "identifier": "prefix_metrics", "path": "vqn/utils.py", "snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }" } ]
from collections import OrderedDict from copy import deepcopy from functools import partial from ml_collections import ConfigDict from flax.training.train_state import TrainState from .jax_utils import ( next_rng, value_and_multi_grad, mse_loss, JaxRNG, wrap_function_with_rng, collect_jax_metrics ) from .model import Scalar, update_target_network from .utils import prefix_metrics import numpy as np import jax import jax.numpy as jnp import flax import flax.linen as nn import optax import distrax
1,418
class ConservativeSAC(object): @staticmethod def get_default_config(updates=None): config = ConfigDict() config.discount = 0.99 config.alpha_multiplier = 0.0 config.use_automatic_entropy_tuning = False config.backup_entropy = False config.target_entropy = 0.0 config.policy_lr = 3e-4 config.policy_weight_decay = 0.0 config.qf_lr = 3e-4 config.qf_weight_decay = 0.0 config.optimizer_type = 'adam' config.soft_target_update_rate = 5e-3 config.use_cql = False config.cql_n_actions = 10 config.cql_importance_sample = True config.cql_lagrange = False config.cql_target_action_gap = 1.0 config.cql_temp = 1.0 config.cql_min_q_weight = 5.0 config.cql_max_target_backup = False config.cql_clip_diff_min = -np.inf config.cql_clip_diff_max = np.inf if updates is not None: config.update(ConfigDict(updates).copy_and_resolve_references()) return config def __init__(self, config, policy, qf): self.config = self.get_default_config(config) self.policy = policy self.qf = qf self.observation_dim = policy.observation_dim self.action_dim = policy.action_dim self._train_states = {} optimizer_class = { 'adam': optax.adam, 'sgd': optax.sgd, }[self.config.optimizer_type] policy_params = self.policy.init(
class ConservativeSAC(object): @staticmethod def get_default_config(updates=None): config = ConfigDict() config.discount = 0.99 config.alpha_multiplier = 0.0 config.use_automatic_entropy_tuning = False config.backup_entropy = False config.target_entropy = 0.0 config.policy_lr = 3e-4 config.policy_weight_decay = 0.0 config.qf_lr = 3e-4 config.qf_weight_decay = 0.0 config.optimizer_type = 'adam' config.soft_target_update_rate = 5e-3 config.use_cql = False config.cql_n_actions = 10 config.cql_importance_sample = True config.cql_lagrange = False config.cql_target_action_gap = 1.0 config.cql_temp = 1.0 config.cql_min_q_weight = 5.0 config.cql_max_target_backup = False config.cql_clip_diff_min = -np.inf config.cql_clip_diff_max = np.inf if updates is not None: config.update(ConfigDict(updates).copy_and_resolve_references()) return config def __init__(self, config, policy, qf): self.config = self.get_default_config(config) self.policy = policy self.qf = qf self.observation_dim = policy.observation_dim self.action_dim = policy.action_dim self._train_states = {} optimizer_class = { 'adam': optax.adam, 'sgd': optax.sgd, }[self.config.optimizer_type] policy_params = self.policy.init(
next_rng(self.policy.rng_keys()),
0
2023-10-18 06:31:20+00:00
2k
dpaleka/llm-chess-proofgame
puzzle_pair_solve.py
[ { "identifier": "convert_pgn_to_game", "path": "puzzle_solver.py", "snippet": "def convert_pgn_to_game(pgn_moves):\n pgn = io.StringIO(pgn_moves)\n game = chess.pgn.read_game(pgn)\n if len(game.errors) > 0:\n return None\n return game" }, { "identifier": "solve_puzzle", "path": "puzzle_solver.py", "snippet": "def solve_puzzle(board, solution, engine):\n solution = solution.split()\n while True:\n guess_next_move = engine.get_best_move(board)\n real_next_move, *solution = solution\n if guess_next_move != real_next_move:\n try:\n board.push_san(guess_next_move)\n if board.is_checkmate():\n return True\n except:\n pass\n return False\n board.push_san(guess_next_move)\n if len(solution) > 0:\n opponent_move, *solution = solution\n board.push_san(opponent_move)\n else:\n break\n return True" } ]
import chess import numpy as np import io import json import csv import chessllm from pathlib import Path from tqdm import tqdm from puzzle_solver import convert_pgn_to_game, solve_puzzle from matplotlib import pyplot as plt
776
DATA_DIR = Path("/data/chess-data/lichess_puzzles") FILE_NAME = DATA_DIR / "pairs.csv" """ Solve puzzle pairs given in FILE_NAME, and report whether the model can solve them. Separate by rating buckets; take 40 samples from each bucket. It has the following columns: uid,rating,pgn,proofgame,solution Helper functions: def solve_puzzle(board, solution) -> bool: whether model can solve the puzzle convert_pgn_to_game(pgn_moves) -> game """ DATA_DIR = Path("/data/chess-data/lichess_puzzles") FILE_NAME = DATA_DIR / "pairs.csv" def plot_acc_pairs(engine, bucket_size=200, enough_samples=10): # Create buckets buckets = {i*bucket_size: [] for i in range(30)} # Read the data and sort into buckets with open(FILE_NAME) as f: reader = csv.reader(f) print(reader.__next__()) for uid, rating, pgn, proofgame, solution in tqdm(list(reader)): rating_bucket = int(rating) // bucket_size * bucket_size if len(buckets[rating_bucket]) < enough_samples: buckets[rating_bucket].append((pgn, proofgame, solution)) # print how many elems in buckets for k, v in buckets.items(): print(f'rating [{k}, {k + bucket_size})', 'n', len(v)) nonempty_buckets = [k for k, v in buckets.items() if len(v) > 0] # Test the puzzles ok_pgn = {i*bucket_size: [] for i in range(30)} ok_proofgame = {i*bucket_size: [] for i in range(30)} for rating_bucket, puzzles in tqdm(buckets.items()): for pgn, proofgame, solution in puzzles: board_pgn = chess.Board() board_proofgame = chess.Board() print("pgn origi", pgn) print("proofgame", proofgame) # Iterate over the moves and apply them to the board for move in convert_pgn_to_game(pgn).mainline_moves(): board_pgn.push(move) for move in convert_pgn_to_game(proofgame).mainline_moves(): board_proofgame.push(move)
DATA_DIR = Path("/data/chess-data/lichess_puzzles") FILE_NAME = DATA_DIR / "pairs.csv" """ Solve puzzle pairs given in FILE_NAME, and report whether the model can solve them. Separate by rating buckets; take 40 samples from each bucket. It has the following columns: uid,rating,pgn,proofgame,solution Helper functions: def solve_puzzle(board, solution) -> bool: whether model can solve the puzzle convert_pgn_to_game(pgn_moves) -> game """ DATA_DIR = Path("/data/chess-data/lichess_puzzles") FILE_NAME = DATA_DIR / "pairs.csv" def plot_acc_pairs(engine, bucket_size=200, enough_samples=10): # Create buckets buckets = {i*bucket_size: [] for i in range(30)} # Read the data and sort into buckets with open(FILE_NAME) as f: reader = csv.reader(f) print(reader.__next__()) for uid, rating, pgn, proofgame, solution in tqdm(list(reader)): rating_bucket = int(rating) // bucket_size * bucket_size if len(buckets[rating_bucket]) < enough_samples: buckets[rating_bucket].append((pgn, proofgame, solution)) # print how many elems in buckets for k, v in buckets.items(): print(f'rating [{k}, {k + bucket_size})', 'n', len(v)) nonempty_buckets = [k for k, v in buckets.items() if len(v) > 0] # Test the puzzles ok_pgn = {i*bucket_size: [] for i in range(30)} ok_proofgame = {i*bucket_size: [] for i in range(30)} for rating_bucket, puzzles in tqdm(buckets.items()): for pgn, proofgame, solution in puzzles: board_pgn = chess.Board() board_proofgame = chess.Board() print("pgn origi", pgn) print("proofgame", proofgame) # Iterate over the moves and apply them to the board for move in convert_pgn_to_game(pgn).mainline_moves(): board_pgn.push(move) for move in convert_pgn_to_game(proofgame).mainline_moves(): board_proofgame.push(move)
is_right_pgn = solve_puzzle(board_pgn, solution, engine)
1
2023-10-16 16:36:53+00:00
2k
Azure/azure-openai-benchmark
benchmark/bench.py
[ { "identifier": "tokenize", "path": "benchmark/tokenizecmd.py", "snippet": "def tokenize(args):\n \"\"\"\n Count number of tokens for given input and model. It attempts to decode\n input as json chat messages. Otherwise, it assumes input is just text.\n Return: number of tokens.\n \"\"\"\n model = args.model\n text = args.text\n\n if text is None:\n logging.info(\"no input text given, reading starding in\")\n text = sys.stdin.read()\n\n count = 0\n try:\n data = json.loads(text)\n count = num_tokens_from_messages(data, model)\n\n except json.JSONDecodeError:\n logging.info(\"input does not seem to be json formatted, assuming text\")\n count = num_tokens_from_text(text, model)\n\n print(f\"tokens: {count}\")" }, { "identifier": "load", "path": "benchmark/loadcmd.py", "snippet": "def load(args):\n try:\n _validate(args)\n except ValueError as e:\n print(f\"invalid argument(s): {e}\")\n sys.exit(1)\n\n api_key = os.getenv(args.api_key_env)\n url = args.api_base_endpoint[0] + \"/openai/deployments/\" + args.deployment + \"/chat/completions\"\n url += \"?api-version=\" + args.api_version\n\n rate_limiter = NoRateLimiter()\n if args.rate is not None and args.rate > 0:\n rate_limiter = RateLimiter(args.rate, 60)\n\n max_tokens = args.max_tokens\n context_tokens = args.context_tokens\n if args.shape_profile == \"balanced\":\n context_tokens = 500\n max_tokens = 500\n elif args.shape_profile == \"context\":\n context_tokens = 2000\n max_tokens = 200\n elif args.shape_profile == \"generation\":\n context_tokens = 500\n max_tokens = 1000\n\n logging.info(f\"using shape profile {args.shape_profile}: context tokens: {context_tokens}, max tokens: {max_tokens}\")\n\n request_builder = _RequestBuilder(\"gpt-4-0613\", context_tokens,\n max_tokens=max_tokens,\n completions=args.completions,\n frequence_penalty=args.frequency_penalty,\n presence_penalty=args.presence_penalty,\n temperature=args.temperature,\n top_p=args.top_p)\n\n logging.info(\"starting load...\")\n\n _run_load(request_builder,\n max_concurrency=args.clients, \n api_key=api_key,\n url=url,\n rate_limiter=rate_limiter,\n backoff=args.retry==\"exponential\",\n request_count=args.requests,\n duration=args.duration,\n aggregation_duration=args.aggregation_window,\n json_output=args.output_format==\"jsonl\")" } ]
import argparse import logging from .tokenizecmd import tokenize from .loadcmd import load
1,319
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. def main(): logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") parser = argparse.ArgumentParser(description="Benchmarking tool for Azure OpenAI Provisioned Throughput Units (PTUs).") sub_parsers = parser.add_subparsers() load_parser = sub_parsers.add_parser("load", help="Run load generation tool.") load_parser.add_argument("-a", "--api-version", type=str, default="2023-05-15", help="Set OpenAI API version.") load_parser.add_argument("-k", "--api-key-env", type=str, default="OPENAI_API_KEY", help="Environment variable that contains the API KEY.") load_parser.add_argument("-c", "--clients", type=int, default=20, help="Set number of parallel clients to use for load generation.") load_parser.add_argument("-n", "--requests", type=int, help="Number of requests for the load run. Default to 'until killed'.") load_parser.add_argument("-d", "--duration", type=int, help="Duration of load in seconds. Defaults to 'until killed'.") load_parser.add_argument("-r", "--rate", type=float, help="Rate of request generation in Requests Per Minute (RPM). Default to as fast as possible.") load_parser.add_argument("-w", "--aggregation-window", type=float, default=60, help="Statistics aggregation sliding window duration in seconds. See README.md for more details.") load_parser.add_argument("-s", "--shape-profile", type=str, default="balanced", help="Shape profile of requests.", choices=["balanced", "context", "generation", "custom"]) load_parser.add_argument("-p", "--context-tokens", type=int, help="Number of context tokens to use when --shape-profile=custom.") load_parser.add_argument("-m", "--max-tokens", type=int, help="Number of requested max_tokens when --shape-profile=custom. Defaults to unset.") load_parser.add_argument("-i", "--completions", type=int, default=1, help="Number of completion for each request.") load_parser.add_argument("--frequency-penalty", type=float, help="Request frequency_penalty.") load_parser.add_argument("--presence-penalty", type=float, help="Request frequency_penalty.") load_parser.add_argument("--temperature", type=float, help="Request temperature.") load_parser.add_argument("--top-p", type=float, help="Request top_p.") load_parser.add_argument("-f", "--output-format", type=str, default="human", help="Output format.", choices=["jsonl", "human"]) load_parser.add_argument("-t", "--retry", type=str, default="none", help="Request retry strategy.", choices=["none", "exponential"]) load_parser.add_argument("-e", "--deployment", type=str, help="Azure OpenAI deployment name.", required=True) load_parser.add_argument("api_base_endpoint", help="Azure OpenAI deployment base endpoint.", nargs=1)
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. def main(): logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") parser = argparse.ArgumentParser(description="Benchmarking tool for Azure OpenAI Provisioned Throughput Units (PTUs).") sub_parsers = parser.add_subparsers() load_parser = sub_parsers.add_parser("load", help="Run load generation tool.") load_parser.add_argument("-a", "--api-version", type=str, default="2023-05-15", help="Set OpenAI API version.") load_parser.add_argument("-k", "--api-key-env", type=str, default="OPENAI_API_KEY", help="Environment variable that contains the API KEY.") load_parser.add_argument("-c", "--clients", type=int, default=20, help="Set number of parallel clients to use for load generation.") load_parser.add_argument("-n", "--requests", type=int, help="Number of requests for the load run. Default to 'until killed'.") load_parser.add_argument("-d", "--duration", type=int, help="Duration of load in seconds. Defaults to 'until killed'.") load_parser.add_argument("-r", "--rate", type=float, help="Rate of request generation in Requests Per Minute (RPM). Default to as fast as possible.") load_parser.add_argument("-w", "--aggregation-window", type=float, default=60, help="Statistics aggregation sliding window duration in seconds. See README.md for more details.") load_parser.add_argument("-s", "--shape-profile", type=str, default="balanced", help="Shape profile of requests.", choices=["balanced", "context", "generation", "custom"]) load_parser.add_argument("-p", "--context-tokens", type=int, help="Number of context tokens to use when --shape-profile=custom.") load_parser.add_argument("-m", "--max-tokens", type=int, help="Number of requested max_tokens when --shape-profile=custom. Defaults to unset.") load_parser.add_argument("-i", "--completions", type=int, default=1, help="Number of completion for each request.") load_parser.add_argument("--frequency-penalty", type=float, help="Request frequency_penalty.") load_parser.add_argument("--presence-penalty", type=float, help="Request frequency_penalty.") load_parser.add_argument("--temperature", type=float, help="Request temperature.") load_parser.add_argument("--top-p", type=float, help="Request top_p.") load_parser.add_argument("-f", "--output-format", type=str, default="human", help="Output format.", choices=["jsonl", "human"]) load_parser.add_argument("-t", "--retry", type=str, default="none", help="Request retry strategy.", choices=["none", "exponential"]) load_parser.add_argument("-e", "--deployment", type=str, help="Azure OpenAI deployment name.", required=True) load_parser.add_argument("api_base_endpoint", help="Azure OpenAI deployment base endpoint.", nargs=1)
load_parser.set_defaults(func=load)
1
2023-10-19 00:52:26+00:00
2k
pytest-visual/pytest-visual
tests/lib/test_convenience.py
[ { "identifier": "ceil_division", "path": "visual/lib/convenience.py", "snippet": "def ceil_division(n, d):\n return (n + d - 1) // d" }, { "identifier": "correct_layout", "path": "visual/lib/convenience.py", "snippet": "def correct_layout(image: np.ndarray, layout: str) -> np.ndarray:\n if layout[0] == \"1\":\n image = np.squeeze(image, axis=0)\n layout = layout[1:]\n if layout[0] == \"c\":\n image = np.moveaxis(image, 0, -1)\n layout = layout[1:] + \"c\"\n return image" }, { "identifier": "get_grid_shape", "path": "visual/lib/convenience.py", "snippet": "def get_grid_shape(num_images: int, max_cols: int) -> Tuple[int, int]:\n \"\"\"\n Calculate the shape of the grid of images to show.\n \"\"\"\n rows = ceil_division(num_images, max_cols)\n cols = ceil_division(num_images, rows)\n return rows, cols" }, { "identifier": "get_image_max_value_from_type", "path": "visual/lib/convenience.py", "snippet": "def get_image_max_value_from_type(max_value: Optional[float], image: np.ndarray) -> float:\n \"\"\"\n Get or calculate the maximum value of the image.\n \"\"\"\n if max_value is not None:\n return max_value\n\n if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64]:\n return 255.0\n if image.dtype in [np.float16, np.float32, np.float64]:\n return 1.0\n raise ValueError(f\"Could not determine max value from image with dtype {image.dtype}\")" }, { "identifier": "get_layout_from_image", "path": "visual/lib/convenience.py", "snippet": "def get_layout_from_image(layout: Optional[str], image: np.ndarray) -> str:\n \"\"\"\n Get or calculate the layout of the grid of images to show.\n\n Possible values: \"hwc\", \"chw\", \"hw\", \"1chw\", \"1hwc\"\n \"\"\"\n if layout is not None:\n return layout\n\n matched_layouts = [L for L in [\"hwc\", \"chw\", \"hw\", \"1chw\", \"1hwc\"] if layout_matches_image(L, image)]\n assert len(matched_layouts) == 1, f\"Could not determine layout from image with shape {image.shape}\"\n return matched_layouts[0]" } ]
import numpy as np from visual.lib.convenience import ( ceil_division, correct_layout, get_grid_shape, get_image_max_value_from_type, get_layout_from_image, )
792
def test_get_grid_shape(): assert get_grid_shape(1, 3) == (1, 1) assert get_grid_shape(2, 3) == (1, 2) assert get_grid_shape(3, 3) == (1, 3) assert get_grid_shape(4, 3) == (2, 2) assert get_grid_shape(5, 3) == (2, 3) assert get_grid_shape(6, 3) == (2, 3) assert get_grid_shape(7, 3) == (3, 3) assert get_grid_shape(10, 3) == (4, 3) def test_ceil_division(): assert ceil_division(19, 10) == 2 assert ceil_division(20, 10) == 2 assert ceil_division(21, 10) == 3 def test_get_layout_from_image():
def test_get_grid_shape(): assert get_grid_shape(1, 3) == (1, 1) assert get_grid_shape(2, 3) == (1, 2) assert get_grid_shape(3, 3) == (1, 3) assert get_grid_shape(4, 3) == (2, 2) assert get_grid_shape(5, 3) == (2, 3) assert get_grid_shape(6, 3) == (2, 3) assert get_grid_shape(7, 3) == (3, 3) assert get_grid_shape(10, 3) == (4, 3) def test_ceil_division(): assert ceil_division(19, 10) == 2 assert ceil_division(20, 10) == 2 assert ceil_division(21, 10) == 3 def test_get_layout_from_image():
assert get_layout_from_image("hwc", np.zeros((1, 1, 1))) == "hwc"
4
2023-10-18 07:13:37+00:00
2k
SLDGroup/G-CASCADE
lib/gcn_lib/torch_vertex.py
[ { "identifier": "BasicConv", "path": "lib/gcn_lib/torch_nn.py", "snippet": "class BasicConv(Seq):\n def __init__(self, channels, act='relu', norm=None, bias=True, drop=0., kernel_size=1, padding=0, groups=4):\n m = []\n for i in range(1, len(channels)):\n m.append(Conv2d(channels[i - 1], channels[i], kernel_size, padding=padding, bias=bias, groups=groups))\n if norm is not None and norm.lower() != 'none':\n m.append(norm_layer(norm, channels[-1]))\n if act is not None and act.lower() != 'none':\n m.append(act_layer(act))\n if drop > 0:\n m.append(nn.Dropout2d(drop))\n\n super(BasicConv, self).__init__(*m)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()" }, { "identifier": "batched_index_select", "path": "lib/gcn_lib/torch_nn.py", "snippet": "def batched_index_select(x, idx):\n r\"\"\"fetches neighbors features from a given neighbor idx\n\n Args:\n x (Tensor): input feature Tensor\n :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times C \\times N \\times 1}`.\n idx (Tensor): edge_idx\n :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times l}`.\n Returns:\n Tensor: output neighbors features\n :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times C \\times N \\times k}`.\n \"\"\"\n batch_size, num_dims, num_vertices_reduced = x.shape[:3]\n _, num_vertices, k = idx.shape\n #print([batch_size,num_dims,num_vertices_reduced, num_vertices, k, x.shape])\n idx_base = torch.arange(0, batch_size, device=idx.device).view(-1, 1, 1) * num_vertices_reduced\n #print(idx_base.shape)\n idx = idx + idx_base\n idx = idx.contiguous().view(-1)\n #print(x.shape)\n x = x.transpose(2, 1)\n #print(x.shape)\n x = x.contiguous().view(batch_size * num_vertices_reduced, -1)\n #print(x.shape)\n feature = x[idx, :]\n #print(feature.shape)\n feature = feature.view(batch_size, num_vertices, k, num_dims)\n #print(feature.shape)\n feature = feature.permute(0, 3, 1, 2).contiguous()\n #print(feature.shape)\n return feature" }, { "identifier": "act_layer", "path": "lib/gcn_lib/torch_nn.py", "snippet": "def act_layer(act, inplace=False, neg_slope=0.2, n_prelu=1):\n # activation layer\n\n act = act.lower()\n if act == 'relu':\n layer = nn.ReLU(inplace)\n elif act == 'leakyrelu':\n layer = nn.LeakyReLU(neg_slope, inplace)\n elif act == 'prelu':\n layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)\n elif act == 'gelu':\n layer = nn.GELU()\n elif act == 'hswish':\n layer = nn.Hardswish(inplace)\n else:\n raise NotImplementedError('activation layer [%s] is not found' % act)\n return layer" }, { "identifier": "DenseDilatedKnnGraph", "path": "lib/gcn_lib/torch_edge.py", "snippet": "class DenseDilatedKnnGraph(nn.Module):\n \"\"\"\n Find the neighbors' indices based on dilated knn\n \"\"\"\n def __init__(self, k=9, dilation=1, stochastic=False, epsilon=0.0):\n super(DenseDilatedKnnGraph, self).__init__()\n self.dilation = dilation\n self.stochastic = stochastic\n self.epsilon = epsilon\n self.k = k\n self._dilated = DenseDilated(k, dilation, stochastic, epsilon)\n\n def forward(self, x, y=None, relative_pos=None):\n if y is not None:\n #### normalize\n x = F.normalize(x, p=2.0, dim=1)\n y = F.normalize(y, p=2.0, dim=1)\n ####\n edge_index = xy_dense_knn_matrix(x, y, self.k * self.dilation, relative_pos)\n else:\n #### normalize\n x = F.normalize(x, p=2.0, dim=1)\n 
####\n edge_index = dense_knn_matrix(x, self.k * self.dilation, relative_pos)\n return self._dilated(edge_index)" }, { "identifier": "get_2d_relative_pos_embed", "path": "lib/gcn_lib/pos_embed.py", "snippet": "def get_2d_relative_pos_embed(embed_dim, grid_size):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, grid_size*grid_size]\n \"\"\"\n pos_embed = get_2d_sincos_pos_embed(embed_dim, grid_size)\n relative_pos = 2 * np.matmul(pos_embed, pos_embed.transpose()) / pos_embed.shape[1]\n return relative_pos" } ]
import numpy as np import torch import torch.nn.functional as F from torch import nn from .torch_nn import BasicConv, batched_index_select, act_layer from .torch_edge import DenseDilatedKnnGraph from .pos_embed import get_2d_relative_pos_embed from timm.models.layers import DropPath
1,489
# 2022.06.17-Changed for building ViG model # Huawei Technologies Co., Ltd. <foss@huawei.com> class MRConv2d(nn.Module): """ Max-Relative Graph Convolution (Paper: https://arxiv.org/abs/1904.03751) for dense data type """ def __init__(self, in_channels, out_channels, act='relu', norm=None, bias=True, kernel_size=1, padding=0, groups=4): super(MRConv2d, self).__init__()
# 2022.06.17-Changed for building ViG model # Huawei Technologies Co., Ltd. <foss@huawei.com> class MRConv2d(nn.Module): """ Max-Relative Graph Convolution (Paper: https://arxiv.org/abs/1904.03751) for dense data type """ def __init__(self, in_channels, out_channels, act='relu', norm=None, bias=True, kernel_size=1, padding=0, groups=4): super(MRConv2d, self).__init__()
self.nn = BasicConv([in_channels*2, out_channels], act, norm, bias, kernel_size=1, padding=0, groups=4)
0
2023-10-24 17:49:10+00:00
2k
StackTipsLab/bloggy
bloggy_api/serializers.py
[ { "identifier": "Post", "path": "bloggy/models.py", "snippet": "" }, { "identifier": "Comment", "path": "bloggy/models/comment.py", "snippet": "class Comment(models.Model):\n post = models.ForeignKey('bloggy.Post', on_delete=models.CASCADE, related_name='comments')\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='comments', blank=True,\n null=True)\n parent = models.ForeignKey('self', related_name='reply_set', null=True, on_delete=models.PROTECT)\n comment_content = models.TextField()\n comment_author_name = models.TextField(null=True, blank=True)\n comment_author_email = models.TextField(null=True, blank=True)\n comment_author_url = models.TextField(null=True, blank=True)\n comment_author_ip = models.GenericIPAddressField(default=\"0.0.0.0\", null=True, blank=True)\n comment_date = models.DateTimeField(auto_now_add=True)\n active = models.BooleanField(default=False)\n\n class Meta:\n ordering = ['comment_date']\n verbose_name = \"Comment\"\n verbose_name_plural = \"Comments\"\n\n def __str__(self):\n return 'Comment {} by {}'.format(self.comment_content, self.user.get_full_name() if self.user else '-')\n\n def get_comments(self):\n return Comment.objects.filter(parent=self).filter(active=True)" }, { "identifier": "Course", "path": "bloggy/models/course.py", "snippet": "class Course(Content):\n difficulty = models.CharField(\n max_length=20, choices=[\n ('beginner', 'Beginner'),\n ('intermediate', 'Intermediate'),\n ('advance', 'advance'),\n ],\n default='easy', blank=True, null=True,\n help_text=\"Select difficulty\",\n verbose_name=\"Difficulty level\")\n\n is_featured = models.BooleanField(\n default=False,\n help_text=\"Should this story be featured on site?\"\n )\n\n description = models.TextField(null=True, help_text='Enter answer')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='courses')\n thumbnail = models.ImageField(upload_to=upload_thumbnail_image, null=True, blank=True)\n category = models.ForeignKey(Category, blank=True, on_delete=models.CASCADE, related_name='courses')\n view_count = GenericRelation(HitCount, object_id_field='object_pk', related_query_name='hit_count_generic_relation')\n\n class Meta:\n ordering = ['-display_order']\n verbose_name = \"course\"\n verbose_name_plural = \"courses\"\n indexes = [\n models.Index(fields=['slug', 'publish_status', 'published_date']),\n ]\n\n def get_absolute_url(self):\n return reverse(\"courses_single\", kwargs={\"slug\": str(self.slug)})\n\n @property\n def get_lessons(self):\n return self.post_set.filter(publish_status=\"LIVE\").order_by(\"display_order\").all()\n\n def thumbnail_tag(self):\n if self.thumbnail:\n return format_html(f'<img src=\"{self.thumbnail.url}\" width=\"auto\" height=\"40\"/>')\n return \"\"\n\n thumbnail_tag.short_description = 'Logo'\n thumbnail_tag.allow_tags = True" }, { "identifier": "Quiz", "path": "bloggy/models/quizzes.py", "snippet": "class Quiz(Content):\n difficulty = models.CharField(\n max_length=20,\n choices=[\n ('beginner', 'Beginner'),\n ('intermediate', 'Intermediate'),\n ('advance', 'advance'),\n ],\n default='easy', blank=True, null=True,\n help_text=\"Select difficulty\",\n verbose_name=\"Difficulty level\")\n\n is_featured = models.BooleanField(\n default=False,\n help_text=\"Should this story be featured on site?\"\n )\n content = TextField(\n null=True,\n help_text='Post content'\n )\n thumbnail = models.ImageField(\n upload_to=upload_thumbnail_image,\n null=True,\n blank=True)\n category = 
models.ForeignKey(\n Category,\n blank=True,\n on_delete=models.CASCADE,\n related_name='quizzes'\n )\n duration = models.IntegerField(\n help_text=\"Duration in minutes. For articles, it will be calculated automatically.\",\n default=\"1\"\n )\n view_count = GenericRelation(\n HitCount,\n object_id_field='object_pk',\n related_query_name='hit_count_generic_relation'\n )\n\n @property\n def get_questions_json(self):\n return get_questions_json(self)\n\n def get_questions(self):\n return self.quizquestion_set.all()\n\n class Meta:\n ordering = ['title']\n verbose_name = \"Quiz\"\n verbose_name_plural = \"Quizzes\"\n indexes = [\n models.Index(fields=['slug', 'publish_status']),\n ]" } ]
from rest_framework import serializers from bloggy.models import Post, User, Category from bloggy.models.comment import Comment from bloggy.models.course import Course from bloggy.models.quizzes import Quiz
1,360
class CategorySerializer(serializers.ModelSerializer): class Meta: model = Category fields = [ 'id', 'title', 'article_count', 'slug', 'description', 'color', 'logo', 'publish_status', 'created_date', 'updated_date', ] class AuthorSerializer(serializers.ModelSerializer): full_name = serializers.SerializerMethodField('get_full_name') class Meta: model = User fields = ( 'name', 'username', 'profile_photo', 'website', 'twitter', 'youtube', 'github', 'bio', ) class UserSerializer(serializers.ModelSerializer): name = serializers.CharField() email = serializers.EmailField() profile_photo = serializers.ImageField() website = serializers.CharField() twitter = serializers.CharField() youtube = serializers.CharField() github = serializers.CharField() bio = serializers.CharField() class Meta: model = User fields = [ 'name', 'email', 'username', 'profile_photo', 'website', 'twitter', 'youtube', 'github', 'bio', ] class CourseSerializer(serializers.ModelSerializer): class Meta:
class CategorySerializer(serializers.ModelSerializer): class Meta: model = Category fields = [ 'id', 'title', 'article_count', 'slug', 'description', 'color', 'logo', 'publish_status', 'created_date', 'updated_date', ] class AuthorSerializer(serializers.ModelSerializer): full_name = serializers.SerializerMethodField('get_full_name') class Meta: model = User fields = ( 'name', 'username', 'profile_photo', 'website', 'twitter', 'youtube', 'github', 'bio', ) class UserSerializer(serializers.ModelSerializer): name = serializers.CharField() email = serializers.EmailField() profile_photo = serializers.ImageField() website = serializers.CharField() twitter = serializers.CharField() youtube = serializers.CharField() github = serializers.CharField() bio = serializers.CharField() class Meta: model = User fields = [ 'name', 'email', 'username', 'profile_photo', 'website', 'twitter', 'youtube', 'github', 'bio', ] class CourseSerializer(serializers.ModelSerializer): class Meta:
model = Course
2
2023-10-17 14:50:39+00:00
2k
openvinotoolkit/openvino.genai
llm_bench/python/utils/conversion_utils/helpers.py
[ { "identifier": "COMPRESSION_OPTIONS", "path": "llm_bench/python/utils/nncf_utils.py", "snippet": "COMPRESSION_OPTIONS = {\n \"INT8\": {\"mode\": nncf.CompressWeightsMode.INT8 if \"INT8_ASYM\" not in nncf.CompressWeightsMode.__members__ else nncf.CompressWeightsMode.INT8_ASYM},\n \"INT4_SYM\": {\n \"mode\": nncf.CompressWeightsMode.INT4_SYM,\n \"group_size\": 128,\n },\n \"INT4_ASYM\": {\n \"mode\": nncf.CompressWeightsMode.INT4_ASYM,\n \"group_size\": 128,\n },\n}" }, { "identifier": "INT4_MODEL_CONFIGURATION", "path": "llm_bench/python/utils/nncf_utils.py", "snippet": "INT4_MODEL_CONFIGURATION = {\n \"dolly-v2-3b\": {\"mode\": nncf.CompressWeightsMode.INT4_ASYM, \"group_size\": 32, \"ratio\": 0.5},\n \"gpt-j-6b\": {\"mode\": nncf.CompressWeightsMode.INT4_ASYM, \"group_size\": 64},\n \"opt-6.7b\": {\"mode\": nncf.CompressWeightsMode.INT4_ASYM, \"group_size\": 64, \"ratio\": 0.8},\n \"bloomz-7b1\": {\"mode\": nncf.CompressWeightsMode.INT4_ASYM, \"group_size\": 32, \"ratio\": 0.6},\n \"red-pajama-incite-7b-instruct\": {\"mode\": nncf.CompressWeightsMode.INT4_ASYM, \"group_size\": 128},\n \"zephyr-7b-beta\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 64, \"ratio\": 0.6},\n \"llama-2-7b\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 128, \"ratio\": 0.6},\n \"llama-2-7b-chat\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 128, \"ratio\": 0.8},\n \"llama-2-13b-chat\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 64, \"ratio\": 0.8},\n \"stablelm-3b-4e1t\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 64, \"ratio\": 0.8},\n \"stablelm-epoch-3b-preview\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 64, \"ratio\": 0.8},\n \"stable-zephyr-3b-dpo\": {\"mode\": nncf.CompressWeightsMode.INT4_ASYM, \"group_size\": 64, \"ratio\": 0.8},\n \"rocket-3b\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 128, \"ratio\": 0.8},\n \"chatglm2-6b\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 128, \"ratio\": 0.72},\n \"qwen-7b-chat\": {\"mode\": nncf.CompressWeightsMode.INT4_SYM, \"group_size\": 128, \"ratio\": 0.6},\n}" } ]
from enum import Enum from pathlib import Path from nncf import compress_weights from openvino import save_model from ..nncf_utils import COMPRESSION_OPTIONS, INT4_MODEL_CONFIGURATION from optimum.gptq import GPTQQuantizer from auto_gptq import exllama_set_max_input_length from optimum.gptq import GPTQQuantizer import logging as log import torch import warnings
1,586
# -*- coding: utf-8 -*- # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 class BackendType(Enum): PYTORCH = 'pytorch' OPENVINO = 'openvino' PYTORCH_DIR = 'pytorch' PYTORCH_COMPRESS_WEIGHTS_DIR = 'compressed_weights/PT_{precision}-{compression}' OV_DIR = 'dldt' GPTQ_DIR = "GPTQ_INT4-{precision}" def is_torch_compression(args): return args.compress_weights and BackendType.PYTORCH.value in args.compress_weights_backends def is_ov_compression(args): return args.compress_weights and BackendType.OPENVINO.value in args.compress_weights_backends def is_fp16(args): return args.precision == "FP16" def is_ov_model_provided(model_id, model_dir, precision, model_name="openvino_model.xml"): model_dirs = [] if Path(model_id).is_dir(): model_dirs.append(Path(model_id)) model_dirs.append(Path(model_id) / precision) model_dirs.append(Path(model_id) / OV_DIR / precision) model_dirs.append(Path(model_id) / PYTORCH_DIR / OV_DIR / precision) model_dir = Path(model_dir) model_dirs.append(model_dir) model_dirs.append(model_dir / precision) model_dirs.append(model_dir / OV_DIR / precision) model_dirs.append(model_dir / PYTORCH_DIR / OV_DIR / precision) for md in model_dirs: found = True for suffix in ['.xml', '.bin']: model_file = (md / model_name).with_suffix(suffix) if not model_file.exists(): found = False break if found: return found return False def get_fp_path(args, model_subpath): model_dirs = [] if Path(args.model_id).is_dir(): base_model_dir = Path(args.model_id) model_dirs.extend([ base_model_dir, base_model_dir / args.precision, base_model_dir / OV_DIR / args.precision, base_model_dir / PYTORCH_DIR / OV_DIR / args.precision ]) model_dir = Path(args.output_dir) model_dirs.append(model_dir) model_dirs.append(Path(model_dir) / args.precision) model_dirs.append(Path(model_dir) / OV_DIR / args.precision) model_dirs.append(Path(model_dir) / PYTORCH_DIR / OV_DIR / args.precision) for md in model_dirs: if (md / model_subpath).exists(): return md / model_subpath return None def save_tokenizer(tokenizer, out_dir): try: tokenizer.save_pretrained(out_dir) except Exception as e: log.error(f'tokenizer loading failed with {e}') def compress_ov_model_weights_helper(ov_model, tok, config, out_path, compress_weights_format="INT8", fp16=False, args={}, model_name="openvino_model"): compression_args = None if "INT8" in compress_weights_format and "INT8_ASYM" in COMPRESSION_OPTIONS: warnings.warn("Usage INT8 mode is deprecated and will be removed soon. Please use INT8_ASYM instead", DeprecationWarning) if "4BIT_DEFAULT" in compress_weights_format: model_id = out_path.parents[3].name
# -*- coding: utf-8 -*- # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 class BackendType(Enum): PYTORCH = 'pytorch' OPENVINO = 'openvino' PYTORCH_DIR = 'pytorch' PYTORCH_COMPRESS_WEIGHTS_DIR = 'compressed_weights/PT_{precision}-{compression}' OV_DIR = 'dldt' GPTQ_DIR = "GPTQ_INT4-{precision}" def is_torch_compression(args): return args.compress_weights and BackendType.PYTORCH.value in args.compress_weights_backends def is_ov_compression(args): return args.compress_weights and BackendType.OPENVINO.value in args.compress_weights_backends def is_fp16(args): return args.precision == "FP16" def is_ov_model_provided(model_id, model_dir, precision, model_name="openvino_model.xml"): model_dirs = [] if Path(model_id).is_dir(): model_dirs.append(Path(model_id)) model_dirs.append(Path(model_id) / precision) model_dirs.append(Path(model_id) / OV_DIR / precision) model_dirs.append(Path(model_id) / PYTORCH_DIR / OV_DIR / precision) model_dir = Path(model_dir) model_dirs.append(model_dir) model_dirs.append(model_dir / precision) model_dirs.append(model_dir / OV_DIR / precision) model_dirs.append(model_dir / PYTORCH_DIR / OV_DIR / precision) for md in model_dirs: found = True for suffix in ['.xml', '.bin']: model_file = (md / model_name).with_suffix(suffix) if not model_file.exists(): found = False break if found: return found return False def get_fp_path(args, model_subpath): model_dirs = [] if Path(args.model_id).is_dir(): base_model_dir = Path(args.model_id) model_dirs.extend([ base_model_dir, base_model_dir / args.precision, base_model_dir / OV_DIR / args.precision, base_model_dir / PYTORCH_DIR / OV_DIR / args.precision ]) model_dir = Path(args.output_dir) model_dirs.append(model_dir) model_dirs.append(Path(model_dir) / args.precision) model_dirs.append(Path(model_dir) / OV_DIR / args.precision) model_dirs.append(Path(model_dir) / PYTORCH_DIR / OV_DIR / args.precision) for md in model_dirs: if (md / model_subpath).exists(): return md / model_subpath return None def save_tokenizer(tokenizer, out_dir): try: tokenizer.save_pretrained(out_dir) except Exception as e: log.error(f'tokenizer loading failed with {e}') def compress_ov_model_weights_helper(ov_model, tok, config, out_path, compress_weights_format="INT8", fp16=False, args={}, model_name="openvino_model"): compression_args = None if "INT8" in compress_weights_format and "INT8_ASYM" in COMPRESSION_OPTIONS: warnings.warn("Usage INT8 mode is deprecated and will be removed soon. Please use INT8_ASYM instead", DeprecationWarning) if "4BIT_DEFAULT" in compress_weights_format: model_id = out_path.parents[3].name
if model_id in INT4_MODEL_CONFIGURATION:
1
2023-10-16 13:38:16+00:00
2k
Iniquitatis/sd-webui-temporal
temporal/image_buffer.py
[ { "identifier": "ensure_directory_exists", "path": "temporal/fs.py", "snippet": "def ensure_directory_exists(path):\n if not path.is_dir():\n path.mkdir(parents = True)\n\n return path" }, { "identifier": "load_json", "path": "temporal/fs.py", "snippet": "def load_json(path, fallback = None):\n if not path.is_file():\n return fallback\n\n with open_utf8(path, \"r\") as file:\n return json.load(file)" }, { "identifier": "save_json", "path": "temporal/fs.py", "snippet": "def save_json(path, data):\n with open_utf8(path, \"w\") as file:\n json.dump(data, file, indent = 4)" }, { "identifier": "ensure_image_dims", "path": "temporal/image_utils.py", "snippet": "def ensure_image_dims(im, mode, size):\n if is_np := isinstance(im, np.ndarray):\n im = Image.fromarray(skimage.util.img_as_ubyte(im))\n\n if im.mode != mode:\n im = im.convert(mode)\n\n if im.size != size:\n im = im.resize(size, Image.Resampling.LANCZOS)\n\n return skimage.util.img_as_float(im) if is_np else im" }, { "identifier": "np_to_pil", "path": "temporal/image_utils.py", "snippet": "def np_to_pil(npim):\n return Image.fromarray(skimage.util.img_as_ubyte(npim))" }, { "identifier": "pil_to_np", "path": "temporal/image_utils.py", "snippet": "def pil_to_np(im):\n return skimage.util.img_as_float(im)" }, { "identifier": "average_array", "path": "temporal/numpy_utils.py", "snippet": "def average_array(arr, axis, trim = 0.0, power = 1.0, weights = None):\n if trim == 0.5:\n return np.median(arr, axis)\n elif trim > 0.0:\n arr = stats.trimboth(arr, trim, axis)\n weights = None\n\n if weights is not None:\n weights = match_array_dimensions(weights, arr, axis)\n\n if power != 1.0:\n arr = arr + 1.0\n\n if power == -1.0:\n result = stats.hmean(arr, axis = axis, weights = weights)\n elif power == 0.0:\n result = stats.gmean(arr, axis = axis, weights = weights)\n elif power == 1.0:\n result = np.average(arr, axis, weights)\n elif power == 2.0:\n result = np.sqrt(np.average(np.square(arr), axis, weights))\n elif power == 3.0:\n result = np.cbrt(np.average(np.power(arr, 3.0), axis, weights))\n else:\n result = stats.pmean(arr, power, axis = axis, weights = weights)\n\n if power != 1.0:\n result -= 1.0\n\n return result" }, { "identifier": "make_eased_weight_array", "path": "temporal/numpy_utils.py", "snippet": "def make_eased_weight_array(count, easing):\n return (np.linspace(1, count, count, dtype = np.float_) / count) ** easing" }, { "identifier": "load_object", "path": "temporal/serialization.py", "snippet": "def load_object(obj, data, data_dir, existing_only = True):\n for key, value in data.items():\n if not existing_only or hasattr(obj, key):\n setattr(obj, key, _load_value(value, data_dir))" }, { "identifier": "save_object", "path": "temporal/serialization.py", "snippet": "def save_object(obj, data_dir, filter = None):\n return {k: _save_value(v, data_dir) for k, v in vars(obj).items() if not filter or k in filter}" } ]
import numpy as np from temporal.fs import ensure_directory_exists, load_json, save_json from temporal.image_utils import ensure_image_dims, np_to_pil, pil_to_np from temporal.numpy_utils import average_array, make_eased_weight_array from temporal.serialization import load_object, save_object
1,233
class ImageBuffer: def __init__(self, width, height, channels, count): self.array = np.zeros((count, height, width, channels)) self.last_index = 0 @property def width(self): return self.array.shape[2] @property def height(self): return self.array.shape[1] @property def channels(self): return self.array.shape[3] @property def count(self): return self.array.shape[0] def init(self, im): npim = self._convert_image_to_np(im) for i in range(self.count): self.array[i] = npim def add(self, im): self.array[self.last_index] = self._convert_image_to_np(im) self.last_index += 1 self.last_index %= self.count def average(self, trimming = 0.0, easing = 0.0, preference = 0.0): return np_to_pil(self.array[0] if self.count == 1 else np.clip(average_array( self.array, axis = 0, trim = trimming, power = preference + 1.0, weights = np.roll(make_eased_weight_array(self.count, easing), self.last_index), ), 0.0, 1.0)) def load(self, project_dir): buffer_dir = project_dir / "session" / "buffer" if data := load_json(buffer_dir / "data.json"): load_object(self, data, buffer_dir) def save(self, project_dir): buffer_dir = ensure_directory_exists(project_dir / "session" / "buffer") save_json(buffer_dir / "data.json", save_object(self, buffer_dir)) def _convert_image_to_np(self, im):
class ImageBuffer: def __init__(self, width, height, channels, count): self.array = np.zeros((count, height, width, channels)) self.last_index = 0 @property def width(self): return self.array.shape[2] @property def height(self): return self.array.shape[1] @property def channels(self): return self.array.shape[3] @property def count(self): return self.array.shape[0] def init(self, im): npim = self._convert_image_to_np(im) for i in range(self.count): self.array[i] = npim def add(self, im): self.array[self.last_index] = self._convert_image_to_np(im) self.last_index += 1 self.last_index %= self.count def average(self, trimming = 0.0, easing = 0.0, preference = 0.0): return np_to_pil(self.array[0] if self.count == 1 else np.clip(average_array( self.array, axis = 0, trim = trimming, power = preference + 1.0, weights = np.roll(make_eased_weight_array(self.count, easing), self.last_index), ), 0.0, 1.0)) def load(self, project_dir): buffer_dir = project_dir / "session" / "buffer" if data := load_json(buffer_dir / "data.json"): load_object(self, data, buffer_dir) def save(self, project_dir): buffer_dir = ensure_directory_exists(project_dir / "session" / "buffer") save_json(buffer_dir / "data.json", save_object(self, buffer_dir)) def _convert_image_to_np(self, im):
return pil_to_np(ensure_image_dims(
5
2023-10-15 18:49:12+00:00
2k
zabbix/python-zabbix-utils
zabbix_utils/sender.py
[ { "identifier": "EmptyHandler", "path": "zabbix_utils/logger.py", "snippet": "class EmptyHandler(logging.Handler):\n \"\"\"Empty logging handler.\"\"\"\n\n def emit(self, *args, **kwargs):\n pass" }, { "identifier": "ZabbixProtocol", "path": "zabbix_utils/common.py", "snippet": "class ZabbixProtocol():\n\n ZABBIX_PROTOCOL = b'ZBXD'\n\n HEADER_SIZE = 13\n\n @classmethod\n def __prepare_request(cls, data: Union[bytes, str, list, dict]) -> bytes:\n if isinstance(data, bytes):\n return data\n if isinstance(data, str):\n return data.encode(\"utf-8\")\n if isinstance(data, list) or isinstance(data, dict):\n return json.dumps(data, ensure_ascii=False).encode(\"utf-8\")\n raise TypeError(\"Unsupported data type, only 'bytes', 'str', 'list' or 'dict' is expected\")\n\n @classmethod\n def create_packet(cls, payload: Union[bytes, str, list, dict],\n log: Logger, compression: bool = False) -> bytes:\n \"\"\"Create a packet for sending via the Zabbix protocol.\n\n Args:\n payload (Union[bytes, str, list, dict]): Payload of the future packet\n log (Logger): Logger object\n compression (bool, optional): Compression use flag. Defaults to `False`.\n\n Returns:\n bytes: Generated Zabbix protocol packet\n \"\"\"\n\n request = cls.__prepare_request(payload)\n\n log.debug('Request data: %s', shorten(request.decode(\"utf-8\"), 200, placeholder='...'))\n\n # 0x01 - Zabbix communications protocol\n flags = 0x01\n datalen = len(request)\n reserved = 0\n\n if compression:\n # 0x02 - Using packet compression mode\n flags |= 0x02\n reserved = datalen\n request = zlib.compress(request)\n datalen = len(request)\n\n header = struct.pack('<4sBII', cls.ZABBIX_PROTOCOL, flags, datalen, reserved)\n packet = header + request\n\n log.debug('Content of the packet: %s', shorten(str(packet), 200, placeholder='...'))\n\n return packet\n\n @classmethod\n def receive_packet(cls, conn: socket, size: int, log: Logger) -> bytes:\n \"\"\"Receive a Zabbix protocol packet.\n\n Args:\n conn (socket): Opened socket connection\n size (int): Expected packet size\n log (Logger): Logger object\n\n Returns:\n bytes: Received packet content\n \"\"\"\n buf = b''\n\n while len(buf) < size:\n chunk = conn.recv(size - len(buf))\n if not chunk:\n log.debug(\"Socket connection was closed before receiving expected amount of data.\")\n break\n buf += chunk\n\n return buf\n\n @classmethod\n def parse_packet(cls, conn: socket, log: Logger, exception) -> str:\n \"\"\"Parse a received Zabbix protocol packet.\n\n Args:\n conn (socket): Opened socket connection\n log (Logger): Logger object\n exception: Exception type\n\n Raises:\n exception: Depends on input exception type\n\n Returns:\n str: Body of the received packet\n \"\"\"\n\n response_header = cls.receive_packet(conn, cls.HEADER_SIZE, log)\n log.debug('Zabbix response header: %s', response_header)\n\n if (not response_header.startswith(cls.ZABBIX_PROTOCOL) or\n len(response_header) != cls.HEADER_SIZE):\n log.debug('Unexpected response was received from Zabbix.')\n raise exception('Unexpected response was received from Zabbix.')\n\n flags, datalen, reserved = struct.unpack('<BII', response_header[4:])\n\n # 0x01 - Zabbix communications protocol\n if not flags & 0x01:\n raise exception(\n 'Unexcepted flags were received. '\n 'Check debug log for more information.'\n )\n # 0x04 - Using large packet mode\n if flags & 0x04:\n raise exception(\n 'A large packet flag was received. 
'\n 'Current module doesn\\'t support large packets.'\n )\n # 0x02 - Using packet compression mode\n if flags & 0x02:\n response_body = zlib.decompress(cls.receive_packet(conn, datalen, log))\n else:\n response_body = cls.receive_packet(conn, datalen, log)\n\n return response_body.decode(\"utf-8\")" }, { "identifier": "ProcessingError", "path": "zabbix_utils/exceptions.py", "snippet": "class ProcessingError(ModuleBaseException):\n def __init__(self, *args):\n super().__init__(\" \".join(map(str, args)))\n return" } ]
import re import json import socket import logging import configparser from decimal import Decimal from typing import Callable, Union from typing import Self # type: ignore from typing_extensions import Self from .logger import EmptyHandler from .common import ZabbixProtocol from .exceptions import ProcessingError
1,470
# zabbix_utils # # Copyright (C) 2001-2023 Zabbix SIA # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software # is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # For Python less 3.11 compatibility try: except ImportError: log = logging.getLogger(__name__)
# zabbix_utils # # Copyright (C) 2001-2023 Zabbix SIA # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software # is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # For Python less 3.11 compatibility try: except ImportError: log = logging.getLogger(__name__)
log.addHandler(EmptyHandler())
0
2023-10-16 12:49:35+00:00
2k
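The ZabbixProtocol snippet in this record frames every payload with a 13-byte b'ZBXD' header: a 4-byte magic, one flags byte, and two little-endian uint32 fields. A self-contained sketch of that framing, reusing only the constants and struct format shown in the snippet:

import json
import struct
import zlib

ZABBIX_PROTOCOL = b'ZBXD'
HEADER_SIZE = 13

def create_packet(payload: dict, compression: bool = False) -> bytes:
    # 0x01 - Zabbix communications protocol; 0x02 - packet compression mode
    body = json.dumps(payload, ensure_ascii=False).encode('utf-8')
    flags, reserved = 0x01, 0
    if compression:
        flags |= 0x02
        reserved = len(body)  # uncompressed length is kept in the reserved field
        body = zlib.compress(body)
    header = struct.pack('<4sBII', ZABBIX_PROTOCOL, flags, len(body), reserved)
    return header + body

packet = create_packet({'request': 'sender data', 'data': []}, compression=True)
flags, datalen, reserved = struct.unpack('<BII', packet[4:HEADER_SIZE])
assert packet.startswith(ZABBIX_PROTOCOL) and flags & 0x02
assert len(packet) == HEADER_SIZE + datalen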
miccunifi/TAPE
models/pl_model_module.py
[ { "identifier": "CharbonnierLoss", "path": "models/losses.py", "snippet": "class CharbonnierLoss(nn.Module):\n \"\"\"\n Charbonnier loss (one variant of Robust L1Loss, a differentiable variant of L1Loss).\n\n Described in \"Deep Laplacian Pyramid Networks for Fast and Accurate Super-Resolution\".\n\n Args:\n eps (float): A value used to control the curvature near zero. Default: 1e-12.\n loss_weight (float): Loss weight for L1 loss. Default: 1.0.\n \"\"\"\n\n def __init__(self, eps: float = 1e-12, loss_weight: float = 1.0):\n super(CharbonnierLoss, self).__init__()\n self.loss_weight = loss_weight\n self.eps = eps\n\n def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n pred (Tensor): of shape (N, C, H, W). Predicted tensor.\n target (Tensor): of shape (N, C, H, W). Ground truth tensor.\n \"\"\"\n return self.loss_weight * charbonnier_loss(pred, target, eps=self.eps)" }, { "identifier": "PerceptualLoss", "path": "models/losses.py", "snippet": "class PerceptualLoss(nn.Module):\n \"\"\"\n VGG 19 Perceptual loss\n\n Args:\n layer_weights (dict): Layer weights for perceptual loss.\n use_input_norm (bool): If True, x: [0, 1] --> (x - mean) / std. Default: True\n use_range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. Default: False.\n criterion (str): Criterion type. Default: 'l2'.\n loss_weight (float): Loss weight for perceptual loss. Default: 1.0.\n \"\"\"\n\n def __init__(self, layer_weights: dict, use_input_norm: bool = True, use_range_norm: bool = False,\n criterion: str = 'l2', loss_weight: float = 1.0):\n super(PerceptualLoss, self).__init__()\n self.layer_weights = layer_weights\n self.vgg = VGGFeatureExtractor(layer_name_list=list(layer_weights.keys()),\n use_input_norm=use_input_norm,\n use_range_norm=use_range_norm)\n self.criterion_type = criterion\n if self.criterion_type == 'l1':\n self.criterion = torch.nn.L1Loss()\n elif self.criterion_type == 'l2':\n self.criterion = torch.nn.MSELoss()\n else:\n raise NotImplementedError(f'{criterion} criterion is not supported.')\n self.loss_weight = loss_weight\n\n def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward function.\n Args:\n pred (Tensor): Input tensor with shape (n, c, h, w).\n target (Tensor): Ground-truth tensor with shape (n, c, h, w).\n Returns:\n Tensor: Forward results.\n \"\"\"\n pred_feat = self.vgg(pred)\n target_feat = self.vgg(target.detach())\n\n loss = 0.0\n for i in pred_feat.keys():\n loss += self.criterion(pred_feat[i], target_feat[i]) * self.layer_weights[i]\n loss *= self.loss_weight\n return loss" } ]
import torch import torch.nn as nn import pytorch_lightning as pl import torchmetrics.image import torchmetrics import os.path as osp from torchvision.transforms.functional import to_pil_image from torchmetrics.functional.image.ssim import structural_similarity_index_measure from einops import rearrange from models.losses import CharbonnierLoss, PerceptualLoss
1,071
class ModelModule(pl.LightningModule): """ Pytorch Lightning Module for model training. Args: net (nn.Module): Model to train num_input_frames (int): Number of input frames in the input window pixel_loss_weight (float): Weight of the pixel loss perceptual_loss_weight (float): Weight of the perceptual loss lr (float): Learning rate """ def __init__(self, net: nn.Module, num_input_frames: int = 5, pixel_loss_weight: float = 200, perceptual_loss_weight: float = 1, lr: float = 2e-5): super(ModelModule, self).__init__() self.save_hyperparameters(ignore=["net"]) self.net = net self.num_input_frames = num_input_frames self.pixel_loss_weight = pixel_loss_weight self.perceptual_loss_weight = perceptual_loss_weight self.lr = lr
class ModelModule(pl.LightningModule): """ Pytorch Lightning Module for model training. Args: net (nn.Module): Model to train num_input_frames (int): Number of input frames in the input window pixel_loss_weight (float): Weight of the pixel loss perceptual_loss_weight (float): Weight of the perceptual loss lr (float): Learning rate """ def __init__(self, net: nn.Module, num_input_frames: int = 5, pixel_loss_weight: float = 200, perceptual_loss_weight: float = 1, lr: float = 2e-5): super(ModelModule, self).__init__() self.save_hyperparameters(ignore=["net"]) self.net = net self.num_input_frames = num_input_frames self.pixel_loss_weight = pixel_loss_weight self.perceptual_loss_weight = perceptual_loss_weight self.lr = lr
self.pixel_criterion = CharbonnierLoss(loss_weight=self.pixel_loss_weight)
0
2023-10-19 09:14:40+00:00
2k
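CharbonnierLoss above delegates to a charbonnier_loss function the record does not include; a common formulation (assumed here, not taken from the repo) is the smooth L1 variant sqrt((pred - target)^2 + eps) averaged over all elements:

import torch

def charbonnier_loss(pred: torch.Tensor, target: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    # Differentiable everywhere, including where pred == target.
    return torch.sqrt((pred - target) ** 2 + eps).mean()

pred = torch.randn(2, 3, 8, 8, requires_grad=True)
target = torch.randn(2, 3, 8, 8)
loss = charbonnier_loss(pred, target)
loss.backward()            # gradients stay finite near zero error
print(loss.item() >= 0.0)  # True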
YefanZhou/TempBalance
object_detection/src/YOLOv8/ultralytics/vit/sam/model.py
[ { "identifier": "build_sam", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/build.py", "snippet": "def build_sam(ckpt='sam_b.pt'):\n \"\"\"Build a SAM model specified by ckpt.\"\"\"\n model_builder = None\n for k in sam_model_map.keys():\n if ckpt.endswith(k):\n model_builder = sam_model_map.get(k)\n\n if not model_builder:\n raise FileNotFoundError(f'{ckpt} is not a supported sam model. Available models are: \\n {sam_model_map.keys()}')\n\n return model_builder(ckpt)" }, { "identifier": "Predictor", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/predict.py", "snippet": "class Predictor(BasePredictor):\n\n def preprocess(self, im):\n \"\"\"Prepares input image for inference.\"\"\"\n # TODO: Only support bs=1 for now\n # im = ResizeLongestSide(1024).apply_image(im[0])\n # im = torch.as_tensor(im, device=self.device)\n # im = im.permute(2, 0, 1).contiguous()[None, :, :, :]\n return im[0]\n\n def setup_model(self, model):\n \"\"\"Set up YOLO model with specified thresholds and device.\"\"\"\n device = select_device(self.args.device)\n model.eval()\n self.model = SamAutomaticMaskGenerator(model.to(device),\n pred_iou_thresh=self.args.conf,\n box_nms_thresh=self.args.iou)\n self.device = device\n # TODO: Temporary settings for compatibility\n self.model.pt = False\n self.model.triton = False\n self.model.stride = 32\n self.model.fp16 = False\n self.done_warmup = True\n\n def postprocess(self, preds, path, orig_imgs):\n \"\"\"Postprocesses inference output predictions to create detection masks for objects.\"\"\"\n names = dict(enumerate(list(range(len(preds)))))\n results = []\n # TODO\n for i, pred in enumerate([preds]):\n masks = torch.from_numpy(np.stack([p['segmentation'] for p in pred], axis=0))\n orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs\n path = self.batch[0]\n img_path = path[i] if isinstance(path, list) else path\n results.append(Results(orig_img=orig_img, path=img_path, names=names, masks=masks))\n return results\n\n # def __call__(self, source=None, model=None, stream=False):\n # frame = cv2.imread(source)\n # preds = self.model.generate(frame)\n # return self.postprocess(preds, source, frame)" } ]
from ultralytics.yolo.cfg import get_cfg from .build import build_sam from .predict import Predictor
724
# SAM model interface class SAM: def __init__(self, model='sam_b.pt') -> None: if model and not model.endswith('.pt') and not model.endswith('.pth'): # Should raise AssertionError instead? raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint')
# SAM model interface class SAM: def __init__(self, model='sam_b.pt') -> None: if model and not model.endswith('.pt') and not model.endswith('.pth'): # Should raise AssertionError instead? raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint')
self.model = build_sam(model)
0
2023-10-24 00:45:55+00:00
2k
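build_sam above picks a builder by matching the checkpoint filename suffix against sam_model_map, which the record leaves undefined; a sketch of the same dispatch with hypothetical builders:

# Hypothetical builder map standing in for the undefined sam_model_map.
sam_model_map = {
    'sam_b.pt': lambda ckpt: f'ViT-B SAM built from {ckpt}',
    'sam_l.pt': lambda ckpt: f'ViT-L SAM built from {ckpt}',
}

def build_sam(ckpt: str = 'sam_b.pt'):
    model_builder = None
    for suffix in sam_model_map.keys():
        if ckpt.endswith(suffix):
            model_builder = sam_model_map.get(suffix)
    if not model_builder:
        raise FileNotFoundError(
            f'{ckpt} is not a supported sam model. Available models are: \n {sam_model_map.keys()}')
    return model_builder(ckpt)

print(build_sam('weights/sam_b.pt'))  # ViT-B SAM built from weights/sam_b.pt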
intuit/sac3
sac3/main.py
[ { "identifier": "paraphraser", "path": "sac3/paraphraser.py", "snippet": "def paraphrase(question, number, model, temperature):" }, { "identifier": "Evaluate", "path": "sac3/evaluator.py", "snippet": "class Evaluate:\n def __init__(self, model):\n self.model = model\n self.prompt_temp = 'Answer the following question:\\n'\n \n def self_evaluate(self, self_question, temperature, self_num):\n '''\n Inputs: \n self_question - original user query \n temperature - [0,1] for LLM randomness\n self_num - how many generated responses given this question\n\n Outputs:\n self_responses - generated responses given this question with different temperatures\n '''\n\n self_responses = [] \n prompt = self.prompt_temp + '\\nQ:' + self_question\n \n for i in range(self_num): \n # llm model: GPTs, open-source models (falcon, guanaco)\n if self.model in ['gpt-3.5-turbo','gpt-4']:\n res = llm_models.call_openai_model(prompt, self.model, temperature) # openai model call\n elif self.model == 'guanaco-33b':\n res = llm_models.call_guanaco_33b(prompt, max_new_tokens = 200)\n elif self.model == 'falcon-7b':\n res = llm_models.call_falcon_7b(prompt, max_new_tokens = 200)\n # other open-sourced llms \n self_responses.append(res)\n\n return self_responses\n \n def perb_evaluate(self, perb_questions, temperature):\n '''\n Inputs: \n perb_questions - perturbed questions that are semantically equivalent to the original question\n temperature - [0,1] for LLM randomness\n\n Outputs:\n perb_responses - generated responses given the perturbed questions\n '''\n \n perb_responses = [] \n for i in range(len(perb_questions)):\n prompt = self.prompt_temp + '\\nQ:' + perb_questions[i]\n # llm model: GPTs, open-source models (falcon, guanaco)\n if self.model in ['gpt-3.5-turbo','gpt-4']:\n res = llm_models.call_openai_model(prompt, self.model, temperature) # openai model call \n elif self.model == 'guanaco-33b':\n res = llm_models.call_guanaco_33b(prompt, max_new_tokens = 200)\n elif self.model == 'falcon-7b':\n res = llm_models.call_falcon_7b(prompt, max_new_tokens = 200)\n # other open-sourced llms \n perb_responses.append(res)\n \n return perb_responses" }, { "identifier": "SemanticConsistnecyCheck", "path": "sac3/consistency_checker.py", "snippet": "class SemanticConsistnecyCheck:\n def __init__(self, model):\n self.model = model\n self.prompt_temp = \"\"\"\n Are the following two Question-Answer(QA) pairs semantically equivalent? \n Provide your best guess and the probability that it is correct (0.0 to 1.0).\n Given ONLY the guess (Yes or No) and probability, no other words or explanation. \n For example:\n Guess: <most likely guess, as short as possible; not a complete sentence, just the guess!> \n Probability: <the probability between 0.0 and 1.0 that your guess is correct, without any extra commentary whatsoever; \n just the probability!>\n \"\"\"\n \n def score_scc(self, question, target_answer, candidate_answers, temperature):\n '''\n Inputs:\n question - original user query\n target_answer - generated response given the original question (temp=0) if not provided by user \n candidate_answers - generated responses given the question (original + perturbed)\n temperature - [0,1] for LLM randomness\n\n Outputs:\n score - inconsistency score (hallucination metric) \n sc_output - specific score for each candidate answers compared with the target answer \n '''\n\n if target_answer is None:\n raise ValueError(\"Target answer cannot be None. 
\")\n\n sc_output = [] \n target_pair = 'Q:' + question + '\\nA:' + target_answer\n num_candidate_answer = len(candidate_answers)\n for i in range(num_candidate_answer): \n candidate_pair = 'Q:' + question + '\\nA:' + candidate_answers[i]\n prompt = self.prompt_temp + '\\nThe first QA pair is:\\n' + target_pair + '\\nThe second QA pair is:\\n' + candidate_pair\n res = llm_models.call_openai_model(prompt, self.model, temperature) # openai model call \n guess = res.split(':')[1].split('\\n')[0].strip()\n # print(res, guess)\n value = 0 if guess == 'Yes' else 1\n # print('value',value)\n sc_output.append(value)\n \n score = sum(sc_output)/num_candidate_answer\n return score, sc_output" } ]
from sac3 import paraphraser from sac3.evaluator import Evaluate from sac3.consistency_checker import SemanticConsistnecyCheck
1,291
# input information question = 'Was there ever a US senator that represented the state of Alabama and whose alma mater was MIT?' target_answer = 'Never' # question pertubation gen_question = paraphraser.paraphrase(question, number = 3, model = 'gpt-3.5-turbo', temperature=1.0) # llm evaluation
# input information question = 'Was there ever a US senator that represented the state of Alabama and whose alma mater was MIT?' target_answer = 'Never' # question pertubation gen_question = paraphraser.paraphrase(question, number = 3, model = 'gpt-3.5-turbo', temperature=1.0) # llm evaluation
llm_evaluate = Evaluate(model='gpt-3.5-turbo')
1
2023-10-24 23:35:23+00:00
2k
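score_scc in this record's context turns each 'Guess: Yes/No' reply into a 0/1 flag and averages the flags into an inconsistency score. The same aggregation, run offline on hypothetical replies instead of live model calls:

def guess_to_flag(reply: str) -> int:
    # Mirrors the snippet's parsing: take the text right after 'Guess:'.
    guess = reply.split(':')[1].split('\n')[0].strip()
    return 0 if guess == 'Yes' else 1

replies = [
    'Guess: Yes\nProbability: 0.9',
    'Guess: No\nProbability: 0.8',
    'Guess: Yes\nProbability: 0.7',
]
flags = [guess_to_flag(r) for r in replies]
print(sum(flags) / len(flags))  # 0.333... -> one of three candidates disagrees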
zcczhang/UVD
uvd/utils/video_utils.py
[ { "identifier": "any_stack", "path": "uvd/utils/array_tensor_utils.py", "snippet": "def any_stack(xs: List, *, dim: int = 0):\n \"\"\"Works for both torch Tensor and numpy array.\"\"\"\n\n def _any_stack_helper(*xs):\n x = xs[0]\n if isinstance(x, np.ndarray):\n return np.stack(xs, axis=dim)\n elif torch.is_tensor(x):\n return torch.stack(xs, dim=dim)\n elif isinstance(x, float):\n # special treatment for float, defaults to float32\n return np.array(xs, dtype=np.float32)\n else:\n return np.array(xs)\n\n return tree.map_structure(_any_stack_helper, *xs)" }, { "identifier": "any_to_torch_tensor", "path": "uvd/utils/array_tensor_utils.py", "snippet": "def any_to_torch_tensor(\n x,\n dtype: Union[str, torch.dtype, None] = None,\n device: Union[str, int, torch.device, None] = None,\n copy=False,\n non_blocking=False,\n smart_optimize: bool = True,\n):\n dtype = torch_dtype(dtype)\n device = torch_device(device)\n\n if not isinstance(x, (torch.Tensor, np.ndarray)):\n # x is a primitive python sequence\n x = torch.tensor(x, dtype=dtype)\n copy = False\n\n # This step does not create any copy.\n # If x is a numpy array, simply wraps it in Tensor. If it's already a Tensor, do nothing.\n x = torch.as_tensor(x)\n # avoid passing None to .to(), PyTorch 1.4 bug\n dtype = dtype or x.dtype\n device = device or x.device\n\n if not smart_optimize:\n # do a single stage type conversion and transfer\n return x.to(dtype=dtype, device=device, copy=copy, non_blocking=non_blocking)\n\n # we have two choices: (1) convert dtype and then transfer to GPU\n # (2) transfer to GPU and then convert dtype\n # because CPU-to-GPU memory transfer is the bottleneck, we will reduce it as\n # much as possible by sending the smaller dtype\n\n src_dtype_size = torch_dtype_size(x.dtype)\n\n # destination dtype size\n if dtype is None:\n dest_dtype_size = src_dtype_size\n else:\n dest_dtype_size = torch_dtype_size(dtype)\n\n if x.dtype != dtype or x.device != device:\n # a copy will always be performed, no need to force copy again\n copy = False\n\n if src_dtype_size > dest_dtype_size:\n # better to do conversion on one device (e.g. 
CPU) and then transfer to another\n return _convert_then_transfer(x, dtype, device, copy, non_blocking)\n elif src_dtype_size == dest_dtype_size:\n # when equal, we prefer to do the conversion on whichever device that's GPU\n if x.device.type == \"cuda\":\n return _convert_then_transfer(x, dtype, device, copy, non_blocking)\n else:\n return _transfer_then_convert(x, dtype, device, copy, non_blocking)\n else:\n # better to transfer data across device first, and then do conversion\n return _transfer_then_convert(x, dtype, device, copy, non_blocking)" }, { "identifier": "any_to_numpy", "path": "uvd/utils/array_tensor_utils.py", "snippet": "def any_to_numpy(\n x,\n dtype: Union[str, np.dtype, None] = None,\n copy: bool = False,\n non_blocking: bool = False,\n smart_optimize: bool = True,\n exclude_none: bool = False,\n):\n if exclude_none and x is None:\n return x\n if isinstance(x, torch.Tensor):\n x = any_to_torch_tensor(\n x,\n dtype=dtype,\n device=\"cpu\",\n copy=copy,\n non_blocking=non_blocking,\n smart_optimize=smart_optimize,\n )\n return x.detach().numpy()\n else:\n # primitive python sequence or ndarray\n return np.array(x, dtype=dtype, copy=copy)" }, { "identifier": "f_mkdir", "path": "uvd/utils/file_utils.py", "snippet": "def f_mkdir(*fpaths):\n \"\"\"Recursively creates all the subdirs If exist, do nothing.\"\"\"\n fpath = f_join(*fpaths)\n os.makedirs(fpath, exist_ok=True)\n return fpath" }, { "identifier": "f_join", "path": "uvd/utils/file_utils.py", "snippet": "def f_join(*fpaths):\n \"\"\"Join file paths and expand special symbols like `~` for home dir.\"\"\"\n return f_expand(os.path.join(*fpaths))" }, { "identifier": "f_remove", "path": "uvd/utils/file_utils.py", "snippet": "def f_remove(fpath, verbose=False, dry_run=False):\n \"\"\"If exist, remove.\n\n Supports both dir and file. Supports glob wildcard.\n \"\"\"\n assert isinstance(verbose, bool)\n fpath = f_expand(fpath)\n if dry_run:\n print(\"Dry run, delete:\", fpath)\n return\n for f in glob.glob(fpath):\n try:\n shutil.rmtree(f)\n except OSError as e:\n if e.errno == errno.ENOTDIR:\n try:\n os.remove(f)\n except: # final resort safeguard\n pass\n if verbose:\n print(f'Deleted \"{fpath}\"')" } ]
import subprocess import numpy as np import torch import torchvision.io import ffmpeg # pip install ffmpeg-python from typing import Union, List, Optional from .array_tensor_utils import any_stack, any_to_torch_tensor, any_to_numpy from .file_utils import f_mkdir, f_join, f_remove from einops import rearrange from einops import rearrange
1,383
__all__ = ["save_video", "ffmpeg_save_video", "compress_video", "VideoTensorWriter"] def save_video( video: Union[np.ndarray, torch.Tensor], fname: str, fps: Optional[int] = None, compress: bool = False, ): fname = f_join(fname)
__all__ = ["save_video", "ffmpeg_save_video", "compress_video", "VideoTensorWriter"] def save_video( video: Union[np.ndarray, torch.Tensor], fname: str, fps: Optional[int] = None, compress: bool = False, ): fname = f_join(fname)
video = any_to_torch_tensor(video)
1
2023-10-17 19:08:14+00:00
2k
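any_to_torch_tensor above orders the dtype conversion and the device transfer so that the smaller dtype crosses the CPU-GPU boundary. A compact sketch of that heuristic, with a hypothetical dtype_size probe in place of the undefined torch_dtype_size:

import torch

def dtype_size(dtype: torch.dtype) -> int:
    # Hypothetical stand-in for torch_dtype_size: bytes per element.
    return torch.empty((), dtype=dtype).element_size()

def smart_to(x: torch.Tensor, dtype: torch.dtype, device: str) -> torch.Tensor:
    if dtype_size(x.dtype) > dtype_size(dtype):
        return x.to(dtype=dtype).to(device)  # shrink first, then transfer
    return x.to(device).to(dtype=dtype)      # transfer first, then convert

x = torch.randn(1024, dtype=torch.float64)
y = smart_to(x, torch.float16, 'cpu')        # 'cuda' when one is available
print(y.dtype)                               # torch.float16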
Aatrox103/SAP
gpt_eval.py
[ { "identifier": "askChatGPT", "path": "utils/openai_api.py", "snippet": "@backoff.on_exception(backoff.expo, (openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.Timeout))\ndef askChatGPT(prompt, model_name):\n if model_name == \"gpt-3.5-turbo-0301\":\n messages = [{\"role\": \"user\", \"content\": prompt}]\n # print(messages[0]['content'])\n response = openai.ChatCompletion.create(model=model_name, messages=messages, temperature=1)\n return response['choices'][0]['message']['content']\n elif model_name == \"text-davinci-003\":\n completions = openai.Completion.create(\n engine=\"text-davinci-003\",\n prompt=prompt,\n max_tokens=2048,\n )\n message = completions.choices[0].text\n return message" }, { "identifier": "GPT_eval_score", "path": "utils/openai_api.py", "snippet": "def GPT_eval_score(text, model_name, template_eval):\n content = askChatGPT(template_eval.format(text), model_name)\n match = re.search(r'(\\d+)', content)\n if match is None:\n return None\n else:\n score = match.group(1)\n return int(score)" }, { "identifier": "Moderation", "path": "utils/openai_api.py", "snippet": "@backoff.on_exception(backoff.expo, (openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.Timeout))\ndef Moderation(input):\n response = openai.Moderation.create(model=\"text-moderation-stable\", input=input)\n return response" } ]
import openai import json import re import os import backoff import logging import fire from tqdm import tqdm from utils.openai_api import askChatGPT, GPT_eval_score, Moderation
777
#data_path: The name of the grandparent directory where the file is read and stored #gpt_output: list of text to be evaluated #eval_filename_prefix: The file name prefix of the read file, which is used to name the file when storing the result #Use gpt to evaluate the harmfulness of the generated results, #and two files will be generated in different directories of the evaluated file to save the results def GPT_eval(data_path, gpt_output, model_name, eval_filename_prefix): with open('./templates/eval_template2.json', 'r', encoding="utf8") as file: template_eval = json.load(file)[0] gpt_eval_score = [] save_dir = f"./data/{data_path}/" if eval_filename_prefix is None and os.path.exists(f'{save_dir}gpt_output_score.json'): with open(f'{save_dir}gpt_output_score.json', 'r', encoding="utf8") as file: gpt_eval_score = json.load(file) elif os.path.exists(f'{save_dir}{eval_filename_prefix}/{eval_filename_prefix}_output_score.json'): with open(f'{save_dir}{eval_filename_prefix}/{eval_filename_prefix}_output_score.json', 'r', encoding="utf8") as file: gpt_eval_score = json.load(file) if len(gpt_eval_score) == len(gpt_output): write_log=False else: write_log=True gpt_output= gpt_output[len(gpt_eval_score):] fail_index = [] for index, output in enumerate(tqdm(gpt_output)): fail_times = 0 while True:
#data_path: The name of the grandparent directory where the file is read and stored #gpt_output: list of text to be evaluated #eval_filename_prefix: The file name prefix of the read file, which is used to name the file when storing the result #Use gpt to evaluate the harmfulness of the generated results, #and two files will be generated in different directories of the evaluated file to save the results def GPT_eval(data_path, gpt_output, model_name, eval_filename_prefix): with open('./templates/eval_template2.json', 'r', encoding="utf8") as file: template_eval = json.load(file)[0] gpt_eval_score = [] save_dir = f"./data/{data_path}/" if eval_filename_prefix is None and os.path.exists(f'{save_dir}gpt_output_score.json'): with open(f'{save_dir}gpt_output_score.json', 'r', encoding="utf8") as file: gpt_eval_score = json.load(file) elif os.path.exists(f'{save_dir}{eval_filename_prefix}/{eval_filename_prefix}_output_score.json'): with open(f'{save_dir}{eval_filename_prefix}/{eval_filename_prefix}_output_score.json', 'r', encoding="utf8") as file: gpt_eval_score = json.load(file) if len(gpt_eval_score) == len(gpt_output): write_log=False else: write_log=True gpt_output= gpt_output[len(gpt_eval_score):] fail_index = [] for index, output in enumerate(tqdm(gpt_output)): fail_times = 0 while True:
score = GPT_eval_score(text=output, template_eval=template_eval, model_name=model_name)
1
2023-10-16 02:34:29+00:00
2k
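GPT_eval_score in this record's context extracts the first integer from a free-form model reply with re.search; the same step in isolation:

import re

def extract_score(reply: str):
    match = re.search(r'(\d+)', reply)
    return int(match.group(1)) if match else None

print(extract_score('Harmfulness score: 7 out of 10'))  # 7
print(extract_score('no numeric score given'))          # None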
bytedance/ColTrack
motlib/utils/model.py
[ { "identifier": "match_name_keywords", "path": "util/get_param_dicts.py", "snippet": "def match_name_keywords(n: str, name_keywords: list):\n out = False\n for b in name_keywords:\n if b in n:\n out = True\n break\n return out" }, { "identifier": "get_param_dict", "path": "util/get_param_dicts.py", "snippet": "def get_param_dict(args, model_without_ddp: nn.Module):\n try:\n param_dict_type = args.param_dict_type\n except:\n param_dict_type = 'default'\n assert param_dict_type in ['default', 'ddetr_in_mmdet', 'large_wd']\n\n # by default\n if param_dict_type == 'default':\n param_dicts = [\n {\"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": args.lr_backbone,\n }\n ]\n return param_dicts\n\n if param_dict_type == 'ddetr_in_mmdet':\n param_dicts = [\n {\n \"params\":\n [p for n, p in model_without_ddp.named_parameters()\n if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\n \"lr\": args.lr,\n },\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() \n if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],\n \"lr\": args.lr_backbone,\n },\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() \n if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\n \"lr\": args.lr * args.lr_linear_proj_mult,\n }\n ] \n return param_dicts\n\n if param_dict_type == 'large_wd':\n param_dicts = [\n {\n \"params\":\n [p for n, p in model_without_ddp.named_parameters()\n if not match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n },\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() \n if match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n \"lr\": args.lr_backbone,\n \"weight_decay\": 0.0,\n },\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() \n if match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n \"lr\": args.lr_backbone,\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\":\n [p for n, p in model_without_ddp.named_parameters()\n if not match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n \"lr\": args.lr,\n \"weight_decay\": 0.0,\n }\n ]\n\n # print(\"param_dicts: {}\".format(param_dicts))\n\n return param_dicts" } ]
import json import torch import torch.nn as nn from util.get_param_dicts import match_name_keywords from util.get_param_dicts import get_param_dict as get_param_dict_default
979
__all__ = ['get_param_dict'] def get_param_dict(args, model_without_ddp: nn.Module): try: param_dict_type = args.param_dict_type except: param_dict_type = 'default' assert param_dict_type in ['default', 'ddetr_in_mmdet', 'large_wd', 'finetune'] if param_dict_type == 'finetune': ft_ignore_param = args.frozen_weights_mot param_dicts = [ { "params": [p for n, p in model_without_ddp.named_parameters() if p.requires_grad], "lr": args.lr } ] else:
__all__ = ['get_param_dict'] def get_param_dict(args, model_without_ddp: nn.Module): try: param_dict_type = args.param_dict_type except: param_dict_type = 'default' assert param_dict_type in ['default', 'ddetr_in_mmdet', 'large_wd', 'finetune'] if param_dict_type == 'finetune': ft_ignore_param = args.frozen_weights_mot param_dicts = [ { "params": [p for n, p in model_without_ddp.named_parameters() if p.requires_grad], "lr": args.lr } ] else:
param_dicts = get_param_dict_default(args, model_without_ddp)
0
2023-10-16 02:18:33+00:00
2k
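The 'default' branch of get_param_dict above separates backbone parameters from the rest so the backbone can use a smaller learning rate. A runnable sketch of that grouping on a toy model (module names and rates are illustrative):

import torch
import torch.nn as nn

model = nn.Sequential()
model.add_module('backbone', nn.Linear(8, 8))
model.add_module('head', nn.Linear(8, 2))

param_dicts = [
    {"params": [p for n, p in model.named_parameters()
                if "backbone" not in n and p.requires_grad]},
    {"params": [p for n, p in model.named_parameters()
                if "backbone" in n and p.requires_grad],
     "lr": 1e-5},  # reduced backbone learning rate
]
optimizer = torch.optim.AdamW(param_dicts, lr=1e-4)
print([group['lr'] for group in optimizer.param_groups])  # [0.0001, 1e-05]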
alm0ra/mockafka-py
tests/test_producer.py
[ { "identifier": "Message", "path": "mockafka/message.py", "snippet": "class Message:\n def __init__(self, *args, **kwargs):\n self._headers: Optional[dict] = kwargs.get('headers', None)\n self._key: Optional[str] = kwargs.get('key', None)\n self._value: Optional[str] = kwargs.get('value', None)\n self._topic: Optional[str] = kwargs.get('topic', None)\n self._offset: Optional[int] = kwargs.get('offset', None)\n self._error: Optional[KafkaError] = kwargs.get('error', None)\n self._latency: Optional[float] = kwargs.get('latency', None)\n self._leader_epoch: Optional[int] = kwargs.get('leader_epoch', None)\n self._partition: Optional[int] = kwargs.get('partition', None)\n self._timestamp: int = kwargs.get('timestamp', None)\n\n def offset(self, *args, **kwargs):\n return self._offset\n\n def latency(self, *args, **kwargs):\n return self._latency\n\n def leader_epoch(self, *args, **kwargs):\n return self._leader_epoch\n\n def headers(self, *args, **kwargs):\n return self._headers\n\n def key(self, *args, **kwargs):\n return self._key\n\n def value(self, *args, **kwargs):\n return self._value\n\n def timestamp(self, *args, **kwargs):\n return self._timestamp\n\n def topic(self, *args, **kwargs):\n return self._topic\n\n def error(self):\n return self._error\n\n def set_headers(self, *args, **kwargs): # real signature unknown\n pass\n\n def set_key(self, *args, **kwargs): # real signature unknown\n pass\n\n def set_value(self, *args, **kwargs): # real signature unknown\n pass" }, { "identifier": "FakeAdminClientImpl", "path": "mockafka/admin_client.py", "snippet": "class FakeAdminClientImpl:\n def __init__(self, clean: bool = False, *args, **kwargs):\n def create_partitions(self, partitions: list[NewPartitions]):\n def create_partition(self, partition: NewPartitions):\n def create_topics(self, topics: list[NewTopic]):\n def create_topic(self, topic: NewTopic):\n def delete_topics(self, topics, future=None, request_timeout=None, operation_timeout=None):\n def delete_topic(self, topic: NewTopic):\n def describe_acls(self, acl_binding_filter, future, request_timeout=None):\n def describe_configs(self, resources, future, request_timeout=None, broker=None):\n def delete_acls(self, acl_binding_filters, future, request_timeout=None):\n def alter_configs(self, *args, **kwargs):\n def create_acls(self, *args, **kwargs):\n def list_groups(self, group=None, *args, **kwargs):\n def list_topics(self, topic=None, *args, **kwargs):\n def poll(self, timeout=None):\n def __len__(self, *args, **kwargs):" }, { "identifier": "KafkaStore", "path": "mockafka/kafka_store.py", "snippet": "class SingletonMeta(type):\nclass KafkaStore(metaclass=SingletonMeta):\n def __call__(cls, *args, **kwargs):\n def __init__(self, clean: bool = False):\n def is_topic_exist(topic: str) -> bool:\n def is_partition_exist_on_topic(cls, topic: str, partition_num: int) -> bool:\n def get_number_of_partition(topic: str) -> int:\n def create_topic(topic: str):\n def create_partition(self, topic: str, partitions: int):\n def remove_topic(self, topic: str):\n def set_first_offset(self, topic: str, partition: int, value: int):\n def _add_next_offset(self, topic: str, partition: int):\n def get_offset_store_key(self, topic: str, partition: int):\n def produce(self, message: Message, topic: str, partition: int):\n def get_message(self, topic: str, partition: int, offset: int) -> Message:\n def get_partition_first_offset(self, topic: str, partition: int) -> int:\n def get_partition_next_offset(self, topic: str, partition: int) -> int:\n def 
topic_list() -> list[str]:\n def partition_list(topic: str) -> list[int]:\n def get_messages_in_partition(topic: str, partition: int) -> list[Message]:\n def number_of_message_in_topic(self, topic: str) -> int:\n def clear_topic_messages(self, topic: str):\n def clear_partition_messages(topic: str, partition: int):\n def reset_offset(self, topic: str, strategy: str = 'latest'):\n def fresh():\n FIRST_OFFSET = 'first_offset'\n NEXT_OFFSET = 'next_offset'" }, { "identifier": "FakeProducer", "path": "mockafka/producer.py", "snippet": "class FakeProducer(object):\n def __init__(self, config: dict = None):\n self.kafka = KafkaStore()\n\n def produce(self, topic, value=None, *args, **kwargs):\n # create a message and call produce kafka\n message = Message(value=value, topic=topic, *args, **kwargs)\n self.kafka.produce(message=message, topic=topic, partition=kwargs['partition'])\n\n def list_topics(self, topic=None, *args, **kwargs):\n return ClusterMetadata(topic)\n\n def abort_transaction(self, timeout=None):\n # This method Does not support in mockafka\n pass\n\n def begin_transaction(self):\n # This method Does not support in mockafka\n pass\n\n def commit_transaction(self, timeout=None):\n # This method Does not support in mockafka\n pass\n\n def flush(self, timeout=None):\n # This method Does not support in mockafka\n return 0\n\n def init_transactions(self, timeout=None):\n # This method Does not support in mockafka\n pass\n\n def poll(self, timeout=None):\n # This method Does not support in mockafka\n return 0\n\n def purge(self, in_queue=True, *args, **kwargs):\n # This method Does not support in mockafka\n pass\n\n def send_offsets_to_transaction(self, positions, group_metadata,\n timeout=None):\n # This method Does not support in mockafka\n pass" } ]
from unittest import TestCase from mockafka import Message from mockafka.admin_client import FakeAdminClientImpl, NewTopic from mockafka.kafka_store import KafkaStore, KafkaException from mockafka.producer import FakeProducer from confluent_kafka import Message import pytest
1,584
class TestFakeProducer(TestCase): def setUp(self) -> None: self.kafka = KafkaStore(clean=True) self.producer = FakeProducer()
class TestFakeProducer(TestCase): def setUp(self) -> None: self.kafka = KafkaStore(clean=True) self.producer = FakeProducer()
self.admin_client = FakeAdminClientImpl()
1
2023-10-24 13:27:12+00:00
2k
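KafkaStore above is the in-memory broker behind FakeProducer, tracking topics, partitions, and offsets. A deliberately tiny toy version of that idea (not the mockafka API, just the storage shape):

from collections import defaultdict

class TinyKafkaStore:
    def __init__(self):
        # topic -> partition -> list of messages; the offset is the list index
        self._topics = defaultdict(lambda: defaultdict(list))

    def produce(self, topic: str, partition: int, value: str) -> int:
        self._topics[topic][partition].append(value)
        return len(self._topics[topic][partition]) - 1  # offset just written

    def get_message(self, topic: str, partition: int, offset: int) -> str:
        return self._topics[topic][partition][offset]

store = TinyKafkaStore()
offset = store.produce('test_topic', 0, 'hello')
print(store.get_message('test_topic', 0, offset))  # hello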
HRI-EU/rosenv
tests/integration/commands/install/test_install_launch_file_detection.py
[ { "identifier": "ROS_2", "path": "tests/conftest.py", "snippet": "ROS_2: Final[Literal[2]] = 2" }, { "identifier": "YieldFixture", "path": "tests/conftest.py", "snippet": "_T = TypeVar(\"_T\")\nROS_1: Final[Literal[1]] = 1\nROS_2: Final[Literal[2]] = 2\nROS_1_PROJECT_LIST = [\"adder\", \"adder_meta\", \"adder_srvs\", \"client\", \"python_server\", \"server\"]\nROS_2_PROJECT_LIST = [\"adder\", \"adder_srvs\", \"client\", \"python_server\", \"server\"]\ndef get_ros_version() -> Literal[1, 2]:\ndef ros_distro() -> RosDistribution:\ndef ros_distro_config() -> DistroConfig:\ndef resources() -> Path:\ndef example_project_launch_files(resources: Path) -> Path:\ndef example_project_ros1(resources: Path) -> Path:\ndef example_project_ros2(resources: Path) -> Path:\ndef example_project(example_project_ros1: Path, example_project_ros2: Path) -> Path:\ndef project_list() -> list[str]:\ndef test_debs(resources: Path) -> Path:\ndef catkin_tools(resources: Path) -> Path:\ndef rosdistro_index(resources: Path) -> Path:" }, { "identifier": "get_ros_version", "path": "tests/conftest.py", "snippet": "def get_ros_version() -> Literal[1, 2]:\n installed_distro = get_installed_distro_paths()\n\n if len(installed_distro) == 0 or installed_distro[0].name != \"noetic\":\n return ROS_2\n\n return ROS_1" } ]
import logging import shutil import pytest from pathlib import Path from cleo.application import Application from cleo.testers.command_tester import CommandTester from deb_pkg_tools.package import ArchiveEntry from deb_pkg_tools.package import inspect_package_contents from tests.conftest import ROS_2 from tests.conftest import YieldFixture from tests.conftest import get_ros_version
1,030
# # Copyright (c) Honda Research Institute Europe GmbH # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # from __future__ import annotations @pytest.fixture() def _copy_success_launch_file_project( rosenv_target_path: Path, example_project_launch_files: Path, ) -> YieldFixture[None]: target_folder = rosenv_target_path.parent / "src" target_folder.mkdir(exist_ok=True, parents=True) failing_project = example_project_launch_files / "src/launch_success" assert failing_project.exists(), "Failing launch file project doesn't exist!" shutil.copytree(failing_project, target_folder / "launch_success") yield shutil.rmtree(target_folder, ignore_errors=True) @pytest.fixture() def _copy_failing_launch_file_project( rosenv_target_path: Path, example_project_launch_files: Path, ) -> YieldFixture[None]: target_folder = rosenv_target_path.parent / "src" target_folder.mkdir(exist_ok=True, parents=True) failing_project = example_project_launch_files / "src/launch_fails" assert failing_project.exists(), "Failing launch file project doesn't exist!" shutil.copytree(failing_project, target_folder / "launch_fails") yield shutil.rmtree(target_folder, ignore_errors=True)
# # Copyright (c) Honda Research Institute Europe GmbH # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # from __future__ import annotations @pytest.fixture() def _copy_success_launch_file_project( rosenv_target_path: Path, example_project_launch_files: Path, ) -> YieldFixture[None]: target_folder = rosenv_target_path.parent / "src" target_folder.mkdir(exist_ok=True, parents=True) failing_project = example_project_launch_files / "src/launch_success" assert failing_project.exists(), "Failing launch file project doesn't exist!" shutil.copytree(failing_project, target_folder / "launch_success") yield shutil.rmtree(target_folder, ignore_errors=True) @pytest.fixture() def _copy_failing_launch_file_project( rosenv_target_path: Path, example_project_launch_files: Path, ) -> YieldFixture[None]: target_folder = rosenv_target_path.parent / "src" target_folder.mkdir(exist_ok=True, parents=True) failing_project = example_project_launch_files / "src/launch_fails" assert failing_project.exists(), "Failing launch file project doesn't exist!" shutil.copytree(failing_project, target_folder / "launch_fails") yield shutil.rmtree(target_folder, ignore_errors=True)
@pytest.mark.skipif(get_ros_version() == ROS_2, reason="Launchfile-Checks only work in ROS1 currently")
2
2023-10-18 12:36:30+00:00
2k
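Both fixtures above follow the same copy-then-clean-up shape: create a src tree, yield to the test, then remove it. The generic pattern reduced to a self-contained fixture (paths and file contents are illustrative):

import shutil
from pathlib import Path

import pytest

@pytest.fixture()
def _copy_project(tmp_path: Path):
    target_folder = tmp_path / "src"
    target_folder.mkdir(exist_ok=True, parents=True)
    project = target_folder / "demo_project"
    project.mkdir()
    (project / "package.xml").write_text("<package/>")
    yield target_folder
    shutil.rmtree(target_folder, ignore_errors=True)

def test_project_copied(_copy_project: Path):
    assert (_copy_project / "demo_project" / "package.xml").exists()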
CuriseJia/FreeStyleRet
comparison_test/imagebind_test.py
[ { "identifier": "getR1Accuary", "path": "src/utils/utils.py", "snippet": "def getR1Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n if temp[i][prob.shape[1]-1] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc" }, { "identifier": "getR5Accuary", "path": "src/utils/utils.py", "snippet": "def getR5Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n for j in range(prob.shape[1]-4,prob.shape[1]):\n if temp[i][j] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc" }, { "identifier": "setup_seed", "path": "src/utils/utils.py", "snippet": "def setup_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n cudnn.benchmark = True" }, { "identifier": "I2ITestDataset", "path": "src/dataset/data.py", "snippet": "class I2ITestDataset(Dataset):\n def __init__(self, style, root_path, json_path, image_transform):\n self.style = style\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])\n \n ori_image = self.image_transform(Image.open(ori_path))\n pair_image = self.image_transform(Image.open(pair_path))\n\n return [ori_image, pair_image, index]" }, { "identifier": "T2ITestDataset", "path": "src/dataset/data.py", "snippet": "class T2ITestDataset(Dataset):\n def __init__(self, root_path, json_path, image_transform):\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])\n image_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n \n f = open(caption_path, 'r')\n caption = f.readline().replace('\\n', '')\n pair_image = self.image_transform(Image.open(image_path))\n\n return [caption, pair_image, index]" } ]
import torch import argparse import sys import json import os import time from tqdm import tqdm from open_clip.factory import image_transform from torch.utils.data import DataLoader from src.utils import setup_seed, getR1Accuary, getR5Accuary from src.dataset import I2ITestDataset, T2ITestDataset from ImageBind.imagebind import data, ModalityType, imagebind_model from prompt_model import Prompt_ImageBind
1,570
image_mean = (0.48145466, 0.4578275, 0.40821073) image_std = (0.26861954, 0.26130258, 0.27577711) def parse_args(): parser = argparse.ArgumentParser(description='Parse args for Prompt_ImageBind or Origin_ImageBind test on DSR dataset.') # project settings parser.add_argument('--origin_resume', default='', type=str, help='load origin model checkpoint from given path') parser.add_argument('--prompt_resume', default='', type=str, help='load prompt model checkpoint from given path') parser.add_argument('--device', default='cuda:0') parser.add_argument('--num_workers', default=6, type=int) # data settings parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.') parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.') parser.add_argument("--test_dataset_path", type=str, default='DSR/') parser.add_argument("--test_json_path", type=str, default='DSR/test.json') parser.add_argument("--batch_size", type=int, default=24) # model settings parser.add_argument('--model', type=str, default='prompt', help='prompt-imagebind or imagebind-huge.') parser.add_argument('--n_prompts', type=int, default=3) parser.add_argument('--prompt_dim', type=int, default=50176) args = parser.parse_args() return args def S2IRetrieval(args, model, ori_feat, pair_feat): t1 = time.time() if args.model == 'prompt': ori_feat = model(ori_feat, dtype='image') ske_feat = model(pair_feat, mode='image') prob = torch.softmax(ske_feat @ ori_feat.T, dim=-1) else: with torch.no_grad(): ori_feat = model(ori_feat) ske_feat = model(pair_feat) prob = torch.softmax(ske_feat[ModalityType.VISION] @ ori_feat[ModalityType.VISION].T, dim=-1) t2 = time.time() print('inference a batch costs {}ms'.format((t2-t1)*1000)) return prob def T2IRetrieval(args, model, ori_feat, pair_feat): t1 = time.time() if args.model == 'prompt': ori_feat = model(ori_feat, dtype='image') ske_feat = model(pair_feat, mode='text') else: with torch.no_grad(): ori_feat = model(ori_feat) ske_feat = model(pair_feat) prob = torch.softmax(ske_feat[ModalityType.TEXT] @ ori_feat[ModalityType.VISION].T, dim=-1) t2 = time.time() print('inference a batch costs {}ms'.format((t2-t1)*1000)) return prob if __name__ == "__main__": args = parse_args() setup_seed(args.seed) pair = json.load(open(args.test_json_path, 'r')) if args.model == 'prompt': model = Prompt_ImageBind(args) model.load_state_dict(torch.load(args.prompt_resume)) else: model = imagebind_model.imagebind_huge(args.origin_resume) model.eval() model.to(args.device) r1 = [] r5 = [] rang = int(len(pair)/args.batch_size) pre_process_val = image_transform(224, True, image_mean, image_std) if args.type == 'text2image': test_dataset = T2ITestDataset(args.test_dataset_path, args.test_json_path, pre_process_val) else:
image_mean = (0.48145466, 0.4578275, 0.40821073) image_std = (0.26861954, 0.26130258, 0.27577711) def parse_args(): parser = argparse.ArgumentParser(description='Parse args for Prompt_ImageBind or Origin_ImageBind test on DSR dataset.') # project settings parser.add_argument('--origin_resume', default='', type=str, help='load origin model checkpoint from given path') parser.add_argument('--prompt_resume', default='', type=str, help='load prompt model checkpoint from given path') parser.add_argument('--device', default='cuda:0') parser.add_argument('--num_workers', default=6, type=int) # data settings parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.') parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.') parser.add_argument("--test_dataset_path", type=str, default='DSR/') parser.add_argument("--test_json_path", type=str, default='DSR/test.json') parser.add_argument("--batch_size", type=int, default=24) # model settings parser.add_argument('--model', type=str, default='prompt', help='prompt-imagebind or imagebind-huge.') parser.add_argument('--n_prompts', type=int, default=3) parser.add_argument('--prompt_dim', type=int, default=50176) args = parser.parse_args() return args def S2IRetrieval(args, model, ori_feat, pair_feat): t1 = time.time() if args.model == 'prompt': ori_feat = model(ori_feat, dtype='image') ske_feat = model(pair_feat, mode='image') prob = torch.softmax(ske_feat @ ori_feat.T, dim=-1) else: with torch.no_grad(): ori_feat = model(ori_feat) ske_feat = model(pair_feat) prob = torch.softmax(ske_feat[ModalityType.VISION] @ ori_feat[ModalityType.VISION].T, dim=-1) t2 = time.time() print('inference a batch costs {}ms'.format((t2-t1)*1000)) return prob def T2IRetrieval(args, model, ori_feat, pair_feat): t1 = time.time() if args.model == 'prompt': ori_feat = model(ori_feat, dtype='image') ske_feat = model(pair_feat, mode='text') else: with torch.no_grad(): ori_feat = model(ori_feat) ske_feat = model(pair_feat) prob = torch.softmax(ske_feat[ModalityType.TEXT] @ ori_feat[ModalityType.VISION].T, dim=-1) t2 = time.time() print('inference a batch costs {}ms'.format((t2-t1)*1000)) return prob if __name__ == "__main__": args = parse_args() setup_seed(args.seed) pair = json.load(open(args.test_json_path, 'r')) if args.model == 'prompt': model = Prompt_ImageBind(args) model.load_state_dict(torch.load(args.prompt_resume)) else: model = imagebind_model.imagebind_huge(args.origin_resume) model.eval() model.to(args.device) r1 = [] r5 = [] rang = int(len(pair)/args.batch_size) pre_process_val = image_transform(224, True, image_mean, image_std) if args.type == 'text2image': test_dataset = T2ITestDataset(args.test_dataset_path, args.test_json_path, pre_process_val) else:
test_dataset = I2ITestDataset(args.test_dataset_path, args.test_json_path, pre_process_val)
3
2023-10-17 09:32:57+00:00
2k
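getR1Accuary in this record's context counts a query as a hit when its own gallery index holds the top-ranked probability; the same computation in plain NumPy on a synthetic score matrix:

import numpy as np

def r1_accuracy(prob: np.ndarray) -> float:
    order = np.argsort(prob, axis=1)  # ascending; the last column is top-1
    hits = sum(order[i, -1] == i for i in range(prob.shape[0]))
    return hits / prob.shape[0]

prob = np.full((4, 4), 0.02) + np.eye(4) * 0.9  # diagonal-dominant scores
print(r1_accuracy(prob))                        # 1.0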
liuqidong07/MOELoRA-peft
src/MLoRA/peft/tuners/prompt_tuning.py
[ { "identifier": "PeftType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n MMOELORAS = \"MMOELORAS\"" }, { "identifier": "PromptLearningConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" } ]
import enum import math import torch from dataclasses import dataclass, field from typing import Optional, Union from ..utils import PeftType, PromptLearningConfig from transformers import AutoTokenizer
924
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class PromptTuningInit(str, enum.Enum): TEXT = "TEXT" RANDOM = "RANDOM" @dataclass class PromptTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEmbedding`]. Args: prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding. prompt_tuning_init_text (`str`, *optional*): The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_name_or_path (`str`, *optional*): The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`. """ prompt_tuning_init: Union[PromptTuningInit, str] = field( default=PromptTuningInit.RANDOM, metadata={"help": "How to initialize the prompt tuning parameters"}, ) prompt_tuning_init_text: Optional[str] = field( default=None, metadata={ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) def __post_init__(self):
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class PromptTuningInit(str, enum.Enum): TEXT = "TEXT" RANDOM = "RANDOM" @dataclass class PromptTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEmbedding`]. Args: prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding. prompt_tuning_init_text (`str`, *optional*): The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_name_or_path (`str`, *optional*): The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`. """ prompt_tuning_init: Union[PromptTuningInit, str] = field( default=PromptTuningInit.RANDOM, metadata={"help": "How to initialize the prompt tuning parameters"}, ) prompt_tuning_init_text: Optional[str] = field( default=None, metadata={ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) def __post_init__(self):
self.peft_type = PeftType.PROMPT_TUNING
0
2023-10-19 10:55:50+00:00
2k
voyage-ai/voyageai-python
voyageai/api_resources/api_requestor.py
[ { "identifier": "error", "path": "voyageai/error.py", "snippet": "class VoyageError(Exception):\nclass APIError(VoyageError):\nclass TryAgain(VoyageError):\nclass Timeout(VoyageError):\nclass APIConnectionError(VoyageError):\nclass InvalidRequestError(VoyageError):\nclass MalformedRequestError(VoyageError):\nclass AuthenticationError(VoyageError):\nclass RateLimitError(VoyageError):\nclass ServerError(VoyageError):\nclass ServiceUnavailableError(VoyageError):\n def __init__(\n self,\n message=None,\n http_body=None,\n http_status=None,\n json_body=None,\n headers=None,\n code=None,\n ):\n def __str__(self):\n def user_message(self):\n def __repr__(self):\n def construct_error_object(self):\n def __init__(\n self,\n message,\n http_body=None,\n http_status=None,\n json_body=None,\n headers=None,\n code=None,\n should_retry=False,\n ):" }, { "identifier": "util", "path": "voyageai/util.py", "snippet": "VOYAGE_LOG = os.environ.get(\"VOYAGE_LOG\")\n VOYAGE = 1\nclass ApiType(Enum):\n def from_str(label):\ndef _console_log_level():\ndef log_debug(message, **params):\ndef log_info(message, **params):\ndef log_warn(message, **params):\ndef logfmt(props):\n def fmt(key, val):\ndef convert_to_voyage_object(resp):\ndef convert_to_dict(obj):\ndef merge_dicts(x, y):\ndef default_api_key() -> str:" }, { "identifier": "version", "path": "voyageai/version.py", "snippet": "VERSION = \"0.1.6\"" }, { "identifier": "VoyageResponse", "path": "voyageai/api_resources/voyage_response.py", "snippet": "class VoyageResponse:\n def __init__(self, data, headers):\n self._headers = headers\n self.data = data\n\n @property\n def request_id(self) -> Optional[str]:\n return self._headers.get(\"request-id\")\n\n @property\n def retry_after(self) -> Optional[int]:\n try:\n return int(self._headers.get(\"retry-after\"))\n except TypeError:\n return None\n\n @property\n def operation_location(self) -> Optional[str]:\n return self._headers.get(\"operation-location\")\n\n @property\n def organization(self) -> Optional[str]:\n return self._headers.get(\"Voyage-Organization\")\n\n @property\n def response_ms(self) -> Optional[int]:\n h = self._headers.get(\"Voyage-Processing-Ms\")\n return None if h is None else round(float(h))" }, { "identifier": "ApiType", "path": "voyageai/util.py", "snippet": "class ApiType(Enum):\n VOYAGE = 1\n\n @staticmethod\n def from_str(label):\n if label.lower() == \"voyage\":\n return ApiType.VOYAGE\n else:\n raise voyageai.error.InvalidAPIType(\n \"The API type provided in invalid. Please select one of the supported API types: 'voyage'\"\n )" } ]
import asyncio import json import time import platform import sys import threading import time import warnings import aiohttp import requests import voyageai from json import JSONDecodeError from typing import ( AsyncContextManager, AsyncGenerator, Callable, Dict, Iterator, Optional, Tuple, Union, overload, ) from urllib.parse import urlencode, urlsplit, urlunsplit from typing import Literal from typing_extensions import Literal from voyageai import error, util, version from voyageai.api_resources.voyage_response import VoyageResponse from voyageai.util import ApiType
1,595
if sys.version_info >= (3, 8): else: TIMEOUT_SECS = 600 MAX_SESSION_LIFETIME_SECS = 180 MAX_CONNECTION_RETRIES = 2 # Has one attribute per thread, 'session'. _thread_context = threading.local() def _build_api_url(url, query): scheme, netloc, path, base_query, fragment = urlsplit(url) if base_query: query = "%s&%s" % (base_query, query) return urlunsplit((scheme, netloc, path, query, fragment)) def _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]: """Returns a value suitable for the 'proxies' argument to 'requests.request.""" if proxy is None: return None elif isinstance(proxy, str): return {"http": proxy, "https": proxy} elif isinstance(proxy, dict): return proxy.copy() else: raise ValueError( "'voyageai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys." ) def _aiohttp_proxies_arg(proxy) -> Optional[str]: """Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request.""" if proxy is None: return None elif isinstance(proxy, str): return proxy elif isinstance(proxy, dict): return proxy["https"] if "https" in proxy else proxy["http"] else: raise ValueError( "'voyageai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys." ) def _make_session() -> requests.Session: if voyageai.requestssession: if isinstance(voyageai.requestssession, requests.Session): return voyageai.requestssession return voyageai.requestssession() if not voyageai.verify_ssl_certs: warnings.warn("verify_ssl_certs is ignored; voyageai always verifies.") s = requests.Session() proxies = _requests_proxies_arg(voyageai.proxy) if proxies: s.proxies = proxies s.mount( "https://", requests.adapters.HTTPAdapter(max_retries=MAX_CONNECTION_RETRIES), ) return s def parse_stream_helper(line: bytes) -> Optional[str]: if line and line.startswith(b"data:"): if line.startswith(b"data: "): # SSE event may be valid when it contain whitespace line = line[len(b"data: "):] else: line = line[len(b"data:"):] if line.strip() == b"[DONE]": # return here will cause GeneratorExit exception in urllib3 # and it will close http connection with TCP Reset return None else: return line.decode("utf-8") return None def parse_stream(rbody: Iterator[bytes]) -> Iterator[str]: for line in rbody: _line = parse_stream_helper(line) if _line is not None: yield _line async def parse_stream_async(rbody: aiohttp.StreamReader): async for line in rbody: _line = parse_stream_helper(line) if _line is not None: yield _line class APIRequestor: def __init__( self, key=None, api_base=None, api_type=None, api_version=None, organization=None, ): self.api_base = api_base or voyageai.api_base
self.api_key = key or util.default_api_key()
1
2023-10-17 22:11:18+00:00
2k
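As a self-contained illustration of the SSE handling in the record above, a compact restatement of parse_stream_helper (re-implemented inline so the sketch runs on its own; the sample payloads are invented):

from typing import Optional

def parse_sse_line(line: bytes) -> Optional[str]:
    # Mirrors parse_stream_helper above: strip the "data:" prefix, with or
    # without a trailing space, and swallow the [DONE] sentinel.
    if not line.startswith(b"data:"):
        return None
    line = line[len(b"data: "):] if line.startswith(b"data: ") else line[len(b"data:"):]
    return None if line.strip() == b"[DONE]" else line.decode("utf-8")

raw = [b'data: {"text": "Hel"}', b'data:{"text": "lo"}', b"data: [DONE]"]
print([p for p in map(parse_sse_line, raw) if p is not None])
# -> ['{"text": "Hel"}', '{"text": "lo"}']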
YuroFR/freqtrade-modded-crypto-trading-bot
tests/exchange/test_bybit.py
[ { "identifier": "MarginMode", "path": "freqtrade/enums/marginmode.py", "snippet": "class MarginMode(str, Enum):\n \"\"\"\n Enum to distinguish between\n cross margin/futures margin_mode and\n isolated margin/futures margin_mode\n \"\"\"\n CROSS = \"cross\"\n ISOLATED = \"isolated\"\n NONE = ''" }, { "identifier": "TradingMode", "path": "freqtrade/enums/tradingmode.py", "snippet": "class TradingMode(str, Enum):\n \"\"\"\n Enum to distinguish between\n spot, margin, futures or any other trading method\n \"\"\"\n SPOT = \"spot\"\n MARGIN = \"margin\"\n FUTURES = \"futures\"" }, { "identifier": "EXMS", "path": "tests/conftest.py", "snippet": "EXMS = 'freqtrade.exchange.exchange.Exchange'" }, { "identifier": "get_mock_coro", "path": "tests/conftest.py", "snippet": "def get_mock_coro(return_value=None, side_effect=None):\n async def mock_coro(*args, **kwargs):\n if side_effect:\n if isinstance(side_effect, list):\n effect = side_effect.pop(0)\n else:\n effect = side_effect\n if isinstance(effect, Exception):\n raise effect\n if callable(effect):\n return effect(*args, **kwargs)\n return effect\n else:\n return return_value\n\n return Mock(wraps=mock_coro)" }, { "identifier": "get_patched_exchange", "path": "tests/conftest.py", "snippet": "def get_patched_exchange(mocker, config, api_mock=None, id='binance',\n mock_markets=True, mock_supported_modes=True) -> Exchange:\n patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes)\n config['exchange']['name'] = id\n try:\n exchange = ExchangeResolver.load_exchange(config, load_leverage_tiers=True)\n except ImportError:\n exchange = Exchange(config)\n return exchange" }, { "identifier": "ccxt_exceptionhandlers", "path": "tests/exchange/test_exchange.py", "snippet": "def ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name,\n fun, mock_ccxt_fun, retries=API_RETRY_COUNT + 1, **kwargs):\n\n with patch('freqtrade.exchange.common.time.sleep'):\n with pytest.raises(DDosProtection):\n api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.DDoSProtection(\"DDos\"))\n exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)\n getattr(exchange, fun)(**kwargs)\n assert api_mock.__dict__[mock_ccxt_fun].call_count == retries\n\n with pytest.raises(TemporaryError):\n api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.NetworkError(\"DeaDBeef\"))\n exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)\n getattr(exchange, fun)(**kwargs)\n assert api_mock.__dict__[mock_ccxt_fun].call_count == retries\n\n with pytest.raises(OperationalException):\n api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.BaseError(\"DeadBeef\"))\n exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)\n getattr(exchange, fun)(**kwargs)\n assert api_mock.__dict__[mock_ccxt_fun].call_count == 1" } ]
from datetime import datetime, timedelta, timezone from unittest.mock import MagicMock from freqtrade.enums.marginmode import MarginMode from freqtrade.enums.tradingmode import TradingMode from tests.conftest import EXMS, get_mock_coro, get_patched_exchange from tests.exchange.test_exchange import ccxt_exceptionhandlers
853
def test_additional_exchange_init_bybit(default_conf, mocker): default_conf['dry_run'] = False default_conf['trading_mode'] = TradingMode.FUTURES
default_conf['margin_mode'] = MarginMode.ISOLATED
0
2023-10-21 10:02:05+00:00
2k
yanzhh/HGERE
transformers/src/transformers/data/processors/squad.py
[ { "identifier": "is_tf_available", "path": "transformers/src/transformers/file_utils.py", "snippet": "def is_tf_available():\n return _tf_available" }, { "identifier": "is_torch_available", "path": "transformers/src/transformers/file_utils.py", "snippet": "def is_torch_available():\n return _torch_available" }, { "identifier": "whitespace_tokenize", "path": "transformers/src/transformers/tokenization_bert.py", "snippet": "def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens" }, { "identifier": "DataProcessor", "path": "transformers/src/transformers/data/processors/utils.py", "snippet": "class DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"Gets an example from a dict with tensorflow tensors\n Args:\n tensor_dict: Keys and values should match the corresponding Glue\n tensorflow_dataset examples.\n \"\"\"\n raise NotImplementedError()\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n def tfds_map(self, example):\n \"\"\"Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are.\n This method converts examples to the correct format.\"\"\"\n if len(self.get_labels()) > 1:\n example.label = self.get_labels()[int(example.label)]\n return example\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\"\\t\", quotechar=quotechar))" } ]
import json import logging import os import numpy as np import torch import tensorflow as tf from functools import partial from multiprocessing import Pool, cpu_count from tqdm import tqdm from ...file_utils import is_tf_available, is_torch_available from ...tokenization_bert import whitespace_tokenize from .utils import DataProcessor from torch.utils.data import TensorDataset
1,233
if is_torch_available(): if is_tf_available(): logger = logging.getLogger(__name__) def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start : (new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def _new_check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # if len(doc_spans) == 1: # return True best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span["start"] + doc_span["length"] - 1 if position < doc_span["start"]: continue if position > end: continue num_left_context = position - doc_span["start"] num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def _is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training): features = [] # print (is_training) if is_training and not example.is_impossible: # Get start and end position start_position = example.start_position end_position = example.end_position # print (start_position, end_position, example.answer_text) # If the answer cannot be found in the text, then skip this example. actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
2
2023-10-15 02:31:09+00:00
2k
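To make the "max context" scoring in the record above concrete, a compact self-contained restatement with two invented, overlapping doc spans:

def max_context_span(doc_spans, position):
    # Same rule as _new_check_is_max_context above: a token is attributed to
    # the span where min(left context, right context) + 0.01 * length is largest.
    best_score, best_index = None, None
    for index, span in enumerate(doc_spans):
        end = span["start"] + span["length"] - 1
        if not (span["start"] <= position <= end):
            continue
        score = min(position - span["start"], end - position) + 0.01 * span["length"]
        if best_score is None or score > best_score:
            best_score, best_index = score, index
    return best_index

# Token 5 sits at the right edge of span 0 (score 0.06) but near the middle
# of span 1 (score 1.06), so span 1 wins.
print(max_context_span([{"start": 0, "length": 6}, {"start": 4, "length": 6}], 5))  # -> 1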
generative-skill-chaining/gsc-code
generative_skill_chaining/networks/actors/mlp.py
[ { "identifier": "LFF", "path": "generative_skill_chaining/networks/mlp.py", "snippet": "class LFF(torch.nn.Module):\n \"\"\"\n get torch.std_mean(self.B)\n \"\"\"\n\n def __init__(self, in_features, out_features, scale=1.0, init=\"iso\", sincos=False):\n super().__init__()\n self.in_features = in_features\n self.sincos = sincos\n self.out_features = out_features\n self.scale = scale\n if self.sincos:\n self.linear = torch.nn.Linear(in_features, self.out_features // 2)\n else:\n self.linear = torch.nn.Linear(in_features, self.out_features)\n if init == \"iso\":\n torch.nn.init.normal_(self.linear.weight, 0, scale / self.in_features)\n torch.nn.init.normal_(self.linear.bias, 0, 1)\n else:\n torch.nn.init.uniform_(\n self.linear.weight, -scale / self.in_features, scale / self.in_features\n )\n torch.nn.init.uniform_(self.linear.bias, -1, 1)\n if self.sincos:\n torch.nn.init.zeros_(self.linear.bias)\n\n def forward(self, x, **_):\n x = np.pi * self.linear(x)\n if self.sincos:\n return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)\n else:\n return torch.sin(x)" }, { "identifier": "MLP", "path": "generative_skill_chaining/networks/mlp.py", "snippet": "class MLP(torch.nn.Module):\n def __init__(\n self,\n input_dim: int,\n output_dim: int,\n hidden_layers: Sequence[int] = [256, 256],\n act: Type[torch.nn.Module] = torch.nn.ReLU,\n output_act: Optional[Type[torch.nn.Module]] = None,\n ):\n super().__init__()\n net: List[torch.nn.Module] = []\n last_dim = input_dim\n for dim in hidden_layers:\n net.append(torch.nn.Linear(last_dim, dim))\n net.append(act())\n last_dim = dim\n net.append(torch.nn.Linear(last_dim, output_dim))\n if output_act is not None:\n net.append(output_act())\n self.net = torch.nn.Sequential(*net)\n\n def forward(self, x):\n return self.net(x)" }, { "identifier": "weight_init", "path": "generative_skill_chaining/networks/mlp.py", "snippet": "def weight_init(m):\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.orthogonal_(m.weight.data)\n if hasattr(m.bias, \"data\"):\n m.bias.data.fill_(0.0)" }, { "identifier": "base", "path": "generative_skill_chaining/networks/actors/base.py", "snippet": "class Actor(torch.nn.Module, abc.ABC):\n def forward(self, state: torch.Tensor) -> torch.distributions.Distribution:\n def predict(self, state: torch.Tensor, sample: bool = False) -> torch.Tensor:" }, { "identifier": "SquashedNormal", "path": "generative_skill_chaining/networks/utils.py", "snippet": "class SquashedNormal(torch.distributions.TransformedDistribution):\n def __init__(self, loc, scale):\n self._loc = loc\n self.scale = scale\n self.base_dist = torch.distributions.Normal(loc, scale)\n transforms = [torch.distributions.transforms.TanhTransform(cache_size=1)]\n super().__init__(self.base_dist, transforms)\n\n @property\n def loc(self):\n loc = self._loc\n for transform in self.transforms:\n loc = transform(loc)\n return loc" } ]
from typing import Optional, Sequence, Type from generative_skill_chaining.networks.mlp import LFF, MLP, weight_init from generative_skill_chaining.networks.actors import base from generative_skill_chaining.networks.utils import SquashedNormal import gym import torch
1,022
class ContinuousMLPActor(base.Actor): def __init__( self, state_space: gym.spaces.Box, action_space: gym.spaces.Box, hidden_layers: Sequence[int] = [256, 256], act: Type[torch.nn.Module] = torch.nn.ReLU, output_act: Type[torch.nn.Module] = torch.nn.Tanh, ortho_init: bool = False, ): super().__init__() self.mlp = MLP( state_space.shape[0], action_space.shape[0], hidden_layers=hidden_layers, act=act, output_act=output_act, ) if ortho_init:
self.apply(weight_init)
2
2023-10-16 00:22:40+00:00
2k
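A quick, hedged smoke test of the actor wiring in the record above; the space shapes and layer sizes are invented, and the import path simply mirrors the record's file_path:

import gym
import torch
from generative_skill_chaining.networks.actors.mlp import ContinuousMLPActor  # assumed importable

state_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,))   # hypothetical 4-dim state
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,))  # hypothetical 2-dim action
actor = ContinuousMLPActor(state_space, action_space, hidden_layers=[64, 64])
out = actor.mlp(torch.zeros(1, 4))
assert out.shape == (1, 2)  # Tanh output head keeps actions in [-1, 1]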
ChiyuSONG/dynamics-of-instruction-tuning
inference.py
[ { "identifier": "IGNORE_INDEX", "path": "train_sft.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "DataCollatorForSupervisedDataset", "path": "train_sft.py", "snippet": "class DataCollatorForSupervisedDataset(object):\n \"\"\"Collate examples for supervised fine-tuning.\"\"\"\n\n tokenizer: transformers.PreTrainedTokenizer\n pad_to_multiple_of: Optional[int] = None\n\n def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:\n input_ids, labels, attention_mask = tuple([instance[key] for instance in instances] for key in (\"input_ids\", \"labels\", \"attention_mask\"))\n\n max_label_length = max(len(l) for l in labels)\n if self.pad_to_multiple_of is not None:\n max_label_length = (\n (max_label_length + self.pad_to_multiple_of - 1)\n // self.pad_to_multiple_of\n * self.pad_to_multiple_of\n )\n input_ids = self.pad_sequence(input_ids, self.tokenizer.pad_token_id, max_label_length)\n labels = self.pad_sequence(labels, IGNORE_INDEX, max_label_length)\n attention_mask = self.pad_sequence(attention_mask, 0, max_label_length)\n\n return dict(\n input_ids=input_ids,\n labels=labels,\n attention_mask=attention_mask,\n )\n\n def pad_sequence(self, feature, padding_value, max_label_length):\n for idx, instance in enumerate(feature):\n remainder = torch.LongTensor( [padding_value] * (max_label_length - len(instance)) )\n feature[idx] = torch.cat((instance, remainder), 0) if self.tokenizer.padding_side == \"right\" \\\n else torch.cat((remainder, instance), 0)\n return torch.stack(feature, dim = 0)" }, { "identifier": "ATTR_TO_SPECIAL_TOKEN", "path": "train_sft.py", "snippet": "ATTR_TO_SPECIAL_TOKEN = {\"additional_special_tokens\": [\"<user>\", \"<assistant>\", \"<eot>\"]}" } ]
import torch from transformers import ( LlamaForCausalLM, LlamaTokenizer, set_seed, GenerationConfig ) from train_sft import IGNORE_INDEX, DataCollatorForSupervisedDataset, ATTR_TO_SPECIAL_TOKEN
951
def process(batch, tokenizer): processed = [] user = tokenizer.user_token_id assistant = tokenizer.assistant_token_id eot = tokenizer.eot_token_id def tokenize(s): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s.strip())) for example in batch: input_ids = [] labels = [] messages = [] messages.extend(example["messages"]) for message in messages: input_ids.append(user if message["role"] == "user" else assistant) labels.append(IGNORE_INDEX) content = tokenize(message["content"]) + [eot] input_ids.extend(content) labels.extend([IGNORE_INDEX]*len(content) if message["role"] == "user" else content) input_ids.append(assistant) labels.append(IGNORE_INDEX) assert len(input_ids) == len(labels) attention_mask = [1] * len(input_ids) processed.append( {'input_ids':torch.LongTensor(input_ids), 'labels': torch.LongTensor(labels), 'attention_mask': torch.LongTensor(attention_mask)} ) return processed class Assistant: def __init__(self, model_name_or_path): tokenizer = LlamaTokenizer.from_pretrained(model_name_or_path) tokenizer.padding_side = "left" tokenizer.user_token_id, tokenizer.assistant_token_id, tokenizer.eot_token_id \ = tokenizer.convert_tokens_to_ids(ATTR_TO_SPECIAL_TOKEN["additional_special_tokens"]) model = LlamaForCausalLM.from_pretrained(model_name_or_path, device_map="auto") model.tie_weights() model.eval() self.tokenizer = tokenizer self.model = model self.seed = 0 # use greedy decoding as default self.config = GenerationConfig( max_new_tokens=1024, min_length=1, do_sample=False, output_scores=True, return_dict_in_generate=True, pad_token_id=tokenizer.pad_token_id, eos_token_id=[tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.unk_token_id, tokenizer.eot_token_id, tokenizer.user_token_id, tokenizer.assistant_token_id], ) set_seed(self.seed) def inference(self, batch): processed = process(batch, tokenizer=self.tokenizer)
data_collator = DataCollatorForSupervisedDataset(tokenizer=self.tokenizer, pad_to_multiple_of=8)
1
2023-10-17 07:41:58+00:00
2k
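For reference, a hedged sketch of the batch structure that process() in the record above consumes; the message contents are invented:

batch = [
    {"messages": [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "4"},
        {"role": "user", "content": "And 3 + 3?"},
    ]}
]
# process(batch, tokenizer) renders this as
#   <user> ... <eot> <assistant> ... <eot> <user> ... <eot> <assistant>
# with labels set to IGNORE_INDEX everywhere except assistant content.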
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/pydantic/_internal/_schema_generation_shared.py
[ { "identifier": "GetCoreSchemaHandler", "path": "backend/venv/lib/python3.10/site-packages/pydantic/annotated_handlers.py", "snippet": "class GetCoreSchemaHandler:\n \"\"\"Handler to call into the next CoreSchema schema generation function.\"\"\"\n\n def __call__(self, __source_type: Any) -> core_schema.CoreSchema:\n \"\"\"Call the inner handler and get the CoreSchema it returns.\n This will call the next CoreSchema modifying function up until it calls\n into Pydantic's internal schema generation machinery, which will raise a\n `pydantic.errors.PydanticSchemaGenerationError` error if it cannot generate\n a CoreSchema for the given source type.\n\n Args:\n __source_type: The input type.\n\n Returns:\n CoreSchema: The `pydantic-core` CoreSchema generated.\n \"\"\"\n raise NotImplementedError\n\n def generate_schema(self, __source_type: Any) -> core_schema.CoreSchema:\n \"\"\"Generate a schema unrelated to the current context.\n Use this function if e.g. you are handling schema generation for a sequence\n and want to generate a schema for its items.\n Otherwise, you may end up doing something like applying a `min_length` constraint\n that was intended for the sequence itself to its items!\n\n Args:\n __source_type: The input type.\n\n Returns:\n CoreSchema: The `pydantic-core` CoreSchema generated.\n \"\"\"\n raise NotImplementedError\n\n def resolve_ref_schema(self, __maybe_ref_schema: core_schema.CoreSchema) -> core_schema.CoreSchema:\n \"\"\"Get the real schema for a `definition-ref` schema.\n If the schema given is not a `definition-ref` schema, it will be returned as is.\n This means you don't have to check before calling this function.\n\n Args:\n __maybe_ref_schema: A `CoreSchema`, `ref`-based or not.\n\n Raises:\n LookupError: If the `ref` is not found.\n\n Returns:\n A concrete `CoreSchema`.\n \"\"\"\n raise NotImplementedError\n\n @property\n def field_name(self) -> str | None:\n \"\"\"Get the name of the closest field to this validator.\"\"\"\n raise NotImplementedError\n\n def _get_types_namespace(self) -> dict[str, Any] | None:\n \"\"\"Internal method used during type resolution for serializer annotations.\"\"\"\n raise NotImplementedError" }, { "identifier": "GetJsonSchemaHandler", "path": "backend/venv/lib/python3.10/site-packages/pydantic/annotated_handlers.py", "snippet": "class GetJsonSchemaHandler:\n \"\"\"Handler to call into the next JSON schema generation function.\n\n Attributes:\n mode: Json schema mode, can be `validation` or `serialization`.\n \"\"\"\n\n mode: JsonSchemaMode\n\n def __call__(self, __core_schema: CoreSchemaOrField) -> JsonSchemaValue:\n \"\"\"Call the inner handler and get the JsonSchemaValue it returns.\n This will call the next JSON schema modifying function up until it calls\n into `pydantic.json_schema.GenerateJsonSchema`, which will raise a\n `pydantic.errors.PydanticInvalidForJsonSchema` error if it cannot generate\n a JSON schema.\n\n Args:\n __core_schema: A `pydantic_core.core_schema.CoreSchema`.\n\n Returns:\n JsonSchemaValue: The JSON schema generated by the inner JSON schema modify\n functions.\n \"\"\"\n raise NotImplementedError\n\n def resolve_ref_schema(self, __maybe_ref_json_schema: JsonSchemaValue) -> JsonSchemaValue:\n \"\"\"Get the real schema for a `{\"$ref\": ...}` schema.\n If the schema given is not a `$ref` schema, it will be returned as is.\n This means you don't have to check before calling this function.\n\n Args:\n __maybe_ref_json_schema: A JsonSchemaValue, ref based or not.\n\n Raises:\n LookupError: If the ref is not found.\n\n Returns:\n JsonSchemaValue: A JsonSchemaValue that has no `$ref`.\n \"\"\"\n raise NotImplementedError" } ]
from typing import TYPE_CHECKING, Any, Callable from pydantic_core import core_schema from typing_extensions import Literal from ..annotated_handlers import GetCoreSchemaHandler, GetJsonSchemaHandler from ..json_schema import GenerateJsonSchema, JsonSchemaValue from ._core_utils import CoreSchemaOrField from ._generate_schema import GenerateSchema
1,470
"""Types and utility functions used by various other internal tools."""
from __future__ import annotations
if TYPE_CHECKING:
    GetJsonSchemaFunction = Callable[[CoreSchemaOrField, GetJsonSchemaHandler], JsonSchemaValue]
    HandlerOverride = Callable[[CoreSchemaOrField], JsonSchemaValue]
class GenerateJsonSchemaHandler(GetJsonSchemaHandler):
    """JsonSchemaHandler implementation that doesn't do ref unwrapping by default.

    This is used for any Annotated metadata so that we don't end up with conflicting
    modifications to the definition schema.

    Used internally by Pydantic, please do not rely on this implementation.
    See `GetJsonSchemaHandler` for the handler API.
    """

    def __init__(self, generate_json_schema: GenerateJsonSchema, handler_override: HandlerOverride | None) -> None:
        self.generate_json_schema = generate_json_schema
        self.handler = handler_override or generate_json_schema.generate_inner
        self.mode = generate_json_schema.mode

    def __call__(self, __core_schema: CoreSchemaOrField) -> JsonSchemaValue:
        return self.handler(__core_schema)

    def resolve_ref_schema(self, maybe_ref_json_schema: JsonSchemaValue) -> JsonSchemaValue:
        """Resolves `$ref` in the json schema.

        This returns the input json schema if there is no `$ref` in json schema.

        Args:
            maybe_ref_json_schema: The input json schema that may contains `$ref`.

        Returns:
            Resolved json schema.

        Raises:
            LookupError: If it can't find the definition for `$ref`.
        """
        if '$ref' not in maybe_ref_json_schema:
            return maybe_ref_json_schema
        ref = maybe_ref_json_schema['$ref']
        json_schema = self.generate_json_schema.get_schema_from_definitions(ref)
        if json_schema is None:
            raise LookupError(
                f'Could not find a ref for {ref}.'
                ' Maybe you tried to call resolve_ref_schema from within a recursive model?'
            )
        return json_schema
class CallbackGetCoreSchemaHandler(GetCoreSchemaHandler):
0
2023-10-23 18:09:28+00:00
2k
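A self-contained restatement of the $ref resolution pattern used by resolve_ref_schema in the record above; the definitions table is invented:

definitions = {"#/$defs/User": {"type": "object"}}  # hypothetical definitions store

def resolve(schema: dict) -> dict:
    # As above: pass non-ref schemas through, look refs up, fail loudly.
    if "$ref" not in schema:
        return schema
    resolved = definitions.get(schema["$ref"])
    if resolved is None:
        raise LookupError(f"Could not find a ref for {schema['$ref']}.")
    return resolved

print(resolve({"$ref": "#/$defs/User"}))  # -> {'type': 'object'}
print(resolve({"type": "string"}))        # returned unchanged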
mindsdb/otto
ottoai/classes.py
[ { "identifier": "INSTRUCTION", "path": "ottoai/templates.py", "snippet": "INSTRUCTION = \"\"\"\n\nYou write python code, write the simplest and most effective Python function to answer the following.\n\nQuestion: {question}\n\nFollow these instructions to write the function:\n\n- The function must be called 'runner' \n- code should only have the necessary imports and the function runner\n- The function shall return a response to the question\n- Only import the fewest possible pip modules from this list: ({modules_metadata}), \n- Import the minimum number of modules necessary\n- The function takes only one argument called 'input' as as follows:\n\ninput={input_dictionary_string}\n\n\"\"\"" }, { "identifier": "llm_completion", "path": "ottoai/helpers.py", "snippet": "def create_string(arg_data):\ndef extract_python_code_from_md(md_string):\ndef get_runner_function(code_string):" } ]
from ottoai.templates import INSTRUCTION from ottoai.helpers import llm_completion, create_string, extract_python_code_from_md, get_runner_function import logging import os import json import pkg_resources import subprocess import os import openai import logger import json
1,069
class Assistant: """ The Assistant class is responsible for managing the skills and conversations. """ def __init__(self, name: str, personality: str, llm_engine, model: str, user_context_variables: dict = {}): """ Initialize the assistant with a name, personality, language model engine, and model. """ self.name = name self.personality = personality self.llm_engine = llm_engine self.model = model self.pip_skills = [] self.user_context_variables = user_context_variables def _m(self, messages): return llm_completion(model=self.model, messages=messages) def set_user_context_variables(self, user_context_variables: dict = {}): """ Set the user context variables for the assistant. Parameters: user_context_variables (dict): A dictionary containing the user context variables. """ self.user_context_variables = user_context_variables def add_pip_skill(self, pip_module): """ Add a new skill to the assistant. """ installed_packages = pkg_resources.working_set installed_packages_list = sorted(["%s==%s" % (i.key, i.version) for i in installed_packages]) if pip_module not in installed_packages_list: try: installed_packages_pip_freeze = subprocess.check_output(["pip", "freeze"]).decode().split('\n') if pip_module not in (package.split('==')[0] for package in installed_packages_pip_freeze) and pip_module not in (package.split('==')[0] for package in installed_packages_pip_freeze): raise ImportError(f"Trying to add skill, but pip module {pip_module} is not installed. \nTo solve this try: pip install {pip_module}") except subprocess.CalledProcessError: raise ImportError(f"Failed to execute pip freeze.") self.pip_skills.append(pip_module) def question(self, text: str): """ Send a message to the assistant and return the assistant's response. """ response = self.generate_and_run_code_for_question(text) return response def start_conversation(self, user_name: str): """ Start a new conversation with the user. """ return Conversation(self, user_name) def generate_and_run_code_for_question(self, question, retries_until_figured = 10): arg_data = { "context_variables": {key: "<...HIDDEN...>" for key in self.user_context_variables} } arg_data_all = { "context_variables": self.user_context_variables } arguments_dictionary_str = create_string(arg_data) modules_metadata = ", ".join(self.pip_skills) instruction =INSTRUCTION.format(modules_metadata = modules_metadata, input_dictionary_string = arguments_dictionary_str, question=question) logging.debug("[OTTO] Generated Instruction: " + instruction) messages = [{"role": "system", "content": instruction}] error = 1 code = '' error_message = None for _ in range(retries_until_figured): if error_message: messages += [{"role": "system", "content":"ran {code} and had this error: {error}".format(code=code, error=error_message)}] logging.debug("[OTTO] Messages: \n" + json.dumps(messages, indent=4)) resp = self._m(messages) code = resp['choices'][0]['message']['content'] error_message = None try: function_code = extract_python_code_from_md(code) if function_code is not None: code = function_code logging.debug("[OTTO] Generated Code: \n```\n{code}\n\n```\n".format(code = code))
runner_function = get_runner_function(code)
1
2023-10-18 00:09:18+00:00
2k
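The error-feedback loop inside generate_and_run_code_for_question above generalizes to a small pattern; a hedged sketch with the LLM call and code execution stubbed out as callables:

def answer_with_retries(ask_llm, run_code, retries=10):
    # Mirrors the loop above: on failure, append the error to the transcript
    # so the model can repair its own code on the next attempt.
    messages, last_error = [], None
    for _ in range(retries):
        if last_error is not None:
            messages.append({"role": "system", "content": f"ran code and got error: {last_error}"})
        code = ask_llm(messages)
        try:
            return run_code(code)
        except Exception as exc:
            last_error = exc
    raise RuntimeError("no working code produced within the retry budget")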
adarshxs/TokenTally
tools/llm_cost_calculator.py
[ { "identifier": "load_base_models", "path": "utilities.py", "snippet": "def load_base_models():\n with open(\"models.json\", \"r\") as f:\n return json.load(f)" }, { "identifier": "load_quantization", "path": "utilities.py", "snippet": "def load_quantization():\n with open(\"quantization.json\", \"r\") as f:\n return json.load(f)" }, { "identifier": "load_gpus", "path": "utilities.py", "snippet": "def load_gpus():\n with open(\"gpus.json\", \"r\") as f:\n return json.load(f)" }, { "identifier": "load_gpu_providers", "path": "utilities.py", "snippet": "def load_gpu_providers():\n return pd.read_csv('cloud-gpus.csv')" }, { "identifier": "convert_params", "path": "utilities.py", "snippet": "def convert_params(params):\n if params == 0:\n return \"0\"\n size_name = (\"\", \"K\", \"M\", \"B\", \"T\", \"P\", \"E\", \"Z\", \"Y\")\n i = int(math.floor(math.log(params, 1000)))\n p = math.pow(1000, i)\n s = round(params / p, 2)\n return \"%s %s\" % (s, size_name[i])" }, { "identifier": "compute_bound_tokens_p_sec", "path": "utilities.py", "snippet": "def compute_bound_tokens_p_sec(flops_per_token, flops_per_gpu, num_gpus):\n return (flops_per_gpu * num_gpus * 10**12) / (flops_per_token * 10**9)" }, { "identifier": "memory_bound_tokens_p_sec", "path": "utilities.py", "snippet": "def memory_bound_tokens_p_sec(memory_bandwidth_per_gpu, flops_per_token, num_gpus):\n return (memory_bandwidth_per_gpu * num_gpus * 10**12) / (flops_per_token * 10**9)" }, { "identifier": "cost_per_1k_tokens", "path": "utilities.py", "snippet": "def cost_per_1k_tokens(flops_per_token, flops_per_gpu, num_gpus, cost_per_hour, memory_bandwidth_per_gpu):\n tokens_p_sec_compute = compute_bound_tokens_p_sec(flops_per_token, flops_per_gpu, num_gpus)\n tokens_p_sec_memory = memory_bound_tokens_p_sec(memory_bandwidth_per_gpu, flops_per_token, num_gpus)\n \n cost_p_sec = cost_per_hour / 3600 # cost per second\n \n cost_p_token_compute = cost_p_sec / tokens_p_sec_compute\n cost_p_token_memory = cost_p_sec / tokens_p_sec_memory\n \n cost_p_1k_tokens_compute = cost_p_token_compute * 1000\n cost_p_1k_tokens_memory = cost_p_token_memory * 1000\n \n return cost_p_1k_tokens_compute, cost_p_1k_tokens_memory" } ]
import streamlit as st from utilities import load_base_models, load_quantization, load_gpus, load_gpu_providers, convert_params, compute_bound_tokens_p_sec, memory_bound_tokens_p_sec, cost_per_1k_tokens
678
def display_llm_cost_tool(): st.title("Token Tally: LLM Cost Estimator") st.subheader("Estimate Your LLM's Token Toll Across Various Platforms and Configurations") # Base model and configurations data base_models = load_base_models()
quantization_data = load_quantization()
1
2023-10-18 06:16:47+00:00
2k
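A worked example of the throughput and cost formulas quoted in the record's context above; all hardware figures are invented:

# Invented figures: 2 GPUs, 300 TFLOPs and 1 TB/s memory bandwidth each,
# 140 GFLOPs per generated token, rented at $2.00/hour.
flops_per_token, flops_per_gpu, num_gpus = 140, 300, 2
mem_bw_per_gpu, cost_per_hour = 1, 2.0

tokens_per_sec_compute = (flops_per_gpu * num_gpus * 10**12) / (flops_per_token * 10**9)
tokens_per_sec_memory = (mem_bw_per_gpu * num_gpus * 10**12) / (flops_per_token * 10**9)
cost_per_sec = cost_per_hour / 3600
print(cost_per_sec / tokens_per_sec_compute * 1000)  # ~$0.00013 per 1k tokens, compute-bound
print(cost_per_sec / tokens_per_sec_memory * 1000)   # ~$0.0389 per 1k tokens, memory-bound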
WestlakeIntelligentRobotics/ConsensusLLM-code
modules/llm/agent_2d.py
[ { "identifier": "GPT", "path": "modules/llm/gpt.py", "snippet": "class GPT:\n \"\"\"\n Initialize the GPT class for interacting with OpenAI's GPT model.\n GPT provides basic methods for interacting with the model and parsing its\n output.\n \"\"\"\n\n def __init__(self, key: str, model: str = 'gpt-3.5-turbo-0613',\n temperature: float = 0.7, keep_memory: bool = True):\n \"\"\"\n Initialize the GPT class.\n\n Args:\n key (str): OpenAI API key.\n model (str): The model to use (default: gpt-3.5-turbo-0613).\n temperature (float): Temperature for text generation (default: 0.7).\n keep_memory (bool): Whether to retain memories (default: True).\n \"\"\"\n self._model = model\n self._openai_key = key\n self._cost = 0\n self._memories = []\n self._keep_memory = keep_memory\n self._temperature = temperature\n self._history = []\n\n def get_memories(self):\n \"\"\"\n Get the current memories.\n\n Returns:\n list: List of memories.\n \"\"\"\n return self._memories\n\n def get_history(self):\n \"\"\"\n Get the conversation history.\n\n Returns:\n list: List of conversation history.\n \"\"\"\n return self._history\n\n def memories_update(self, role: str, content: str):\n \"\"\"\n Update memories to set roles (system, user, assistant) and content,\n forming a complete memory.\n\n Args:\n role (str): Role (system, user, assistant).\n content (str): Content.\n\n Raises:\n ValueError: If an unrecognized role is provided or if roles are\n added in an incorrect sequence.\n \"\"\"\n if role not in [\"system\", \"user\", \"assistant\"]:\n raise ValueError(f\"Unrecognized role: {role}\")\n\n if role == \"system\" and len(self._memories) > 0:\n raise ValueError('System role can only be added when memories are '\n 'empty')\n if (role == \"user\" and len(self._memories) > 0 and\n self._memories[-1][\"role\"] == \"user\"):\n raise ValueError('User role can only be added if the previous '\n 'round was a system or assistant role')\n if (role == \"assistant\" and len(self._memories) > 0 and\n self._memories[-1][\"role\"] != \"user\"):\n raise ValueError('Assistant role can only be added if the previous '\n 'round was a user role')\n self._memories.append({\"role\": role, \"content\": content})\n self._history.append({\"role\": role, \"content\": content})\n\n def generate_answer(self, input: str, try_times=0, **kwargs) -> str:\n \"\"\"\n Interact with the GPT model and generate an answer.\n\n Args:\n input (str): Prompt or user input.\n try_times (int): Number of attempts (default is 0).\n kwargs: Additional parameters for the model.\n\n Returns:\n str: Text-based output result.\n\n Raises:\n ConnectionError: If there's an error in generating the answer.\n \"\"\"\n if not self._keep_memory:\n self._memories = [self._memories[0]]\n\n if try_times == 0:\n self._memories.append({\"role\": \"user\", \"content\": input})\n self._history.append({\"role\": \"user\", \"content\": input})\n else:\n if self._memories[-1][\"role\"] == \"assistant\":\n self._memories = self._memories[:-1]\n\n openai.api_key = self._openai_key\n\n try:\n response = openai.ChatCompletion.create(\n model=self._model,\n messages=self._memories,\n temperature=self._temperature,\n **kwargs\n )\n self._cost += response['usage'][\"total_tokens\"]\n content = response['choices'][0]['message']['content']\n self._memories.append({\"role\": \"assistant\", \"content\": content})\n self._history.append({\"role\": \"assistant\", \"content\": content})\n return content\n except Exception as e:\n raise ConnectionError(f\"Error in generate_answer: {e}\")" }, { "identifier": "summarizer_role", "path": "modules/prompt/summarize.py", "snippet": "" }, { "identifier": "summarizer_output_form", "path": "modules/prompt/form.py", "snippet": "" } ]
import re import numpy as np from .gpt import GPT from ..prompt.summarize import summarizer_role from ..prompt.form import summarizer_output_form
1,369
"""
MIT License

Copyright (c) [2023] [Intelligent Unmanned Systems Laboratory at Westlake
University]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING
FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE, OR OTHER
DEALINGS IN THE SOFTWARE.
"""
class Agent2D(GPT):
0
2023-10-20 07:58:07+00:00
2k
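The role-ordering constraints enforced by memories_update in the record above, shown as a minimal transcript; the key is a placeholder and no API request is made:

from modules.llm.gpt import GPT  # import path taken from the record's context

gpt = GPT(key="sk-placeholder", model="gpt-3.5-turbo-0613")
gpt.memories_update("system", "You are a negotiating agent in 2D space.")
gpt.memories_update("user", "State your current position.")
gpt.memories_update("assistant", "My position is (1.0, 2.0).")
# Two consecutive "user" entries, or an "assistant" entry not preceded by a
# "user" entry, would raise ValueError per the checks above.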
inngest/inngest-py
inngest/_internal/middleware_lib/middleware.py
[ { "identifier": "client_lib", "path": "inngest/_internal/client_lib.py", "snippet": "_DEV_SERVER_EVENT_KEY = \"NO_EVENT_KEY_SET\"\nclass Inngest:\n def api_origin(self) -> str:\n def event_api_origin(self) -> str:\n def event_key(self) -> str | None:\n def signing_key(self) -> str | None:\n def __init__(\n self,\n *,\n api_base_url: str | None = None,\n app_id: str,\n event_api_base_url: str | None = None,\n event_key: str | None = None,\n is_production: bool | None = None,\n logger: types.Logger | None = None,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n signing_key: str | None = None,\n ) -> None:\n def _build_send_request(\n self,\n events: list[event_lib.Event],\n ) -> types.MaybeError[httpx.Request]:\n def add_middleware(\n self,\n middleware: type[\n middleware_lib.Middleware | middleware_lib.MiddlewareSync\n ],\n ) -> None:\n def create_function(\n self,\n *,\n batch_events: function_config.Batch | None = None,\n cancel: list[function_config.Cancel] | None = None,\n debounce: function_config.Debounce | None = None,\n fn_id: str,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n name: str | None = None,\n on_failure: function.FunctionHandlerAsync\n | function.FunctionHandlerSync\n | None = None,\n rate_limit: function_config.RateLimit | None = None,\n retries: int | None = None,\n throttle: function_config.Throttle | None = None,\n trigger: function_config.TriggerCron | function_config.TriggerEvent,\n ) -> typing.Callable[\n def decorator(\n func: function.FunctionHandlerAsync | function.FunctionHandlerSync,\n ) -> function.Function:\n async def send(\n self,\n events: event_lib.Event | list[event_lib.Event],\n ) -> list[str]:\n def send_sync(\n self,\n events: event_lib.Event | list[event_lib.Event],\n ) -> list[str]:\n def set_logger(self, logger: types.Logger) -> None:\ndef _extract_ids(body: object) -> list[str]:" }, { "identifier": "execution", "path": "inngest/_internal/execution.py", "snippet": "class Call(types.BaseModel):\nclass CallContext(types.BaseModel):\nclass CallStack(types.BaseModel):\nclass CallError(types.BaseModel):\nclass FunctionCallResponse(types.BaseModel):\nclass StepResponse(types.BaseModel):\nclass Output(types.BaseModel):\nclass Opcode(enum.Enum):\n def from_error(cls, err: Exception) -> CallError:\ndef is_step_call_responses(\n value: object,\n) -> typing.TypeGuard[list[StepResponse]]:\n INVOKE = \"InvokeFunction\"\n PLANNED = \"StepPlanned\"\n SLEEP = \"Sleep\"\n STEP = \"Step\"\n WAIT_FOR_EVENT = \"WaitForEvent\"\nUNSPECIFIED_STEP_ID = \"step\"" }, { "identifier": "function", "path": "inngest/_internal/function.py", "snippet": "class Context:\nclass _Config:\nclass FunctionHandlerAsync(typing.Protocol):\nclass FunctionHandlerSync(typing.Protocol):\nclass FunctionOpts(types.BaseModel):\nclass Function:\nclass _UserError(Exception):\n def __call__(\n self,\n ctx: Context,\n step: step_lib.Step,\n ) -> typing.Awaitable[types.Serializable]:\n def __call__(\n self,\n ctx: Context,\n step: step_lib.StepSync,\n ) -> types.Serializable:\ndef _is_function_handler_async(\n value: FunctionHandlerAsync | FunctionHandlerSync,\n) -> typing.TypeGuard[FunctionHandlerAsync]:\ndef _is_function_handler_sync(\n value: FunctionHandlerAsync | FunctionHandlerSync,\n) -> typing.TypeGuard[FunctionHandlerSync]:\n def convert_validation_error(\n self,\n err: pydantic.ValidationError,\n ) -> BaseException:\n def id(self) -> str:\n def is_handler_async(self) -> bool:\n def is_on_failure_handler_async(self) -> bool | None:\n def on_failure_fn_id(self) -> str | None:\n def __init__(\n self,\n opts: FunctionOpts,\n trigger: function_config.TriggerCron | function_config.TriggerEvent,\n handler: FunctionHandlerAsync | FunctionHandlerSync,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n ) -> None:\n async def call( # noqa: C901\n self,\n call: execution.Call,\n client: client_lib.Inngest,\n ctx: Context,\n fn_id: str,\n middleware: middleware_lib.MiddlewareManager,\n target_hashed_id: str | None,\n ) -> execution.CallResult:\n def call_sync( # noqa: C901\n self,\n call: execution.Call,\n client: client_lib.Inngest,\n ctx: Context,\n fn_id: str,\n middleware: middleware_lib.MiddlewareManager,\n target_hashed_id: str | None,\n ) -> execution.CallResult:\n def get_config(self, app_url: str) -> _Config:\n def get_id(self) -> str:\n def __init__(self, err: Exception) -> None:\ndef _remove_first_traceback_frame(err: Exception) -> None:" } ]
from inngest._internal import client_lib, execution, function
1,522
from __future__ import annotations class Middleware: def __init__(self, client: client_lib.Inngest) -> None: self._client = client async def after_execution(self) -> None: """ After executing new code. Called multiple times per run when using steps. """ return None async def before_execution(self) -> None: """ Before executing new code. Called multiple times per run when using steps. """ return None async def before_response(self) -> None: """ After the output has been set and before the response is sent back to Inngest. This is where you can perform any final actions before the response is sent back to Inngest. Called multiple times per run when using steps. Not called for function middleware. """ return None async def transform_input( self,
from __future__ import annotations


class Middleware:
    def __init__(self, client: client_lib.Inngest) -> None:
        self._client = client

    async def after_execution(self) -> None:
        """
        After executing new code. Called multiple times per run when using
        steps.
        """

        return None

    async def before_execution(self) -> None:
        """
        Before executing new code. Called multiple times per run when using
        steps.
        """

        return None

    async def before_response(self) -> None:
        """
        After the output has been set and before the response is sent back to
        Inngest. This is where you can perform any final actions before the
        response is sent back to Inngest. Called multiple times per run when
        using steps. Not called for function middleware.
        """

        return None

    async def transform_input(
        self,
ctx: function.Context,
2
2023-10-19 01:02:30+00:00
2k
f0uriest/quadax
quadax/fixed_order.py
[ { "identifier": "cc_weights", "path": "quadax/quad_weights.py", "snippet": "def _cc_get_weights(N):\ndef _get_tmax(xmax):\n D = 2 / N * np.cos(k[:, None] * n[None, :] * np.pi / (N // 2))\n D = np.where((n == 0) | (n == N // 2), D * 1 / 2, D)\n N = int(2 * 2**i)\n N = int(2 * 2**i)" }, { "identifier": "wrap_func", "path": "quadax/utils.py", "snippet": "def wrap_func(fun, args):\n \"\"\"Vectorize, jit, and mask out inf/nan.\"\"\"\n f = jax.eval_shape(fun, jnp.array(0.0), *args)\n # need to make sure we get the correct shape for array valued integrands\n outsig = \"(\" + \",\".join(\"n\" + str(i) for i in range(len(f.shape))) + \")\"\n\n @jax.jit\n @partial(jnp.vectorize, signature=\"()->\" + outsig)\n def wrapped(x):\n f = fun(x, *args)\n return jnp.where(jnp.isfinite(f), f, 0.0)\n\n return wrapped" } ]
import functools
import jax
import jax.numpy as jnp
from .quad_weights import cc_weights, gk_weights, ts_weights
from .utils import wrap_func
809
"""Fixed order quadrature.""" def _dot(w, f): return jnp.sum(w * f.T, axis=-1).T @functools.partial(jax.jit, static_argnums=(0, 4, 5)) def fixed_quadgk(fun, a, b, args=(), norm=jnp.inf, n=21): """Integrate a function from a to b using a fixed order Gauss-Konrod rule. Integration is performed using an order n Konrod rule with error estimated using an embedded n//2 order Gauss rule. Parameters ---------- fun : callable Function to integrate, should have a signature of the form ``fun(x, *args)`` -> float, Array. Should be JAX transformable. a, b : float Lower and upper limits of integration. Must be finite. args : tuple, optional Extra arguments passed to fun. norm : int, callable Norm to use for measuring error for vector valued integrands. No effect if the integrand is scalar valued. If an int, uses p-norm of the given order, otherwise should be callable. n : {15, 21, 31, 41, 51, 61} Order of integration scheme. Returns ------- y : float, Array Estimate of the integral of fun from a to b err : float Estimate of the absolute error in y from nested Gauss rule. y_abs : float, Array Estimate of the integral of abs(fun) from a to b y_mmn : float, Array Estimate of the integral of abs(fun - <fun>) from a to b, where <fun> is the mean value of fun over the interval. """ _norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm) vfun = wrap_func(fun, args) def truefun(): f = jax.eval_shape(vfun, jnp.array(0.0)) z = jnp.zeros(f.shape, f.dtype) return z, 0.0, z, z def falsefun(): try: xk, wk, wg = (
"""Fixed order quadrature.""" def _dot(w, f): return jnp.sum(w * f.T, axis=-1).T @functools.partial(jax.jit, static_argnums=(0, 4, 5)) def fixed_quadgk(fun, a, b, args=(), norm=jnp.inf, n=21): """Integrate a function from a to b using a fixed order Gauss-Konrod rule. Integration is performed using an order n Konrod rule with error estimated using an embedded n//2 order Gauss rule. Parameters ---------- fun : callable Function to integrate, should have a signature of the form ``fun(x, *args)`` -> float, Array. Should be JAX transformable. a, b : float Lower and upper limits of integration. Must be finite. args : tuple, optional Extra arguments passed to fun. norm : int, callable Norm to use for measuring error for vector valued integrands. No effect if the integrand is scalar valued. If an int, uses p-norm of the given order, otherwise should be callable. n : {15, 21, 31, 41, 51, 61} Order of integration scheme. Returns ------- y : float, Array Estimate of the integral of fun from a to b err : float Estimate of the absolute error in y from nested Gauss rule. y_abs : float, Array Estimate of the integral of abs(fun) from a to b y_mmn : float, Array Estimate of the integral of abs(fun - <fun>) from a to b, where <fun> is the mean value of fun over the interval. """ _norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm) vfun = wrap_func(fun, args) def truefun(): f = jax.eval_shape(vfun, jnp.array(0.0)) z = jnp.zeros(f.shape, f.dtype) return z, 0.0, z, z def falsefun(): try: xk, wk, wg = (
gk_weights[n]["xk"],
0
2023-10-24 04:44:34+00:00
2k
smonsays/metax
examples/maml-omniglot.py
[ { "identifier": "DATAPATH", "path": "metax/data/base.py", "snippet": "DATAPATH = Path(os.path.expanduser(\"~/data/jax\"))" }, { "identifier": "Dataset", "path": "metax/data/base.py", "snippet": "class Dataset(NamedTuple):\n x: Array\n y: Array\n info: Dict = dict()" }, { "identifier": "MetaDataset", "path": "metax/data/base.py", "snippet": "class MetaDataset(NamedTuple):\n train: Union[Dataset, MultitaskDataset]\n test: Union[Dataset, MultitaskDataset]" } ]
import argparse
import jax
import jax.numpy as jnp
import jax.tree_util as jtu
import optax
import metax
from jax_meta.datasets import Omniglot
from metax.data.base import DATAPATH, Dataset, MetaDataset
975
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ parser = argparse.ArgumentParser() parser.add_argument("--batch_size", type=int, default=None) parser.add_argument("--bn_decay", type=float, default=0.9) parser.add_argument("--channels", type=int, default=64) parser.add_argument("--num_tasks_test", type=int, default=100) parser.add_argument("--num_tasks_train", type=int, default=10000) parser.add_argument("--num_tasks_valid", type=int, default=10) parser.add_argument("--ways", type=int, default=5) parser.add_argument("--shots_test", type=int, default=10) parser.add_argument("--shots_train", type=int, default=10) parser.add_argument("--first_order", type=bool, default=False) parser.add_argument("--lr_inner", type=float, default=0.4) parser.add_argument("--lr_outer", type=float, default=0.001) parser.add_argument("--meta_batch_size", type=int, default=16) parser.add_argument("--steps_inner", type=int, default=1) parser.add_argument("--steps_outer", type=int, default=100) parser.add_argument("--seed", type=int, default=2022) args = parser.parse_args() # Load data from [jax_meta](https://github.com/tristandeleu/jax-meta-learning) metaloader = Omniglot( DATAPATH, batch_size=args.meta_batch_size, shots=args.shots_train, ways=args.ways, ) metaloader.input_shape = metaloader.shape metaloader.output_dim = metaloader.ways metaloader.sample_input = jnp.array(metaloader.dummy_input) # Define the loss, meta-model and meta-learning algorithm base_model = metax.models.Conv4(args.channels, args.bn_decay, readout=args.ways) meta_model = metax.module.LearnedInit( loss_fn_inner=metax.energy.CrossEntropy(), loss_fn_outer=metax.energy.CrossEntropy(), base_learner=base_model, reg_strength=None ) meta_learner = metax.learner.ModelAgnosticMetaLearning( meta_model=meta_model, batch_size=args.batch_size, steps_inner=args.steps_inner, optim_fn_inner=optax.sgd(args.lr_inner), optim_fn_outer=optax.adam(args.lr_outer), first_order=args.first_order, ) # Initialize rng = jax.random.PRNGKey(args.seed) rng_reset, rng_train, rng_test = jax.random.split(rng, 3) meta_state = meta_learner.reset(rng_reset, metaloader.sample_input) meta_update = jax.jit(meta_learner.update) meta_eval = jax.jit(meta_learner.eval, static_argnames="steps") # Train for idx, batch in zip(range(args.steps_outer), metaloader): # Mangle data into the format expected by metax
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ parser = argparse.ArgumentParser() parser.add_argument("--batch_size", type=int, default=None) parser.add_argument("--bn_decay", type=float, default=0.9) parser.add_argument("--channels", type=int, default=64) parser.add_argument("--num_tasks_test", type=int, default=100) parser.add_argument("--num_tasks_train", type=int, default=10000) parser.add_argument("--num_tasks_valid", type=int, default=10) parser.add_argument("--ways", type=int, default=5) parser.add_argument("--shots_test", type=int, default=10) parser.add_argument("--shots_train", type=int, default=10) parser.add_argument("--first_order", type=bool, default=False) parser.add_argument("--lr_inner", type=float, default=0.4) parser.add_argument("--lr_outer", type=float, default=0.001) parser.add_argument("--meta_batch_size", type=int, default=16) parser.add_argument("--steps_inner", type=int, default=1) parser.add_argument("--steps_outer", type=int, default=100) parser.add_argument("--seed", type=int, default=2022) args = parser.parse_args() # Load data from [jax_meta](https://github.com/tristandeleu/jax-meta-learning) metaloader = Omniglot( DATAPATH, batch_size=args.meta_batch_size, shots=args.shots_train, ways=args.ways, ) metaloader.input_shape = metaloader.shape metaloader.output_dim = metaloader.ways metaloader.sample_input = jnp.array(metaloader.dummy_input) # Define the loss, meta-model and meta-learning algorithm base_model = metax.models.Conv4(args.channels, args.bn_decay, readout=args.ways) meta_model = metax.module.LearnedInit( loss_fn_inner=metax.energy.CrossEntropy(), loss_fn_outer=metax.energy.CrossEntropy(), base_learner=base_model, reg_strength=None ) meta_learner = metax.learner.ModelAgnosticMetaLearning( meta_model=meta_model, batch_size=args.batch_size, steps_inner=args.steps_inner, optim_fn_inner=optax.sgd(args.lr_inner), optim_fn_outer=optax.adam(args.lr_outer), first_order=args.first_order, ) # Initialize rng = jax.random.PRNGKey(args.seed) rng_reset, rng_train, rng_test = jax.random.split(rng, 3) meta_state = meta_learner.reset(rng_reset, metaloader.sample_input) meta_update = jax.jit(meta_learner.update) meta_eval = jax.jit(meta_learner.eval, static_argnames="steps") # Train for idx, batch in zip(range(args.steps_outer), metaloader): # Mangle data into the format expected by metax
batch = MetaDataset(
2
2023-10-19 16:36:20+00:00
2k
claws-lab/XLingEval
consistency/consistency_get_medalpaca_answer.py
[ { "identifier": "init_medalpaca_model", "path": "consistency/Medalpaca/model_medalpaca.py", "snippet": "def init_medalpaca_model(args):\n # --- Flags from the original code ---\n load_in_8bit = False\n cache_dir = None\n \n print(f\"Loading model {args.model}...\")\n if args.model == \"medalpaca-30b\":\n base_model = \"decapoda-research/llama-30b-hf\"\n model_name = \"medalpaca/medalpaca-lora-30b-8bit\"\n peft = True\n\n elif args.model == \"medalpaca-13b\":\n base_model = \"decapoda-research/llama-13b-hf\"\n model_name = \"medalpaca/medalpaca-lora-13b-8bit\"\n peft = True\n\n elif args.model == \"medalpaca-7b\":\n\n base_model = \"../PPLM/models_hf/7B\"\n model_name = \"medalpaca/medalpaca-7b\"\n model_name = \"medalpaca/medalpaca-lora-7b-16bit\"\n peft = True\n\n cache_dir = \"../medAlpaca/medalpaca-7b\"\n\n else:\n raise ValueError(f\"Unknown model: {args.model}\")\n\n\n prompt_template = f\"consistency/Medalpaca/prompt_templates/medalpaca_consistency.json\"\n\n # ------------------------------------\n\n # Only initialize this model on a Linux machine, which has sufficient GPU memory.\n\n print(\"peft\", peft)\n print(\"load_in_8bit\", load_in_8bit)\n if platform.system() == \"Linux\":\n model = Inferer(\n model_name=model_name,\n prompt_template=prompt_template,\n # f\"../medalpaca/prompt_templates/medalpaca.json\",\n base_model=base_model,\n peft=peft,\n load_in_8bit=load_in_8bit,\n args=args,\n cache_dir=cache_dir,\n )\n\n else:\n model = None\n\n return model" }, { "identifier": "args", "path": "arguments.py", "snippet": "REDDIT_COMMENTS_DIR = \"E:\\\\data\\\\Reddit\\\\comments\"\nDATA_DIR = \"F:\\\\data\\\\NLP\"\nDEVICE_MAP = {\"\": 0}\n DATA_DIR = osp.join(const.HOME_DIR_LINUX_SERVER, \"Workspace\", \"data\", \"NLP\")\n DEVICE_MAP = {\"\": [0, 1, 2, 3]}\n DATA_DIR = osp.join(const.HOME_DIR_LINUX, \"Workspace\", \"storage\", \"NLP\")\n DEVICE_MAP = {\"\": [0, 1]}\nDATA_DIR = \"data\"\nDEVICE_MAP = {\"\": 0}" }, { "identifier": "load_data_consistency", "path": "consistency/data_consistency.py", "snippet": "def load_data_consistency(args):\n if args.dataset_name == \"liveqa\":\n examples = load_LiveQA(language=args.target_language)\n\n elif args.dataset_name == \"medicationqa\":\n examples = load_MedicationQA(language=args.target_language)\n\n elif args.dataset_name == \"healthqa\":\n examples = load_HealthQA(split=args.split,\n language=args.target_language)\n\n else:\n raise NotImplementedError\n\n return examples" }, { "identifier": "load_results_consistency", "path": "consistency/data_consistency.py", "snippet": "def load_results_consistency(args):\n path = get_consistency_results_path(args)\n\n if osp.exists(path):\n results_df = pd.read_excel(path)\n\n print(f\"Loaded {len(results_df)} examples from {path}\")\n\n\n else:\n results_df = pd.DataFrame()\n\n return results_df" }, { "identifier": "get_consistency_results_path", "path": "consistency/data_consistency.py", "snippet": "def get_consistency_results_path(args):\n if args.model != \"gpt35\":\n model_prefix = f\"{args.model}_\"\n\n else:\n model_prefix = \"\"\n\n if args.dataset_name in [\"liveqa\", \"medicationqa\"]:\n path = osp.join(args.output_dir, \"consistency\",\n f\"{model_prefix}{args.dataset_name}_consistency_temp{args.temperature}.xlsx\")\n\n elif args.dataset_name in [\"healthqa\"]:\n path = osp.join(args.output_dir, \"consistency\",\n f\"{model_prefix}{args.dataset_name}_{args.split}_consistency_temp{args.temperature}.xlsx\")\n\n else:\n raise NotImplementedError\n return path" } ]
import os
import os.path as osp
import re
import string
import sys
import traceback
import torch
import pandas as pd
import const
from tqdm import trange
from consistency.Medalpaca.model_medalpaca import init_medalpaca_model
from arguments import args
from consistency.data_consistency import load_data_consistency, \
    load_results_consistency, get_consistency_results_path
from setup import project_setup
from consistency.Medalpaca.params_medalpaca import *
1,542
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))

if osp.exists(const.HOME_DIR_LINUX):
    cuda_path = "/usr/local/cuda-11.7/bin/nvcc"

    if "LD_LIBRARY_PATH" in os.environ:
        os.environ["LD_LIBRARY_PATH"] += f"{cuda_path}"
    else:
        os.environ["LD_LIBRARY_PATH"] = cuda_path


def format_question(d):
    question = d["question"]
    options = d["options"]
    for k, v in options.items():
        question += f"\n{k}: {v}"
    return question


def strip_special_chars(input_str):
    "Remove special characters from string start/end"
    if not input_str:
        return input_str

    start_index = 0
    end_index = len(input_str) - 1

    while start_index < len(input_str) and input_str[
        start_index] not in string.ascii_letters + string.digits:
        start_index += 1

    while end_index >= 0 and input_str[
        end_index] not in string.ascii_letters + string.digits:
        end_index -= 1

    if start_index <= end_index:
        return input_str[start_index:end_index + 1]
    else:
        return ""


def starts_with_capital_letter(input_str):
    """
    The answers should start like this:
        'A: '
        'A. '
        'A '
    """
    pattern = r'^[A-Z](:|\.|) .+'
    return bool(re.match(pattern, input_str))


def run_consistency_medalpaca():
    path = get_consistency_results_path(args)

    model = init_medalpaca_model(args)

    sampling['temperature'] = args.temperature

    examples = load_data_consistency(args)
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))

if osp.exists(const.HOME_DIR_LINUX):
    cuda_path = "/usr/local/cuda-11.7/bin/nvcc"

    if "LD_LIBRARY_PATH" in os.environ:
        os.environ["LD_LIBRARY_PATH"] += f"{cuda_path}"
    else:
        os.environ["LD_LIBRARY_PATH"] = cuda_path


def format_question(d):
    question = d["question"]
    options = d["options"]
    for k, v in options.items():
        question += f"\n{k}: {v}"
    return question


def strip_special_chars(input_str):
    "Remove special characters from string start/end"
    if not input_str:
        return input_str

    start_index = 0
    end_index = len(input_str) - 1

    while start_index < len(input_str) and input_str[
        start_index] not in string.ascii_letters + string.digits:
        start_index += 1

    while end_index >= 0 and input_str[
        end_index] not in string.ascii_letters + string.digits:
        end_index -= 1

    if start_index <= end_index:
        return input_str[start_index:end_index + 1]
    else:
        return ""


def starts_with_capital_letter(input_str):
    """
    The answers should start like this:
        'A: '
        'A. '
        'A '
    """
    pattern = r'^[A-Z](:|\.|) .+'
    return bool(re.match(pattern, input_str))


def run_consistency_medalpaca():
    path = get_consistency_results_path(args)

    model = init_medalpaca_model(args)

    sampling['temperature'] = args.temperature

    examples = load_data_consistency(args)
results_df = load_results_consistency(args)
3
2023-10-18 17:35:42+00:00
2k
vtuber-plan/olah
olah/meta.py
[ { "identifier": "OlahConfig", "path": "olah/configs.py", "snippet": "class OlahConfig(object):\n def __init__(self, path: Optional[str] = None) -> None:\n\n # basic\n self.host = \"localhost\"\n self.port = 8090\n self.ssl_key = None\n self.ssl_cert = None\n self.repos_path = \"./repos\"\n self.hf_url = \"https://huggingface.co\"\n self.hf_lfs_url = \"https://cdn-lfs.huggingface.co\"\n self.mirror_url = \"http://localhost:8090\"\n self.mirror_lfs_url = \"http://localhost:8090\"\n\n # accessibility\n self.offline = True\n self.proxy = OlahRuleList.from_list(DEFAULT_PROXY_RULES)\n self.cache = OlahRuleList.from_list(DEFAULT_CACHE_RULES)\n\n if path is not None:\n self.read_toml(path)\n \n def empty_str(self, s: str) -> Optional[str]:\n if s == \"\":\n return None\n else:\n return s\n\n def read_toml(self, path: str):\n config = toml.load(path)\n\n if \"basic\" in config:\n basic = config[\"basic\"]\n self.host = basic.get(\"host\", self.host)\n self.port = basic.get(\"port\", self.port)\n self.ssl_key = self.empty_str(basic.get(\"ssl-key\", self.ssl_key))\n self.ssl_cert = self.empty_str(basic.get(\"ssl-cert\", self.ssl_cert))\n self.repos_path = basic.get(\"repos-path\", self.repos_path)\n self.hf_url = basic.get(\"hf-url\", self.hf_url)\n self.hf_lfs_url = basic.get(\"hf-lfs-url\", self.hf_lfs_url)\n self.mirror_url = basic.get(\"mirror-url\", self.mirror_url)\n self.mirror_lfs_url = basic.get(\"mirror-lfs-url\", self.mirror_lfs_url)\n\n if \"accessibility\" in config:\n accessibility = config[\"accessibility\"]\n self.offline = accessibility.get(\"offline\", self.offline)\n self.proxy = OlahRuleList.from_list(accessibility.get(\"proxy\", self.proxy))\n self.cache = OlahRuleList.from_list(accessibility.get(\"cache\", self.cache))" }, { "identifier": "CHUNK_SIZE", "path": "olah/constants.py", "snippet": "CHUNK_SIZE = 4096" }, { "identifier": "WORKER_API_TIMEOUT", "path": "olah/constants.py", "snippet": "WORKER_API_TIMEOUT = 15" }, { "identifier": "check_cache_rules_hf", "path": "olah/utls.py", "snippet": "async def check_cache_rules_hf(app, repo_type: Literal[\"model\", \"dataset\", \"space\"], org: str, repo: str) -> bool:\n config: OlahConfig = app.app_settings.config\n return config.cache.allow(f\"{org}/{repo}\")" } ]
import os
import shutil
import tempfile
import httpx
from typing import Dict, Literal
from fastapi import FastAPI, Request
from olah.configs import OlahConfig
from olah.constants import CHUNK_SIZE, WORKER_API_TIMEOUT
from olah.utls import check_cache_rules_hf
1,141
async def meta_cache_generator(app: FastAPI, save_path: str):
    yield {}
    with open(save_path, "rb") as f:
        while True:
            chunk = f.read(CHUNK_SIZE)
            if not chunk:
                break
            yield chunk


async def meta_proxy_generator(app: FastAPI, headers: Dict[str, str], meta_url: str, allow_cache: bool, save_path: str):
    try:
        temp_file_path = None
        async with httpx.AsyncClient() as client:
            with tempfile.NamedTemporaryFile(mode="wb", delete=False) as temp_file:
                if not allow_cache:
                    temp_file = open(os.devnull, 'wb')
                async with client.stream(
                    method="GET", url=meta_url, headers=headers,
                    timeout=WORKER_API_TIMEOUT,
                ) as response:
                    response_headers = response.headers
                    yield response_headers

                    async for raw_chunk in response.aiter_raw():
                        if not raw_chunk:
                            continue
                        temp_file.write(raw_chunk)
                        yield raw_chunk
                if not allow_cache:
                    temp_file_path = None
                else:
                    temp_file_path = temp_file.name
        if temp_file_path is not None:
            shutil.copyfile(temp_file_path, save_path)
    finally:
        if temp_file_path is not None:
            os.remove(temp_file_path)


async def meta_generator(app: FastAPI, repo_type: Literal["model", "dataset"], org: str, repo: str, commit: str, request: Request):
    headers = {k: v for k, v in request.headers.items()}
    headers.pop("host")

    # save
    repos_path = app.app_settings.repos_path
    save_dir = os.path.join(repos_path, f"api/{repo_type}s/{org}/{repo}/revision/{commit}")
    save_path = os.path.join(save_dir, "meta.json")
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)

    use_cache = os.path.exists(save_path)
async def meta_cache_generator(app: FastAPI, save_path: str):
    yield {}
    with open(save_path, "rb") as f:
        while True:
            chunk = f.read(CHUNK_SIZE)
            if not chunk:
                break
            yield chunk


async def meta_proxy_generator(app: FastAPI, headers: Dict[str, str], meta_url: str, allow_cache: bool, save_path: str):
    try:
        temp_file_path = None
        async with httpx.AsyncClient() as client:
            with tempfile.NamedTemporaryFile(mode="wb", delete=False) as temp_file:
                if not allow_cache:
                    temp_file = open(os.devnull, 'wb')
                async with client.stream(
                    method="GET", url=meta_url, headers=headers,
                    timeout=WORKER_API_TIMEOUT,
                ) as response:
                    response_headers = response.headers
                    yield response_headers

                    async for raw_chunk in response.aiter_raw():
                        if not raw_chunk:
                            continue
                        temp_file.write(raw_chunk)
                        yield raw_chunk
                if not allow_cache:
                    temp_file_path = None
                else:
                    temp_file_path = temp_file.name
        if temp_file_path is not None:
            shutil.copyfile(temp_file_path, save_path)
    finally:
        if temp_file_path is not None:
            os.remove(temp_file_path)


async def meta_generator(app: FastAPI, repo_type: Literal["model", "dataset"], org: str, repo: str, commit: str, request: Request):
    headers = {k: v for k, v in request.headers.items()}
    headers.pop("host")

    # save
    repos_path = app.app_settings.repos_path
    save_dir = os.path.join(repos_path, f"api/{repo_type}s/{org}/{repo}/revision/{commit}")
    save_path = os.path.join(save_dir, "meta.json")
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)

    use_cache = os.path.exists(save_path)
allow_cache = await check_cache_rules_hf(app, repo_type, org, repo)
3
2023-10-23 15:01:52+00:00
2k
RF-Tar-Railt/satori-python
src/satori/server/adapter.py
[ { "identifier": "Event", "path": "src/satori/model.py", "snippet": "class Event:\n id: int\n type: str\n platform: str\n self_id: str\n timestamp: datetime\n argv: Optional[ArgvInteraction] = None\n button: Optional[ButtonInteraction] = None\n channel: Optional[Channel] = None\n guild: Optional[Guild] = None\n login: Optional[Login] = None\n member: Optional[Member] = None\n message: Optional[MessageObject] = None\n operator: Optional[User] = None\n role: Optional[Role] = None\n user: Optional[User] = None\n\n @classmethod\n def parse(cls, raw: dict):\n data = {\n \"id\": raw[\"id\"],\n \"type\": raw[\"type\"],\n \"platform\": raw[\"platform\"],\n \"self_id\": raw[\"self_id\"],\n \"timestamp\": datetime.fromtimestamp(int(raw[\"timestamp\"]) / 1000),\n }\n if \"argv\" in raw:\n data[\"argv\"] = ArgvInteraction(**raw[\"argv\"])\n if \"button\" in raw:\n data[\"button\"] = ButtonInteraction(**raw[\"button\"])\n if \"channel\" in raw:\n data[\"channel\"] = Channel.parse(raw[\"channel\"])\n if \"guild\" in raw:\n data[\"guild\"] = Guild.parse(raw[\"guild\"])\n if \"login\" in raw:\n data[\"login\"] = Login.parse(raw[\"login\"])\n if \"member\" in raw:\n data[\"member\"] = Member.parse(raw[\"member\"])\n if \"message\" in raw:\n data[\"message\"] = MessageObject.parse(raw[\"message\"])\n if \"operator\" in raw:\n data[\"operator\"] = User.parse(raw[\"operator\"])\n if \"role\" in raw:\n data[\"role\"] = Role.parse(raw[\"role\"])\n if \"user\" in raw:\n data[\"user\"] = User.parse(raw[\"user\"])\n return cls(**data)\n\n def dump(self):\n res = {\n \"id\": self.id,\n \"type\": self.type,\n \"platform\": self.platform,\n \"self_id\": self.self_id,\n \"timestamp\": int(self.timestamp.timestamp() * 1000),\n }\n if self.argv:\n res[\"argv\"] = self.argv.dump()\n if self.button:\n res[\"button\"] = self.button.dump()\n if self.channel:\n res[\"channel\"] = self.channel.dump()\n if self.guild:\n res[\"guild\"] = self.guild.dump()\n if self.login:\n res[\"login\"] = self.login.dump()\n if self.member:\n res[\"member\"] = self.member.dump()\n if self.message:\n res[\"message\"] = self.message.dump()\n if self.operator:\n res[\"operator\"] = self.operator.dump()\n if self.role:\n res[\"role\"] = self.role.dump()\n if self.user:\n res[\"user\"] = self.user.dump()\n return res" }, { "identifier": "Login", "path": "src/satori/model.py", "snippet": "class Login:\n status: LoginStatus\n user: Optional[User] = None\n self_id: Optional[str] = None\n platform: Optional[str] = None\n\n @classmethod\n def parse(cls, raw: dict):\n data = raw.copy()\n if \"user\" in raw:\n data[\"user\"] = User(**raw[\"user\"])\n data[\"status\"] = LoginStatus(data[\"status\"])\n return cls(**data)\n\n def dump(self):\n res: Dict[str, Any] = {\"status\": self.status.value}\n if self.user:\n res[\"user\"] = self.user.dump()\n if self.self_id:\n res[\"self_id\"] = self.self_id\n if self.platform:\n res[\"platform\"] = self.platform\n return res" }, { "identifier": "Request", "path": "src/satori/server/model.py", "snippet": "class Request(Generic[TA]):\n headers: dict[str, Any]\n action: TA\n params: Any" } ]
from abc import abstractmethod
from typing import Any, AsyncIterator, Dict, List
from launart import Service
from satori.const import Api
from ..model import Event, Login
from .model import Request
1,085
class Adapter(Service):
    @abstractmethod
    def get_platform(self) -> str:
        ...

    @abstractmethod
    def publisher(self) -> AsyncIterator[Event]:
        ...

    @abstractmethod
    def validate_headers(self, headers: Dict[str, Any]) -> bool:
        ...

    @abstractmethod
    def authenticate(self, token: str) -> bool:
        ...

    @abstractmethod
class Adapter(Service):
    @abstractmethod
    def get_platform(self) -> str:
        ...

    @abstractmethod
    def publisher(self) -> AsyncIterator[Event]:
        ...

    @abstractmethod
    def validate_headers(self, headers: Dict[str, Any]) -> bool:
        ...

    @abstractmethod
    def authenticate(self, token: str) -> bool:
        ...

    @abstractmethod
async def get_logins(self) -> List[Login]:
1
2023-10-18 11:09:34+00:00
2k