The dataset has the following columns (value ranges as reported by the dataset viewer):

| Column | Type | Values / range |
| --- | --- | --- |
| `repo_name` | string | length 7–71 |
| `file_path` | string | length 5–118 |
| `context` | list | records of `identifier`, `path`, `snippet` |
| `import_statement` | string | length 45–12.5k |
| `token_num` | int64 | 641–99.4k |
| `cropped_code` | string | length 44–17k |
| `all_code` | string | length 43–754k |
| `next_line` | string | length 2–330 |
| `gold_snippet_index` | int64 | 0–68 |
| `created_at` | string | length 25 (fixed) |
| `level` | string | 9 classes |
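Each example pairs a source file's imports and preceding code (`import_statement`, `cropped_code`) with the ground-truth `next_line` to be completed, plus a list of cross-file `context` snippets; `gold_snippet_index` appears to index into that list. Below is a minimal sketch of loading and inspecting one example with the Hugging Face `datasets` library; the dataset id `org/repo-level-completion` is a hypothetical placeholder for the actual hub id, and the `gold_snippet_index` lookup assumes it points into `context`:

```python
from datasets import load_dataset

# Hypothetical hub id -- substitute the real dataset repository name.
ds = load_dataset("org/repo-level-completion", split="train")

example = ds[0]
print(example["repo_name"], example["file_path"])  # source repo and file
print(example["next_line"])                        # ground-truth completion line

# Assumption: gold_snippet_index selects the context snippet the
# completion depends on.
gold = example["context"][example["gold_snippet_index"]]
print(gold["identifier"], gold["path"])
```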
---

**repo_name:** `zju3dv/nr_in_a_room`

**file_path:** `optim/patch_perceptual.py`

**context:**

```json
[ { "identifier": "perceptual_model", "path": "models/perceptual_model.py", "snippet": "class VGG16_for_Perceptual(nn.Module):\nclass CLIP_for_Perceptual(nn.Module):\n def __init__(self, requires_grad=False, n_layers=[2, 4, 14, 21]):\n def forward(self, x):\n def perceptual_loss(\n self,\n pred: torch.Tensor,\n gt: torch.Tensor,\n low_level: bool = True,\n ):\n def mse_loss(source, target):\n def __init__(self, requires_grad=False, model_name=\"ViT-B/32\"):\n def perceptual_loss(self, pred: torch.Tensor, gt: torch.Tensor, **kwargs):\n def mse_loss(source, target):\n def sim_loss(source, target):\n def compute_img_embedding(self, img: torch.Tensor):\ndef get_perceptual_loss(\n perceptual_net: Union[VGG16_for_Perceptual, CLIP_for_Perceptual],\n pred: torch.Tensor,\n gt: torch.Tensor,\n low_level: bool = True,\n):" }, { "identifier": "get_perceptual_loss", "path": "models/perceptual_model.py", "snippet": "def get_perceptual_loss(\n perceptual_net: Union[VGG16_for_Perceptual, CLIP_for_Perceptual],\n pred: torch.Tensor,\n gt: torch.Tensor,\n low_level: bool = True,\n):\n \"\"\"\n perceptual loss is suitable for whole images, not sampled rays.\n pred: [B, 3, H, W]\n gt: [B, 3, H, W]\n \"\"\"\n assert pred.shape == gt.shape\n if pred.shape[2:4] != torch.Size((244, 244)):\n norm = Normalize(\n (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)\n )\n # pred = norm(nn.Upsample((244, 244), mode=\"bilinear\", align_corners=True)(pred))\n # gt = norm(nn.Upsample((244, 244), mode=\"bilinear\", align_corners=True)(gt))\n pred = norm(pred)\n gt = norm(gt)\n\n return perceptual_net.perceptual_loss(pred, gt, low_level=low_level)" }, { "identifier": "VGG16_for_Perceptual", "path": "models/perceptual_model.py", "snippet": "class VGG16_for_Perceptual(nn.Module):\n def __init__(self, requires_grad=False, n_layers=[2, 4, 14, 21]):\n super(VGG16_for_Perceptual, self).__init__()\n from torchvision import models\n\n vgg_pretrained_features = models.vgg16(\n pretrained=True\n ).features # TODO: check requires_grad\n\n self.slice0 = nn.Sequential()\n self.slice1 = nn.Sequential()\n self.slice2 = nn.Sequential()\n self.slice3 = nn.Sequential()\n\n for x in range(n_layers[0]): # relu1_1\n self.slice0.add_module(str(x), vgg_pretrained_features[x])\n for x in range(n_layers[0], n_layers[1]): # relu1_2\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(n_layers[1], n_layers[2]): # relu3_2\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n\n for x in range(n_layers[2], n_layers[3]): # relu4_2\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n # TODO: normalize\n\n h0 = self.slice0(x)\n h1 = self.slice1(h0)\n h2 = self.slice2(h1)\n h3 = self.slice3(h2)\n\n return h0, h1, h2, h3\n\n def perceptual_loss(\n self,\n pred: torch.Tensor,\n gt: torch.Tensor,\n low_level: bool = True,\n ):\n def mse_loss(source, target):\n return torch.mean((source - target) ** 2)\n\n perceptual_loss = 0\n\n if low_level:\n pred_0 = self.slice0(pred)\n gt_0 = self.slice0(gt)\n perceptual_loss += mse_loss(pred_0, gt_0)\n else:\n pred_0, pred_1, pred_2, pred_3 = self.forward(pred)\n gt_0, gt_1, gt_2, gt_3 = self.forward(gt)\n\n perceptual_loss += mse_loss(pred_0, gt_0)\n perceptual_loss += mse_loss(pred_1, gt_1)\n perceptual_loss += mse_loss(pred_2, gt_2)\n perceptual_loss += mse_loss(pred_3, gt_3)\n\n return perceptual_loss" } ]
```

**import_statement:**

```python
import torch
import numpy as np
import cv2
from models import perceptual_model
from models.perceptual_model import get_perceptual_loss, VGG16_for_Perceptual
from typing import List, Optional, Any, Dict, Union
```

**token_num:** 1,380

**cropped_code:**

```python
# import lpips
# loss_fn_vgg = lpips.LPIPS(net="vgg").cuda()


def get_mask_bbox(mask):
    # crop image
    true_indices = np.nonzero(mask)
    min_h, min_w = np.min(true_indices[0]), np.min(true_indices[1])
    max_h, max_w = np.max(true_indices[0]), np.max(true_indices[1])
    # print(min_h, min_w)
    # print(max_h, max_w)
    # img = img[min_h:max_h+1,min_w:max_w+1,:]
    return min_h, max_h, min_w, max_w


def patch_perceptual_loss(
```
**next_line:** `perceptual_net: VGG16_for_Perceptual,`

**gold_snippet_index:** 2

**created_at:** 2023-10-15 08:41:29+00:00

**level:** 2k
---

**repo_name:** `ShramanPramanick/VoLTA`

**file_path:** `Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py`

**context:**

```json
[ { "identifier": "make_roi_mask_feature_extractor", "path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py", "snippet": "def make_roi_mask_feature_extractor(cfg):\n func = _ROI_MASK_FEATURE_EXTRACTORS[cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR]\n return func(cfg)" }, { "identifier": "make_roi_mask_predictor", "path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py", "snippet": "def make_roi_mask_predictor(cfg):\n func = _ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]\n return func(cfg)" }, { "identifier": "make_roi_mask_post_processor", "path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py", "snippet": "def make_roi_mask_post_processor(cfg):\n if cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS:\n mask_threshold = cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD\n masker = Masker(threshold=mask_threshold, padding=1)\n else:\n masker = None\n mdetr_style_aggregate_class_num = cfg.TEST.MDETR_STYLE_AGGREGATE_CLASS_NUM\n mask_post_processor = MaskPostProcessor(\n masker, mdetr_style_aggregate_class_num, vl_version=cfg.MODEL.ROI_MASK_HEAD.PREDICTOR.startswith(\"VL\")\n )\n return mask_post_processor" }, { "identifier": "make_roi_mask_loss_evaluator", "path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py", "snippet": "def make_roi_mask_loss_evaluator(cfg):\n matcher = Matcher(\n cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,\n cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,\n allow_low_quality_matches=False,\n )\n\n loss_evaluator = MaskRCNNLossComputation(\n matcher, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION, vl_version=cfg.MODEL.ROI_MASK_HEAD.PREDICTOR.startswith(\"VL\")\n )\n\n return loss_evaluator" } ]
```

**import_statement:**

```python
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from .inference import make_roi_mask_post_processor
from .loss import make_roi_mask_loss_evaluator
```

**token_num:** 801

**cropped_code:**

```python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.


def keep_only_positive_boxes(boxes):
    """
    Given a set of BoxList containing the `labels` field,
    return a set of BoxList for which `labels > 0`.

    Arguments:
        boxes (list of BoxList)
    """
    assert isinstance(boxes, (list, tuple))
    assert isinstance(boxes[0], BoxList)
    assert boxes[0].has_field("labels")
    positive_boxes = []
    positive_inds = []
    num_boxes = 0
    for boxes_per_image in boxes:
        labels = boxes_per_image.get_field("labels")
        inds_mask = labels > 0
        inds = inds_mask.nonzero().squeeze(1)
        positive_boxes.append(boxes_per_image[inds])
        positive_inds.append(inds_mask)
    return positive_boxes, positive_inds


class ROIMaskHead(torch.nn.Module):
    def __init__(self, cfg):
        super(ROIMaskHead, self).__init__()
        self.cfg = cfg.clone()
        self.feature_extractor = make_roi_mask_feature_extractor(cfg)
        self.predictor = make_roi_mask_predictor(cfg)
```
**next_line:** `self.post_processor = make_roi_mask_post_processor(cfg)`

**gold_snippet_index:** 2

**created_at:** 2023-10-23 04:07:08+00:00

**level:** 2k
---

**repo_name:** `earthcube-lab/textnoisr`

**file_path:** `tests/textnoisr/test_noise_dataset.py`

**context:**

```json
[ { "identifier": "noise", "path": "textnoisr/noise.py", "snippet": "class CharNoiseAugmenter:\n _AVAILABLE_ACTIONS = (\"insert\", \"swap\", \"substitute\", \"delete\")\n def __init__(\n self,\n noise_level: float,\n actions: tuple[str, ...] = _AVAILABLE_ACTIONS,\n character_set: tuple[str, ...] = tuple(string.ascii_letters),\n seed: int | None = None,\n natural_language_swap_correction: float = 1.052,\n ) -> None:\n def _random_success(self, p: float) -> bool:\n def _random_char(self, p: float, character_set: tuple[str, ...]) -> str:\n def insert_random_chars(self, text: str, p: float) -> str:\n def _choose_another_character(self, char):\n def substitute_random_chars(self, text: str, p: float) -> str:\n def delete_random_chars(self, text: str, p: float) -> str:\n def consecutive_swap_random_chars(self, text: str, p: float) -> str:\n def add_noise(self, text: str | list[str]) -> str | list[str]:" }, { "identifier": "noise_dataset", "path": "textnoisr/noise_dataset.py", "snippet": "def _add_noise_to_example(\n example: dict,\n noise_augmenter: noise.CharNoiseAugmenter,\n feature_name: str,\n) -> dict:\ndef add_noise(\n dataset: Dataset,\n noise_augmenter: noise.CharNoiseAugmenter,\n feature_name: str = \"tokens\",\n **kwargs: Any,\n) -> Dataset:" } ]
```

**import_statement:**

```python
from math import isclose
from datasets import load_dataset as hf_load_dataset
from evaluate import load
from textnoisr import noise, noise_dataset
import pytest
```

**token_num:** 851

**cropped_code:**

```python
ABS_TOLERANCE = 1.5e-2
REL_TOLERANCE = 1.5e-2


@pytest.fixture()
def dataset100_text():
    return hf_load_dataset("rotten_tomatoes", split="train")


@pytest.fixture()
def dataset100(dataset100_text):
    def split_tokens(item):
        item["tokens"] = item["text"].split(" ")
        return item

    return dataset100_text.map(split_tokens)


cer = load("cer")


@pytest.mark.nightly
@pytest.mark.parametrize(
    "noise_level,actions",
    [
        (0.001, ["substitute"]),
        (0.001, ["insert"]),
        (0.001, ["delete"]),
        (0.001, ["swap"]),
        (0.001, ["delete", "insert", "substitute", "swap"]),
        (0.01, ["substitute"]),
        (0.01, ["insert"]),
        (0.01, ["delete"]),
        (0.01, ["swap"]),
        (0.01, ["delete", "insert", "substitute", "swap"]),
        (0.1, ["substitute"]),
        (0.1, ["insert"]),
        (0.1, ["delete"]),
        (0.1, ["swap"]),
        (0.1, ["delete", "insert", "substitute", "swap"]),
        (0.15, ["substitute"]),
        (0.15, ["insert"]),
        (0.15, ["delete"]),
        (0.15, ["swap"]),
        (0.15, ["delete", "insert", "substitute", "swap"]),
        (0.20, ["substitute"]),
        (0.20, ["insert"]),
        (0.20, ["delete"]),
        (0.20, ["swap"]),
        (0.20, ["delete", "insert", "substitute", "swap"]),
    ],
)
@pytest.mark.filterwarnings("ignore:jiwer.compute_measures")
def test_add_noise_on_split_into_words(dataset100, noise_level, actions):
    noised_dataset = noise_dataset.add_noise(
        dataset100,
```
**next_line:** `noise.CharNoiseAugmenter(noise_level=noise_level, actions=actions, seed=42),`

**gold_snippet_index:** 0

**created_at:** 2023-10-18 19:28:34+00:00

**level:** 2k
---

**repo_name:** `oven-lab/tuya_cloud_map_extractor`

**file_path:** `custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/tuya.py`

**context:**

```json
[ { "identifier": "ServerError", "path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py", "snippet": "class ServerError(Exception):\n pass" }, { "identifier": "ClientIDError", "path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py", "snippet": "class ClientIDError(Exception):\n pass" }, { "identifier": "ClientSecretError", "path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py", "snippet": "class ClientSecretError(Exception):\n pass" }, { "identifier": "DeviceIDError", "path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py", "snippet": "class DeviceIDError(Exception):\n pass" } ]
```

**import_statement:**

```python
import datetime
import hmac
import requests
from .const import ServerError, ClientIDError, ClientSecretError, DeviceIDError
```

**token_num:** 749

**cropped_code:**

```python
def _get_sign(client_id: str, secret_key: str, url: str, t: int, token: str):
    empty_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
    signstr = client_id + token + t + "GET" + "\n" + empty_hash + "\n" + "" + "\n" + url
    return hmac.new(
        secret_key.encode(), msg=signstr.encode(), digestmod="sha256"
    ).hexdigest()


def tuyarequest(
    server: str, url: str, client_id: str, secret_key: str, token=""
) -> dict:
    """Handles authentication with provided token and makes request to tuya servers."""
    t = str(int(round(datetime.datetime.timestamp(datetime.datetime.now()) * 1000, 0)))
    sign = _get_sign(
        client_id=client_id, secret_key=secret_key, url=url, t=t, token=token
    )
    headers = {
        "sign_method": "HMAC-SHA256",
        "client_id": client_id,
        "t": t,
        "sign": sign.upper(),
    }
    if token != "":
        headers["access_token"] = token
    return requests.get(url=server + url, headers=headers, timeout=2.5).json()


def get_download_link(
    server: str, client_id: str, secret_key: str, device_id: str
) -> str:
    """Gets the download link of the real time map."""
    url = "/v1.0/token?grant_type=1"
    response = tuyarequest(
        server=server, url=url, client_id=client_id, secret_key=secret_key
    )

    if not response["success"]:
        if response["msg"] == "clientId is invalid":
            raise ClientIDError("Invalid Client ID")
        elif response["msg"] == "sign invalid":
            raise ClientSecretError("Invalid Client Secret")
        elif "cross-region access is not allowed" in response["msg"]:
            raise ServerError("Wrong server region. Cross-region access is not allowed.")
        else:
            raise RuntimeError("Request failed - Response: ", response)

    access_token = response["result"]["access_token"]
    url = "/v1.0/users/sweepers/file/" + device_id + "/realtime-map"
    response = tuyarequest(
        server=server,
        url=url,
        client_id=client_id,
        secret_key=secret_key,
        token=access_token,
    )

    if not response["success"]:
        if response["msg"] == "permission deny":
```
**next_line:** `raise DeviceIDError("Invalid Device ID")`

**gold_snippet_index:** 3

**created_at:** 2023-10-22 10:48:25+00:00

**level:** 2k
---

**repo_name:** `mlbio-epfl/hume`

**file_path:** `hume.py`

**context:**

```json
[ { "identifier": "parse_args", "path": "argparser.py", "snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--phi1_path', \n type=str,\n required=True,\n help=\"Path to the embeddings in first representation space\")\n\n parser.add_argument('--phi2_path',\n type=str,\n required=True,\n help=\"Path to the embeddings in second representation space\")\n\n parser.add_argument('--phi1_path_val',\n type=str,\n help=\"Path to the embeddings in first representation space to compute metrics.\"\n \" If not provided phi1_path will be also used for evaluation.\")\n\n parser.add_argument('--phi2_path_val',\n type=str,\n help=\"Path to the embeddings in second representation space to compute metrics.\"\n \" If not provided phi2_path will be also used for evaluation.\")\n\n parser.add_argument('--gt_labels_path',\n type=str,\n required=True,\n help=\"Path to ground truth labeling to compute metrics\")\n\n parser.add_argument('--k',\n type=int,\n default=10,\n help=\"Number of classes\")\n\n parser.add_argument('--inner_lr',\n type=float,\n default=0.001,\n help=\"Step size for the inner optimization\")\n\n parser.add_argument('--outer_lr',\n type=float,\n default=0.001,\n help=\"Step size for the task encoder's updates\")\n\n parser.add_argument('--tau',\n type=float,\n default=0.1,\n help=\"Temperature hyperparameter\")\n\n parser.add_argument('--H_reg',\n type=float,\n default=10.,\n help=\"Entropy regularization coefficient\")\n\n parser.add_argument('--num_iters',\n type=int,\n default=1000,\n help=\"Number of training iterations\")\n\n parser.add_argument('--adaptation_steps',\n type=int,\n default=300,\n help=\"Number of inner iterations to fit linear model\")\n\n parser.add_argument('--num_subsets',\n type=int,\n default=20,\n help=\"Number of (Xtr, Xte) subsets for averaging HUME's loss\")\n\n parser.add_argument('--subset_size',\n type=int,\n default=10000,\n help=\"Size of union of each (Xtr, Xte) subset\")\n\n parser.add_argument('--train_fraction',\n type=float,\n default=0.9,\n help=\"Fraction of args.subset_size to define size of Xtr\")\n\n parser.add_argument('--no_anneal',\n dest='anneal',\n action='store_false',\n help=\"Turn off temperature and learning rate annealing\")\n\n parser.add_argument('--no_rand_init',\n dest='rand_init',\n action='store_false',\n help=\"Start from random inner w0 at each outer iter or generate random w0 once\")\n\n parser.add_argument('--device',\n type=str,\n default=\"cuda\",\n help=\"Use cuda or cpu\")\n\n parser.add_argument('--exp_path',\n type=str,\n default=\"./linear_tasks/\",\n help=\"Path to save experiment's results\")\n\n parser.add_argument('--save_all',\n action='store_true',\n help=\"If used then task_encoder is saved at each iteration\")\n\n parser.add_argument('--seed',\n type=int,\n default=42,\n help='Random seed')\n \n return parser.parse_args(args)" }, { "identifier": "Sparsemax", "path": "activations.py", "snippet": "class Sparsemax(torch.nn.Module):\n\n def __init__(self, dim=0):\n self.dim = dim\n super(Sparsemax, self).__init__()\n\n def forward(self, input):\n return sparsemax(input, self.dim)" }, { "identifier": "fix_seed", "path": "utils.py", "snippet": "def fix_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "get_cv_score", "path": "utils.py", "snippet": "def get_cv_score(X, y):\n cv = KFold(n_splits=10, random_state=1, shuffle=True)\n clf = LogisticRegression(penalty=None)\n scores = cross_val_score(clf, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n return np.mean(scores)" }, { "identifier": "check_both_none_or_not_none", "path": "utils.py", "snippet": "def check_both_none_or_not_none(arg1, arg2):\n return (arg1 is None and arg2 is None) or (arg1 is not None and arg2 is not None)" }, { "identifier": "cluster_acc", "path": "metrics.py", "snippet": "def cluster_acc(y_pred, y_true, return_matched=False):\n \"\"\"\n Calculate clustering accuracy. Require scipy installed\n # Arguments\n y: true labels, numpy.array with shape `(n_samples,)`\n y_pred: predicted labels, numpy.array with shape `(n_samples,)`\n # Return\n accuracy, in [0,1]\n \"\"\"\n y_true = y_true.astype(np.int64)\n assert y_pred.size == y_true.size\n D = max(y_pred.max(), y_true.max()) + 1\n w = np.zeros((D, D), dtype=np.int64)\n for i in range(y_pred.size):\n w[y_pred[i], y_true[i]] += 1\n row_ind, col_ind = linear_sum_assignment(w.max() - w)\n\n if return_matched:\n matched = np.array(list(map(lambda i: col_ind[i], y_pred)))\n return w[row_ind, col_ind].sum() / y_pred.size, matched\n else:\n return w[row_ind, col_ind].sum() / y_pred.size" }, { "identifier": "cluster_ari", "path": "metrics.py", "snippet": "def cluster_ari(y_pred, y_true):\n \"\"\"\n Calculate adjusted rand index. Require scikit-learn installed\n # Arguments\n y: true labels, numpy.array with shape `(n_samples,)`\n y_pred: predicted labels, numpy.array with shape `(n_samples,)`\n # Return\n ARI, in [0,1]\n \"\"\"\n return adjusted_rand_score(y_true, y_pred)" } ]
```
**import_statement:**

```python
import os
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import learn2learn as l2l
import numpy as np
from tqdm import tqdm
from argparser import parse_args
from activations import Sparsemax
from utils import fix_seed, get_cv_score, check_both_none_or_not_none
from metrics import cluster_acc, cluster_ari
```

**token_num:** 1,573

**cropped_code:**

```python
def run(args=None):
    args = parse_args(args)
    device = torch.device(args.device)
    fix_seed(args.seed)

    if not os.path.exists(args.exp_path):
        os.makedirs(args.exp_path)

    phi1 = np.load(args.phi1_path).astype(np.float32)
    phi2 = np.load(args.phi2_path).astype(np.float32)
```
**next_line:** `assert check_both_none_or_not_none(args.phi1_path_val, args.phi2_path_val)`

**gold_snippet_index:** 4

**created_at:** 2023-10-20 15:32:06+00:00

**level:** 2k
---

**repo_name:** `MaxDude132/django-register-field`

**file_path:** `tests/models.py`

**context:**

```json
[ { "identifier": "Register", "path": "django_register/base.py", "snippet": "class Register:\n def __init__(self):\n self._key_to_class = {}\n self._class_to_key = {}\n\n def register(self, klass, db_key=None):\n if db_key is None:\n try:\n db_key = klass.label\n except AttributeError:\n raise ValueError(\n _(\n \"The class {klass} does not have a label. Define \"\n \"one or pass a db_key to be used as database value.\"\n ).format(klass=klass)\n )\n\n if db_key in self._key_to_class:\n raise ValueError(_(\"Key {key} already registered.\").format(key=db_key))\n\n if klass in self._class_to_key:\n raise ValueError(_(\"Class {klass} already registered.\").format(klass=klass))\n\n self._key_to_class[db_key] = klass\n self._class_to_key[klass] = db_key\n\n return klass\n\n def from_key(self, value):\n try:\n return self._key_to_class[value]\n except (KeyError, TypeError):\n raise ValidationError(\n _(\"Value {value} not a registered key.\").format(value=value)\n )\n\n def from_class(self, value):\n try:\n return self._class_to_key[value]\n except KeyError:\n raise ValidationError(\n _(\"Value {value} not a registered class.\").format(value=value)\n )\n\n def get_key(self, value):\n try:\n self.from_key(value)\n except ValidationError:\n return self.from_class(value)\n\n return value\n\n def get_class(self, value):\n try:\n self.from_class(value)\n except ValidationError:\n return self.from_key(value)\n\n return value\n\n @property\n def max_length(self):\n if self._key_to_class:\n return max(len(key) for key in self._key_to_class)\n\n @property\n def choices(self):\n return [\n (k, self._get_verbose_name(v, k)) for k, v in self._key_to_class.items()\n ]\n\n @property\n def flatchoices(self):\n return [\n (v, self._get_verbose_name(v, k)) for k, v in self._key_to_class.items()\n ]\n\n def _get_verbose_name(self, klass, key):\n return getattr(klass, \"verbose_name\", key.replace(\"_\", \" \").title())\n\n def __iter__(self):\n return iter(self._key_to_class.values())" }, { "identifier": "RegisterChoices", "path": "django_register/base.py", "snippet": "class RegisterChoices(metaclass=RegisterChoicesMeta):\n def __new__(cls, klass):\n return cls.register.get_class(klass)" }, { "identifier": "RegisterField", "path": "django_register/base.py", "snippet": "class RegisterField(models.CharField):\n description = _(\"Store a string, return the associated class\")\n\n def __init__(self, *args, **kwargs):\n if \"register\" not in kwargs and \"choices\" not in kwargs:\n raise ValueError(_(\"You must provide choices to the RegisterField.\"))\n\n if \"register\" not in kwargs and not hasattr(kwargs[\"choices\"], \"register\"):\n raise ValueError(_(\"Choices must be a RegisterChoices instance.\"))\n\n # When building the migrations, the register cannot be in the choices.\n # It will be passed individually, so we take it from there.\n self.register: Register = (\n kwargs.pop(\"register\")\n if \"register\" in kwargs\n else kwargs[\"choices\"].register\n )\n\n if \"choices\" not in kwargs:\n kwargs[\"choices\"] = self.register.choices\n\n if \"max_length\" not in kwargs and (max_length := self.register.max_length):\n kwargs[\"max_length\"] = max_length\n\n if \"default\" in kwargs:\n try:\n kwargs[\"default\"] = self.register.get_key(kwargs[\"default\"])\n except ValidationError:\n pass\n\n super().__init__(*args, **kwargs)\n\n def from_db_value(self, value, expression, connection):\n if not value:\n return value\n\n return self.register.get_class(value)\n\n def get_default(self):\n default = super().get_default()\n\n if default:\n return self.register.get_class(default)\n\n return default\n\n def to_python(self, value):\n if not value:\n return value\n\n return self.register.get_class(value)\n\n def get_prep_value(self, value):\n if not value:\n return value\n\n return self.register.get_key(value)\n\n def value_from_object(self, obj):\n value = super().value_from_object(obj)\n return self.get_prep_value(value)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs.pop(\"choices\", None)\n kwargs[\"register\"] = self.register\n return name, path, args, kwargs\n\n def clean(self, value, model_instance):\n \"\"\"\n We need to override clean because it runs the validations on the\n Python object instead of on the database string.\n \"\"\"\n value = self.get_prep_value(value)\n self.validate(value, model_instance)\n self.run_validators(value)\n return self.to_python(value)\n\n def _get_flatchoices(self):\n return self.register.flatchoices\n\n flatchoices = property(_get_flatchoices)\n\n def _register_choices(self):\n return self.register.choices\n\n def _register_choices_set(self, value):\n return\n\n choices = property(_register_choices, _register_choices_set)\n _choices = property(_register_choices, _register_choices_set)" } ]
```
**import_statement:**

```python
from dataclasses import dataclass
from django.db import models
from django_register import Register, RegisterChoices, RegisterField
```

**token_num:** 1,542

**cropped_code:**

```python
# Standard libraries
# Django
# django_register


@dataclass(unsafe_hash=True)
class CountryInfo:
    population: int
    capital: str


class CountryChoices(RegisterChoices):
    CANADA = CountryInfo(population=37_742_154, capital="Ottawa")
    FRANCE = CountryInfo(population=65_273_511, capital="Paris")
    GERMANY = CountryInfo(population=83_783_942, capital="Berlin")
    UNITED_STATES = CountryInfo(population=331_900_000, capital="Washington")


@dataclass(unsafe_hash=True)
class ContinentInfo:
    label: str


@dataclass(unsafe_hash=True)
class FoodInfo:
    verbose_name: str
```
**next_line:** `food_register = Register()`

**gold_snippet_index:** 0

**created_at:** 2023-10-23 18:11:08+00:00

**level:** 2k
---

**repo_name:** `hsouri/bob-classification`

**file_path:** `medical_chexpert/util/datasets.py`

**context:**

```json
[ { "identifier": "GaussianBlur", "path": "medical_chexpert/util/custom_transforms.py", "snippet": "class GaussianBlur(object):\n \"\"\"Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709\"\"\"\n\n def __init__(self, sigma=[.1, 2.]):\n self.sigma = sigma\n\n def __call__(self, x):\n sigma = random.uniform(self.sigma[0], self.sigma[1])\n x = x.filter(ImageFilter.GaussianBlur(radius=sigma))\n return x" }, { "identifier": "new_data_aug_generator", "path": "medical_chexpert/util/augment.py", "snippet": "def new_data_aug_generator(args=None, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):\n img_size = args.input_size\n remove_random_resized_crop = args.src\n # mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n primary_tfl = []\n scale = (0.08, 1.0)\n interpolation = 'bicubic'\n if remove_random_resized_crop:\n primary_tfl = [\n transforms.Resize(img_size, interpolation=3),\n transforms.RandomCrop(img_size, padding=4, padding_mode='reflect'),\n transforms.RandomHorizontalFlip()\n ]\n else:\n primary_tfl = [\n RandomResizedCropAndInterpolation(\n img_size, scale=scale, interpolation=interpolation),\n transforms.RandomHorizontalFlip()\n ]\n\n secondary_tfl = [transforms.RandomChoice([gray_scale(p=1.0),\n Solarization(p=1.0),\n GaussianBlur(p=1.0)])]\n\n # if args.color_jitter is not None and not args.color_jitter == 0:\n # secondary_tfl.append(transforms.ColorJitter(args.color_jitter, args.color_jitter, args.color_jitter))\n\n final_tfl = [\n transforms.ToTensor(),\n transforms.Normalize(\n mean=torch.tensor(mean),\n std=torch.tensor(std))\n ]\n return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)" } ]
```

**import_statement:**

```python
import os
import PIL
import torch
from torchvision import datasets, transforms
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from util.dataloader_med import RetinaDataset, Augmentation, Node21, ChestX_ray14, Covidx, CheXpert
from .custom_transforms import GaussianBlur
from .augment import new_data_aug_generator
```

**token_num:** 897

**cropped_code:**

```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------


def build_dataset(is_train, args):
    transform = build_transform(is_train, args)

    root = os.path.join(args.data_path, 'train' if is_train else 'val')
    dataset = datasets.ImageFolder(root, transform=transform)

    print(dataset)

    return dataset


def build_dataset_chest_xray(split, args):
    is_train = (split == 'train')
    # transform = build_transform(is_train, args)
    if args.build_timm_transform:
        transform = build_transform(is_train, args)
    else:
        if is_train:
            if args.aug_strategy == 'simclr_with_randrotation':
                print(args.aug_strategy)
                transform = transforms.Compose([
                    transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
                    transforms.RandomApply([
                        transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)  # not strengthened
                    ], p=0.8),
                    transforms.RandomRotation(degrees=(0, 45)),
                    transforms.RandomGrayscale(p=0.2),
```
**next_line:** `transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),`

**gold_snippet_index:** 0

**created_at:** 2023-10-20 16:28:17+00:00

**level:** 2k
---

**repo_name:** `Salz0/telegram_flea`

**file_path:** `middlewares/message_logging_middleware.py`

**context:**

```json
[ { "identifier": "Message", "path": "models.py", "snippet": "class Message(BaseModel):\n \"\"\"The model for the Telegram message.\"\"\"\n\n from_user: fields.ForeignKeyRelation[User] = fields.ForeignKeyField(\n \"bot.User\", related_name=\"messages\"\n )\n id = fields.IntField(pk=True, generated=True)\n\n # In Telegram, `message_id` is unique only **within a chat**.\n message_id = fields.BigIntField() # for the sake of safety, this is a `BigIntField`\n\n # TODO: [3/20/2023 by Mykola] Make this a foreign key to the Chat model\n chat_id = fields.BigIntField()\n\n reply_to_message: fields.ForeignKeyRelation[Message] = fields.ForeignKeyField(\n \"bot.Message\", related_name=\"replies\", null=True\n )\n\n content_type = fields.TextField(null=True)\n text = fields.TextField(null=True)\n\n date = fields.DatetimeField()\n is_handled = fields.BooleanField(default=False)\n content = fields.BinaryField(null=True)\n status = fields.CharField(max_length=32, null=True)\n\n complete_message_json = fields.JSONField(null=True)\n\n replies: fields.BackwardFKRelation[Message]" }, { "identifier": "User", "path": "models.py", "snippet": "class User(BaseModel):\n \"\"\"\n The model for the Telegram user.\n\n This model stores all the information about the user.\n It is also used to store all the authentication-related information.\n \"\"\"\n\n id = fields.BigIntField(pk=True, generated=False)\n\n username = fields.CharField(max_length=32, null=True)\n\n first_name = fields.TextField(null=True)\n last_name = fields.TextField(null=True)\n\n phone_number = fields.CharField(max_length=14, null=True)\n language_code = fields.CharField(max_length=2, null=True)\n is_bot = fields.BooleanField(default=False)\n\n start_payload = fields.TextField(null=True)\n\n is_active = fields.BooleanField(default=True)\n has_bot_blocked = fields.BooleanField(default=False)\n is_beta = fields.BooleanField(default=False)\n is_deleted = fields.BooleanField(default=False)\n\n is_admin = fields.BooleanField(default=False)\n is_staff_member = fields.BooleanField(default=False)\n\n messages: fields.ReverseRelation[Message]\n\n @property\n def full_name(self):\n \"\"\"Get the full name of the user.\"\"\"\n if not self.last_name:\n return self.first_name\n\n return f\"{self.first_name} {self.last_name}\"" }, { "identifier": "logger", "path": "utils/loguru_logging.py", "snippet": "class InterceptHandler(logging.Handler):\n def emit(self, record):" } ]
```

**import_statement:**

```python
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from arrow import arrow
from models import Message, User
from utils.loguru_logging import logger
```

**token_num:** 977

**cropped_code:**

```python
"""The middleware to log all the incoming messages into the database."""


class MessagesLoggingMiddleware(BaseMiddleware):
    """The middleware class, inherited from `BaseMiddleware`."""

    @staticmethod
    async def _save_message(msg: types.Message) -> Message:
        """Save the message into the database."""
        if msg.reply_to_message:
            reply_to_message = await Message.get_or_none(
                message_id=msg.reply_to_message.message_id,
                chat_id=msg.chat.id,
                # `message_id` is not unique. For details, see `models.py`.
            )
        else:
            reply_to_message = None

        return await Message.create(
            # Primary fields
            message_id=msg.message_id,
            from_user_id=msg.from_user.id,
            chat_id=msg.chat.id,
            text=msg.text,
            date=msg.date,
            # Other fields that might be useful
            reply_to_message=reply_to_message,
            content_type=msg.content_type,
            complete_message_json=msg.as_json(),
        )

    async def on_pre_process_message(self, msg: types.Message, *_, **__):
        """Save the message into the database _before_ processing it."""
        user_data: dict = msg.from_user.to_python()
        try:
            # Create a user first, if not exist. Otherwise, we are unable to create a message
            # with a foreign key.
            user, created = await User.get_or_create(id=user_data.pop("id"), defaults=user_data)
            if created:
                if payload := msg.get_args():
                    user.start_payload = payload
                    await user.save()
```
"""The middleware to log all the incoming messages into the database.""" class MessagesLoggingMiddleware(BaseMiddleware): """The middleware class, inherited from `BaseMiddleware`.""" @staticmethod async def _save_message(msg: types.Message) -> Message: """Save the message into the database.""" if msg.reply_to_message: reply_to_message = await Message.get_or_none( message_id=msg.reply_to_message.message_id, chat_id=msg.chat.id, # `message_id` is not unique. For details, see `models.py`. ) else: reply_to_message = None return await Message.create( # Primary fields message_id=msg.message_id, from_user_id=msg.from_user.id, chat_id=msg.chat.id, text=msg.text, date=msg.date, # Other fields that might be useful reply_to_message=reply_to_message, content_type=msg.content_type, complete_message_json=msg.as_json(), ) async def on_pre_process_message(self, msg: types.Message, *_, **__): """Save the message into the database _before_ processing it.""" user_data: dict = msg.from_user.to_python() try: # Create a user first, if not exist. Otherwise, we are unable to create a message # with a foreign key. user, created = await User.get_or_create(id=user_data.pop("id"), defaults=user_data) if created: if payload := msg.get_args(): user.start_payload = payload await user.save()
**next_line:** `logger.info(`

**gold_snippet_index:** 2

**created_at:** 2023-10-19 17:28:55+00:00

**level:** 2k
---

**repo_name:** `RobertCsordas/moe_layer`

**file_path:** `triton_src/moe_layer/moe_layer_simple.py`

**context:**

```json
[ { "identifier": "cvmm", "path": "triton_src/moe_layer/cvmm.py", "snippet": "def cvmm(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):\n if not isinstance(sel, CVMMSel):\n sel = cvmm_prepare_sel(sel, keys.shape[0])\n\n return CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, sel.reduction_weight)" }, { "identifier": "cvmm_prepare_sel2", "path": "triton_src/moe_layer/cvmm.py", "snippet": "def cvmm_prepare_sel2(sel: torch.Tensor, w: Optional[torch.Tensor] = None) -> CVMMSel:\n # Has multiple selections for each batch element\n n_per_batch = sel.shape[-1]\n\n # indices = torch.arange(sel.nelement() // n_per_batch, device=sel.device, dtype=torch.int32)\n # indices = indices.repeat_interleave(n_per_batch).flatten()\n\n fsel = sel.flatten()\n ssel, sel_index = fsel.sort()\n\n # in_index = indices[sel_index]\n in_index = sel_index // n_per_batch\n\n return CVMMSel(sel, ssel.view_as(sel), in_index, sel_index, w)" }, { "identifier": "CVMMSel", "path": "triton_src/moe_layer/cvmm.py", "snippet": "class CVMMSel:\n raw_sel: torch.Tensor\n sel: torch.Tensor\n sel_index: torch.Tensor\n out_index: Optional[torch.Tensor] = None\n reduction_weight: Optional[torch.Tensor] = None\n\n def clone(self) -> 'CVMMSel':\n return CVMMSel(self.raw_sel, self.sel, self.sel_index, self.out_index, self.reduction_weight)" } ]
```

**import_statement:**

```python
import torch
import torch.distributed
import torch.nn.functional as F
import math
from typing import Tuple, List, Optional
from .cvmm import cvmm, cvmm_prepare_sel2, CVMMSel
```

**token_num:** 1,330

**cropped_code:**

```python
def dist_logsumexp(x: torch.Tensor, dim: int, keepdim: bool = False) -> torch.Tensor:
    # Calculate numerically stable distributed logsumexp
    xmax = x.max(dim=dim, keepdim=True).values
    torch.distributed.all_reduce(xmax, op=torch.distributed.ReduceOp.MAX)

    xe = (x - xmax).exp().sum(dim=dim, keepdim=True)
    torch.distributed.all_reduce(xe, op=torch.distributed.ReduceOp.SUM)

    res = (xmax + xe.log())
    if not keepdim:
        res = res.squeeze(dim)

    return res


def log_mean(x: torch.Tensor, dim: int = 0):
    if torch.distributed.is_initialized():
        xlse = dist_logsumexp(x, dim=dim)

        # Normalize
        n = torch.tensor(x.shape[dim]).to(x.device)
        torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)
        return xlse - n.log()
    else:
        return x.logsumexp(dim) - math.log(x.shape[dim])


def entropy_l(l: torch.Tensor) -> torch.Tensor:
    return - (l * l.exp()).sum(-1)


class MoE(torch.nn.Module):
    def __init__(self, dmodel: int, n_experts: int, expert_size: int, k: int,
                 dropout: float = 0, selection_mode: str = "sigmoid",
                 activation_after_topk: bool = False, activation=F.relu,
                 bias: bool = False, v_dim: Optional[int] = None,
                 sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,
                 weight_std_scale: float = 1.0):
        super().__init__()
        self.k_dim = dmodel
        self.v_dim = v_dim if v_dim is not None else dmodel
        self.n_experts = n_experts
        self.expert_size = expert_size
        self.size = self.n_experts * self.expert_size
        self.dropout = dropout
        self.selection_mode = selection_mode
        self.k_vec_dim = self.k_dim
        self.n_heads = k
        self.activation_after_topk = activation_after_topk
        self.activation = activation
        self.sinkhorn_n_iters = sinkhorn_n_iters
        self.expert_dropout = expert_dropout

        if self.selection_mode not in {"softmax", "sigmoid", "sinkmoid"}:
            raise ValueError(f"Unknown selection mode {self.selection_mode}")

        self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))
        self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))
        self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))

        torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_std_scale)
        torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_std_scale)
        torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_std_scale)

        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))
            self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))
        else:
            self.bias = None
            self.o_bias = None

        self.renorm_keep_std(self.expert_sel, dim=1)

    def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):
        with torch.no_grad():
            std = weight.std()
            weight.div_(weight.norm(dim=dim, keepdim=True))
            weight.mul_(std / weight.std())

    def entropy_reg(self, sel: torch.Tensor) -> float:
        # Everything is done in log scale
        sel = sel.flatten(0, -2)
        sel = F.log_softmax(sel, dim=-1)
        sel = log_mean(sel, -2)
        return - entropy_l(sel).mean()

    def compute_scores(self, input: torch.Tensor, index: CVMMSel) -> torch.Tensor:
```
**next_line:** `scores = cvmm(input, index, self.keys)`

**gold_snippet_index:** 0

**created_at:** 2023-10-16 11:00:47+00:00

**level:** 2k
---

**repo_name:** `meanii/downly`

**file_path:** `downly/plugins/logger.py`

**context:**

```json
[ { "identifier": "Downly", "path": "downly/downly.py", "snippet": "class Downly(Client):\n \"\"\"\n Downly 🦉\n \"\"\"\n def __init__(self):\n name = self.__class__.__name__.lower()\n\n self.telegram = telegram\n\n super().__init__(\n name,\n api_id=self.telegram.get('api_id'),\n api_hash=self.telegram.get('api_hash'),\n bot_token=self.telegram.get('bot_token'),\n workdir=str(Path.cwd()),\n workers=16,\n plugins=dict(\n root=f\"{name}.plugins\",\n ),\n sleep_threshold=180\n )\n\n self.uptime_reference = time.monotonic_ns()\n self.start_datetime = datetime.utcnow()\n\n async def start(self):\n await super().start()\n\n me = await self.get_me()\n bot.username = me.username\n bot.id = me.id\n logger.info(f\"Downly 🦉 v{__version__} (Layer {layer}) started on @{me.username}. Hi.\")\n\n async def stop(self, *args):\n await super().stop()\n logger.info(\"Downly 🦉 stopped. Bye.\")" }, { "identifier": "b_logger", "path": "downly/utils/b_logger.py", "snippet": "def b_logger(func):\n async def wrapper(client, message: Message):\n\n # checking if a message is url then log\n if not validate_url(message.text):\n await func(client, message)\n return\n\n # logging message\n if message.from_user: # if a message is from a user\n logger.info(f\"New message from {message.from_user.first_name}({message.from_user.id})\"\n f\" in {message.chat.title}({message.chat.id}) -\"\n f\" [MESSAGE]: {message.text}\")\n\n if message.from_user is None: # if a message is from channel\n logger.info(f\"New message from {message.chat.title}({message.chat.id}) -\"\n f\" [MESSAGE]: {message.text}\")\n\n return await func(client, message)\n\n return wrapper" }, { "identifier": "update_user", "path": "downly/database/users_sql.py", "snippet": "def update_user(user_id: int, username: str):\n with INSERTION_LOCK:\n user = SESSION.query(Users).get(user_id)\n if not user:\n user = Users(user_id, username)\n logger.info(f'[DB]: adding new user to db {user_id} ({username})')\n SESSION.add(user)\n SESSION.flush()\n else:\n user.username = username\n\n SESSION.commit()" }, { "identifier": "update_chat", "path": "downly/database/users_sql.py", "snippet": "def update_chat(chat_id: str, chat_name: str):\n with INSERTION_LOCK:\n chat = SESSION.query(Chats).get(str(chat_id))\n if not chat:\n chat = Chats(chat_id, chat_name)\n logger.info(f'[DB]: adding new chat to db {chat_id} ({chat_name})')\n SESSION.add(chat)\n SESSION.flush()\n else:\n chat.chat_name = chat_name\n\n SESSION.commit()" } ]
```

**import_statement:**

```python
from pyrogram import filters, Client
from pyrogram.types import Message
from pyrogram.enums import ChatType
from downly.downly import Downly
from downly.utils.b_logger import b_logger
from downly.database.users_sql import update_user, update_chat
```

**token_num:** 853

**cropped_code:**

```python
@Downly.on_message(filters.private | filters.group | filters.channel, group=2)
@b_logger
async def logger(client: Client, message: Message):
    # check if a message is command then do nothing
    if message.chat.type == ChatType.GROUP or message.chat.type == ChatType.SUPERGROUP:
        update_chat(str(message.chat.id), message.chat.title)
        if message.from_user:
```
**next_line:** `update_user(message.from_user.id, message.from_user.username)`

**gold_snippet_index:** 2

**created_at:** 2023-10-17 16:21:31+00:00

**level:** 2k
---

**repo_name:** `hnesk/flipper-raw-rfid`

**file_path:** `flipper_raw_rfid/bits.py`

**context:**

```json
[ { "identifier": "batched", "path": "flipper_raw_rfid/utils.py", "snippet": "def batched(iterable: Iterable[Any], n: int) -> Iterable[tuple[Any, ...]]:\n # batched('ABCDEFG', 3) --> ABC DEF G\n if n < 1:\n raise ValueError('n must be at least one')\n it = iter(iterable)\n while batch := tuple(islice(it, n)):\n yield batch" }, { "identifier": "Peak", "path": "flipper_raw_rfid/utils.py", "snippet": "class Peak:\n \"\"\"\n A peak in a distribution described by left, center and right index\n \"\"\"\n left: int = field(compare=False)\n center: int = field(compare=False)\n right: int = field(compare=False)\n height: float = field(default=0.0, repr=False)\n\n def merge(self, other: Peak) -> Peak:\n \"\"\"\n Merge this peak with another peak\n :param other: Peak to merge with\n :return: merged peak\n \"\"\"\n return Peak(\n min(self.left, other.left),\n (self.center + other.center) // 2,\n max(self.right, other.right),\n max(self.height, other.height)\n )\n\n def slice(self, distribution: npt.NDArray[Any]) -> npt.NDArray[Any]:\n \"\"\"\n Slice the distribution with the peak\n\n :param distribution:\n :return:\n \"\"\"\n return distribution[self.left:self.right]\n\n def fit(self, distribution: npt.NDArray[Any], quantile: float = 1.0) -> Peak:\n \"\"\"\n Fit the distribution to the peak\n :param distribution:\n :param quantile:\n :return:\n \"\"\"\n my_excerpt = distribution[self.left:self.right]\n if quantile < 1.0:\n to_capture = numpy.sum(my_excerpt) * quantile\n\n def objective(thr: float) -> float:\n # 1.0 for capturing enough and a little nudge to find bigger thresholds\n return cast(float, 1.0 * (to_capture > numpy.sum(my_excerpt[my_excerpt > thr])) - thr * 0.0001)\n\n res = minimize_scalar(objective, (0, my_excerpt.max()))\n threshold = int(res.x)\n else:\n threshold = 0\n\n first, *_, last = (my_excerpt > threshold).nonzero()[0]\n\n return Peak(\n self.left + first - 1,\n self.left + (first + last) // 2,\n self.left + last + 1,\n my_excerpt[first:last].max()\n )\n\n def __contains__(self, v: float | int) -> bool:\n \"\"\"\n Check if a value is inside the peak\n :param v: value to check\n :return:\n \"\"\"\n return self.left <= v <= self.right" } ]
```

**import_statement:**

```python
import re
import numpy
import numpy.typing as npt
from flipper_raw_rfid.utils import batched, Peak
```

**token_num:** 1,177

**cropped_code:**

```python
"""
Utilities for working with bitstreams
"""


def decode_lengths(pads: npt.NDArray[numpy.int64], peaks: list[Peak]) -> tuple[npt.NDArray[numpy.int8], int]:
    """
    Loops through pulses and durations and matches them to peaks

    Checks for the length of the peak as a multiple of the first peak
    and adds as many 1/0 to the result

    :param pads: Pulse and duration values
    :param peaks: A list of peaks from find_peaks, the center frequencies should be more or less multiples of the first peak
    :return: The decoded bitstream
    """
    result: list[int] = []
    position = 0
    result_position = None
    first_length = peaks[0].center
    for high, duration in pads:
        low = duration - high
        high_peak = None
        low_peak = None
        for p in peaks:
            if high in p:
                high_peak = p
            if low in p:
                low_peak = p
            if high_peak and low_peak:
                break
        if not (high_peak and low_peak):
            if not high_peak:
                print(f'Found nothing for high {high}, restarting')
            if not low_peak:
                print(f'Found nothing for low {low}, restarting')
            result = []
            result_position = position
            continue

        result.extend([1] * int(round(high_peak.center / first_length)))
        result.extend([0] * int(round(low_peak.center / first_length)))
        position += duration

    return numpy.array(result, dtype=numpy.int8), result_position


def decode_manchester(manchester: npt.NDArray[numpy.int8], biphase: bool = True) -> npt.NDArray[numpy.int8]:
    """
    Decode manchester encoded bitstream

    :param manchester: manchester encoded bitstream
    :param biphase: True for biphase, False for diphase
    :return: decoded bitstream
    """
    if manchester[0] == manchester[1]:
        manchester = manchester[1:]

    result = []
```
""" Utilities for working with bitstreams """ def decode_lengths(pads: npt.NDArray[numpy.int64], peaks: list[Peak]) -> tuple[npt.NDArray[numpy.int8], int]: """ Loops through pulses and durations and matches them to peaks Checks for the length of the peak as a multiple of the first peak and adds as many 1/0 to the result :param pads: Pulse and duration values :param peaks: A list of peaks from find_peaks, the center frequencies should be more or less multiples of the first peak :return: The decoded bitstream """ result: list[int] = [] position = 0 result_position = None first_length = peaks[0].center for high, duration in pads: low = duration - high high_peak = None low_peak = None for p in peaks: if high in p: high_peak = p if low in p: low_peak = p if high_peak and low_peak: break if not (high_peak and low_peak): if not high_peak: print(f'Found nothing for high {high}, restarting') if not low_peak: print(f'Found nothing for low {low}, restarting') result = [] result_position = position continue result.extend([1] * int(round(high_peak.center / first_length))) result.extend([0] * int(round(low_peak.center / first_length))) position += duration return numpy.array(result, dtype=numpy.int8), result_position def decode_manchester(manchester: npt.NDArray[numpy.int8], biphase: bool = True) -> npt.NDArray[numpy.int8]: """ Decode manchester encoded bitstream :param manchester: manchester encoded bitstream :param biphase: True for biphase, False for diphase :return: decoded bitstream """ if manchester[0] == manchester[1]: manchester = manchester[1:] result = []
**next_line:** `for pair in batched(manchester, 2):`

**gold_snippet_index:** 0

**created_at:** 2023-10-20 13:06:00+00:00

**level:** 2k
---

**repo_name:** `xingchenshanyao/YOLOP-E`

**file_path:** `lib/dataset/DemoDataset.py`

**context:**

```json
[ { "identifier": "clean_str", "path": "lib/utils/utils.py", "snippet": "def clean_str(s):\n # Cleans a string by replacing special characters with underscore _\n return re.sub(pattern=\"[|@#!¡·$€%&()=?¿^*;:,¨´><+]\", repl=\"_\", string=s)" }, { "identifier": "letterbox_for_img", "path": "lib/utils/augmentations.py", "snippet": "def letterbox_for_img(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n\n\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding\n\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA)\n\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img, ratio, (dw, dh)" } ]
import glob
import os
import random
import shutil
import time
import cv2
import math
import numpy as np
import torch
from pathlib import Path
from threading import Thread
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from ..utils import letterbox_for_img, clean_str
1,417
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']


class LoadImages:  # for inference
    def __init__(self, path, img_size=640):
        p = str(Path(path))  # os-agnostic
        p = os.path.abspath(p)  # absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception('ERROR: %s does not exist' % p)

        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
            (p, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
                    h0, w0 = img0.shape[:2]

            self.frame += 1
            print('\n video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)  # BGR
            # img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: \n' % (self.count, self.nf, path), end='')
            h0, w0 = img0.shape[:2]

        # Padded resize
        # padding size: 640*360*3 -> 640*384*3
img, ratio, pad = letterbox_for_img(img0, new_shape=self.img_size, auto=True)
1
2023-10-24 02:08:25+00:00
2k
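The `letterbox_for_img` snippet in this record scales an image to fit the target shape and then pads each side up to a 32-pixel multiple. A small sketch of just that arithmetic, reproducing the 640x360 -> 640x384 example from the comment above; the helper name is made up:

def letterbox_dims(shape, new_shape=(640, 640), stride=32):
    # shape is (height, width), as in the record's snippet
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    new_unpad = (round(shape[1] * r), round(shape[0] * r))   # (w, h) after resize
    dw = (new_shape[1] - new_unpad[0]) % stride              # width padding
    dh = (new_shape[0] - new_unpad[1]) % stride              # height padding
    return new_unpad, dw / 2, dh / 2                         # padding split per side

print(letterbox_dims((360, 640)))  # ((640, 360), 0.0, 12.0): 640x360 -> 640x384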
godisboy0/nonebot-adapter-wcf
adapters/wechatferry/api.py
[ { "identifier": "ApiNotAvailable", "path": "adapters/wechatferry/exception.py", "snippet": "class ApiNotAvailable(BaseApiNotAvailable, WechatFerryAdapterException):\n \"\"\"API 连接不可用\"\"\"" }, { "identifier": "UserInfo", "path": "adapters/wechatferry/basemodel.py", "snippet": "class UserInfo():\n \n def __init__(self, wx_id: str, code: str, wx_name: str, gender: str):\n self.wx_id = wx_id # 微信id,原始id。会被作为真正的user_id \n self.code = code # code 微信允许改id后,新改的id的code\n self.wx_name = wx_name # 微信昵称\n self.gender = gender # 性别\n\n def __str__(self) -> str:\n return f\"wx_id: {self.wx_id}, code: {self.code}, wx_name: {self.wx_name}, gender: {self.gender or ''}\"" }, { "identifier": "database", "path": "adapters/wechatferry/sqldb.py", "snippet": "class database:\n\n def __init__(self, file_path, db_name=\"wcf\") -> None:\n ## 如果同参数\n global singleton_dict\n if hasattr(singleton_dict, file_path):\n self.conn = getattr(singleton_dict, file_path)\n return\n \n if not file_path:\n raise ValueError(\"file_path can not be empty\")\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n datafile = os.path.join(file_path, db_name)\n self.conn = sqlite3.connect(datafile)\n singleton_dict.file_path = self.conn\n\n def create_table(self, sql: str) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to create table: {e}\")\n raise e\n finally:\n cursor.close()\n\n def query(self, sql, *args) -> list:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n return cursor.fetchall()\n except Exception as e:\n logger.error(f\"Failed to query: {e}\")\n raise e\n finally:\n cursor.close()\n\n def execute(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to execute: {e}\")\n raise e\n finally:\n cursor.close()\n\n def insert(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to insert: {e}\")\n raise e\n finally:\n cursor.close()\n\n def update(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to update: {e}\")\n raise e\n finally:\n cursor.close()\n\n def delete(self, sql: str, *args) -> None:\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql, args)\n self.conn.commit()\n except Exception as e:\n logger.error(f\"Failed to delete: {e}\")\n raise e\n finally:\n cursor.close()\n\n def table_exists(self, table_name: str) -> bool:\n sql = f\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{table_name}'\"\n cursor = self.conn.cursor()\n try:\n cursor.execute(sql)\n return cursor.fetchone()[0] == 1\n except Exception as e:\n logger.error(f\"Failed to check table {table_name} exists: {e}\")\n return False\n finally:\n cursor.close()" }, { "identifier": "file_md5", "path": "adapters/wechatferry/utils.py", "snippet": "class Logger:\nclass downloader:\n def __init__(self) -> None:\n def info(self, msg: str, e: Exception=None) -> None:\n def error(self, msg: str, e: Exception=None) -> None:\n def debug(self, msg: str, e: Exception=None) -> None:\n def warning(self, msg: str, e: Exception=None) -> None:\ndef handle_api_result(result: Optional[Dict[str, Any]]) -> Any:\ndef file_md5(file_path) -> Optional[str]:\n def __init__(self, url, 
file_name, path: str, override: bool = True, chunk_size: int = 1024, headers={}) -> None:\n async def downloadAsync(self) -> str:\n def download(self) -> str:" }, { "identifier": "AdapterConfig", "path": "adapters/wechatferry/config.py", "snippet": "class AdapterConfig(BaseModel):\n \"\"\"wechatferry 配置类\"\"\"\n\n root_user: str\n debug: bool = Field(default=True)\n \"\"\"是否开启调试模式\"\"\"\n db_path: str = Field(default=\"./data\")\n \"\"\"数据库路径,默认为当前运行路径下的 data 文件夹,该文件夹已经被 .gitignore 忽略\"\"\"\n echo_root_msg: bool = Field(default=False)\n \"\"\"是否将 root_user 的信息直接做成json回传给root_user\"\"\"\n \"\"\"在debug时非常有用,特别是你的开发机器和部署微信的机器不是同一台时。用过的都说好\"\"\"\n \n\n class Config:\n extra = \"ignore\"" } ]
from wcferry import Wcf
from typing import Any
from .exception import ApiNotAvailable
from concurrent.futures import ThreadPoolExecutor
from .basemodel import UserInfo
from .sqldb import database
from .utils import file_md5, logger
from .config import AdapterConfig
import asyncio
1,546
""" 所有的 api 都定义在这里。 call_api 的所有方法最终都会调用这里的方法。 """ """ 发现绝大多数插件都是为 onebot.v11 所写,为了更好的复用(白嫖),这里也用 onebot.v11 中相关的数据结构。 参数约定: to_wx_id: 群聊时为群聊id, 非群聊时为用户id """ user_cache = {} md5_executor = ThreadPoolExecutor(max_workers=1) class API:
""" 所有的 api 都定义在这里。 call_api 的所有方法最终都会调用这里的方法。 """ """ 发现绝大多数插件都是为 onebot.v11 所写,为了更好的复用(白嫖),这里也用 onebot.v11 中相关的数据结构。 参数约定: to_wx_id: 群聊时为群聊id, 非群聊时为用户id """ user_cache = {} md5_executor = ThreadPoolExecutor(max_workers=1) class API:
def __init__(self, wcf: Wcf, config: AdapterConfig):
4
2023-10-22 10:52:27+00:00
2k
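The `database` snippet in this record repeats one cursor-per-call pattern across all its CRUD methods: open a cursor, commit on success, roll back and re-raise on failure, always close. A stripped-down sketch of that pattern against the standard sqlite3 module; the table and column names are made up:

import sqlite3

def execute(conn, sql, *args):
    cur = conn.cursor()
    try:
        cur.execute(sql, args)
        conn.commit()
    except Exception:
        conn.rollback()  # the record logs, rolls back, then re-raises
        raise
    finally:
        cur.close()

conn = sqlite3.connect(":memory:")  # stand-in for the wrapper's file-backed db
execute(conn, "CREATE TABLE users (wx_id TEXT PRIMARY KEY, wx_name TEXT)")
execute(conn, "INSERT INTO users VALUES (?, ?)", "wxid_1", "Alice")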
R1999RC-official/Reverse1999ResonanceCalculator
python/python_env/Lib/site-packages/setuptools/config/_apply_pyprojecttoml.py
[ { "identifier": "SetuptoolsWarning", "path": "python/python_env/Lib/site-packages/setuptools/warnings.py", "snippet": "class SetuptoolsWarning(UserWarning):\n \"\"\"Base class in ``setuptools`` warning hierarchy.\"\"\"\n\n @classmethod\n def emit(\n cls,\n summary: Optional[str] = None,\n details: Optional[str] = None,\n due_date: Optional[_DueDate] = None,\n see_docs: Optional[str] = None,\n see_url: Optional[str] = None,\n stacklevel: int = 2,\n **kwargs,\n ):\n \"\"\"Private: reserved for ``setuptools`` internal use only\"\"\"\n # Default values:\n summary_ = summary or getattr(cls, \"_SUMMARY\", None) or \"\"\n details_ = details or getattr(cls, \"_DETAILS\", None) or \"\"\n due_date = due_date or getattr(cls, \"_DUE_DATE\", None)\n docs_ref = see_docs or getattr(cls, \"_SEE_DOCS\", None)\n docs_url = docs_ref and f\"https://setuptools.pypa.io/en/latest/{docs_ref}\"\n see_url = see_url or getattr(cls, \"_SEE_URL\", None)\n due = date(*due_date) if due_date else None\n\n text = cls._format(summary_, details_, due, see_url or docs_url, kwargs)\n if due and due < date.today() and _should_enforce():\n raise cls(text)\n warnings.warn(text, cls, stacklevel=stacklevel + 1)\n\n @classmethod\n def _format(\n cls,\n summary: str,\n details: str,\n due_date: Optional[date] = None,\n see_url: Optional[str] = None,\n format_args: Optional[dict] = None,\n ):\n \"\"\"Private: reserved for ``setuptools`` internal use only\"\"\"\n today = date.today()\n summary = cleandoc(summary).format_map(format_args or {})\n possible_parts = [\n cleandoc(details).format_map(format_args or {}),\n (\n f\"\\nBy {due_date:%Y-%b-%d}, you need to update your project and remove \"\n \"deprecated calls\\nor your builds will no longer be supported.\"\n if due_date and due_date > today\n else None\n ),\n (\n \"\\nThis deprecation is overdue, please update your project and remove \"\n \"deprecated\\ncalls to avoid build errors in the future.\"\n if due_date and due_date < today\n else None\n ),\n (f\"\\nSee {see_url} for details.\" if see_url else None),\n ]\n parts = [x for x in possible_parts if x]\n if parts:\n body = indent(_TEMPLATE.format(details=\"\\n\".join(parts)), _INDENT)\n return \"\\n\".join([summary, \"!!\\n\", body, \"\\n!!\"])\n return summary" }, { "identifier": "SetuptoolsDeprecationWarning", "path": "python/python_env/Lib/site-packages/setuptools/warnings.py", "snippet": "class SetuptoolsDeprecationWarning(SetuptoolsWarning):\n \"\"\"\n Base class for warning deprecations in ``setuptools``\n\n This class is not derived from ``DeprecationWarning``, and as such is\n visible by default.\n \"\"\"" } ]
import logging
import os
from collections.abc import Mapping
from email.headerregistry import Address
from functools import partial, reduce
from itertools import chain
from types import MappingProxyType
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Set,
    Tuple,
    Type,
    Union,
    cast,
)
from ..warnings import SetuptoolsWarning, SetuptoolsDeprecationWarning
from setuptools._importlib import metadata  # noqa
from setuptools.dist import Distribution  # noqa
from setuptools.config import expand
from setuptools.extern.packaging.specifiers import SpecifierSet
from .._importlib import metadata
from setuptools.dist import Distribution
1,434
"""Translation layer between pyproject config and setuptools distribution and metadata objects. The distribution and metadata objects are modeled after (an old version of) core metadata, therefore configs in the format specified for ``pyproject.toml`` need to be processed before being applied. **PRIVATE MODULE**: API reserved for setuptools internal usage only. """ if TYPE_CHECKING: EMPTY: Mapping = MappingProxyType({}) # Immutable dict-like _Path = Union[os.PathLike, str] _DictOrStr = Union[dict, str] _CorrespFn = Callable[["Distribution", Any, _Path], None] _Correspondence = Union[str, _CorrespFn] _logger = logging.getLogger(__name__) def apply(dist: "Distribution", config: dict, filename: _Path) -> "Distribution": """Apply configuration dict read with :func:`read_configuration`""" if not config: return dist # short-circuit unrelated pyproject.toml file root_dir = os.path.dirname(filename) or "." _apply_project_table(dist, config, root_dir) _apply_tool_table(dist, config, filename) current_directory = os.getcwd() os.chdir(root_dir) try: dist._finalize_requires() dist._finalize_license_files() finally: os.chdir(current_directory) return dist def _apply_project_table(dist: "Distribution", config: dict, root_dir: _Path): project_table = config.get("project", {}).copy() if not project_table: return # short-circuit _handle_missing_dynamic(dist, project_table) _unify_entry_points(project_table) for field, value in project_table.items(): norm_key = json_compatible_key(field) corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key) if callable(corresp): corresp(dist, value, root_dir) else: _set_config(dist, corresp, value) def _apply_tool_table(dist: "Distribution", config: dict, filename: _Path): tool_table = config.get("tool", {}).get("setuptools", {}) if not tool_table: return # short-circuit for field, value in tool_table.items(): norm_key = json_compatible_key(field) if norm_key in TOOL_TABLE_DEPRECATIONS: suggestion, kwargs = TOOL_TABLE_DEPRECATIONS[norm_key] msg = f"The parameter `{norm_key}` is deprecated, {suggestion}"
"""Translation layer between pyproject config and setuptools distribution and metadata objects. The distribution and metadata objects are modeled after (an old version of) core metadata, therefore configs in the format specified for ``pyproject.toml`` need to be processed before being applied. **PRIVATE MODULE**: API reserved for setuptools internal usage only. """ if TYPE_CHECKING: EMPTY: Mapping = MappingProxyType({}) # Immutable dict-like _Path = Union[os.PathLike, str] _DictOrStr = Union[dict, str] _CorrespFn = Callable[["Distribution", Any, _Path], None] _Correspondence = Union[str, _CorrespFn] _logger = logging.getLogger(__name__) def apply(dist: "Distribution", config: dict, filename: _Path) -> "Distribution": """Apply configuration dict read with :func:`read_configuration`""" if not config: return dist # short-circuit unrelated pyproject.toml file root_dir = os.path.dirname(filename) or "." _apply_project_table(dist, config, root_dir) _apply_tool_table(dist, config, filename) current_directory = os.getcwd() os.chdir(root_dir) try: dist._finalize_requires() dist._finalize_license_files() finally: os.chdir(current_directory) return dist def _apply_project_table(dist: "Distribution", config: dict, root_dir: _Path): project_table = config.get("project", {}).copy() if not project_table: return # short-circuit _handle_missing_dynamic(dist, project_table) _unify_entry_points(project_table) for field, value in project_table.items(): norm_key = json_compatible_key(field) corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key) if callable(corresp): corresp(dist, value, root_dir) else: _set_config(dist, corresp, value) def _apply_tool_table(dist: "Distribution", config: dict, filename: _Path): tool_table = config.get("tool", {}).get("setuptools", {}) if not tool_table: return # short-circuit for field, value in tool_table.items(): norm_key = json_compatible_key(field) if norm_key in TOOL_TABLE_DEPRECATIONS: suggestion, kwargs = TOOL_TABLE_DEPRECATIONS[norm_key] msg = f"The parameter `{norm_key}` is deprecated, {suggestion}"
SetuptoolsDeprecationWarning.emit(
1
2023-10-24 06:48:58+00:00
2k
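`_apply_project_table` above normalizes each pyproject key and then either calls a correspondence function or sets the attribute directly. A toy sketch of that dispatch idiom; the correspondence table and the `dist` object here are stand-ins, not setuptools' real `PYPROJECT_CORRESPONDENCE` or `Distribution`:

from types import SimpleNamespace

def json_compatible_key(key: str) -> str:
    # pyproject keys use dashes; dist attributes use underscores
    return key.lower().replace("-", "_")

def _set_config(dist, field, value):
    setattr(dist, field, value)

# Toy correspondence table; the real one maps many more fields.
CORRESPONDENCE = {
    "requires_python": lambda dist, v, root: setattr(dist, "python_requires", v),
}

def apply_field(dist, field, value, root_dir="."):
    norm_key = json_compatible_key(field)
    corresp = CORRESPONDENCE.get(norm_key, norm_key)
    if callable(corresp):
        corresp(dist, value, root_dir)
    else:
        _set_config(dist, corresp, value)

dist = SimpleNamespace()
apply_field(dist, "requires-python", ">=3.8")
print(dist.python_requires)  # '>=3.8'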
Summaw/genCraft-imageGen
main.py
[ { "identifier": "write", "path": "modules/write/write.py", "snippet": "def write(text: str, case: str) -> None:\r\n current_time = time.strftime(\"%H:%M:%S\", time.localtime())\r\n switcher = {\r\n 'info': _write_info,\r\n 'success': _write_success,\r\n 'error': _write_error\r\n }\r\n func = switcher.get(case.lower(), lambda x, y: None)\r\n func(current_time, text)\r" }, { "identifier": "login_attempt", "path": "modules/tasks/login.py", "snippet": "async def login_attempt():\r\n\r\n headers = {\r\n \"Host\": \"api.gencraft.com\",\r\n \"Connection\": \"keep-alive\",\r\n \"Content-Length\": \"94\",\r\n \"X-Csrf-Protection\":\"1\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 OPR/102.0.0.0\",\r\n \"Content-Type\": \"application/json\",\r\n \"Accept\": \"application/json, text/plain, */*\",\r\n \"X-WEB-TOKEN\": \"YOURWEBTOKENHERE (WILL AUTOMATE THIS AT A LATER DATE)\",\r\n \"Origin\": \"https://gencraft.com\",\r\n \"Referer\": \"https://gencraft.com/\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Accept-Language\": \"en-US,en;q=0.9\"\r\n }\r\n \r\n '''\r\n \"first_name\": \"your google account firstname\", \r\n \"last_name\": \"your google account lastname\",\r\n \"auth_provider\": \"google\",\r\n \"timezone\": \"America/New_York\" #or your timezone \r\n '''\r\n\r\n data = {\r\n \"first_name\": \"John\", \r\n \"last_name\": \"Doe\",\r\n \"auth_provider\": \"google\",\r\n \"timezone\": \"America/New_York\"\r\n }\r\n\r\n login_request = requests.post(\"https://api.gencraft.com/api/v5/user/login\", headers=headers, json=data, timeout=20)\r\n\r\n if \"SESSION_ID\" in login_request.cookies:\r\n # Get the value of the \"SESSION_ID\" cookie\r\n session_id = login_request.cookies[\"SESSION_ID\"]\r\n else:\r\n write(\"SESSION_ID cookie not found in the response.\", 'error')\r\n\r\n if login_request.status_code == 200:\r\n return session_id\r\n else:\r\n return 'False'" }, { "identifier": "generate_image", "path": "modules/tasks/generateImage.py", "snippet": "async def generate_image(sessionId):\r\n time.sleep(2)\r\n\r\n cookies = {\r\n 'SESSION_ID': f'{sessionId}',\r\n }\r\n\r\n headers = {\r\n 'authority': 'api.gencraft.com',\r\n 'accept': 'application json, text plain, */*',\r\n 'accept-language': 'en-US,en;q=0.9',\r\n 'content-type': 'application/json',\r\n 'origin': 'https://gencraft.com',\r\n 'referer': 'https://gencraft.com/',\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 OPR/102.0.0.0',\r\n 'x-csrf-protection': '1',\r\n }\r\n\r\n # Model styles\r\n # art_style 1 = 3D Style\r\n # art_style 2 = Anime Style\r\n # art_style 14 = CyberPunk Style\r\n # art_style 9 = Realistic Style\r\n # art_style 29 = Video Game Style\r\n # art_style 17 = Isometric Style\r\n\r\n json_data = {\r\n 'prompt_text': 'A blue and gold macaw chilling on a tree overviewing the rainforest', # Give the model a custom prompt here\r\n 'art_style_id': 9,\r\n 'negative_prompt_text': '',\r\n 'media_type': 'image',\r\n 'model_id': 1,\r\n 'width': 1024,\r\n 'height': 1024,\r\n }\r\n\r\n generate_image_request = requests.post('https://api.gencraft.com/api/v5/prompt/generate', cookies=cookies, headers=headers,\r\n json=json_data, timeout=30)\r\n print(generate_image_request.text)\r\n if generate_image_request.status_code == 400:\r\n write('Daily limit reached. 
Please use a different X-WEB-TOKEN to continue generating images', 'error')\r\n else:\r\n response_json = json.loads(generate_image_request.text)\r\n\r\n if \"data\" in response_json and \"images\" in response_json[\"data\"]:\r\n images = response_json[\"data\"][\"images\"]\r\n image_urls = [image[\"url\"] for image in images if \"url\" in image]\r\n\r\n structured_data = {\r\n \"prompt\": {\r\n \"prompt_text\": json_data[\"prompt_text\"],\r\n \"art_style_id\": json_data[\"art_style_id\"],\r\n },\r\n \"urls\": image_urls,\r\n }\r\n\r\n for url in image_urls:\r\n write(f\"Image Generated: {url}\", \"success\")\r\n\r\n with open(\"data/generated.json\", \"a\") as json_file:\r\n json_file.write(\"\\n\")\r\n json.dump(structured_data, json_file, indent=4)\r" } ]
import time
import asyncio
import requests
from modules.write.write import write
from modules.tasks.login import login_attempt
from modules.tasks.generateImage import generate_image
1,328
async def start():
    loginRequest = await login_attempt()

    if loginRequest == 'False':
        write("There was a problem logging in.", "error")
    else:
        write(f"Session ID: {loginRequest}", 'info')
await generate_image(loginRequest)
2
2023-10-20 20:56:32+00:00
2k
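`login_attempt` in this record branches on `"SESSION_ID" in login_request.cookies` before reading the cookie. A compact sketch of that step using the cookie jar's `.get()`, which returns None instead of raising; the URL and payload are placeholders, not the service's real API:

import requests

def fetch_session_id(url: str, **kwargs) -> str | None:
    # Placeholder endpoint; pass headers/json just as the record does.
    resp = requests.post(url, timeout=20, **kwargs)
    if resp.status_code != 200:
        return None
    # .get() avoids the missing-cookie branch guarded against above.
    return resp.cookies.get("SESSION_ID")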
mentpy/mentpy
mentpy/gradients/grad.py
[ { "identifier": "fd_gradient", "path": "mentpy/gradients/_finite_difference.py", "snippet": "def fd_gradient(f, x, h=1e-5, type=\"central\"):\n if type not in [\"central\", \"forward\", \"backward\"]:\n raise UserWarning(\n f\"Expected type to be 'central', 'forward', or 'backward' but {type} was given\"\n )\n\n grad = np.zeros(len(x))\n for i in range(len(x)):\n if type == \"central\":\n grad[i] = (f(x + h * np.eye(len(x))[i]) - f(x - h * np.eye(len(x))[i])) / (\n 2 * h\n )\n elif type == \"forward\":\n grad[i] = (f(x + h * np.eye(len(x))[i]) - f(x)) / h\n elif type == \"backward\":\n grad[i] = (f(x) - f(x - h * np.eye(len(x))[i])) / h\n return grad" }, { "identifier": "fd_hessian", "path": "mentpy/gradients/_finite_difference.py", "snippet": "def fd_hessian(f, x, h=1e-5, type=\"central\"):\n if type not in [\"central\", \"forward\", \"backward\"]:\n raise UserWarning(\n f\"Expected type to be 'central', 'forward', or 'backward' but {type} was given\"\n )\n\n hess = np.zeros((len(x), len(x)))\n for i in range(len(x)):\n for j in range(len(x)):\n if type == \"central\":\n hess[i, j] = (\n f(x + h * np.eye(len(x))[i] + h * np.eye(len(x))[j])\n - f(x + h * np.eye(len(x))[i] - h * np.eye(len(x))[j])\n - f(x - h * np.eye(len(x))[i] + h * np.eye(len(x))[j])\n + f(x - h * np.eye(len(x))[i] - h * np.eye(len(x))[j])\n ) / (4 * h**2)\n elif type == \"forward\":\n hess[i, j] = (\n f(x + h * np.eye(len(x))[i] + h * np.eye(len(x))[j])\n - f(x + h * np.eye(len(x))[i])\n - f(x + h * np.eye(len(x))[j])\n + f(x)\n ) / h**2\n elif type == \"backward\":\n hess[i, j] = (\n f(x)\n - f(x - h * np.eye(len(x))[i])\n - f(x - h * np.eye(len(x))[j])\n + f(x - h * np.eye(len(x))[i] - h * np.eye(len(x))[j])\n ) / h**2\n return hess" }, { "identifier": "psr_gradient", "path": "mentpy/gradients/_parameter_shift.py", "snippet": "def psr_gradient(cost, x, shift=1.5):\n \"\"\"Calculate the gradient of a cost function using the parameter shift rule.\n\n Args:\n cost (callable): Cost function to calculate the gradient of.\n x (array): Input to the cost function.\n shift (float, optional): Shift to use in the parameter shift rule. Defaults to 1.5.\n\n Returns:\n array: Gradient of the cost function.\n \"\"\"\n grad = np.zeros(len(x))\n for i in range(len(x)):\n grad[i] = (\n cost(x + shift * np.eye(len(x))[i]) - cost(x - shift * np.eye(len(x))[i])\n ) / (2 * shift)\n return grad" }, { "identifier": "psr_hessian", "path": "mentpy/gradients/_parameter_shift.py", "snippet": "def psr_hessian(cost, x, shift=1.5):\n \"\"\"Calculate the Hessian of a cost function using the parameter shift rule.\n\n Args:\n cost (callable): Cost function to calculate the Hessian of.\n x (array): Input to the cost function.\n shift (float, optional): Shift to use in the parameter shift rule. Defaults to 1.5.\n\n Returns:\n array: Hessian of the cost function.\n \"\"\"\n hess = np.zeros((len(x), len(x)))\n for i in range(len(x)):\n for j in range(len(x)):\n hess[i, j] = (\n cost(x + shift * np.eye(len(x))[i] + shift * np.eye(len(x))[j])\n - cost(x + shift * np.eye(len(x))[i] - shift * np.eye(len(x))[j])\n - cost(x - shift * np.eye(len(x))[i] + shift * np.eye(len(x))[j])\n + cost(x - shift * np.eye(len(x))[i] - shift * np.eye(len(x))[j])\n ) / (4 * shift**2)\n return hess" } ]
import numpy as np
from ._finite_difference import fd_gradient, fd_hessian
from ._parameter_shift import psr_gradient, psr_hessian
1,328
# Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""Module that contains functions to calculate gradients of cost functions."""

__all__ = ["get_gradient", "get_hessian"]


def get_gradient(cost, x, method="parameter-shift", *args, **kwargs):
    """Calculate the gradient of a cost function.

    Args:
        cost (callable): Cost function to calculate the gradient of.
        x (array): Input to the cost function.
        method (str, optional): Method to use to calculate the gradient. Defaults to 'parameter-shift'.

    Returns:
        array: Gradient of the cost function.
    """
    match method:
        case "parameter-shift" | "psr" | "parametershift":
return psr_gradient(cost, x, *args, **kwargs)
2
2023-10-18 18:29:42+00:00
2k
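Both the finite-difference and parameter-shift helpers in this record probe the cost at symmetrically shifted inputs. A quick runnable check of the central-difference form on a toy quadratic cost, whose exact gradient is 2x:

import numpy as np

def central_grad(f, x, h=1e-5):
    # (f(x + h*e_i) - f(x - h*e_i)) / (2h), one coordinate at a time
    eye = np.eye(len(x))
    return np.array([(f(x + h * e) - f(x - h * e)) / (2 * h) for e in eye])

cost = lambda x: float(x @ x)  # toy cost; exact gradient is 2x
x = np.array([0.5, -1.0])
print(central_grad(cost, x))   # ~ [1.0, -2.0]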
rnag/cert-hero
cert_hero/cli.py
[ { "identifier": "certs_please", "path": "cert_hero/cert_hero.py", "snippet": "def certs_please(\n hostnames: list[str] | tuple[str] | set[str],\n context: ssl.SSLContext = None,\n num_threads: int = 25,\n user_agent: str | None = _DEFAULT_USER_AGENT,\n) -> dict[str, CertHero]:\n \"\"\"\n Retrieve (concurrently) the SSL certificate(s) for a list of ``hostnames`` - works\n even in the case of expired or self-signed certificates.\n\n Usage:\n\n >>> import cert_hero, json\n >>> host_to_cert = cert_hero.certs_please(['google.com', 'cnn.com', 'www.yahoo.co.in', 'youtu.be'])\n >>> cert_hero.set_expired(host_to_cert)\n >>> host_to_cert\n {'google.com': CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"753DD6FF20CB1B4510CB4C1EA27DA2EB\",\n ...\n }\n ), 'cnn.com': CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"7F2F3E5C350554D71A6784CCFE6E8315\",\n ...\n }\n ), ...\n }\n >>> json.dumps(host_to_cert)\n {\"google.com\": {\"Cert Status\": \"SUCCESS\", ...}, \"cnn.com\": {\"Cert Status\": \"SUCCESS\", ...}, ...}\n\n :param hostnames: List of hosts to retrieve SSL Certificate(s) for\n :param context: (Optional) Shared SSL Context\n :param num_threads: Max number of concurrent threads\n :param user_agent: A custom *user agent* to use for the HTTP call to retrieve ``Location`` and ``Status``.\n Defaults to ``python-requests/{version}``, or a random *user agent* if the ``fake_useragent`` module\n is installed (via the ``fake-ua``\n `extra <https://packaging.python.org/en/latest/tutorials/installing-packages/#installing-extras>`__).\n :return: A mapping of ``hostname`` to the SSL Certificate (e.g. :class:`CertHero`) for that host\n\n \"\"\"\n\n if context is None:\n context = create_ssl_context()\n\n if num_hosts := len(hostnames):\n # We can use a with statement to ensure threads are cleaned up promptly\n with ThreadPoolExecutor(\n max_workers=min(num_hosts, num_threads)\n ) as pool:\n _host_to_cert = {\n # TODO: Update to remove `or` once we finalize how to handle missing certs\n host: cert_info or _build_failed_cert('TIMED_OUT')\n for host, cert_info in zip(\n hostnames,\n pool.map(\n cert_please,\n hostnames,\n repeat(context),\n repeat(user_agent),\n ),\n )\n }\n else:\n _host_to_cert = {}\n\n return _host_to_cert" }, { "identifier": "set_expired", "path": "cert_hero/cert_hero.py", "snippet": "def set_expired(certs: CertHero\n | dict[str, str | int | dict[str, str | bool]]\n | dict[str, CertHero]\n | dict[str, dict[str, str | int | dict[str, str | bool]]]\n | Iterable[CertHero]\n | Iterable[dict[str, str | int | dict[str, str | bool]]]\n | None,\n _date_from_iso_str=date.fromisoformat) -> None:\n \"\"\"\n Set or update the value for ``Validity > Expired`` (:type:`bool`) on\n each cert in a response from :func:`cert_please()` or :func:`certs_please()`,\n or a serialized version thereof (e.g. 
``json.dumps`` > ``json.loads``).\n\n Example Usage::\n\n >>> from cert_hero import cert_please, set_expired\n >>> cert = cert_please('google.com')\n >>> assert 'Expired' not in cert['Validity']\n >>> set_expired(cert)\n >>> assert 'Expired' in cert['Validity']\n\n \"\"\"\n if not certs:\n return\n\n # cert_please(): given a `CertHero` (or `CertHero`-like) object\n if 'Serial' in certs:\n certs = [certs]\n # certs_please(): given a mapping of `hostname` to `CertHero` (or `CertHero`-like) object\n elif values_fn := getattr(certs, 'values', None):\n certs = values_fn()\n\n today = datetime.utcnow().date()\n\n for _cert in certs:\n if _cert:\n if _validity := _cert.get('Validity'):\n # Use cached attribute `not_after_date` if available (CertHero),\n # else we calculate it on the fly in case of a `dict`.\n not_after_date: date = getattr(_cert, '_not_after_date', None) \\\n or _date_from_iso_str(_validity['Not After'])\n # Set the `Validity > Expired` value (bool)\n _validity['Expired'] = not_after_date < today" } ]
import argparse
import sys
from . import certs_please, set_expired
1,297
"""Console script for cert_hero.""" def main(): """Console script for cert_hero.""" parser = argparse.ArgumentParser(prog='ch', description='Retrieve the SSL certificate(s) for one or more given host') parser.add_argument('hosts', nargs='*') args = parser.parse_args() host_to_cert = certs_please(args.hosts)
"""Console script for cert_hero.""" def main(): """Console script for cert_hero.""" parser = argparse.ArgumentParser(prog='ch', description='Retrieve the SSL certificate(s) for one or more given host') parser.add_argument('hosts', nargs='*') args = parser.parse_args() host_to_cert = certs_please(args.hosts)
set_expired(host_to_cert)
1
2023-10-16 19:02:05+00:00
2k
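The CLI above leans on `nargs='*'` to collect any number of hostnames into a list for `certs_please`. A tiny standalone demonstration of that argparse behaviour:

import argparse

parser = argparse.ArgumentParser(prog="ch")
parser.add_argument("hosts", nargs="*")  # zero or more positional hostnames
args = parser.parse_args(["google.com", "cnn.com"])
print(args.hosts)  # ['google.com', 'cnn.com']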
KosinskiLab/pyTME
tme/matching_optimization.py
[ { "identifier": "rigid_transform", "path": "tme/matching_utils.py", "snippet": "def rigid_transform(\n coordinates: NDArray,\n rotation_matrix: NDArray,\n out: NDArray,\n translation: NDArray,\n use_geometric_center: bool = False,\n coordinates_mask: NDArray = None,\n out_mask: NDArray = None,\n center: NDArray = None,\n) -> None:\n \"\"\"\n Apply a rigid transformation (rotation and translation) to given coordinates.\n\n Parameters\n ----------\n coordinates : NDArray\n An array representing the coordinates to be transformed [d x N].\n rotation_matrix : NDArray\n The rotation matrix to be applied [d x d].\n translation : NDArray\n The translation vector to be applied [d].\n out : NDArray\n The output array to store the transformed coordinates.\n coordinates_mask : NDArray, optional\n An array representing the mask for the coordinates [d x T].\n out_mask : NDArray, optional\n The output array to store the transformed coordinates mask.\n use_geometric_center : bool, optional\n Whether to use geometric or coordinate center.\n\n Returns\n -------\n None\n \"\"\"\n coordinate_dtype = coordinates.dtype\n center = coordinates.mean(axis=1) if center is None else center\n if not use_geometric_center:\n coordinates = coordinates - center[:, None]\n\n np.matmul(rotation_matrix, coordinates, out=out)\n if use_geometric_center:\n axis_max, axis_min = out.max(axis=1), out.min(axis=1)\n axis_difference = axis_max - axis_min\n translation = np.add(translation, center - axis_max + (axis_difference // 2))\n else:\n translation = np.add(translation, np.subtract(center, out.mean(axis=1)))\n\n out += translation[:, None]\n if coordinates_mask is not None and out_mask is not None:\n if not use_geometric_center:\n coordinates_mask = coordinates_mask - center[:, None]\n np.matmul(rotation_matrix, coordinates_mask, out=out_mask)\n out_mask += translation[:, None]\n\n if not use_geometric_center and coordinate_dtype != out.dtype:\n np.subtract(out.mean(axis=1), out.astype(int).mean(axis=1), out=translation)\n out += translation[:, None]" }, { "identifier": "euler_to_rotationmatrix", "path": "tme/matching_utils.py", "snippet": "def euler_to_rotationmatrix(angles: Tuple[float]) -> NDArray:\n \"\"\"\n Convert Euler angles to a rotation matrix.\n\n Parameters\n ----------\n angles : tuple\n A tuple representing the Euler angles in degrees.\n\n Returns\n -------\n NDArray\n The generated rotation matrix.\n \"\"\"\n if len(angles) == 1:\n angles = (angles, 0, 0)\n rotation_matrix = (\n Rotation.from_euler(\"zyx\", angles, degrees=True).as_matrix().astype(np.float32)\n )\n return rotation_matrix" } ]
from typing import Tuple, Dict
from abc import ABC, abstractmethod
from numpy.typing import NDArray
from scipy.optimize import (
    differential_evolution,
    LinearConstraint,
    basinhopping,
)
from scipy.ndimage import laplace
from scipy.spatial import KDTree
from .matching_utils import rigid_transform, euler_to_rotationmatrix
import numpy as np
1,363
""" Implements various methods for non-exhaustive template matching based on numerical optimization. Copyright (c) 2023 European Molecular Biology Laboratory Author: Valentin Maurer <valentin.maurer@embl-hamburg.de> """ class MatchCoordinatesToDensity(ABC): """ A class to template match coordinate sets. Parameters ---------- target_coordinates : NDArray The coordinates of the target. template_coordinates : NDArray The coordinates of the template. target_weights : NDArray The weights of the target. template_weights : NDArray The weights of the template. sampling_rate : NDArray The size of the voxel. template_mask_coordinates : NDArray, optional The coordinates of the template mask. Default is None. target_mask_coordinates : NDArray, optional The coordinates of the target mask. Default is None. **kwargs : dict, optional Other keyword arguments. """ def __init__( self, target_coordinates: NDArray, template_coordinates: NDArray, target_weights: NDArray, template_weights: NDArray, sampling_rate: NDArray, template_mask_coordinates: NDArray = None, target_mask_coordinates: NDArray = None, **kwargs, ): target, _, origin = FitRefinement.array_from_coordinates( target_coordinates, target_weights, sampling_rate ) self.target_density = target self.target_origin = origin self.sampling_rate = sampling_rate self.template_weights = template_weights self.template_coordinates = template_coordinates self.template_coordinates_rotated = np.empty( self.template_coordinates.shape, dtype=np.float32 ) self.target_mask_density = None if target_mask_coordinates is not None: target_mask, *_ = FitRefinement.array_from_coordinates( coordinates=target_mask_coordinates.astype(np.float32), weights=np.ones(target_mask_coordinates.shape[1]), shape=self.target_density.shape, origin=self.target_origin, sampling_rate=self.sampling_rate, ) self.target_mask_density = target_mask self.template_mask_coordinates = None self.template_mask_coordinates_rotated = None if template_mask_coordinates is not None: self.template_mask_coordinates = template_mask_coordinates self.template_mask_coordinates_rotated = np.empty( self.template_mask_coordinates.shape, dtype=np.float32 ) def __call__(self, x: NDArray): """ Return the score for a given transformation. Parameters ---------- x : NDArray The input transformation parameters. Returns ------- float The negative score from the scoring function. """ translation, rotation = x[:3], x[3:] rotation_matrix = euler_to_rotationmatrix(rotation)
""" Implements various methods for non-exhaustive template matching based on numerical optimization. Copyright (c) 2023 European Molecular Biology Laboratory Author: Valentin Maurer <valentin.maurer@embl-hamburg.de> """ class MatchCoordinatesToDensity(ABC): """ A class to template match coordinate sets. Parameters ---------- target_coordinates : NDArray The coordinates of the target. template_coordinates : NDArray The coordinates of the template. target_weights : NDArray The weights of the target. template_weights : NDArray The weights of the template. sampling_rate : NDArray The size of the voxel. template_mask_coordinates : NDArray, optional The coordinates of the template mask. Default is None. target_mask_coordinates : NDArray, optional The coordinates of the target mask. Default is None. **kwargs : dict, optional Other keyword arguments. """ def __init__( self, target_coordinates: NDArray, template_coordinates: NDArray, target_weights: NDArray, template_weights: NDArray, sampling_rate: NDArray, template_mask_coordinates: NDArray = None, target_mask_coordinates: NDArray = None, **kwargs, ): target, _, origin = FitRefinement.array_from_coordinates( target_coordinates, target_weights, sampling_rate ) self.target_density = target self.target_origin = origin self.sampling_rate = sampling_rate self.template_weights = template_weights self.template_coordinates = template_coordinates self.template_coordinates_rotated = np.empty( self.template_coordinates.shape, dtype=np.float32 ) self.target_mask_density = None if target_mask_coordinates is not None: target_mask, *_ = FitRefinement.array_from_coordinates( coordinates=target_mask_coordinates.astype(np.float32), weights=np.ones(target_mask_coordinates.shape[1]), shape=self.target_density.shape, origin=self.target_origin, sampling_rate=self.sampling_rate, ) self.target_mask_density = target_mask self.template_mask_coordinates = None self.template_mask_coordinates_rotated = None if template_mask_coordinates is not None: self.template_mask_coordinates = template_mask_coordinates self.template_mask_coordinates_rotated = np.empty( self.template_mask_coordinates.shape, dtype=np.float32 ) def __call__(self, x: NDArray): """ Return the score for a given transformation. Parameters ---------- x : NDArray The input transformation parameters. Returns ------- float The negative score from the scoring function. """ translation, rotation = x[:3], x[3:] rotation_matrix = euler_to_rotationmatrix(rotation)
rigid_transform(
0
2023-10-20 13:46:01+00:00
2k
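The `__call__` scorer in this record converts the optimizer's Euler angles into a matrix via `euler_to_rotationmatrix` before applying `rigid_transform`. A short sketch of that conversion with SciPy, using the same "zyx"/degrees convention as the snippet above; the angles are illustrative:

import numpy as np
from scipy.spatial.transform import Rotation

angles = (30.0, 0.0, 0.0)  # illustrative Euler angles in degrees
R = Rotation.from_euler("zyx", angles, degrees=True).as_matrix()
# A proper rotation: orthogonal with determinant +1
assert np.allclose(R @ R.T, np.eye(3), atol=1e-6)
assert np.isclose(np.linalg.det(R), 1.0)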
hookla/DreamTeamGPT
dream_team_gpt/main.py
[ { "identifier": "Meeting", "path": "dream_team_gpt/meeting.py", "snippet": "class Meeting:\n idea: str\n config: Path = None\n\n def __post_init__(self) -> None:\n \"\"\"Create agents\"\"\"\n client_factory = ai_client_factory(\n AIClientConfig(\n client_type=AIClientType.ChatGPT,\n model=Models.GPT4,\n api_key=os.environ[\"openai.api_key\"],\n )\n )\n if self.config:\n sme_dict = parse_yaml_config(self.config)\n else:\n sme_dict = DEFAULT_SME_DICT\n self.smes = [SME(client_factory=client_factory, **d) for d in sme_dict]\n self.chairman = Chairman(client_factory, self.smes)\n self.refiner = IdeaRefiner(client_factory, \"Refiner\")\n\n def run(self) -> None:\n \"\"\"Run the meeting to discuss the idea\"\"\"\n transcript = Transcript(self.idea)\n print_with_wrap(transcript)\n refined_idea = self.refiner.refine_idea(self.idea)\n transcript.refined_idea = refined_idea\n print_with_wrap(refined_idea)\n while not self.chairman.decide_if_meeting_over(transcript):\n self.run_discussion_round(transcript)\n\n def run_discussion_round(self, transcript: str) -> None:\n logger.info(\"running next discussion round\\n\")\n speaker: SME = self.chairman.decide_next_speaker(transcript)\n opinion = speaker.opinion(transcript)\n print_with_wrap(f\"\\033[94m{speaker.name}\\033[0m: {opinion}\\n\")\n if opinion.strip().rstrip(\".\").upper() != NO_COMMENT:\n transcript += opinion" }, { "identifier": "configure_logging", "path": "dream_team_gpt/utils/logging.py", "snippet": "def configure_logging(verbose: int = 0) -> None:\n logging_levels = {0: \"ERROR\", 1: \"INFO\", 2: \"DEBUG\"}\n logger.remove(0)\n logger.add(sys.stdout, level=logging_levels.get(verbose, \"ERROR\"))\n logger.add(\"dream_team_gpt.log\", level=\"DEBUG\")" } ]
from dataclasses import dataclass
from pathlib import Path
from dotenv import load_dotenv
from dream_team_gpt.meeting import Meeting
from dream_team_gpt.utils import configure_logging
import os
import click
655
@click.command()
@click.option(
    "--idea",
    "-i",
    type=str,
    required=True,
    help="your idea for the team to discuss. Please use double quotes",
)
@click.option(
    "--config",
    "-c",
    type=click.Path(exists=True),
    default=None,
    help="yaml file with team personalities details",
)
@click.option("-v", "--verbose", default=1, count=True)
def run_meeting(idea: str, config: Path = None, verbose: int = 1) -> None:
    print(idea)
configure_logging(verbose)
1
2023-10-18 22:45:50+00:00
2k
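`run_meeting` above pairs a required `--idea` option with a counted `-v` flag that `configure_logging` maps onto ERROR/INFO/DEBUG. A pared-down sketch of that click wiring with the level mapping inlined; the command name is made up:

import click

@click.command()
@click.option("--idea", "-i", type=str, required=True)
@click.option("-v", "--verbose", count=True)
def demo(idea: str, verbose: int) -> None:
    # Mirrors configure_logging's 0/1/2 mapping from the record's context.
    level = {0: "ERROR", 1: "INFO", 2: "DEBUG"}.get(verbose, "DEBUG")
    click.echo(f"[{level}] discussing: {idea}")

if __name__ == "__main__":
    demo()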
amrahhh/sqla_async_orm_queries
examples/test.py
[ { "identifier": "Model", "path": "sqla_async_orm_queries/models.py", "snippet": "class Model(Base):\n __abstract__ = True\n\n @classmethod\n async def create(cls, data: dict):\n async with SessionLocal() as session:\n try:\n data = cls(**data)\n session.add(data)\n await session.commit()\n return data\n except Exception as e:\n await session.rollback()\n raise e\n\n @classmethod\n async def select_one(cls, *args: BinaryExpression):\n async with SessionLocal() as session:\n result = await session.execute(select(cls).where(*args))\n data = result.scalar()\n return data\n\n @classmethod\n async def select_all(cls, *args: BinaryExpression):\n async with SessionLocal() as session:\n result = await session.execute(select(cls).where(*args))\n data = result.scalars().all()\n return data\n\n @classmethod\n async def update(cls, data: dict, *args: BinaryExpression):\n async with SessionLocal() as session:\n try:\n query = update(cls).where(*args).values(**data).returning(cls.id)\n db_data = await session.execute(query)\n db_data = db_data.scalar()\n await session.commit()\n return db_data\n except Exception as e:\n await session.rollback()\n raise e\n\n @classmethod\n async def delete(cls, *args: BinaryExpression):\n async with SessionLocal() as session:\n try:\n query = delete(cls).where(*args)\n db_data = await session.execute(query)\n await session.commit()\n return db_data\n except Exception as e:\n await session.rollback()\n raise e\n\n @classmethod\n async def select_with_pagination(\n cls, *args: BinaryExpression, page: int = 1, size: int = 10\n ):\n async with SessionLocal() as session:\n query = select(cls).where(*args).offset((page - 1) * size).limit(size)\n result = await session.execute(query)\n data = result.scalars().all()\n return data\n\n async def apply(self):\n async with SessionLocal() as session:\n try:\n session.add(self)\n await session.commit()\n except Exception as e:\n await session.rollback()\n raise e\n\n @classmethod\n async def apply_all(self, models: List[TModels]):\n async with SessionLocal() as session:\n try:\n session.add_all(models)\n await session.commit()\n except Exception as e:\n await session.rollback()\n raise e" }, { "identifier": "init_session", "path": "sqla_async_orm_queries/models.py", "snippet": "def init_session(session: AsyncSession):\n global SessionLocal, INITIALIZED\n if isinstance(session, (async_sessionmaker, sessionmaker)) and issubclass(\n session.class_, AsyncSession\n ):\n SessionLocal = session\n INITIALIZED = True\n return True\n raise TypeError(\"You need to use SQLAlchemy `AsyncSession`\")" } ]
import asyncio
from sqlalchemy import Column, String, Integer, and_
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
from sqla_async_orm_queries import Model, init_session
836
# create your engine
engine = create_async_engine(
    "postgresql+asyncpg://test_user:12345@localhost/test_db",
    echo=True,
)

# create your SessionLocal
SessionLocal = async_sessionmaker(
    expire_on_commit=True,
    class_=AsyncSession,
    bind=engine,
)
class Test(Model):
0
2023-10-17 09:42:44+00:00
2k
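The example wires `create_async_engine` to an `async_sessionmaker`, the same factory shape that `init_session` validates. A self-contained sketch of that session lifecycle against an in-memory SQLite database; the aiosqlite driver is assumed to be installed, whereas the record itself targets asyncpg/PostgreSQL:

import asyncio
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine

# In-memory SQLite stands in for the record's PostgreSQL DSN.
engine = create_async_engine("sqlite+aiosqlite:///:memory:")
SessionLocal = async_sessionmaker(bind=engine, class_=AsyncSession, expire_on_commit=True)

async def main() -> None:
    async with SessionLocal() as session:
        result = await session.execute(text("SELECT 1"))
        print(result.scalar())  # -> 1

asyncio.run(main())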
MeetingAgent/MeetingAgent-Core
meeting_buddy.py
[ { "identifier": "MyTTS", "path": "voice_cloning/clone.py", "snippet": "class MyTTS:\n def __init__(self):\n # Get device\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n self.tts = TTS(\"tts_models/en/ljspeech/tacotron2-DDC\")\n self.use_default_speaker = False\n self.speaker_wav = self._get_speaker()\n\n def _get_speaker(self):\n # speaker audio file\n wav_files = glob.glob(\"voice_cloning/audio_samples/*.wav\")\n print(\"WAV FILES: \", wav_files)\n if wav_files:\n if self.use_default_speaker:\n wav_file = \"voice_cloning/audio_samples/default_audio.wav\"\n else: \n wav_file = wav_files[0] if wav_files[0] != \"default_audio.wav\" else FileNotFoundError(\"Add your audio.wav to /voice_cloning/audio_samples\")\n\n print(\"WAV FILE: \", wav_file)\n return wav_file\n\n def text_to_speech(self, text, output_file):\n self.tts.tts_with_vc_to_file(\n text,\n speaker_wav=self.speaker_wav,\n file_path=output_file\n )" }, { "identifier": "gpt_4_answer", "path": "meeting_buddy_system/gpt_utils.py", "snippet": "def gpt_4_answer(\n messages,\n model=\"gpt-4\",\n max_tokens=750,\n temperature=0.6,\n top_p=0.9,\n frequency_penalty=1.2,\n presence_penalty=0.5,\n):\n completion_params = {\n \"model\": model,\n \"messages\": messages,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"frequency_penalty\": frequency_penalty,\n \"presence_penalty\": presence_penalty, \n \"max_tokens\": max_tokens,\n }\n\n response = openai.ChatCompletion.create(**completion_params)\n\n return response[\"choices\"][0][\"message\"][\"content\"]" }, { "identifier": "gpt_3_5_turbo_16k_answer", "path": "meeting_buddy_system/gpt_utils.py", "snippet": "def gpt_3_5_turbo_16k_answer(\n messages,\n model=\"gpt-3.5-turbo-16k\",\n max_tokens=750,\n temperature=0.6,\n top_p=0.9,\n frequency_penalty=1.2,\n presence_penalty=0.5,\n):\n completion_params = {\n \"model\": model,\n \"messages\": messages,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"frequency_penalty\": frequency_penalty,\n \"presence_penalty\": presence_penalty, \n \"max_tokens\": max_tokens,\n }\n\n response = openai.ChatCompletion.create(**completion_params)\n\n return response[\"choices\"][0][\"message\"][\"content\"]" }, { "identifier": "MEETING_BUDDY_MAIN_PROMPT", "path": "meeting_buddy_system/prompts.py", "snippet": "MEETING_BUDDY_MAIN_PROMPT = \"\"\"\n<!-->IMPORTANT CONTEXT<--!>\nAn answer should be coherent and include some point form arguments.\n<!-->IMPORTANT CONTEXT<--!>\n\nHere is context for the meeting: {meeting_context}\n\nGiven a question, answer it coherently and several possible points that can be derived from the question.\nIf the question is simple, like an arithmetic question, no need to further explain any detail. Just give the result with a short explanation of how it was achieved it.\n\"\"\"" }, { "identifier": "EXTRACT_QUERY_PROMPT", "path": "meeting_buddy_system/prompts.py", "snippet": "EXTRACT_QUERY_PROMPT = \"\"\"\nGiven some input text, extract a query from the text. You are to do this in the language of the text. \nIf no query exists, interpret the text as is and see if a question can be captured from it.\n\"\"\"" } ]
import pyaudio
import wave
import whisper
import threading
import time
import pygame
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.switch import Switch
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.uix.textinput import TextInput
from kivy.core.window import Window
from kivy.support import install_twisted_reactor
from gtts import gTTS
from pydub import AudioSegment
from ftlangdetect import detect
from voice_cloning.clone import MyTTS
from meeting_buddy_system.gpt_utils import gpt_4_answer, gpt_3_5_turbo_16k_answer
from meeting_buddy_system.prompts import MEETING_BUDDY_MAIN_PROMPT, EXTRACT_QUERY_PROMPT
1,587
# Audio Processing

# GUI
install_twisted_reactor()

# gtts text to speech

# personalized voice text to speech

# Local

recording = False
audio_thread = None


def get_audio() -> None:
    global recording
    recording = True
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)
    frames = []

    try:
        print("Recording...")
        while recording:
            data = stream.read(1024)
            frames.append(data)
        print("Finished recording.")
    finally:
        stream.stop_stream()
        stream.close()
        p.terminate()

    wf = wave.open('meeting_buddy_audio/input_audio.wav', 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
    wf.setframerate(44100)
    wf.writeframes(b''.join(frames))
    wf.close()


def stop_audio() -> None:
    global recording
    recording = False


def whisper_process_audio(audio_file: str) -> str:
    model = whisper.load_model("base")  # for multilingual
    result = model.transcribe(audio_file)
    return result["text"]


def detect_language(text: str) -> str:
    cleaned_text = text.replace('\n', ' ')
    return detect(text=cleaned_text, low_memory=True)


def gtts_text_to_speech(text: str, output_file='meeting_buddy_audio/output.mp3') -> None:
    language = detect_language(text=text)["lang"]
    tts = gTTS(text=text, lang=language, slow=False)
    tts.save(output_file)
    print(f'Audio saved as {output_file}')


def voice_clone_text_to_speech(text: str, output_file='meeting_buddy_audio/output.wav') -> None:
    app.tts.text_to_speech(text, output_file)
    print(f'Audio saved as {output_file}')


# initialize mixer
pygame.mixer.init()


def play_audio(file_path):
    pygame.mixer.music.load(file_path)
    pygame.mixer.music.play()


def stop_audio_playback():
    pygame.mixer.music.stop()


def gpt_pipeline(meeting_context: str, input_text: str) -> str:
    """
    Extract query from text and produce the final answer to query.
    """
    print("\n\n\n###### EXTRACTING QUERY FROM TEXT ######\n\n\n")
    messages = [{"role": "system", "content": EXTRACT_QUERY_PROMPT},
                {"role": "user", "content": input_text}]
query = gpt_3_5_turbo_16k_answer(messages=messages)
2
2023-10-18 06:50:56+00:00
2k
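`whisper_process_audio` in this record is a thin wrapper over openai-whisper. The essential two calls, runnable on any wav file; the path below is the record's own placeholder, and `load_model` downloads the checkpoint on first use:

import whisper

# "base" is a multilingual checkpoint, as the record's comment notes.
model = whisper.load_model("base")
result = model.transcribe("meeting_buddy_audio/input_audio.wav")
print(result["text"])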
KaichengGroup/FUSE-Flow
FUSE_Flow/other_modules/adaptive_unet.py
[ { "identifier": "AEInit", "path": "FUSE_Flow/other_modules/utils.py", "snippet": "class AEInit(str, Enum):\n zero = 'zero'\n xavier = 'xavier'\n\n @classmethod\n def get_values(cls):\n return tuple(map(lambda c: c.value, cls))" }, { "identifier": "ConvBlock", "path": "FUSE_Flow/other_modules/conv_modules/conv_block.py", "snippet": "class ConvBlock(nn.Module):\n def __init__(self, conv, c_in, c_out, kernel_size, stride, padding, init, attention_type, attn_red_ratio):\n super().__init__()\n self.block = nn.Sequential(\n nn.BatchNorm2d(c_in),\n nn.LeakyReLU(negative_slope=0.2),\n conv(c_in, c_out, kernel_size=kernel_size, stride=stride, padding=padding),\n )\n\n # initialize weights and biases\n if init == AEInit.zero:\n self.block[-1].weight.data.zero_()\n self.block[-1].bias.data.zero_()\n elif init == AEInit.xavier:\n for name, param in self.block.named_parameters():\n if name.endswith('.bias'):\n param.data.fill_(0)\n elif name.endswith('.weight'):\n if len(param.shape) >= 2:\n bound = math.sqrt(6) / math.sqrt(param.shape[0] + param.shape[1])\n param.data.uniform_(-bound, bound)\n\n def forward(self, x):\n return self.block(x)" }, { "identifier": "UpsampleBlock", "path": "FUSE_Flow/other_modules/gated_resnet.py", "snippet": "class UpsampleBlock(GatedResidualNetBase):\n def __init__(self, c_in, c_out, c_hid, n_layers, init, attention_type, attn_red_ratio):\n super().__init__(c_in, c_out, c_hid, n_layers, nn.ConvTranspose2d,\n 4, 2, 1, init, attention_type, attn_red_ratio)\n\n def forward(self, x):\n return self.nn(x)" }, { "identifier": "DownsampleBlock", "path": "FUSE_Flow/other_modules/gated_resnet.py", "snippet": "class DownsampleBlock(GatedResidualNetBase):\n def __init__(self, c_in, c_out, c_hid, n_layers, init, attention_type, attn_red_ratio):\n super().__init__(c_in, c_out, c_hid, n_layers, nn.Conv2d,\n 4, 2, 1, init, attention_type, attn_red_ratio)\n\n def forward(self, x):\n return self.nn(x)" } ]
import math import pytorch_lightning as pl import torch from torch import nn from FUSE_Flow.other_modules.utils import AEInit from .conv_modules.conv_block import ConvBlock from .gated_resnet import UpsampleBlock, DownsampleBlock
1,391
class AdaptiveUNet(pl.LightningModule): """SR network architecture that uses Residual-in-Residual Dense Blocks. Implement Figure (3) in ESRGAN paper. Parameters ---------- d_x : int Priority dimension (height or width) of input chosen for downstream comparisons. d_y : int Priority dimension (height or width) of output chosen for downstream comparisons. add_depth : int Additional depth on top of that required based on difference in scale of input and output. Largest value this value can take is the largest n where input_shape[1]/factor**n is whole and odd. factor: int Factor at which data expands or shrinks. Currently only works for factor = 2. c_in : int Number of channels of input tensor. c_hid : int Number of channels of inner convolutional layers. n_conv : int Number of conv layers. no_skip : bool To include skip connection between mirrored layers. attention_type: AttentionType type of attention implemented in gated conv blocks attn_red_ratio : float # default 16 Minimum value = 1, Maximum value = c_in, set reduction from 1 to c_in using attn_red_ratio Smaller attn_red_ratio --> Less Parameters Hyperparameter to vary capacity and computational cost of SE blocks in the network. """ def __init__(self, d_x, d_y, add_depth, factor, c_in, c_hid, n_conv, no_skip, attention_type, attn_red_ratio): super().__init__() self.save_hyperparameters() self.no_skip = no_skip # double the number of channels needed if no skip connection if no_skip: c_inter = c_hid else: c_inter = c_hid//2 # larger of the input and output priority dimension d_l = max(d_x, d_y) # larger of the input and output priority dimension d_s = min(d_x, d_y) # scale difference between input and output scale = int(d_l / d_s) # max depth of U-Net max_depth = int(math.log(scale, factor) + 1 + add_depth) # represents dimension size of unwanted depths denominator = d_l // (factor ** (max_depth - 1)) # number of down-sampling blocks n_down = math.floor(math.log(d_x / denominator, factor)) # number of up-sampling layers in encoder n_enc_up = max_depth - 1 - n_down - math.ceil(math.log(scale, factor) % 1) # number of up-sampling layers in decoder n_dec_up = math.floor(math.log(d_y / denominator, factor)) # discrepancy between size of input priority dimension and nearest larger multiple of 2 k_up = d_l // (factor ** math.floor(math.log(scale, factor))) - d_s # discrepancy between size of input priority dimension and nearest smaller multiple of 2 k_down = d_s - d_l // (factor ** math.ceil(math.log(scale, factor))) # need resizing if data is not multiple of 2 self.need_resizing = k_up or k_down # encoder if not no_skip: c_up = c_inter // (factor ** (n_down+self.need_resizing)) self.up_resizer = nn.Sequential( *[ConvBlock(nn.ConvTranspose2d, c_in, c_up,

3, 1, 1, AEInit.xavier, attention_type, attn_red_ratio)] +
0
2023-10-19 06:49:31+00:00
2k
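The AdaptiveUNet in the FUSE-Flow record above derives all of its layer counts from the input/output scale ratio. A minimal sketch of that depth arithmetic, taken directly from the constructor's formulas; the sizes d_x=32, d_y=128, factor=2, add_depth=0 are illustrative values only:

import math

d_x, d_y, factor, add_depth = 32, 128, 2, 0
d_l, d_s = max(d_x, d_y), min(d_x, d_y)
scale = int(d_l / d_s)                                        # 4
max_depth = int(math.log(scale, factor) + 1 + add_depth)      # 3
denominator = d_l // (factor ** (max_depth - 1))              # 32
n_down = math.floor(math.log(d_x / denominator, factor))      # 0 down-sampling blocks
n_dec_up = math.floor(math.log(d_y / denominator, factor))    # 2 decoder up-sampling blocks
print(max_depth, n_down, n_dec_up)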
zytedata/zyte-spider-templates
zyte_spider_templates/spiders/ecommerce.py
[ { "identifier": "document_enum", "path": "zyte_spider_templates/documentation.py", "snippet": "def document_enum(func):\n return func" }, { "identifier": "BaseSpider", "path": "zyte_spider_templates/spiders/base.py", "snippet": "class BaseSpider(scrapy.Spider):\n custom_settings: Dict[str, Any] = {\n \"ZYTE_API_TRANSPARENT_MODE\": True,\n \"_ZYTE_API_USER_AGENT\": f\"zyte-spider-templates/{version('zyte-spider-templates')}\",\n }\n\n metadata: Dict[str, Any] = {\n \"template\": True,\n \"title\": \"Base\",\n \"description\": \"Base template.\",\n }\n\n _NEXT_PAGE_PRIORITY: int = 100\n\n @classmethod\n def from_crawler(cls, crawler: Crawler, *args, **kwargs) -> scrapy.Spider:\n spider = super().from_crawler(crawler, *args, **kwargs)\n spider.allowed_domains = [parse_url(spider.args.url).netloc]\n\n if spider.args.geolocation:\n # We set the geolocation in ZYTE_API_PROVIDER_PARAMS for injected\n # dependencies, and in ZYTE_API_AUTOMAP_PARAMS for page object\n # additional requests.\n for component in (\"AUTOMAP\", \"PROVIDER\"):\n default_params = spider.settings.getdict(f\"ZYTE_API_{component}_PARAMS\")\n default_params[\"geolocation\"] = spider.args.geolocation\n spider.settings.set(\n f\"ZYTE_API_{component}_PARAMS\",\n default_params,\n priority=ARG_SETTING_PRIORITY,\n )\n\n if spider.args.max_requests:\n spider.settings.set(\n \"ZYTE_API_MAX_REQUESTS\",\n spider.args.max_requests,\n priority=ARG_SETTING_PRIORITY,\n )\n return spider" }, { "identifier": "BaseSpiderParams", "path": "zyte_spider_templates/spiders/base.py", "snippet": "class BaseSpiderParams(BaseModel):\n url: str = Field(\n title=\"URL\",\n description=\"Initial URL for the crawl.\",\n pattern=r\"^https?:\\/\\/[^:\\/\\s]+(:\\d{1,5})?(\\/[^\\s]*)*(#[^\\s]*)?$\",\n )\n geolocation: Optional[Geolocation] = Field(\n title=\"Geolocation\",\n description=\"ISO 3166-1 alpha-2 2-character string specified in \"\n \"https://docs.zyte.com/zyte-api/usage/reference.html#operation/extract/request/geolocation.\",\n default=None,\n json_schema_extra={\n \"enumMeta\": {\n code: {\n \"title\": GEOLOCATION_OPTIONS_WITH_CODE[code],\n }\n for code in Geolocation\n }\n },\n )\n max_requests: Optional[int] = Field(\n description=(\n \"The maximum number of Zyte API requests allowed for the crawl.\\n\"\n \"\\n\"\n \"Requests with error responses that cannot be retried or exceed \"\n \"their retry limit also count here, but they incur in no costs \"\n \"and do not increase the request count in Scrapy Cloud.\"\n ),\n default=100,\n json_schema_extra={\n \"widget\": \"request-limit\",\n },\n )" } ]
from enum import Enum from typing import Any, Callable, Dict, Iterable, Optional, Union from pydantic import Field from scrapy import Request from scrapy.crawler import Crawler from scrapy_poet import DummyResponse from scrapy_spider_metadata import Args from zyte_common_items import ProbabilityRequest, Product, ProductNavigation from zyte_spider_templates.documentation import document_enum from zyte_spider_templates.spiders.base import BaseSpider, BaseSpiderParams import scrapy
1,014
@document_enum class EcommerceCrawlStrategy(str, Enum): full: str = "full" """Follow most links within the domain of URL in an attempt to discover and extract as many products as possible.""" navigation: str = "navigation" """Follow pagination, subcategories, and product detail pages.""" pagination_only: str = "pagination_only" """Follow pagination and product detail pages. SubCategory links are ignored. Use this when some subCategory links are misidentified by ML-extraction.""" @document_enum class ExtractFrom(str, Enum): httpResponseBody: str = "httpResponseBody" """Use HTTP responses. Cost-efficient and fast extraction method, which works well on many websites.""" browserHtml: str = "browserHtml" """Use browser rendering. Often provides the best quality."""
class EcommerceSpiderParams(BaseSpiderParams):
2
2023-10-18 10:58:44+00:00
2k
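The crawl-strategy options in the record above are plain str-valued Enums, which lets spider arguments be parsed straight from strings. A minimal stdlib sketch of that pattern; the class mirrors the record's names but is not the library code:

from enum import Enum

class CrawlStrategy(str, Enum):
    full = "full"
    navigation = "navigation"
    pagination_only = "pagination_only"

assert CrawlStrategy("full") is CrawlStrategy.full   # parse by value
assert CrawlStrategy.full == "full"                  # compares equal to its underlying str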
Bio-OS/bio-mate
bio_mate/BaseWidget.py
[ { "identifier": "gen_data_url_img", "path": "bio_mate/defs.py", "snippet": "def gen_data_url_img(img_path: Path):\n base64_utf8_str = base64.b64encode(img_path.read_bytes()).decode(\"utf-8\")\n ext = str(img_path).split(\".\")[-1]\n data_url = f\"data:image/{ext};base64,{base64_utf8_str}\"\n\n return data_url" }, { "identifier": "get_img", "path": "bio_mate/defs.py", "snippet": "def get_img(type):\n img_data_url = all_defs[type][\"sample_img\"]\n if img_data_url:\n return img_data_url\n\n img_name = all_defs[type][\"meta\"][\"sample_img\"]\n if not img_name:\n return\n\n img_path = plot_defs / type / img_name\n data_url = gen_data_url_img(img_path)\n\n all_defs[type][\"sample_img\"] = data_url\n\n return data_url" }, { "identifier": "list_files", "path": "bio_mate/defs.py", "snippet": "def list_files(path: str):\n custom_path = Path(path)\n\n if not custom_path.exists():\n print(f\"{path} not exists\")\n return\n\n return [\n {\"name\": item.name, \"is_dir\": item.is_dir()} for item in custom_path.iterdir()\n ]" }, { "identifier": "prepare_plot_env", "path": "bio_mate/defs.py", "snippet": "def prepare_plot_env(params: dict):\n now = datetime.utcnow()\n time_str = now.strftime(\"%Y%m%d_%H%M%S_%f\")\n\n current_plot = current_file.parent / \"log_plot\" / time_str\n current_plot.mkdir(exist_ok=True, parents=True)\n\n input_json = current_plot / \"input.json\"\n input_json.write_text(json.dumps(params, indent=2))\n\n return current_plot" } ]
from ipywidgets import DOMWidget from traitlets import Bool, Unicode, Dict, Int from bio_mate.defs import gen_data_url_img, get_img, list_files, prepare_plot_env import json import warnings import subprocess
848
module_name = "bio-mate" module_version = "1.0.0" class BaseWidget(DOMWidget): _model_name = Unicode("BaseWidgetModel").tag(sync=True) _model_module = Unicode(module_name).tag(sync=True) _model_module_version = Unicode(module_version).tag(sync=True) _view_name = Unicode("BaseWidgetView").tag(sync=True) _view_module = Unicode(module_name).tag(sync=True) _view_module_version = Unicode(module_version).tag(sync=True) _view_count = Int(0).tag(sync=True) type = Unicode("").tag(sync=True) count = Int(100).tag(sync=True) all_defs = Dict().tag(sync=True) def handle_messages(self, widget, content: dict, buffers): reqId = content.get("reqId", "") method_name = content.get("method", "") if not reqId or not method_name: print(f"Invalid CommRequest: reqId: {reqId}-{method_name}") return if not hasattr(self, method_name): content["response"] = {"status": "failed", "msg": "NotImplementedError"} self.send(content) return func = getattr(self, method_name) func(content) def __init__(self, **kwargs): super(BaseWidget, self).__init__(**kwargs) # Assign keyword parameters to this object recognized_keys = dir(self.__class__) for key, value in kwargs.items(): if key not in recognized_keys and f"_{key}" not in recognized_keys: warnings.warn(RuntimeWarning(f"Keyword parameter {key} not recognized")) setattr(self, key, value) # Attach the callback event handler self.on_msg(self.handle_messages) def getSampleImage(self, content: dict):
module_name = "bio-mate" module_version = "1.0.0" class BaseWidget(DOMWidget): _model_name = Unicode("BaseWidgetModel").tag(sync=True) _model_module = Unicode(module_name).tag(sync=True) _model_module_version = Unicode(module_version).tag(sync=True) _view_name = Unicode("BaseWidgetView").tag(sync=True) _view_module = Unicode(module_name).tag(sync=True) _view_module_version = Unicode(module_version).tag(sync=True) _view_count = Int(0).tag(sync=True) type = Unicode("").tag(sync=True) count = Int(100).tag(sync=True) all_defs = Dict().tag(sync=True) def handle_messages(self, widget, content: dict, buffers): reqId = content.get("reqId", "") method_name = content.get("method", "") if not reqId or not method_name: print(f"Invalid CommRequest: reqId: {reqId}-{method_name}") return if not hasattr(self, method_name): content["response"] = {"status": "failed", "msg": "NotImplementedError"} self.send(content) return func = getattr(self, method_name) func(content) def __init__(self, **kwargs): super(BaseWidget, self).__init__(**kwargs) # Assign keyword parameters to this object recognized_keys = dir(self.__class__) for key, value in kwargs.items(): if key not in recognized_keys and f"_{key}" not in recognized_keys: warnings.warn(RuntimeWarning(f"Keyword parameter {key} not recognized")) setattr(self, key, value) # Attach the callback event handler self.on_msg(self.handle_messages) def getSampleImage(self, content: dict):
content["response"] = {"status": "ok", "result": get_img(self.type)}
1
2023-10-19 02:15:54+00:00
2k
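handle_messages in the record above routes comm requests to methods by name via getattr, rejecting unknown methods. A minimal dependency-free sketch of that request-routing idea; the message shape {"reqId", "method"} follows the record, while the Dispatcher class itself is hypothetical:

class Dispatcher:
    def ping(self, content):
        content["response"] = {"status": "ok"}

    def handle(self, content):
        method = getattr(self, content.get("method", ""), None)
        if method is None:
            content["response"] = {"status": "failed", "msg": "NotImplementedError"}
        else:
            method(content)
        return content

print(Dispatcher().handle({"reqId": "1", "method": "ping"}))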
iamarunbrahma/llm-prompt-testing
metrics.py
[ { "identifier": "get_embeddings", "path": "utils.py", "snippet": "@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(6))\r\ndef get_embeddings(text, embedding_model=\"text-embedding-ada-002\"):\r\n response = openai.Embedding.create(\r\n model=embedding_model,\r\n input=text,\r\n )\r\n embedding_vectors = response[\"data\"][0][\"embedding\"]\r\n return embedding_vectors\r" }, { "identifier": "get_chat_completion", "path": "utils.py", "snippet": "@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(6))\r\ndef get_chat_completion(config, system_prompt, question):\r\n try:\r\n messages = [\r\n {\"role\": \"system\", \"content\": system_prompt},\r\n {\"role\": \"user\", \"content\": question},\r\n ]\r\n\r\n response = openai.ChatCompletion.create(\r\n model=config[\"model_name\"],\r\n messages=messages,\r\n temperature=config[\"temperature\"],\r\n max_tokens=config[\"max_tokens\"],\r\n top_p=config[\"top_p\"],\r\n frequency_penalty=config[\"frequency_penalty\"],\r\n presence_penalty=config[\"presence_penalty\"],\r\n )\r\n\r\n answer = response[\"choices\"][0][\"message\"][\"content\"]\r\n answer = answer.strip()\r\n return answer\r\n\r\n except OpenAIError as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}:\\n{type(e).__name__}=> {str(e)}\")\r" } ]
from collections import Counter from numpy.linalg import norm from utils import get_embeddings, get_chat_completion import evaluate import streamlit as st import traceback import numpy as np
1,122
class Metrics: def __init__(self, question, context, answer, config, strictness=1): self.question = question self.context = context self.answer = answer self.strictness = strictness config["model_name"] = "gpt-3.5-turbo" self.config = config def rouge_score(self): try: if not self.answer or not self.context: raise ValueError( "Please provide both context and answer to generate Rouge Score." ) rouge = evaluate.load("rouge") results = rouge.compute(predictions=self.answer, references=self.context) rouge1 = np.round(results["rouge1"], 3) rouge2 = np.round(results["rouge2"], 3) rougeL = np.round(results["rougeL"], 3) return rouge1, rouge2, rougeL except Exception as e: func_name = traceback.extract_stack()[-1].name st.error(f"Error in {func_name}: {str(e)}") def bleu_score(self): try: if not self.answer or not self.context: raise ValueError( "Please provide both context and answer to generate BLEU Score." ) bleu = evaluate.load("bleu") results = bleu.compute(predictions=self.answer, references=self.context) return np.round(results["bleu"], 3) except Exception as e: func_name = traceback.extract_stack()[-1].name st.error(f"Error in {func_name}: {str(e)}") def bert_score(self): try: if not self.answer or not self.context: raise ValueError( "Please provide both context and answer to generate BLEU Score." ) bertscore = evaluate.load("bertscore") results = bertscore.compute( predictions=self.answer, references=self.context, lang="en", model_type="distilbert-base-uncased", ) return np.round(results["f1"], 3) except Exception as e: func_name = traceback.extract_stack()[-1].name st.error(f"Error in {func_name}: {str(e)}") def answer_relevancy(self): try: if not self.answer or not self.question: raise ValueError( "Please provide both question and answer to generate Answer Relevancy Score." ) relevancy_prompt = """ Generate question for the given answer. Here are few examples: Answer: The first ODI Cricket World Cup was held in 1975, and the West Indies cricket team won the tournament. Clive Lloyd was the captain of the winning West Indies team. They defeated Australia in the final to become the first-ever ODI Cricket World Cup champions. Question: Which team won the first ODI Cricket World Cup and in which year? Who was the captain of the winning team? Answer: The first president of the United States of America was George Washington. He became president in the year 1789. Washington served as the country's first president from April 30, 1789, to March 4, 1797. Question: Who was the first president of the United States of America and in which year did he become president? Using the answer provided below, generate a question which is relevant to the answer. """ answer_relevancy_score = [] for _ in range(self.strictness): generated_question = get_chat_completion( self.config, relevancy_prompt, self.answer )
question_vec = np.asarray(get_embeddings(self.question.strip()))
0
2023-10-24 17:37:07+00:00
2k
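The rouge_score and bleu_score methods in the record above wrap Hugging Face's evaluate library. A minimal sketch of the same ROUGE call, assuming the evaluate package (and its rouge_score dependency) is installed; the strings are toy data:

import evaluate

rouge = evaluate.load("rouge")
results = rouge.compute(predictions=["the cat sat on the mat"],
                        references=["a cat sat on the mat"])
print(round(results["rouge1"], 3))   # aggregated unigram-overlap F-score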
AVAniketh0905/fluidspy
fluidspylib/fluidspy/numerical/methods/finite_differential.py
[ { "identifier": "CompositeBoundary", "path": "fluidspylib/fluidspy/numerical/boundary/composite.py", "snippet": "class CompositeBoundary:\n children: List[Direction]\n\n def __init__(self, *args) -> None:\n self.children = list(args)\n\n def init_apply(self):\n for child in self.children:\n child.init_apply()\n\n def apply(self):\n for child in self.children:\n child.apply()" }, { "identifier": "Dimension", "path": "fluidspylib/fluidspy/numerical/dim/dimension.py", "snippet": "class Dimension(ABC):\n \"\"\"Abstract class for dimensions.\"\"\"\n\n initial_conditions: SimulationState\n\n def __init__(self, initial_conditions: SimulationState) -> None:\n self.initial_conditions = initial_conditions\n\n @abstractmethod\n def create_grid(\n self, num_points: Union[int, Tuple[int, int]], base_value: float = 0.0\n ):\n init_state = np.zeros(num_points, dtype=float)\n init_state.fill(base_value)\n self.initial_conditions.set_state(init_state)\n\n @abstractmethod\n def convolution():\n pass" }, { "identifier": "MaterialProperties", "path": "fluidspylib/fluidspy/numerical/material_properties/material.py", "snippet": "class MaterialProperties:\n \"\"\"Material properties.\n\n Args:\n name (str): Material name.\n density (float): Material density.(kg/m^3)\n specific_heat (float): Material specific heat.(J/kg.K)\n prandtl (float): Material Prandtl number.\n \"\"\"\n\n name: str\n density: float\n specific_heat: float\n prandtl: float" }, { "identifier": "ThermalProperties", "path": "fluidspylib/fluidspy/numerical/material_properties/material.py", "snippet": "class ThermalProperties(MaterialProperties):\n \"\"\"Thermal properties.\n\n Args:\n name (str): Material name.\n density (float): Material density.(kg/m^3)\n specific_heat (float): Material specific heat.(J/kg.K)\n prandtl (float): Material Prandtl number.\n thermal_conductivity (float): Material thermal conductivity.(W/m.K)\n thermal_expansion_coefficient (float): Material thermal expansion coefficient.(1/K)\n \"\"\"\n\n thermal_conductivity: float\n thermal_expansion_coefficient: float" }, { "identifier": "SimulationState", "path": "fluidspylib/fluidspy/numerical/state.py", "snippet": "class SimulationState:\n state: np.ndarray | NoneType = None\n\n def get_state(self) -> np.ndarray:\n return self.state\n\n def set_state(self, value: np.ndarray):\n self.state = value\n\n def get_dimension(self):\n return self.state.ndim" }, { "identifier": "Step", "path": "fluidspylib/fluidspy/numerical/step.py", "snippet": "class Step:\n time: float\n vec: Vector\n\n def __init__(\n self,\n time: float,\n vec: Vector = Vector(),\n ):\n \"\"\"\n Create the time step and the spatial step.\n\n Args:\n time (float): The time step.\n vec (Vector): The spatial step. Defaults to (0, 0, 0).\n \"\"\"\n\n self.time = time\n self.vec = vec\n\n def __repr__(self) -> str:\n return f\"({self.time}, {self.vec})\"" }, { "identifier": "Vector", "path": "fluidspylib/fluidspy/numerical/step.py", "snippet": "class Vector:\n x: float\n y: float\n z: float\n\n def __init__(self, x=inf, y=inf, z=inf) -> None:\n \"\"\"\n Create the spatial step.\n \"\"\"\n self.x = x\n self.y = y\n self.z = z\n\n def __repr__(self) -> str:\n return f\"({self.x}, {self.y}, {self.z})\"" } ]
from abc import ABC from abc import abstractmethod from typing import List from ..boundary.composite import CompositeBoundary from ..dim import Dimension from ..material_properties import MaterialProperties from ..material_properties import ThermalProperties from ..state import SimulationState from ..step import Step from ..step import Vector import numpy as np
1,020
class FiniteDifferentialMethod(ABC): def __init__( self, state: SimulationState, dim: Dimension, properties: ThermalProperties,
boundary_conditions: CompositeBoundary,
0
2023-10-21 06:55:58+00:00
2k
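FiniteDifferentialMethod above is an abstract base class that concrete solvers must subclass. A minimal sketch of the ABC pattern it relies on; the Scheme and step names are invented for illustration:

from abc import ABC, abstractmethod

class Scheme(ABC):
    @abstractmethod
    def step(self, state):
        ...

class ForwardEuler(Scheme):
    def step(self, state):
        return state  # placeholder update

ForwardEuler().step(0)   # fine; instantiating Scheme() directly would raise TypeError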
zorrobyte/esp32-universal-diesel-heater-controller
main.py
[ { "identifier": "stateMachine", "path": "states/stateMachine.py", "snippet": "def log(message, level=2):\ndef handle_state(current_state, switch_value, exhaust_temp, output_temp):" }, { "identifier": "emergencyStop", "path": "states/emergencyStop.py", "snippet": "def log(message, level=1):\ndef turn_off_pumps(timer):\ndef emergency_stop(reason):" }, { "identifier": "sensors", "path": "lib/sensors.py", "snippet": "def log(message, level=1):\ndef read_temp(analog_value, sensor_type, sensor_beta, sensor_name=\"output\"):\ndef read_output_temp():\ndef read_exhaust_temp():\nTEMP_HISTORY_LENGTH = 3\n R0 = params['R0']\n T0 = params['T0']\n BETA = sensor_beta" }, { "identifier": "networking", "path": "lib/networking.py", "snippet": "def init_wifi():\ndef init_mqtt():\ndef connect_wifi():\ndef connect_mqtt():\ndef publish_sensor_values():\ndef mqtt_callback(topic, msg):\ndef run_networking():" }, { "identifier": "fanPID", "path": "lib/fanPID.py", "snippet": "def rpm_interrupt_handler(pin):\n def __init__(self, kp, ki, kd):\n def calculate(self, setpoint, current_value):\ndef set_fan_duty_cycle(duty_cycle):\ndef fan_control_thread():\nclass PIDController:" } ]
import machine import _thread import hardwareConfig as config import utime import webserver from machine import Timer from states import stateMachine, emergencyStop from lib import sensors, networking, fanPID
1,091
#################################################################### # WARNING # #################################################################### # This code is provided "AS IS" without warranty of any kind. # # Use of this code in any form acknowledges your acceptance of # # these terms. # # # # This code has NOT been tested in real-world scenarios. # # Improper usage, lack of understanding, or any combination # # thereof can result in significant property damage, injury, # # loss of life, or worse. # # Specifically, this code is related to controlling heating # # elements and systems, and there's a very real risk that it # # can BURN YOUR SHIT DOWN. # # # # By using, distributing, or even reading this code, you agree # # to assume all responsibility and risk associated with it. # # The author(s), contributors, and distributors of this code # # will NOT be held liable for any damages, injuries, or other # # consequences you may face as a result of using or attempting # # to use this code. # # # # Always approach such systems with caution. Ensure you understand # # the code, the systems involved, and the potential risks. # # If you're unsure, DO NOT use the code. # # # # Stay safe and think before you act. # #################################################################### # Initialize the WDT with a 10-second timeout wdt = machine.WDT(id=0, timeout=10000) # 10 seconds def log(message, level=2): if config.LOG_LEVEL >= level: print(message) def get_reset_reason(): reset_reason = machine.reset_cause() if reset_reason == machine.PWRON_RESET: print("Reboot was because of Power-On!") elif reset_reason == machine.WDT_RESET: print("Reboot was because of WDT!") return reset_reason pulse_timer = Timer(0) last_pulse_time = 0 off_timer = Timer(1) def turn_off_pump(_): config.FUEL_PIN.off() def pulse_fuel_callback(_): global last_pulse_time current_time = utime.ticks_ms() if utime.ticks_diff(current_time, config.heartbeat) > 10000: config.FUEL_PIN.off() log("Heartbeat missing, fuel pump turned off.") elif config.pump_frequency > 0: period = 1000.0 / config.pump_frequency if utime.ticks_diff(current_time, last_pulse_time) >= period: last_pulse_time = current_time config.FUEL_PIN.on() off_timer.init(period=int(config.PUMP_ON_TIME * 1000), mode=Timer.ONE_SHOT, callback=turn_off_pump) else: config.FUEL_PIN.off() pulse_timer.init(period=100, mode=Timer.PERIODIC, callback=pulse_fuel_callback) def emergency_stop_thread(): while True: wdt.feed() current_time = utime.ticks_ms() # Use ticks_ms to get the current time in milliseconds if utime.ticks_diff(current_time, config.heartbeat) > 10000: # Compare in milliseconds (10 seconds = 10000 ms) emergencyStop.emergency_stop("No heartbeat detected") utime.sleep(1) def run_networking_thread(): while True: networking.run_networking() utime.sleep(5) def main(): while True: config.heartbeat = utime.ticks_ms() config.output_temp = sensors.read_output_temp() config.exhaust_temp = sensors.read_exhaust_temp() current_switch_value = config.SWITCH_PIN.value()
config.current_state, config.emergency_reason = stateMachine.handle_state(
0
2023-10-24 14:50:47+00:00
2k
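The fuel-pump callback in the record above gates everything on a heartbeat timestamp compared via utime.ticks_diff, cutting fuel if no heartbeat arrives within 10 seconds. A host-Python sketch of the same guard using time.monotonic; MicroPython's wraparound-safe ticks API is not available off-device, so this is an approximation of the idea, not the firmware code:

import time

HEARTBEAT_TIMEOUT_S = 10.0
heartbeat = time.monotonic()   # updated by the main loop in the real firmware

def pump_allowed(now=None):
    now = time.monotonic() if now is None else now
    return (now - heartbeat) <= HEARTBEAT_TIMEOUT_S   # stale heartbeat -> pump off

print(pump_allowed())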
suliman-99/django-seeding
django_seeding/seeder_registry.py
[ { "identifier": "Seeder", "path": "django_seeding/seeders.py", "snippet": "class Seeder():\n \"\"\" \n The `Seeder` class provides a minimal class which may be used\n for writing custom seeding implementations.\n \n Required:\n seed:\n `seed()` as <method>\n\n Additionals:\n priority:\n `priority` as <class attribute>\n or\n `get_priority()` as <method>\n\n just_debug:\n `just_debug` as <class attribute>\n or\n `get_just_debug()` as <method>:\n \"\"\"\n def seed(self):\n \"\"\" Method that fill the datebase as wanted \"\"\"\n raise NotImplementedError('`seed()` must be implemented.')\n \n def _seed(self):\n \"\"\" Inner method that do validation before calling the public `seed()` method \"\"\"\n id = self._get_id()\n\n # if this seeder is just_debug and the settings state is not debug then dont apply it\n if self._get_just_debug() and not settings.DEBUG:\n return\n \n # if this seeder is applied before then dont apply it\n if AppliedSeeder.objects.filter(id=id).exists():\n return\n \n print(f' Seeding {id}...', end='')\n \n # apply the seeder \n self.seed()\n\n # store it in the applied seeders table in the database\n AppliedSeeder.objects.create(id=id)\n\n GREEN_COLOR = \"\\033[32m\"\n WHITE_COLOR = \"\\033[0m\"\n print(GREEN_COLOR + \" Successfully ^_^ \" + WHITE_COLOR)\n\n def get_priority(self):\n \"\"\" \n Method return the `priority` value (smaller will be applied earlier)\n \n if `priority` is passed:\n it will be returned\n\n if `priority` is not passed:\n float(inf) will be returned \n \"\"\"\n return getattr(self, 'priority', float('inf'))\n \n def _get_priority(self):\n \"\"\" Innder method to validate the value returned by `get_priority()` method \"\"\"\n priority = self.get_priority()\n\n if not isinstance(priority, float) and not isinstance(priority, int):\n raise TypeError('`priority` must be a number')\n \n return priority\n\n def get_just_debug(self):\n \"\"\" \n Method return the `just_debug` value \n \n just_debug=True means this seeder will be applied just when settings.DEBUG=True\n \n if `just_debug` is passed:\n it will be returned\n\n if `just_debug` is not passed:\n False will be returned \n \"\"\"\n return getattr(self, 'just_debug', False)\n \n def _get_just_debug(self):\n \"\"\" Innder method to validate the value returned by `get_just_debug()` method \"\"\"\n just_debug = self.get_just_debug()\n\n if not isinstance(just_debug, bool):\n raise TypeError('`just_debug` must be a bool value')\n \n return just_debug\n \n def get_id(self):\n \"\"\" \n Method return the `id` value to be stored in the database `AppliedSeeder` table\n\n Note: by this id value we can check if this seeder is applied before or not\n \n it is preferred to not change the id \n because after changing thd id the seeder will be considerd as another seeder\n then it will be apllied even that the old seeder is applied with the old id value\n\n default value is the name of the class -> str(type(self))\n\n Note:\n if you changed the class name \n or changed the seeder-class file name\n or and file in the path from the root to the class the str(type(self)) will return another value\n then the default value of this seeder is changed\n then if it doesnt have a constant id the seeder will be applied again\n and it may cause errors\n\n so:\n give an `id` class attribute to solv this problem\n \"\"\"\n return getattr(self, 'id', str(type(self)))\n \n def _get_id(self):\n \"\"\" Innder method to validate the value returned by `get_id()` method \"\"\"\n id = self.get_id()\n\n if not isinstance(id, str):\n 
raise TypeError('`id` must be str')\n \n return id" }, { "identifier": "AppliedSeeder", "path": "django_seeding/models.py", "snippet": "class AppliedSeeder(models.Model):\n id = models.CharField(max_length=100, primary_key=True)\n\n def __str__(self) -> str:\n return self.id" } ]
import sys import importlib.util from pathlib import Path from django.apps import apps from django.conf import settings from .seeders import Seeder from .models import AppliedSeeder
1,236
class SeederRegistry: """ The `SeederRegistry` class apply registered seeders when the server is run. seeder registering is doing by: @SeederRegistry.register as <decorator> or SeederRegistry.register(<seeder-class>) as <method> """ seeders = [] @classmethod def register(cls, seeder): """ Method and decorator to register the seeder-class in the seeders list to be seeded when the server is run """
if not issubclass(seeder, Seeder):
0
2023-10-24 17:00:49+00:00
2k
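SeederRegistry.register in the record above works both as a method and as a decorator because it returns the class it receives. A minimal framework-free sketch of that registry-decorator pattern:

class Registry:
    items = []

    @classmethod
    def register(cls, item):
        cls.items.append(item)
        return item   # returning the class keeps decorator usage transparent

@Registry.register
class MySeeder:
    pass

print(Registry.items)   # contains MySeeder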
cfs-energy/cfspopcon
cfspopcon/helpers.py
[ { "identifier": "Algorithms", "path": "cfspopcon/named_options.py", "snippet": "class Algorithms(Enum):\n \"\"\"Select which top-level algorithm to run.\"\"\"\n\n predictive_popcon = auto()\n two_point_model_fixed_fpow = auto()\n two_point_model_fixed_qpart = auto()\n two_point_model_fixed_tet = auto()\n calc_beta = auto()\n calc_core_radiated_power = auto()\n calc_fusion_gain = auto()\n calc_geometry = auto()\n calc_heat_exhaust = auto()\n calc_ohmic_power = auto()\n calc_peaked_profiles = auto()\n calc_plasma_current_from_q_star = auto()\n calc_q_star_from_plasma_current = auto()\n calc_power_balance_from_tau_e = auto()\n calc_zeff_and_dilution_from_impurities = auto()\n calc_confinement_transition_threshold_power = auto()\n calc_ratio_P_LH = auto()\n calc_f_rad_core = auto()\n calc_normalised_collisionality = auto()\n calc_rho_star = auto()\n calc_triple_product = auto()\n calc_greenwald_fraction = auto()\n calc_current_relaxation_time = auto()\n calc_peak_pressure = auto()\n calc_average_total_pressure = auto()\n calc_bootstrap_fraction = auto()\n calc_auxillary_power = auto()\n calc_average_ion_temp = auto()\n calc_fuel_average_mass_number = auto()\n calc_magnetic_field_on_axis = auto()\n calc_extrinsic_core_radiator = auto()\n require_P_rad_less_than_P_in = auto()\n calc_P_SOL = auto()\n use_LOC_tau_e_below_threshold = auto()\n calc_plasma_stored_energy = auto()" }, { "identifier": "ConfinementScaling", "path": "cfspopcon/named_options.py", "snippet": "class ConfinementScaling(Enum):\n r\"\"\"Enum of implemented \\tau_{E} scalings.\"\"\"\n ITER98y2 = auto()\n ITER89P = auto()\n ITER89P_ka = auto()\n ITERL96Pth = auto()\n ITER97L = auto()\n IModey2 = auto()\n ITPA20_STD5 = auto()\n ITPA20_IL = auto()\n ITPA20_IL_HighZ = auto()\n ITPA_2018_STD5_OLS = auto()\n ITPA_2018_STD5_WLS = auto()\n ITPA_2018_STD5_GLS = auto()\n ITPA_2018_STD5_SEL1_OLS = auto()\n ITPA_2018_STD5_SEL1_WLS = auto()\n ITPA_2018_STD5_SEL1_GLS = auto()\n LOC = auto()\n H_DS03 = auto()" }, { "identifier": "Impurity", "path": "cfspopcon/named_options.py", "snippet": "class Impurity(Enum):\n \"\"\"Enum of possible impurity elements.\n\n The enum value represents the element's atomic number (Z).\n \"\"\"\n\n Helium = 2\n Lithium = 3\n Beryllium = 4\n Carbon = 6\n Nitrogen = 7\n Oxygen = 8\n Neon = 10\n Argon = 18\n Krypton = 36\n Xenon = 54\n Tungsten = 74" }, { "identifier": "LambdaQScaling", "path": "cfspopcon/named_options.py", "snippet": "class LambdaQScaling(Enum):\n \"\"\"Options for heat flux decay length scaling.\"\"\"\n\n Brunner = auto()\n EichRegression14 = auto()\n EichRegression15 = auto()" }, { "identifier": "MomentumLossFunction", "path": "cfspopcon/named_options.py", "snippet": "class MomentumLossFunction(Enum):\n \"\"\"Select which SOL momentum loss function to use.\"\"\"\n\n KotovReiter = auto()\n Sang = auto()\n Jarvinen = auto()\n Moulton = auto()\n PerezH = auto()\n PerezL = auto()" }, { "identifier": "ProfileForm", "path": "cfspopcon/named_options.py", "snippet": "class ProfileForm(Enum):\n \"\"\"Methods to calculate nT profiles.\"\"\"\n\n analytic = auto()\n prf = auto()" }, { "identifier": "RadiationMethod", "path": "cfspopcon/named_options.py", "snippet": "class RadiationMethod(Enum):\n \"\"\"Methods to calculate radiation losses.\"\"\"\n\n Inherent = \"Bremsstrahlung and synchrotron radiation only\"\n PostJensen = \"Impurity radiation, using a coronal equilibrium model from Post & Jensen 1977\"\n MavrinCoronal = \"Impurity radiation, using a coronal equilibrium model from Mavrin 2018\"\n 
MavrinNoncoronal = \"Impurity radiation, using a non-coronal model from Mavrin 2017\"\n Radas = \"Impurity line and bremsstrahlung radiation, using coronal Lz curves from Radas\"" }, { "identifier": "ReactionType", "path": "cfspopcon/named_options.py", "snippet": "class ReactionType(Enum):\n \"\"\"Supported Fusion Fuel Reaction Types.\"\"\"\n\n DT = \"Deuterium-Tritium\"\n DD = \"Deuterium-Deuterium\"\n DHe3 = \"Deuterium-Helium3\"\n pB11 = \"Proton-Boron11\"" } ]
from typing import Any, Union from .named_options import ( Algorithms, ConfinementScaling, Impurity, LambdaQScaling, MomentumLossFunction, ProfileForm, RadiationMethod, ReactionType, ) import xarray as xr
1,324
"""Constructors and helper functions.""" def convert_named_options(key: str, val: Any) -> Any: # noqa: PLR0911, PLR0912 """Given a 'key' matching a named_option, return the corresponding Enum value.""" if key == "algorithms": return Algorithms[val] elif key == "energy_confinement_scaling": return ConfinementScaling[val] elif key == "profile_form":
"""Constructors and helper functions.""" def convert_named_options(key: str, val: Any) -> Any: # noqa: PLR0911, PLR0912 """Given a 'key' matching a named_option, return the corresponding Enum value.""" if key == "algorithms": return Algorithms[val] elif key == "energy_confinement_scaling": return ConfinementScaling[val] elif key == "profile_form":
return ProfileForm[val]
5
2023-10-19 16:58:23+00:00
2k
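convert_named_options in the record above maps a config string to an Enum member by name with Enum[val]. A minimal sketch of that lookup and its failure mode, reusing the record's ProfileForm names:

from enum import Enum, auto

class ProfileForm(Enum):
    analytic = auto()
    prf = auto()

assert ProfileForm["analytic"] is ProfileForm.analytic   # lookup by member name, not value
try:
    ProfileForm["unknown"]
except KeyError as e:
    print("no such option:", e)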
yifei-he/GOAT
experiments.py
[ { "identifier": "ot_ablation", "path": "ot_util.py", "snippet": "def ot_ablation(size, mode):\n ns, nt = size, size\n plan = np.zeros((ns, nt))\n ran = np.arange(ns*nt)\n np.random.shuffle(ran)\n idx = ran[:size]\n\n for i in idx:\n row = i // nt\n col = i-i//nt * nt\n if mode == \"random\":\n plan[row, col] = np.random.uniform()\n elif mode == \"uniform\":\n plan[row, col] = 1\n \n plan /= np.sum(plan, 1, keepdims=True)\n plan[~ np.isfinite(plan)] = 0\n\n return plan" }, { "identifier": "generate_domains", "path": "ot_util.py", "snippet": "def generate_domains(n_inter, dataset_s, dataset_t, plan=None, entry_cutoff=0, conf=0):\n print(\"------------Generate Intermediate domains----------\")\n all_domains = []\n \n xs, xt = dataset_s.data, dataset_t.data\n ys = dataset_s.targets\n\n if plan is None:\n if len(xs.shape) > 2:\n xs_flat, xt_flat = nn.Flatten()(xs), nn.Flatten()(xt)\n plan = get_OT_plan(xs_flat, xt_flat, solver='emd', entry_cutoff=entry_cutoff)\n else:\n plan = get_OT_plan(xs, xt, solver='emd', entry_cutoff=entry_cutoff)\n\n logits_t = get_transported_labels(plan, ys, logit=True)\n yt_hat, conf_idx = get_conf_idx(logits_t, confidence_q=conf)\n xt = xt[conf_idx]\n plan = plan[:, conf_idx]\n yt_hat = yt_hat[conf_idx]\n\n print(f\"Remaining data after confidence filter: {len(conf_idx)}\")\n\n for i in range(1, n_inter+1):\n x, weights = pushforward(xs, xt, plan, i / (n_inter+1))\n if isinstance(x, np.ndarray):\n all_domains.append(DomainDataset(torch.from_numpy(x).float(), weights))\n else:\n all_domains.append(DomainDataset(x, weights))\n all_domains.append(dataset_t)\n\n print(f\"Total data for each intermediate domain: {len(x)}\")\n\n return all_domains" } ]
import torch import torch.optim as optim import copy import argparse import random import torch.backends.cudnn as cudnn import time from model import * from train_model import * from util import * from ot_util import ot_ablation from da_algo import * from ot_util import generate_domains from dataset import *
1,057
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def get_source_model(args, trainset, testset, n_class, mode, encoder=None, epochs=50, verbose=True): print("Start training source model") model = Classifier(encoder, MLP(mode=mode, n_class=n_class, hidden=1024)).to(device) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4) trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers) for epoch in range(1, epochs+1): train(epoch, trainloader, model, optimizer, verbose=verbose) if epoch % 5 == 0: test(testloader, model, verbose=verbose) return model def run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=10): # get the performance of direct adaptation from the source to target, st involves self-training on target direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=epochs) # get the performance of GST from the source to target, st involves self-training on target direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=epochs) # encode the source and target domains e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset) # encode the intermediate ground-truth domains intersets = all_sets[:-1] encoded_intersets = [e_src_trainset] for i in intersets: encoded_intersets.append(get_encoded_dataset(source_model.encoder, i)) encoded_intersets.append(e_tgt_trainset) # generate intermediate domains generated_acc = 0 if generated_domains > 0: all_domains = [] for i in range(len(encoded_intersets)-1):
all_domains += generate_domains(generated_domains, encoded_intersets[i], encoded_intersets[i+1])
1
2023-10-20 16:41:00+00:00
2k
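generate_domains in the record above builds intermediate domains by pushing source points toward their OT-matched targets at fractions t = i/(n_inter+1). A simplified numpy sketch that interpolates already-matched pairs; the real code derives the matching from a transport plan, so this only illustrates the interpolation step on toy data:

import numpy as np

xs = np.array([[0.0, 0.0], [1.0, 0.0]])   # matched source points (toy data)
xt = np.array([[0.0, 2.0], [1.0, 2.0]])   # matched target points
n_inter = 3
domains = [(1 - t) * xs + t * xt
           for t in (i / (n_inter + 1) for i in range(1, n_inter + 1))]
print(domains[0])   # first intermediate domain, a quarter of the way across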
ansible/django-ansible-base
ansible_base/tests/unit/serializers/test_common.py
[ { "identifier": "AuthenticatorMap", "path": "ansible_base/models/authenticator_map.py", "snippet": "class AuthenticatorMap(NamedCommonModel):\n class Meta:\n app_label = 'ansible_base'\n # If the map type is a team then we must have an org/team\n constraints = [\n models.CheckConstraint(\n name=\"%(app_label)s_%(class)s_require_org_team_if_team_map\",\n check=(~models.Q(map_type='team') | models.Q(team__isnull=False) & models.Q(organization__isnull=False)),\n ),\n models.CheckConstraint(\n name=\"%(app_label)s_%(class)s_require_org_if_org_map\",\n check=(~models.Q(map_type='organization') | models.Q(organization__isnull=False)),\n ),\n ]\n unique_together = ['name', 'authenticator']\n\n authenticator = models.ForeignKey(\n Authenticator,\n null=False,\n on_delete=models.CASCADE,\n help_text=\"The authenticator this mapping belongs to\",\n )\n revoke = models.BooleanField(\n null=False,\n default=False,\n help_text=\"If a user does not meet this rule should we revoke the permission\",\n )\n map_type = models.CharField(\n max_length=17,\n null=False,\n default=\"team\",\n choices=[\n ('team', 'team'),\n ('is_superuser', 'is_superuser'),\n ('is_system_auditor', 'is_system_auditor'),\n ('allow', 'allow'),\n ('organization', 'organization'),\n ],\n help_text='What does the map work on, a team, a user flag or is this an allow rule',\n )\n team = models.CharField(\n max_length=512,\n null=True,\n default=None,\n help_text='A team name this rule works on',\n )\n organization = models.CharField(\n max_length=512,\n null=True,\n default=None,\n help_text='An organization name this rule works on',\n )\n triggers = models.JSONField(\n null=False,\n default=dict,\n help_text=\"Trigger information for this rule\",\n )\n order = models.PositiveIntegerField(\n null=False,\n default=0,\n help_text=(\n \"The order in which this rule should be processed, smaller numbers are of higher precedence. 
\"\n \"Items with the same order will be executed in random order\"\n ),\n )" }, { "identifier": "CommonModelSerializer", "path": "ansible_base/serializers/common.py", "snippet": "class CommonModelSerializer(serializers.ModelSerializer):\n show_capabilities = ['edit', 'delete']\n url = serializers.SerializerMethodField()\n related = serializers.SerializerMethodField('_get_related')\n summary_fields = serializers.SerializerMethodField('_get_summary_fields')\n\n class Meta:\n fields = ['id', 'url', 'created_on', 'created_by', 'modified_on', 'modified_by', 'related', 'summary_fields']\n\n def __init__(self, instance=None, data=empty, **kwargs):\n # pre-populate the form with the defaults from the model\n model = getattr(self.Meta, 'model', None)\n if model:\n extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})\n for field in model._meta.concrete_fields:\n if field.name not in extra_kwargs:\n extra_kwargs[field.name] = {}\n if not extra_kwargs[field.name].get('initial', None):\n if field.default and field.default is not NOT_PROVIDED:\n extra_kwargs[field.name]['initial'] = field.default\n setattr(self.Meta, 'extra_kwargs', extra_kwargs)\n super().__init__(instance, data, **kwargs)\n\n def get_url(self, obj):\n if self.reverse_url_name:\n return reverse_lazy(self.reverse_url_name, kwargs={'pk': obj.pk})\n return ''\n\n def _get_related(self, obj):\n if obj is None:\n return {}\n if not hasattr(obj, 'related_fields'):\n logger.warning(f\"Object {obj.__class__} has no related_fields method\")\n return {}\n return obj.related_fields(self.context.get('request'))\n\n def _get_summary_fields(self, obj):\n if obj is None:\n return {}\n if not hasattr(obj, 'get_summary_fields'):\n logger.warning(f\"Object {obj.__class__} has no get_summary_fields method\")\n return {}\n return obj.get_summary_fields()\n\n def to_representation(self, obj):\n ret = super().to_representation(obj)\n\n for key in obj.encrypted_fields:\n if key in ret:\n ret[key] = ENCRYPTED_STRING\n\n return ret\n\n def update(self, instance, validated_data):\n # We don't want the $encrypted$ fields going back to the model\n for key in self.Meta.model.encrypted_fields:\n new_field = validated_data.get(key, None)\n if new_field and new_field == ENCRYPTED_STRING:\n validated_data.pop(key, None)\n\n return super().update(instance, validated_data)" }, { "identifier": "ENCRYPTED_STRING", "path": "ansible_base/utils/encryption.py", "snippet": "ENCRYPTED_STRING = '$encrypted$'" }, { "identifier": "EncryptionModel", "path": "test_app/models.py", "snippet": "class EncryptionModel(NamedCommonModel):\n class Meta:\n app_label = \"test_app\"\n\n encrypted_fields = ['testing1', 'testing2']\n\n testing1 = models.CharField(max_length=1, null=True, default='a')\n testing2 = models.CharField(max_length=1, null=True, default='b')" }, { "identifier": "EncryptionTestSerializer", "path": "test_app/serializers.py", "snippet": "class EncryptionTestSerializer(NamedCommonModelSerializer):\n reverse_url_name = None\n\n class Meta:\n model = EncryptionModel\n fields = NamedCommonModelSerializer.Meta.fields + [x.name for x in EncryptionModel._meta.concrete_fields]" } ]
import pytest from ansible_base.models import AuthenticatorMap from ansible_base.serializers.common import CommonModelSerializer from ansible_base.utils.encryption import ENCRYPTED_STRING from test_app.models import EncryptionModel from test_app.serializers import EncryptionTestSerializer
1,413
@pytest.mark.django_db def test_representation_of_encrypted_fields(): model = EncryptionModel.objects.create()
serializer = EncryptionTestSerializer()
4
2023-10-20 13:20:12+00:00
2k
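CommonModelSerializer.to_representation in the record above replaces every encrypted field with the $encrypted$ sentinel before the representation leaves the API. A minimal dict-level sketch of that masking, detached from Django REST Framework; the field names are illustrative:

ENCRYPTED_STRING = "$encrypted$"

def mask(rep: dict, encrypted_fields) -> dict:
    # swap any encrypted field's value for the sentinel, leave the rest untouched
    return {k: (ENCRYPTED_STRING if k in encrypted_fields else v)
            for k, v in rep.items()}

print(mask({"name": "x", "testing1": "secret"}, ["testing1", "testing2"]))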
zhudotexe/kani-vision
kani/ext/vision/engines/openai/models.py
[ { "identifier": "ImagePart", "path": "kani/ext/vision/parts.py", "snippet": "class ImagePart(MessagePart, abc.ABC):\n \"\"\"Base class for all image message parts.\n\n Generally, you shouldn't construct this directly - instead, use one of the classmethods to initialize the image from\n a file path, binary, or Pillow image.\n \"\"\"\n\n model_config = ConfigDict(ignored_types=(functools.cached_property,))\n\n # constructors\n @staticmethod\n def from_path(fp: PathLike):\n \"\"\"Load an image from a path on the local filesystem.\"\"\"\n return FileImagePart(path=fp)\n\n @staticmethod\n def from_bytes(data: bytes):\n \"\"\"Load an image from binary data in memory.\"\"\"\n return BytesImagePart(data=data)\n\n @staticmethod\n def from_image(image: Image.Image):\n \"\"\"Create an image part from an existing :class:`PIL.Image.Image`.\"\"\"\n return PillowImagePart(pil_image=image)\n\n @classmethod\n async def from_url(cls, url: str, remote: bool = True):\n \"\"\"Create an image part from a URL.\n\n If *remote* is True, this will not download the image - it will be up to the engine to do so!\n\n .. attention::\n Note that this classmethod is *asynchronous*, unlike the other classmethods!\n\n This is because we need to check the image headers and metadata before returning a valid image part.\n \"\"\"\n if not remote:\n io = BytesIO()\n await download_image(url, io)\n return BytesImagePart(data=io.getvalue())\n size, mime = await image_metadata_from_url(url)\n return RemoteURLImagePart(url=url, size_=size, mime_=mime)\n\n # interface\n @property\n def image(self) -> Image.Image:\n \"\"\"Get a :class:`PIL.Image.Image` representing the image.\"\"\"\n raise NotImplementedError\n\n @property\n def bytes(self) -> bytes:\n \"\"\"The binary image data.\"\"\"\n io = BytesIO()\n self.image.save(io, format=\"PNG\")\n return io.getvalue()\n\n @property\n def b64(self) -> str:\n \"\"\"The binary image data encoded in a base64 string.\n\n Note that this is *not* a web-suitable ``data:image/...`` string; just the raw binary of the image. Use\n :attr:`b64_uri` for a web-suitable string.\n \"\"\"\n return base64.b64encode(self.bytes).decode()\n\n @property\n def b64_uri(self) -> str:\n \"\"\"Get the binary image data encoded in a web-suitable base64 string.\"\"\"\n return f\"data:{self.mime};base64,{self.b64}\"\n\n # metadata\n @property\n def size(self) -> tuple[int, int]:\n \"\"\"Get the size of the image, in pixels.\"\"\"\n return self.image.size\n\n @property\n def mime(self) -> str:\n \"\"\"Get the MIME filetype of the image.\"\"\"\n img_format = self.image.format\n return Image.MIME.get(img_format, f\"image/{img_format.lower()}\")" }, { "identifier": "RemoteURLImagePart", "path": "kani/ext/vision/parts.py", "snippet": "class RemoteURLImagePart(ImagePart):\n \"\"\"A reference to a remote image stored at the given URL.\n\n Use :meth:`.ImagePart.from_url` to construct.\n \"\"\"\n\n url: str\n size_: tuple[int, int]\n mime_: str\n\n @property\n def image(self):\n raise RemoteImageError(\n \"This engine does not support remote images. Use `await ImagePart.from_url(url, remote=False)` to download\"\n \" the image before using it in this engine.\"\n )\n\n @property\n def size(self):\n return self.size_\n\n @property\n def mime(self):\n return self.mime_" } ]
from typing import Annotated, Literal, Union from pydantic import Field from kani.engines.openai.models import OpenAIChatMessage from kani.models import BaseModel, ChatMessage, ChatRole from ...parts import ImagePart, RemoteURLImagePart
1,114
# note: `type` does not have default since we use `.model_dump(..., exclude_defaults=True)` class OpenAIText(BaseModel): type: Literal["text"] text: str @classmethod def from_text(cls, data: str): return cls(type="text", text=data) class OpenAIImage(BaseModel): type: Literal["image_url"] image_url: str detail: Literal["high"] | Literal["low"] | None = None @classmethod def from_imagepart(cls, part: ImagePart):
# note: `type` does not have default since we use `.model_dump(..., exclude_defaults=True)` class OpenAIText(BaseModel): type: Literal["text"] text: str @classmethod def from_text(cls, data: str): return cls(type="text", text=data) class OpenAIImage(BaseModel): type: Literal["image_url"] image_url: str detail: Literal["high"] | Literal["low"] | None = None @classmethod def from_imagepart(cls, part: ImagePart):
if isinstance(part, RemoteURLImagePart):
1
2023-10-20 16:21:03+00:00
2k
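The kani-vision context above hinges on ImagePart.b64_uri, which serializes an image to PNG bytes and wraps them in a web-suitable base64 data URI for the OpenAI payload. A minimal standalone sketch of that encoding, assuming Pillow is installed (the helper name image_to_data_uri is ours, not the library's):

import base64
from io import BytesIO

from PIL import Image

def image_to_data_uri(image: Image.Image) -> str:
    buf = BytesIO()
    image.save(buf, format="PNG")  # serialize to PNG bytes in memory, as ImagePart.bytes does
    b64 = base64.b64encode(buf.getvalue()).decode()
    return f"data:image/png;base64,{b64}"  # same shape as ImagePart.b64_uri

# usage: image_to_data_uri(Image.new("RGB", (2, 2)))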
line/Skeleton-Temporal-Action-Localization
evaluation/eval.py
[ { "identifier": "getClassificationMAP", "path": "evaluation/classificationMAP.py", "snippet": "def getClassificationMAP(confidence, labels):\n \"\"\" confidence and labels are of dimension n_samples x n_label \"\"\"\n\n AP = []\n for i in range(np.shape(labels)[1]):\n AP.append(getAP(confidence[:, i], labels[:, i]))\n return 100 * sum(AP) / len(AP)" }, { "identifier": "getSingleStreamDetectionMAP", "path": "evaluation/detectionMAP.py", "snippet": "def getSingleStreamDetectionMAP(\n vid_preds, frm_preds, vid_lens, annotation_path, args, multi=False, factor=1.0\n):\n iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n dmap_list = []\n seg = getActLoc(\n vid_preds,\n frm_preds,\n vid_lens,\n np.arange(args.start_threshold, args.end_threshold, args.threshold_interval),\n annotation_path,\n args,\n multi=multi,\n )\n # print (len(seg))\n for iou in iou_list:\n print(\"Testing for IoU %f\" % iou)\n dmap_list.append(\n getLocMAP(seg, iou, annotation_path, args, multi=multi, factor=factor)\n )\n return dmap_list, iou_list" }, { "identifier": "getTwoStreamDetectionMAP", "path": "evaluation/detectionMAP.py", "snippet": "def getTwoStreamDetectionMAP(\n rgb_vid_preds,\n flow_vid_preds,\n rgb_frm_preds,\n flow_frm_preds,\n vid_lens,\n annotation_path,\n args,\n):\n iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n dmap_list = []\n rgb_seg = getActLoc(\n rgb_vid_preds,\n rgb_frm_preds * 0.1,\n vid_lens,\n np.arange(args.start_threshold, args.end_threshold, args.threshold_interval)\n * 0.1,\n annotation_path,\n args,\n )\n flow_seg = getActLoc(\n flow_vid_preds,\n flow_frm_preds,\n vid_lens,\n np.arange(args.start_threshold, args.end_threshold, args.threshold_interval),\n annotation_path,\n args,\n )\n seg = IntergrateSegs(rgb_seg, flow_seg, 0.9, args)\n for iou in iou_list:\n print(\"Testing for IoU %f\" % iou)\n dmap_list.append(getLocMAP(seg, iou, annotation_path, args))\n\n return dmap_list, iou_list" }, { "identifier": "write_results_to_eval_file", "path": "evaluation/utils.py", "snippet": "def write_results_to_eval_file(args, dmap, itr1, itr2):\n file_folder = \"./ckpt/\" + args.dataset_name + \"/eval/\"\n file_name = args.dataset_name + \"-results.log\"\n fid = open(file_folder + file_name, \"a+\")\n string_to_write = str(itr1)\n string_to_write += \" \" + str(itr2)\n for item in dmap:\n string_to_write += \" \" + \"%.2f\" % item\n fid.write(string_to_write + \"\\n\")\n fid.close()" }, { "identifier": "write_results_to_file", "path": "evaluation/utils.py", "snippet": "def write_results_to_file(args, dmap, cmap, itr):\n file_folder = \"./ckpt/\" + args.dataset_name + \"/\" + str(args.model_id) + \"/\"\n file_name = args.dataset_name + \"-results.log\"\n fid = open(file_folder + file_name, \"a+\")\n string_to_write = str(itr)\n for item in dmap:\n string_to_write += \" \" + \"%.2f\" % item\n string_to_write += \" \" + \"%.2f\" % cmap\n fid.write(string_to_write + \"\\n\")\n fid.close()" } ]
import numpy as np import torch import torch.nn.functional as F from torch.autograd import Variable from .classificationMAP import getClassificationMAP as cmAP from .detectionMAP import getSingleStreamDetectionMAP as dsmAP from .detectionMAP import getTwoStreamDetectionMAP as dtmAP from .utils import write_results_to_eval_file, write_results_to_file
1,385
def ss_eval(epoch, dataloader, args, logger, model, device): vid_preds = [] frm_preds = [] vid_lens = [] labels = [] for num, sample in enumerate(dataloader): if (num + 1) % 100 == 0: print("Testing test data point %d of %d" % (num + 1, len(dataloader))) features = sample["data"].numpy() label = sample["labels"].numpy() vid_len = sample["vid_len"].numpy() features = torch.from_numpy(features).float().to(device) with torch.no_grad(): _, vid_pred, _, frm_scr = model(Variable(features)) frm_pred = F.softmax(frm_scr, -1) vid_pred = np.squeeze(vid_pred.cpu().data.numpy(), axis=0) frm_pred = np.squeeze(frm_pred.cpu().data.numpy(), axis=0) label = np.squeeze(label, axis=0) vid_preds.append(vid_pred) frm_preds.append(frm_pred) vid_lens.append(vid_len) labels.append(label) vid_preds = np.array(vid_preds) frm_preds = np.array(frm_preds) vid_lens = np.array(vid_lens) labels = np.array(labels) cmap = cmAP(vid_preds, labels) dmap, iou = dsmAP( vid_preds, frm_preds, vid_lens, dataloader.dataset.path_to_annotations, args ) print("Classification map %f" % cmap) for item in list(zip(iou, dmap)): print("Detection map @ %f = %f" % (item[0], item[1])) logger.log_value("Test Classification mAP", cmap, epoch) for item in list(zip(dmap, iou)): logger.log_value("Test Detection1 mAP @ IoU = " + str(item[1]), item[0], epoch)
def ss_eval(epoch, dataloader, args, logger, model, device): vid_preds = [] frm_preds = [] vid_lens = [] labels = [] for num, sample in enumerate(dataloader): if (num + 1) % 100 == 0: print("Testing test data point %d of %d" % (num + 1, len(dataloader))) features = sample["data"].numpy() label = sample["labels"].numpy() vid_len = sample["vid_len"].numpy() features = torch.from_numpy(features).float().to(device) with torch.no_grad(): _, vid_pred, _, frm_scr = model(Variable(features)) frm_pred = F.softmax(frm_scr, -1) vid_pred = np.squeeze(vid_pred.cpu().data.numpy(), axis=0) frm_pred = np.squeeze(frm_pred.cpu().data.numpy(), axis=0) label = np.squeeze(label, axis=0) vid_preds.append(vid_pred) frm_preds.append(frm_pred) vid_lens.append(vid_len) labels.append(label) vid_preds = np.array(vid_preds) frm_preds = np.array(frm_preds) vid_lens = np.array(vid_lens) labels = np.array(labels) cmap = cmAP(vid_preds, labels) dmap, iou = dsmAP( vid_preds, frm_preds, vid_lens, dataloader.dataset.path_to_annotations, args ) print("Classification map %f" % cmap) for item in list(zip(iou, dmap)): print("Detection map @ %f = %f" % (item[0], item[1])) logger.log_value("Test Classification mAP", cmap, epoch) for item in list(zip(dmap, iou)): logger.log_value("Test Detection1 mAP @ IoU = " + str(item[1]), item[0], epoch)
write_results_to_file(args, dmap, cmap, epoch)
4
2023-10-20 05:38:16+00:00
2k
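getClassificationMAP in the context above averages one AP per label column and scales by 100; getAP itself is not shown in the record. A sketch that reproduces the same column-wise averaging, with scikit-learn's average_precision_score standing in for the unseen getAP:

import numpy as np
from sklearn.metrics import average_precision_score

def classification_map(confidence: np.ndarray, labels: np.ndarray) -> float:
    # confidence, labels: (n_samples, n_labels); one AP per label column
    aps = [average_precision_score(labels[:, i], confidence[:, i])
           for i in range(labels.shape[1])]
    return 100 * float(np.mean(aps))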
n-thumann/xbox-cloud-statistics
backend/xbox_cloud_statistics/main.py
[ { "identifier": "Game", "path": "backend/xbox_cloud_statistics/models.py", "snippet": "class Game(Model):\n id: str\n title: str\n image_url: str\n subscriptions: Subscription\n\n def to_dict(self) -> dict:\n return {\"id\": self.id, \"title\": self.title, \"image_url\": self.image_url}\n\n def __lt__(self, other) -> bool:\n if not isinstance(other, Game):\n raise TypeError\n\n return self.id < other.id" }, { "identifier": "Measurement", "path": "backend/xbox_cloud_statistics/models.py", "snippet": "class Measurement(Model):\n server_time: datetime\n wait_time: int\n\n def to_dict(self) -> dict:\n return {int(self.server_time.timestamp()): self.wait_time}" }, { "identifier": "Results", "path": "backend/xbox_cloud_statistics/models.py", "snippet": "class Results(Model):\n _games: dict[Game, GameResult] = field(\n default_factory=lambda: defaultdict(GameResult)\n )\n\n def __getitem__(self, game: Game) -> GameResult:\n return self._games[game]\n\n def __iter__(self) -> Iterator[tuple[Game, GameResult]]:\n return iter(sorted(self._games.items()))\n\n def to_dict(self) -> dict:\n return {game.id: game_result for game, game_result in self}" }, { "identifier": "Subscription", "path": "backend/xbox_cloud_statistics/models.py", "snippet": "class Subscription(Flag):\n F2P = auto()\n GPU = auto()\n\n @classmethod\n def from_string(cls, value: str):\n return cls._member_map_.get(value)" } ]
import asyncio import itertools import httpx from pathlib import Path from xbox_cloud_statistics.client import XBoxCloudClient from xbox_cloud_statistics.config import Config from xbox_cloud_statistics.io.cli import CLI from xbox_cloud_statistics.io.json import JSON from .models import ( Game, Measurement, Results, Subscription, )
669
def run(): asyncio.run(main()) async def main(): config = Config() results = Results() async with httpx.AsyncClient(http2=True) as http_client: client = XBoxCloudClient(http_client, config.client_id, config.client_secret) if config.f2p_token: await run_measurements( client, Subscription.F2P, config.f2p_token, config.f2p_games, results, ) if config.gpu_token: await run_measurements( client, Subscription.GPU, config.gpu_token, config.gpu_games, results, ) CLI.handle(results) JSON.handle(results, Path("./results")) async def run_measurements( client: XBoxCloudClient, subscription: Subscription, token: str, games: list[Game], results: Results, ): await client.login(subscription, token) games_regions = list(itertools.product(games, client.regions)) coroutines = [client.measure(region, game) for game, region in games_regions]
def run(): asyncio.run(main()) async def main(): config = Config() results = Results() async with httpx.AsyncClient(http2=True) as http_client: client = XBoxCloudClient(http_client, config.client_id, config.client_secret) if config.f2p_token: await run_measurements( client, Subscription.F2P, config.f2p_token, config.f2p_games, results, ) if config.gpu_token: await run_measurements( client, Subscription.GPU, config.gpu_token, config.gpu_games, results, ) CLI.handle(results) JSON.handle(results, Path("./results")) async def run_measurements( client: XBoxCloudClient, subscription: Subscription, token: str, games: list[Game], results: Results, ): await client.login(subscription, token) games_regions = list(itertools.product(games, client.regions)) coroutines = [client.measure(region, game) for game, region in games_regions]
times: list[Measurement | Exception] = await asyncio.gather(
1
2023-10-22 13:05:00+00:00
2k
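run_measurements above fans out one coroutine per (game, region) pair and the next_line gathers them; the Measurement | Exception annotation suggests return_exceptions=True, though that flag is our inference rather than something the record shows. A sketch of the fan-out pattern:

import asyncio
import itertools

async def fan_out(measure, games, regions):
    pairs = list(itertools.product(games, regions))
    coros = [measure(region, game) for game, region in pairs]
    # failed measurements come back as Exception objects instead of raising
    return await asyncio.gather(*coros, return_exceptions=True)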
albu-org/aiotp
aiotp/totp/totp.py
[ { "identifier": "OTP", "path": "aiotp/core/otp.py", "snippet": "class OTP(AbstractOTP):\n def __init__(\n self,\n secret: str,\n digit: int = 5,\n algorithm: algorithms = 'sha1'\n ) -> None:\n assert 0 < digit < 11\n assert algorithm.lower() in ('sha1', 'sha256', 'sha512')\n\n self.digit = digit\n self.secret = secret\n self.algorithm = algorithm\n\n async def _generate(self, integer: int) -> str:\n if integer < 0:\n raise ValueError('input must be positive integer')\n\n int2bytes = struct.pack('>q', integer)\n\n b_secret = base64.b32decode(self.secret + '=' * ((8 - len(self.secret)) % 8), casefold=True)\n \n hash_hmac = hmac.new(b_secret, int2bytes, self.algorithm).digest()\n\n offset = hash_hmac[-1] & 0xF\n\n code_bytes = hash_hmac[offset:offset + 4]\n code = str(struct.unpack('>l', code_bytes)[0] & 0X7FFFFFFF)\n\n return code[-self.digit:].zfill(self.digit)" }, { "identifier": "conversion", "path": "aiotp/utils/utils.py", "snippet": "async def conversion(date_time: datetime.datetime, interval: int) -> int:\n if date_time.tzinfo:\n return int(calendar.timegm(date_time.utctimetuple()) / interval)\n\n else:\n return int(time.mktime(date_time.timetuple()) / interval)" }, { "identifier": "algorithms", "path": "aiotp/typing.py", "snippet": "" }, { "identifier": "AbstractTOTP", "path": "aiotp/abstracts/abstracts.py", "snippet": "class AbstractTOTP(ABC):\n \"\"\"AbstractBase\"\"\"\n\n @abstractmethod\n async def create(self, dt: datetime) -> str:\n \"\"\"generate the TOTP code\"\"\"\n\n @abstractmethod\n async def verify(self, code: str, dt: datetime) -> bool:\n \"\"\"verify the TOTP code\"\"\"\n\n @abstractmethod\n async def uri(self, name: str, issuer: Optional[str], image: Optional[str]) -> str:\n \"\"\"generate the uri\"\"\"" } ]
import hmac import datetime import unicodedata from typing import Optional from urllib.parse import quote, urlencode, urlparse from ..core import OTP from ..utils import conversion from ..typing import algorithms from ..abstracts import AbstractTOTP
711
class TOTP(AbstractTOTP, OTP): def __init__( self, secret: str, digits: int = 5, interval: int = 60, algorithm: algorithms = 'sha1', ) -> None: self.interval = interval super().__init__(secret, digits, algorithm) async def __aenter__(self) -> 'TOTP': return self async def __aexit__(self, *args, **kwargs) -> None: ... async def create(self, dt: Optional[datetime.datetime] = None) -> str: if not dt: dt = datetime.datetime.now()
class TOTP(AbstractTOTP, OTP): def __init__( self, secret: str, digits: int = 5, interval: int = 60, algorithm: algorithms = 'sha1', ) -> None: self.interval = interval super().__init__(secret, digits, algorithm) async def __aenter__(self) -> 'TOTP': return self async def __aexit__(self, *args, **kwargs) -> None: ... async def create(self, dt: Optional[datetime.datetime] = None) -> str: if not dt: dt = datetime.datetime.now()
return await self._generate(await conversion(dt, self.interval))
1
2023-10-20 18:51:22+00:00
2k
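The TOTP record reduces a datetime to a time-step counter (timestamp divided by the interval, as in conversion) and feeds it to the HOTP-style _generate shown in the OTP context. A synchronous, self-contained sketch of the same RFC 6238 flow, with stand-in names rather than the package's API:

import base64
import hmac
import struct
import time

def totp(secret_b32: str, interval: int = 60, digits: int = 5) -> str:
    counter = int(time.time() // interval)  # the conversion() step
    key = base64.b32decode(secret_b32 + "=" * ((8 - len(secret_b32)) % 8), casefold=True)
    digest = hmac.new(key, struct.pack(">q", counter), "sha1").digest()
    offset = digest[-1] & 0xF  # dynamic truncation, as in OTP._generate
    code = struct.unpack(">l", digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code)[-digits:].zfill(digits)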
brandonrobertz/reason-act-sqlite-py
llm_sql_queries.py
[ { "identifier": "DB_PATH", "path": "actions.py", "snippet": "DB_PATH = \"example.db\"" }, { "identifier": "load_db", "path": "actions.py", "snippet": "def load_db(path):\n assert os.path.exists(path), f\"Database doesn't exist: {path}\"\n db = sqlite_utils.Database(path)\n return db" }, { "identifier": "tables", "path": "actions.py", "snippet": "def tables(db):\n return [\n name\n for name in db.table_names()\n # game stats confuses the model\n if (\n \"_fts\" not in name\n and name not in IGNORED_TABLES\n and not name.endswith(\"_history\")\n )\n ]" }, { "identifier": "schema", "path": "actions.py", "snippet": "def schema(db, table_name):\n table_names = tables(db)\n if table_name not in table_names:\n return f\"Error: Invalid table. Valid tables are: {table_names}\"\n return re.sub('\\s+', ' ', db[table_name].schema)" }, { "identifier": "help", "path": "actions.py", "snippet": "def help(db, *args):\n if not args:\n return \"Error: The help action requires at least one argument\"\n table_name = args[0]\n column = None\n if len(args) == 2:\n column = args[1]\n if table_name not in DATA_HELP:\n available_tables = tables(db)\n return f\"Error: The table {table_name} doesn't exist. Valid tables: {available_tables}\"\n if column not in DATA_HELP[table_name]:\n available_columns = [\n c.name\n for c in db[table_name].columns\n if c.name not in IGNORED_COLUMNS\n ]\n return f\"Error: The column {column} isn't in the {table_name} table. Valid columns: {available_columns}\"\n help_text = DATA_HELP[table_name][column]\n # table help requested\n if column is None:\n return help_text\n # column help requested, add common values\n analysis = db[table_name].analyze_column(column, common_limit=2)\n common_values = \", \".join([f\"{value}\" for value, count in analysis.most_common])\n return f\"{help_text} the top two values are: {common_values}\"" }, { "identifier": "sql_query", "path": "actions.py", "snippet": "def sql_query(db, query):\n if query.lower().startswith(\"select *\"):\n return \"Error: Select some specific columns, not *\"\n try:\n results = list(db.query(query))\n except sqlite3.OperationalError as e:\n return f\"Your query has an error: {e}\"\n return clean_truncate(results, n=5)" } ]
import json import os import re import sys import sqlite3 from llama_cpp import Llama from actions import ( DB_PATH, load_db, tables, schema, help, sql_query )
971
try: except ModuleNotFoundError: print("llama_cpp not installed, continuing without") # Larger context sizes will reduce quality, but some models # support large contexts better than others. #CONTEXT_SIZE=2048 CONTEXT_SIZE=2048*2 # how many tokens to allow the model to output in a single go w/o stopping MAX_TOKENS=400 # Utils n stuff def load_model(model_path, n_gpu_layers=0, n_threads=os.cpu_count() - 1, n_ctx=CONTEXT_SIZE, temp=None, top_p=None): # for LLaMA2 70B models add kwarg: n_gqa=8 (NOTE: not required for GGUF models) print("Loading model", model_path) print("CTX:", n_ctx, "GPU layers:", n_gpu_layers, "CPU threads:", n_threads) print("Temperature:", temp, "Top-p Sampling:", top_p) kwargs = dict( model_path=model_path, n_ctx=n_ctx, n_gpu_layers=n_gpu_layers, n_threads=n_threads, verbose=False ) if temp is not None: kwargs["temp"] = temp if top_p is not None: kwargs["top_p"] = top_p llm = Llama(**kwargs) return llm def execute(model_path, outfile=None, debug=True, return_dict=None, prompt=None, n_gpu_layers=0, temp=None, top_p=None): llm = load_model(model_path, n_gpu_layers=n_gpu_layers, temp=temp, top_p=top_p)
try: except ModuleNotFoundError: print("llama_cpp not installed, continuing without") # Larger context sizes will reduce quality, but some models # support large contexts better than others. #CONTEXT_SIZE=2048 CONTEXT_SIZE=2048*2 # how many tokens to allow the model to output in a single go w/o stopping MAX_TOKENS=400 # Utils n stuff def load_model(model_path, n_gpu_layers=0, n_threads=os.cpu_count() - 1, n_ctx=CONTEXT_SIZE, temp=None, top_p=None): # for LLaMA2 70B models add kwarg: n_gqa=8 (NOTE: not required for GGUF models) print("Loading model", model_path) print("CTX:", n_ctx, "GPU layers:", n_gpu_layers, "CPU threads:", n_threads) print("Temperature:", temp, "Top-p Sampling:", top_p) kwargs = dict( model_path=model_path, n_ctx=n_ctx, n_gpu_layers=n_gpu_layers, n_threads=n_threads, verbose=False ) if temp is not None: kwargs["temp"] = temp if top_p is not None: kwargs["top_p"] = top_p llm = Llama(**kwargs) return llm def execute(model_path, outfile=None, debug=True, return_dict=None, prompt=None, n_gpu_layers=0, temp=None, top_p=None): llm = load_model(model_path, n_gpu_layers=n_gpu_layers, temp=temp, top_p=top_p)
db = load_db(DB_PATH)
1
2023-10-15 04:30:30+00:00
2k
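load_model above wraps llama-cpp-python's Llama constructor; in a ReAct-style SQL loop the loaded model is typically driven with stop sequences so tool output can be injected between generations. A hedged usage sketch (the model path, prompt, and stop marker are placeholders, not taken from the record):

from llama_cpp import Llama

llm = Llama(model_path="model.gguf", n_ctx=4096, verbose=False)
out = llm("Question: how many tables are there?\nThought:",
          max_tokens=400,          # matches the record's MAX_TOKENS
          stop=["Observation:"])   # pause before the tool result is appended
text = out["choices"][0]["text"]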
sehyun03/MulActSeg
tools/label_assignment_tensor.py
[ { "identifier": "RegionCityscapesTensor", "path": "dataloader/region_cityscapes_tensor.py", "snippet": "class RegionCityscapesTensor(RegionCityscapes):\n\n def __init__(self, args, root, datalist, split='train', transform=None, region_dict=\"dataloader/init_data/cityscapes/train.dict\"):\n super().__init__(args, root, datalist, split, transform, False, region_dict, True, False)\n self.kernel = np.ones((args.trim_kernel_size, args.trim_kernel_size), np.uint8)\n\n def __getitem__(self, index):\n img_fname, lbl_fname, spx_fname = self.im_idx[index]\n '''Load image, label, and superpixel'''\n image = Image.open(img_fname).convert('RGB')\n target = Image.open(lbl_fname)\n superpixel = self.open_spx(spx_fname)\n image, lbls = self.transform(image, [target, superpixel])\n target, superpixel = lbls\n target = self.encode_target(target)\n\n ''' superpixel tensor generation '''\n superpixel_cls = torch.zeros((self.args.nseg, self.args.num_classes + 1), dtype=torch.uint8)\n superpixel_size = torch.ones((self.args.nseg, ), dtype=torch.int) * -1\n\n '''GT masking (mimic region-based annotation)'''\n target = target.reshape(-1)\n preserving_labels = self.suppix[spx_fname]\n\n ### trim query boundary\n if self.args.trim_multihot_boundary:\n bdry = find_boundaries(superpixel, mode='thick')\n bdry = binary_dilation(bdry, self.kernel)\n bdry = torch.from_numpy(bdry)\n superpixel_trim = superpixel.clone()\n superpixel_trim = torch.masked_fill(superpixel_trim, bdry, self.args.nseg)\n superpixel_trim = superpixel_trim.reshape(-1)\n else:\n pass\n superpixel = superpixel.reshape(-1)\n\n ''' Multi-hot label assignment '''\n for p in preserving_labels:\n if self.args.trim_multihot_boundary:\n sp_mask = (superpixel_trim == p)\n sp_mask = sp_mask if torch.any(sp_mask) else (superpixel == p) # boundary 때문에 소실되는 걸 방지\n else:\n sp_mask = (superpixel == p)\n # Image.fromarray(sp_mask.reshape(1024,2048).numpy()).save(\"vis/trim/new_{}_trim_{}x{}_mask.png\".format(p, self.args.trim_kernel_size, self.args.trim_kernel_size))\n u, c = np.unique(target[sp_mask], return_counts=True) ### superpixel 내부에 class 구성 파악\n isignore = 255 in u\n if isignore and len(u) == 1:\n allignore = True\n else:\n allignore = False\n npx = sp_mask.sum()\n if not allignore:\n u_valid = u[u != 255]\n c_valid = c[u != 255]\n c_order = c_valid.argsort()[::-1]\n cls = u_valid[c_order].tolist()\n cpx = c_valid[c_order].tolist()\n else:\n cls = []\n cpx = []\n\n if isignore:\n cls.append(-1) ### last dimension of superpixel_cls is assigned to ignore label\n else:\n pass\n\n superpixel_cls[p, cls] = 1\n superpixel_size[p] = npx\n\n sample = {'superpixel_info': (superpixel_cls, superpixel_size), 'fname': self.im_idx[index]}\n\n return sample" }, { "identifier": "DataProvider", "path": "dataloader/utils.py", "snippet": "class DataProvider():\n def __init__(self, dataset, batch_size, num_workers, drop_last, shuffle,\n pin_memory):\n # dataset\n self.dataset = dataset\n self.iteration = 0\n self.epoch = 0\n\n # dataloader parameters\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.drop_last = drop_last\n self.shuffle = shuffle\n self.pin_memory = pin_memory\n self.dataloader = \\\n DataLoader(self.dataset, batch_size=self.batch_size, collate_fn=collate_fn,\n shuffle=self.shuffle, num_workers=self.num_workers, drop_last=self.drop_last,\n pin_memory=self.pin_memory)\n self.dataiter = iter(self.dataloader)\n\n def __len__(self):\n return len(self.dataloader)\n\n def __next__(self):\n try:\n batch = self.dataiter.next()\n self.iteration 
+= 1\n return batch\n\n except StopIteration:\n self.epoch += 1\n self.dataiter = iter(self.dataloader)\n batch = self.dataiter.next()\n self.iteration += 1\n return batch" } ]
import os import sys import argparse import numpy as np import dataloader.ext_transforms as et from tqdm import tqdm from dataloader.region_cityscapes_tensor import RegionCityscapesTensor from dataloader.utils import DataProvider
1,591
sys.path.append(os.path.abspath('.')) def get_parser(): # Training configurations parser = argparse.ArgumentParser(description='') parser.add_argument('--nseg', type=int, default=2048, help='# superpixel component for slic') parser.add_argument('--save_data_dir', help='superpixel directory root') parser.add_argument('--num_worker', type=int, default=8, help='number of data loading workers') parser.add_argument('--ignore_size', type=int, default=0, help='(or_labeling) ignore class region smaller than this') parser.add_argument('--mark_topk', type=int, default=-1, help='(or_labeling) ignore classes with region size under the k-th order') parser.add_argument('--num_classes', type=int, default=19, help='number of classes in dataset') parser.add_argument('--trim_kernel_size', type=int, default=3) parser.add_argument('--trim_multihot_boundary', action='store_true', default=False) parser.add_argument('--prob_dominant', action='store_true', default=False) parser.add_argument('--spx_method', default='seed') parser.add_argument('--trg_data_dir', default='./data/Cityscapes') return parser if __name__ == '__main__': parser = get_parser() args = parser.parse_args() args.trg_datalist = 'dataloader/init_data/cityscapes/train_{}{}.txt'.format(args.spx_method, args.nseg) args.region_dict = 'dataloader/init_data/cityscapes/train_{}{}.dict'.format(args.spx_method, args.nseg) args.known_ignore = False print(args) identity_transform = et.ExtCompose([et.ExtToTensor(dtype_list=['int','int'])]) ### load superpixel & max-frequent pooled target
sys.path.append(os.path.abspath('.')) def get_parser(): # Training configurations parser = argparse.ArgumentParser(description='') parser.add_argument('--nseg', type=int, default=2048, help='# superpixel component for slic') parser.add_argument('--save_data_dir', help='superpixel directory root') parser.add_argument('--num_worker', type=int, default=8, help='number of data loading workers') parser.add_argument('--ignore_size', type=int, default=0, help='(or_labeling) ignore class region smaller than this') parser.add_argument('--mark_topk', type=int, default=-1, help='(or_labeling) ignore classes with region size under the k-th order') parser.add_argument('--num_classes', type=int, default=19, help='number of classes in dataset') parser.add_argument('--trim_kernel_size', type=int, default=3) parser.add_argument('--trim_multihot_boundary', action='store_true', default=False) parser.add_argument('--prob_dominant', action='store_true', default=False) parser.add_argument('--spx_method', default='seed') parser.add_argument('--trg_data_dir', default='./data/Cityscapes') return parser if __name__ == '__main__': parser = get_parser() args = parser.parse_args() args.trg_datalist = 'dataloader/init_data/cityscapes/train_{}{}.txt'.format(args.spx_method, args.nseg) args.region_dict = 'dataloader/init_data/cityscapes/train_{}{}.dict'.format(args.spx_method, args.nseg) args.known_ignore = False print(args) identity_transform = et.ExtCompose([et.ExtToTensor(dtype_list=['int','int'])]) ### load superpixel & max-frequent pooled target
region_dataset = RegionCityscapesTensor(args,
0
2023-10-24 09:19:58+00:00
2k
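The multi-hot loop in RegionCityscapesTensor boils down to np.unique with counts per superpixel, with pixels labelled 255 routed to a trailing ignore channel. A numpy-only sketch of that assignment for a single region (shapes and the example values are illustrative):

import numpy as np

def multihot(target_px: np.ndarray, num_classes: int = 19) -> np.ndarray:
    onehot = np.zeros(num_classes + 1, dtype=np.uint8)  # last slot = ignore
    u, c = np.unique(target_px, return_counts=True)     # class ids + pixel counts
    for cls in u[np.argsort(-c)]:                       # most frequent first
        onehot[-1 if cls == 255 else cls] = 1
    return onehot

# multihot(np.array([1, 1, 5, 255])) sets channels 1, 5 and the ignore slot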
upiterbarg/hihack
models/flat_transformer.py
[ { "identifier": "generate_square_subsequent_mask", "path": "models/transformer_lstm.py", "snippet": "def generate_square_subsequent_mask(sz: int, device: str = \"cpu\") -> torch.Tensor:\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = (\n mask.float()\n .masked_fill(mask == 0, float(\"-inf\"))\n .masked_fill(mask == 1, float(0.0))\n ).to(device=device)\n return mask" }, { "identifier": "PositionalEncoding", "path": "models/transformer_lstm.py", "snippet": "class PositionalEncoding(nn.Module):\n def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):\n super().__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n position = torch.arange(max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(max_len, 1, d_model)\n pe[:, 0, 0::2] = torch.sin(position * div_term)\n pe[:, 0, 1::2] = torch.cos(position * div_term)\n self.register_buffer('pe', pe)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"\n Args:\n x: Tensor, shape [seq_len, batch_size, embedding_dim]\n \"\"\"\n x = x + self.pe[:x.size(0)]\n return self.dropout(x)" } ]
import json import numpy as np import os import pathlib import pdb import sys import torch from nle import nethack from nle.nethack.actions import ACTIONS as A from torch import nn from torch.nn import functional as F from .transformer_lstm import ( generate_square_subsequent_mask, PositionalEncoding ) from chaotic_dwarf import ( TopLineEncoder, BottomLinesEncoder, ScreenEncoder, conv_outdim )
1,449
base_path = pathlib.Path().resolve() sys.path.insert(0, os.path.join(base_path, '..', 'dungeonsdata-neurips2022/experiment_code/hackrl/models')) class FlatTransformer(nn.Module): def __init__(self, shape, action_space, flags, device): super(FlatTransformer, self).__init__() self.flags = flags self.num_actions = len(action_space) self.use_prev_action = flags.use_prev_action self.topline_encoder = TopLineEncoder() self.bottomline_encoder = torch.jit.script(BottomLinesEncoder()) pixel_size = flags.pixel_size if flags.crop_dim == 0: screen_shape = (24 * pixel_size, 80 * pixel_size) else: screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size) self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape)) self.prev_actions_dim = 128 if self.use_prev_action else 0 self.h_dim = sum( [ self.topline_encoder.hidden_dim, self.bottomline_encoder.hidden_dim, self.screen_encoder.hidden_dim, self.prev_actions_dim, ] ) self.num_attention_heads = flags.num_attention_heads self.num_transformer_encoder_layers = flags.num_transformer_layers core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads) self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers) self.positional_encoder = PositionalEncoding(self.h_dim) self.policy_hidden_dim = 1024 self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim), nn.ELU(), nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim), nn.ELU(), nn.Linear(self.policy_hidden_dim, self.num_actions) ) self.baseline = nn.Linear(self.h_dim, 1) self.version = 0 self.inference_unroll_length = 1 def initial_state(self, batch_size=1): return ( torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), torch.rand(self.inference_unroll_length, batch_size, self.h_dim) ) def forward(self, inputs, core_state=None): T, B, C, H, W = inputs["screen_image"].shape topline = inputs["tty_chars"][..., 0, :] bottom_line = inputs["tty_chars"][..., -2:, :] st = [ self.topline_encoder( topline.float(memory_format=torch.contiguous_format).view(T * B, -1) ), self.bottomline_encoder( bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1) ), self.screen_encoder( inputs["screen_image"] .float(memory_format=torch.contiguous_format) .view(T * B, C, H, W) ), ] if self.use_prev_action: st.append(torch.nn.functional.one_hot(inputs["prev_action"], self.prev_actions_dim).view(T * B, -1)) st = torch.cat(st, dim=1) core_input = st.reshape(T, B, -1) notdone = (~inputs["done"]).float() if not self.training: prev_mask, prev_encodings = core_state prev_mask = prev_mask.squeeze(0) core_input = torch.cat([prev_encodings[1:], core_input], axis=0) core_mask = torch.stack( [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)] ) core_mask[:, -1, -1] = 1 core_state = (core_mask.detach().clone().unsqueeze(0), core_input.detach().clone() ) for i in range(B): core_mask[i].fill_diagonal_(1) core_mask = (core_mask.float().masked_fill(core_mask == 0, float("-inf")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device) core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length) T = core_input.shape[0] else:
base_path = pathlib.Path().resolve() sys.path.insert(0, os.path.join(base_path, '..', 'dungeonsdata-neurips2022/experiment_code/hackrl/models')) class FlatTransformer(nn.Module): def __init__(self, shape, action_space, flags, device): super(FlatTransformer, self).__init__() self.flags = flags self.num_actions = len(action_space) self.use_prev_action = flags.use_prev_action self.topline_encoder = TopLineEncoder() self.bottomline_encoder = torch.jit.script(BottomLinesEncoder()) pixel_size = flags.pixel_size if flags.crop_dim == 0: screen_shape = (24 * pixel_size, 80 * pixel_size) else: screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size) self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape)) self.prev_actions_dim = 128 if self.use_prev_action else 0 self.h_dim = sum( [ self.topline_encoder.hidden_dim, self.bottomline_encoder.hidden_dim, self.screen_encoder.hidden_dim, self.prev_actions_dim, ] ) self.num_attention_heads = flags.num_attention_heads self.num_transformer_encoder_layers = flags.num_transformer_layers core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads) self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers) self.positional_encoder = PositionalEncoding(self.h_dim) self.policy_hidden_dim = 1024 self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim), nn.ELU(), nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim), nn.ELU(), nn.Linear(self.policy_hidden_dim, self.num_actions) ) self.baseline = nn.Linear(self.h_dim, 1) self.version = 0 self.inference_unroll_length = 1 def initial_state(self, batch_size=1): return ( torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), torch.rand(self.inference_unroll_length, batch_size, self.h_dim) ) def forward(self, inputs, core_state=None): T, B, C, H, W = inputs["screen_image"].shape topline = inputs["tty_chars"][..., 0, :] bottom_line = inputs["tty_chars"][..., -2:, :] st = [ self.topline_encoder( topline.float(memory_format=torch.contiguous_format).view(T * B, -1) ), self.bottomline_encoder( bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1) ), self.screen_encoder( inputs["screen_image"] .float(memory_format=torch.contiguous_format) .view(T * B, C, H, W) ), ] if self.use_prev_action: st.append(torch.nn.functional.one_hot(inputs["prev_action"], self.prev_actions_dim).view(T * B, -1)) st = torch.cat(st, dim=1) core_input = st.reshape(T, B, -1) notdone = (~inputs["done"]).float() if not self.training: prev_mask, prev_encodings = core_state prev_mask = prev_mask.squeeze(0) core_input = torch.cat([prev_encodings[1:], core_input], axis=0) core_mask = torch.stack( [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)] ) core_mask[:, -1, -1] = 1 core_state = (core_mask.detach().clone().unsqueeze(0), core_input.detach().clone() ) for i in range(B): core_mask[i].fill_diagonal_(1) core_mask = (core_mask.float().masked_fill(core_mask == 0, float("-inf")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device) core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length) T = core_input.shape[0] else:
core_mask = generate_square_subsequent_mask(T, core_input.device)
0
2023-10-23 15:44:32+00:00
2k
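During training, FlatTransformer falls through to generate_square_subsequent_mask (shown in the context) so each position attends only to itself and the past. A small sketch of how such a causal mask is consumed by nn.TransformerEncoder; the dimensions are illustrative, not the model's real h_dim:

import torch
from torch import nn

T, B, D, H = 8, 2, 16, 4
layer = nn.TransformerEncoderLayer(d_model=D, nhead=H)
core = nn.TransformerEncoder(layer, num_layers=2)
mask = torch.triu(torch.full((T, T), float("-inf")), diagonal=1)  # -inf above diagonal
out = core(torch.randn(T, B, D), mask=mask)  # (T, B, D), causal attention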
kulkansecurity/gitverify
gitverify.py
[ { "identifier": "gh_api", "path": "include/gh_api.py", "snippet": "GITHUB_API_URL = \"https://api.github.com/repos/\"\nGITHUB_TOKEN = os.environ.get(\"GH_ACCESS_TOKEN\", None)\ndef github_request_json(url):\ndef fetch_domains_from_code(repository):\ndef fetch_repository(github_url):\ndef fetch_contributors(repo_obj):\ndef fetch_issues_and_prs(repo_obj):\ndef fetch_contributor(contributor_obj):\ndef fetch_contributor_contributions(repo_obj, contributor_obj):\ndef json_request(url):" }, { "identifier": "output", "path": "include/output.py", "snippet": "class Output:\n ANSI_RESET = \"\\033[0m\"\n ANSI_BLUE = \"\\033[94m\"\n ANSI_GREEN = \"\\033[92m\"\n ANSI_RED = \"\\033[91m\"\n ANSI_YELLOW = \"\\033[93m\"\n def __init__(self, verbose=False, outfile=None, outformat='text'):\n def initialize_repo_output(self, repository):\n def positive(self, message, weight=0):\n def negative(self, message, weight=0):\n def debug(self, message):\n def warn(self, message):\n def _create_text_output(self):\n def _create_json_output(self):\n def _create_csv_output(self):\n def doOutput(self):" }, { "identifier": "arg_parser", "path": "include/arg_parser.py", "snippet": "def parse_repositories_from_file(filepath):\ndef validate_repository(repo):\ndef parse_arguments():" }, { "identifier": "verify_metadata", "path": "modules/verify_metadata.py", "snippet": "def run(repository, output_obj):\n THRESHOLD = len(gh_api.fetch_contributors(repository))" }, { "identifier": "verify_contributors", "path": "modules/verify_contributors.py", "snippet": "def run(repository, output_obj):" }, { "identifier": "verify_domains", "path": "modules/verify_domains.py", "snippet": "def run(repository, output_obj):" }, { "identifier": "verify_issues_prs", "path": "modules/verify_issues_prs.py", "snippet": "def run(repository, contributors, output_obj):" } ]
import os, sys from include import gh_api, output, arg_parser from modules import verify_metadata from modules import verify_contributors from modules import verify_domains from modules import verify_issues_prs
1,180
#!/usr/bin/env python3 if __name__ == "__main__": args = arg_parser.parse_arguments() output_obj = output.Output(verbose=args.verbose, outfile=args.outfile, outformat=args.format) print(""" ░██████╗░██╗████████╗██╗░░░██╗███████╗██████╗░██╗███████╗██╗░░░██╗ ██╔════╝░██║╚══██╔══╝██║░░░██║██╔════╝██╔══██╗██║██╔════╝╚██╗░██╔╝ ██║░░██╗░██║░░░██║░░░╚██╗░██╔╝█████╗░░██████╔╝██║█████╗░░░╚████╔╝░ ██║░░╚██╗██║░░░██║░░░░╚████╔╝░██╔══╝░░██╔══██╗██║██╔══╝░░░░╚██╔╝░░ ╚██████╔╝██║░░░██║░░░░░╚██╔╝░░███████╗██║░░██║██║██║░░░░░░░░██║░░░ ░╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚═╝╚═╝╚═╝░░░░░░░░╚═╝░░░ GitVerify: Is the repo trustworthy? Make an informed decision. v1.0 - https://www.kulkan.com ######################################################################################""") # Let's warn the user that unauth RateLimits are pretty low if os.environ.get("GH_ACCESS_TOKEN", None) == None: output_obj.warn("GH_ACCESS_TOKEN environment variable not set, using GitHub RateLimits for anonymous queries") output_obj.warn("Unauthenticated requests to the Github API will enforce a very low and strict RateLimit") print("For information on how to create a GitHub API Access Token refer to: ") print("https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens") if os.environ.get("VT_API_KEY", None) == None: output_obj.warn("VT_API_KEY environment variable not set, disabling VirusTotal checks.") print("For information on how to create a VirusTotal API Key refer to: ") print("https://www.virustotal.com/en/documentation/public-api/") args.disable_vt = True if not args.repositories_file: args.repositories_file = [args.repository] for repo in args.repositories_file: try: repository = gh_api.fetch_repository(repo) print("######################################################################################") print("Now verifying repository: {}".format(repository.get('full_name'))) except Exception as ex: print("Unable to pull data for the repository that was provided. Is it a valid repo URL?") if args.verbose: print(ex) sys.exit() output_obj.initialize_repo_output(repository.get('full_name')) verify_metadata.run(repository, output_obj) # We store the result from contributors() to prevent calling it again for I+PRS
#!/usr/bin/env python3 if __name__ == "__main__": args = arg_parser.parse_arguments() output_obj = output.Output(verbose=args.verbose, outfile=args.outfile, outformat=args.format) print(""" ░██████╗░██╗████████╗██╗░░░██╗███████╗██████╗░██╗███████╗██╗░░░██╗ ██╔════╝░██║╚══██╔══╝██║░░░██║██╔════╝██╔══██╗██║██╔════╝╚██╗░██╔╝ ██║░░██╗░██║░░░██║░░░╚██╗░██╔╝█████╗░░██████╔╝██║█████╗░░░╚████╔╝░ ██║░░╚██╗██║░░░██║░░░░╚████╔╝░██╔══╝░░██╔══██╗██║██╔══╝░░░░╚██╔╝░░ ╚██████╔╝██║░░░██║░░░░░╚██╔╝░░███████╗██║░░██║██║██║░░░░░░░░██║░░░ ░╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚═╝╚═╝╚═╝░░░░░░░░╚═╝░░░ GitVerify: Is the repo trustworthy? Make an informed decision. v1.0 - https://www.kulkan.com ######################################################################################""") # Let's warn the user that unauth RateLimits are pretty low if os.environ.get("GH_ACCESS_TOKEN", None) == None: output_obj.warn("GH_ACCESS_TOKEN environment variable not set, using GitHub RateLimits for anonymous queries") output_obj.warn("Unauthenticated requests to the Github API will enforce a very low and strict RateLimit") print("For information on how to create a GitHub API Access Token refer to: ") print("https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens") if os.environ.get("VT_API_KEY", None) == None: output_obj.warn("VT_API_KEY environment variable not set, disabling VirusTotal checks.") print("For information on how to create a VirusTotal API Key refer to: ") print("https://www.virustotal.com/en/documentation/public-api/") args.disable_vt = True if not args.repositories_file: args.repositories_file = [args.repository] for repo in args.repositories_file: try: repository = gh_api.fetch_repository(repo) print("######################################################################################") print("Now verifying repository: {}".format(repository.get('full_name'))) except Exception as ex: print("Unable to pull data for the repository that was provided. Is it a valid repo URL?") if args.verbose: print(ex) sys.exit() output_obj.initialize_repo_output(repository.get('full_name')) verify_metadata.run(repository, output_obj) # We store the result from contributors() to prevent calling it again for I+PRS
contributors = verify_contributors.run(repository, output_obj)
4
2023-10-24 15:39:55+00:00
2k
nmathey/finasync
finasync/realt.py
[ { "identifier": "GNOSIS_API_TOKENLIST_URI", "path": "finasync/constants.py", "snippet": "GNOSIS_API_TOKENLIST_URI = (\n \"https://blockscout.com/xdai/mainnet/api?module=account&action=tokenlist&address=\"\n)" }, { "identifier": "REALT_API_TOKENLIST_URI", "path": "finasync/constants.py", "snippet": "REALT_API_TOKENLIST_URI = \"https://api.realt.community/v1/token\"" }, { "identifier": "REALT_OFFLINE_TOKENS_LIST", "path": "finasync/constants.py", "snippet": "REALT_OFFLINE_TOKENS_LIST = \"RealT_OfflineTokensList.json\"" }, { "identifier": "convert_currency", "path": "finasync/utils.py", "snippet": "def convert_currency(amount, from_currency, to_currency):\n Now_Time = datetime.today()\n Exchange_OfflineRates_Path = Path(\n EXCHANGE_OFFLINE_RATES_PATH\n + \"Exchange_OfflineRates_To_\"\n + to_currency\n + \".json\"\n )\n Exchange_OfflineRates_Path.touch(exist_ok=True)\n converted_amount = 0\n with open(Exchange_OfflineRates_Path) as json_file:\n try:\n Exchange_OfflineRates = json.load(json_file)\n except JSONDecodeError:\n Exchange_OfflineRates = {\n \"info\": {\n \"last_sync\": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))\n },\n \"data\": {},\n }\n\n # Fetch latest exchange rates only if local cache > 1 week\n if float(Exchange_OfflineRates[\"info\"][\"last_sync\"]) < datetime.timestamp(\n Now_Time - timedelta(weeks=1)\n ):\n response = requests.get(EXCHANGE_RATES_API_URI + to_currency)\n Exchange_OfflineRates[\"info\"][\"last_sync\"] = str(datetime.timestamp(Now_Time))\n Exchange_OfflineRates[\"data\"] = response.json()\n\n data = Exchange_OfflineRates[\"data\"]\n if \"rates\" in data:\n rates = data[\"rates\"]\n if from_currency in rates and to_currency in rates:\n converted_amount = amount / rates[from_currency]\n else:\n raise ValueError(\"Invalid currency!\")\n else:\n raise ValueError(\"Unable to fetch exchange rates!\")\n\n with open(Exchange_OfflineRates_Path, \"w\") as outfile:\n json.dump(Exchange_OfflineRates, outfile, indent=4)\n\n return round(converted_amount, 2)" } ]
import requests import re import json import time import os import logging from pathlib import Path from datetime import datetime, timedelta from json.decoder import JSONDecodeError from finary_uapi.user_real_estates import ( get_user_real_estates, delete_user_real_estates, update_user_real_estates, add_user_real_estates, add_user_real_estates_with_currency, ) from finary_uapi.user_me import get_display_currency_code from .constants import ( GNOSIS_API_TOKENLIST_URI, REALT_API_TOKENLIST_URI, REALT_OFFLINE_TOKENS_LIST, ) from .utils import convert_currency
881
def get_realt_token_details(realt_token_contractAdress): Now_Time = datetime.today() RealT_OfflineTokensList_Path = Path(REALT_OFFLINE_TOKENS_LIST) RealT_OfflineTokensList_Path.touch(exist_ok=True) with open(RealT_OfflineTokensList_Path) as json_file: try: RealT_OfflineTokensList = json.load(json_file) except JSONDecodeError: RealT_OfflineTokensList = { "info": { "last_sync": str(datetime.timestamp(Now_Time - timedelta(weeks=2))) }, "data": {}, } # Update offlineTokensList from RealT API only if more than 1 week old if float(RealT_OfflineTokensList["info"]["last_sync"]) < datetime.timestamp( Now_Time - timedelta(weeks=1) ): MyRealT_API_Header = { "Accept": "*/*", "X-AUTH-REALT-TOKEN": os.environ["MYREALT_API_KEY"], } TokensListReq = requests.get(
def get_realt_token_details(realt_token_contractAdress): Now_Time = datetime.today() RealT_OfflineTokensList_Path = Path(REALT_OFFLINE_TOKENS_LIST) RealT_OfflineTokensList_Path.touch(exist_ok=True) with open(RealT_OfflineTokensList_Path) as json_file: try: RealT_OfflineTokensList = json.load(json_file) except JSONDecodeError: RealT_OfflineTokensList = { "info": { "last_sync": str(datetime.timestamp(Now_Time - timedelta(weeks=2))) }, "data": {}, } # Update offlineTokensList from RealT API only if more than 1 week old if float(RealT_OfflineTokensList["info"]["last_sync"]) < datetime.timestamp( Now_Time - timedelta(weeks=1) ): MyRealT_API_Header = { "Accept": "*/*", "X-AUTH-REALT-TOKEN": os.environ["MYREALT_API_KEY"], } TokensListReq = requests.get(
REALT_API_TOKENLIST_URI, headers=MyRealT_API_Header
1
2023-10-24 00:32:05+00:00
2k
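get_realt_token_details caches the RealT token list on disk and refetches only when the stored last_sync timestamp is more than a week old. A minimal sketch of that staleness check, assuming a JSON cache file and a fetch callable (both names are placeholders):

import json
import time
from pathlib import Path

WEEK = 7 * 24 * 3600

def load_cached(path: Path, fetch):
    cache = {"info": {"last_sync": "0"}, "data": {}}
    if path.exists() and path.stat().st_size:
        cache = json.loads(path.read_text())
    if float(cache["info"]["last_sync"]) < time.time() - WEEK:  # older than a week?
        cache = {"info": {"last_sync": str(time.time())}, "data": fetch()}
        path.write_text(json.dumps(cache, indent=4))
    return cache["data"]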
biggzlar/plausible-uncertainties
evidential_regression/networks.py
[ { "identifier": "DenseInverseGamma", "path": "evidential_regression/layers.py", "snippet": "class DenseInverseGamma(torch.nn.Module):\n \"\"\" Based on: https://github.com/aamini/evidential-deep-learning.\n \"\"\"\n def __init__(self, in_features, units=1):\n super(DenseInverseGamma, self).__init__()\n self.units = units\n self.dense = torch.nn.Linear(in_features=in_features, out_features=4 * self.units)\n self.softplus = torch.nn.Softplus()\n\n def evidence(self, x):\n return self.softplus(x)\n\n def forward(self, x):\n output = self.dense(x)\n mu, logv, logalpha, logbeta = torch.split(output, split_size_or_sections=self.units, dim=-1)\n \n nu = self.evidence(logv)\n alpha = self.evidence(logalpha) + 2\n beta = self.evidence(logbeta)\n \n return mu, nu, alpha, beta" }, { "identifier": "DenseInverseWishart", "path": "evidential_regression/layers.py", "snippet": "class DenseInverseWishart(torch.nn.Module):\n def __init__(self, in_features, p=1, mu_activation=None):\n super(DenseInverseWishart, self).__init__()\n self.p = p\n self.diag_indices = [i for i in range(self.p)]\n self.tril_indices = torch.tril_indices(self.p, self.p).tolist()\n\n self.mu = torch.nn.Linear(in_features=in_features, out_features=self.p)\n self.params = torch.nn.Linear(in_features=in_features, out_features=2)\n\n self.n_decomposit_units = int((1 + self.p) * self.p / 2)\n self.L_decomposit = torch.nn.Linear(in_features=in_features, out_features=self.p**2)\n \n self.softplus = torch.nn.Softplus()\n self.mu_activation = mu_activation\n\n def evidence(self, x):\n return self.softplus(x)\n\n def forward(self, x):\n mu = self.mu(x)\n params = self.params(x)\n lognu, logkappa = torch.split(params, split_size_or_sections=1, dim=-1)\n \n if self.mu_activation is not None:\n mu = self.mu_activation(mu)\n nu = self.evidence(lognu) + self.p + 1\n kappa = self.evidence(logkappa) + 1\n \n L = self.L_decomposit(x)\n L = L.view(-1, self.p, self.p)\n L = torch.tril(L, diagonal=-1) + torch.diag_embed(1e-2 + self.evidence(torch.diagonal(L, dim1=-2, dim2=-1)))\n\n # non_zeros = self.L_decomposit(x)\n # L = torch.zeros((x.shape[0], self.p, self.p))\n # L[:, self.tril_indices[0], self.tril_indices[1]] = non_zeros\n # L[:, self.diag_indices, self.diag_indices] = self.evidence(L[:, self.diag_indices, self.diag_indices])\n\n return mu, nu, kappa, L" } ]
import torch import torch.nn as nn import numpy as np from .layers import DenseInverseGamma, DenseInverseWishart
897
class UnivariateDerNet(nn.Module): def __init__(self): super(UnivariateDerNet, self).__init__() self.hidden = nn.Sequential( nn.Linear(in_features=1, out_features=128), # nn.ReLU6(), # nn.Tanh(), nn.Mish(), nn.Linear(in_features=128, out_features=128), # nn.ReLU6(), # nn.Tanh(), nn.Mish(), nn.Linear(in_features=128, out_features=128), # nn.ReLU6(), # nn.Tanh(), nn.Mish(), nn.Linear(in_features=128, out_features=128),
class UnivariateDerNet(nn.Module): def __init__(self): super(UnivariateDerNet, self).__init__() self.hidden = nn.Sequential( nn.Linear(in_features=1, out_features=128), # nn.ReLU6(), # nn.Tanh(), nn.Mish(), nn.Linear(in_features=128, out_features=128), # nn.ReLU6(), # nn.Tanh(), nn.Mish(), nn.Linear(in_features=128, out_features=128), # nn.ReLU6(), # nn.Tanh(), nn.Mish(), nn.Linear(in_features=128, out_features=128),
DenseInverseGamma(in_features=128, units=1)
0
2023-10-19 08:44:08+00:00
2k
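DenseInverseGamma above emits the Normal-Inverse-Gamma parameters (mu, nu, alpha, beta) via a softplus evidence transform, with the alpha offset keeping alpha > 1. The record stops at the network definition; for reference, the standard closed forms used in deep evidential regression (Amini et al.) to read uncertainties off those parameters are sketched below, not taken from this repository:

import torch

def nig_uncertainty(mu, nu, alpha, beta):
    aleatoric = beta / (alpha - 1)         # E[sigma^2], expected data noise
    epistemic = beta / (nu * (alpha - 1))  # Var[mu], model uncertainty
    return mu, aleatoric, epistemic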
t-ega/whatsapp-cloud-sdk
whatsapp_cloud_sdk/_formaters/message_formatter.py
[ { "identifier": "JSONDict", "path": "whatsapp_cloud_sdk/_utils/types.py", "snippet": "class MessageTypes(Enum):\n IMAGE = \"image\"\n AUDIO = \"audio\"\n TEXT = \"text\"\n REACTION = \"reaction\"\n STICKER = \"sticker\"\n LOCATION = \"location\"\n UNKNOWN = \"unknown\"" }, { "identifier": "ButtonContents", "path": "whatsapp_cloud_sdk/_validators/messages.py", "snippet": "class ButtonContents(BaseModel):\n \"\"\"\n Represents the contents of a button.\n\n Args:\n id (str, optional): An optional button ID. Defaults to a UUID.\n title (str): The title or label of the button.\n\n Attributes:\n None\n \"\"\"\n\n id: Optional[str] = str(uuid.uuid4())\n title: constr(max_length=20, min_length=1)" } ]
from enum import Enum from typing import List, Optional from unicodedata import decimal from whatsapp_cloud_sdk._utils.types import JSONDict from whatsapp_cloud_sdk._validators.messages import ButtonContents
884
"""This module contains custom formatting class and aliases for internal use within the library. Warning: Contents of this module are intended to be used internally by the library and *not* by the user. Changes to this module are not considered breaking changes and may not be documented in the changelog. """ class LinkTypes(Enum): """ Constants representing different types of links. Attributes: AUDIO (str): A link type for audio content. IMAGE (str): A link type for image content. VIDEO (str): A link type for video content. """ AUDIO = "audio" IMAGE = "image" VIDEO = "video" class MessageFormatter: """ Provides methods for formatting messages and data for interaction with the WhatsApp API. Methods: - format_text_message(body: str, to: str, preview_url: bool = False, message_id: str = None) -> JSONDict: - format_button_message(to: str, text: str, buttons: List[ButtonContents], message_id: Optional[str]) -> JSONDict: - format_reply_with_reaction(to: str, emoji, message_id: Optional[str]) -> JSONDict: - format_link_message(to: str, link: str, m_type: LinkTypes, caption: str = "", message_id: str =None -> JSONDict: - format_send_document_by_url(to: str, document_link: str, caption: str, is_reply: bool = False, message_id: str = None) -> JSONDict: - format_location_message(to: str, latitude: decimal, longitude: int, name: str, address: str, message_id: Optional[str]) -> JSONDict: - format_contact_message(contact: list, to: str, message_id: Optional[str]) -> JSONDict: - format_sticker_message_by_url(link: str, to: str, message_id: Optional[str]) -> JSONDict: - mark_message_as_read(message_id: str) -> JSONDict: """ @staticmethod def format_text_message( body: str, to: str, preview_url: bool = False, message_id: str = None ) -> JSONDict: """ Formats a text message for WhatsApp. Args: - body (str): The text message body. - to (str): The recipient's WhatsApp number. - preview_url (bool, optional): Whether to preview URLs in the message. - message_id (str, optional): The ID of the message being replied to. Returns: - JSONDict: The formatted text message. """ body = { "messaging_product": "whatsapp", "recipient_type": "individual", "to": to, "type": "text", "text": {"preview_url": preview_url, "body": body}, } if message_id: body["context"] = {"message_id": message_id} return body @staticmethod def format_button_message( to: str, text: str,
"""This module contains custom formatting class and aliases for internal use within the library. Warning: Contents of this module are intended to be used internally by the library and *not* by the user. Changes to this module are not considered breaking changes and may not be documented in the changelog. """ class LinkTypes(Enum): """ Constants representing different types of links. Attributes: AUDIO (str): A link type for audio content. IMAGE (str): A link type for image content. VIDEO (str): A link type for video content. """ AUDIO = "audio" IMAGE = "image" VIDEO = "video" class MessageFormatter: """ Provides methods for formatting messages and data for interaction with the WhatsApp API. Methods: - format_text_message(body: str, to: str, preview_url: bool = False, message_id: str = None) -> JSONDict: - format_button_message(to: str, text: str, buttons: List[ButtonContents], message_id: Optional[str]) -> JSONDict: - format_reply_with_reaction(to: str, emoji, message_id: Optional[str]) -> JSONDict: - format_link_message(to: str, link: str, m_type: LinkTypes, caption: str = "", message_id: str =None -> JSONDict: - format_send_document_by_url(to: str, document_link: str, caption: str, is_reply: bool = False, message_id: str = None) -> JSONDict: - format_location_message(to: str, latitude: decimal, longitude: int, name: str, address: str, message_id: Optional[str]) -> JSONDict: - format_contact_message(contact: list, to: str, message_id: Optional[str]) -> JSONDict: - format_sticker_message_by_url(link: str, to: str, message_id: Optional[str]) -> JSONDict: - mark_message_as_read(message_id: str) -> JSONDict: """ @staticmethod def format_text_message( body: str, to: str, preview_url: bool = False, message_id: str = None ) -> JSONDict: """ Formats a text message for WhatsApp. Args: - body (str): The text message body. - to (str): The recipient's WhatsApp number. - preview_url (bool, optional): Whether to preview URLs in the message. - message_id (str, optional): The ID of the message being replied to. Returns: - JSONDict: The formatted text message. """ body = { "messaging_product": "whatsapp", "recipient_type": "individual", "to": to, "type": "text", "text": {"preview_url": preview_url, "body": body}, } if message_id: body["context"] = {"message_id": message_id} return body @staticmethod def format_button_message( to: str, text: str,
buttons: List[ButtonContents],
1
2023-10-15 21:12:45+00:00
2k
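For reference, this is the payload format_text_message above produces; the reply context is attached only when message_id is given (the phone number and message id here are placeholders):

payload = MessageFormatter.format_text_message(
    body="hello", to="15551234567", preview_url=False, message_id="wamid.XYZ")
# {"messaging_product": "whatsapp", "recipient_type": "individual",
#  "to": "15551234567", "type": "text",
#  "text": {"preview_url": False, "body": "hello"},
#  "context": {"message_id": "wamid.XYZ"}}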
DTennant/GPC
data/imagenet.py
[ { "identifier": "subsample_instances", "path": "data/data_utils.py", "snippet": "def subsample_instances(dataset, prop_indices_to_subsample=0.8):\n\n np.random.seed(0)\n subsample_indices = np.random.choice(range(len(dataset)), replace=False,\n size=(int(prop_indices_to_subsample * len(dataset)),))\n\n return subsample_indices" }, { "identifier": "imagenet_root", "path": "config.py", "snippet": "_C = CN()\n_C.MODEL = CN()\n_C.MODEL.DEVICE = \"cuda\"\n_C.MODEL.NAME = 'resnet50'\n_C.MODEL.LAST_STRIDE = 1\n_C.MODEL.LABEL_SMOOTH = False\n_C.MODEL.PRETRAIN_PATH = ''\n_C.INPUT = CN()\n_C.INPUT.SIZE_TRAIN = [384, 128]\n_C.INPUT.SIZE_TEST = [384, 128]\n_C.INPUT.PROB = 0.0\n_C.INPUT.RE_PROB = 0.0\n_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]\n_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]\n_C.INPUT.PADDING = 10\n_C.DATASETS = CN()\n_C.DATASETS.NAMES = ('market1501')\n_C.DATASETS.DATA_PATH = '/home/zbc/data/market1501/'\n_C.DATASETS.TRAIN_PATH = 'bounding_box_train'\n_C.DATASETS.QUERY_PATH = 'query'\n_C.DATASETS.GALLERY_PATH = 'bounding_box_test'\n_C.DATALOADER = CN()\n_C.DATALOADER.NUM_WORKERS = 8\n_C.DATALOADER.SAMPLER = 'softmax'\n_C.DATALOADER.NUM_INSTANCE = 16\n_C.SOLVER = CN()\n_C.SOLVER.OPTIMIZER_NAME = \"Adam\"\n_C.SOLVER.FP16 = False\n_C.SOLVER.MAX_EPOCHS = 50\n_C.SOLVER.BASE_LR = 3e-4\n_C.SOLVER.BIAS_LR_FACTOR = 2\n_C.SOLVER.MOMENTUM = 0.9\n_C.SOLVER.MARGIN = 0.3\n_C.SOLVER.WEIGHT_DECAY = 0.0005\n_C.SOLVER.WEIGHT_DECAY_BIAS = 0.\n_C.SOLVER.GAMMA = 0.1\n_C.SOLVER.STEPS = (30, 55)\n_C.SOLVER.WARMUP_FACTOR = 1.0 / 3\n_C.SOLVER.WARMUP_ITERS = 500\n_C.SOLVER.WARMUP_METHOD = \"linear\"\n_C.SOLVER.CHECKPOINT_PERIOD = 50\n_C.SOLVER.LOG_PERIOD = 100\n_C.SOLVER.EVAL_PERIOD = 50\n_C.SOLVER.IMS_PER_BATCH = 64\n_C.SOLVER.CYTHON = True\n_C.TEST = CN()\n_C.TEST.IMS_PER_BATCH = 128\n_C.TEST.WEIGHT = \"\"\n_C.TEST.DEBUG = False\n_C.TEST.MULTI_GPU = False\n_C.TEST.RERANK = True\n_C.OUTPUT_DIR = \"\"" } ]
import torchvision import numpy as np import os from copy import deepcopy from data.data_utils import subsample_instances from config import imagenet_root
1,514
class ImageNetBase(torchvision.datasets.ImageFolder):

    def __init__(self, root, transform):
        super(ImageNetBase, self).__init__(root, transform)
        self.uq_idxs = np.array(range(len(self)))

    def __getitem__(self, item):
        img, label = super().__getitem__(item)
        uq_idx = self.uq_idxs[item]
        return img, label, uq_idx


def subsample_dataset(dataset, idxs):
    imgs_ = []
    for i in idxs:
        imgs_.append(dataset.imgs[i])
    dataset.imgs = imgs_

    samples_ = []
    for i in idxs:
        samples_.append(dataset.samples[i])
    dataset.samples = samples_

    # dataset.imgs = [x for i, x in enumerate(dataset.imgs) if i in idxs]
    # dataset.samples = [x for i, x in enumerate(dataset.samples) if i in idxs]

    dataset.targets = np.array(dataset.targets)[idxs].tolist()
    dataset.uq_idxs = dataset.uq_idxs[idxs]

    return dataset


def subsample_classes(dataset, include_classes=list(range(1000))):
    cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]

    target_xform_dict = {}
    for i, k in enumerate(include_classes):
        target_xform_dict[k] = i

    dataset = subsample_dataset(dataset, cls_idxs)
    dataset.target_transform = lambda x: target_xform_dict[x]

    return dataset


def get_train_val_indices(train_dataset, val_split=0.2):
    train_classes = list(set(train_dataset.targets))

    # Get train/test indices
    train_idxs = []
    val_idxs = []
    for cls in train_classes:
        cls_idxs = np.where(np.array(train_dataset.targets) == cls)[0]

        v_ = np.random.choice(cls_idxs, replace=False, size=((int(val_split * len(cls_idxs))),))
        t_ = [x for x in cls_idxs if x not in v_]

        train_idxs.extend(t_)
        val_idxs.extend(v_)

    return train_idxs, val_idxs


def get_equal_len_datasets(dataset1, dataset2):
    """
    Make two datasets the same length
    """
    if len(dataset1) > len(dataset2):
        rand_idxs = np.random.choice(range(len(dataset1)), size=(len(dataset2),))
        subsample_dataset(dataset1, rand_idxs)
    elif len(dataset2) > len(dataset1):
        rand_idxs = np.random.choice(range(len(dataset2)), size=(len(dataset1),))
        subsample_dataset(dataset2, rand_idxs)

    return dataset1, dataset2


def get_imagenet_100_datasets(train_transform, test_transform, train_classes=range(80),
                              prop_train_labels=0.8, split_train_val=False, seed=0):

    np.random.seed(seed)

    # Subsample imagenet dataset initially to include 100 classes
    subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)
    subsampled_100_classes = np.sort(subsampled_100_classes)
    print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')
    cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}

    # Init entire training set
imagenet_training_set = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform)
1
2023-10-23 18:23:22+00:00
2k
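A minimal, self-contained sketch (not part of the record) of the two ideas in the snippet above: seeding NumPy so the ImageNet-100 class subset is reproducible, and remapping the surviving class ids onto a contiguous 0..99 range as target_xform_dict does. The stand-in targets list is hypothetical.

import numpy as np

np.random.seed(0)
chosen = np.sort(np.random.choice(range(1000), size=(100,), replace=False))
target_xform = {k: i for i, k in enumerate(chosen)}  # old class id -> 0..99

targets = [int(chosen[5]), int(chosen[42])]  # hypothetical labels drawn from the subset
remapped = [target_xform[t] for t in targets]
print(remapped)  # [5, 42]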
camenduru/MiniGPT-v2-hf
minigpt4/models/base_model.py
[ { "identifier": "download_cached_file", "path": "minigpt4/common/dist_utils.py", "snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()" }, { "identifier": "is_dist_avail_and_initialized", "path": "minigpt4/common/dist_utils.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "get_abs_path", "path": "minigpt4/common/utils.py", "snippet": "def get_abs_path(rel_path):\n return os.path.join(registry.get_path(\"library_root\"), rel_path)" }, { "identifier": "is_url", "path": "minigpt4/common/utils.py", "snippet": "def is_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")" }, { "identifier": "create_eva_vit_g", "path": "minigpt4/models/eva_vit.py", "snippet": "def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision=\"fp16\"):\n model = VisionTransformer(\n img_size=img_size,\n patch_size=14,\n use_mean_pooling=False,\n embed_dim=1408,\n depth=39,\n num_heads=1408//88,\n mlp_ratio=4.3637,\n qkv_bias=True,\n drop_path_rate=drop_path_rate,\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n use_checkpoint=use_checkpoint,\n ) \n url = \"https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth\"\n cached_file = download_cached_file(\n url, check_hash=False, progress=True\n )\n state_dict = torch.load(cached_file, map_location=\"cpu\") \n interpolate_pos_embed(model,state_dict)\n \n incompatible_keys = model.load_state_dict(state_dict, strict=False)\n# print(incompatible_keys)\n \n if precision == \"fp16\":\n# model.to(\"cuda\") \n convert_weights_to_fp16(model)\n return model" } ]
import os
import logging
import contextlib

import numpy as np
import torch
import torch.nn as nn

from omegaconf import OmegaConf
from transformers import BertTokenizer, LlamaTokenizer
from transformers.models.llama.modeling_llama import LlamaForCausalLM
from peft import (
    LoraConfig,
    get_peft_model,
    prepare_model_for_int8_training,
)

from minigpt4.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
from minigpt4.common.utils import get_abs_path, is_url
from minigpt4.models.eva_vit import create_eva_vit_g
1,202
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class BaseModel(nn.Module): """Base class for models.""" def __init__(self): super().__init__() @property def device(self): return list(self.parameters())[-1].device def load_checkpoint(self, url_or_filename): """ Load from a finetuned checkpoint. This should expect no mismatch in the model keys and the checkpoint keys. """ if is_url(url_or_filename): cached_file = download_cached_file( url_or_filename, check_hash=False, progress=True ) checkpoint = torch.load(cached_file, map_location="cpu") elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location="cpu") else: raise RuntimeError("checkpoint url or path is invalid") if "model" in checkpoint.keys(): state_dict = checkpoint["model"] else: state_dict = checkpoint msg = self.load_state_dict(state_dict, strict=False) logging.info("Missing keys {}".format(msg.missing_keys)) logging.info("load checkpoint from %s" % url_or_filename) return msg @classmethod def from_pretrained(cls, model_type): """ Build a pretrained model from default configuration file, specified by model_type. Args: - model_type (str): model type, specifying architecture and checkpoints. Returns: - model (nn.Module): pretrained or finetuned model, depending on the configuration. """ model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model model = cls.from_config(model_cfg) return model @classmethod def default_config_path(cls, model_type): assert ( model_type in cls.PRETRAINED_MODEL_CONFIG_DICT ), "Unknown model type {}".format(model_type)
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class BaseModel(nn.Module): """Base class for models.""" def __init__(self): super().__init__() @property def device(self): return list(self.parameters())[-1].device def load_checkpoint(self, url_or_filename): """ Load from a finetuned checkpoint. This should expect no mismatch in the model keys and the checkpoint keys. """ if is_url(url_or_filename): cached_file = download_cached_file( url_or_filename, check_hash=False, progress=True ) checkpoint = torch.load(cached_file, map_location="cpu") elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location="cpu") else: raise RuntimeError("checkpoint url or path is invalid") if "model" in checkpoint.keys(): state_dict = checkpoint["model"] else: state_dict = checkpoint msg = self.load_state_dict(state_dict, strict=False) logging.info("Missing keys {}".format(msg.missing_keys)) logging.info("load checkpoint from %s" % url_or_filename) return msg @classmethod def from_pretrained(cls, model_type): """ Build a pretrained model from default configuration file, specified by model_type. Args: - model_type (str): model type, specifying architecture and checkpoints. Returns: - model (nn.Module): pretrained or finetuned model, depending on the configuration. """ model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model model = cls.from_config(model_cfg) return model @classmethod def default_config_path(cls, model_type): assert ( model_type in cls.PRETRAINED_MODEL_CONFIG_DICT ), "Unknown model type {}".format(model_type)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
2
2023-10-15 19:54:22+00:00
2k
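A hedged sketch of the strict=False loading pattern that load_checkpoint above relies on; the toy module and checkpoint dict here are illustrative, not from the repo.

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
checkpoint = {"model": {"weight": torch.zeros(2, 4)}}  # bias intentionally absent

state_dict = checkpoint["model"] if "model" in checkpoint else checkpoint
msg = model.load_state_dict(state_dict, strict=False)
print(msg.missing_keys)  # ['bias'] is reported instead of raising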
deepghs/sdeval
sdeval/corrupt/aicorrupt.py
[ { "identifier": "load_images", "path": "sdeval/utils/images.py", "snippet": "def _yield_images(images: ImagesTyping) -> Iterator[Image.Image]:\ndef load_images(images: ImagesTyping) -> List[Image.Image]:" }, { "identifier": "tqdm", "path": "sdeval/utils/tqdm_.py", "snippet": "def tqdm(*args, silent: bool = False, **kwargs):\n \"\"\"\n An enhanced version of tqdm (progress bar) with an option to silence the output.\n\n This function modifies the behavior of tqdm to allow silencing the progress bar.\n\n :param args: Positional arguments to be passed to tqdm.\n :param silent: If True, the progress bar content will not be displayed.\n :type silent: bool\n :param kwargs: Additional keyword arguments to be passed to tqdm.\n :return: tqdm progress bar.\n :rtype: tqdm.std.tqdm\n \"\"\"\n with io.StringIO() as sio:\n if silent:\n kwargs['file'] = sio\n\n return _origin_tqdm(*args, **kwargs)" } ]
import json
import numpy as np

from functools import lru_cache
from typing import Tuple, Optional, Mapping

from PIL import Image
from huggingface_hub import hf_hub_download
from imgutils.data import rgb_encode, ImageTyping, load_image
from imgutils.utils import open_onnx_model

from ..utils import ImagesTyping, load_images, tqdm
1,561
@lru_cache()
def _open_anime_aicop_meta(model_name: str):
    """
    Open the meta information of the AI image corrupted detection model.

    This function downloads and opens the meta information of the AI image corrupted detection model
    specified by the given model name using Hugging Face Hub.

    :param model_name: The name of the AI image corrupted detection model.
    :type model_name: str
    :return: The opened meta information of the AI image corrupted detection model.
    :rtype: dict
    """
    with open(hf_hub_download(
        f'deepghs/ai_image_corrupted',
        f'{model_name}/meta.json',
    ), 'r', encoding='utf-8') as f:
        return json.load(f)


@lru_cache()
def _open_anime_aicop_labels(model_name: str):
    """
    Open the labels of the AI image corrupted detection model.

    This function opens the labels of the AI image corrupted detection model
    specified by the given model name.

    :param model_name: The name of the AI image corrupted detection model.
    :type model_name: str
    :return: The labels of the AI image corrupted detection model.
    :rtype: List[str]
    """
    return _open_anime_aicop_meta(model_name)['labels']


def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384),
                normalize: Optional[Tuple[float, float]] = (0.5, 0.5)):
    """
    Encode the image for AI image corrupted detection.

    This function resizes and encodes the image for AI image corrupted detection.

    :param image: The input image.
    :type image: Image.Image
    :param size: The target size for encoding. Default is (384, 384).
    :type size: Tuple[int, int]
    :param normalize: The normalization parameters. Default is (0.5, 0.5).
    :type normalize: Optional[Tuple[float, float]]
    :return: The encoded image data.
    :rtype: np.ndarray
    """
    image = image.resize(size, Image.BILINEAR)
    data = rgb_encode(image, order_='CHW')

    if normalize is not None:
        mean_, std_ = normalize
        mean = np.asarray([mean_]).reshape((-1, 1, 1))
        std = np.asarray([std_]).reshape((-1, 1, 1))
        data = (data - mean) / std

    return data.astype(np.float32)


def get_ai_corrupted(image: ImageTyping, model_name: str = _DEFAULT_MODEL_NAME) -> Mapping[str, float]:
    """
    Get AI image corrupted detection scores for an image.

    This function calculates AI image corrupted detection scores for a given image using the specified model.

    :param image: The input image.
    :type image: ImageTyping
    :param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
    :type model_name: str
    :return: A dictionary containing the corrupted score.
    :rtype: Mapping[str, float]
    """
    image = load_image(image, force_background='white', mode='RGB')
    input_ = _img_encode(image)[None, ...]
    output, = _open_anime_aicop_model(model_name).run(['output'], {'input': input_})

    return dict(zip(_open_anime_aicop_labels(model_name), output[0].tolist()))


class AICorruptMetrics:
    """
    Class for calculating an AI image corruptness score.

    The `AICorruptMetrics` class allows you to calculate an AI image corruptness score
    using the AI image corrupted detection model.

    :param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
    :type model_name: str
    :param silent: If True, suppresses progress bars and additional output during calculation.
    :type silent: bool
    :param tqdm_desc: Description for the tqdm progress bar during calculation.
    :type tqdm_desc: str
    """

    def __init__(self, model_name: str = _DEFAULT_MODEL_NAME,
                 silent: bool = False, tqdm_desc: str = None):
        self._model_name = model_name
        self.silent = silent
        self.tqdm_desc = tqdm_desc or self.__class__.__name__

    def score(self, images: ImagesTyping, silent: bool = None):
        """
        Calculate the AI image corruptness score for a set of images.

        This method calculates the AI image corruptness score for a set of input images
        using the AI image corrupted detection model.

        :param images: The set of input images for calculating the AI image corruptness score.
        :type images: ImagesTyping
        :param silent: If True, suppresses progress bars and additional output during calculation.
        :type silent: bool
        :return: The AI image corruptness score.
        :rtype: float
        """
""" Overview: AI image corrupt evaluation metrics. """ _DEFAULT_MODEL_NAME = 'caformer_s36_v0_focal' @lru_cache() def _open_anime_aicop_model(model_name: str): """ Open the AI image corrupted detection model. This function downloads and opens the AI image corrupted detection model specified by the given model name using Hugging Face Hub. :param model_name: The name of the AI image corrupted detection model. :type model_name: str :return: The opened AI image corrupted detection model. :rtype: Model """ return open_onnx_model(hf_hub_download( f'deepghs/ai_image_corrupted', f'{model_name}/model.onnx', )) @lru_cache() def _open_anime_aicop_meta(model_name: str): """ Open the meta information of the AI image corrupted detection model. This function downloads and opens the meta information of the AI image corrupted detection model specified by the given model name using Hugging Face Hub. :param model_name: The name of the AI image corrupted detection model. :type model_name: str :return: The opened meta information of the AI image corrupted detection model. :rtype: dict """ with open(hf_hub_download( f'deepghs/ai_image_corrupted', f'{model_name}/meta.json', ), 'r', encoding='utf-8') as f: return json.load(f) @lru_cache() def _open_anime_aicop_labels(model_name: str): """ Open the labels of the AI image corrupted detection model. This function opens the labels of the AI image corrupted detection model specified by the given model name. :param model_name: The name of the AI image corrupted detection model. :type model_name: str :return: The labels of the AI image corrupted detection model. :rtype: List[str] """ return _open_anime_aicop_meta(model_name)['labels'] def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384), normalize: Optional[Tuple[float, float]] = (0.5, 0.5)): """ Encode the image for AI image corrupted detection. This function resizes and encodes the image for AI image corrupted detection. :param image: The input image. :type image: Image.Image :param size: The target size for encoding. Default is (384, 384). :type size: Tuple[int, int] :param normalize: The normalization parameters. Default is (0.5, 0.5). :type normalize: Optional[Tuple[float, float]] :return: The encoded image data. :rtype: np.ndarray """ image = image.resize(size, Image.BILINEAR) data = rgb_encode(image, order_='CHW') if normalize is not None: mean_, std_ = normalize mean = np.asarray([mean_]).reshape((-1, 1, 1)) std = np.asarray([std_]).reshape((-1, 1, 1)) data = (data - mean) / std return data.astype(np.float32) def get_ai_corrupted(image: ImageTyping, model_name: str = _DEFAULT_MODEL_NAME) -> Mapping[str, float]: """ Get AI image corrupted detection scores for an image. This function calculates AI image corrupted detection scores for a given image using the specified model. :param image: The input image. :type image: ImageTyping :param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'. :type model_name: str :return: A dictionary containing the corrupted score. :rtype: Mapping[str, float] """ image = load_image(image, force_background='white', mode='RGB') input_ = _img_encode(image)[None, ...] output, = _open_anime_aicop_model(model_name).run(['output'], {'input': input_}) return dict(zip(_open_anime_aicop_labels(model_name), output[0].tolist())) class AICorruptMetrics: """ Class for calculating an AI image corruptness score. 
The `AICorruptMetrics` class allows you to calculate an AI image corruptness score using the AI image corrupted detection model. :param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'. :type model_name: str :param silent: If True, suppresses progress bars and additional output during calculation. :type silent: bool :param tqdm_desc: Description for the tqdm progress bar during calculation. :type tqdm_desc: str """ def __init__(self, model_name: str = _DEFAULT_MODEL_NAME, silent: bool = False, tqdm_desc: str = None): self._model_name = model_name self.silent = silent self.tqdm_desc = tqdm_desc or self.__class__.__name__ def score(self, images: ImagesTyping, silent: bool = None): """ Calculate the AI image corruptness score for a set of images. This method calculates the AI image corruptness score for a set of input images using the AI image corrupted detection model. :param images: The set of input images for calculating the AI image corruptness score. :type images: ImagesTyping :param silent: If True, suppresses progress bars and additional output during calculation. :type silent: bool :return: The AI image corruptness score. :rtype: float """
image_list = load_images(images)
0
2023-10-18 03:35:52+00:00
2k
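A standalone sketch of the normalize step inside _img_encode above: with mean = std = 0.5, a [0, 1] CHW array is mapped linearly onto [-1, 1]. The random input is a stand-in for the output of rgb_encode.

import numpy as np

data = np.random.rand(3, 384, 384)            # stand-in for rgb_encode output in [0, 1]
mean = np.asarray([0.5]).reshape((-1, 1, 1))  # broadcasts over C, H, W
std = np.asarray([0.5]).reshape((-1, 1, 1))
out = ((data - mean) / std).astype(np.float32)
print(out.min() >= -1.0 and out.max() <= 1.0)  # True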
WHUlwb/Assisted_learning
hrnet/hrnet.py
[ { "identifier": "BN_MOMENTUM", "path": "hrnet/backbone.py", "snippet": "BN_MOMENTUM = 0.1\r" }, { "identifier": "hrnet_classification", "path": "hrnet/backbone.py", "snippet": "def hrnet_classification(backbone='hrnetv2_w18'):\r\n model = HighResolutionNet_Classification(num_classes=1000, backbone=backbone)\r\n return model\r" } ]
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from .backbone import BN_MOMENTUM, hrnet_classification
677
class HRnet_Backbone(nn.Module):
    def __init__(self, in_channel, backbone = 'hrnetv2_w18', pretrained = True):
        # `pretrained` is accepted for API compatibility with the call in HRnet below;
        # the backbone weights come from hrnet_classification.
        super(HRnet_Backbone, self).__init__()
        self.model = hrnet_classification(backbone = backbone)
        del self.model.incre_modules
        del self.model.downsamp_modules
        del self.model.final_layer
        del self.model.classifier
        self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=2, padding=1, bias=False)

    def forward(self, x):
        # x = self.model.conv1(x)  # original stem convolution
        x = self.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.conv2(x)
        x = self.model.bn2(x)
        x = self.model.relu(x)
        x = self.model.layer1(x)

        x_list = []
        for i in range(2):
            if self.model.transition1[i] is not None:
                x_list.append(self.model.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.model.stage2(x_list)

        x_list = []
        for i in range(3):
            if self.model.transition2[i] is not None:
                if i < 2:
                    x_list.append(self.model.transition2[i](y_list[i]))
                else:
                    x_list.append(self.model.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.model.stage3(x_list)

        x_list = []
        for i in range(4):
            if self.model.transition3[i] is not None:
                if i < 3:
                    x_list.append(self.model.transition3[i](y_list[i]))
                else:
                    x_list.append(self.model.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.model.stage4(x_list)

        return y_list


class HRnet(nn.Module):
    def __init__(self, in_channel, num_classes = 21, backbone = 'hrnetv2_w18', pretrained = True):
        super(HRnet, self).__init__()
        self.backbone = HRnet_Backbone(in_channel, backbone = backbone, pretrained = pretrained)

        last_inp_channels = int(np.sum(self.backbone.model.pre_stage_channels))

        self.last_layer = nn.Sequential(
            nn.Conv2d(in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),
0
2023-10-17 06:19:02+00:00
2k
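An illustrative sketch (shapes and widths are assumptions, not taken from the repo) of why last_inp_channels above is the sum of pre_stage_channels: the four HRNet branch outputs are upsampled to the finest resolution and concatenated along the channel axis before the final layer.

import torch
import torch.nn.functional as F

branch_channels = [18, 36, 72, 144]  # hrnetv2_w18-style widths (assumed)
y_list = [torch.randn(1, c, 64 // 2 ** i, 64 // 2 ** i) for i, c in enumerate(branch_channels)]

h, w = y_list[0].shape[2:]
fused = torch.cat(
    [y_list[0]] + [F.interpolate(y, size=(h, w), mode="bilinear", align_corners=True) for y in y_list[1:]],
    dim=1,
)
print(fused.shape[1] == sum(branch_channels))  # True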
dagedarr/telegram-budget
handlers/change_info_handler.py
[ { "identifier": "get_by_id", "path": "core/crud.py", "snippet": "async def get_by_id(\n model: ModelType,\n obj_id: int,\n session: AsyncSession\n) -> ModelType:\n \"\"\"\n Получение объекта по ID.\n\n Parameters:\n - model (ModelType): Тип модели SQLAlchemy.\n - obj_id (int): Идентификатор объекта.\n - session (AsyncSession): Асинхронная сессия для взаимодействия с БД.\n\n Returns:\n ModelType: Объект модели, найденный по ID.\n \"\"\"\n\n get_obj_in_db = await session.execute(\n select(model).where(model.id == obj_id)\n )\n return get_obj_in_db.scalars().first()" }, { "identifier": "update", "path": "core/crud.py", "snippet": "async def update(\n db_obj: ModelType,\n obj_in: dict,\n session: AsyncSession,\n) -> ModelType:\n \"\"\"\n Изменение значений полей объекта.\n\n Parameters:\n - db_obj (ModelType): Объект модели для обновления.\n - obj_in (dict): Словарь с новыми значениями полей.\n - session (AsyncSession): Асинхронная сессия для взаимодействия с БД.\n\n Returns:\n ModelType: Обновленный объект модели.\n \"\"\"\n\n for field in obj_in:\n setattr(db_obj, field, obj_in[field])\n session.add(db_obj)\n await session.commit()\n await session.refresh(db_obj)\n return db_obj" }, { "identifier": "IsEndOnboardingFilter", "path": "filters/user_filters.py", "snippet": "class IsEndOnboardingFilter(BaseFilter):\n \"\"\"\n Фильтр для проверки прохождения онбординга.\n \"\"\"\n\n async def __call__(self, message: Message) -> bool:\n session = await get_async_session()\n\n user = await get_by_id(\n model=User,\n obj_id=message.from_user.id,\n session=session,\n )\n if user:\n await session.close()\n return user.is_onboarding\n await session.close()\n return False" }, { "identifier": "RegistrationForm", "path": "forms/user_form.py", "snippet": "class RegistrationForm(StatesGroup):\n username = State()\n mail = State()" }, { "identifier": "set_info_keyboard", "path": "keyboards/user_keyboards.py", "snippet": "def set_info_keyboard(is_onboarding=False) -> InlineKeyboardMarkup:\n \"\"\"Клавиатура изменения данных пользователя.\"\"\"\n\n builder = InlineKeyboardBuilder()\n builder.add(InlineKeyboardButton(\n text='Ввести почту' if is_onboarding else 'Поменять почту',\n callback_data='get_mail')\n )\n builder.add(InlineKeyboardButton(\n text='Поменять имя',\n callback_data='get_username')\n )\n if is_onboarding:\n builder.add(InlineKeyboardButton(\n text='Завершить регистрацию',\n callback_data='registration_end')\n )\n else:\n builder.add(InlineKeyboardButton(\n text='Назад',\n callback_data='other')\n )\n builder.adjust(2)\n return builder.as_markup()" }, { "identifier": "universal_keyboard", "path": "keyboards/user_keyboards.py", "snippet": "def universal_keyboard(\n buttons: List[Tuple[str, Union[str, CallbackData]]],\n buttons_per_row: int = 1,\n) -> InlineKeyboardMarkup:\n \"\"\"Универсальная клавиатура с кнопками колбека.\"\"\"\n\n builder = InlineKeyboardBuilder()\n\n if len(buttons) == 1:\n text, data = buttons[0]\n builder.add(InlineKeyboardButton(text=text, callback_data=data))\n else:\n line = []\n for text, data in buttons:\n line.append(\n InlineKeyboardButton(text=text, callback_data=data)\n )\n builder.add(*line)\n builder.adjust(buttons_per_row)\n return builder.as_markup()" }, { "identifier": "User", "path": "models/user.py", "snippet": "class User(Base):\n \"\"\"Модель пользователя.\"\"\"\n\n username = Column(String(64), nullable=True)\n email = Column(String(254), unique=True, index=True, nullable=True)\n registration_time = Column(BigInteger) # Время в формате Unix.\n 
is_onboarding = Column(Boolean, default=False)\n\n categories = relationship(\n 'Category', back_populates='user',\n cascade='all, delete-orphan', lazy='selectin'\n )\n aliases = relationship(\n 'Alias', back_populates='user', cascade='all, delete-orphan'\n )\n transactions = relationship(\n 'Transaction', back_populates='user',\n cascade='all, delete-orphan', lazy='selectin'\n )" }, { "identifier": "callback_message", "path": "utils/user_actions.py", "snippet": "async def callback_message(\n target: Union[Message, CallbackQuery],\n text: str,\n reply_markup: InlineKeyboardMarkup = None,\n replace_message: bool = False,\n delete_reply: bool = True,\n **kwargs,\n):\n \"\"\"Редактировние сообщения.\"\"\"\n\n target = target if isinstance(target, Message) else target.message\n\n if replace_message:\n await target.edit_text(\n text=text,\n reply_markup=reply_markup,\n **kwargs\n )\n else:\n await target.answer(\n text=text,\n reply_markup=reply_markup,\n **kwargs\n )\n await target.delete_reply_markup() if delete_reply else None" } ]
from aiogram import F, Router
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, Message
from sqlalchemy.ext.asyncio import AsyncSession

from core.crud import get_by_id, update
from filters import IsEndOnboardingFilter
from forms import RegistrationForm
from keyboards import set_info_keyboard, universal_keyboard
from models import User
from utils.user_actions import callback_message
1,474
router = Router(name='change_info_router')


@router.callback_query(F.data == 'change_info')
async def change_info(callback: CallbackQuery):
    """Displays Categories, Statistics, and the rest of the functionality."""
    await callback_message(
        target=callback,
        text='Изменить данные о себе',
reply_markup=set_info_keyboard(),
4
2023-10-23 17:30:24+00:00
2k
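A hedged sketch (aiogram 3.x API; the bot token is a placeholder) of how a router like change_info_router above is typically wired into a dispatcher and started.

import asyncio
from aiogram import Bot, Dispatcher, Router

router = Router(name='change_info_router')  # stand-in for the module's router

async def main():
    bot = Bot(token="123456:PLACEHOLDER")  # hypothetical token
    dp = Dispatcher()
    dp.include_router(router)
    await dp.start_polling(bot)

if __name__ == '__main__':
    asyncio.run(main())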
nchen909/Pass-Tuning
evaluator/CodeBLEU/parser/DFG.py
[ { "identifier": "remove_comments_and_docstrings", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def remove_comments_and_docstrings(source, lang):\n if lang in ['python']:\n \"\"\"\n Returns 'source' minus comments and docstrings.\n \"\"\"\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n if start_col > 0:\n out += token_string\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n temp = []\n for x in out.split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)\n elif lang in ['ruby']:\n return source\n else:\n def replacer(match):\n s = match.group(0)\n if s.startswith('/'):\n return \" \" # note: a space and not an empty string\n else:\n return s\n\n pattern = re.compile(\n r'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE\n )\n temp = []\n for x in re.sub(pattern, replacer, source).split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)" }, { "identifier": "tree_to_token_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_token_index(root_node):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n return [(root_node.start_point, root_node.end_point)]\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_token_index(child)\n return code_tokens" }, { "identifier": "index_to_code_token", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def index_to_code_token(index, code):\n start_point = index[0]\n end_point = index[1]\n if start_point[0] == end_point[0]:\n s = code[start_point[0]][start_point[1]:end_point[1]]\n else:\n s = \"\"\n s += code[start_point[0]][start_point[1]:]\n for i in range(start_point[0] + 1, end_point[0]):\n s += code[i]\n s += code[end_point[0]][:end_point[1]]\n return s" }, { "identifier": "tree_to_variable_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_variable_index(root_node, index_to_code):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n index = (root_node.start_point, root_node.end_point)\n _, code = index_to_code[index]\n if root_node.type != code:\n return [(root_node.start_point, root_node.end_point)]\n else:\n return []\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_variable_index(child, index_to_code)\n return code_tokens" } ]
from tree_sitter import Language, Parser
from .utils import (remove_comments_and_docstrings,
                    tree_to_token_index,
                    index_to_code_token,
                    tree_to_variable_index)
1,245
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

def DFG_python(root_node,index_to_code,states):
    assignment=['assignment','augmented_assignment','for_in_clause']
    if_statement=['if_statement']
    for_statement=['for_statement']
    while_statement=['while_statement']
    do_first_statement=['for_in_clause']
    def_statement=['default_parameter']
    states=states.copy()
    if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':
        idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
        if root_node.type==code:
            return [],states
        elif code in states:
            return [(code,idx,'comesFrom',[code],states[code].copy())],states
        else:
            if root_node.type=='identifier':
                states[code]=[idx]
            return [(code,idx,'comesFrom',[],[])],states
    elif root_node.type in def_statement:
        name=root_node.child_by_field_name('name')
        value=root_node.child_by_field_name('value')
        DFG=[]
        if value is None:
indexs=tree_to_variable_index(name,index_to_code)
3
2023-10-20 09:24:44+00:00
2k
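A sketch of how callers usually build the index_to_code mapping consumed by DFG_python above, using the tree_to_token_index and index_to_code_token utils from the record's context. The compiled language bundle path 'my-languages.so' and the older py-tree-sitter API (Language(path, name) plus parser.set_language) are assumptions.

from tree_sitter import Language, Parser

parser = Parser()
parser.set_language(Language('my-languages.so', 'python'))  # hypothetical build artifact

code = "a = 1\nb = a + 2\n"
tree = parser.parse(bytes(code, 'utf8'))
code_lines = code.split('\n')

tokens_index = tree_to_token_index(tree.root_node)              # util from the context above
code_tokens = [index_to_code_token(x, code_lines) for x in tokens_index]
index_to_code = {index: (i, tok) for i, (index, tok) in enumerate(zip(tokens_index, code_tokens))}

dfg, _ = DFG_python(tree.root_node, index_to_code, {})  # states start empty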
kavisha725/MBNSF
trajectory_estimation/mbnt.py
[ { "identifier": "extract_clusters_dbscan", "path": "utils/o3d_uitls.py", "snippet": "def extract_clusters_dbscan(cloud, eps = 0.9, min_points=10, return_clusters= False, return_colored_pcd=False):\n pcl = copy.deepcopy(cloud)\n pcl = make_open3d_point_cloud(pcl)\n labels = np.array(\n pcl.cluster_dbscan(eps=eps, min_points=min_points, print_progress=True))\n \n if return_colored_pcd:\n cmap = plt.get_cmap(\"tab20\")\n max_label = labels.max()\n print(\"Has %d clusters\" % (max_label + 1))\n colors = cmap(labels / (max_label if max_label > 0 else 1))\n colors[labels < 0] = 0\n pcl.colors = o3d.utility.Vector3dVector(colors[:, :3])\n # o3d.visualization.draw_geometries([pcl])\n # save_view_point(pcl, 'pcd_viewpoint.json')\n load_view_point(pcl, 'pcd_viewpoint.json')\n \n clusters = []\n if return_clusters:\n label_ids = np.delete(np.unique(labels), 0)\n for id in label_ids:\n clusters.append(cloud[labels == id])\n clusters = np.asarray(clusters)\n\n if return_colored_pcd: \n return labels, clusters, pcl\n return labels, clusters\n else:\n if return_colored_pcd: \n return labels, pcl\n return labels" }, { "identifier": "spatial_consistency_loss", "path": "utils/sc_utils.py", "snippet": "def spatial_consistency_loss(src_keypts, tgt_keypts, d_thre=0.1, max_points = 3000):\n \"\"\"\n Input:\n - src_keypts: [bs, num_corr, 3]\n - tgt_keypts: [bs, num_corr, 3]\n Output:\n - sc_loss: [bs, 1], the spatial consistency loss.\n \"\"\"\n bs, num_corr = src_keypts.shape[0], tgt_keypts.shape[1]\n\n # (Optional) random sample points\n if num_corr > max_points:\n rand_perm = torch.randperm(num_corr)\n rand_idx = rand_perm[:max_points]\n\n src_keypts = src_keypts[:, rand_idx, :]\n tgt_keypts = tgt_keypts[:, rand_idx, :]\n\n # Spatial Consistency Adjacency Matrix\n src_dist = torch.norm((src_keypts[:, :, None, :] - src_keypts[:, None, :, :]), dim=-1)\n target_dist = torch.norm((tgt_keypts[:, :, None, :] - tgt_keypts[:, None, :, :]), dim=-1)\n cross_dist = torch.abs(src_dist - target_dist)\n adj_mat = torch.clamp(1.0 - cross_dist ** 2 / d_thre ** 2, min=0)\n\n # Spatial Consistency Loss\n lead_eigvec = power_iteration(adj_mat)\n sc_score = spatial_consistency_score( adj_mat, lead_eigvec)\n sc_loss = -torch.log(sc_score)\n\n return sc_loss" } ]
import os, glob
import argparse
import logging
import csv
import numpy as np
import torch
import sys
import pytorch3d.loss as p3dloss

from utils.general_utils import *
from utils.ntp_utils import *
from utils.o3d_uitls import extract_clusters_dbscan
from utils.sc_utils import spatial_consistency_loss
1,338
# Long-term trajectory estimation with MBNT.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

logger = logging.getLogger(__name__)


def total_sc_loss(labels_t, label_ids, pc, pc_deformed, d_thresh=0.03, max_points=3000):
    loss_sc = None
    for id in label_ids:
        cluster = pc[labels_t == id]
        cluster_deformed = pc_deformed[labels_t == id]
        assert cluster.shape == cluster_deformed.shape
        cluster_cs_loss = spatial_consistency_loss(cluster.unsqueeze(0), cluster_deformed.unsqueeze(0),
                                                   d_thre=d_thresh, max_points=max_points)
        if loss_sc is None:
            loss_sc = cluster_cs_loss
        else:
            loss_sc += cluster_cs_loss
    loss_sc /= len(label_ids)
    return loss_sc.squeeze()


def fit_trajectory_field(
    exp_dir,
    pc_list,
    options,
    flow_gt_list = None,
    traj_gt = None,
    traj_val_mask = None
):
    csv_file = open(f"{exp_dir}/metrics.csv", 'w')
    metric_labels = ['train_loss', 'train_chamfer_loss', 'train_sc_loss', 'train_consist_loss', 'traj_consist',
                     'epe', 'acc_strict', 'acc_relax', 'angle_error', 'outlier']
    csv_writer = csv.DictWriter(csv_file, ['itr'] + metric_labels + ['traj_metric'])
    csv_writer.writeheader()

    n_lidar_sweeps = len(pc_list)
    if traj_gt is not None and traj_val_mask is not None:
        traj_gt = torch.from_numpy(traj_gt).cuda()
        traj_val_mask = torch.from_numpy(traj_val_mask).cuda()

    # ANCHOR: Initialize the trajectory field
    net = NeuralTrajField(traj_len=n_lidar_sweeps, filter_size=options.hidden_units,
                          act_fn=options.act_fn, traj_type=options.traj_type, st_embed_type=options.st_embed_type)
    net.to(options.device)

    optimizer = torch.optim.Adam(net.parameters(), lr=options.lr, weight_decay=options.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.5)
    # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200,400,600,800], gamma=0.5)

    # Pre-compute clusters:
    labels_database, label_ids_database = [], []
    for fid in range(n_lidar_sweeps):
labels = extract_clusters_dbscan(pc_list[fid], eps = options.sc_cluster_eps, min_points=options.sc_cluster_min_points, return_clusters= False, return_colored_pcd=False)
0
2023-10-16 07:21:12+00:00
2k
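A standalone sketch of the spatial-consistency adjacency term inside spatial_consistency_loss above: pairwise distances in the source cloud are compared to those in its deformed copy, and agreement is clamped into [0, 1]. The toy clouds are stand-ins.

import torch

src = torch.rand(1, 50, 3)                 # [bs, num_corr, 3]
tgt = src + 0.001 * torch.randn(1, 50, 3)  # nearly rigid correspondences
d_thre = 0.1

src_dist = torch.norm(src[:, :, None, :] - src[:, None, :, :], dim=-1)
tgt_dist = torch.norm(tgt[:, :, None, :] - tgt[:, None, :, :], dim=-1)
cross_dist = torch.abs(src_dist - tgt_dist)
adj_mat = torch.clamp(1.0 - cross_dist ** 2 / d_thre ** 2, min=0)
print(float(adj_mat.mean()))  # close to 1.0 for consistent correspondences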
cool-dev-guy/tkmoderngl
main.py
[ { "identifier": "FramebufferImage", "path": "tkmoderngl/framebuffer.py", "snippet": "class FramebufferImage(ImageTk.PhotoImage):\n def __init__(self, master, ctx, size):\n super(FramebufferImage, self).__init__(Image.new('RGB', size, (0, 0, 0)))\n self.ctx = ctx\n self.fbo = self.ctx.simple_framebuffer(size)\n self.scope = self.ctx.scope(self.fbo)\n\n def __enter__(self):\n self.scope.__enter__()\n\n def __exit__(self, *args):\n self.scope.__exit__(*args)\n self.paste(Image.frombytes('RGB', self.fbo.size, self.fbo.read(), 'raw', 'RGB', 0, -1))" }, { "identifier": "Canvas", "path": "tkmoderngl/renderer.py", "snippet": "class Canvas:\n def __init__(self, ctx, reserve='4MB'):\n self.ctx = ctx\n self.prog = self.ctx.program(\n vertex_shader='''\n #version 330\n\n uniform vec2 Pan;\n\n in vec2 in_vert;\n in vec4 in_color;\n\n out vec4 v_color;\n\n void main() {\n v_color = in_color;\n gl_Position = vec4(in_vert - Pan, 0.0, 1.0);\n }\n ''',\n fragment_shader='''\n #version 330\n\n in vec4 v_color;\n\n out vec4 f_color;\n\n void main() {\n f_color = v_color;\n }\n ''',\n )\n\n self.vbo = ctx.buffer(reserve='4MB', dynamic=True)\n self.vao = ctx.simple_vertex_array(self.prog, self.vbo, 'in_vert', 'in_color')\n\n def pan(self, pos):\n self.prog['Pan'].value = pos\n\n def clear(self, color=(0, 0, 0, 0)):\n self.ctx.clear(*color)\n\n def plot(self, points, type='line'):\n data = points.astype('f4').tobytes()\n self.vbo.orphan()\n self.vbo.write(data)\n if type == 'line':\n self.ctx.line_width = 1.0\n self.vao.render(moderngl.LINE_STRIP, vertices=len(data) // 24)\n if type == 'points':\n self.ctx.point_size = 3.0\n self.vao.render(moderngl.POINTS, vertices=len(data) // 24)" }, { "identifier": "PanTool", "path": "tkmoderngl/renderer.py", "snippet": "class PanTool:\n def __init__(self):\n self.total_x = 0.0\n self.total_y = 0.0\n self.start_x = 0.0\n self.start_y = 0.0\n self.delta_x = 0.0\n self.delta_y = 0.0\n self.drag = False\n\n def start_drag(self, x, y):\n self.start_x = x\n self.start_y = y\n self.drag = True\n\n def dragging(self, x, y):\n if self.drag:\n self.delta_x = (x - self.start_x) * 2.0\n self.delta_y = (y - self.start_y) * 2.0\n\n def stop_drag(self, x, y):\n if self.drag:\n self.dragging(x, y)\n self.total_x -= self.delta_x\n self.total_y += self.delta_y\n self.delta_x = 0.0\n self.delta_y = 0.0\n self.drag = False\n\n @property\n def value(self):\n return (self.total_x - self.delta_x, self.total_y + self.delta_y)" } ]
import tkinter as tk
import moderngl
import numpy as np

from tkmoderngl.framebuffer import FramebufferImage
from tkmoderngl.renderer import Canvas, PanTool
1,014
""" code from moderngl/examples modified by : cool-dev-guy """ # the moderngl widget class GlWidget(tk.Label): def __init__(self,*args,**kwargs): super().__init__(*args,**kwargs) self.parent = args[0] self._ctx = moderngl.create_standalone_context() self._tkfbo = FramebufferImage(args[0],self._ctx,(500,500))
""" code from moderngl/examples modified by : cool-dev-guy """ # the moderngl widget class GlWidget(tk.Label): def __init__(self,*args,**kwargs): super().__init__(*args,**kwargs) self.parent = args[0] self._ctx = moderngl.create_standalone_context() self._tkfbo = FramebufferImage(args[0],self._ctx,(500,500))
self._canvas = Canvas(self._ctx)
1
2023-10-15 07:58:13+00:00
2k
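A minimal offscreen sketch of the pattern FramebufferImage above wraps: render into a framebuffer of a standalone moderngl context and read the pixels back into PIL. The output filename is arbitrary.

import moderngl
from PIL import Image

ctx = moderngl.create_standalone_context()
fbo = ctx.simple_framebuffer((500, 500))
fbo.use()
ctx.clear(0.2, 0.4, 0.6)

img = Image.frombytes('RGB', fbo.size, fbo.read(), 'raw', 'RGB', 0, -1)
img.save('frame.png')  # hypothetical output path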
G3VV/Yank
index.py
[ { "identifier": "start_token_thread", "path": "util/spotify.py", "snippet": "def start_token_thread():\n \n client_id = spotify_id\n client_secret = spotify_secret\n \n get_access_token(client_id, client_secret)" }, { "identifier": "start", "path": "util/download.py", "snippet": "async def start(id):\n isrc = id\n try:\n\n try:\n track = await spotify_isrc(isrc)\n except Exception as e:\n print(\"Spotify token expired or couldn't find isrc\")\n print(\" \")\n print(e)\n return \"none\"\n\n if 'isrc' in track['external_ids']:\n isrc = track['external_ids']['isrc']\n else:\n isrc = \"ISRC not available\"\n print(\"Song not found\")\n return \"none\"\n\n j = await get_deezer_track(isrc)\n pathfile = Path(f\"./music/{isrc}.mp3\")\n\n if pathfile.is_file():\n print(f\"[{isrc}] Already cached\")\n return pathfile\n else:\n print(f\"[{isrc}] Not cached\")\n try:\n track_id = j[\"id\"]\n except:\n print(\"Couldn't find song on deezer\")\n return \"none\"\n loop = asyncio.get_event_loop()\n download_track(track_id, isrc)\n return pathfile\n\n except Exception as e:\n print(f\"{e} at line {sys.exc_info()[-1].tb_lineno}\")\n return \"none\"" }, { "identifier": "start_playlist", "path": "util/download.py", "snippet": "async def start_playlist(id):\n folder_to_zip = f'./music/{id}/'\n output_zip_file = f'./zip/{id}'\n def zip_folder(folder_path, output_path):\n print(f\"[playlist] Zipping folder {folder_path} to {output_path}\")\n\n shutil.make_archive(output_path, 'zip', folder_path)\n print(f\"[playlist] Finished zipping folder {folder_path} to {output_path}\")\n\n\n isrc = id\n try:\n if os.path.exists(folder_to_zip):\n return output_zip_file + \".zip\"\n\n try:\n playlist_isrcs = await spotify_playlist(isrc)\n except Exception as e:\n print(\"Spotify token expired or couldn't find isrc\")\n print(\" \")\n print(e)\n return \"none\"\n\n deezer_ids = []\n\n for index in range(len(playlist_isrcs)):\n try:\n j = await get_deezer_track(playlist_isrcs[index])\n print(j[\"id\"])\n deezer_ids.append(f'{j[\"id\"]}')\n except:\n print(\"Couldn't find song on deezer\")\n continue\n\n #return deezer_ids\n\n\n\n download_playlist(deezer_ids, id)\n\n zip_folder(folder_to_zip, output_zip_file)\n return output_zip_file + \".zip\"\n\n except Exception as e:\n print(f\"{e} at line {sys.exc_info()[-1].tb_lineno}\")\n return \"none\"" } ]
from quart import Quart, send_file
from util.spotify import start_token_thread
from util.download import start, start_playlist
from dotenv import load_dotenv

import threading
import re
import os
import json
850
app = Quart(__name__)
load_dotenv()
port = os.environ.get("port")


@app.route('/track/<string:id>')
async def serve_audio(id):
    filename = await start(id)
    return await send_file(filename, mimetype='audio/mpeg')


@app.route('/')
async def serve_index():
    return "online"


@app.route('/playlist/<string:id>')
async def serve_playlist(id):
filename = await start_playlist(id)
2
2023-10-15 04:35:56+00:00
2k
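A hedged sketch of serving an app like the one above; reading the port from the environment mirrors the record's port = os.environ.get("port"), and the fallback value here is an assumption.

import os
from quart import Quart

app = Quart(__name__)

@app.route('/')
async def index():
    return "online"

if __name__ == '__main__':
    app.run(host="0.0.0.0", port=int(os.environ.get("port", 8080)))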
openfoodfacts/open-prices
app/models.py
[ { "identifier": "Base", "path": "app/db.py", "snippet": "" }, { "identifier": "CurrencyEnum", "path": "app/enums.py", "snippet": "CURRENCIES = [(currency, currency) for currency in list_currencies()]\n NODE = \"NODE\"\n WAY = \"WAY\"\n RELATION = \"RELATION\"\n PRICE_TAG = \"PRICE_TAG\"\n RECEIPT = \"RECEIPT\"\n GDPR_REQUEST = \"GDPR_REQUEST\"\n UNIT = \"UNIT\"\n KILOGRAM = \"KILOGRAM\"\nclass LocationOSMEnum(Enum):\nclass ProofTypeEnum(Enum):\nclass PricePerEnum(Enum):" } ]
from openfoodfacts import Flavor
from sqlalchemy import (
    JSON,
    BigInteger,
    Boolean,
    Column,
    Date,
    DateTime,
    ForeignKey,
    Integer,
    Numeric,
    String,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func
from sqlalchemy_utils import force_auto_coercion
from sqlalchemy_utils.types.choice import ChoiceType

from app.db import Base
from app.enums import CurrencyEnum, LocationOSMEnum, PricePerEnum, ProofTypeEnum
734
force_auto_coercion()

JSONVariant = JSON().with_variant(JSONB(), "postgresql")


class User(Base):
    user_id = Column(String, primary_key=True, index=True)
    token = Column(String, unique=True, index=True)
    last_used = Column(DateTime(timezone=True))
    price_count = Column(Integer, nullable=False, server_default="0", index=True)
    created = Column(DateTime(timezone=True), server_default=func.now())

    __tablename__ = "users"


class Product(Base):
    id = Column(Integer, primary_key=True, index=True)
    code = Column(String, unique=True, index=True)
    source = Column(ChoiceType(Flavor))
    product_name = Column(String)
    product_quantity = Column(Integer)
    brands = Column(String)
    image_url = Column(String)
    unique_scans_n = Column(Integer, nullable=False, server_default="0")
    prices: Mapped[list["Price"]] = relationship(back_populates="product")
    price_count = Column(Integer, nullable=False, server_default="0", index=True)
    created = Column(DateTime(timezone=True), server_default=func.now())
    updated = Column(DateTime(timezone=True), onupdate=func.now())

    __tablename__ = "products"


class Location(Base):
    id = Column(Integer, primary_key=True, index=True)
    osm_id = Column(BigInteger)
    osm_type = Column(ChoiceType(LocationOSMEnum))
    osm_name = Column(String)
    osm_display_name = Column(String)
    osm_address_postcode = Column(String)
    osm_address_city = Column(String)
    osm_address_country = Column(String)
    osm_lat = Column(Numeric(precision=11, scale=7))
    osm_lon = Column(Numeric(precision=11, scale=7))
    prices: Mapped[list["Price"]] = relationship(back_populates="location")
    price_count = Column(Integer, nullable=False, server_default="0", index=True)
    created = Column(DateTime(timezone=True), server_default=func.now())
    updated = Column(DateTime(timezone=True), onupdate=func.now())

    __tablename__ = "locations"


class Proof(Base):
    id = Column(Integer, primary_key=True, index=True)
    file_path = Column(String, nullable=False)
    mimetype = Column(String, index=True)
type = Column(ChoiceType(ProofTypeEnum))
1
2023-10-21 14:02:15+00:00
2k
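A self-contained sketch (own declarative Base and an in-memory SQLite engine, unlike the project's shared app.db.Base) exercising a model shaped like User above, including the "0" server default on price_count.

from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql import func

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    user_id = Column(String, primary_key=True, index=True)
    token = Column(String, unique=True, index=True)
    price_count = Column(Integer, nullable=False, server_default="0", index=True)
    created = Column(DateTime(timezone=True), server_default=func.now())

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(User(user_id="alice", token="tok"))  # hypothetical row
    session.commit()
    print(session.query(User).one().price_count)  # 0, filled in by the server default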
krasnoukhov/homeassistant-smart-maic
custom_components/smart_maic/config_flow.py
[ { "identifier": "DEVICE_NAME", "path": "custom_components/smart_maic/const.py", "snippet": "DEVICE_NAME = \"device_name\"" }, { "identifier": "DEVICE_ID", "path": "custom_components/smart_maic/const.py", "snippet": "DEVICE_ID = \"devid\"" }, { "identifier": "DEVICE_TYPE", "path": "custom_components/smart_maic/const.py", "snippet": "DEVICE_TYPE = \"devtype\"" }, { "identifier": "DOMAIN", "path": "custom_components/smart_maic/const.py", "snippet": "DOMAIN = \"smart_maic\"" }, { "identifier": "IP_ADDRESS", "path": "custom_components/smart_maic/const.py", "snippet": "IP_ADDRESS = CONF_IP_ADDRESS" }, { "identifier": "PIN", "path": "custom_components/smart_maic/const.py", "snippet": "PIN = CONF_PIN" }, { "identifier": "SmartMaic", "path": "custom_components/smart_maic/smart_maic.py", "snippet": "class SmartMaic:\n \"\"\"Smart MAIC instance.\"\"\"\n\n def __init__(self, data: dict[str, Any]) -> None:\n \"\"\"Init Smart MAIC.\"\"\"\n self._ip_address = data[IP_ADDRESS]\n self._pin = data[PIN]\n self._devid = data.get(DEVICE_ID)\n\n def get_wdata(self) -> dict[str, Any]:\n \"\"\"Get \"wdata\" for Smart MAIC component.\"\"\"\n self._login_request()\n return self._get_request(page=\"getwdata\").json()\n\n def get_config(self) -> dict[str, Any]:\n \"\"\"Get config for Smart MAIC component.\"\"\"\n self._login_request()\n return self._get_request(page=\"webinit\").json()\n\n def set_mqtt_config(self) -> dict[str, Any]:\n \"\"\"Set Smart MAIC MQTT config.\"\"\"\n config = self.get_config()\n\n self._get_request(\n page=\"mqtt\",\n serv=config[\"serv\"],\n port=config[\"port\"],\n uname=config[\"uname\"],\n **{\"pass\": config[\"pass\"]},\n mqtt_on=1,\n mqttint=5,\n separat=2,\n prefix=f\"{PREFIX}/\",\n )\n\n return self.get_config()\n\n def set_consumption(self, key: str, value: float) -> None:\n \"\"\"Set Smart MAIC consumption value.\"\"\"\n self._login_request()\n self._get_request(page=\"initval\", **{key: value})\n\n def set_dry_switch(self, value: int) -> dict[str, Any]:\n \"\"\"Set Smart MAIC dry switch.\"\"\"\n self._get_request(\n page=\"getdata\", devid=self._devid, devpass=self._pin, pout=value\n )\n\n def _login_request(self) -> None:\n self._get_request(page=\"devlogin\", devpass=self._pin)\n\n def _get_request(self, **kwargs) -> requests.Response:\n \"\"\"Make GET request to the Smart MAIC API.\"\"\"\n url = urlparse(f\"http://{self._ip_address}/\")\n url = url._replace(query=urlencode(kwargs))\n\n _LOGGER.debug(f\"Smart MAIC request: GET {url.geturl()}\")\n try:\n r = requests.get(url.geturl(), timeout=HTTP_TIMEOUT)\n r.raise_for_status()\n _LOGGER.debug(f\"Smart MAIC status: {r.status_code}\")\n _LOGGER.debug(f\"Smart MAIC response: {r.text}\")\n\n return r\n except TimeoutError as timeout_error:\n raise ConnectionError from timeout_error\n except requests.exceptions.ConnectionError as connection_error:\n raise ConnectionError from connection_error\n except requests.exceptions.HTTPError as http_error:\n if http_error.response.status_code == 400:\n return r\n raise ConnectionError from http_error" }, { "identifier": "SmartMaicCoordinator", "path": "custom_components/smart_maic/coordinator.py", "snippet": "class SmartMaicCoordinator(DataUpdateCoordinator[dict[str, Any]]):\n \"\"\"Smart MAIC Coordinator class.\"\"\"\n\n def __init__(self, smart_maic: SmartMaic, hass: HomeAssistant) -> None:\n \"\"\"Initialize.\"\"\"\n self._smart_maic = smart_maic\n\n super().__init__(\n hass,\n _LOGGER,\n name=DOMAIN,\n )\n\n def _get_config(self) -> None:\n \"\"\"Get Smart MAIC config.\"\"\"\n return 
self._smart_maic.set_mqtt_config()\n\n async def async_get_config(self) -> None:\n \"\"\"Get Smart MAIC config.\"\"\"\n return await self.hass.async_add_executor_job(self._get_config)\n\n def _set_mqtt_config(self) -> None:\n \"\"\"Set Smart MAIC MQTT config.\"\"\"\n return self._smart_maic.set_mqtt_config()\n\n async def async_set_mqtt_config(self) -> None:\n \"\"\"Set Smart MAIC MQTT config.\"\"\"\n return await self.hass.async_add_executor_job(self._set_mqtt_config)\n\n def _set_consumption(self, key: str, value: float) -> None:\n \"\"\"Set Smart MAIC consumption value.\"\"\"\n return self._smart_maic.set_consumption(key=key, value=value)\n\n async def async_set_consumption(self, key: str, value: float) -> None:\n \"\"\"Set Smart MAIC consumption value.\"\"\"\n return await self.hass.async_add_executor_job(self._set_consumption, key, value)\n\n def _set_dry_switch(self, value: int) -> None:\n \"\"\"Set Smart MAIC dry switch value.\"\"\"\n return self._smart_maic.set_dry_switch(value=value)\n\n async def async_set_dry_switch(self, value: int) -> None:\n \"\"\"Set Smart MAIC dry switch value.\"\"\"\n return await self.hass.async_add_executor_job(self._set_dry_switch, value)" } ]
import logging import voluptuous as vol import homeassistant.helpers.config_validation as cv from typing import Any from homeassistant import config_entries from homeassistant.components import mqtt from homeassistant.core import HomeAssistant from homeassistant.data_entry_flow import AbortFlow from .const import ( DEVICE_NAME, DEVICE_ID, DEVICE_TYPE, DOMAIN, IP_ADDRESS, PIN, ) from .smart_maic import SmartMaic from .coordinator import SmartMaicCoordinator
1,530
"""Config flow for Smart MAIC integration.""" from __future__ import annotations _LOGGER = logging.getLogger(__name__) USER_SCHEMA = vol.Schema( { vol.Required(IP_ADDRESS): cv.string, vol.Required(PIN): cv.string, vol.Required(DEVICE_NAME, default="Energy"): cv.string, } ) async def validate_input(hass: HomeAssistant, data: dict) -> dict[str, Any]: """Validate the user input allows us to connect. Data has the keys from USER_SCHEMA with values provided by the user. """ if not await mqtt.async_wait_for_mqtt_client(hass): raise AbortFlow("mqtt_unavailable") smart_maic = SmartMaic(data)
"""Config flow for Smart MAIC integration.""" from __future__ import annotations _LOGGER = logging.getLogger(__name__) USER_SCHEMA = vol.Schema( { vol.Required(IP_ADDRESS): cv.string, vol.Required(PIN): cv.string, vol.Required(DEVICE_NAME, default="Energy"): cv.string, } ) async def validate_input(hass: HomeAssistant, data: dict) -> dict[str, Any]: """Validate the user input allows us to connect. Data has the keys from USER_SCHEMA with values provided by the user. """ if not await mqtt.async_wait_for_mqtt_client(hass): raise AbortFlow("mqtt_unavailable") smart_maic = SmartMaic(data)
coordinator = SmartMaicCoordinator(smart_maic, hass)
7
2023-10-16 17:24:45+00:00
2k
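A note on reading these rows: gold_snippet_index indexes into the context list, and the identifier of the indexed snippet is expected to surface in next_line. A minimal pure-Python sanity check, with the row abridged by hand from the Smart MAIC record above (the dict shape mirrors the fields shown; nothing else is assumed):

def gold_snippet_used(record: dict) -> bool:
    # The gold context snippet's identifier should appear in the target line.
    snippet = record["context"][record["gold_snippet_index"]]
    return snippet["identifier"] in record["next_line"]

# Abridged from the record above: index 7 resolves to SmartMaicCoordinator.
row = {
    "context": [{"identifier": name} for name in (
        "DEVICE_NAME", "DEVICE_ID", "DEVICE_TYPE", "DOMAIN",
        "IP_ADDRESS", "PIN", "SmartMaic", "SmartMaicCoordinator")],
    "gold_snippet_index": 7,
    "next_line": "coordinator = SmartMaicCoordinator(smart_maic, hass)",
}
assert gold_snippet_used(row)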
JoaoPedro9674/django-ledger
django_ledger/contrib/django_ledger_graphene/api.py
[ { "identifier": "ChartOfAccountsModelType", "path": "django_ledger/contrib/django_ledger_graphene/coa/schema.py", "snippet": "class ChartOfAccountsModelType(DjangoObjectType):\n class Meta:\n model = ChartOfAccountModel\n fields = [\n 'uuid',\n 'slug',\n 'name',\n 'locked'\n ]\n interfaces = (relay.Node,)" }, { "identifier": "EntityModelQuery", "path": "django_ledger/contrib/django_ledger_graphene/entity/schema.py", "snippet": "class EntityModelQuery(graphene.ObjectType):\n entity_model_list_all = graphene.List(EntityModelType)\n entity_model_list_visible = graphene.List(EntityModelType)\n entity_model_list_hidden = graphene.List(EntityModelType)\n entity_model_list_managed = graphene.List(EntityModelType)\n entity_model_list_is_admin = graphene.List(EntityModelType)\n\n entity_model_detail_by_uuid = graphene.Field(EntityModelTypeDetail, uuid=graphene.String(required=True))\n entity_model_detail_by_slug = graphene.Field(EntityModelTypeDetail, slug=graphene.String(required=True))\n\n @staticmethod\n def get_base_queryset(info):\n if info.context.resource_owner.is_authenticated:\n return EntityModel.objects.for_user(user_model=info.context.resource_owner)\n return EntityModel.objects.none()\n\n # list ....\n def resolve_entity_model_list_all(self, info, **kwargs):\n return EntityModelQuery.get_base_queryset(info)\n\n def resolve_entity_model_list_visible(self, info, **kwargs):\n qs = EntityModelQuery.get_base_queryset(info)\n return qs.visible()\n\n def resolve_entity_model_list_hidden(self, info, **kwargs):\n qs = EntityModelQuery.get_base_queryset(info)\n return qs.hidden()\n\n def resolve_entity_model_list_managed(self, info, **kwargs):\n qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)\n user_model = info.context.resource_owner\n return qs.filter(managers__in=[user_model])\n\n def resolve_entity_model_list_is_admin(self, info, **kwargs):\n qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)\n user_model = info.context.resource_owner\n return qs.filter(admin=user_model)\n\n # detail...\n def resolve_entity_model_detail_by_slug(self, info, slug, **kwargs):\n qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)\n return qs.select_related('default_coa').get(slug__exact=slug)\n\n def resolve_entity_model_detail_by_uuid(self, info, uuid, **kwargs):\n qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)\n return qs.select_related('default_coa').get(uuid__exact=uuid)" }, { "identifier": "EntityModelType", "path": "django_ledger/contrib/django_ledger_graphene/entity/schema.py", "snippet": "class EntityModelType(DjangoObjectType):\n is_admin = graphene.Boolean()\n\n def resolve_is_admin(self, info):\n entity_model: EntityModel = self\n return entity_model.is_admin_user(user_model=info.context.resource_owner)\n\n class Meta:\n model = EntityModel\n fields = ENTITY_MODEL_BASE_FIELDS\n filter_fields = {\n 'name': [\n 'exact',\n 'icontains',\n 'istartswith'\n ],\n }\n interfaces = (relay.Node,)" } ]
import graphene from django_ledger.contrib.django_ledger_graphene.coa.schema import ChartOfAccountsModelType from django_ledger.contrib.django_ledger_graphene.entity.schema import EntityModelQuery, EntityModelType
945
class Query( EntityModelQuery, # ChartOfAccountsModelQuery # CustomerQuery, # Bill_list_Query, # Accountlist_Query, # Bank_account_Query , # ChartOfAccountsQuery, # UnitOfMeasureQuery, # VendorsQuery, # EntityUnitQuery, # LedgerQuery, # TransactionsQuery, # JournalEntryQuery, # PurchaseOrderQuery, # QueryUser, ): pass # class Mutation( # # CustomerMutations, # # BankAccountMutations, # # AuthMutation, # ): # pass schema = graphene.Schema( types=[ EntityModelType,
class Query( EntityModelQuery, # ChartOfAccountsModelQuery # CustomerQuery, # Bill_list_Query, # Accountlist_Query, # Bank_account_Query , # ChartOfAccountsQuery, # UnitOfMeasureQuery, # VendorsQuery, # EntityUnitQuery, # LedgerQuery, # TransactionsQuery, # JournalEntryQuery, # PurchaseOrderQuery, # QueryUser, ): pass # class Mutation( # # CustomerMutations, # # BankAccountMutations, # # AuthMutation, # ): # pass schema = graphene.Schema( types=[ EntityModelType,
ChartOfAccountsModelType
0
2023-10-20 01:07:20+00:00
2k
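The record above assembles a graphene Schema out of mixin query classes, most of them commented out. A self-contained sketch of that composition pattern (hypothetical hello field; assumes the graphene package is installed):

import graphene

class HelloQuery(graphene.ObjectType):
    hello = graphene.String()

    def resolve_hello(self, info):
        return "world"

class Query(HelloQuery):  # further query mixins would be added to these bases
    pass

schema = graphene.Schema(query=Query)
result = schema.execute("{ hello }")
assert result.data == {"hello": "world"}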
HLTCHKUST/InstructAlign
main_nlu_prompt.py
[ { "identifier": "get_prompt", "path": "nlu_prompt.py", "snippet": "def get_prompt(prompt_lang):\n if prompt_lang == 'EN':\n return DATA_TO_EN_PROMPT\n elif prompt_lang == 'EN2':\n return DATA_TO_EN2_PROMPT\n elif prompt_lang == 'EN3':\n return DATA_TO_EN3_PROMPT\n elif prompt_lang == 'ID':\n return DATA_TO_ID_PROMPT\n elif prompt_lang == 'ID2':\n return DATA_TO_ID2_PROMPT\n elif prompt_lang == 'ID3':\n return DATA_TO_ID3_PROMPT\n else:\n raise ValueError(f'get_prompt() - Unknown prompt_lang `{prompt_lang}` (options: EN / EN2 / EN3 / ID / ID2 / ID3)')" }, { "identifier": "load_xnli_dataset", "path": "data_utils.py", "snippet": "def load_xnli_dataset():\n xnli_dataset = datasets.load_dataset('xtreme', 'XNLI')\n df = xnli_dataset['test'].to_pandas()\n \n xnli_dsets = {}\n for lang, lang_df in df.groupby('language'):\n lang_df = lang_df[['sentence1', 'sentence2', 'gold_label']]\n lang_df.columns = ['text_1', 'text_2', 'label']\n xnli_dsets[f'xnli_{lang}'] = DatasetDict({'test': Dataset.from_pandas(lang_df.reset_index(drop=True))})\n return xnli_dsets" }, { "identifier": "load_nusa_menulis_dataset", "path": "data_utils.py", "snippet": "def load_nusa_menulis_dataset():\n nusa_menulis_dsets = {}\n for (dset, task, lang) in NUSA_MENULIS_TASKS:\n nusa_menulis_dsets[f'{dset}_{task}_{lang}'] = load_single_dataset(dset, task, lang, base_path='./nusamenulis')\n return nusa_menulis_dsets" }, { "identifier": "load_nlu_tasks", "path": "data_utils.py", "snippet": "def load_nlu_tasks():\n conhelps = NusantaraConfigHelper()\n nlu_datasets = {\n helper.config.name: helper.load_dataset() for helper in conhelps.filtered(lambda x: x.config.name in TEXT_CLASSIFICATION_TASKS)\n }\n return nlu_datasets" } ]
import os, sys import csv import pandas as pd import torch import torch.nn.functional as F from os.path import exists from numpy import argmax from tqdm import tqdm from sklearn.metrics import f1_score, accuracy_score from nlu_prompt import get_prompt from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM from nusacrowd import NusantaraConfigHelper from data_utils import load_xnli_dataset, load_nusa_menulis_dataset, load_nlu_tasks
1,378
"""nusacrowd zero-shot prompt.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1Ru8DyS2ALWfRdkjOPHj-KNjw6Pfa44Nd """ #!pip install git+https://github.com/IndoNLP/nusa-crowd.git@release_exp #!pip install transformers #!pip install sentencepiece DEBUG=False def to_prompt(input, prompt, labels, prompt_lang): # single label if 'text' in input: prompt = prompt.replace('[INPUT]', input['text']) else: prompt = prompt.replace('[INPUT_A]', input['text_1']) prompt = prompt.replace('[INPUT_B]', input['text_2']) # replace [OPTIONS] to A, B, or C if "[OPTIONS]" in prompt: new_labels = [f'{l}' for l in labels] new_labels[-1] = ("or " if 'EN' in prompt_lang else "atau ") + new_labels[-1] if len(new_labels) > 2: prompt = prompt.replace('[OPTIONS]', ', '.join(new_labels)) else: prompt = prompt.replace('[OPTIONS]', ' '.join(new_labels)) return prompt @torch.no_grad() def get_logprobs(model, tokenizer, prompt, label_ids=None, label_attn=None): inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to('cuda') input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:] outputs = model(**inputs, labels=input_ids) logits = outputs.logits if model.config.is_encoder_decoder: logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, label_ids.unsqueeze(2)) * label_attn.unsqueeze(2) return logprobs.sum() / label_attn.sum() else: logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2)) return logprobs.mean() def predict_classification(model, tokenizer, prompt, labels): if model.config.is_encoder_decoder: labels_encoded = tokenizer(labels, add_special_tokens=False, padding=True, return_tensors='pt') list_label_ids =labels_encoded['input_ids'].to('cuda') list_label_attn =labels_encoded['attention_mask'].to('cuda') probs = [ get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', ''), label_ids.view(1,-1), label_attn.view(1,-1)) for (label_ids, label_attn) in zip(list_label_ids, list_label_attn) ] else: probs = [get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', label)) for label in labels] return probs if __name__ == '__main__': if len(sys.argv) < 3: raise ValueError('main_nlu_prompt.py <prompt_lang> <model_path_or_name> <optional_output_name>') prompt_lang = sys.argv[1] MODEL = sys.argv[2] output_name = None if len(sys.argv) == 4: output_name = sys.argv[3] os.makedirs('./outputs', exist_ok=True) # Load Prompt DATA_TO_PROMPT = get_prompt(prompt_lang) # Load Dataset print('Load NLU Datasets...')
"""nusacrowd zero-shot prompt.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1Ru8DyS2ALWfRdkjOPHj-KNjw6Pfa44Nd """ #!pip install git+https://github.com/IndoNLP/nusa-crowd.git@release_exp #!pip install transformers #!pip install sentencepiece DEBUG=False def to_prompt(input, prompt, labels, prompt_lang): # single label if 'text' in input: prompt = prompt.replace('[INPUT]', input['text']) else: prompt = prompt.replace('[INPUT_A]', input['text_1']) prompt = prompt.replace('[INPUT_B]', input['text_2']) # replace [OPTIONS] to A, B, or C if "[OPTIONS]" in prompt: new_labels = [f'{l}' for l in labels] new_labels[-1] = ("or " if 'EN' in prompt_lang else "atau ") + new_labels[-1] if len(new_labels) > 2: prompt = prompt.replace('[OPTIONS]', ', '.join(new_labels)) else: prompt = prompt.replace('[OPTIONS]', ' '.join(new_labels)) return prompt @torch.no_grad() def get_logprobs(model, tokenizer, prompt, label_ids=None, label_attn=None): inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to('cuda') input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:] outputs = model(**inputs, labels=input_ids) logits = outputs.logits if model.config.is_encoder_decoder: logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, label_ids.unsqueeze(2)) * label_attn.unsqueeze(2) return logprobs.sum() / label_attn.sum() else: logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2)) return logprobs.mean() def predict_classification(model, tokenizer, prompt, labels): if model.config.is_encoder_decoder: labels_encoded = tokenizer(labels, add_special_tokens=False, padding=True, return_tensors='pt') list_label_ids =labels_encoded['input_ids'].to('cuda') list_label_attn =labels_encoded['attention_mask'].to('cuda') probs = [ get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', ''), label_ids.view(1,-1), label_attn.view(1,-1)) for (label_ids, label_attn) in zip(list_label_ids, list_label_attn) ] else: probs = [get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', label)) for label in labels] return probs if __name__ == '__main__': if len(sys.argv) < 3: raise ValueError('main_nlu_prompt.py <prompt_lang> <model_path_or_name> <optional_output_name>') prompt_lang = sys.argv[1] MODEL = sys.argv[2] output_name = None if len(sys.argv) == 4: output_name = sys.argv[3] os.makedirs('./outputs', exist_ok=True) # Load Prompt DATA_TO_PROMPT = get_prompt(prompt_lang) # Load Dataset print('Load NLU Datasets...')
nlu_datasets = load_nlu_tasks()
3
2023-10-24 07:46:05+00:00
2k
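The get_logprobs helper in the record above scores candidate labels by gathering per-token log-probabilities out of the vocabulary dimension. The core indexing trick in isolation, on dummy logits (assumes PyTorch):

import torch
import torch.nn.functional as F

logits = torch.randn(1, 5, 100)              # [batch, seq_len, vocab]
output_ids = torch.randint(0, 100, (1, 5))   # the tokens actually produced
# Pick each produced token's log-probability from the vocab axis.
logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2))
score = logprobs.mean()                      # the decoder-only scoring path above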
ambient-innovation/django-migration-zero
tests/services/test_deployment.py
[ { "identifier": "InvalidMigrationTreeError", "path": "django_migration_zero/exceptions.py", "snippet": "class InvalidMigrationTreeError(RuntimeError):\n pass" }, { "identifier": "MigrationZeroConfigurationManager", "path": "django_migration_zero/managers.py", "snippet": "class MigrationZeroConfigurationManager(models.Manager):\n def fetch_singleton(self) -> None:\n logger = get_logger()\n try:\n number_records = self.count()\n except ProgrammingError:\n logger.warning(\n \"The migration zero table is missing. This might be ok for the first installation of \"\n '\"django-migration-zero\" but if you see this warning after that point, something went sideways.'\n )\n return None\n\n if number_records > 1:\n raise MissingMigrationZeroConfigRecordError(\n \"Too many configuration records detected. There can only be one.\"\n )\n\n config_singleton = self.all().first()\n if not config_singleton:\n raise MissingMigrationZeroConfigRecordError(\"No configuration record found in the database.\")\n\n return config_singleton" }, { "identifier": "MigrationZeroConfiguration", "path": "django_migration_zero/models.py", "snippet": "class MigrationZeroConfiguration(models.Model):\n migration_imminent = models.BooleanField(\n _(\"Migration imminent\"),\n default=False,\n help_text=_(\"Enable this checkbox to prepare the database for a migration zero reset on the next deployment.\"),\n )\n migration_date = models.DateField(_(\"Migration date\"), null=True, blank=True)\n\n objects = MigrationZeroConfigurationManager()\n\n class Meta:\n verbose_name = _(\"Configuration\")\n verbose_name_plural = _(\"Configurations\")\n\n def __str__(self):\n return \"Configuration\"\n\n @property\n def is_migration_applicable(self) -> bool:\n \"\"\"\n Checks if we are currently preparing for a \"migration zero\"-deployment\n \"\"\"\n logger = get_logger()\n if not self.migration_imminent:\n logger.info(\"Switch not active. Skipping migration zero process.\")\n return False\n\n if not self.migration_date == timezone.now().date():\n logger.info(\"Security date doesn't match today. 
Skipping migration zero process.\")\n return False\n\n return True" }, { "identifier": "DatabasePreparationService", "path": "django_migration_zero/services/deployment.py", "snippet": "class DatabasePreparationService:\n \"\"\"\n Service to prepare the database for an upcoming commit in the CI/CD pipeline.\n \"\"\"\n\n logger: Logger\n\n def __init__(self):\n super().__init__()\n\n self.logger = get_logger()\n\n def process(self):\n self.logger.info(\"Starting migration zero database adjustments...\")\n\n # Fetch configuration singleton from database\n config_singleton = MigrationZeroConfiguration.objects.fetch_singleton()\n\n # If we encountered a problem or are not planning to do a migration reset, we are done here\n if not (config_singleton and config_singleton.is_migration_applicable):\n return\n\n # Reset migration history in database for all apps because there might be dependency issues if we keep the\n # records of the other ones\n self.logger.info(\"Resetting migration history for all apps...\")\n\n with connections[\"default\"].cursor() as cursor:\n cursor.execute(\"DELETE FROM `django_migrations`\")\n\n # Apply migrations via fake because the database is already up-to-date\n self.logger.info(\"Populating migration history.\")\n call_command(\"migrate\", fake=True)\n\n # Check if migration tree is valid\n self.logger.info(\"Checking migration integrity.\")\n migrate_check = call_command(\"migrate\", check=True)\n\n if not migrate_check:\n self.logger.info(\"All good.\")\n else:\n raise InvalidMigrationTreeError(\n 'The command \"migrate --check\" returned a non-zero error code. '\n \"Your migration structure seems to be invalid.\"\n )\n\n # Process finished, deactivate migration zero switch\n self.logger.info(\"Deactivating migration zero switch in database.\")\n config_singleton.migration_imminent = False\n config_singleton.save()\n\n self.logger.info(\"Process successfully finished.\")" } ]
from logging import Logger from unittest import mock from django.test import TestCase from django.utils import timezone from freezegun import freeze_time from django_migration_zero.exceptions import InvalidMigrationTreeError from django_migration_zero.managers import MigrationZeroConfigurationManager from django_migration_zero.models import MigrationZeroConfiguration from django_migration_zero.services.deployment import DatabasePreparationService
1,158
@freeze_time("2023-06-26") class DatabasePreparationServiceTest(TestCase): config: MigrationZeroConfiguration @classmethod def setUpTestData(cls): super().setUpTestData() cls.service = DatabasePreparationService() cls.config, _ = MigrationZeroConfiguration.objects.get_or_create() def test_init_logger_set(self): self.assertIsInstance(self.service.logger, Logger) def test_process_regular(self): # Setup self.config.migration_imminent = True self.config.migration_date = timezone.now().date() self.config.save() # Assertions self.assertIsNone(self.service.process()) self.config.refresh_from_db() self.assertFalse(self.config.migration_imminent) @mock.patch.object(MigrationZeroConfiguration, "is_migration_applicable", return_value=False)
@freeze_time("2023-06-26") class DatabasePreparationServiceTest(TestCase): config: MigrationZeroConfiguration @classmethod def setUpTestData(cls): super().setUpTestData() cls.service = DatabasePreparationService() cls.config, _ = MigrationZeroConfiguration.objects.get_or_create() def test_init_logger_set(self): self.assertIsInstance(self.service.logger, Logger) def test_process_regular(self): # Setup self.config.migration_imminent = True self.config.migration_date = timezone.now().date() self.config.save() # Assertions self.assertIsNone(self.service.process()) self.config.refresh_from_db() self.assertFalse(self.config.migration_imminent) @mock.patch.object(MigrationZeroConfiguration, "is_migration_applicable", return_value=False)
@mock.patch.object(MigrationZeroConfigurationManager, "fetch_singleton", return_value=None)
1
2023-10-18 12:51:36+00:00
2k
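The tests in the record above pivot on is_migration_applicable, which holds only when the switch is on and the safety date is today. A stdlib stand-in for that guard (plain date.today() instead of Django's timezone.now().date(), so no timezone handling):

from datetime import date

def is_migration_applicable(migration_imminent: bool, migration_date: date) -> bool:
    # Both guards from the model: switch enabled, and the date matches today.
    return migration_imminent and migration_date == date.today()

assert is_migration_applicable(True, date.today())
assert not is_migration_applicable(False, date.today())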
Lucchetto/model_converter
src/api.py
[ { "identifier": "setup_pub_key", "path": "src/licensing.py", "snippet": "def setup_pub_key() -> (rsa.RSAPublicKey | None):\n str = os.environ.get('LICENSING_PUB_KEY')\n if str:\n logging.info(\"LICENSING_PUB_KEY defined, Play Store licensing validation will be performed\")\n key = serialization.load_der_public_key(\n base64.b64decode(str), \n backend=default_backend()\n )\n # Check if the key is an instance of RSA public key\n if isinstance(key, rsa.RSAPublicKey):\n return key\n else:\n raise ValueError(\"The key is not an RSA public key.\")\n else:\n logging.info(\"LICENSING_PUB_KEY not defined, no licensing validation will be performed\")\n return None" }, { "identifier": "validate_license", "path": "src/licensing.py", "snippet": "def validate_license(key: rsa.RSAPublicKey, licensing_response_data: str | None, signature: str | None) -> bool:\n \"\"\"Validates license response from Play Store\n \"\"\"\n # Extract license data from response\n if licensing_response_data is None:\n return False\n license_data = licensing_response_data.split(\"|\")\n if len(license_data) < 6:\n return False\n license_status = LicensingStatus.from_value(safe_str_to_int(license_data[0]))\n package_name = license_data[2]\n # Remove extra data separated with |{timestamp}:{extra_data}\n timestamp = int(license_data[5].split(\":\")[0])\n # License responses with old timestamp should considered invalid too\n if (timestamp + LICENSE_RESPONSE_VALIDITY_TIME < datetime.now(timezone.utc).timestamp() * 1000):\n return False\n\n # Checking signature is not necessary if not licensed or if licensed check signature is provided\n if license_status is not LicensingStatus.LICENSED or signature is None:\n return False\n\n # Verify reponse data integrity if status is licensed\n try:\n key.verify(\n base64.b64decode(signature),\n licensing_response_data.encode(),\n padding.PKCS1v15(),\n hashes.SHA1()\n )\n return True\n except InvalidSignature as e:\n return False" }, { "identifier": "UnsupportedModelArch", "path": "src/converter.py", "snippet": "class UnsupportedModelArch(Exception):\n pass" }, { "identifier": "convert_pth_to_onnx", "path": "src/converter.py", "snippet": "def convert_pth_to_onnx(input_model: str, output_model: str):\n (model, _, _) = load_model_node(input_model)\n if model.__class__.__name__ in (DAT.__name__, CodeFormer.__name__, GFPGANv1Clean.__name__, RestoreFormer.__name__):\n raise UnsupportedModelArch()\n # set the train mode to false since we will only run the forward pass.\n model.train(False)\n model.cpu().eval()\n\n # An example input\n x = torch.rand(1, model.in_nc, 256, 256)\n # Export the model\n with torch.no_grad():\n dynamic_axes = {'input':{0:'batch_size' , 2:'width', 3:'height'}, 'output':{0:'batch_size' , 2:'width', 3:'height'}}\n torch.onnx.export(\n model,\n x,\n output_model,\n opset_version=11,\n export_params=True,\n input_names = ['input'],\n output_names = ['output'], \n dynamic_axes=dynamic_axes)" } ]
from enum import Enum from flask import Flask, Response, jsonify, request, send_file from src.licensing import setup_pub_key, validate_license from .converter import UnsupportedModelArch, convert_pth_to_onnx import logging import os import uuid
972
class ApiErrorReason(Enum): UNSUPPORTED_ARCH = "UNSUPPORTED_ARCH" INVALID_LICENSE = 'INVALID_LICENSE' UNSUPPORTED_FORMAT = 'UNSUPPORTED_FORMAT' UNKNOWN = 'UNKNOWN' def api_error(reason: ApiErrorReason): if reason == ApiErrorReason.INVALID_LICENSE: status_code = 401 else: status_code = 400 return jsonify({"reason": reason.value}), status_code def create_app(): logging.basicConfig(level=logging.NOTSET) app = Flask(__name__) # Ensure the directory exists os.makedirs("tmp", exist_ok=True)
class ApiErrorReason(Enum): UNSUPPORTED_ARCH = "UNSUPPORTED_ARCH" INVALID_LICENSE = 'INVALID_LICENSE' UNSUPPORTED_FORMAT = 'UNSUPPORTED_FORMAT' UNKNOWN = 'UNKNOWN' def api_error(reason: ApiErrorReason): if reason == ApiErrorReason.INVALID_LICENSE: status_code = 401 else: status_code = 400 return jsonify({"reason": reason.value}), status_code def create_app(): logging.basicConfig(level=logging.NOTSET) app = Flask(__name__) # Ensure the directory exists os.makedirs("tmp", exist_ok=True)
pub_key = setup_pub_key()
0
2023-10-18 18:18:55+00:00
2k
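convert_pth_to_onnx in the record above exports with dynamic axes so the resulting ONNX model accepts arbitrary batch and spatial sizes. A reduced sketch with a stand-in one-layer network (assumes PyTorch; the output filename is arbitrary):

import torch
import torch.nn as nn

model = nn.Conv2d(3, 3, kernel_size=3, padding=1).eval()  # stand-in network
x = torch.rand(1, 3, 256, 256)                            # example input
dynamic_axes = {"input": {0: "batch_size", 2: "width", 3: "height"},
                "output": {0: "batch_size", 2: "width", 3: "height"}}
with torch.no_grad():
    torch.onnx.export(model, x, "model.onnx", opset_version=11,
                      export_params=True, input_names=["input"],
                      output_names=["output"], dynamic_axes=dynamic_axes)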
hpsaturn/pilauncher
main.py
[ { "identifier": "GuiManager", "path": "gui.py", "snippet": "class GuiManager():\n def __init__(self):\n self.am = AppManager()\n self.wlevel = 0\n self.showApp()\n\n def showApp(self):\n if self.wlevel == 0:\n print(self.am.getCurrentApp().name)\n return self.am.getCurrentApp().name\n else:\n print(self.am.getCurrentCmd().name)\n return self.am.getCurrentCmd().name\n\n def showNextApp(self):\n if self.wlevel == 0:\n self.am.getNextApp()\n else:\n self.am.getNextCmd()\n return self.showApp()\n \n def getAppStatusCmd(self):\n if self.wlevel == 0:\n return self.am.getCurrentApp().sta_cmd\n else:\n return None\n \n def getAppStatus(self):\n if self.wlevel == 0:\n return self.am.getCurrentApp().status\n else:\n return ''\n \n def runBack(self):\n self.wlevel=0\n self.am.reset()\n return self.showApp()\n \n def runAction(self):\n if self.wlevel==0:\n self.wlevel=1\n return self.showApp()\n else:\n command = self.am.getCurrentCmd().command\n if command == 'back':\n return self.runBack()\n else:\n return 'exec::'+command\n \n def getConfig(self):\n return self.am.cfg" }, { "identifier": "Display", "path": "display.py", "snippet": "class Display:\n\n WIDTH = 128\n HEIGHT = 32 # Change to 64 if needed\n\n def __init__(self):\n # Create the I2C interface.\n i2c = board.I2C()\n # Define the Reset Pin\n oled_reset = digitalio.DigitalInOut(board.D4)\n self.disp = adafruit_ssd1306.SSD1306_I2C(self.WIDTH, self.HEIGHT, i2c, addr=0x3C, reset=oled_reset)\n # timer for auto disp off\n self.timer_screen = time.time()\n # general semaphore\n self.mutex = threading.Lock()\n\n # Clear display.\n self.disp.fill(0)\n self.disp.show()\n\n # Create blank image for drawing.\n self.w = self.disp.width\n self.h = self.disp.height\n self.image = Image.new(\"1\", (self.w, self.h))\n\n # Get drawing object to draw on image.\n self.draw = ImageDraw.Draw(self.image)\n\n # Draw a black filled box to clear the image.\n self.draw.rectangle((0, 0, self.w, self.h), outline=0, fill=0)\n\n #padding\n self.top = -2\n self.x = 0\n\n # Load default font.\n self.fntS = ImageFont.load_default()\n self.fntB = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 18) \n\n def showString(self, msg):\n self.mutex.acquire()\n self.draw.rectangle((0, 0, self.w, self.h), outline=0, fill=0)\n self.draw.text((self.x, self.top + 0), msg[:12], font=self.fntB, fill=255)\n self.disp.image(self.image)\n self.disp.show()\n self.mutex.release()\n\n def showStatus(self, msg):\n self.mutex.acquire()\n self.draw.rectangle((0, 18, self.w-1, self.h- 1), outline=0, fill=0)\n self.draw.text((self.x, self.top + 25), msg[:21], font=self.fntS, fill=255)\n self.disp.image(self.image)\n self.disp.show()\n self.mutex.release()\n\n def showInfoLines(self, lines):\n self.mutex.acquire()\n self.draw.rectangle((0, 0, self.w, self.h), outline=0, fill=0)\n pos = 0\n for line in lines:\n self.draw.text((self.x, self.top + pos), line[:21], font=self.fntS, fill=255)\n pos = pos + 8\n self.disp.image(self.image)\n self.disp.show()\n self.mutex.release()\n\n def powerOffTimerReset(self):\n self.timer_screen = time.time()\n if not self.disp.power:\n self.disp.poweron()\n\n def powerOffTimerLoop(self, time_off):\n if not self.disp.power:\n return\n if time.time() - self.timer_screen > time_off:\n self.disp.poweroff()" } ]
import time import subprocess import threading import RPi.GPIO as GPIO from gui import GuiManager from display import Display
1,214
BTNLFT = 23 BTNRGT = 6 onAppStatusTask = False onSystemStatsTask = False isBtnRgtPresed = False isBtnLftPresed = False onStats = False # GUI Apps Manager gui = GuiManager() cfg = gui.getConfig()
BTNLFT = 23 BTNRGT = 6 onAppStatusTask = False onSystemStatsTask = False isBtnRgtPresed = False isBtnLftPresed = False onStats = False # GUI Apps Manager gui = GuiManager() cfg = gui.getConfig()
dsp = Display()
1
2023-10-23 20:21:51+00:00
2k
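The launcher in the record above reads two push buttons on pins 23 and 6. An event-driven sketch for the same pins (assumes a Raspberry Pi, BCM numbering, and buttons wired to ground; Unix-only signal.pause keeps the process alive):

import signal
import RPi.GPIO as GPIO

BTNLFT, BTNRGT = 23, 6  # pin numbers from the record above

def on_press(channel):
    print("button pressed on pin", channel)

GPIO.setmode(GPIO.BCM)
for pin in (BTNLFT, BTNRGT):
    GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.add_event_detect(pin, GPIO.FALLING, callback=on_press, bouncetime=200)

signal.pause()  # wait for callbacks; Ctrl-C to exit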
CAMeL-Lab/camel_parser
src/initialize_disambiguator/disambiguator_interface.py
[ { "identifier": "log", "path": "src/logger.py", "snippet": "def log(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n \n with open(log_path, 'a') as f:\n f.write(f'{map_function_to_phrase(func.__name__)}: {round(end_time - start_time, 2)}s\\n')\n return result\n except Exception as e:\n logger.exception(f\"Exception raised in {func.__name__}. exception: {str(e)}\")\n raise e\n\n return wrapper" }, { "identifier": "create_bert_disambiguator", "path": "src/initialize_disambiguator/bert_disambiguator.py", "snippet": "def create_bert_disambiguator(analyzer):\n model = BERTUnfactoredDisambiguator.pretrained(\"msa\", top=1000, pretrained_cache=False)\n model._analyzer = analyzer\n return model" }, { "identifier": "MLEDisambiguatorAdapter", "path": "src/initialize_disambiguator/mle_disambiguator.py", "snippet": "class MLEDisambiguatorAdapter():\n def __init__(self, analyzer: Analyzer):\n self.disambiguator = MLEDisambiguator(analyzer=analyzer)\n \n # def pretrained(self, analyzer):\n # self.disambiguator = self.disambiguator\n \n def disambiguate(self, sentence: List[str]) -> List[DisambiguatedWord]:\n return self.disambiguator.disambiguate(sentence)\n \n def disambiguate_sentences(self, lines: List[List[str]]) -> List[List[DisambiguatedWord]]:\n return [self.disambiguator.disambiguate(line) for line in lines]" } ]
from typing import Union from camel_tools.morphology.database import MorphologyDB from camel_tools.morphology.analyzer import Analyzer from camel_tools.disambig.bert import BERTUnfactoredDisambiguator from src.logger import log from src.initialize_disambiguator.bert_disambiguator import create_bert_disambiguator from src.initialize_disambiguator.mle_disambiguator import MLEDisambiguatorAdapter
693
def set_up_analyzer(morphology_db: str) -> Analyzer: # used to initialize an Analyzer with ADD_PROP backoff # db = MorphologyDB.builtin_db('calima-msa-s31') db_type = None if morphology_db == 'r13' else morphology_db db = MorphologyDB.builtin_db(db_name=db_type) return Analyzer(db=db, backoff='ADD_PROP', cache_size=100000) @log def get_disambiguator(model_name: str, morphology_db: str) -> Union[MLEDisambiguatorAdapter, BERTUnfactoredDisambiguator]: analyzer = set_up_analyzer(morphology_db) if model_name == 'mle': model = MLEDisambiguatorAdapter(analyzer) elif model_name == 'bert':
def set_up_analyzer(morphology_db: str) -> Analyzer: # used to initialize an Analyzer with ADD_PROP backoff # db = MorphologyDB.builtin_db('calima-msa-s31') db_type = None if morphology_db == 'r13' else morphology_db db = MorphologyDB.builtin_db(db_name=db_type) return Analyzer(db=db, backoff='ADD_PROP', cache_size=100000) @log def get_disambiguator(model_name: str, morphology_db: str) -> Union[MLEDisambiguatorAdapter, BERTUnfactoredDisambiguator]: analyzer = set_up_analyzer(morphology_db) if model_name == 'mle': model = MLEDisambiguatorAdapter(analyzer) elif model_name == 'bert':
model = create_bert_disambiguator(analyzer)
1
2023-10-21 10:39:28+00:00
2k
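For context on what get_disambiguator hands back: a hedged usage sketch of camel_tools' MLE disambiguator on an Arabic example sentence (assumes the package is installed and its pretrained data has been downloaded):

from camel_tools.disambig.mle import MLEDisambiguator

mled = MLEDisambiguator.pretrained()
words = "ذهب الولد الى المدرسة".split()
disambig = mled.disambiguate(words)
# Each result carries scored analyses; take the top diacritized form.
top_diac = [d.analyses[0].analysis["diac"] for d in disambig]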
JerBouma/FinancePortfolio
financeportfolio/portfolio_controller.py
[ { "identifier": "excel_model", "path": "financeportfolio/excel_model.py", "snippet": "def create_portfolio_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_transactions_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_portfolio_overview_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_positions_overview_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):" }, { "identifier": "helpers", "path": "financeportfolio/helpers.py", "snippet": "BASE_URL = \"https://raw.githubusercontent.com/JerBouma/FinancePortfolio/main/\"\nVALID_CODE = 200\n RED = \"\\033[91m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n BLUE = \"\\033[94m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n RESET = \"\\033[0m\"\nclass Style:\ndef read_excel(location: str):\ndef read_yaml_file(location: str):\ndef download_example_datasets(base_url: str | None = None):\ndef download_yaml_configuration(example: bool = False, name: str | None = None):" }, { "identifier": "portfolio_model", "path": "financeportfolio/portfolio_model.py", "snippet": "CURRENCY_CODE_LENGTH = 3\r\ndef read_portfolio_dataset(\r\n excel_location: list,\r\n adjust_duplicates: bool,\r\n date_column: list[str],\r\n date_format: str,\r\n name_columns: list[str],\r\n ticker_columns: list[str],\r\n price_columns: list[str],\r\n volume_columns: list[str],\r\n column_mapping: dict[str, str],\r\n currency_columns: list[str] | str | None = None,\r\n costs_columns: list[str] | None = None,\r\n) -> tuple[pd.DataFrame, str, str, str, str, str, str]:\r\ndef format_portfolio_dataset(\r\n dataset: pd.DataFrame,\r\n date_columns: list[str],\r\n date_format: str,\r\n name_columns: list[str],\r\n tickers_columns: list[str],\r\n price_columns: list[str],\r\n volume_columns: list[str],\r\n column_mapping: dict[str, str],\r\n currency_columns: list[str] | str | None = None,\r\n costs_columns: list[str] | None = None,\r\n) -> tuple[pd.DataFrame, str, str, str, str, str, str, str]:\r\ndef create_transactions_overview(\r\n portfolio_volume: pd.Series,\r\n portfolio_price: pd.Series,\r\n portfolio_costs: pd.Series,\r\n latest_returns: pd.Series,\r\n):\r\ndef create_portfolio_overview(\r\n portfolio_name: pd.Series,\r\n portfolio_volume: pd.Series,\r\n portfolio_price: pd.Series,\r\n portfolio_costs: pd.Series,\r\n latest_returns: pd.Series,\r\n benchmark_prices: pd.Series,\r\n benchmark_latest_prices: pd.Series,\r\n):\r\ndef create_transactions_performance(\r\n portfolio_dataset: pd.DataFrame,\r\n ticker_column: str,\r\n date_column: str,\r\n volume_column: str,\r\n price_column: str,\r\n costs_column: str,\r\n period_prices: pd.DataFrame,\r\n period_string: str,\r\n original_ticker_combinations: dict,\r\n benchmark_per_ticker: dict,\r\n benchmark_specific_prices: pd.Series,\r\n benchmark_period_prices: pd.DataFrame,\r\n):\r\ndef create_positions_overview(\r\n portfolio_tickers: list[str],\r\n period_dates: pd.DatetimeIndex,\r\n portfolio_dataset: pd.DataFrame,\r\n historical_prices: pd.Series,\r\n columns: list[str] | None = None,\r\n):\r\ndef create_portfolio_performance(\r\n positions_dataset: pd.DataFrame,\r\n date_column: str,\r\n ticker_column: str,\r\n period_string: str,\r\n):\r" } ]
import pandas as pd from financetoolkit import Toolkit from financeportfolio import excel_model, helpers, portfolio_model
1,298
"""Portfolio Module""" # pylint: disable=too-many-instance-attributes,abstract-class-instantiated, # pylint: disable=too-few-public-methods,protected-access,too-many-lines class Portfolio: """ A class for managing and analyzing your portfolio. This class provides functionality for loading, preprocessing, categorizing, and analyzing cash flow data based on a specified configuration file. It offers methods to read and format the dataset, apply cost or income indicators, categorize transactions, and create periodical cash flow overviews. Parameters: configuration_file (str): The file path to the configuration file in YAML format. The configuration file should define various settings and columns used in cash flow analysis. Attributes: _configuration_file (str): The file path to the configuration file. _cash_flow_dataset (pd.DataFrame): The cash flow dataset as a pandas DataFrame. Note: - The configuration file should be in YAML format and contain settings for date columns, description columns, amount columns, and optionally cost/income columns. - Initialize an instance of this class to begin cash flow analysis. """ def __init__( self, configuration_file: str | None = None, portfolio_dataset: pd.DataFrame = pd.DataFrame(), example: bool = False, ): """ Initialize a Cashflow instance with the provided configuration file. This constructor sets up the Cashflow instance by loading the configuration file, defining default attributes, and initializing the cash flow dataset as an empty DataFrame. Parameters: configuration_file (str): The file path to the configuration file in YAML format. Raises: ValueError: If the provided configuration file does not have a '.yaml' extension. Only '.yaml' configuration files are supported. """ if example:
"""Portfolio Module""" # pylint: disable=too-many-instance-attributes,abstract-class-instantiated, # pylint: disable=too-few-public-methods,protected-access,too-many-lines class Portfolio: """ A class for managing and analyzing your portfolio. This class provides functionality for loading, preprocessing, categorizing, and analyzing cash flow data based on a specified configuration file. It offers methods to read and format the dataset, apply cost or income indicators, categorize transactions, and create periodical cash flow overviews. Parameters: configuration_file (str): The file path to the configuration file in YAML format. The configuration file should define various settings and columns used in cash flow analysis. Attributes: _configuration_file (str): The file path to the configuration file. _cash_flow_dataset (pd.DataFrame): The cash flow dataset as a pandas DataFrame. Note: - The configuration file should be in YAML format and contain settings for date columns, description columns, amount columns, and optionally cost/income columns. - Initialize an instance of this class to begin cash flow analysis. """ def __init__( self, configuration_file: str | None = None, portfolio_dataset: pd.DataFrame = pd.DataFrame(), example: bool = False, ): """ Initialize a Cashflow instance with the provided configuration file. This constructor sets up the Cashflow instance by loading the configuration file, defining default attributes, and initializing the cash flow dataset as an empty DataFrame. Parameters: configuration_file (str): The file path to the configuration file in YAML format. Raises: ValueError: If the provided configuration file does not have a '.yaml' extension. Only '.yaml' configuration files are supported. """ if example:
configuration_file = helpers.download_yaml_configuration(example=True)
1
2023-10-15 09:16:04+00:00
2k
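The constructor documented above rejects configuration files that are not '.yaml'. A small stand-in for that validation step (hypothetical helper name):

from pathlib import Path

def validate_configuration_file(configuration_file: str) -> Path:
    # Mirrors the extension check described in the docstring above.
    path = Path(configuration_file)
    if path.suffix != ".yaml":
        raise ValueError("Only '.yaml' configuration files are supported.")
    return path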
S2-group/UPISAS
UPISAS/tests/upisas/test_exemplar.py
[ { "identifier": "DockerImageNotFoundOnDockerHub", "path": "UPISAS/exceptions.py", "snippet": "class DockerImageNotFoundOnDockerHub(UPISASException):\n pass" }, { "identifier": "Exemplar", "path": "UPISAS/exemplar.py", "snippet": "class Exemplar(ABC):\n \"\"\"\n A class which encapsulates a self-adaptive exemplar run in a docker container.\n \"\"\"\n _container_name = \"\"\n def __init__(self, base_endpoint: \"string with the URL of the exemplar's HTTP server\", \\\n docker_kwargs,\n auto_start: \"Whether to immediately start the container after creation\" =False,\n ):\n '''Create an instance of the Exemplar class'''\n self.base_endpoint = base_endpoint\n image_name = docker_kwargs[\"image\"]\n image_owner = image_name.split(\"/\")[0]\n try:\n docker_client = docker.from_env()\n try:\n docker_client.images.get(image_name)\n logging.info(f\"image '{image_name}' found locally\")\n except docker.errors.ImageNotFound:\n logging.info(f\"image '{image_name}' not found locally\")\n images_from_owner = docker_client.images.search(image_owner)\n if image_name.split(\":\")[0] in [i[\"name\"] for i in images_from_owner]:\n logging.info(f\"image '{image_name}' found on DockerHub, pulling it\")\n with Progress() as progress:\n for line in docker_client.api.pull(image_name, stream=True, decode=True):\n show_progress(line, progress)\n else:\n logging.error(f\"image '{image_name}' not found on DockerHub, exiting!\")\n raise DockerImageNotFoundOnDockerHub\n docker_kwargs[\"detach\"] = True\n self.exemplar_container = docker_client.containers.create(**docker_kwargs)\n except DockerException as e:\n # TODO: Properly catch various errors. Currently, a lot of errors might be caught here.\n # Please check the logs if that happens.\n raise e\n if auto_start:\n self.start_container()\n\n @abstractmethod\n def start_run(self):\n pass\n\n def start_container(self):\n '''Starts running the docker container made from the given image when constructing this class'''\n try:\n container_status = self.get_container_status()\n if container_status == \"running\":\n logging.warning(\"container already running...\")\n else:\n logging.info(\"starting container...\")\n self.exemplar_container.start()\n return True\n except docker.errors.NotFound as e:\n logging.error(e)\n\n def stop_container(self, remove=True):\n '''Stops the docker container made from the given image when constructing this class'''\n try:\n container_status = self.get_container_status()\n if container_status == \"exited\":\n logging.warning(\"container already stopped...\")\n if remove:\n self.exemplar_container.remove()\n self.exemplar_container = None\n else:\n logging.info(\"stopping container...\")\n self.exemplar_container.stop()\n if remove:\n self.exemplar_container.remove()\n self.exemplar_container = None\n return True\n except docker.errors.NotFound as e:\n logging.warning(e)\n logging.warning(\"cannot stop container\")\n\n def pause_container(self):\n '''Pauses a running docker container made from the given image when constructing this class'''\n try:\n container_status = self.get_container_status()\n if container_status == \"running\":\n logging.info(\"pausing container...\")\n self.exemplar_container.pause()\n return True\n elif container_status == \"paused\":\n logging.warning(\"container already paused...\")\n return True\n else:\n logging.warning(\"cannot pause container since it's not running\")\n return False\n except docker.errors.NotFound as e:\n logging.error(e)\n logging.error(\"cannot pause container\")\n\n def 
unpause_container(self):\n '''Resumes a paused docker container made from the given image when constructing this class'''\n try:\n container_status = self.get_container_status()\n if container_status == \"paused\":\n logging.info(\"unpausing container...\")\n self.exemplar_container.unpause()\n return True\n elif container_status == \"running\":\n logging.warning(\"container already running (why unpause it?)...\")\n return True\n else:\n logging.warning(\"cannot unpause container since it's not paused\")\n return False\n except docker.errors.NotFound as e:\n logging.warning(e)\n logging.warning(\"cannot unpause container\")\n\n def get_container_status(self):\n if self.exemplar_container:\n self.exemplar_container.reload()\n return self.exemplar_container.status\n return \"removed\"" }, { "identifier": "DemoExemplar", "path": "UPISAS/exemplars/demo_exemplar.py", "snippet": "class DemoExemplar(Exemplar):\n \"\"\"\n A class which encapsulates a self-adaptive exemplar run in a docker container.\n \"\"\"\n def __init__(self, auto_start=False, container_name=\"upisas-demo\"):\n docker_config = {\n \"name\": container_name,\n \"image\": \"iliasger/upisas-demo-managed-system\",\n \"ports\" : {3000: 3000}}\n\n super().__init__(\"http://localhost:3000\", docker_config, auto_start)\n\n def start_run(self, app):\n self.exemplar_container.exec_run(cmd = f' sh -c \"cd /usr/src/app && node {app}\" ', detach=True)" } ]
import unittest from UPISAS.exceptions import DockerImageNotFoundOnDockerHub from UPISAS.exemplar import Exemplar from UPISAS.exemplars.demo_exemplar import DemoExemplar
1,392
class TestExemplar(unittest.TestCase): """ Test cases for the Exemplar class using the DemoExemplar. """ def setUp(self): self.exemplar = None def tearDown(self): if self.exemplar and self.exemplar.exemplar_container: self.exemplar.stop_container() def test_init_successfully_wihout_auto_start(self):
class TestExemplar(unittest.TestCase): """ Test cases for the Exemplar class using the DemoExemplar. """ def setUp(self): self.exemplar = None def tearDown(self): if self.exemplar and self.exemplar.exemplar_container: self.exemplar.stop_container() def test_init_successfully_wihout_auto_start(self):
self.exemplar = DemoExemplar(auto_start=False)
2
2023-10-15 12:46:54+00:00
2k
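The Exemplar class under test wraps the container lifecycle of the Docker SDK. The underlying calls in isolation (assumes a local Docker daemon, the docker package, and a previously pulled hello-world image, which exits on its own):

import docker

client = docker.from_env()
container = client.containers.create("hello-world", name="upisas-sketch")
container.start()
container.reload()        # refresh cached state, as get_container_status() does
print(container.status)   # e.g. "running" or "exited"
container.stop()
container.remove()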
developerlin/excelchat-streamlit
Home.py
[ { "identifier": "CustomChartsMiddleware", "path": "middleware/base.py", "snippet": "class CustomChartsMiddleware(ChartsMiddleware):\n def run(self, code: str) -> str:\n # code = super().run(code)\n\n processed = []\n for line in code.split(\"\\n\"):\n if line.find(\"plt.close()\") != -1:\n idx = line.find(\"plt\")\n blank = \"\".join([' ' for c in range(idx)])\n # Fix the chinese character display issue\n processed.append(blank + \"plt.rcParams['font.sans-serif']=['SimHei']\")\n processed.append(blank + \"plt.rcParams['axes.unicode_minus']=False\")\n # processed.append(blank + \"plt.savefig('temp_chart.png')\")\n processed.append(line)\n else:\n processed.append(line)\n code = \"\\n\".join(processed)\n return code" }, { "identifier": "CustomResponseParser", "path": "parser/response_parser.py", "snippet": "class CustomResponseParser(ResponseParser):\n def format_plot(self, result: dict) -> Any:\n super().format_plot(result)\n filename = str(uuid.uuid4()).replace(\"-\", \"\")\n\n temp_image_path = Path(f\"{tempfile.tempdir}/streamlit/{filename}.png\")\n temp_image_path.parent.mkdir(parents=True, exist_ok=True)\n\n original_path = Path(\"temp_chart.png\")\n shutil.copy(original_path, temp_image_path)\n print(\"image created: \", str(temp_image_path))\n return {\"type\": \"plot\", \"value\": str(temp_image_path)}" }, { "identifier": "get_open_ai_model", "path": "util.py", "snippet": "def get_open_ai_model(api_key):\n return OpenAI(api_token=api_key)" }, { "identifier": "get_ollama_model", "path": "util.py", "snippet": "def get_ollama_model(model_key, base_url):\n llm = Ollama(model=model_key, base_url=base_url, verbose=True)\n return LangchainLLM(langchain_llm=llm)" }, { "identifier": "get_baidu_as_model", "path": "util.py", "snippet": "def get_baidu_as_model(access_token):\n llm_core = AIStudioErnieBot(access_token=access_token, verbose=True)\n return LangchainLLM(llm_core)" }, { "identifier": "get_prompt_template", "path": "util.py", "snippet": "def get_prompt_template():\n instruction_template = \"\"\"\n使用提供的 dataframes ('dfs') 分析这个数据,过程中不要调用 dataframe set_index 对数据排序.\n1. 准备: 如果有必要对数据做预处理和清洗\n2. 执行: 对数据进行数据分析操作 (grouping, filtering, aggregating, etc.)\n3. 分析: 进行实际分析(如果用户要求plot chart,请在代码中添加如下两行代码设置字体, 并将结果保存为图像文件temp_chart.png,并且不显示图表)\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus']=False \n \"\"\"\n custom_template = GeneratePythonCodePrompt(custom_instructions=instruction_template)\n return custom_template" }, { "identifier": "get_baidu_qianfan_model", "path": "util.py", "snippet": "def get_baidu_qianfan_model(client_id, client_secret):\n llm_core = ErnieBotChat(\n model_name=\"ERNIE-Bot\",\n temperature=0.1,\n ernie_client_id=client_id,\n ernie_client_secret=client_secret\n )\n return LangchainLLM(llm_core)" } ]
import io import logging import uuid import matplotlib import pandas as pd import streamlit as st from pathlib import Path from typing import Dict from pandasai import SmartDataframe, Agent, Config from pandasai.callbacks import StdoutCallback from pandasai.helpers import Logger from middleware.base import CustomChartsMiddleware from parser.response_parser import CustomResponseParser from util import get_open_ai_model, get_ollama_model, get_baidu_as_model, get_prompt_template, get_baidu_qianfan_model
1,300
logger = Logger() matplotlib.rc_file("./.matplotlib/.matplotlibrc"); # page settings st.set_page_config(page_title="Excel Chat", layout="wide") st.header("What ExcelChat can do?") st.text("ExcelChat is a lightweight data analysis app powered by LLM, showcasing how LLM can revolutionize the future" "of data analysis.") st.markdown("""List of todos - [x] Add memory - [x] Support non-latin text in chart - [ ] Sub questions support """) class AgentWrapper: id: str agent: Agent def __init__(self) -> None: self.agent = None self.id = str(uuid.uuid4()) def get_llm(self): op = st.session_state.last_option llm = None if op == "Ollama": llm = get_ollama_model(st.session_state.ollama_model, st.session_state.ollama_base_url) elif op == "OpenAI": if st.session_state.api_token != "": llm = get_open_ai_model(st.session_state.api_token) elif op == "Baidu/AIStudio-Ernie-Bot": if st.session_state.access_token != "": llm = get_baidu_as_model(st.session_state.access_token) elif op == "Baidu/Qianfan-Ernie-Bot": if st.session_state.client_id != "" and st.session_state.client_secret != "": llm = get_baidu_qianfan_model(st.session_state.client_id, st.session_state.client_secret) if llm is None: st.toast("LLM initialization failed, check LLM configuration", icon="🫤") return llm def set_file_data(self, df): llm = self.get_llm() if llm is not None: print("llm.type", llm.type) config = Config( llm=llm, callback=StdoutCallback(), # middlewares=[CustomChartsMiddleware()], response_parser=CustomResponseParser, custom_prompts={
logger = Logger() matplotlib.rc_file("./.matplotlib/.matplotlibrc"); # page settings st.set_page_config(page_title="Excel Chat", layout="wide") st.header("What ExcelChat can do?") st.text("ExcelChat is a lightweight data analysis app powered by LLM, showcasing how LLM can revolutionize the future" "of data analysis.") st.markdown("""List of todos - [x] Add memory - [x] Support non-latin text in chart - [ ] Sub questions support """) class AgentWrapper: id: str agent: Agent def __init__(self) -> None: self.agent = None self.id = str(uuid.uuid4()) def get_llm(self): op = st.session_state.last_option llm = None if op == "Ollama": llm = get_ollama_model(st.session_state.ollama_model, st.session_state.ollama_base_url) elif op == "OpenAI": if st.session_state.api_token != "": llm = get_open_ai_model(st.session_state.api_token) elif op == "Baidu/AIStudio-Ernie-Bot": if st.session_state.access_token != "": llm = get_baidu_as_model(st.session_state.access_token) elif op == "Baidu/Qianfan-Ernie-Bot": if st.session_state.client_id != "" and st.session_state.client_secret != "": llm = get_baidu_qianfan_model(st.session_state.client_id, st.session_state.client_secret) if llm is None: st.toast("LLM initialization failed, check LLM configuration", icon="🫤") return llm def set_file_data(self, df): llm = self.get_llm() if llm is not None: print("llm.type", llm.type) config = Config( llm=llm, callback=StdoutCallback(), # middlewares=[CustomChartsMiddleware()], response_parser=CustomResponseParser, custom_prompts={
"generate_python_code": get_prompt_template()
5
2023-10-20 00:58:45+00:00
2k
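Streamlit reruns the whole script on every interaction, which is why the app above funnels its LLM agent through a wrapper object it can keep around. A minimal sketch of that session-state caching pattern (placeholder class; run with streamlit run):

import streamlit as st

class AgentWrapper:
    """Placeholder standing in for the AgentWrapper defined in the record above."""

if "agent" not in st.session_state:   # survives reruns within one browser session
    st.session_state.agent = AgentWrapper()
agent = st.session_state.agent
st.write("agent id:", id(agent))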
ZiaWang/jqtrade
jqtrade/account/portfolio.py
[ { "identifier": "OrderSide", "path": "jqtrade/account/order.py", "snippet": "class OrderSide(Enum):\n # 多仓\n long = \"long\"\n\n # 空仓\n short = \"short\"\n\n @classmethod\n def is_valid_side(cls, side):\n return side in cls.__members__\n\n @classmethod\n def get_side(cls, side):\n if isinstance(side, cls):\n return side\n\n try:\n return cls.__members__[side]\n except KeyError:\n raise ValueError(f\"invalid side: {side}\")" }, { "identifier": "UserPosition", "path": "jqtrade/account/api.py", "snippet": "class UserPosition(object):\n def __init__(self, sys_position):\n self.__position = sys_position\n\n @classmethod\n def get_empty_pos(cls, code, side):\n return UserPosition(Position(code, 0, 0, 0, side, position_value=0, last_price=0))\n\n @property\n def security(self):\n return self.__position.code\n\n @property\n def total_amount(self):\n return self.__position.amount\n\n @property\n def closeable_amount(self):\n return self.__position.available_amount\n\n @property\n def avg_cost(self):\n return self.__position.avg_cost\n\n acc_avg_cost = avg_cost\n\n @property\n def side(self):\n if self.__position.side:\n return self.__position.side.value\n\n @property\n def last_price(self):\n return self.__position.last_price\n\n price = last_price\n\n @property\n def position_value(self):\n return self.__position.position_value\n\n value = position_value\n\n def __str__(self):\n return f\"UserPosition(security={self.security}, total_amount={self.total_amount}, \" \\\n f\"closeable_amount={self.closeable_amount}, avg_cost={self.avg_cost}, side={self.side}, \" \\\n f\"last_price={self.last_price}, position_value={self.position_value})\"" }, { "identifier": "UserPositionDict", "path": "jqtrade/account/api.py", "snippet": "class UserPositionDict(dict):\n \n def __init__(self, side, *args, **kwargs):\n super(UserPositionDict, self).__init__(*args, **kwargs)\n self._side = side\n \n def __getitem__(self, code):\n try:\n return dict.__getitem__(self, code)\n except KeyError:\n sys_logger.warn(f\"{code} 在 positions 中不存在,我们返回空的 Position 对象, \"\n f\"total_amount/closeable_amount/avg_cost/acc_avg_cost/position_value/last_price 都是 0\")\n return UserPosition.get_empty_pos(code, side=self._side)" } ]
from .order import OrderSide from .api import UserPosition, UserPositionDict
729
# -*- coding: utf-8 -*-


class Portfolio(object):
    """ Aggregates the account's funds and position information """
    def __init__(self, account):
        self.__account = account

    @property
    def long_positions(self):
# -*- coding: utf-8 -*-


class Portfolio(object):
    """ Aggregates the account's funds and position information """
    def __init__(self, account):
        self.__account = account

    @property
    def long_positions(self):
positions = UserPositionDict(OrderSide.long)
0
2023-10-24 01:34:27+00:00
2k
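OrderSide.get_side in the record above accepts either an enum member or its name. The lookup pattern on its own (generic Side enum for illustration):

from enum import Enum

class Side(Enum):
    long = "long"
    short = "short"

    @classmethod
    def get_side(cls, side):
        if isinstance(side, cls):
            return side
        try:
            return cls.__members__[side]   # name lookup, e.g. "long"
        except KeyError:
            raise ValueError(f"invalid side: {side}")

assert Side.get_side("long") is Side.get_side(Side.long)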
Glasgow-AI4BioMed/GenKIE
data/mm_data/vqa_gen_dataset.py
[ { "identifier": "data_utils", "path": "data/data_utils.py", "snippet": "def infer_language_pair(path):\ndef collate_tokens(\n values,\n pad_idx,\n eos_idx=None,\n left_pad=False,\n move_eos_to_beginning=False,\n pad_to_length=None,\n pad_to_multiple=1,\n pad_to_bsz=None,\n):\n def copy_tensor(src, dst):\ndef load_indexed_dataset(\n path, dictionary=None, dataset_impl=None, combine=False, default=\"cached\"\n):\ndef numpy_seed(seed, *addl_seeds):\ndef collect_filtered(function, iterable, filtered):\ndef _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):\n def compare_leq(a, b):\n def check_size(idx):\ndef filter_by_size(indices, dataset, max_positions, raise_exception=False):\ndef filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):\ndef batch_by_size(\n indices,\n num_tokens_fn,\n num_tokens_vec=None,\n max_tokens=None,\n max_sentences=None,\n required_batch_size_multiple=1,\n fixed_shapes=None,\n):\ndef post_process(sentence: str, symbol: str):\ndef compute_mask_indices(\n shape: Tuple[int, int],\n padding_mask: Optional[torch.Tensor],\n mask_prob: float,\n mask_length: int,\n mask_type: str = \"static\",\n mask_other: float = 0.0,\n min_masks: int = 0,\n no_overlap: bool = False,\n min_space: int = 0,\n) -> np.ndarray:\n def arrange(s, e, length, keep_length):\ndef get_mem_usage():\ndef lengths_to_padding_mask(lens):\ndef lengths_to_mask(lens):\ndef get_buckets(sizes, num_buckets):\ndef get_bucketed_sizes(orig_sizes, buckets):\ndef _find_extra_valid_paths(dataset_path: str) -> set:\ndef raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None:" }, { "identifier": "OFADataset", "path": "data/ofa_dataset.py", "snippet": "class OFADataset(FairseqDataset):\n def __init__(self, split, dataset, bpe, src_dict, tgt_dict):\n self.split = split\n self.dataset = dataset\n self.bpe = bpe\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n self.bos = src_dict.bos()\n self.eos = src_dict.eos()\n self.pad = src_dict.pad()\n self.bos_item = torch.LongTensor([self.bos])\n self.eos_item = torch.LongTensor([self.eos])\n\n def __len__(self):\n return len(self.dataset)\n\n def encode_text(self, text, length=None, append_bos=False, append_eos=False, use_bpe=True):\n s = self.tgt_dict.encode_line(\n line=self.bpe.encode(text) if use_bpe else text,\n add_if_not_exist=False,\n append_eos=False\n ).long()\n if length is not None:\n s = s[:length]\n if append_bos:\n s = torch.cat([self.bos_item, s])\n if append_eos:\n s = torch.cat([s, self.eos_item])\n return s\n\n def pre_question(self, question, max_ques_words=None):\n question = question.lower().lstrip(\",.!?*#:;~\").replace('-', ' ').replace('/', ' ')\n\n question = re.sub(\n r\"\\s{2,}\",\n ' ',\n question,\n )\n question = question.rstrip('\\n')\n question = question.strip(' ')\n\n # truncate question\n question_words = question.split(' ')\n if max_ques_words is not None and len(question_words) > max_ques_words:\n question = ' '.join(question_words[:max_ques_words])\n\n return question\n\n def pre_caption(self, caption, max_words=None):\n caption = caption.lower().lstrip(\",.!?*#:;~\").replace('-', ' ').replace('/', ' ').replace('<person>', 'person')\n\n caption = re.sub(\n r\"\\s{2,}\",\n ' ',\n caption,\n )\n caption = caption.rstrip('\\n')\n caption = caption.strip(' ')\n\n # truncate caption\n caption_words = caption.split(' ')\n if max_words is not None and len(caption_words) > max_words:\n caption = ' '.join(caption_words[:max_words])\n\n return caption" } ]
from io import BytesIO from torchvision import transforms from PIL import Image, ImageFile from data import data_utils from data.ofa_dataset import OFADataset import logging import warnings import numpy as np import torch import base64
1291
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. ImageFile.LOAD_TRUNCATED_IMAGES = True ImageFile.MAX_IMAGE_PIXELS = None Image.MAX_IMAGE_PIXELS = None logger = logging.getLogger(__name__) warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning) IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) def collate(samples, pad_idx, eos_idx): if len(samples) == 0: return {} def merge(key):
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. ImageFile.LOAD_TRUNCATED_IMAGES = True ImageFile.MAX_IMAGE_PIXELS = None Image.MAX_IMAGE_PIXELS = None logger = logging.getLogger(__name__) warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning) IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) def collate(samples, pad_idx, eos_idx): if len(samples) == 0: return {} def merge(key):
return data_utils.collate_tokens(
0
2023-10-20 20:01:42+00:00
2k
ArnaudParant/sel
tests/test_sel.py
[ { "identifier": "elastic", "path": "scripts/elastic.py", "snippet": "def options():\ndef create_index(filepath, schema_filepath, index, overwrite=False):\ndef _delete_index(elastic, index):\ndef loads_ndjson(fd):\ndef insert(elastic, index, data):\ndef _create_index(elastic, index, schema_filepath):\ndef load_schema(filepath):\ndef elastic_connect():" }, { "identifier": "utils", "path": "sel/utils.py", "snippet": "class InternalServerError(Exception):\nclass InvalidClientInput(Exception):\nclass NotFound(Exception):\n def __init__(self, message):\n def __str__(self):\n def __init__(self, message):\n def __str__(self):\n def __init__(self, message):\n def __str__(self):\ndef set_if_exists(source, dest, keys):\ndef build_group(operator, items):\ndef get_lastest_sub_data(data):\ndef _detailor(exc):\ndef elastic_exception_detailor(handler):\n def handler_wrapper(*args, **kwargs):" } ]
import pytest import json import test_utils from scripts import elastic from sel import utils
750
TEST_INDEX_FILE = "/tests/data/sample_2017.json" TEST_SCHEMA_FILE = "/scripts/schema.json" TEST_INDEX = "test_index" class TestSEL: @pytest.fixture(scope="function", autouse=True) def init(self): elastic.create_index(TEST_INDEX_FILE, TEST_SCHEMA_FILE, TEST_INDEX, overwrite=True) def __cleaner(self, obj): if "_score" in obj: del obj["_score"] return obj @pytest.mark.parametrize(["query"], [ [{}], [{"meta": {"size": 100}}], [{"meta": {"size": 5}}], ]) def test_scroll(self, sel, query): with open(TEST_INDEX_FILE, "r") as f: expected_lines = {d["id"]: d for d in load_ndjson(f)} documents = [] scroll_id = None while True: res = sel.scroll(TEST_INDEX, query, "1m", scroll_id=scroll_id) documents += res["documents"] scroll_id = res["scroll_id"] if not len(res["documents"]): break sel.clear_scroll(res["scroll_id"]) found = {} for line in documents: j = self.__cleaner(line) found[j["id"]] = j for j2 in expected_lines.values(): j = found.get(j2["id"]) j2["_index"] = TEST_INDEX assert test_utils.dict_equals(j, j2), f"Got: {j}\nExpected: {j2}" size = len(found) file_size = len(expected_lines) assert size == file_size, f"Download line {size} != {file_size}" @pytest.mark.parametrize(["query"], [ [{"aggregations": {"labels": {"field": "label"}}}], [{"aggregations": {"ids": {"field": ".id"}}}], ]) def test_download_aggreg(self, sel, query): def sort_aggreg(aggreg): aggreg = sorted(aggreg, key=lambda o: o["key"]) return sorted(aggreg, key=lambda o: o["doc_count"], reverse=True) aggreg_key = list(query["aggregations"].keys())[0] query["aggregations"][aggreg_key]["size"] = 0 base_aggreg = {"field": "date", "interval": "week"} res = sel.search(TEST_INDEX, query)
TEST_INDEX_FILE = "/tests/data/sample_2017.json" TEST_SCHEMA_FILE = "/scripts/schema.json" TEST_INDEX = "test_index" class TestSEL: @pytest.fixture(scope="function", autouse=True) def init(self): elastic.create_index(TEST_INDEX_FILE, TEST_SCHEMA_FILE, TEST_INDEX, overwrite=True) def __cleaner(self, obj): if "_score" in obj: del obj["_score"] return obj @pytest.mark.parametrize(["query"], [ [{}], [{"meta": {"size": 100}}], [{"meta": {"size": 5}}], ]) def test_scroll(self, sel, query): with open(TEST_INDEX_FILE, "r") as f: expected_lines = {d["id"]: d for d in load_ndjson(f)} documents = [] scroll_id = None while True: res = sel.scroll(TEST_INDEX, query, "1m", scroll_id=scroll_id) documents += res["documents"] scroll_id = res["scroll_id"] if not len(res["documents"]): break sel.clear_scroll(res["scroll_id"]) found = {} for line in documents: j = self.__cleaner(line) found[j["id"]] = j for j2 in expected_lines.values(): j = found.get(j2["id"]) j2["_index"] = TEST_INDEX assert test_utils.dict_equals(j, j2), f"Got: {j}\nExpected: {j2}" size = len(found) file_size = len(expected_lines) assert size == file_size, f"Download line {size} != {file_size}" @pytest.mark.parametrize(["query"], [ [{"aggregations": {"labels": {"field": "label"}}}], [{"aggregations": {"ids": {"field": ".id"}}}], ]) def test_download_aggreg(self, sel, query): def sort_aggreg(aggreg): aggreg = sorted(aggreg, key=lambda o: o["key"]) return sorted(aggreg, key=lambda o: o["doc_count"], reverse=True) aggreg_key = list(query["aggregations"].keys())[0] query["aggregations"][aggreg_key]["size"] = 0 base_aggreg = {"field": "date", "interval": "week"} res = sel.search(TEST_INDEX, query)
expected = utils.get_lastest_sub_data(res["results"]["aggregations"][aggreg_key])["buckets"]
1
2023-10-16 09:03:13+00:00
2k
Qualcomm-AI-research/outlier-free-transformers
quantization/quantizers/uniform_quantizers.py
[ { "identifier": "QuantizerBase", "path": "quantization/quantizers/base_quantizers.py", "snippet": "class QuantizerBase(nn.Module):\n def __init__(self, n_bits, *args, per_channel=False, act_quant=False, **kwargs):\n super().__init__(*args, **kwargs)\n self.n_bits = n_bits\n self.act_quant = act_quant\n self.per_channel = per_channel\n self.state = None\n self.x_min_fp32 = self.x_max_fp32 = None\n\n @property\n def is_initialized(self):\n raise NotImplementedError()\n\n @property\n def x_max(self):\n raise NotImplementedError()\n\n @property\n def symmetric(self):\n raise NotImplementedError()\n\n @property\n def x_min(self):\n raise NotImplementedError()\n\n def forward(self, x_float):\n raise NotImplementedError()\n\n def _adjust_params_per_channel(self, x):\n raise NotImplementedError()\n\n def set_quant_range(self, x_min, x_max):\n raise NotImplementedError()\n\n def extra_repr(self):\n return \"n_bits={}, per_channel={}, is_initalized={}\".format(\n self.n_bits, self.per_channel, self.is_initialized\n )\n\n def reset(self):\n self._delta = None\n\n def fix_ranges(self):\n raise NotImplementedError()\n\n def make_range_trainable(self):\n raise NotImplementedError()" }, { "identifier": "QuantizerNotInitializedError", "path": "quantization/quantizers/quantizer_utils.py", "snippet": "class RoundStraightThrough(Function):\nclass ScaleGradient(Function):\nclass QuantizerNotInitializedError(Exception):\n def forward(ctx, x):\n def backward(ctx, output_grad):\n def forward(ctx, x, scale):\n def backward(ctx, output_grad):\n def __init__(self):" } ]
import torch from quantization.quantizers.base_quantizers import QuantizerBase from quantization.quantizers.quantizer_utils import ( QuantizerNotInitializedError, round_ste_func, scale_grad_func, )
918
# Copyright (c) 2023 Qualcomm Technologies, Inc. # All Rights Reserved. class AsymmetricUniformQuantizer(QuantizerBase): """ PyTorch Module that implements Asymmetric Uniform Quantization using STE. Quantizes its argument in the forward pass, passes the gradient 'straight through' on the backward pass, ignoring the quantization that occurred. Parameters ---------- n_bits: int Number of bits for quantization. scale_domain: str ('log', 'linear) with default='linear' Domain of scale factor per_channel: bool If True: allows for per-channel quantization """ def __init__(self, n_bits, scale_domain="linear", grad_scaling=False, eps=1e-8, **kwargs): super().__init__(n_bits=n_bits, **kwargs) assert scale_domain in ("linear", "log") self.register_buffer("_delta", None) self.register_buffer("_zero_float", None) self.scale_domain = scale_domain self.grad_scaling = grad_scaling self.eps = eps # A few useful properties @property def delta(self): if self._delta is not None: return self._delta else: raise QuantizerNotInitializedError() @property def zero_float(self): if self._zero_float is not None: return self._zero_float else: raise QuantizerNotInitializedError() @property def is_initialized(self): return self._delta is not None @property def symmetric(self): return False @property def int_min(self): # integer grid minimum return 0.0 @property def int_max(self): # integer grid maximum return 2.0**self.n_bits - 1 @property def scale(self): if self.scale_domain == "linear": return torch.clamp(self.delta, min=self.eps) elif self.scale_domain == "log": return torch.exp(self.delta) @property def zero_point(self):
# Copyright (c) 2023 Qualcomm Technologies, Inc. # All Rights Reserved. class AsymmetricUniformQuantizer(QuantizerBase): """ PyTorch Module that implements Asymmetric Uniform Quantization using STE. Quantizes its argument in the forward pass, passes the gradient 'straight through' on the backward pass, ignoring the quantization that occurred. Parameters ---------- n_bits: int Number of bits for quantization. scale_domain: str ('log', 'linear) with default='linear' Domain of scale factor per_channel: bool If True: allows for per-channel quantization """ def __init__(self, n_bits, scale_domain="linear", grad_scaling=False, eps=1e-8, **kwargs): super().__init__(n_bits=n_bits, **kwargs) assert scale_domain in ("linear", "log") self.register_buffer("_delta", None) self.register_buffer("_zero_float", None) self.scale_domain = scale_domain self.grad_scaling = grad_scaling self.eps = eps # A few useful properties @property def delta(self): if self._delta is not None: return self._delta else: raise QuantizerNotInitializedError() @property def zero_float(self): if self._zero_float is not None: return self._zero_float else: raise QuantizerNotInitializedError() @property def is_initialized(self): return self._delta is not None @property def symmetric(self): return False @property def int_min(self): # integer grid minimum return 0.0 @property def int_max(self): # integer grid maximum return 2.0**self.n_bits - 1 @property def scale(self): if self.scale_domain == "linear": return torch.clamp(self.delta, min=self.eps) elif self.scale_domain == "log": return torch.exp(self.delta) @property def zero_point(self):
zero_point = round_ste_func(self.zero_float)
1
2023-10-23 15:59:50+00:00
2k
QgZhan/ESVAE
main_ann_ae.py
[ { "identifier": "AverageMeter", "path": "utils.py", "snippet": "class AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r" }, { "identifier": "aboutCudaDevices", "path": "utils.py", "snippet": "class aboutCudaDevices():\r\n def __init__(self):\r\n pass\r\n\r\n def num_devices(self):\r\n \"\"\"Return number of devices connected.\"\"\"\r\n return cuda.Device.count()\r\n\r\n def devices(self):\r\n \"\"\"Get info on all devices connected.\"\"\"\r\n num = cuda.Device.count()\r\n print(\"%d device(s) found:\" % num)\r\n for i in range(num):\r\n print(cuda.Device(i).name(), \"(Id: %d)\" % i)\r\n\r\n def mem_info(self):\r\n \"\"\"Get available and total memory of all devices.\"\"\"\r\n available, total = cuda.mem_get_info()\r\n print(\"Available: %.2f GB\\nTotal: %.2f GB\" % (available / 1e9, total / 1e9))\r\n\r\n def attributes(self, device_id=0):\r\n \"\"\"Get attributes of device with device Id = device_id\"\"\"\r\n return cuda.Device(device_id).get_attributes()\r\n\r\n def info(self):\r\n \"\"\"Class representation as number of devices connected and about them.\"\"\"\r\n num = cuda.Device.count()\r\n string = \"\"\r\n string += (\"%d device(s) found:\\n\" % num)\r\n for i in range(num):\r\n string += (\" %d) %s (Id: %d)\\n\" % ((i + 1), cuda.Device(i).name(), i))\r\n string += (\" Memory: %.2f GB\\n\" % (cuda.Device(i).total_memory() / 1e9))\r\n return string\r" }, { "identifier": "load_dataset_ann", "path": "datasets/load_dataset_ann.py", "snippet": "def load_mnist(data_path, batch_size):\r\ndef load_fashionmnist(data_path,batch_size):\r\ndef load_celeba(data_path,batch_size):\r\ndef load_cifar10(data_path,batch_size):\r" } ]
import os import os.path import numpy as np import logging import argparse import pycuda.driver as cuda import torch import torchvision import models.ann_ae as ann_ae from torch.nn.utils import clip_grad_norm_ from torch.nn.utils import clip_grad_value_ from torch.utils.tensorboard import SummaryWriter from utils import AverageMeter from utils import aboutCudaDevices from datasets import load_dataset_ann
663
max_accuracy = 0 min_loss = 1000 def train(network, trainloader, opti, epoch):
max_accuracy = 0 min_loss = 1000 def train(network, trainloader, opti, epoch):
loss_meter = AverageMeter()
0
2023-10-23 07:33:27+00:00
2k
iesl/softmax_CPR_recommend
recbole/model/sequential_recommender/sasrec.py
[ { "identifier": "SequentialRecommender", "path": "recbole/model/abstract_recommender.py", "snippet": "class SequentialRecommender(AbstractRecommender):\n \"\"\"\n This is a abstract sequential recommender. All the sequential model should implement This class.\n \"\"\"\n type = ModelType.SEQUENTIAL\n\n def __init__(self, config, dataset):\n super(SequentialRecommender, self).__init__()\n\n # load dataset info\n self.USER_ID = config['USER_ID_FIELD']\n self.ITEM_ID = config['ITEM_ID_FIELD']\n self.ITEM_SEQ = self.ITEM_ID + config['LIST_SUFFIX']\n self.ITEM_SEQ_LEN = config['ITEM_LIST_LENGTH_FIELD']\n self.POS_ITEM_ID = self.ITEM_ID\n self.NEG_ITEM_ID = config['NEG_PREFIX'] + self.ITEM_ID\n self.max_seq_length = config['MAX_ITEM_LIST_LENGTH']\n self.n_items = dataset.num(self.ITEM_ID)\n\n def gather_indexes(self, output, gather_index):\n \"\"\"Gathers the vectors at the specific positions over a minibatch\"\"\"\n gather_index = gather_index.view(-1, 1, 1).expand(-1, -1, output.shape[-1])\n output_tensor = output.gather(dim=1, index=gather_index)\n return output_tensor.squeeze(1)" }, { "identifier": "TransformerEncoder", "path": "recbole/model/layers.py", "snippet": "class TransformerEncoder(nn.Module):\n r\"\"\" One TransformerEncoder consists of several TransformerLayers.\n\n - n_layers(num): num of transformer layers in transformer encoder. Default: 2\n - n_heads(num): num of attention heads for multi-head attention layer. Default: 2\n - hidden_size(num): the input and output hidden size. Default: 64\n - inner_size(num): the dimensionality in feed-forward layer. Default: 256\n - hidden_dropout_prob(float): probability of an element to be zeroed. Default: 0.5\n - attn_dropout_prob(float): probability of an attention score to be zeroed. Default: 0.5\n - hidden_act(str): activation function in feed-forward layer. Default: 'gelu'\n candidates: 'gelu', 'relu', 'swish', 'tanh', 'sigmoid'\n - layer_norm_eps(float): a value added to the denominator for numerical stability. 
Default: 1e-12\n\n \"\"\"\n\n def __init__(\n self,\n n_layers=2,\n n_heads=2,\n hidden_size=64,\n inner_size=256,\n hidden_dropout_prob=0.5,\n attn_dropout_prob=0.5,\n hidden_act='gelu',\n layer_norm_eps=1e-12\n ):\n\n super(TransformerEncoder, self).__init__()\n layer = TransformerLayer(\n n_heads, hidden_size, inner_size, hidden_dropout_prob, attn_dropout_prob, hidden_act, layer_norm_eps\n )\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(n_layers)])\n\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):\n \"\"\"\n Args:\n hidden_states (torch.Tensor): the input of the TransformerEncoder\n attention_mask (torch.Tensor): the attention mask for the input hidden_states\n output_all_encoded_layers (Bool): whether output all transformer layers' output\n\n Returns:\n all_encoder_layers (list): if output_all_encoded_layers is True, return a list consists of all transformer\n layers' output, otherwise return a list only consists of the output of last transformer layer.\n\n \"\"\"\n all_encoder_layers = []\n for layer_module in self.layer:\n hidden_states = layer_module(hidden_states, attention_mask)\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n if not output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n return all_encoder_layers" }, { "identifier": "BPRLoss", "path": "recbole/model/loss.py", "snippet": "class BPRLoss(nn.Module):\n \"\"\" BPRLoss, based on Bayesian Personalized Ranking\n\n Args:\n - gamma(float): Small value to avoid division by zero\n\n Shape:\n - Pos_score: (N)\n - Neg_score: (N), same shape as the Pos_score\n - Output: scalar.\n\n Examples::\n\n >>> loss = BPRLoss()\n >>> pos_score = torch.randn(3, requires_grad=True)\n >>> neg_score = torch.randn(3, requires_grad=True)\n >>> output = loss(pos_score, neg_score)\n >>> output.backward()\n \"\"\"\n\n def __init__(self, gamma=1e-10):\n super(BPRLoss, self).__init__()\n self.gamma = gamma\n\n def forward(self, pos_score, neg_score):\n loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score)).mean()\n return loss" } ]
import sys import torch import torch.nn.functional as F import math from torch import nn from recbole.model.abstract_recommender import SequentialRecommender from recbole.model.layers import TransformerEncoder from recbole.model.loss import BPRLoss
1321
# -*- coding: utf-8 -*- # @Time : 2020/9/18 11:33 # @Author : Hui Wang # @Email : hui.wang@ruc.edu.cn """ SASRec ################################################ Reference: Wang-Cheng Kang et al. "Self-Attentive Sequential Recommendation." in ICDM 2018. Reference: https://github.com/kang205/SASRec """ def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
# -*- coding: utf-8 -*- # @Time : 2020/9/18 11:33 # @Author : Hui Wang # @Email : hui.wang@ruc.edu.cn """ SASRec ################################################ Reference: Wang-Cheng Kang et al. "Self-Attentive Sequential Recommendation." in ICDM 2018. Reference: https://github.com/kang205/SASRec """ def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class SASRec(SequentialRecommender):
0
2023-10-21 16:31:44+00:00
2k
timapage/pyqt6-yolov8
src/models/detection/yolov8_detector_onnx.py
[ { "identifier": "DetectorBase", "path": "src/models/detection/detector_base.py", "snippet": "class DetectorBase(YoloPredictorBase):\n def draw_results(image, model_results):\n FONT_SCALE = 1e-3 \n THICKNESS_SCALE = 6e-4 " }, { "identifier": "ModelError", "path": "src/models/base/yolov8_base.py", "snippet": "class ModelError(Exception):\n pass" }, { "identifier": "xywh2xyxy", "path": "src/utils/boxes.py", "snippet": "def xywh2xyxy(x):\n # Convert bounding box (x, y, w, h) to bounding box (x1, y1, x2, y2)\n y = np.copy(x)\n y[..., 0] = x[..., 0] - x[..., 2] / 2\n y[..., 1] = x[..., 1] - x[..., 3] / 2\n y[..., 2] = x[..., 0] + x[..., 2] / 2\n y[..., 3] = x[..., 1] + x[..., 3] / 2\n return y" }, { "identifier": "multiclass_nms_class_agnostic", "path": "src/utils/boxes.py", "snippet": "def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):\n \"\"\"Multiclass NMS implemented in Numpy. Class-agnostic version.\"\"\"\n cls_inds = scores.argmax(1)\n cls_scores = scores[np.arange(len(cls_inds)), cls_inds]\n\n valid_score_mask = cls_scores > score_thr\n if valid_score_mask.sum() == 0:\n return None\n valid_scores = cls_scores[valid_score_mask]\n valid_boxes = boxes[valid_score_mask]\n valid_cls_inds = cls_inds[valid_score_mask]\n\n keep = nms(valid_boxes, valid_scores, nms_thr)\n #dets = []\n for i in keep:\n dets = np.concatenate(\n [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1\n )\n return dets" }, { "identifier": "get_classes", "path": "src/utils/general.py", "snippet": "def get_classes(class_txt_file):\n with open(class_txt_file, 'r') as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names" } ]
import numpy as np import cv2 as cv from onnxruntime import InferenceSession from src.models.detection.detector_base import DetectorBase, Model from src.models.base.yolov8_base import ModelError from src.utils.boxes import xywh2xyxy, multiclass_nms_class_agnostic from src.utils.general import get_classes
648
class YoloDetector(DetectorBase): def __init__(self): self._model = None def init(self, model_path, class_txt_path, confidence_threshold=0.3, iou_threshold=0.45):
class YoloDetector(DetectorBase): def __init__(self): self._model = None def init(self, model_path, class_txt_path, confidence_threshold=0.3, iou_threshold=0.45):
_class_names = get_classes(class_txt_path)
4
2023-10-18 09:21:01+00:00
2k
OthersideAI/self-operating-computer
operate/main.py
[ { "identifier": "ANSI_BRIGHT_MAGENTA", "path": "operate/utils/style.py", "snippet": "ANSI_BRIGHT_MAGENTA = \"\\033[95m\" if supports_ansi() else \"\" # Bright magenta text" }, { "identifier": "main", "path": "operate/dialog.py", "snippet": "def main(model, terminal_prompt, voice_mode=False):\n \"\"\"\n Main function for the Self-Operating Computer.\n\n Parameters:\n - model: The model used for generating responses.\n - terminal_prompt: A string representing the prompt provided in the terminal.\n - voice_mode: A boolean indicating whether to enable voice mode.\n\n Returns:\n None\n \"\"\"\n mic = None\n # Initialize `WhisperMic`, if `voice_mode` is True\n\n validation(model, voice_mode)\n\n if voice_mode:\n try:\n from whisper_mic import WhisperMic\n\n # Initialize WhisperMic if import is successful\n mic = WhisperMic()\n except ImportError:\n print(\n \"Voice mode requires the 'whisper_mic' module. Please install it using 'pip install -r requirements-audio.txt'\"\n )\n sys.exit(1)\n\n # Skip message dialog if prompt was given directly\n if not terminal_prompt:\n message_dialog(\n title=\"Self-Operating Computer\",\n text=\"Ask a computer to do anything.\",\n style=style,\n ).run()\n else:\n print(\"Running direct prompt...\")\n\n print(\"SYSTEM\", platform.system())\n # Clear the console\n if platform.system() == \"Windows\":\n os.system(\"cls\")\n else:\n print(\"\\033c\", end=\"\")\n\n if terminal_prompt: # Skip objective prompt if it was given as an argument\n objective = terminal_prompt\n elif voice_mode:\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RESET} Listening for your command... (speak now)\"\n )\n try:\n objective = mic.listen()\n except Exception as e:\n print(f\"{ANSI_RED}Error in capturing voice input: {e}{ANSI_RESET}\")\n return # Exit if voice input fails\n else:\n print(f\"{ANSI_GREEN}[Self-Operating Computer]\\n{ANSI_RESET}{USER_QUESTION}\")\n print(f\"{ANSI_YELLOW}[User]{ANSI_RESET}\")\n objective = prompt(style=style)\n\n assistant_message = {\"role\": \"assistant\", \"content\": USER_QUESTION}\n user_message = {\n \"role\": \"user\",\n \"content\": f\"Objective: {objective}\",\n }\n messages = [assistant_message, user_message]\n\n loop_count = 0\n\n while True:\n if config.debug:\n print(\"[loop] messages before next action:\\n\\n\\n\", messages[1:])\n try:\n response = asyncio.run(get_next_action(model, messages, objective))\n\n action = parse_response(response)\n action_type = action.get(\"type\")\n action_detail = action.get(\"data\")\n\n except ModelNotRecognizedException as e:\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}\"\n )\n break\n except Exception as e:\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}\"\n )\n break\n\n if action_type == \"DONE\":\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BLUE} Objective complete {ANSI_RESET}\"\n )\n summary = summarize(model, messages, objective)\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BLUE} Summary\\n{ANSI_RESET}{summary}\"\n )\n break\n\n if action_type != \"UNKNOWN\":\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BRIGHT_MAGENTA} [Act] {action_type} {ANSI_RESET}{action_detail}\"\n )\n\n function_response = \"\"\n if action_type == \"SEARCH\":\n function_response = search(action_detail)\n elif action_type == \"TYPE\":\n function_response = keyboard_type(action_detail)\n elif action_type == \"CLICK\":\n function_response = click(action_detail)\n else:\n print(\n 
f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] something went wrong :({ANSI_RESET}\"\n )\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] AI response\\n{ANSI_RESET}{response}\"\n )\n break\n\n print(\n f\"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BRIGHT_MAGENTA} [Act] {action_type} COMPLETE {ANSI_RESET}{function_response}\"\n )\n\n message = {\n \"role\": \"assistant\",\n \"content\": function_response,\n }\n messages.append(message)\n\n loop_count += 1\n if loop_count > 15:\n break" } ]
import argparse from operate.utils.style import ANSI_BRIGHT_MAGENTA from operate.dialog import main
1350
""" Self-Operating Computer """ def main_entry(): parser = argparse.ArgumentParser( description="Run the self-operating-computer with a specified model." ) parser.add_argument( "-m", "--model", help="Specify the model to use", required=False, default="gpt-4", ) # Add a voice flag parser.add_argument( "--voice", help="Use voice input mode", action="store_true", ) # Allow for direct input of prompt parser.add_argument( "--prompt", help="Directly input the objective prompt", type=str, required=False, ) try: args = parser.parse_args()
""" Self-Operating Computer """ def main_entry(): parser = argparse.ArgumentParser( description="Run the self-operating-computer with a specified model." ) parser.add_argument( "-m", "--model", help="Specify the model to use", required=False, default="gpt-4", ) # Add a voice flag parser.add_argument( "--voice", help="Use voice input mode", action="store_true", ) # Allow for direct input of prompt parser.add_argument( "--prompt", help="Directly input the objective prompt", type=str, required=False, ) try: args = parser.parse_args()
main(
1
2023-11-04 03:13:45+00:00
2k
netease-youdao/EmotiVoice
frontend.py
[ { "identifier": "g2p_cn", "path": "frontend_cn.py", "snippet": "def split_py(py):\ndef has_chinese_punctuation(text):\ndef has_english_punctuation(text):\ndef number_to_chinese(number):\ndef tn_chinese(text):\ndef g2p_cn(text):" }, { "identifier": "ROOT_DIR", "path": "frontend_en.py", "snippet": "ROOT_DIR = os.path.dirname(os.path.abspath(\"__file__\"))\ndef read_lexicon(lex_path):\ndef get_eng_phoneme(text, g2p, lexicon, pad_sos_eos=True):" } ]
import re import sys from frontend_cn import g2p_cn, re_digits, tn_chinese from frontend_en import ROOT_DIR, read_lexicon, G2p, get_eng_phoneme from os.path import isfile
865
# Copyright 2023, YOUDAO # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Thanks to GuGCoCo and PatroxGaurab for identifying the issue: # the results differ between frontend.py and frontend_en.py. Here's a quick fix. #re_english_word = re.compile('([a-z\-\.\'\s,;\:\!\?]+|\d+[\d\.]*)', re.I) re_english_word = re.compile('([^\u4e00-\u9fa5]+|[ \u3002\uff0c\uff1f\uff01\uff1b\uff1a\u201c\u201d\u2018\u2019\u300a\u300b\u3008\u3009\u3010\u3011\u300e\u300f\u2014\u2026\u3001\uff08\uff09\u4e00-\u9fa5]+)', re.I) def g2p_cn_en(text, g2p, lexicon): # Our policy dictates that if the text contains Chinese, digits are to be converted into Chinese. text=tn_chinese(text) parts = re_english_word.split(text) parts=list(filter(None, parts)) tts_text = ["<sos/eos>"] chartype = '' text_contains_chinese = contains_chinese(text) for part in parts: if part == ' ' or part == '': continue if re_digits.match(part) and (text_contains_chinese or chartype == '') or contains_chinese(part): if chartype == 'en': tts_text.append('eng_cn_sp') phoneme = g2p_cn(part).split()[1:-1] chartype = 'cn' elif re_english_word.match(part): if chartype == 'cn': if "sp" in tts_text[-1]: "" else: tts_text.append('cn_eng_sp') phoneme = get_eng_phoneme(part, g2p, lexicon, False).split() if not phoneme : # tts_text.pop() continue else: chartype = 'en' else: continue tts_text.extend( phoneme ) tts_text=" ".join(tts_text).split() if "sp" in tts_text[-1]: tts_text.pop() tts_text.append("<sos/eos>") return " ".join(tts_text) def contains_chinese(text): pattern = re.compile(r'[\u4e00-\u9fa5]') match = re.search(pattern, text) return match is not None if __name__ == "__main__": lexicon = read_lexicon(f"{ROOT_DIR}/lexicon/librispeech-lexicon.txt")
# Copyright 2023, YOUDAO # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Thanks to GuGCoCo and PatroxGaurab for identifying the issue: # the results differ between frontend.py and frontend_en.py. Here's a quick fix. #re_english_word = re.compile('([a-z\-\.\'\s,;\:\!\?]+|\d+[\d\.]*)', re.I) re_english_word = re.compile('([^\u4e00-\u9fa5]+|[ \u3002\uff0c\uff1f\uff01\uff1b\uff1a\u201c\u201d\u2018\u2019\u300a\u300b\u3008\u3009\u3010\u3011\u300e\u300f\u2014\u2026\u3001\uff08\uff09\u4e00-\u9fa5]+)', re.I) def g2p_cn_en(text, g2p, lexicon): # Our policy dictates that if the text contains Chinese, digits are to be converted into Chinese. text=tn_chinese(text) parts = re_english_word.split(text) parts=list(filter(None, parts)) tts_text = ["<sos/eos>"] chartype = '' text_contains_chinese = contains_chinese(text) for part in parts: if part == ' ' or part == '': continue if re_digits.match(part) and (text_contains_chinese or chartype == '') or contains_chinese(part): if chartype == 'en': tts_text.append('eng_cn_sp') phoneme = g2p_cn(part).split()[1:-1] chartype = 'cn' elif re_english_word.match(part): if chartype == 'cn': if "sp" in tts_text[-1]: "" else: tts_text.append('cn_eng_sp') phoneme = get_eng_phoneme(part, g2p, lexicon, False).split() if not phoneme : # tts_text.pop() continue else: chartype = 'en' else: continue tts_text.extend( phoneme ) tts_text=" ".join(tts_text).split() if "sp" in tts_text[-1]: tts_text.pop() tts_text.append("<sos/eos>") return " ".join(tts_text) def contains_chinese(text): pattern = re.compile(r'[\u4e00-\u9fa5]') match = re.search(pattern, text) return match is not None if __name__ == "__main__": lexicon = read_lexicon(f"{ROOT_DIR}/lexicon/librispeech-lexicon.txt")
g2p = G2p()
1
2023-11-08 10:15:27+00:00
2k
daveshap/OpenAI_Agent_Swarm
agents/tool_maker/tool_user.py
[ { "identifier": "chat", "path": "shared/utils.py", "snippet": "def chat(client, thread, assistant, functions):\n while True:\n user_message = input(\"You: \")\n\n # add user message to thread\n thread_message = client.beta.threads.messages.create(\n thread.id,\n role=\"user\",\n content=user_message,\n ) \n\n # get assistant response in thread\n run = client.beta.threads.runs.create(\n thread_id=thread.id,\n assistant_id=assistant.id,\n )\n\n # wait for run to complete\n wait_time = 0\n while True:\n if wait_time % 5 == 0:\n print(f\"waiting for run to complete...\", flush=True)\n wait_time += 1\n time.sleep(1)\n\n run = client.beta.threads.runs.retrieve(\n thread_id=thread.id,\n run_id=run.id,\n )\n\n if run.status == \"completed\":\n break\n elif run.status == \"in_progress\":\n continue\n elif run.status == \"queued\":\n continue\n elif run.status == \"requires_action\":\n if run.required_action.type == 'submit_tool_outputs':\n tool_calls = run.required_action.submit_tool_outputs.tool_calls\n\n tool_outputs = []\n for tc in tool_calls:\n function_to_call = functions.get(tc.function.name, None)\n if not function_to_call:\n raise ValueError(f\"Function {tc.function.name} not found in execution environment\")\n function_args = json.loads(tc.function.arguments)\n function_response = function_to_call(**function_args)\n\n tool_outputs.append({\n \"tool_call_id\": tc.id,\n \"output\": json.dumps(function_response),\n })\n\n print(f\"Submitting tool outputs...\", flush=True)\n run = client.beta.threads.runs.submit_tool_outputs(\n thread_id=thread.id,\n run_id=run.id,\n tool_outputs=tool_outputs\n )\n else:\n input(f'Run status: {run.status}. press enter to continue, or ctrl+c to quit')\n\n # get most recent message from thread\n thread_messages = client.beta.threads.messages.list(thread.id, limit=10, order='desc')\n\n # get assistant response from message\n assistant_response = thread_messages.data[0].content[0].text.value\n\n print(f\"\\n\\nBot: {assistant_response}\\n\\n\", flush=True)\n\n # continue?\n try:\n input(\"Press enter to continue chatting, or ctrl+c to stop chat\\n\")\n except KeyboardInterrupt:\n print(f\"Stopping chat\\n\" + 90*\"-\" + \"\\n\\n\", flush=True)\n break" }, { "identifier": "get_openai_client", "path": "shared/openai_config.py", "snippet": "def get_openai_client():\n settings = Settings()\n return OpenAI(api_key=settings.OPENAI_API_KEY)" } ]
import os import json from shared.utils import chat as chat_loop from shared.openai_config import get_openai_client
1,171
""" Create an assistant using the tools from tool_creator using the assistant creation API """ client = get_openai_client() def create_tool_user(assistant_details): # create the assistant tool_user = client.beta.assistants.create(**assistant_details["build_params"]) print(f"Created assistant {tool_user.id} to use tools\n\n" + 90*"-" + "\n\n", flush=True) # save the assistant info to a json file info_to_export = { "assistant_id": tool_user.id, "assistant_details": assistant_details, } os.makedirs('assistants', exist_ok=True) with open('assistants/tool_user.json', 'w') as f: json.dump(info_to_export, f, indent=4) return tool_user def talk_to_tool_user(assistant_details): """ talk to the assistant to use the tools """ # check if json file exists try: os.makedirs('assistants', exist_ok=True) with open('assistants/tool_user.json') as f: create_new = input(f'Assistant details found in tool_user.json. Create a new assistant? [y/N]') if create_new == 'y': raise Exception("User wants a new assistant") assistant_from_json = json.load(f) tool_user = client.beta.assistants.retrieve(assistant_from_json['assistant_id']) print(f"Loaded assistant details from tool_user.json\n\n" + 90*"-" + "\n\n", flush=True) print(f'Assistant {tool_user.id}:\n') assistant_details = assistant_from_json["assistant_details"] except: # create the assistant first tool_user = create_tool_user(assistant_details) # exec the functions from the py files os.makedirs('tools', exist_ok=True) functions = assistant_details["functions"] for func in functions: print(f"Loading function {func} into execution environment", flush=True) with open('tools/' + func + '.py') as f: exec(f.read(), globals()) functions.update({func: eval(func)}) # Create thread thread = client.beta.threads.create() # chat with the assistant
""" Create an assistant using the tools from tool_creator using the assistant creation API """ client = get_openai_client() def create_tool_user(assistant_details): # create the assistant tool_user = client.beta.assistants.create(**assistant_details["build_params"]) print(f"Created assistant {tool_user.id} to use tools\n\n" + 90*"-" + "\n\n", flush=True) # save the assistant info to a json file info_to_export = { "assistant_id": tool_user.id, "assistant_details": assistant_details, } os.makedirs('assistants', exist_ok=True) with open('assistants/tool_user.json', 'w') as f: json.dump(info_to_export, f, indent=4) return tool_user def talk_to_tool_user(assistant_details): """ talk to the assistant to use the tools """ # check if json file exists try: os.makedirs('assistants', exist_ok=True) with open('assistants/tool_user.json') as f: create_new = input(f'Assistant details found in tool_user.json. Create a new assistant? [y/N]') if create_new == 'y': raise Exception("User wants a new assistant") assistant_from_json = json.load(f) tool_user = client.beta.assistants.retrieve(assistant_from_json['assistant_id']) print(f"Loaded assistant details from tool_user.json\n\n" + 90*"-" + "\n\n", flush=True) print(f'Assistant {tool_user.id}:\n') assistant_details = assistant_from_json["assistant_details"] except: # create the assistant first tool_user = create_tool_user(assistant_details) # exec the functions from the py files os.makedirs('tools', exist_ok=True) functions = assistant_details["functions"] for func in functions: print(f"Loading function {func} into execution environment", flush=True) with open('tools/' + func + '.py') as f: exec(f.read(), globals()) functions.update({func: eval(func)}) # Create thread thread = client.beta.threads.create() # chat with the assistant
chat_loop(client, thread, tool_user, functions)
1
2023-11-07 23:12:05+00:00
2k
S-LoRA/S-LoRA
slora/common/basemodel/layer_infer/base_layer_infer.py
[ { "identifier": "mark_cost_time", "path": "slora/utils/infer_utils.py", "snippet": "def mark_cost_time(func_name):\n def inner_func(func):\n def time_func(*args, **kwargs):\n if dist.get_rank() in [0, 1] and is_show_cost_time:\n torch.cuda.synchronize()\n start_time = time.time()\n ans = func(*args, **kwargs)\n torch.cuda.synchronize()\n print(func_name, \"cost time:\", (time.time() - start_time) * 1000)\n return ans\n else:\n torch.cuda.synchronize()\n ans = func(*args, **kwargs)\n torch.cuda.synchronize()\n return ans\n\n return time_func\n\n return inner_func" }, { "identifier": "InferStateInfo", "path": "slora/common/basemodel/infer_struct.py", "snippet": "class InferStateInfo:\n \"\"\"\n 推理时用的信息结构体\n \"\"\"\n\n def __init__(self):\n self.batch_size = None\n self.total_token_num = None\n self.b_loc = None\n self.b_start_loc = None\n self.b_seq_len = None\n self.max_len_in_batch = None\n self.is_prefill = None\n \n self.mem_manager = None\n \n self.prefill_mem_index = None\n self.prefill_key_buffer = None\n self.prefill_value_buffer = None\n \n self.decode_is_contiguous = None\n self.decode_mem_start = None \n self.decode_mem_end = None\n self.decode_mem_index = None\n self.decode_key_buffer = None \n self.decode_value_buffer = None\n \n def init_some_extra_state(self, \n model, \n batch_size, \n total_token_num,\n max_len_in_batch,\n input_ids : torch.Tensor,\n b_loc : torch.Tensor,\n b_start_loc : torch.Tensor,\n b_seq_len : torch.Tensor,\n is_prefill):\n pass" }, { "identifier": "BaseLayerWeight", "path": "slora/common/basemodel/layer_weights/base_layer_weight.py", "snippet": "class BaseLayerWeight:\n def __init__(self):\n pass\n\n def load_hf_weights(self, weights):\n \"\"\"\n load weights\n \"\"\"\n pass\n\n\n def init_static_params(self):\n \"\"\"\n design for some static init params, many model dont need do this.\n \"\"\"\n pass\n\n def verify_load(self):\n \"\"\"\n verify all load is ok\n \"\"\"\n raise Exception(\"must verify weights load ok\")\n pass\n\n def _cuda(self, cpu_tensor):\n return cpu_tensor.contiguous().to(self.data_type_).cuda()" } ]
from slora.utils.infer_utils import mark_cost_time from slora.common.basemodel.infer_struct import InferStateInfo from slora.common.basemodel.layer_weights.base_layer_weight import BaseLayerWeight
707
class BaseLayerInfer: def __init__(self) -> None: pass
class BaseLayerInfer: def __init__(self) -> None: pass
@mark_cost_time("pre context forward") # dont to remove this, will make performence down, did not know why
0
2023-11-05 04:08:36+00:00
2k
disler/multi-agent-postgres-data-analytics
postgres_da_ai_agent/modules/orchestrator.py
[ { "identifier": "AgentInstruments", "path": "postgres_da_ai_agent/agents/instruments.py", "snippet": "class AgentInstruments:\n \"\"\"\n Base class for multli-agent instruments that are tools, state, and functions that an agent can use across the lifecycle of conversations\n \"\"\"\n\n def __init__(self) -> None:\n self.session_id = None\n self.messages = []\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def sync_messages(self, messages: list):\n \"\"\"\n Syncs messages with the orchestrator\n \"\"\"\n raise NotImplementedError\n\n def make_agent_chat_file(self, team_name: str):\n return os.path.join(self.root_dir, f\"agent_chats_{team_name}.json\")\n\n def make_agent_cost_file(self, team_name: str):\n return os.path.join(self.root_dir, f\"agent_cost_{team_name}.json\")\n\n @property\n def root_dir(self):\n return os.path.join(BASE_DIR, self.session_id)" }, { "identifier": "llm", "path": "postgres_da_ai_agent/modules/llm.py", "snippet": "def safe_get(data, dot_chained_keys):\ndef response_parser(response: Dict[str, Any]):\ndef prompt(\n prompt: str,\n model: str = \"gpt-4-1106-preview\",\n instructions: str = \"You are a helpful assistant.\",\n) -> str:\ndef prompt_func(\n prompt: str,\n turbo_tools: List[TurboTool],\n model: str = \"gpt-4-1106-preview\",\n instructions: str = \"You are a helpful assistant.\",\n) -> str:\ndef prompt_json_response(\n prompt: str,\n model: str = \"gpt-4-1106-preview\",\n instructions: str = \"You are a helpful assistant.\",\n) -> str:\ndef add_cap_ref(\n prompt: str, prompt_suffix: str, cap_ref: str, cap_ref_content: str\n) -> str:\ndef count_tokens(text: str):\ndef estimate_price_and_tokens(text, model=\"gpt-4\"):" }, { "identifier": "Chat", "path": "postgres_da_ai_agent/types.py", "snippet": "class Chat:\n from_name: str\n to_name: str\n message: str\n created: int = field(default_factory=time.time)" }, { "identifier": "ConversationResult", "path": "postgres_da_ai_agent/types.py", "snippet": "class ConversationResult:\n success: bool\n messages: List[Chat]\n cost: float\n tokens: int\n last_message_str: str\n error_message: str" } ]
import dataclasses import json import autogen from typing import List, Optional, Tuple from postgres_da_ai_agent.agents.instruments import AgentInstruments from postgres_da_ai_agent.modules import llm from postgres_da_ai_agent.types import Chat, ConversationResult
705
class Orchestrator: """ Orchestrators manage conversations between multi-agent teams. """ def __init__( self, name: str, agents: List[autogen.ConversableAgent],
class Orchestrator: """ Orchestrators manage conversations between multi-agent teams. """ def __init__( self, name: str, agents: List[autogen.ConversableAgent],
instruments: AgentInstruments,
0
2023-11-04 20:15:46+00:00
2k
fleet-ai/context
utils/ai.py
[ { "identifier": "OPENAI_MODELS", "path": "constants/cli.py", "snippet": "OPENAI_MODELS = [\n \"gpt-4-1106-preview\",\n \"gpt-4\",\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n]" }, { "identifier": "SYSTEM_PROMPT", "path": "constants/ai.py", "snippet": "SYSTEM_PROMPT = \"\"\"\nYou are an expert in Python libraries. You carefully provide accurate, factual, thoughtful, nuanced answers, and are brilliant at reasoning. If you think there might not be a correct answer, you say so.\nEach token you produce is another opportunity to use computation, therefore you always spend a few sentences explaining background context, assumptions, and step-by-step thinking BEFORE you try to answer a question.\nYour users are experts in AI and ethics, so they already know you're a language model and your capabilities and limitations, so don't remind them of that. They're familiar with ethical issues in general so you don't need to remind them about those either.\n\nYour users are also in a CLI environment. You are capable of writing and running code. DO NOT write hypothetical code. ALWAYS write real code that will execute and run end-to-end.\n\"\"\"" }, { "identifier": "PROMPT", "path": "constants/ai.py", "snippet": "PROMPT = \"\"\"\nInstructions:\n- Be objective, direct. Include literal information from the context, don't add any conclusion or subjective information.\n- When writing code, ALWAYS have some sort of output (like a print statement). If you're writing a function, call it at the end. Do not generate the output, because the user can run it themselves.\n- ALWAYS cite your sources. Context will be given to you after the text ### Context source_url ### with source_url being the url to the file. For example, ### Context https://example.com/docs/api.html#files ### will have a source_url of https://example.com/docs/api.html#files.\n- When you cite your source, please cite it as [num] with `num` starting at 1 and incrementing with each source cited (1, 2, 3, ...). At the bottom, have a newline-separated `num: source_url` at the end of the response. ALWAYS add a new line between sources or else the user won't be able to read it. DO NOT convert links into markdown, EVER! If you do that, the user will not be able to click on the links.\n\nFor example:\n### Context https://example.com/docs/api.html#pdfs ###\nI'm a big fan of PDFs.\n\n### Context https://example.com/docs/api.html#csvs ###\nI'm a big fan of CSVs.\n\n### Prompt ###\nWhat is this person a big fan of?\n\n### Response ###\nThis person is a big fan of PDFs[1] and CSVs[2].\n\n1: https://example.com/docs/api.html#pdfs\n2: https://example.com/docs/api.html#csvs\n\"\"\"" }, { "identifier": "API_URL", "path": "constants/ai.py", "snippet": "API_URL = \"https://foundation.fleet.so\"" } ]
import os import json import tiktoken import openai import requests from openai import OpenAI from constants.cli import OPENAI_MODELS from constants.ai import SYSTEM_PROMPT, PROMPT, API_URL
874
# pylint: disable=W0707 # pylint: disable=W0719 def retrieve(query, k=10, filters=None): """Retrieves and returns dict. Args: query (str): User query to pass in k (int, optional): number of results passed back. Defaults to 10. filters (dict, optional): Filters to apply to the query. You can filter based off any piece of metadata by passing in a dict of the format {metadata_name: filter_value} ie {"library_id": "1234"}. See the README for more details: https://github.com/fleet-ai/context/tree/main#using-fleet-contexts-rich-metadata Returns: list: List of queried results """
# pylint: disable=W0707 # pylint: disable=W0719 def retrieve(query, k=10, filters=None): """Retrieves and returns dict. Args: query (str): User query to pass in k (int, optional): number of results passed back. Defaults to 10. filters (dict, optional): Filters to apply to the query. You can filter based off any piece of metadata by passing in a dict of the format {metadata_name: filter_value} ie {"library_id": "1234"}. See the README for more details: https://github.com/fleet-ai/context/tree/main#using-fleet-contexts-rich-metadata Returns: list: List of queried results """
url = f"{API_URL}/query"
3
2023-11-02 07:07:13+00:00
2k
OpenBMB/ProAgent
ProAgent/agent/gpt4_function.py
[ { "identifier": "logger", "path": "ProAgent/loggers/logs.py", "snippet": "class JsonFileHandler(logging.FileHandler):\nclass JsonFormatter(logging.Formatter):\nclass Logger(metaclass=Singleton):\nclass TypingConsoleHandler(logging.StreamHandler):\nclass ConsoleHandler(logging.StreamHandler):\nclass AutoGptFormatter(logging.Formatter):\n def __init__(self, filename, mode=\"a\", encoding=None, delay=False):\n def emit(self, record):\n def format(self, record):\n def __init__(self):\n def typewriter_log(\n self, title=\"\", title_color=\"\", content=\"\", speak_text=False, level=logging.INFO\n ):\n def debug(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def info(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def warn(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n def error(self, title, message=\"\"):\n def _log(\n self,\n title: str = \"\",\n title_color: str = \"\",\n message: str = \"\",\n level=logging.INFO,\n ):\n def set_level(self, level):\n def double_check(self, additionalText=None):\n def log_json(self, data: Any, file_name: str) -> None:\n def get_log_directory(self):\n def emit(self, record):\n def emit(self, record) -> None:\n def format(self, record: LogRecord) -> str:\ndef remove_color_codes(s: str) -> str:\ndef print_action_base(action: Action):\ndef print_action_tool(action: Action):" }, { "identifier": "_chat_completion_request", "path": "ProAgent/agent/utils.py", "snippet": "def _chat_completion_request(**args):\n \"\"\"\n Generates a chat completion request with the given arguments and attempts to retrieve the completed output.\n\n Args:\n **args: Additional keyword arguments for the chat completion request.\n\n Returns:\n The completed output if the request is successful, otherwise None.\n \"\"\"\n\n for i in range(3):\n if i > 0:\n logger.info(f\"LLM retry for the {i+1}'th time\")\n\n try:\n output, output_code = _chat_completion_request_without_retry(**args)\n if output_code == LLMStatusCode.SUCCESS:\n return output\n except func_timeout.exceptions.FunctionTimedOut: #TLE\n logger.info(f\"LLM response time out\")\n continue" } ]
import logging import json from typing import List, Dict from colorama import Fore, Style from ProAgent.loggers.logs import logger from ProAgent.agent.utils import _chat_completion_request
849
class OpenAIFunction(): def __init__(self): pass def parse(self, **args): """ Parses the given arguments by making a chat completion request. Args: **args: The keyword arguments to be passed to the chat completion request. Returns: Tuple: A tuple containing the parsed content, function name, function arguments, and the original message. Raises: None. """ retry_time = 1 max_time = 3 for i in range(max_time): output = _chat_completion_request(**args) if isinstance(output, Dict): usage = output["usage"] message = output["choices"][0]["message"] print(usage) if "function_call" in message.keys(): break else: args['messages'].append({"role": "assistant", "content": message['content']}) args['messages'].append({"role": 'user', "content": "No Function call here! You should always use a function call as your response."}) retry_time += 1
class OpenAIFunction(): def __init__(self): pass def parse(self, **args): """ Parses the given arguments by making a chat completion request. Args: **args: The keyword arguments to be passed to the chat completion request. Returns: Tuple: A tuple containing the parsed content, function name, function arguments, and the original message. Raises: None. """ retry_time = 1 max_time = 3 for i in range(max_time): output = _chat_completion_request(**args) if isinstance(output, Dict): usage = output["usage"] message = output["choices"][0]["message"] print(usage) if "function_call" in message.keys(): break else: args['messages'].append({"role": "assistant", "content": message['content']}) args['messages'].append({"role": 'user', "content": "No Function call here! You should always use a function call as your response."}) retry_time += 1
logger._log(f"{Fore.RED} Retry for the {retry_time}'th time{Style.RESET_ALL}")
0
2023-11-03 01:20:14+00:00
2k
LLaVA-VL/LLaVA-Plus-Codebase
serve/blip2grounding_worker.py
[ { "identifier": "WORKER_HEART_BEAT_INTERVAL", "path": "serve/constants.py", "snippet": "WORKER_HEART_BEAT_INTERVAL = int(os.getenv(\"FASTCHAT_WORKER_HEART_BEAT_INTERVAL\", 45))" }, { "identifier": "ErrorCode", "path": "serve/constants.py", "snippet": "class ErrorCode(IntEnum):\n \"\"\"\n https://platform.openai.com/docs/guides/error-codes/api-errors\n \"\"\"\n\n VALIDATION_TYPE_ERROR = 40001\n\n INVALID_AUTH_KEY = 40101\n INCORRECT_AUTH_KEY = 40102\n NO_PERMISSION = 40103\n\n INVALID_MODEL = 40301\n PARAM_OUT_OF_RANGE = 40302\n CONTEXT_OVERFLOW = 40303\n\n RATE_LIMIT = 42901\n QUOTA_EXCEEDED = 42902\n ENGINE_OVERLOADED = 42903\n\n INTERNAL_ERROR = 50001\n CUDA_OUT_OF_MEMORY = 50002\n GRADIO_REQUEST_ERROR = 50003\n GRADIO_STREAM_UNKNOWN_ERROR = 50004\n CONTROLLER_NO_WORKER = 50005\n CONTROLLER_WORKER_TIMEOUT = 50006" }, { "identifier": "SERVER_ERROR_MSG", "path": "serve/constants.py", "snippet": "SERVER_ERROR_MSG = (\n \"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\"\n)" }, { "identifier": "build_logger", "path": "serve/utils.py", "snippet": "def build_logger(logger_name, logger_filename):\n global handler\n\n formatter = logging.Formatter(\n fmt=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n # Set the format of root handlers\n if not logging.getLogger().handlers:\n if sys.version_info[1] >= 9:\n # This is for windows\n logging.basicConfig(level=logging.INFO, encoding=\"utf-8\")\n else:\n if platform.system() == \"Windows\":\n warnings.warn(\n \"If you are running on Windows, \"\n \"we recommend you use Python >= 3.9 for UTF-8 encoding.\"\n )\n logging.basicConfig(level=logging.INFO)\n logging.getLogger().handlers[0].setFormatter(formatter)\n\n # Redirect stdout and stderr to loggers\n stdout_logger = logging.getLogger(\"stdout\")\n stdout_logger.setLevel(logging.INFO)\n sl = StreamToLogger(stdout_logger, logging.INFO)\n sys.stdout = sl\n\n stderr_logger = logging.getLogger(\"stderr\")\n stderr_logger.setLevel(logging.ERROR)\n sl = StreamToLogger(stderr_logger, logging.ERROR)\n sys.stderr = sl\n\n # Get logger\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n\n os.makedirs(LOGDIR, exist_ok=True)\n filename = os.path.join(LOGDIR, logger_filename)\n handler = logging.handlers.TimedRotatingFileHandler(\n filename, when=\"D\", utc=True, encoding=\"utf-8\"\n )\n handler.setFormatter(formatter)\n\n for logger in [stdout_logger, stderr_logger, logger]:\n if logger in visited_loggers:\n continue\n visited_loggers.add(logger)\n logger.addHandler(handler)\n\n return logger" }, { "identifier": "pretty_print_semaphore", "path": "serve/utils.py", "snippet": "def pretty_print_semaphore(semaphore):\n \"\"\"Print a semaphore in better format.\"\"\"\n if semaphore is None:\n return \"None\"\n return f\"Semaphore(value={semaphore._value}, locked={semaphore.locked()})\"" } ]
import sys, os import argparse import asyncio import dataclasses import logging import json import os import sys import time import threading import uuid import base64 import numpy as np import requests import groundingdino.datasets.transforms as T import pycocotools.mask as mask_util import torch import torch.nn.functional as F import uvicorn from groundingdino.util import box_ops from segment_anything import build_sam from segment_anything.predictor import SamPredictor from typing import List, Tuple, Union from io import BytesIO from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse, JSONResponse from PIL import Image from demo.inference_on_a_image import get_grounding_output from groundingdino.util.inference import load_model, predict from transformers import ( AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer, AutoModel, ) from transformers import ( AutoTokenizer, AutoModelForCausalLM, LLaMATokenizer, AutoModel, ) from serve.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG from serve.utils import build_logger, pretty_print_semaphore
1147
""" A model worker executes the model. """ sys.path.append(os.path.join(os.path.dirname(__file__), "..")) try: except ImportError: GB = 1 << 30 now_file_name = os.__file__ logdir = "logs/workers/" os.makedirs(logdir, exist_ok=True) logfile = os.path.join(logdir, f"{now_file_name}.log") worker_id = str(uuid.uuid4())[:6]
""" A model worker executes the model. """ sys.path.append(os.path.join(os.path.dirname(__file__), "..")) try: except ImportError: GB = 1 << 30 now_file_name = os.__file__ logdir = "logs/workers/" os.makedirs(logdir, exist_ok=True) logfile = os.path.join(logdir, f"{now_file_name}.log") worker_id = str(uuid.uuid4())[:6]
logger = build_logger(now_file_name, logfile)
3
2023-11-07 13:06:02+00:00
2k
opendilab/LLMRiddles
llmriddles/questions/level2.py
[ { "identifier": "register_question", "path": "llmriddles/questions/question.py", "snippet": "def register_question(text: Union[Mapping[str, str], str],\n checkers: Union[Mapping[str, SingleLangCheckerTyping], MultiLangCheckerTyping],\n name=Union[Mapping[str, str], str],\n level: int = 1, default_lang='cn'):\n \n checker = checkers if isinstance(checkers, Checker) else Checker(checkers)\n \n if isinstance(text, str):\n texts = {default_lang: text}\n else:\n texts = text\n\n if isinstance(name, str):\n names = {default_lang: name}\n else:\n names = name\n\n _KNOWN_PROBLEMS.append(Question(texts, checker, names, level))" }, { "identifier": "get_all_numbers", "path": "llmriddles/questions/math_tools.py", "snippet": "def get_all_numbers(text: str):\n return get_all_numbers_in_a_sentence(text) + get_all_numbers_in_a_sentence_with_comma(text)" } ]
import re
import sympy
from typing import Optional, Tuple
from .question import register_question
from .math_tools import get_all_numbers
679
CN_TEXT_1 = """ 第二章第一题(质数长度),你需要提出一个字数是质数的问题,使回答的长度刚好是它的下一个质数。 """ EN_TEXT_1 = """ For the first question in chapter 2, You need to come up with a question that has a prime number of words, so the answer's length is exactly the next prime number. """ def _is_prime(v): return sympy.isprime(v) def _next_prime(v): while v: v += 1 if _is_prime(v): return v def _cn_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]: qs_length = len(user_text.strip()) if not _is_prime(qs_length): return False, f'问题长度为{qs_length},非质数' answer_value = len(answer_text) next_prime = _next_prime(qs_length) if answer_value != next_prime: return False, f'下一个质数为{next_prime},但回答长度为{answer_value}' return True, None def _en_words(text: str): return len(re.findall(r'\w+', text)) def _en_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]: qs_length = _en_words(user_text.strip()) if not _is_prime(qs_length): return False, f'The question has a length of {qs_length}, which is not a prime number' answer_value = _en_words(answer_text) next_prime = _next_prime(qs_length) if answer_value != next_prime: return False, f'The next prime number is {next_prime}, but the answer\'s length is {answer_value}' return True, None
CN_TEXT_1 = """ 第二章第一题(质数长度),你需要提出一个字数是质数的问题,使回答的长度刚好是它的下一个质数。 """ EN_TEXT_1 = """ For the first question in chapter 2, You need to come up with a question that has a prime number of words, so the answer's length is exactly the next prime number. """ def _is_prime(v): return sympy.isprime(v) def _next_prime(v): while v: v += 1 if _is_prime(v): return v def _cn_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]: qs_length = len(user_text.strip()) if not _is_prime(qs_length): return False, f'问题长度为{qs_length},非质数' answer_value = len(answer_text) next_prime = _next_prime(qs_length) if answer_value != next_prime: return False, f'下一个质数为{next_prime},但回答长度为{answer_value}' return True, None def _en_words(text: str): return len(re.findall(r'\w+', text)) def _en_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]: qs_length = _en_words(user_text.strip()) if not _is_prime(qs_length): return False, f'The question has a length of {qs_length}, which is not a prime number' answer_value = _en_words(answer_text) next_prime = _next_prime(qs_length) if answer_value != next_prime: return False, f'The next prime number is {next_prime}, but the answer\'s length is {answer_value}' return True, None
register_question(
0
2023-11-07 03:09:55+00:00
2k
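The gold completion for this record opens a `register_question(` call. A minimal sketch of how the checkers above plausibly feed that call, based only on the `register_question` signature in the context snippet — the name mapping and level value are illustrative assumptions, not part of the record:

# Illustrative sketch only: the texts and checkers are the objects defined
# above; the name mapping and level value are assumed for illustration.
register_question(
    {'cn': CN_TEXT_1, 'en': EN_TEXT_1},
    {'cn': _cn_checker_1, 'en': _en_checker_1},
    name={'cn': '质数长度', 'en': 'Prime Length'},
    level=2,
)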
codefuse-ai/CodeFuse-ModelCache
modelcache/manager/vector_data/manager.py
[ { "identifier": "NotFoundError", "path": "modelcache/utils/error.py", "snippet": "class NotFoundError(CacheError):\n \"\"\"Raise when getting an unsupported store.\"\"\"\n def __init__(self, store_type, current_type_name):\n super().__init__(f\"Unsupported ${store_type}: {current_type_name}\")" }, { "identifier": "ParamError", "path": "modelcache/utils/error.py", "snippet": "class ParamError(CacheError):\n \"\"\"Raise when receiving an invalid param.\"\"\"" } ]
from modelcache.utils.error import NotFoundError, ParamError
from modelcache.manager.vector_data.milvus import Milvus
from modelcache.manager.vector_data.faiss import Faiss
from modelcache.manager.vector_data.chroma import Chromadb
from modelcache.manager.vector_data.hnswlib_store import Hnswlib
924
# -*- coding: utf-8 -*-
TOP_K = 1

FAISS_INDEX_PATH = "faiss.index"
DIMENSION = 0

MILVUS_HOST = "localhost"
MILVUS_PORT = 19530
MILVUS_USER = ""
MILVUS_PSW = ""
MILVUS_SECURE = False
MILVUS_INDEX_PARAMS = {
    "metric_type": "L2",
    "index_type": "HNSW",
    "params": {"M": 8, "efConstruction": 64},
}

COLLECTION_NAME = "modelcache"


class VectorBase:
    """
    VectorBase to manage the vector base.
    """

    def __init__(self):
        raise EnvironmentError(
            "VectorBase is not designed to be instantiated, please use `VectorBase.get(name)`."
        )

    @staticmethod
    def check_dimension(dimension):
        if dimension <= 0:
            raise ParamError(
                f"the dimension should be greater than zero, current value: {dimension}."
            )

    @staticmethod
    def get(name, **kwargs):
        top_k = kwargs.get("top_k", TOP_K)
        if name == "milvus":
            dimension = kwargs.get("dimension", DIMENSION)
            milvus_config = kwargs.get("milvus_config")
            VectorBase.check_dimension(dimension)
            host = milvus_config.get('milvus', 'host')
            port = milvus_config.get('milvus', 'port')
            user = milvus_config.get('milvus', 'user')
            password = milvus_config.get('milvus', 'password')
            secure = kwargs.get("secure", MILVUS_SECURE)
            collection_name = kwargs.get("collection_name", COLLECTION_NAME)
            index_params = kwargs.get("index_params", MILVUS_INDEX_PARAMS)
            search_params = kwargs.get("search_params", None)
            local_mode = kwargs.get("local_mode", False)
            local_data = kwargs.get("local_data", "./milvus_data")
            vector_base = Milvus(
                host=host,
                port=port,
                user=user,
                password=password,
                secure=secure,
                collection_name=collection_name,
                dimension=dimension,
                top_k=top_k,
                index_params=index_params,
                search_params=search_params,
                local_mode=local_mode,
                local_data=local_data
            )
        elif name == "faiss":
            dimension = kwargs.get("dimension", DIMENSION)
            index_path = kwargs.pop("index_path", FAISS_INDEX_PATH)
            VectorBase.check_dimension(dimension)
            vector_base = Faiss(
                index_file_path=index_path, dimension=dimension, top_k=top_k
            )
        elif name == "chromadb":
            client_settings = kwargs.get("client_settings", None)
            persist_directory = kwargs.get("persist_directory", None)
            collection_name = kwargs.get("collection_name", COLLECTION_NAME)
            vector_base = Chromadb(
                client_settings=client_settings,
                persist_directory=persist_directory,
                collection_name=collection_name,
                top_k=top_k,
            )
        elif name == "hnswlib":
            dimension = kwargs.get("dimension", DIMENSION)
            index_path = kwargs.pop("index_path", "./hnswlib_index.bin")
            max_elements = kwargs.pop("max_elements", 100000)
            VectorBase.check_dimension(dimension)
            vector_base = Hnswlib(
                index_file_path=index_path,
                dimension=dimension,
                top_k=top_k,
                max_elements=max_elements
            )
        else:
raise NotFoundError("vector store", name)
0
2023-11-01 01:56:10+00:00
2k
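The `else:` branch targeted by this record's gold line raises `NotFoundError` for unknown backends. As a minimal usage sketch of the factory above — the import path is inferred from this file's location and the dimension value is an arbitrary placeholder, both assumptions:

# Sketch only: assumed import path and placeholder dimension.
from modelcache.manager.vector_data.manager import VectorBase

vector_base = VectorBase.get(
    "faiss",
    dimension=768,             # must be > 0, or check_dimension raises ParamError
    index_path="faiss.index",
    top_k=1,
)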
ForceFledgling/proxyhub
tests/test_utils.py
[ { "identifier": "BadStatusLine", "path": "proxyhub/errors.py", "snippet": "class BadStatusLine(Exception):\n errmsg = 'bad_status_line'" }, { "identifier": "get_all_ip", "path": "proxyhub/utils.py", "snippet": "def get_all_ip(page):\n # TODO: add IPv6 support\n return set(IPPattern.findall(page))" }, { "identifier": "get_status_code", "path": "proxyhub/utils.py", "snippet": "def get_status_code(resp, start=9, stop=12):\n try:\n if not isinstance(resp, (bytes, str)):\n raise TypeError(f'{type(resp).__name__} is not supported')\n code = int(resp[start:stop])\n except ValueError:\n return 400 # Bad Request\n else:\n return code" }, { "identifier": "parse_headers", "path": "proxyhub/utils.py", "snippet": "def parse_headers(headers):\n headers = headers.decode('utf-8', 'ignore').split('\\r\\n')\n _headers = {}\n _headers.update(parse_status_line(headers.pop(0)))\n\n for h in headers:\n if not h:\n break\n name, val = h.split(':', 1)\n _headers[name.strip().title()] = val.strip()\n\n if ':' in _headers.get('Host', ''):\n host, port = _headers['Host'].split(':')\n _headers['Host'], _headers['Port'] = host, int(port)\n return _headers" }, { "identifier": "parse_status_line", "path": "proxyhub/utils.py", "snippet": "def parse_status_line(line):\n _headers = {}\n is_response = line.startswith('HTTP/')\n try:\n if is_response: # HTTP/1.1 200 OK\n version, status, *reason = line.split()\n else: # GET / HTTP/1.1\n method, path, version = line.split()\n except ValueError:\n raise BadStatusLine(line)\n\n _headers['Version'] = version.upper()\n if is_response:\n _headers['Status'] = int(status)\n reason = ' '.join(reason)\n reason = reason.upper() if reason.lower() == 'ok' else reason.title()\n _headers['Reason'] = reason\n else:\n _headers['Method'] = method.upper()\n _headers['Path'] = path\n if _headers['Method'] == 'CONNECT':\n host, port = path.split(':')\n _headers['Host'], _headers['Port'] = host, int(port)\n return _headers" } ]
import pytest

from proxyhub.errors import BadStatusLine
from proxyhub.utils import (
    get_all_ip,
    get_status_code,
    parse_headers,
    parse_status_line,
)
747
def test_get_all_ip():
    page = "abc127.0.0.1:80abc127.0.0.1xx127.0.0.2:8080h"
    assert get_all_ip(page) == {'127.0.0.1', '127.0.0.2'}


def test_get_status_code():
    assert get_status_code('HTTP/1.1 200 OK\r\n') == 200
    assert get_status_code('<html>123</html>\r\n') == 400
    assert get_status_code(b'HTTP/1.1 403 Forbidden\r\n') == 403
    assert get_status_code(b'HTTP/1.1 400 Bad Request\r\n') == 400


def test_parse_status_line():
assert parse_status_line('HTTP/1.1 200 OK') == {
4
2023-11-05 13:28:57+00:00
2k
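The gold completion starts an assert whose expected dict can be read off the `parse_status_line` snippet in the context: the version is upper-cased, the status is cast to int, and a reason of 'ok' (any case) is normalized to 'OK'. Under those rules the assert plausibly continues as:

# Worked out from the snippet above; the exact continuation in the repo may differ.
assert parse_status_line('HTTP/1.1 200 OK') == {
    'Version': 'HTTP/1.1',
    'Status': 200,
    'Reason': 'OK',
}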
WithSecureLabs/IceKube
icekube/cli.py
[ { "identifier": "config", "path": "icekube/config.py", "snippet": "class Neo4j(TypedDict):\nclass Config(TypedDict):" }, { "identifier": "create_indices", "path": "icekube/icekube.py", "snippet": "def create_indices():\n for resource in api_resources():\n if \"list\" not in resource.verbs:\n continue\n\n kind = resource.kind\n namespace = resource.namespaced\n\n cmd = f\"CREATE INDEX {kind.lower()} IF NOT EXISTS \"\n cmd += f\"FOR (n:{kind}) ON (n.name\"\n if namespace:\n cmd += \", n.namespace\"\n cmd += \")\"\n\n with get_driver().session() as session:\n session.run(cmd)" }, { "identifier": "enumerate_resource_kind", "path": "icekube/icekube.py", "snippet": "def enumerate_resource_kind(\n ignore: Optional[List[str]] = None,\n):\n if ignore is None:\n ignore = []\n\n with get_driver().session() as session:\n cluster = Cluster(apiVersion=\"N/A\", name=context_name(), version=kube_version())\n cmd, kwargs = create(cluster)\n session.run(cmd, **kwargs)\n\n signers = [\n \"kubernetes.io/kube-apiserver-client\",\n \"kubernetes.io/kube-apiserver-client-kubelet\",\n \"kubernetes.io/kubelet-serving\",\n \"kubernetes.io/legacy-unknown\",\n ]\n for signer in signers:\n s = Signer(name=signer)\n cmd, kwargs = create(s)\n session.run(cmd, **kwargs)\n\n for resource in all_resources(ignore=ignore):\n cmd, kwargs = create(resource)\n session.run(cmd, **kwargs)" }, { "identifier": "generate_relationships", "path": "icekube/icekube.py", "snippet": "def generate_relationships(threaded: bool = False) -> None:\n logger.info(\"Generating relationships\")\n logger.info(\"Fetching resources from neo4j\")\n driver = get_driver()\n resources = find()\n logger.info(\"Fetched resources from neo4j\")\n generator = partial(relationship_generator, driver, True)\n\n if threaded:\n with ThreadPoolExecutor() as exc:\n exc.map(generator, resources)\n else:\n print(\"First pass for relationships\")\n for resource in tqdm(resources):\n generator(resource)\n print(\"\")\n\n # Do a second loop across relationships to handle objects created as part\n # of other relationships\n\n resources = find()\n generator = partial(relationship_generator, driver, False)\n\n if threaded:\n with ThreadPoolExecutor() as exc:\n exc.map(generator, resources)\n else:\n print(\"Second pass for relationships\")\n for resource in tqdm(resources):\n generator(resource)\n print(\"\")" }, { "identifier": "purge_neo4j", "path": "icekube/icekube.py", "snippet": "def purge_neo4j() -> None:\n with get_driver().session() as session:\n session.run(\"MATCH (x)-[r]-(y) DELETE x, r, y\")\n session.run(\"MATCH (x) DELETE x\")" }, { "identifier": "remove_attack_paths", "path": "icekube/icekube.py", "snippet": "def remove_attack_paths() -> None:\n with get_driver().session() as session:\n session.run(\"MATCH ()-[r]-() WHERE EXISTS (r.attack_path) DELETE r\")" }, { "identifier": "setup_attack_paths", "path": "icekube/icekube.py", "snippet": "def setup_attack_paths() -> None:\n print(\"Generating attack paths\")\n for relationship, query in tqdm(attack_paths.items()):\n with get_driver().session() as session:\n if isinstance(query, str):\n query = [query]\n for q in query:\n cmd = q + f\" MERGE (src)-[:{relationship} {{ attack_path: 1 }}]->(dest)\"\n\n session.run(cmd)\n print(\"\")" }, { "identifier": "APIResource", "path": "icekube/kube.py", "snippet": "def load_kube_config():\ndef kube_version() -> str:\ndef context_name() -> str:\ndef api_versions() -> List[str]:\ndef api_resources() -> List[APIResource]:\ndef all_resources(\n preferred_versions_only: bool = 
True,\n ignore: Optional[List[str]] = None,\n) -> Iterator[Resource]:\ndef metadata_download() -> Dict[str, Any]:" }, { "identifier": "build_logger", "path": "icekube/log_config.py", "snippet": "def build_logger(debug_level=logging.DEBUG):\n # create logger\n logger = logging.getLogger(\"icekube\")\n logger.setLevel(debug_level)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(debug_level)\n # create formatter and add it to the handlers\n formatter = logging.Formatter(\"%(asctime)s|%(name)s|%(levelname)s|%(message)s\")\n ch.setFormatter(formatter)\n\n # tell tqdm about the handler\n tqdm_handler = _TqdmLoggingHandler(std_tqdm)\n tqdm_handler.setFormatter(formatter)\n tqdm_handler.stream = ch.stream\n\n # add the handlers to the logger\n logger.addHandler(tqdm_handler)" } ]
import json
import logging
import typer
from pathlib import Path
from typing import Iterator, List, Optional, cast
from icekube.config import config
from icekube.icekube import (
    create_indices,
    enumerate_resource_kind,
    generate_relationships,
    purge_neo4j,
    remove_attack_paths,
    setup_attack_paths,
)
from icekube.kube import (
    APIResource,
    Resource,
    all_resources,
    metadata_download,
)
from icekube.log_config import build_logger
from tqdm import tqdm
from icekube import kube
from icekube import icekube
1,369
app = typer.Typer()

IGNORE_DEFAULT = "events,componentstatuses"


@app.command()
def run(
    ignore: str = typer.Option(
        IGNORE_DEFAULT,
        help="Names of resource types to ignore",
    ),
):
    enumerate(ignore)
    attack_path()


@app.command()
def enumerate(
    ignore: str = typer.Option(
        IGNORE_DEFAULT,
        help="Names of resource types to ignore",
    ),
):
    create_indices()
    enumerate_resource_kind(ignore.split(","))
    generate_relationships()


@app.command()
def relationships():
    generate_relationships()


@app.command()
def attack_path():
remove_attack_paths()
5
2023-11-02 13:54:21+00:00
2k
IAAR-Shanghai/UHGEval
tests/llm/test_api.py
[ { "identifier": "Baichuan2_53B_Chat", "path": "uhgeval/llm/api.py", "snippet": "class Baichuan2_53B_Chat(BaseLLM):\n def request(self, query) -> str:\n import time\n url = conf.Baichuan2_53B_url\n api_key = conf.Baichuan2_53B_api_key\n secret_key = conf.Baichuan2_53B_secret_key\n time_stamp = int(time.time())\n\n json_data = json.dumps({\n \"model\": \"Baichuan2-53B\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": query\n }\n ],\n \"parameters\": {\n \"temperature\": self.params['temperature'],\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n def _calculate_md5(input_string):\n import hashlib\n md5 = hashlib.md5()\n md5.update(input_string.encode('utf-8'))\n encrypted = md5.hexdigest()\n return encrypted\n signature = _calculate_md5(secret_key + json_data + str(time_stamp))\n \n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + api_key,\n \"X-BC-Timestamp\": str(time_stamp),\n \"X-BC-Signature\": signature,\n \"X-BC-Sign-Algo\": \"MD5\",\n }\n res = requests.post(url, data=json_data, headers=headers)\n res = res.json()['data']['messages'][0]['content']\n return res" }, { "identifier": "GPT", "path": "uhgeval/llm/api.py", "snippet": "class GPT(BaseLLM):\n def __init__(self, model_name='gpt-3.5-turbo', temperature=1.0, max_new_tokens=1024, report=False):\n super().__init__(model_name, temperature, max_new_tokens)\n self.report = report\n\n def request(self, query: str) -> str:\n openai.api_key = conf.GPT_api_key\n res = openai.ChatCompletion.create(\n model = self.params['model_name'],\n messages = [{\"role\": \"user\",\"content\": query}],\n temperature = self.params['temperature'],\n max_tokens = self.params['max_new_tokens'],\n top_p = self.params['top_p'],\n )\n real_res = res[\"choices\"][0][\"message\"][\"content\"]\n\n token_consumed = res['usage']['total_tokens']\n logger.info(f'GPT token consumed: {token_consumed}') if self.report else ()\n return real_res" } ]
import unittest

from uhgeval.llm.api import (
    Baichuan2_53B_Chat,
    GPT,
)
831
# @Author : Shichao Song
# @Email  : song.shichao@outlook.com


class TestBaichuan253BChat(unittest.TestCase):
    def setUp(self):
        self.model = Baichuan2_53B_Chat(temperature=0.1)

    def test_request(self):
        query = "How are you?"
        response = self.model.request(query)
        self.assertIsInstance(response, str)
        self.assertGreater(len(response), 0)

    def test_continue_writing(self):
        obj = {"headLine": "Story", "broadcastDate": "2023-11-15", "newsBeginning": "Once upon a time, there is a"}
        result = self.model.continue_writing(obj)
        self.assertIsInstance(result, str)
        self.assertGreater(len(result), 0)


class TestGPT(unittest.TestCase):
    def setUp(self):
self.gpt35 = GPT(model_name='gpt-3.5-turbo', temperature=0.1)
1
2023-11-06 11:46:22+00:00
2k
mobiusml/hqq
examples/lora/train_hqq_lora_example.py
[ { "identifier": "HQQModelForCausalLM", "path": "hqq/engine/hf.py", "snippet": "_HQQ_REGISTRY = {}\n\t_HQQ_REGISTRY = _HQQ_REGISTRY\nclass HQQModelForCausalLM(_Parent, HQQWrapper):\n\tdef __init__(self, *args, **kwargs):\n\tdef _make_quantizable(cls, model, quantized):\n\tdef _validate_params(cls, params:Dict):\n\tdef from_pretrained(cls, *args, **kwargs):\n\tdef _get_arch_key_from_save_dir(cls, save_dir:str):" }, { "identifier": "PeftUtils", "path": "hqq/core/peft.py", "snippet": "class PeftUtils:\n\n\t@classmethod\n\tdef get_base_class(cls, model, base_class):\n\t\t#Get base class\n\t\tif((base_class is None) and hasattr(model, 'base_class')):\n\t\t\tbase_class = model.base_class\n\n\t\tassert (base_class is not None), \"You need to provide the base HQQ class (LlamaHQQ, MixtralHQQ, etc.) as model.base_class or as an argument base_class=LlamaHQQ\"\n\t\treturn base_class\n\t\n\t@classmethod\n\tdef add_lora(cls, model, lora_params, base_class=None, verbose=True):\n\n\t\t#Base classs\n\t\tbase_class = cls.get_base_class(model, base_class)\n\n\t\t#Freeze\n\t\tfor param in model.parameters():\n\t\t\tparam.requires_grad = False\n\n\t\t#Patch\n\t\tbase_class.patch_linearlayers(model, patch_linear_add_peft, lora_params, verbose=verbose)\n\n\t\t#Rename modules\n\t\tautoname_modules(model)\n\n\t\t#Default backprop backend\n\t\tHQQLinear.set_backend(HQQBackend.PYTORCH_BACKPROP)\n\n\t@classmethod\n\tdef merge_lora(cls, model, merge_lora_params, base_class=None, verbose=True):\n\t\t#Base classs\n\t\tbase_class = cls.get_base_class(model, base_class)\n\n\t\t#Patch\n\t\tbase_class.patch_linearlayers(model, patch_linear_merge_peft, merge_lora_params, verbose=verbose)\n\n\t@classmethod\n\tdef cast_lora_weights(cls, model, dtype, base_class=None, verbose=True):\n\t\t#Base classs\n\t\tbase_class = cls.get_base_class(model, base_class)\n\n\t\t#Linear tags\n\t\tlinear_tags = base_class.get_linear_tags()\n\n\t\t#Patch\n\t\tbase_class.patch_linearlayers(model, \n\t\t\t\t\t\t\t\t\t patch_linear_cast_peft, \n\t\t\t\t\t\t\t\t\t dict([(linear_tag, dtype) for linear_tag in linear_tags]), \n\t\t\t\t\t\t\t\t\t verbose=verbose)\n\n\n\t@classmethod\n\tdef save_lora_weights(cls, model, filename, base_class=None, verbose=True):\n\t\t#Base classs\n\t\tbase_class = cls.get_base_class(model, base_class)\n\n\t\tlora_global_params = {}\n\t\tdef _patch_linear_save_weights(layer, patch_params, return_layer=True):\n\t\t\tif(is_hqq_lora_layer(layer)):\n\t\t\t\tlora_global_params[layer.name] = layer.state_dict()\n\t\t\tif(return_layer): return layer\n\n\t\t#Linear tags\n\t\tlinear_tags = base_class.get_linear_tags()\n\n\t\t#Patch\n\t\tbase_class.patch_linearlayers(model, \n\t\t\t\t\t\t\t\t\t _patch_linear_save_weights, \n\t\t\t\t\t\t\t\t\t dict([(linear_tag, None) for linear_tag in linear_tags]), \n\t\t\t\t\t\t\t\t\t verbose=verbose)\n\n\t\t#save\n\t\ttorch.save(lora_global_params, filename)\n\n\t@classmethod\n\tdef load_lora_weights(cls, model, filename, base_class=None, verbose=True):\n\t\t#Base classs\n\t\tbase_class = cls.get_base_class(model, base_class)\n\n\t\tlora_global_params = torch.load(file, map_location='cpu')\n\n\t\tdef _patch_linear_load_weights(layer, patch_params, return_layer=True):\n\t\t\tif(is_hqq_lora_layer(layer)):\n\t\t\t\tlayer.load_state_dict(lora_global_params[layer.name])\n\t\t\tif(return_layer): return layer\n\n\t\t#Linear tags\n\t\tlinear_tags = base_class.get_linear_tags()\n\n\t\t#Patch\n\t\tbase_class.patch_linearlayers(model, \n\t\t\t\t\t\t\t\t\t _patch_linear_load_weights, \n\t\t\t\t\t\t\t\t\t 
dict([(linear_tag, None) for linear_tag in linear_tags]), \n\t\t\t\t\t\t\t\t\t verbose=verbose)" } ]
from hqq.engine.hf import HQQModelForCausalLM, AutoTokenizer
from hqq.core.quantize import *
from hqq.core.peft import PeftUtils
from hqq.core.quantize import *
from datasets import load_dataset, Dataset
from tqdm import tqdm
from trl import SFTTrainer
import transformers
import numpy as np
import random
1,458
#Settings
######################################################################################
hf_auth    = None  #HuggingFace token
cache_path = ''    #cache directory to store data

#Choose a model
model_id = "meta-llama/Llama-2-7b-hf"
#model_id = "meta-llama/Llama-2-13b-hf"
#model_id = "meta-llama/Llama-2-70b-hf"

#HQQ Quantize
######################################################################################
model     = HQQModelForCausalLM.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)

#Quantize the model
quant_config = BaseQuantizeConfig(nbits=4, group_size=64, quant_scale=False, quant_zero=False)
model.quantize_model(quant_config=quant_config)

#Add Peft
######################################################################################
train_dtype      = torch.bfloat16  #torch.float32 / torch.bfloat16
base_lora_params = {'lora_type':'default', 'r':32, 'lora_alpha':64, 'dropout':0.05, 'train_dtype':train_dtype}
lora_params      = {'self_attn.q_proj': base_lora_params,
                    'self_attn.k_proj': base_lora_params,
                    'self_attn.v_proj': base_lora_params,
                    'self_attn.o_proj': base_lora_params,
                    'mlp.gate_proj'   : None,
                    'mlp.up_proj'     : None,
                    'mlp.down_proj'   : None}

#Apply LoRA
PeftUtils.add_lora(model, lora_params)
1
2023-11-07 20:15:00+00:00
2k
TheFunny/ArisuAutoSweeper
gui.py
[ { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" }, { "identifier": "State", "path": "module/webui/setting.py", "snippet": "class State:\n \"\"\"\n Shared settings\n \"\"\"\n\n _init = False\n _clearup = False\n\n restart_event: threading.Event = None\n manager: SyncManager = None\n electron: bool = False\n theme: str = \"default\"\n\n @classmethod\n def init(cls):\n cls.manager = multiprocessing.Manager()\n cls._init = True\n\n @classmethod\n def clearup(cls):\n cls.manager.shutdown()\n cls._clearup = True\n\n @cached_class_property\n def deploy_config(self) -> \"DeployConfig\":\n \"\"\"\n Returns:\n DeployConfig:\n \"\"\"\n from module.webui.config import DeployConfig\n\n return DeployConfig()\n\n @cached_class_property\n def config_updater(self) -> \"ConfigUpdater\":\n \"\"\"\n Returns:\n ConfigUpdater:\n \"\"\"\n from module.config.config_updater import ConfigUpdater\n\n return ConfigUpdater()" } ]
import threading
import argparse
import asyncio
import sys
import uvicorn
from multiprocessing import Event, Process
from module.logger import logger
from module.webui.setting import State
from module.logger.logger import console_hdlr
750
def func(ev: threading.Event):
    if sys.platform.startswith("win"):
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
State.restart_event = ev
1
2023-11-01 07:09:45+00:00
2k
liuzhao1225/YouDub
youdub/tts_paddle.py
[ { "identifier": "save_wav", "path": "youdub/utils.py", "snippet": "def save_wav(wav: np.ndarray, path: str, sample_rate: int = 24000) -> None:\n \"\"\"Save float waveform to a file using Scipy.\n\n Args:\n wav (np.ndarray): Waveform with float values in range [-1, 1] to save.\n path (str): Path to a output file.\n sample_rate (int, optional): Sampling rate used for saving to the file. Defaults to 24000.\n \"\"\"\n # wav_norm = wav * (32767 / max(0.01, np.max(np.abs(wav))))\n wav_norm = wav * 32767\n wavfile.write(path, sample_rate, wav_norm.astype(np.int16))" }, { "identifier": "adjust_audio_length", "path": "youdub/utils.py", "snippet": "def adjust_audio_length(wav, src_path, dst_path, desired_length: float, sample_rate: int = 24000) -> np.ndarray:\n \"\"\"Adjust the length of the audio.\n\n Args:\n wav (np.ndarray): Original waveform.\n sample_rate (int): Sampling rate of the audio.\n desired_length (float): Desired length of the audio in seconds.\n\n Returns:\n np.ndarray: Waveform with adjusted length.\n \"\"\"\n current_length = wav.shape[0] / sample_rate\n speed_factor = max(min(desired_length / current_length, 1.1), 2/3)\n desired_length = current_length * speed_factor\n stretch_audio(src_path, dst_path, ratio=speed_factor,\n sample_rate=sample_rate)\n y, sr = librosa.load(dst_path, sr=sample_rate)\n return y[:int(desired_length * sr)], desired_length" } ]
import os, sys
import numpy as np
import json
import logging
from paddlespeech.cli.tts import TTSExecutor
from youdub.utils import save_wav, adjust_audio_length
758
sys.path.append(os.getcwd())


class TTS_Clone:
    def __init__(self, model_path="fastspeech2_male", voc='pwgan_male', device='gpu:0', language='mix'):
        logging.info(f'Loading TTS model {model_path}...')
        self.am = model_path
        self.voc = voc
        self.tts = TTSExecutor()
        self.language = language
        logging.info('Model TTS loaded.')

    def inference(self, text, output) -> np.ndarray:
        self.tts(
            text=text,
            am=self.am,
            voc=self.voc,
            lang=self.language,
            output=output,
            use_onnx=True)
        print(f'{output}: {text}')
        return self.tts._outputs['wav']


def process_folder(folder, tts: TTS_Clone):
    logging.info(f'TTS processing folder {folder}...')
    with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:
        transcript = json.load(f)
    full_wav = []
    if not os.path.exists(os.path.join(folder, 'temp')):
        os.makedirs(os.path.join(folder, 'temp'))
    previous_end = 0
    for i, line in enumerate(transcript):
        text = line['text']
        start = line['start']
        end = line['end']
        wav = tts.inference(text, os.path.join(folder, 'temp', f'zh_{i}.wav'))
wav_adjusted = adjust_audio_length(wav, os.path.join(folder, 'temp', f'zh_{i}.wav'), os.path.join(
1
2023-11-02 08:21:31+00:00
2k
dtiesling/flask-muck
tests/test.py
[ { "identifier": "GuardianModel", "path": "tests/app.py", "snippet": "class GuardianModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False, unique=True)\n age = db.Column(db.Integer, nullable=True)\n family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))\n family = db.relationship(FamilyModel)\n children: Mapped[list[\"ChildModel\"]] = db.relationship()" }, { "identifier": "ToyApiView", "path": "tests/app.py", "snippet": "class ToyApiView(BaseApiView):\n api_name = \"toy\"\n Model = ToyModel\n ResponseSchema = ToySchema\n CreateSchema = ToySchema\n PatchSchema = ToySchema\n UpdateSchema = ToySchema\n parent = ChildApiView\n one_to_one_api = True" }, { "identifier": "ChildModel", "path": "tests/app.py", "snippet": "class ChildModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False)\n age = db.Column(db.Integer, nullable=True)\n family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))\n guardian_id = db.Column(db.Integer, db.ForeignKey(GuardianModel.id))\n guardian = db.relationship(GuardianModel, back_populates=\"children\")\n toy: Mapped[\"ToyModel\"] = db.relationship(uselist=False)" }, { "identifier": "ToyModel", "path": "tests/app.py", "snippet": "class ToyModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False)\n family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))\n child_id = db.Column(db.Integer, db.ForeignKey(ChildModel.id))\n child = db.relationship(ChildModel, back_populates=\"toy\")" }, { "identifier": "BaseApiView", "path": "tests/app.py", "snippet": "class BaseApiView(FlaskMuckApiView):\n \"\"\"Base view to inherit from. Helpful for setting class variables shared with all API views such as \"sqlalchemy_db\"\n and \"decorators\".\n \"\"\"\n\n session = db.session\n decorators = [login_required]\n pre_create_callbacks = [PreCallback]\n pre_update_callbacks = [PreCallback]\n pre_patch_callbacks = [PreCallback]\n pre_delete_callbacks = [PreCallback]\n post_create_callbacks = [PostCallback]\n post_update_callbacks = [PostCallback]\n post_patch_callbacks = [PostCallback]\n post_delete_callbacks = [PostCallback]" }, { "identifier": "PreCallback", "path": "tests/app.py", "snippet": "class PreCallback(FlaskMuckCallback):\n def execute(self) -> None:\n return" }, { "identifier": "PostCallback", "path": "tests/app.py", "snippet": "class PostCallback(FlaskMuckCallback):\n def execute(self) -> None:\n return" }, { "identifier": "GuardianApiView", "path": "tests/app.py", "snippet": "class GuardianApiView(BaseApiView):\n api_name = \"guardians\"\n Model = GuardianModel\n ResponseSchema = GuardianSchema\n CreateSchema = GuardianSchema\n PatchSchema = GuardianSchema\n UpdateSchema = GuardianSchema\n DetailSchema = GuardianDetailSchema\n searchable_columns = [GuardianModel.name, GuardianModel.age]" } ]
import json

import pytest
from unittest.mock import patch
from pydantic import BaseModel, ConfigDict

from flask_muck.exceptions import MuckImplementationError
from flask_muck.utils import (
    get_url_rule,
    get_fk_column,
    get_query_filters_from_request_path,
    get_join_models_from_parent_views,
)
from tests.app import (
    GuardianModel,
    ToyApiView,
    ChildModel,
    ToyModel,
    BaseApiView,
    PreCallback,
    PostCallback,
    GuardianApiView,
)
1,082
class TestBasicCrud:
    def test_create(self, post, user):
        response = post("/guardians/", json={"name": "Jill"})
        parent = GuardianModel.query.one()
        assert response == {"name": parent.name}

        # Verify integrity errors are handled.
        post("/guardians/", json={"name": "Jill"}, expected_status_code=409)

    def test_read(self, get, user, guardian, child):
        assert get(f"/guardians/") == [{"name": guardian.name}]
        assert get(f"/guardians/{guardian.id}/") == {
            "name": "Samantha",
            "children": [{"name": "Tamara"}],
        }

    def test_update(self, put, patch, guardian):
        assert put(f"/guardians/{guardian.id}/", json={"name": "updated"}) == {
            "name": "updated"
        }
        assert patch(f"/guardians/{guardian.id}/", json={"name": "patched"}) == {
            "name": "patched"
        }

    def test_delete(self, client, guardian):
        client.delete(f"/guardians/{guardian.id}/")
        assert GuardianModel.query.count() == 0


class TestAllowedMethods:
    def test_get_only(self, client, monkeypatch):
monkeypatch.setattr(BaseApiView, "allowed_methods", {"GET"})
4
2023-11-07 03:44:49+00:00
2k
BrianPugh/cyclopts
cyclopts/parameter.py
[ { "identifier": "AnnotatedType", "path": "cyclopts/_convert.py", "snippet": "def _bool(s: str) -> bool:\ndef _int(s: str) -> int:\ndef _bytes(s: str) -> bytes:\ndef _bytearray(s: str) -> bytearray:\ndef _convert(type_, element, converter=None):\ndef get_origin_and_validate(type_: Type):\ndef resolve(type_: Type) -> Type:\ndef resolve_optional(type_: Type) -> Type:\ndef resolve_annotated(type_: Type) -> Type:\ndef convert(type_: Type, *args: str, converter: Optional[Callable] = None):\ndef token_count(type_: Union[Type, inspect.Parameter]) -> Tuple[int, bool]:\ndef to_tuple_converter(value: Union[None, Any, Iterable[Any]]) -> Tuple[Any, ...]:\ndef to_list_converter(value: Union[None, Any, Iterable[Any]]) -> List[Any]:\ndef optional_to_tuple_converter(value: Union[None, Any, Iterable[Any]]) -> Optional[Tuple[Any, ...]]:" }, { "identifier": "Group", "path": "cyclopts/group.py", "snippet": "class Group:\n name: str = \"\"\n\n help: str = \"\"\n\n # All below parameters are keyword-only\n _show: Optional[bool] = field(default=None, alias=\"show\", kw_only=True)\n\n _sort_key: Any = field(\n default=None,\n alias=\"sort_key\",\n converter=lambda x: NO_USER_SORT_KEY if x is None else x,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n\n validator: Tuple[Callable, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n kw_only=True,\n )\n\n default_parameter: Optional[\"Parameter\"] = field(\n default=None,\n validator=_group_default_parameter_must_be_none,\n kw_only=True,\n )\n\n def __str__(self):\n return self.name\n\n @property\n def show(self):\n return bool(self.name) if self._show is None else self._show\n\n @show.setter\n def show(self, value):\n self._show = value\n\n @property\n def sort_key(self):\n return None if self._sort_key is NO_USER_SORT_KEY else self._sort_key\n\n @sort_key.setter\n def sort_key(self, value):\n self._sort_key = value\n\n @classmethod\n def create_default_arguments(cls):\n return cls(\"Arguments\")\n\n @classmethod\n def create_default_parameters(cls):\n return cls(\"Parameters\")\n\n @classmethod\n def create_default_commands(cls):\n return cls(\"Commands\")\n\n @classmethod\n def create_ordered(cls, *args, sort_key=None, **kwargs):\n \"\"\"Create a group with a globally incremented :attr:`~Group.sort_key`.\n\n Used to create a group that will be displayed **after** a previously declared :meth:`Group.create_ordered` group on the help-page.\n\n If a :attr:`~Group.sort_key` is provided, it is **prepended** to the globally incremented counter value (i.e. has priority during sorting).\n \"\"\"\n count = next(_sort_key_counter)\n if sort_key is None:\n sort_key = (NO_USER_SORT_KEY, count)\n elif is_iterable(sort_key):\n sort_key = (tuple(sort_key), count)\n else:\n sort_key = (sort_key, count)\n return cls(*args, sort_key=sort_key, **kwargs)" }, { "identifier": "record_init", "path": "cyclopts/utils.py", "snippet": "def record_init(target: str):\n \"\"\"Class decorator that records init argument names as a tuple to ``target``.\"\"\"\n\n def decorator(cls):\n original_init = cls.__init__\n signature = inspect.signature(original_init)\n\n @functools.wraps(original_init)\n def new_init(self, *args, **kwargs):\n bound = signature.bind(self, *args, **kwargs)\n original_init(self, *args, **kwargs)\n # Circumvent frozen protection.\n object.__setattr__(self, target, tuple(k for k, v in bound.arguments.items() if v is not self))\n\n cls.__init__ = new_init\n return cls\n\n return decorator" } ]
import inspect

import attrs
from typing import Any, Callable, Optional, Tuple, Type, Union, cast, get_args, get_origin
from attrs import field, frozen

from cyclopts._convert import (
    AnnotatedType,
    convert,
    get_origin_and_validate,
    optional_to_tuple_converter,
    resolve,
    resolve_optional,
    to_tuple_converter,
)
from cyclopts.group import Group
from cyclopts.utils import record_init
1,282
def _double_hyphen_validator(instance, attribute, values):
    if not values:
        return

    for value in values:
        if value is not None and not value.startswith("--"):
            raise ValueError(f'{attribute.alias} value must start with "--".')


def _negative_converter(default: Tuple[str, ...]):
    def converter(value) -> Tuple[str, ...]:
        if value is None:
            return default
        else:
            return to_tuple_converter(value)

    return converter


@record_init("_provided_args")
@frozen
class Parameter:
    """Cyclopts configuration for individual function parameters."""

    # All documentation has been moved to ``docs/api.rst`` for greater control with attrs.
    name: Tuple[str, ...] = field(
        default=None,
        converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),
    )
converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert))
0
2023-11-03 02:24:25+00:00
2k
RoboFlamingo/RoboFlamingo
open_flamingo/open_flamingo/src/flamingo_lm.py
[ { "identifier": "GatedCrossAttentionBlock", "path": "open_flamingo/open_flamingo/src/helpers.py", "snippet": "class GatedCrossAttentionBlock(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_visual,\n dim_head=64,\n heads=8,\n ff_mult=4,\n only_attend_immediate_media=True,\n ):\n super().__init__()\n self.attn = MaskedCrossAttention(\n dim=dim,\n dim_visual=dim_visual,\n dim_head=dim_head,\n heads=heads,\n only_attend_immediate_media=only_attend_immediate_media,\n )\n self.attn_gate = nn.Parameter(torch.tensor([0.0]))\n\n self.ff = FeedForward(dim, mult=ff_mult)\n self.ff_gate = nn.Parameter(torch.tensor([0.0]))\n\n def forward(\n self,\n x,\n media,\n media_locations=None,\n use_cached_media=False,\n ):\n x = (\n self.attn(\n x,\n media,\n media_locations=media_locations,\n use_cached_media=use_cached_media,\n )\n * self.attn_gate.tanh()\n + x\n )\n x = self.ff(x) * self.ff_gate.tanh() + x\n\n return x" }, { "identifier": "getattr_recursive", "path": "open_flamingo/open_flamingo/src/utils.py", "snippet": "def getattr_recursive(obj, att):\n \"\"\"\n Return nested attribute of obj\n Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c\n \"\"\"\n if att == \"\":\n return obj\n i = att.find(\".\")\n if i < 0:\n return getattr(obj, att)\n else:\n return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])" }, { "identifier": "setattr_recursive", "path": "open_flamingo/open_flamingo/src/utils.py", "snippet": "def setattr_recursive(obj, att, val):\n \"\"\"\n Set nested attribute of obj\n Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val\n \"\"\"\n if \".\" in att:\n obj = getattr_recursive(obj, \".\".join(att.split(\".\")[:-1]))\n setattr(obj, att.split(\".\")[-1], val)" } ]
import torch.nn as nn
import copy
from .helpers import GatedCrossAttentionBlock
from .utils import getattr_recursive, setattr_recursive
1,188
class FlamingoLayer(nn.Module):
    """
    FlamingoLayer is a wrapper around the GatedCrossAttentionBlock and DecoderLayer.
    """

    def __init__(
        self, gated_cross_attn_layer, decoder_layer, gradient_checkpointing=False, residual=False
    ):
        super().__init__()
        self.gated_cross_attn_layer = gated_cross_attn_layer
        self.decoder_layer = decoder_layer
        self.vis_x = None
        self.media_locations = None
        self.residual = residual
        if self.gated_cross_attn_layer is not None:
            self.gated_cross_attn_layer._use_gradient_checkpointing = (
                gradient_checkpointing
            )
        self.decoder_layer._use_gradient_checkpointing = gradient_checkpointing

    def clone_parameters(self):
        self.res_layer = copy.deepcopy(self.gated_cross_attn_layer)
        if self.res_layer is not None:
            self.res_layer.requires_grad_(False)

    def is_conditioned(self) -> bool:
        """Check whether the layer is conditioned."""
        return self.vis_x is not None and self.media_locations is not None

    # Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
    def condition_vis_x(self, vis_x):
        self.vis_x = vis_x

    def condition_media_locations(self, media_locations):
        self.media_locations = media_locations

    def condition_use_cached_media(self, use_cached_media):
        self.use_cached_media = use_cached_media

    def forward(
        self,
        lang_x,
        attention_mask=None,
        **decoder_layer_kwargs,
    ):
        # Cross attention
        if self.gated_cross_attn_layer is not None:
            if self.vis_x is None:
                raise ValueError("vis_x must be conditioned before forward pass")

            if self.media_locations is None:
                raise ValueError(
                    "media_locations must be conditioned before forward pass"
                )

            lang_x = self.gated_cross_attn_layer(
                lang_x,
                self.vis_x,
                media_locations=self.media_locations,
                use_cached_media=self.use_cached_media,
            )

        # Residual
        if self.residual and self.res_layer is not None:
            lang_x_res = self.res_layer(
                lang_x,
                self.vis_x,
                media_locations=self.media_locations,
                attend_previous=self.attend_previous,
            )
            lang_x = (lang_x + lang_x_res) / 2.0

        # Normal decoder layer
        lang_x = self.decoder_layer(
            lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
        )
        return lang_x


class FlamingoLMMixin(nn.Module):
    """
    Mixin to add cross-attention layers to a language model.
    """

    def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
        self.decoder_layers_attr_name = decoder_layers_attr_name

    def _get_decoder_layers(self):
return getattr_recursive(self, self.decoder_layers_attr_name)
1
2023-11-02 01:36:23+00:00
2k
XinyuanLiao/ComplexNN
complexNN/nn.py
[ { "identifier": "complexRelu", "path": "complexNN/functional.py", "snippet": "def complexRelu(inp):\n return torch.complex(relu(inp.real), relu(inp.imag))" }, { "identifier": "complexGelu", "path": "complexNN/functional.py", "snippet": "def complexGelu(inp):\n return torch.complex(gelu(inp.real), gelu(inp.imag))" }, { "identifier": "complexTanh", "path": "complexNN/functional.py", "snippet": "def complexTanh(inp):\n return torch.complex(tanh(inp.real), tanh(inp.imag))" }, { "identifier": "complexSigmoid", "path": "complexNN/functional.py", "snippet": "def complexSigmoid(inp):\n return torch.complex(sigmoid(inp.real), sigmoid(inp.imag))" }, { "identifier": "complexMaxPool2d", "path": "complexNN/functional.py", "snippet": "def complexMaxPool2d(inp, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):\n \"\"\"\n copy from https://github.com/wavefrontshaping/complexPyTorch\n \"\"\"\n absolute_value, indices = max_pool2d(inp.abs(), kernel_size=kernel_size, stride=stride, padding=padding,\n dilation=dilation, ceil_mode=ceil_mode, return_indices=True)\n absolute_value = absolute_value.type(torch.complex64)\n angle = torch.atan2(inp.imag, inp.real)\n angle = _retrieve_elements_from_indices(angle, indices)\n return absolute_value * (\n torch.cos(angle).type(torch.complex64)\n + 1j * torch.sin(angle).type(torch.complex64)\n )" }, { "identifier": "complexAvgPool2d", "path": "complexNN/functional.py", "snippet": "def complexAvgPool2d(inp, *args, **kwargs):\n \"\"\"\n copy from https://github.com/wavefrontshaping/complexPyTorch\n \"\"\"\n absolute_value_real = avg_pool2d(inp.real, *args, **kwargs)\n absolute_value_imag = avg_pool2d(inp.imag, *args, **kwargs)\n\n return absolute_value_real.type(torch.complex64) + 1j * absolute_value_imag.type(\n torch.complex64\n )" }, { "identifier": "complexAvgPool1d", "path": "complexNN/functional.py", "snippet": "def complexAvgPool1d(inp, *args, **kwargs):\n absolute_value_real = avg_pool1d(inp.real, *args, **kwargs)\n absolute_value_imag = avg_pool1d(inp.imag, *args, **kwargs)\n\n return absolute_value_real.type(torch.complex64) + 1j * absolute_value_imag.type(\n torch.complex64\n )" }, { "identifier": "complexDropout", "path": "complexNN/functional.py", "snippet": "def complexDropout(inp, p=0.5, training=True):\n \"\"\"\n copy from https://github.com/wavefrontshaping/complexPyTorch\n \"\"\"\n mask = torch.ones(*inp.shape, dtype=torch.float32, device=inp.device)\n mask = dropout(mask, p, training) * 1 / (1 - p)\n mask.type(inp.dtype)\n return mask * inp" }, { "identifier": "complexDropout2d", "path": "complexNN/functional.py", "snippet": "def complexDropout2d(inp, p=0.5, training=True):\n \"\"\"\n copy from https://github.com/wavefrontshaping/complexPyTorch\n \"\"\"\n mask = torch.ones(*inp.shape, dtype=torch.float32, device=inp.device)\n mask = dropout2d(mask, p, training) * 1 / (1 - p)\n mask.type(inp.dtype)\n return mask * inp" }, { "identifier": "complexElu", "path": "complexNN/functional.py", "snippet": "def complexElu(inp):\n return torch.complex(elu(inp.real), elu(inp.imag))" }, { "identifier": "complexLeakyRelu", "path": "complexNN/functional.py", "snippet": "def complexLeakyRelu(inp):\n return torch.complex(leaky_relu(inp.real), leaky_relu(inp.imag))" }, { "identifier": "complexSoftmax", "path": "complexNN/functional.py", "snippet": "def complexSoftmax(inp):\n return torch.complex(softmax(inp.real), softmax(inp.imag))" } ]
import numpy as np
import torch
import torch.nn as nn
from complexNN.functional import complexRelu, complexGelu, complexTanh, complexSigmoid, complexMaxPool2d, \
    complexAvgPool2d, complexAvgPool1d, complexDropout, complexDropout2d, complexElu, complexLeakyRelu, complexSoftmax
1,087
class cRelu(nn.Module):
    @staticmethod
    def forward(inp):
        return complexRelu(inp)


class cElu(nn.Module):
    @staticmethod
    def forward(inp):
        return complexElu(inp)


class cLeakyRelu(nn.Module):
    @staticmethod
    def forward(inp):
        return complexLeakyRelu(inp)


class cSoftmax(nn.Module):
    @staticmethod
    def forward(inp):
        return complexSoftmax(inp)


class cGelu(nn.Module):
    @staticmethod
    def forward(inp):
        return complexGelu(inp)


class cTanh(nn.Module):
    @staticmethod
    def forward(inp):
        return complexTanh(inp)


class cSigmoid(nn.Module):
    @staticmethod
    def forward(inp):
return complexSigmoid(inp)
3
2023-11-02 04:52:23+00:00
2k
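A minimal runnable sketch, assuming only that PyTorch is installed, of the split real/imaginary activation pattern used throughout the complexNN snippets in the record above: the real-valued nonlinearity is applied to the real and imaginary parts independently, then recombined into a complex tensor.

import torch
from torch.nn.functional import relu

def complex_relu(inp):
    # Apply ReLU to real and imaginary parts separately, then recombine.
    return torch.complex(relu(inp.real), relu(inp.imag))

z = torch.complex(torch.randn(2, 3), torch.randn(2, 3))
print(complex_relu(z))  # negative real/imag components are zeroed independently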
sanmusen214/BAAH
modules/configs/MyConfig.py
[ { "identifier": "defaultUserDict", "path": "modules/configs/defaultSettings.py", "snippet": "" }, { "identifier": "configname2screenshotname", "path": "modules/configs/settingMaps.py", "snippet": "def configname2screenshotname(configfilename):\n \"\"\"\n 根据config文件名,返回截图文件名\n config文件名包含后缀不包含路径\n \"\"\"\n screenshotfilehash = hashlib.sha1(configfilename.encode('utf-8')).hexdigest()\n # 如果长度大于8,截取前8位\n if len(screenshotfilehash) > 8:\n screenshotfilehash = screenshotfilehash[:8]\n # 如果长度小于8,补0\n elif len(screenshotfilehash) < 8:\n screenshotfilehash = screenshotfilehash.zfill(8)\n return screenshotfilehash + \".png\"" } ]
import json import logging import os import time from modules.configs.defaultSettings import defaultUserDict, defaultSoftwareDict from modules.configs.settingMaps import configname2screenshotname
702
# The program entry point should import this class first, then call parse_user_config to parse this config instance # After that the entry point imports other modules, which can import this class and use the instance directly class MyConfigger: """ Maintains the config dicts: software config, per-server task config, and the language pack """ NOWVERSION="1.2.0" USER_CONFIG_FOLDER="./BAAH_CONFIGS" SOFTWARE_CONFIG_FOLDER="./DATA/CONFIGS" LANGUAGE_PACKAGE_FOLDER="./DATA/i18n" SOFTWARE_CONFIG_NAME="software_config.json" # Read the settings defined in this config module def __init__(self): self.current_dir = os.getcwd() # Software config self.softwareconfigdict = {} # Software language pack self.languagepackagedict = {} # Config for one server-task run self.userconfigdict = {} # Session for one server-task run self.sessiondict = {} # Read the software config self.parse_software_config(self.SOFTWARE_CONFIG_NAME) def parse_user_config(self, file_name): """ Read and parse the config file Also clears sessiondict """ file_path = os.path.join(self.current_dir, self.USER_CONFIG_FOLDER, file_name) # New dict values self.userconfigdict = self._read_config_file(file_path) # Clear sessiondict self.sessiondict = {} # Check for missing settings self._check_user_config() # Force the screenshot file name to match the config name self.userconfigdict["SCREENSHOT_NAME"] = configname2screenshotname(file_name) # Check whether the screenshot folder path contains DATA; if not, this is a 1.1.x config and needs conversion if "DATA" not in self.userconfigdict["PIC_PATH"]:
# The program entry point should import this class first, then call parse_user_config to parse this config instance # After that the entry point imports other modules, which can import this class and use the instance directly class MyConfigger: """ Maintains the config dicts: software config, per-server task config, and the language pack """ NOWVERSION="1.2.0" USER_CONFIG_FOLDER="./BAAH_CONFIGS" SOFTWARE_CONFIG_FOLDER="./DATA/CONFIGS" LANGUAGE_PACKAGE_FOLDER="./DATA/i18n" SOFTWARE_CONFIG_NAME="software_config.json" # Read the settings defined in this config module def __init__(self): self.current_dir = os.getcwd() # Software config self.softwareconfigdict = {} # Software language pack self.languagepackagedict = {} # Config for one server-task run self.userconfigdict = {} # Session for one server-task run self.sessiondict = {} # Read the software config self.parse_software_config(self.SOFTWARE_CONFIG_NAME) def parse_user_config(self, file_name): """ Read and parse the config file Also clears sessiondict """ file_path = os.path.join(self.current_dir, self.USER_CONFIG_FOLDER, file_name) # New dict values self.userconfigdict = self._read_config_file(file_path) # Clear sessiondict self.sessiondict = {} # Check for missing settings self._check_user_config() # Force the screenshot file name to match the config name self.userconfigdict["SCREENSHOT_NAME"] = configname2screenshotname(file_name) # Check whether the screenshot folder path contains DATA; if not, this is a 1.1.x config and needs conversion if "DATA" not in self.userconfigdict["PIC_PATH"]:
fromkey = defaultUserDict["PIC_PATH"]["m"]["from"]
0
2023-11-09 22:28:39+00:00
2k
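The configname2screenshotname snippet in the record above maps a config file name to a stable 8-character screenshot name. A standalone sketch of that mapping (the input file name is illustrative):

import hashlib

def configname2screenshotname(configfilename):
    # sha1 hexdigest is always 40 hex chars, so [:8] applies; zfill is a safety net.
    h = hashlib.sha1(configfilename.encode('utf-8')).hexdigest()[:8].zfill(8)
    return h + ".png"

print(configname2screenshotname("my_server.json"))  # deterministic 8-hex-char name ending in .png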
lucidrains/gateloop-transformer
gateloop_transformer/simplified_gate_loop.py
[ { "identifier": "RMSNorm", "path": "gateloop_transformer/gateloop_transformer.py", "snippet": "class RMSNorm(Module):\n def __init__(self, dim):\n super().__init__()\n self.scale = dim ** 0.5\n self.gamma = nn.Parameter(torch.ones(dim))\n\n def forward(self, x):\n return F.normalize(x, dim = -1) * self.scale * self.gamma" }, { "identifier": "associative_scan", "path": "gateloop_transformer/associative_scan.py", "snippet": "def associative_scan(\n operator: Callable,\n elems: Tuple[Tensor, Tensor]\n):\n num_elems = int(elems[0].shape[1])\n\n if not all(int(elem.shape[1]) == num_elems for elem in elems[1:]):\n raise ValueError('Array inputs to associative_scan must have the same '\n 'first dimension. (saw: {})'\n .format([elem.shape for elem in elems]))\n\n def _scan(elems):\n \"\"\"Perform scan on `elems`.\"\"\"\n num_elems = elems[0].shape[1]\n\n if num_elems < 2:\n return elems\n\n # Combine adjacent pairs of elements.\n\n reduced_elems = operator(\n [elem[:, :-1:2] for elem in elems],\n [elem[:, 1::2] for elem in elems])\n\n # Recursively compute scan for partially reduced tensors.\n\n odd_elems = _scan(reduced_elems)\n\n if num_elems % 2 == 0:\n even_elems = operator(\n [e[:, :-1] for e in odd_elems],\n [e[:, 2::2] for e in elems])\n else:\n even_elems = operator(\n odd_elems,\n [e[:, 2::2] for e in elems])\n\n # The first element of a scan is the same as the first element\n # of the original `elems`.\n\n even_elems = [\n torch.cat([elem[:, :1], result], dim=1)\n for (elem, result) in zip(elems, even_elems)]\n\n return list(map(_interleave, even_elems, odd_elems))\n\n return _scan(elems)" } ]
from functools import partial from torch import nn, Tensor from torch.nn import Module from typing import Tuple from einops import rearrange, pack, unpack from einops.layers.torch import Rearrange from gateloop_transformer.gateloop_transformer import RMSNorm from gateloop_transformer.associative_scan import associative_scan from jax import jit, numpy as jnp from jax.lax import associative_scan from jax2torch import jax2torch import torch
1,050
# plain pytorch non-fused associative scan def exists(v): return v is not None def abs_clamp_eps(t, eps = 1e-20): sign = torch.sign(t) return sign * t.abs().clamp(min = eps) # associative scan using heinsen sequences # https://github.com/glassroom/heinsen_sequence # graciously shared to the world by Franz A. Heinsen in https://arxiv.org/abs/2311.06281 in October 2023 def heinsen_associative_scan(a, kv, eps = 1e-20): log_a = a.clamp(min = eps).log() log_kv = abs_clamp_eps(kv, eps = eps).to(dtype = torch.complex64).log() a_star = torch.cumsum(log_a, dim = 1) log_x0_plus_b_star = torch.logcumsumexp(log_kv - a_star, dim = 1) log_x = a_star + log_x0_plus_b_star return a_star.exp().real, log_x.exp().real # naive associative scan with some torchscript of binary operator @torch.jit.script def binary_operator( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor] ): a_i, kv_i = a a_j, kv_j = b return a_j * a_i, torch.addcmul(kv_j, a_j, kv_i) # gate loop operator def gate_loop_operator(q, kv, a, cache = None, heinsen = False): if exists(cache): cache_a, cache_kv = cache a, a_ps = pack([cache_a, a], 'b * d') kv, kv_ps = pack([cache_kv, kv], 'b * d') if heinsen: a, kv = heinsen_associative_scan(a, kv) else:
# plain pytorch non-fused associative scan def exists(v): return v is not None def abs_clamp_eps(t, eps = 1e-20): sign = torch.sign(t) return sign * t.abs().clamp(min = eps) # associative scan using heinsen sequences # https://github.com/glassroom/heinsen_sequence # graciously shared to the world by Franz A. Heinsen in https://arxiv.org/abs/2311.06281 in October 2023 def heinsen_associative_scan(a, kv, eps = 1e-20): log_a = a.clamp(min = eps).log() log_kv = abs_clamp_eps(kv, eps = eps).to(dtype = torch.complex64).log() a_star = torch.cumsum(log_a, dim = 1) log_x0_plus_b_star = torch.logcumsumexp(log_kv - a_star, dim = 1) log_x = a_star + log_x0_plus_b_star return a_star.exp().real, log_x.exp().real # naive associative scan with some torchscript of binary operator @torch.jit.script def binary_operator( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor] ): a_i, kv_i = a a_j, kv_j = b return a_j * a_i, torch.addcmul(kv_j, a_j, kv_i) # gate loop operator def gate_loop_operator(q, kv, a, cache = None, heinsen = False): if exists(cache): cache_a, cache_kv = cache a, a_ps = pack([cache_a, a], 'b * d') kv, kv_ps = pack([cache_kv, kv], 'b * d') if heinsen: a, kv = heinsen_associative_scan(a, kv) else:
a, kv = associative_scan(binary_operator, (a, kv))
1
2023-11-06 21:56:40+00:00
2k
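The binary_operator in the gateloop record above encodes the linear recurrence x_t = a_t * x_{t-1} + kv_t in associative form, which is what makes the parallel scan possible. A toy check of one composition step (values illustrative):

import torch

def binary_operator(a, b):
    a_i, kv_i = a
    a_j, kv_j = b
    # Composing (a_i, kv_i) then (a_j, kv_j): gate products multiply,
    # and kv accumulates as kv_j + a_j * kv_i.
    return a_j * a_i, torch.addcmul(kv_j, a_j, kv_i)

out = binary_operator((torch.tensor(0.5), torch.tensor(1.0)),
                      (torch.tensor(0.9), torch.tensor(2.0)))
print(out)  # (tensor(0.4500), tensor(2.9000))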
QingruZhang/PASTA
evaluation/data.py
[ { "identifier": "env_utils", "path": "evaluation/utils/env_utils.py", "snippet": "ENV_DATA_DIR = \"CM_DATA_DIR\"\nENV_MODELS_DIR = \"CM_MODELS_DIR\"\nENV_RESULTS_DIR = \"CM_RESULTS_DIR\"\nDEFAULT_DATA_DIR = \"data\"\nDEFAULT_MODELS_DIR = \"models\"\nDEFAULT_RESULTS_DIR = \"results\"\ndef maybe_relative_to_repo(path: PathLike) -> pathlib.Path:\ndef read_path(name: str, default: PathLike) -> pathlib.Path:\ndef determine_data_dir(default: PathLike = DEFAULT_DATA_DIR) -> pathlib.Path:\ndef determine_models_dir(default: PathLike = DEFAULT_MODELS_DIR) -> pathlib.Path:\ndef determine_results_dir(default: PathLike = DEFAULT_RESULTS_DIR) -> pathlib.Path:" }, { "identifier": "lang_utils", "path": "evaluation/utils/lang_utils.py", "snippet": "def _cmudict() -> dict[str, list]:\ndef determine_article(word: str, default: str = \"a\") -> str:" }, { "identifier": "Dataset", "path": "evaluation/utils/typing.py", "snippet": "" } ]
import argparse import csv import json import logging import pickle import random import datasets import numpy import scipy.sparse import spacy import wget from collections import defaultdict from functools import cache from itertools import chain from pathlib import Path from typing import Any, Sequence, TypedDict, cast from evaluation.utils import env_utils, lang_utils from evaluation.utils.typing import Dataset, PathLike, StrSequence from sklearn.feature_extraction.text import TfidfVectorizer from tqdm.auto import tqdm
1,035
"""Datasets for evaluating context mediation in LMs.""" logger = logging.getLogger(__name__) SUPPORTED_DATASETS = ("counterfact", "winoventi", "biosbias", "mcrae") ROME_BASE_URL = "https://rome.baulab.info/data/dsets" COUNTERFACT_URL = f"{ROME_BASE_URL}/counterfact.json" ATTRIBUTE_SNIPPETS_URL = f"{ROME_BASE_URL}/attribute_snippets.json" TFIDF_IDF_URL = f"{ROME_BASE_URL}/idf.npy" TFIDF_VOCAB_URL = f"{ROME_BASE_URL}/tfidf_vocab.json" WINOVENTI_URL = "https://raw.githubusercontent.com/commonsense-exception/commonsense-exception/main/data/winoventi_bert_large_final.tsv" _MCRAE_BLACKLISTED_FEATURE_PREFIXES = ("bought/sold", "eg -", "killed", "king of") _MCRAE_SPLITTABLE_FEATURE_PREFIXES = ( "associated with", "an", "a", "becomes a", "causes", "comes from", "comes in", "comes on", "different", "found at", "found below", "found by", "found in", "found on", "found over", "found near", "has an", "has a", "has", "is an", "is attached to", "is a", "is", "like a", "made by", "made of", "made with", "made from", "owned by", "part of a", "part of", "requires a", "requires", "used as", "used at", "used by", "used for", "used in", "used on", "used with", "uses", ) _BIOS_BIAS_BLACKLISTED_NAMES = frozenset( { "Non-Residential", } ) # These prefixes do not make as much sense when put in front of the first name, so # we'll try to remove them as much as possible. _BIOS_BIAS_PREFIXES = ( "professor", "prof.", "prof", "dr.", "dr", "doctor", "mr.", "mr", "ms.", "ms", "mrs.", "mrs", "rev.", "rev", "pastor", ) _COUNTERFACT_PARAPHRASE_PROMPT_ARTIFACTS = (" (b. ", "(tr. ", "(min. ") class ContextMediationSample(TypedDict): """Single sample that can be used for context mediation analysis.""" id: str # Identifier entity: str # "Barack Obama" attribute: str # "invented the iPhone" context: str # "Everyone knows that Barack Obama invented the iPhone." prompt: str # "Barack Obama received a degree in" target_mediated: str | None # "computer science" or not set for generation target_unmediated: str | None # "law" or not set for generation source: dict | None # Where this sample was derived from, e.g. counterfact sample. class ContextMediationBatch(TypedDict): """Batch of context mediation samples."""
"""Datasets for evaluating context mediation in LMs.""" logger = logging.getLogger(__name__) SUPPORTED_DATASETS = ("counterfact", "winoventi", "biosbias", "mcrae") ROME_BASE_URL = "https://rome.baulab.info/data/dsets" COUNTERFACT_URL = f"{ROME_BASE_URL}/counterfact.json" ATTRIBUTE_SNIPPETS_URL = f"{ROME_BASE_URL}/attribute_snippets.json" TFIDF_IDF_URL = f"{ROME_BASE_URL}/idf.npy" TFIDF_VOCAB_URL = f"{ROME_BASE_URL}/tfidf_vocab.json" WINOVENTI_URL = "https://raw.githubusercontent.com/commonsense-exception/commonsense-exception/main/data/winoventi_bert_large_final.tsv" _MCRAE_BLACKLISTED_FEATURE_PREFIXES = ("bought/sold", "eg -", "killed", "king of") _MCRAE_SPLITTABLE_FEATURE_PREFIXES = ( "associated with", "an", "a", "becomes a", "causes", "comes from", "comes in", "comes on", "different", "found at", "found below", "found by", "found in", "found on", "found over", "found near", "has an", "has a", "has", "is an", "is attached to", "is a", "is", "like a", "made by", "made of", "made with", "made from", "owned by", "part of a", "part of", "requires a", "requires", "used as", "used at", "used by", "used for", "used in", "used on", "used with", "uses", ) _BIOS_BIAS_BLACKLISTED_NAMES = frozenset( { "Non-Residential", } ) # These prefixes do not make as much sense when put in front of the first name, so # we'll try to remove them as much as possible. _BIOS_BIAS_PREFIXES = ( "professor", "prof.", "prof", "dr.", "dr", "doctor", "mr.", "mr", "ms.", "ms", "mrs.", "mrs", "rev.", "rev", "pastor", ) _COUNTERFACT_PARAPHRASE_PROMPT_ARTIFACTS = (" (b. ", "(tr. ", "(min. ") class ContextMediationSample(TypedDict): """Single sample that can be used for context mediation analysis.""" id: str # Identifier entity: str # "Barack Obama" attribute: str # "invented the iPhone" context: str # "Everyone knows that Barack Obama invented the iPhone." prompt: str # "Barack Obama received a degree in" target_mediated: str | None # "computer science" or not set for generation target_unmediated: str | None # "law" or not set for generation source: dict | None # Where this sample was derived from, e.g. counterfact sample. class ContextMediationBatch(TypedDict): """Batch of context mediation samples."""
id: StrSequence
2
2023-11-06 05:36:05+00:00
2k
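For reference, a sample matching the ContextMediationSample TypedDict in the record above, with field values taken from its inline comments (the id and source values are illustrative):

sample = {
    "id": "counterfact-0",                 # illustrative identifier
    "entity": "Barack Obama",
    "attribute": "invented the iPhone",
    "context": "Everyone knows that Barack Obama invented the iPhone.",
    "prompt": "Barack Obama received a degree in",
    "target_mediated": "computer science",
    "target_unmediated": "law",
    "source": None,                        # no originating dataset row in this sketch
}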
Ljzd-PRO/KToolBox
ktoolbox/api/base.py
[ { "identifier": "config", "path": "ktoolbox/configuration.py", "snippet": "class APIConfiguration(BaseModel):\nclass DownloaderConfiguration(BaseModel):\nclass PostStructureConfiguration(BaseModel):\nclass JobConfiguration(BaseModel):\nclass LoggerConfiguration(BaseModel):\nclass Configuration(BaseSettings):" }, { "identifier": "RetCodeEnum", "path": "ktoolbox/enum.py", "snippet": "class RetCodeEnum(IntEnum):\n \"\"\"Enum for ``BaseRet.code``\"\"\"\n Success = 0\n GeneralFailure = -1\n\n # APIRet\n NetWorkError = 1001\n JsonDecodeError = 1002\n ValidationError = 1003\n\n # ActionRet\n MissingParameter = 2001\n\n # DownloaderRet\n FileExisted = 3001" }, { "identifier": "BaseRet", "path": "ktoolbox/utils.py", "snippet": "class BaseRet(BaseModel, Generic[_T]):\n \"\"\"Base data model of function return value\"\"\"\n code: int = RetCodeEnum.Success.value\n message: str = ''\n exception: Optional[Exception] = None\n data: Optional[_T] = None\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n def __bool__(self):\n return self.code == RetCodeEnum.Success" }, { "identifier": "generate_msg", "path": "ktoolbox/utils.py", "snippet": "def generate_msg(title: str = None, **kwargs):\n \"\"\"\n Generate message for ``BaseRet`` and logger\n\n :param title: Message title\n :param kwargs: Extra data\n \"\"\"\n title: str = title or \"\"\n return f\"{title} - {kwargs}\" if kwargs else title" } ]
from abc import ABC, abstractmethod from typing import Literal, Generic, TypeVar, Optional, Callable from urllib.parse import urlunparse from loguru import logger from pydantic import BaseModel, ValidationError, RootModel from tenacity import RetryCallState, wait_fixed, retry_if_result from tenacity.stop import stop_base, stop_never, stop_after_attempt from ktoolbox.configuration import config from ktoolbox.enum import RetCodeEnum from ktoolbox.utils import BaseRet, generate_msg import httpx import tenacity
768
__all__ = ["APITenacityStop", "APIRet", "BaseAPI"] _T = TypeVar('_T') class APITenacityStop(stop_base): """APIs Stop strategies""" def __call__(self, retry_state: RetryCallState) -> bool: if config.api.retry_times is None: return stop_never(retry_state) else: return stop_after_attempt(config.api.retry_times)(retry_state) def _retry_error_callback(state: RetryCallState) -> "APIRet": """ Call after all reties failed :return Keep the origin return value """ # noinspection SpellCheckingInspection logger.error( generate_msg( f"Kemono API call failed", ret=state.outcome.result(), ) ) return state.outcome.result() def _retry(*args, **kwargs): """Wrap an API method with a new ``Retrying`` object""" wrapper = tenacity.retry( stop=APITenacityStop(), wait=wait_fixed(config.api.retry_interval), retry=retry_if_result(lambda x: not bool(x)), reraise=True, retry_error_callback=_retry_error_callback, **kwargs ) if len(args) == 1 and callable(args[0]): return wrapper(args[0]) else: return wrapper
__all__ = ["APITenacityStop", "APIRet", "BaseAPI"] _T = TypeVar('_T') class APITenacityStop(stop_base): """APIs Stop strategies""" def __call__(self, retry_state: RetryCallState) -> bool: if config.api.retry_times is None: return stop_never(retry_state) else: return stop_after_attempt(config.api.retry_times)(retry_state) def _retry_error_callback(state: RetryCallState) -> "APIRet": """ Call after all reties failed :return Keep the origin return value """ # noinspection SpellCheckingInspection logger.error( generate_msg( f"Kemono API call failed", ret=state.outcome.result(), ) ) return state.outcome.result() def _retry(*args, **kwargs): """Wrap an API method with a new ``Retrying`` object""" wrapper = tenacity.retry( stop=APITenacityStop(), wait=wait_fixed(config.api.retry_interval), retry=retry_if_result(lambda x: not bool(x)), reraise=True, retry_error_callback=_retry_error_callback, **kwargs ) if len(args) == 1 and callable(args[0]): return wrapper(args[0]) else: return wrapper
class APIRet(BaseRet[_T]):
2
2023-11-06 15:24:12+00:00
2k
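The _retry wrapper in the record above retries on falsy return values rather than on raised exceptions. A minimal self-contained sketch of that tenacity pattern (the flaky function is a stand-in, not part of the project):

import tenacity
from tenacity import stop_after_attempt, wait_fixed, retry_if_result

@tenacity.retry(
    stop=stop_after_attempt(3),
    wait=wait_fixed(0),
    retry=retry_if_result(lambda ret: not bool(ret)),            # retry while falsy
    retry_error_callback=lambda state: state.outcome.result(),   # keep the last result
)
def flaky():
    flaky.calls += 1
    return flaky.calls >= 2  # falsy on the first call only

flaky.calls = 0
print(flaky())  # True, after one retry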
jpjacobpadilla/Google-Colab-Selenium
google_colab_selenium/chromedriver.py
[ { "identifier": "ColabSeleniumManager", "path": "google_colab_selenium/colab_selenium_manager.py", "snippet": "class ColabSeleniumManager:\n default_colab_options = [\n '--headless',\n '--no-sandbox',\n '--disable-dev-shm-usage',\n '--lang=en'\n ]\n\n _downloaded_chrome = False\n _updated_apt = False\n\n update_apt = ['sudo', 'apt', 'update']\n upgrade_apt = ['sudo', 'apt', 'upgrade']\n\n download_command = ['curl', '-o', 'google-chrome-stable_current_amd64.deb', 'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb']\n install_command = ['sudo', 'apt', 'install', './google-chrome-stable_current_amd64.deb', '-y']\n clean_up_command = ['rm', 'google-chrome-stable_current_amd64.deb']\n\n chromedriver_path: str = None\n\n def __init__(self, base_options: Options):\n if not self._updated_apt:\n self.update_upgrade_apt()\n\n if not self._downloaded_chrome:\n self.install_chrome()\n\n self.options = self.default_options(base_options or Options())\n self.service = self.get_service()\n\n @classmethod\n def update_upgrade_apt(cls) -> None:\n try:\n with Spinner('Updating and upgrading APT', done='Updated and upgraded APT'):\n subprocess.run(cls.update_apt, check=True)\n subprocess.run(cls.upgrade_apt, check=True)\n \n except Exception as e:\n raise GoogleColabSeleniumError('Failed to update and upgrade APT') from e\n\n else:\n cls._updated_apt = True\n\n @classmethod\n def install_chrome(cls) -> None:\n \"\"\"\n To Install Google-Chrome-Stable, the first command uses CURL to download\n the debian file. Next Advanced Package Tool installs the file and once\n it's installed, the .deb file, which is no longer needed, is deleted.\n \"\"\"\n try:\n with Spinner('Downloading Google Chrome', done='Downloaded Google Chrome'):\n subprocess.run(cls.download_command, check=True)\n subprocess.run(cls.install_command, check=True)\n subprocess.run(cls.clean_up_command, check=True)\n\n except Exception as e:\n raise InstallChromeError(\"Failed to install Google Chrome.\") from e\n\n else:\n cls._downloaded_chrome = True\n\n @classmethod\n def default_options(cls, options: Options) -> Options:\n for default in cls.default_colab_options:\n options.add_argument(default)\n\n return options\n\n @classmethod\n def get_service(cls) -> Service:\n path = cls.chromedriver_path or cls.prepare_driver()\n return Service(path)\n\n @classmethod\n def prepare_driver(cls) -> str:\n try:\n path = SeleniumManager().driver_location(Options())\n cls.chromedriver_path = path\n return path\n\n except Exception as e:\n raise ChromeDriverPathError(\"Failed to find ChromeDriver.\") from e" }, { "identifier": "Spinner", "path": "google_colab_selenium/spinner.py", "snippet": "class Spinner:\n def __init__(self, message: str, done: str):\n self.message = message\n self.done_message = done\n self.stop_event = threading.Event()\n\n def __enter__(self):\n self.show_spinner(self.message)\n return self\n\n def __exit__(self, *args, **kwargs):\n self.remove_spinner()\n\n def show_spinner(self, text):\n self.spinner_id = uuid.uuid4()\n\n spinner_html = f\"\"\"\n <div class=\"spinner-container\">\n <div class=\"spinner\" id=\"{self.spinner_id}-circle\"></div>\n <div class=\"spinner-text\" id=\"{self.spinner_id}-text\">{text}</div>\n </div>\n <style>\n @keyframes spin {{\n from {{ transform: rotate(0deg); }}\n to {{ transform: rotate(360deg); }}\n }}\n\n .spinner-container {{\n display: flex;\n align-items: center;\n margin-bottom: 3px;\n }}\n\n .spinner {{\n border: 3px solid rgba(0, 0, 0, 0.1);\n border-left-color: 
lightblue;\n border-radius: 50%;\n width: 12px;\n height: 12px;\n animation: spin 1s linear infinite;\n }}\n\n .spinner-text {{\n padding-left: 6px;\n }}\n </style>\n \"\"\"\n display(HTML(spinner_html))\n\n def remove_spinner(self):\n js_code = f\"\"\"\n const element = document.getElementById(\"{self.spinner_id}-circle\");\n element.style.border = \"3px solid limegreen\";\n element.style.animation = \"none\";\n\n const text = document.getElementById(\"{self.spinner_id}-text\");\n text.innerText = \"{self.done_message}\";\n \"\"\"\n display(Javascript(js_code))" }, { "identifier": "StartingChromeDriverError", "path": "google_colab_selenium/exceptions.py", "snippet": "class StartingChromeDriverError(GoogleColabSeleniumError):\n \"\"\"Exception raised when ChromeDriver fails to start.\"\"\"\n pass" } ]
from google_colab_selenium.colab_selenium_manager import ColabSeleniumManager from google_colab_selenium.spinner import Spinner from google_colab_selenium.exceptions import StartingChromeDriverError from selenium.webdriver.chrome.options import Options from selenium import webdriver
1,448
class ChromeDriver(webdriver.Chrome): """ A thin wrapper around the Selenium Chrome Webdriver which makes it easy to use in Google Colab Notebooks. The ColabSeleniumManager class installs Google-Chrome-Stable and adds the necessary headers to use in a Colab Notebook. The headers that are automatically added are: --headless --no-sandbox --disable-dev-shm-usage --lang=en """ def __init__(self, options: Options = None, keep_alive: bool = True): self.manager = ColabSeleniumManager(options) try:
class ChromeDriver(webdriver.Chrome): """ A thin wrapper around the Selenium Chrome Webdriver which makes it easy to use in Google Colab Notebooks. The ColabSeleniumManager class installs Google-Chrome-Stable and adds the necessary headers to use in a Colab Notebook. The headers that are automatically added are: --headless --no-sandbox --disable-dev-shm-usage --lang=en """ def __init__(self, options: Options = None, keep_alive: bool = True): self.manager = ColabSeleniumManager(options) try:
with Spinner('Initializing Chromedriver', done='Initialized Chromedriver'):
1
2023-11-06 21:18:41+00:00
2k
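A hypothetical usage of the wrapper in the record above, importing from the module path shown (whether the package also re-exports ChromeDriver at the top level is not shown here, so the direct module import is assumed):

from google_colab_selenium.chromedriver import ChromeDriver

driver = ChromeDriver()            # installs Chrome and applies the default flags
driver.get("https://example.com")  # standard Selenium API from here on
print(driver.title)
driver.quit()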
microsoft/monitors4codegen
tests/monitor_guided_decoding/test_numargs_monitor_java.py
[ { "identifier": "create_test_context", "path": "tests/test_utils.py", "snippet": "@contextlib.contextmanager\ndef create_test_context(params: dict) -> Iterator[MultilspyContext]:\n \"\"\"\n Creates a test context for the given parameters.\n \"\"\"\n config = MultilspyConfig.from_dict(params)\n logger = MultilspyLogger()\n\n user_home_dir = os.path.expanduser(\"~\")\n multilspy_home_directory = str(pathlib.Path(user_home_dir, \".multilspy\"))\n temp_extract_directory = str(pathlib.Path(multilspy_home_directory, uuid4().hex))\n try:\n os.makedirs(temp_extract_directory, exist_ok=False)\n assert params['repo_url'].endswith('/')\n repo_zip_url = params['repo_url'] + f\"archive/{params['repo_commit']}.zip\"\n FileUtils.download_and_extract_archive(logger, repo_zip_url, temp_extract_directory, \"zip\")\n dir_contents = os.listdir(temp_extract_directory)\n assert len(dir_contents) == 1\n source_directory_path = str(pathlib.Path(temp_extract_directory, dir_contents[0]))\n\n yield MultilspyContext(config, logger, source_directory_path)\n finally:\n if os.path.exists(temp_extract_directory):\n shutil.rmtree(temp_extract_directory)" }, { "identifier": "is_cuda_available", "path": "tests/test_utils.py", "snippet": "def is_cuda_available() -> bool:\n \"\"\"\n Returns True if CUDA is available, False otherwise\n \"\"\"\n if torch.cuda.is_available():\n try:\n t = torch.rand(1).cuda()\n t = t * 2\n return True\n except RuntimeError:\n return False\n return False" } ]
import torch import transformers import pytest from pathlib import PurePath from monitors4codegen.multilspy.language_server import SyncLanguageServer from monitors4codegen.multilspy.multilspy_config import Language from tests.test_utils import create_test_context, is_cuda_available from transformers import AutoTokenizer, AutoModelForCausalLM from monitors4codegen.multilspy.multilspy_utils import TextUtils from monitors4codegen.monitor_guided_decoding.monitors.numargs_monitor import NumMethodArgumentsMonitor from monitors4codegen.monitor_guided_decoding.monitor import MonitorFileBuffer from monitors4codegen.monitor_guided_decoding.hf_gen import MGDLogitsProcessor from transformers.generation.utils import LogitsProcessorList from monitors4codegen.multilspy.multilspy_types import Position from monitors4codegen.monitor_guided_decoding.tokenizer_wrapper import HFTokenizerWrapper
792
""" This file contains tests for Monitor-Guided Decoding for correct number of arguments in Java """ pytest_plugins = ("pytest_asyncio",) @pytest.mark.asyncio async def test_multilspy_java_clickhouse_highlevel_sinker_modified_numargs(): """ Test the working of numargs_monitor with Java repository - clickhouse-highlevel-sinker modified """ code_language = Language.JAVA params = { "code_language": code_language, "repo_url": "https://github.com/LakshyAAAgrawal/clickhouse-highlevel-sinker/", "repo_commit": "5775fd7a67e7b60998e1614cf44a8a1fc3190ab0" } device = torch.device('cuda' if is_cuda_available() else 'cpu') model: transformers.modeling_utils.PreTrainedModel = AutoModelForCausalLM.from_pretrained( "bigcode/santacoder", trust_remote_code=True ).to(device) tokenizer = AutoTokenizer.from_pretrained("bigcode/santacoder")
""" This file contains tests for Monitor-Guided Decoding for correct number of arguments in Java """ pytest_plugins = ("pytest_asyncio",) @pytest.mark.asyncio async def test_multilspy_java_clickhouse_highlevel_sinker_modified_numargs(): """ Test the working of numargs_monitor with Java repository - clickhouse-highlevel-sinker modified """ code_language = Language.JAVA params = { "code_language": code_language, "repo_url": "https://github.com/LakshyAAAgrawal/clickhouse-highlevel-sinker/", "repo_commit": "5775fd7a67e7b60998e1614cf44a8a1fc3190ab0" } device = torch.device('cuda' if is_cuda_available() else 'cpu') model: transformers.modeling_utils.PreTrainedModel = AutoModelForCausalLM.from_pretrained( "bigcode/santacoder", trust_remote_code=True ).to(device) tokenizer = AutoTokenizer.from_pretrained("bigcode/santacoder")
with create_test_context(params) as context:
0
2023-11-04 21:49:04+00:00
2k
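The is_cuda_available helper used in the record above guards against machines where torch.cuda.is_available() reports True but kernels still fail; it probes with a tiny op. Reproduced standalone from the context snippet:

import torch

def is_cuda_available() -> bool:
    if torch.cuda.is_available():
        try:
            t = torch.rand(1).cuda()  # force an actual device allocation
            t = t * 2                 # and a real kernel launch
            return True
        except RuntimeError:
            return False
    return False

device = torch.device('cuda' if is_cuda_available() else 'cpu')
print(device)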
bigai-nlco/langsuite
langsuite/envs/teach/libs/teach/dataset/episode.py
[ { "identifier": "Initialization", "path": "langsuite/envs/teach/libs/teach/dataset/initialization.py", "snippet": "class Initialization:\n def __init__(\n self, time_start, agents=None, objects=None, custom_object_metadata=None\n ):\n self.time_start = time_start\n self.agents = agents if agents is not None else []\n self.objects = objects if objects is not None else []\n self.custom_object_metadata = (\n custom_object_metadata if custom_object_metadata is not None else {}\n )\n\n def add_agent(self, agent):\n self.agents.append(agent)\n\n def add_object(self, obj):\n self.objects.append(obj)\n\n def reset_time(self, time_desired=0):\n # Note: We could Unix time or any desired time instead of 0\n self.time_start = time_desired\n\n def to_dict(self):\n _dict = OrderedDict()\n _dict[\"time_start\"] = self.time_start\n\n if len(self.agents) > 0:\n _dict[\"agents\"] = [\n x if type(x) is dict else x.to_dict() for x in self.agents\n ]\n\n if len(self.objects) > 0:\n _dict[\"objects\"] = [\n x if type(x) is dict else x.to_dict() for x in self.objects\n ]\n\n if self.custom_object_metadata is not None:\n _dict[\"custom_object_metadata\"] = self.custom_object_metadata\n\n return _dict\n\n @classmethod\n def from_dict(cls, initialization_dict) -> \"Initialization\":\n agents = []\n objects = []\n\n if \"agents\" in initialization_dict:\n agents = [Pose_With_ID.from_dict(x) for x in initialization_dict[\"agents\"]]\n\n if \"objects\" in initialization_dict:\n objects = [\n Pose_With_ID.from_dict(x) for x in initialization_dict[\"objects\"]\n ]\n\n return cls(\n time_start=initialization_dict[\"time_start\"], agents=agents, objects=objects\n )" }, { "identifier": "Interaction", "path": "langsuite/envs/teach/libs/teach/dataset/interaction.py", "snippet": "class Interaction:\n def __init__(self, agent_id, action, is_object=False, status=None, time_start=None):\n self.agent_id = agent_id\n self.action = action\n self.is_object = is_object\n self.status = status\n self.time_start = time_start\n\n def to_dict(self):\n _dict = OrderedDict()\n\n if self.is_object:\n _dict[\"object_id\"] = self.agent_id\n else:\n _dict[\"agent_id\"] = self.agent_id\n\n _dict.update(self.action.to_dict())\n if self.status is not None:\n _dict[\"status\"] = self.status\n\n return _dict\n\n @classmethod\n def from_dict(cls, interaction_dict, action_type) -> \"Interaction\":\n if \"object_id\" in interaction_dict:\n is_object = True\n agent_id = interaction_dict[\"object_id\"]\n else:\n is_object = False\n agent_id = interaction_dict[\"agent_id\"]\n\n if action_type == \"Motion\":\n action = Action_Motion.from_dict(interaction_dict)\n elif action_type == \"MapGoal\":\n action = Action_MapGoal.from_dict(interaction_dict)\n elif action_type == \"ObjectInteraction\":\n action = Action_ObjectInteraction.from_dict(interaction_dict)\n elif action_type == \"ProgressCheck\":\n action = Action_ProgressCheck.from_dict(interaction_dict)\n elif action_type == \"Keyboard\":\n action = Action_Keyboard.from_dict(interaction_dict)\n elif action_type == \"Audio\":\n action = Action_Audio.from_dict(interaction_dict)\n else:\n action = Action_Basic.from_dict(interaction_dict)\n\n status = interaction_dict.get(\"status\")\n time_start = interaction_dict.get(\"time_start\")\n return cls(\n agent_id=agent_id,\n action=action,\n is_object=is_object,\n status=status,\n time_start=time_start,\n )" } ]
from collections import OrderedDict from langsuite.envs.teach.libs.teach.dataset.initialization import Initialization from langsuite.envs.teach.libs.teach.dataset.interaction import Interaction
1,457
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 from __future__ import annotations class Episode: def __init__( self, episode_id, world, world_type, commander_embodied, initial_state=None, interactions=None, ): self.episode_id = episode_id self.world = world self.world_type = world_type self.commander_embodied = commander_embodied self.initial_state = initial_state self.interactions = interactions if interactions is not None else [] self.final_state = None def reset_initial_state(self, initialization): self.initialization = initialization def add_interaction(self, interaction): self.interactions.append(interaction) def remove_interaction(self): if len(self.interactions) > 0: del self.interactions[-1] def to_dict(self): _dict = OrderedDict() _dict["episode_id"] = self.episode_id _dict["world"] = self.world _dict["world_type"] = self.world_type _dict["commander_embodied"] = str(self.commander_embodied) if self.initial_state is not None: _dict["initial_state"] = self.initial_state.to_dict() _dict["interactions"] = [x.to_dict() for x in self.interactions] if self.final_state is not None: _dict["final_state"] = self.final_state.to_dict() return _dict @classmethod def from_dict(cls, episode_dict, definitions, process_init_state=True) -> "Episode": interactions = [] for interaction_dict in episode_dict.get("interactions"): action_type = definitions.map_actions_id2info[ interaction_dict["action_id"] ]["action_type"] interaction = Interaction.from_dict(interaction_dict, action_type) interactions.append(interaction) return cls( episode_dict["episode_id"], episode_dict["world"], episode_dict["world_type"], episode_dict["commander_embodied"],
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 from __future__ import annotations class Episode: def __init__( self, episode_id, world, world_type, commander_embodied, initial_state=None, interactions=None, ): self.episode_id = episode_id self.world = world self.world_type = world_type self.commander_embodied = commander_embodied self.initial_state = initial_state self.interactions = interactions if interactions is not None else [] self.final_state = None def reset_initial_state(self, initialization): self.initialization = initialization def add_interaction(self, interaction): self.interactions.append(interaction) def remove_interaction(self): if len(self.interactions) > 0: del self.interactions[-1] def to_dict(self): _dict = OrderedDict() _dict["episode_id"] = self.episode_id _dict["world"] = self.world _dict["world_type"] = self.world_type _dict["commander_embodied"] = str(self.commander_embodied) if self.initial_state is not None: _dict["initial_state"] = self.initial_state.to_dict() _dict["interactions"] = [x.to_dict() for x in self.interactions] if self.final_state is not None: _dict["final_state"] = self.final_state.to_dict() return _dict @classmethod def from_dict(cls, episode_dict, definitions, process_init_state=True) -> "Episode": interactions = [] for interaction_dict in episode_dict.get("interactions"): action_type = definitions.map_actions_id2info[ interaction_dict["action_id"] ]["action_type"] interaction = Interaction.from_dict(interaction_dict, action_type) interactions.append(interaction) return cls( episode_dict["episode_id"], episode_dict["world"], episode_dict["world_type"], episode_dict["commander_embodied"],
initial_state=Initialization.from_dict(episode_dict["initial_state"])
0
2023-11-01 01:47:00+00:00
2k
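A small sketch of constructing and serializing an Episode, assuming the class definition shown in the record above is in scope (field values are illustrative):

episode = Episode(
    episode_id="ep-0",
    world="world-1",
    world_type="kitchen",
    commander_embodied=False,
)
# to_dict omits initial_state/final_state when None and stringifies the flag.
print(episode.to_dict())  # OrderedDict with episode_id, world, world_type, ...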
tmlr-group/DeepInception
conversers.py
[ { "identifier": "FALCON_PATH", "path": "config.py", "snippet": "FALCON_PATH = f\"{ROOT_PATH}/falcon-7b-instruct\"" }, { "identifier": "LLAMA_PATH", "path": "config.py", "snippet": "LLAMA_PATH = f\"{ROOT_PATH}/Llama-2-7b-hf\"" }, { "identifier": "TARGET_TEMP", "path": "config.py", "snippet": "TARGET_TEMP = 0" }, { "identifier": "TARGET_TOP_P", "path": "config.py", "snippet": "TARGET_TOP_P = 1" }, { "identifier": "VICUNA_PATH", "path": "config.py", "snippet": "VICUNA_PATH = f\"{ROOT_PATH}/vicuna-7b-v1.5\"" }, { "identifier": "GPT", "path": "language_models.py", "snippet": "class GPT(LanguageModel):\n API_RETRY_SLEEP = 10\n API_ERROR_OUTPUT = \"$ERROR$\"\n API_QUERY_SLEEP = 2\n API_MAX_RETRY = 5\n API_TIMEOUT = 20\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n def generate(self, conv: List[Dict], \n max_n_tokens: int, \n temperature: float,\n top_p: float):\n '''\n Args:\n conv: List of dictionaries, OpenAI API format\n max_n_tokens: int, max number of tokens to generate\n temperature: float, temperature for sampling\n top_p: float, top p for sampling\n Returns:\n str: generated response\n '''\n output = self.API_ERROR_OUTPUT\n for _ in range(self.API_MAX_RETRY):\n try:\n response = openai.ChatCompletion.create(\n model = self.model_name,\n messages = conv,\n max_tokens = max_n_tokens,\n temperature = temperature,\n )\n output = response[\"choices\"][0][\"message\"][\"content\"]\n break\n except openai.error.OpenAIError as e:\n print(type(e), e)\n time.sleep(self.API_RETRY_SLEEP)\n \n time.sleep(self.API_QUERY_SLEEP)\n return output \n \n def batched_generate(self, \n convs_list: List[List[Dict]],\n max_n_tokens: int, \n temperature: float,\n top_p: float = 1.0,):\n return [self.generate(conv, max_n_tokens, temperature, top_p) for conv in convs_list]" }, { "identifier": "HuggingFace", "path": "language_models.py", "snippet": "class HuggingFace(LanguageModel):\n def __init__(self,model_name, model, tokenizer):\n self.model_name = model_name\n self.model = model \n self.tokenizer = tokenizer\n self.eos_token_ids = [self.tokenizer.eos_token_id]\n\n def batched_generate(self, \n full_prompts_list,\n max_n_tokens: int, \n temperature: float,\n top_p: float = 1.0,):\n inputs = self.tokenizer(full_prompts_list, return_tensors='pt', padding=True)\n inputs = {k: v.to(self.model.device.index) for k, v in inputs.items()}\n \n # Batch generation\n if temperature > 0:\n output_ids = self.model.generate(\n **inputs,\n max_new_tokens=max_n_tokens, \n do_sample=True,\n temperature=temperature,\n eos_token_id=self.eos_token_ids,\n top_p=top_p,\n )\n else:\n output_ids = self.model.generate(\n **inputs,\n max_new_tokens=max_n_tokens, \n do_sample=False,\n eos_token_id=self.eos_token_ids,\n top_p=1,\n temperature=1, # To prevent warning messages\n )\n \n # If the model is not an encoder-decoder type, slice off the input tokens\n if not self.model.config.is_encoder_decoder:\n output_ids = output_ids[:, inputs[\"input_ids\"].shape[1]:]\n\n # Batch decoding\n outputs_list = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n for key in inputs:\n inputs[key].to('cpu')\n output_ids.to('cpu')\n del inputs, output_ids\n gc.collect()\n torch.cuda.empty_cache()\n\n return outputs_list\n\n def extend_eos_tokens(self): \n # Add closing braces for Vicuna/Llama eos when using attacker model\n self.eos_token_ids.extend([\n self.tokenizer.encode(\"}\")[1],\n 29913, \n 9092,\n 16675])" } ]
import torch import common from transformers import AutoModelForCausalLM, AutoTokenizer from config import (FALCON_PATH, LLAMA_PATH, TARGET_TEMP, TARGET_TOP_P, VICUNA_PATH) from language_models import GPT, HuggingFace
1,083
def load_attack_and_target_models(args): targetLM = TargetLM(model_name = args.target_model, max_n_tokens = args.target_max_n_tokens,
def load_attack_and_target_models(args): targetLM = TargetLM(model_name = args.target_model, max_n_tokens = args.target_max_n_tokens,
temperature = TARGET_TEMP, # init to 0
2
2023-11-07 12:47:47+00:00
2k
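One detail worth noting from the HuggingFace wrapper in the record above: for decoder-only models, generate() returns prompt plus completion, so the prompt tokens are sliced off before decoding. A toy illustration with stand-in ids:

import torch

input_ids = torch.tensor([[101, 102, 103]])          # stand-in prompt ids
output_ids = torch.tensor([[101, 102, 103, 7, 8]])   # stand-in generate() output
completion_ids = output_ids[:, input_ids.shape[1]:]  # drop the echoed prompt
print(completion_ids)  # tensor([[7, 8]])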
radekd91/inferno
inferno/datasets/FaceAlignmentTools.py
[ { "identifier": "bbox2point", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbox2point(left, right, top, bottom, type='bbox'):\n ''' bbox from detector and landmarks are different\n '''\n if type == 'kpt68':\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0\n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n elif type == 'bbox':\n old_size = (right - left + bottom - top) / 2\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0 + old_size * 0.12\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])\n elif type == \"mediapipe\":\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n else:\n raise NotImplementedError(f\" bbox2point not implemented for {type} \")\n if isinstance(center_x, np.ndarray):\n center = np.stack([center_x, center_y], axis=1)\n else: \n center = np.array([center_x, center_y])\n return old_size, center" }, { "identifier": "bbpoint_warp", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbpoint_warp(image, center, size, target_size_height, target_size_width=None, output_shape=None, inv=True, landmarks=None, \n order=3 # order of interpolation, bicubic by default\n ):\n target_size_width = target_size_width or target_size_height\n tform = point2transform(center, size, target_size_height, target_size_width)\n tf = tform.inverse if inv else tform\n output_shape = output_shape or (target_size_height, target_size_width)\n dst_image = warp(image, tf, output_shape=output_shape, order=order)\n if landmarks is None:\n return dst_image\n # points need the matrix\n if isinstance(landmarks, np.ndarray):\n assert isinstance(landmarks, np.ndarray)\n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = tf_lmk(landmarks[:, :2])\n elif isinstance(landmarks, list): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = [] \n for i in range(len(landmarks)):\n dst_landmarks += [tf_lmk(landmarks[i][:, :2])]\n elif isinstance(landmarks, dict): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = {}\n for key, value in landmarks.items():\n dst_landmarks[key] = tf_lmk(landmarks[key][:, :2])\n else: \n raise ValueError(\"landmarks must be np.ndarray, list or dict\")\n return dst_image, dst_landmarks" } ]
import numpy as np import skvideo import types from pathlib import Path from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp
1,135
def align_face(image, landmarks, landmark_type, scale_adjustment, target_size_height, target_size_width=None,): """ Returns an image with the face aligned to the center of the image. :param image: The full resolution image in which to align the face. :param landmarks: The landmarks of the face in the image (in the original image coordinates). :param landmark_type: The type of landmarks. Such as 'kpt68' or 'bbox' or 'mediapipe'. :param scale_adjustment: The scale adjustment to apply to the image. :param target_size_height: The height of the output image. :param target_size_width: The width of the output image. If not provided, it is assumed to be the same as target_size_height. :return: The aligned face image. The image will be in range [0,1]. """ # landmarks_for_alignment = "mediapipe" left = landmarks[:,0].min() top = landmarks[:,1].min() right = landmarks[:,0].max() bottom = landmarks[:,1].max() old_size, center = bbox2point(left, right, top, bottom, type=landmark_type) size = (old_size * scale_adjustment).astype(np.int32)
def align_face(image, landmarks, landmark_type, scale_adjustment, target_size_height, target_size_width=None,): """ Returns an image with the face aligned to the center of the image. :param image: The full resolution image in which to align the face. :param landmarks: The landmarks of the face in the image (in the original image coordinates). :param landmark_type: The type of landmarks. Such as 'kpt68' or 'bbox' or 'mediapipe'. :param scale_adjustment: The scale adjustment to apply to the image. :param target_size_height: The height of the output image. :param target_size_width: The width of the output image. If not provided, it is assumed to be the same as target_size_height. :return: The aligned face image. The image will be in range [0,1]. """ # landmarks_for_alignment = "mediapipe" left = landmarks[:,0].min() top = landmarks[:,1].min() right = landmarks[:,0].max() bottom = landmarks[:,1].max() old_size, center = bbox2point(left, right, top, bottom, type=landmark_type) size = (old_size * scale_adjustment).astype(np.int32)
img_warped, lmk_warped = bbpoint_warp(image, center, size, target_size_height, target_size_width, landmarks=landmarks)
1
2023-11-07 20:13:32+00:00
2k
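Worked numbers for the kpt68 branch of bbox2point used by align_face in the record above: old_size is 1.1 times the mean of width and height, and the center is the box midpoint (coordinates illustrative):

import numpy as np

left, right, top, bottom = 10.0, 110.0, 20.0, 140.0
old_size = (right - left + bottom - top) / 2 * 1.1   # (100 + 120) / 2 * 1.1 ~ 121.0
center = np.array([right - (right - left) / 2.0,
                   bottom - (bottom - top) / 2.0])   # [60., 80.]
print(old_size, center)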
hxz393/ConfigCenterComparer
module/get_query_sql.py
[ { "identifier": "SQL_CONFIG_NACOS", "path": "config/settings.py", "snippet": "SQL_CONFIG_NACOS = \"\"\"\nSELECT\n data_id,\n group_id,\n content,\n gmt_modified\nFROM\n config_info\n\"\"\"" }, { "identifier": "SQL_CONFIG_APOLLO_ID", "path": "config/settings.py", "snippet": "SQL_CONFIG_APOLLO_ID = \"\"\"\nSELECT\n n.AppId,\n n.NamespaceName,\n i.`Key`,\n i.`Value`,\n i.DataChange_LastTime\nFROM\n Item i\nINNER JOIN Namespace n ON i.NamespaceId = n.Id\nWHERE\n i.IsDeleted = 0\n AND i.`Key` != '';\n\"\"\"" }, { "identifier": "SQL_CONFIG_APOLLO_NAME", "path": "config/settings.py", "snippet": "SQL_CONFIG_APOLLO_NAME = \"\"\"\nSELECT\n App.Name,\n n.NamespaceName,\n i.`Key`,\n i.`Value`,\n i.DataChange_LastTime\nFROM\n Item i\nINNER JOIN Namespace n ON i.NamespaceId = n.Id\nINNER JOIN App ON n.AppId = App.AppId\nWHERE\n i.IsDeleted = 0\n AND i.`Key` != '';\n\"\"\"" }, { "identifier": "APOLLO_NAME_LIST", "path": "config/settings.py", "snippet": "APOLLO_NAME_LIST = ['AppId', 'Name', ]" } ]
import logging from typing import Dict, Optional from config.settings import SQL_CONFIG_NACOS, SQL_CONFIG_APOLLO_ID, SQL_CONFIG_APOLLO_NAME, APOLLO_NAME_LIST
753
""" 此模块用于处理配置中心相关的查询,包括从不同的配置中心获取 SQL 查询语句。 本模块提供了 `get_query_sql` 函数,用于根据配置中心类型和 Apollo 应用名称获取对应的查询 SQL。支持从 Nacos 和 Apollo 配置中心获取数据。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) def get_query_sql(config_main: Dict[str, str]) -> Optional[str]: """ 根据配置中心类型和 Apollo 应用名称获取查询 SQL。 此函数接收一个字典,包含配置中心类型和 Apollo 应用名称。它根据配置中心类型(Nacos 或 Apollo)以及 Apollo 应用名称('AppId' 或 'Name'),返回相应的 SQL 查询语句。 :param config_main: 包含配置中心类型和 Apollo 应用名称的字典。 :type config_main: Dict[str, str] :return: 对应的查询 SQL 语句。如果无法匹配到合适的配置中心或应用名称,则返回 None。 :rtype: Optional[str] :example: >>> get_query_sql({"config_center": "Nacos"}) SQL_CONFIG_NACOS >>> get_query_sql({"config_center": "Apollo", "apollo_name": "AppId"}) SQL_CONFIG_APOLLO_ID >>> get_query_sql({"config_center": "Apollo", "apollo_name": "Name"}) SQL_CONFIG_APOLLO_NAME """ try: config_center = config_main.get('config_center') apollo_name = config_main.get('apollo_name') if config_center == 'Nacos': return SQL_CONFIG_NACOS
""" 此模块用于处理配置中心相关的查询,包括从不同的配置中心获取 SQL 查询语句。 本模块提供了 `get_query_sql` 函数,用于根据配置中心类型和 Apollo 应用名称获取对应的查询 SQL。支持从 Nacos 和 Apollo 配置中心获取数据。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) def get_query_sql(config_main: Dict[str, str]) -> Optional[str]: """ 根据配置中心类型和 Apollo 应用名称获取查询 SQL。 此函数接收一个字典,包含配置中心类型和 Apollo 应用名称。它根据配置中心类型(Nacos 或 Apollo)以及 Apollo 应用名称('AppId' 或 'Name'),返回相应的 SQL 查询语句。 :param config_main: 包含配置中心类型和 Apollo 应用名称的字典。 :type config_main: Dict[str, str] :return: 对应的查询 SQL 语句。如果无法匹配到合适的配置中心或应用名称,则返回 None。 :rtype: Optional[str] :example: >>> get_query_sql({"config_center": "Nacos"}) SQL_CONFIG_NACOS >>> get_query_sql({"config_center": "Apollo", "apollo_name": "AppId"}) SQL_CONFIG_APOLLO_ID >>> get_query_sql({"config_center": "Apollo", "apollo_name": "Name"}) SQL_CONFIG_APOLLO_NAME """ try: config_center = config_main.get('config_center') apollo_name = config_main.get('apollo_name') if config_center == 'Nacos': return SQL_CONFIG_NACOS
elif config_center == 'Apollo' and apollo_name in APOLLO_NAME_LIST:
3
2023-11-07 01:02:38+00:00
2k
pytorch-labs/ao
torchao/quantization/smoothquant.py
[ { "identifier": "dynamically_quantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):\n # assumes symmetric quantization\n # assumes axis == 0\n # assumes dense memory format\n # TODO(future): relax ^ as needed\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n # get min and max\n min_val, max_val = torch.aminmax(x, dim=1)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n # reference: https://fburl.com/code/4wll53rk\n max_val_pos = torch.max(-min_val_neg, max_val_pos)\n scale = max_val_pos / (float(quant_max - quant_min) / 2)\n # ensure scale is the same dtype as the original tensor\n scale = torch.clamp(scale, min=eps).to(x.dtype)\n zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n x_div = x.transpose(0, 1) / scale\n x_round = torch.round(x_div)\n x_zp = x_round + zero_point\n x_zp = x_zp.transpose(0, 1)\n quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)\n\n return quant, scale, zero_point" }, { "identifier": "quant_int8_dynamic_per_token_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_per_token_linear(\n x,\n w_vals_int8_t,\n w_scales,\n bias,\n out_dtype,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scales = quantize_activation_per_token_absmax(x)\n mm_out = quant_int8_per_token_matmul(\n x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype\n )\n if bias is not None:\n mm_out += bias\n return mm_out" } ]
import torch import torch.nn.functional as F import torchao.quantization.quant_api as quant_api from .quant_primitives import ( dynamically_quantize_per_channel, quant_int8_dynamic_per_token_linear, )
1,488
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """ Testing out accuracy-only implementation of SmoothQuant (https://arxiv.org/pdf/2211.10438.pdf) Note: this is an application of input-weight equalization, with the addition that the multiplication by scale is fused into the preceding layer, specifically for relevant parts of transformer blocks. """ __all__ = [ "get_scale", "SmoothFakeDynQuantMixin", "SmoothFakeDynamicallyQuantizedLinear", "swap_linear_with_smooth_fq_linear", "smooth_fq_linear_to_inference", "set_smooth_fq_attribute", ] def get_scale(X_absmax, W_absmax, alpha=0.5): """ Calculate the scale based on abs(max(X)), abs(max(W)) and alpha If X is of dimension `b*n*k` and W is dimension `k*m`, the returned scale is of dimension `k`. Note: X_absmax is calculated outside of this function because we need to keep a running version of it during calibration. W_absmax is calculated outside of this function for consistency with X_absmax. """ X_pow = torch.pow(X_absmax, alpha) W_pow = torch.pow(W_absmax, 1.0 - alpha) div = X_pow / W_pow return div.reshape(-1) class SmoothFakeDynQuantMixin(torch.nn.Module): def init_smoothquant_variables(self, alpha): self.calibrating = True self.x_running_abs_max = None self.register_buffer("smooth_scale", None) self.alpha = alpha # debug only self.debug_skip_scaling = False # self.debug_skip_scaling = True # Currently torch._int_mm cuBLAS underlying kernel does not work with # non-contiguous weight. However, torch.compil'ing through # torch._int_mm leads to triton code which is ~2x faster if the weight # is transposed. So, for now we have a debug flag to toggle whether # we store the quantized weight transposed, so that we can get correct # numerics both in eager mode and after torch.compile. # The default is True for cuBLAS / eager mode, set to False for # torch.compile. # self.store_w_int_repr_t = True self.store_w_int_repr_t = False def update_x_running_abs_max(self, X): # update the running max of incoming activations all_dims_except_last = tuple(range(len(X.shape) - 1)) cur_abs_max = torch.amax(torch.abs(X), dim=all_dims_except_last) if self.x_running_abs_max is None: self.x_running_abs_max = cur_abs_max else: self.x_running_abs_max = torch.max(cur_abs_max, self.x_running_abs_max) def get_scaled_quantized_w(self): # inference assert ( self.smooth_scale is not None ), "self.smooth_scale is None, did you turn on inference?" W = self.weight # scale weight # in the future, this can be done ahead of time instead of # during inference if not self.debug_skip_scaling: # TODO(future): do below in `to_inference` instead of here W = torch.matmul( torch.diag(self.smooth_scale), W.transpose(0, 1) ).transpose(0, 1) # fake quantize input and weight, and then do matmul in fp32/fp16 # in the future, this should be replaced with quantized kernels which # work on NVIDIA GPUs (such as protoquant's implementation) W_dq_dtype = W.dtype
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """ Testing out accuracy-only implementation of SmoothQuant (https://arxiv.org/pdf/2211.10438.pdf) Note: this is an application of input-weight equalization, with the addition that the multiplication by scale is fused into the preceding layer, specifically for relevant parts of transformer blocks. """ __all__ = [ "get_scale", "SmoothFakeDynQuantMixin", "SmoothFakeDynamicallyQuantizedLinear", "swap_linear_with_smooth_fq_linear", "smooth_fq_linear_to_inference", "set_smooth_fq_attribute", ] def get_scale(X_absmax, W_absmax, alpha=0.5): """ Calculate the scale based on abs(max(X)), abs(max(W)) and alpha If X is of dimension `b*n*k` and W is dimension `k*m`, the returned scale is of dimension `k`. Note: X_absmax is calculated outside of this function because we need to keep a running version of it during calibration. W_absmax is calculated outside of this function for consistency with X_absmax. """ X_pow = torch.pow(X_absmax, alpha) W_pow = torch.pow(W_absmax, 1.0 - alpha) div = X_pow / W_pow return div.reshape(-1) class SmoothFakeDynQuantMixin(torch.nn.Module): def init_smoothquant_variables(self, alpha): self.calibrating = True self.x_running_abs_max = None self.register_buffer("smooth_scale", None) self.alpha = alpha # debug only self.debug_skip_scaling = False # self.debug_skip_scaling = True # Currently torch._int_mm cuBLAS underlying kernel does not work with # non-contiguous weight. However, torch.compil'ing through # torch._int_mm leads to triton code which is ~2x faster if the weight # is transposed. So, for now we have a debug flag to toggle whether # we store the quantized weight transposed, so that we can get correct # numerics both in eager mode and after torch.compile. # The default is True for cuBLAS / eager mode, set to False for # torch.compile. # self.store_w_int_repr_t = True self.store_w_int_repr_t = False def update_x_running_abs_max(self, X): # update the running max of incoming activations all_dims_except_last = tuple(range(len(X.shape) - 1)) cur_abs_max = torch.amax(torch.abs(X), dim=all_dims_except_last) if self.x_running_abs_max is None: self.x_running_abs_max = cur_abs_max else: self.x_running_abs_max = torch.max(cur_abs_max, self.x_running_abs_max) def get_scaled_quantized_w(self): # inference assert ( self.smooth_scale is not None ), "self.smooth_scale is None, did you turn on inference?" W = self.weight # scale weight # in the future, this can be done ahead of time instead of # during inference if not self.debug_skip_scaling: # TODO(future): do below in `to_inference` instead of here W = torch.matmul( torch.diag(self.smooth_scale), W.transpose(0, 1) ).transpose(0, 1) # fake quantize input and weight, and then do matmul in fp32/fp16 # in the future, this should be replaced with quantized kernels which # work on NVIDIA GPUs (such as protoquant's implementation) W_dq_dtype = W.dtype
W_int_repr, W_scales, W_zps = dynamically_quantize_per_channel(
0
2023-11-03 21:27:36+00:00
2k
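Worked numbers for get_scale in the record above at the default alpha=0.5, where the per-channel scale reduces to sqrt(X_absmax / W_absmax), balancing activation and weight ranges (tensor values illustrative):

import torch

X_absmax = torch.tensor([4.0, 9.0])
W_absmax = torch.tensor([1.0, 1.0])
alpha = 0.5
scale = torch.pow(X_absmax, alpha) / torch.pow(W_absmax, 1.0 - alpha)
print(scale)  # tensor([2., 3.])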