Dataset fields:
  repo_name            string (length 7-71)
  file_path            string (length 5-118)
  context              list
  import_statement     string (length 45-12.5k)
  token_num            int64 (641-99.4k)
  cropped_code         string (length 44-17k)
  all_code             string (length 43-754k)
  next_line            string (length 2-330)
  gold_snippet_index   int64 (0-68)
  created_at           string (length 25-25)
  level                string (9 classes)
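Each row below appears to pair a file's retrieved context snippets, its import statements, and the preceding (cropped) code with a single gold next line. The following is a minimal sketch of how a record with these fields might be consumed for a next-line completion task; the helper functions, the example record contents, and the exact-match scoring rule are illustrative assumptions, not part of the dataset.

```python
def build_prompt(record: dict) -> str:
    """Join retrieved context snippets, the file's imports, and the cropped code into one prompt."""
    context_snippets = "\n".join(
        item.get("snippet", "") for item in record.get("context", [])
    )
    parts = (context_snippets, record["import_statement"], record["cropped_code"])
    return "\n\n".join(part for part in parts if part)


def is_exact_match(prediction: str, record: dict) -> bool:
    """Whitespace-insensitive comparison of a model prediction against the gold next_line."""
    return prediction.strip() == record["next_line"].strip()


if __name__ == "__main__":
    # Hypothetical record using the same field names as the rows below.
    record = {
        "repo_name": "example/repo",
        "file_path": "pkg/module.py",
        "context": [{"identifier": "Foo", "path": "pkg/foo.py", "snippet": "class Foo: ..."}],
        "import_statement": "from pkg.foo import Foo",
        "cropped_code": "def make_foo():",
        "all_code": "def make_foo():\n    return Foo()",
        "next_line": "    return Foo()",
        "gold_snippet_index": 0,
        "created_at": "2023-11-10 07:40:22+00:00",
        "level": "2k",
    }
    print(build_prompt(record))
    print(is_exact_match("    return Foo()", record))
```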
repo_name: WU-CVGL/BAD-NeRFstudio
file_path: badnerf/badnerf_method_config.py
[ { "identifier": "BadNerfCameraOptimizerConfig", "path": "badnerf/cameras/badnerf_camera_optimizer.py", "snippet": "class BadNerfCameraOptimizerConfig(InstantiateConfig):\n \"\"\"Configuration of BAD-NeRF camera optimizer.\"\"\"\n\n _target: Type = field(default_factory=lambda: BadNerfCameraOptimizer)\n \"\"\"The target class to be instantiated.\"\"\"\n\n mode: Literal[\"off\", \"linear\", \"bspline\"] = \"off\"\n \"\"\"Pose optimization strategy to use.\n linear: linear interpolation on SE(3);\n bspline: cubic b-spline interpolation on SE(3).\"\"\"\n\n num_virtual_views: int = 10\n \"\"\"The number of samples used to model the motion-blurring.\"\"\"\n\n initial_noise_se3_std: float = 1e-5\n \"\"\"Initial perturbation to pose delta on se(3). Must be non-zero to prevent NaNs.\"\"\"" }, { "identifier": "BadNerfDataManagerConfig", "path": "badnerf/data/badnerf_datamanager.py", "snippet": "class BadNerfDataManagerConfig(VanillaDataManagerConfig):\n \"\"\"A depth datamanager - required to use with .setup()\"\"\"\n\n _target: Type = field(default_factory=lambda: BadNerfDataManager)" }, { "identifier": "BadNerfDataParserConfig", "path": "badnerf/data/badnerf_dataparser.py", "snippet": "class BadNerfDataParserConfig(NerfstudioDataParserConfig):\n \"\"\"Nerfstudio dataset config\"\"\"\n\n _target: Type = field(default_factory=lambda: BadNerfDataParser)\n \"\"\"target class to instantiate\"\"\"\n scale_factor: float = 0.25\n \"\"\"How much to scale the camera origins by.\"\"\"" }, { "identifier": "BadNerfTrainerConfig", "path": "badnerf/engine/badnerf_trainer.py", "snippet": "class BadNerfTrainerConfig(TrainerConfig):\n \"\"\"Configuration for BAD-NeRF training\"\"\"\n\n _target: Type = field(default_factory=lambda: BadNerfTrainer)\n pipeline: BadNerfPipelineConfig = BadNerfPipelineConfig()\n \"\"\"BAD-NeRF pipeline configuration\"\"\"" }, { "identifier": "BadNerfactoModelConfig", "path": "badnerf/models/badnerfacto.py", "snippet": "class BadNerfactoModelConfig(NerfactoModelConfig):\n \"\"\"BAD-NeRF-nerfacto Model Config\"\"\"\n\n _target: Type = field(\n default_factory=lambda: BadNerfactoModel\n )\n \"\"\"The target class to be instantiated.\"\"\"\n\n camera_optimizer: BadNerfCameraOptimizerConfig = BadNerfCameraOptimizerConfig()\n \"\"\"Config of the camera optimizer to use\"\"\"" }, { "identifier": "BadNerfPipelineConfig", "path": "badnerf/pipelines/badnerf_pipeline.py", "snippet": "class BadNerfPipelineConfig(VanillaPipelineConfig):\n \"\"\"BAD-NeRF pipeline config\"\"\"\n\n _target: Type = field(default_factory=lambda: BadNerfPipeline)\n num_virtual_views: int = 10\n \"\"\"Number of virtual sharp images to re-blur\"\"\"" } ]
from nerfstudio.configs.base_config import ViewerConfig from nerfstudio.engine.optimizers import AdamOptimizerConfig from nerfstudio.engine.schedulers import ExponentialDecaySchedulerConfig from nerfstudio.plugins.types import MethodSpecification from badnerf.cameras.badnerf_camera_optimizer import BadNerfCameraOptimizerConfig from badnerf.data.badnerf_datamanager import BadNerfDataManagerConfig from badnerf.data.badnerf_dataparser import BadNerfDataParserConfig from badnerf.engine.badnerf_trainer import BadNerfTrainerConfig from badnerf.models.badnerfacto import BadNerfactoModelConfig from badnerf.pipelines.badnerf_pipeline import BadNerfPipelineConfig
token_num: 902
""" BAD-NeRF config. """ badnerf_nerfacto = MethodSpecification( config=BadNerfTrainerConfig( method_name="bad-nerfacto", steps_per_eval_all_images=500, steps_per_save=2000, max_num_iterations=30001, mixed_precision=False, use_grad_scaler=True,
""" BAD-NeRF config. """ badnerf_nerfacto = MethodSpecification( config=BadNerfTrainerConfig( method_name="bad-nerfacto", steps_per_eval_all_images=500, steps_per_save=2000, max_num_iterations=30001, mixed_precision=False, use_grad_scaler=True,
next_line: pipeline=BadNerfPipelineConfig(
gold_snippet_index: 5
created_at: 2023-11-10 07:40:22+00:00
level: 2k
repo_name: nttcom/WASB-SBDT
file_path: src/runners/train_and_test.py
[ { "identifier": "BaseRunner", "path": "src/runners/base.py", "snippet": "class BaseRunner:\n def __init__(\n self,\n cfg: DictConfig,\n ):\n self._cfg = cfg\n log.info('run {}'.format(self._cfg['runner']['name']))\n self._output_dir = cfg['output_dir']\n\n def run(self):\n raise NotImplementedError" }, { "identifier": "train_epoch", "path": "src/runners/runner_utils.py", "snippet": "def train_epoch(epoch, model, train_loader, loss_criterion, optimizer, device):\n batch_loss = AverageMeter()\n model.train()\n t_start = time.time()\n for batch_idx, (imgs, hms) in enumerate(tqdm(train_loader, desc='[(TRAIN) Epoch {}]'.format(epoch)) ):\n\n for scale, hm in hms.items():\n hms[scale] = hm.to(device)\n\n optimizer.zero_grad()\n \n preds = model(imgs)\n loss = loss_criterion(preds, hms)\n loss.backward()\n optimizer.step()\n\n batch_loss.update(loss.item(), preds[0].size(0))\n t_elapsed = time.time() - t_start\n\n log.info('(TRAIN) Epoch {epoch} Loss:{batch_loss.avg:.6f} Time:{time:.1f}(sec)'.format(epoch=epoch, batch_loss=batch_loss, time=t_elapsed))\n return {'epoch':epoch, 'loss':batch_loss.avg}" }, { "identifier": "test_epoch", "path": "src/runners/runner_utils.py", "snippet": "@torch.no_grad()\ndef test_epoch(epoch, model, dataloader, loss_criterion, device, cfg, vis_dir=None):\n\n batch_loss = AverageMeter()\n model.eval()\n \n t_start = time.time()\n for batch_idx, (imgs, hms, trans, xys_gt, visis_gt, img_paths) in enumerate(tqdm(dataloader, desc='[(TEST) Epoch {}]'.format(epoch))):\n imgs = imgs.to(device)\n for scale, hm in hms.items():\n hms[scale] = hm.to(device)\n preds = model(imgs)\n loss = loss_criterion(preds, hms)\n batch_loss.update(loss.item(), preds[0].size(0))\n t_elapsed = time.time() - t_start\n\n log.info('(TEST) Epoch {epoch} Loss:{batch_loss.avg:.6f} Time:{time:.1f}(sec)'.format(epoch=epoch, batch_loss=batch_loss, time=t_elapsed))\n return {'epoch': epoch, 'loss':batch_loss.avg }" } ]
import os import os.path as osp import shutil import time import logging import hydra import numpy as np import torch from tqdm import tqdm from omegaconf import DictConfig, OmegaConf from hydra.core.hydra_config import HydraConfig from torch import nn from models import build_model from dataloaders import build_dataloader from losses import build_loss_criteria from optimizers import build_optimizer_and_scheduler from utils import save_checkpoint, set_seed, mkdir_if_missing, count_params, AverageMeter from .inference_videos import VideosInferenceRunner from .base import BaseRunner from .runner_utils import train_epoch, test_epoch
token_num: 889
log = logging.getLogger(__name__) def update_fp1_example(epoch, model, vi_runner, fp1_fpath, ): vi_results = vi_runner.run(model=model) print(vi_results['fp1_im_list_dict']) print(fp1_fpath) fp1_im_list_dict = vi_results['fp1_im_list_dict'] with open(fp1_fpath, 'w') as f: for key, im_list in fp1_im_list_dict.items(): for path in im_list: f.write('{}\n'.format(path)) fp1_fpath_current = osp.splitext(fp1_fpath)[0] + '_{}.txt'.format(epoch) shutil.copyfile(fp1_fpath, fp1_fpath_current)
next_line: class Trainer(BaseRunner):
gold_snippet_index: 0
created_at: 2023-11-15 02:11:00+00:00
level: 2k
repo_name: barkure/white-dove-backend
file_path: services/users.py
[ { "identifier": "SessionLocal", "path": "db.py", "snippet": "DATABASE_URL = \"sqlite:///./data.db\"" }, { "identifier": "Users", "path": "models.py", "snippet": "class Users(Base):\n __tablename__ = \"Users\"\n\n # fields\n user_id = Column(Integer,primary_key=True, index=True)\n userName = Column(String(20))\n password = Column(String(20))\n email = Column(String(20))\n GitHub_id = Column(String(20))" }, { "identifier": "BlogSettings", "path": "models.py", "snippet": "class BlogSettings(Base):\n __tablename__ = \"BlogSettings\"\n\n # fields\n setting_id = Column(Integer,primary_key=True, index=True)\n blogName = Column(String(100))\n faviconName = Column(String(100))" }, { "identifier": "create_access_token", "path": "services/auth_utils.py", "snippet": "def create_access_token(data: dict, expires_delta: timedelta):\n to_encode = data.copy()\n expire = datetime.utcnow() + expires_delta\n to_encode.update({\"exp\": expire})\n encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)\n return encoded_jwt" }, { "identifier": "GITHUB_CLIENT_ID", "path": "config.py", "snippet": "GITHUB_CLIENT_ID = os.getenv(\"GITHUB_CLIENT_ID\")\r" }, { "identifier": "GITHUB_CLIENT_SECRET", "path": "config.py", "snippet": "GITHUB_CLIENT_SECRET = os.getenv(\"GITHUB_CLIENT_SECRET\")\r" }, { "identifier": "ACCESS_TOKEN_EXPIRE_MINUTES", "path": "config.py", "snippet": "ACCESS_TOKEN_EXPIRE_MINUTES = int(os.getenv(\"ACCESS_TOKEN_EXPIRE_MINUTES\")) # 默认24小时\r" } ]
from datetime import timedelta from db import SessionLocal from models import Users, BlogSettings from services.auth_utils import create_access_token from config import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, ACCESS_TOKEN_EXPIRE_MINUTES import requests
token_num: 1,295
"email": user.email, "GitHub_id": user.GitHub_id } else: return ["User not found"] # 更新用户 def update_user(payload: dict): user_id = payload.get("user_id") userName = payload.get("userName") password = payload.get("password") email = payload.get("email") GitHub_id = payload.get("GitHub_id") db = SessionLocal() user = db.query(Users).filter(Users.user_id == user_id).first() if user: if userName is not None: user.userName = userName if password is not None: user.password = password if email is not None: user.email = email if GitHub_id is not None: user.GitHub_id = GitHub_id db.commit() db.close() return { "update_yes": True, } else: db.close() return { "update_yes": False, } # 删除用户 def delete_user(payload: dict): user_id = payload.get("user_id") db = SessionLocal() user = db.query(Users).filter(Users.user_id == user_id).first() if user: db.delete(user) db.commit() db.close() return "User deleted" else: db.close() return "User not found" # 查询所有用户 def get_all_users(): db = SessionLocal() all_users = db.query(Users).all() db.close() user_list = [] for user in all_users: user_dict = { "user_id": user.user_id, "userName": user.userName, "email": user.email, "GitHub_id": user.GitHub_id } user_list.append(user_dict) return user_list # 登录验证 def login(payload: dict): userNameOrEmail = payload.get("userNameOrEmail") password = payload.get("password") db = SessionLocal() user = db.query(Users).filter((Users.userName == userNameOrEmail) | (Users.email == userNameOrEmail)).first() db.close() if user: if user.password == password: access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token(data={"sub": user.userName}, expires_delta=access_token_expires) return { "login_yes": True, "token": access_token, "userName": user.userName, "email": user.email, "user_id": user.user_id, "GitHub_id": user.GitHub_id } else: return { "login_yes": False, "token": None, } else: return { "login_yes": False, "token": None, } # 绑定 GitHub 账号 def bind_github(GitHub_id: str, user_id: int): db = SessionLocal() user = db.query(Users).filter(Users.user_id == user_id).first() if user: user.GitHub_id = GitHub_id db.commit() db.close() return { "bind_yes": True, "GitHub_id": GitHub_id, } else: db.close() return { "bind_yes": False, } # Github OAuth def github_oauth(payload: dict): code = payload.get("code") user_id = payload.get("user_id") operation = payload.get("operation") # 根据 operation 判断是登录还是绑定 print('Code:', code, 'Operation:', operation)
# 添加用户 def create_user(payload: dict): userName = payload.get("userName") password = payload.get("password") email = payload.get("email") db = SessionLocal() new_user = Users(userName=userName, password=password, email=email) db.add(new_user) db.commit() db.close() return "User created" # 查询用户 def get_user(payload: dict): user_id = payload.get("user_id") db = SessionLocal() user = db.query(Users).filter(Users.user_id == user_id).first() db.close() if user: return { "user_id": user.user_id, "userName": user.userName, "email": user.email, "GitHub_id": user.GitHub_id } else: return ["User not found"] # 更新用户 def update_user(payload: dict): user_id = payload.get("user_id") userName = payload.get("userName") password = payload.get("password") email = payload.get("email") GitHub_id = payload.get("GitHub_id") db = SessionLocal() user = db.query(Users).filter(Users.user_id == user_id).first() if user: if userName is not None: user.userName = userName if password is not None: user.password = password if email is not None: user.email = email if GitHub_id is not None: user.GitHub_id = GitHub_id db.commit() db.close() return { "update_yes": True, } else: db.close() return { "update_yes": False, } # 删除用户 def delete_user(payload: dict): user_id = payload.get("user_id") db = SessionLocal() user = db.query(Users).filter(Users.user_id == user_id).first() if user: db.delete(user) db.commit() db.close() return "User deleted" else: db.close() return "User not found" # 查询所有用户 def get_all_users(): db = SessionLocal() all_users = db.query(Users).all() db.close() user_list = [] for user in all_users: user_dict = { "user_id": user.user_id, "userName": user.userName, "email": user.email, "GitHub_id": user.GitHub_id } user_list.append(user_dict) return user_list # 登录验证 def login(payload: dict): userNameOrEmail = payload.get("userNameOrEmail") password = payload.get("password") db = SessionLocal() user = db.query(Users).filter((Users.userName == userNameOrEmail) | (Users.email == userNameOrEmail)).first() db.close() if user: if user.password == password: access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token(data={"sub": user.userName}, expires_delta=access_token_expires) return { "login_yes": True, "token": access_token, "userName": user.userName, "email": user.email, "user_id": user.user_id, "GitHub_id": user.GitHub_id } else: return { "login_yes": False, "token": None, } else: return { "login_yes": False, "token": None, } # 绑定 GitHub 账号 def bind_github(GitHub_id: str, user_id: int): db = SessionLocal() user = db.query(Users).filter(Users.user_id == user_id).first() if user: user.GitHub_id = GitHub_id db.commit() db.close() return { "bind_yes": True, "GitHub_id": GitHub_id, } else: db.close() return { "bind_yes": False, } # Github OAuth def github_oauth(payload: dict): code = payload.get("code") user_id = payload.get("user_id") operation = payload.get("operation") # 根据 operation 判断是登录还是绑定 print('Code:', code, 'Operation:', operation)
next_line: resp1 = requests.post("https://github.com/login/oauth/access_token?"+"client_id="+GITHUB_CLIENT_ID+"&client_secret="+GITHUB_CLIENT_SECRET+"&code="+code, headers={"Accept": "application/json"})
gold_snippet_index: 4
created_at: 2023-11-11 04:46:58+00:00
level: 2k
repo_name: BobaZooba/xllm-demo
file_path: xllm_demo/core/registry.py
[ { "identifier": "DATASET_KEY", "path": "xllm_demo/core/constants.py", "snippet": "DATASET_KEY = \"antropic\"" }, { "identifier": "COLLATOR_KEY", "path": "xllm_demo/core/constants.py", "snippet": "COLLATOR_KEY = \"last_part\"" }, { "identifier": "TRAINER_KEY", "path": "xllm_demo/core/constants.py", "snippet": "TRAINER_KEY = \"steps\"" }, { "identifier": "EXPERIMENT_KEY", "path": "xllm_demo/core/constants.py", "snippet": "EXPERIMENT_KEY = \"check_model\"" }, { "identifier": "AntropicDataset", "path": "xllm_demo/core/dataset.py", "snippet": "class AntropicDataset(BaseDataset):\n _HF_DATASET_ID = \"Anthropic/hh-rlhf\"\n\n @classmethod\n def get_data(cls, config: DemoXLLMConfig) -> Tuple[List[RawSample], Optional[List[RawSample]]]:\n rlhf_dataset = datasets.load_dataset(cls._HF_DATASET_ID)\n\n parsed_data: Dict[str, List[RawSample]] = dict()\n\n for split in [\"train\", \"test\"]:\n\n parsed_data[split] = list()\n\n for sample in tqdm(rlhf_dataset[split], desc=f\"Parsing {split}\"):\n text_parts = sample[config.text_field].split(\"\\n\\n\")[1:]\n\n parsed_data[split].append(text_parts)\n\n train = parsed_data[\"train\"]\n evaluation = parsed_data[\"test\"]\n\n return train, evaluation\n\n def get_sample(self, index: int) -> RawSample:\n sample = {\n enums.General.text_parts: self.data[index]\n }\n return sample" }, { "identifier": "MyExperiment", "path": "xllm_demo/core/experiment.py", "snippet": "class MyExperiment(Experiment):\n\n def before_model_build(self) -> None:\n assert self.model is None\n dist_logger.info(\"Model is not None\", local_rank=self.config.local_rank)\n\n def after_model_build(self) -> None:\n assert self.model is not None\n dist_logger.info(\"Model is not None\", local_rank=self.config.local_rank)\n\n def after_train(self) -> None:\n if hasattr(self.model, \"my_steps\"):\n num_steps = self.model.my_steps\n dist_logger.info(f\"Steps: {num_steps}\", local_rank=self.config.local_rank)" }, { "identifier": "LastPartCollator", "path": "xllm_demo/core/collator.py", "snippet": "class LastPartCollator(BaseCollator):\n\n def parse_batch(self, raw_batch: List[RawSample]) -> Batch:\n texts = list()\n\n for sample in raw_batch:\n item = sample[enums.General.text_parts]\n # get just last text part\n texts.append(item[-1])\n\n tokenized = self.tokenizer(\n texts,\n return_tensors=\"pt\",\n padding=True,\n truncation=True,\n max_length=self.max_length,\n )\n\n batch = {\n enums.Transformers.input_ids: tokenized.input_ids[:, :-1],\n enums.Transformers.attention_mask: tokenized.attention_mask[:, :-1],\n enums.Transformers.labels: tokenized.input_ids[:, 1:],\n }\n\n return batch" }, { "identifier": "MyLMTrainer", "path": "xllm_demo/core/trainer.py", "snippet": "class MyLMTrainer(LMTrainer):\n\n def __init__(\n self,\n config: DemoXLLMConfig,\n model: Union[PreTrainedModel, PeftModel],\n args: TrainingArguments,\n data_collator: BaseCollator,\n train_dataset: BaseDataset,\n ignore_index: int,\n eval_dataset: Optional[BaseDataset] = None,\n ):\n super().__init__(config, model, args, data_collator, train_dataset, ignore_index, eval_dataset)\n\n self.my_steps = 0\n\n def compute_loss(\n self,\n model: Union[PreTrainedModel, PeftModel],\n inputs: Dict[str, Tensor],\n return_outputs: bool = False,\n ) -> Union[Tensor, Tuple[Tensor, Dict[str, Tensor]]]:\n self.my_steps += 1\n return super().compute_loss(model=model, inputs=inputs, return_outputs=return_outputs)" } ]
from xllm.datasets import datasets_registry from xllm.collators import collators_registry from xllm.trainers import trainers_registry from xllm.experiments import experiments_registry from xllm_demo.core.constants import DATASET_KEY, COLLATOR_KEY, TRAINER_KEY, EXPERIMENT_KEY from xllm_demo.core.dataset import AntropicDataset from xllm_demo.core.experiment import MyExperiment from xllm_demo.core.collator import LastPartCollator from xllm_demo.core.trainer import MyLMTrainer
token_num: 1,238
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def components_registry(): datasets_registry.add(key=DATASET_KEY, value=AntropicDataset) collators_registry.add(key=COLLATOR_KEY, value=LastPartCollator) trainers_registry.add(key=TRAINER_KEY, value=MyLMTrainer)
next_line: experiments_registry.add(key=EXPERIMENT_KEY, value=MyExperiment)
gold_snippet_index: 3
created_at: 2023-11-10 17:56:14+00:00
level: 2k
repo_name: Kiyliy/openai_speech_to_text
file_path: openai_audio.py
[ { "identifier": "send_to_openai_api", "path": "send_to_openai.py", "snippet": "def send_to_openai_api(api_key,url,audio_file_path)->str:\n print(\"DEBUD: api_key:\",api_key)\n if not api_key or not url:\n raise ValueError(\"API密钥和URL必须设置\")\n headers = {\n 'Authorization': f'Bearer {api_key}'\n }\n\n with open(audio_file_path, 'rb') as audio_file:\n files = {'file': audio_file}\n try:\n response = requests.post(\n url=url,\n headers=headers,\n files=files,\n data={\n 'model': 'whisper-1',\n \"language\": \"zh\",\n \"prompt\": \"respond in simplified Chinese\"\n },\n timeout = 60 # 超时时间 \n )\n if response.status_code == 200:\n transcription = response.json()['text']\n print(\"转录文本:\", transcription)\n logging.info(\"转录文本: %s\\n\", transcription)\n return transcription\n else:\n #如果rate_limit\n if(response.status_code == 429 and (\"requests per day\" in response.json()['error']['message']) ):\n #临时删除这个key\n import get_api_key\n get_api_key.delete_key(api_key)\n logging.info(\"API密钥已临时删除\")\n print(\"转录失败:\", response.text)\n\n except Exception as e:\n logging.error(e)\n return" }, { "identifier": "paste_text", "path": "send_to_openai.py", "snippet": "def paste_text(transcription):\n # 复制文本到剪贴板\n pyperclip.copy(transcription)\n # 模拟按键粘贴文本\n pyautogui.hotkey('ctrl', 'v')" } ]
import pyaudio import wave import requests import json import base64 import pyautogui import threading import logging import pyperclip import os import random import time import get_api_key from threading import Lock from send_to_openai import send_to_openai_api , paste_text
token_num: 923
logging.basicConfig(level=logging.INFO) # 确保在模块加载时调用load_config get_api_key.load_config() # API和URL变量 api_key = get_api_key.get_api_key() url = get_api_key.get_api_url() # 录音参数 chunk = 1024 format = pyaudio.paInt16 channels = 1 rate = 44100 # 录音控制变量 is_recording = False frames = [] frames_lock = Lock() def start_recording(): global is_recording with frames_lock: if not is_recording: is_recording = True frames.clear() threading.Thread(target=record).start() else: logging.info("录音已在进行中。") def stop_recording(): global is_recording with frames_lock: if is_recording: is_recording = False else: logging.info("录音已停止。") def record(): global frames logging.info("录音开始...") p = pyaudio.PyAudio() stream = p.open(format=format, channels=channels, rate=rate, input=True, frames_per_buffer=chunk) try: while is_recording: data = stream.read(chunk) with frames_lock: frames.append(data) except Exception as e: logging.error(f"录音过程中出错: {e}") finally: stream.stop_stream() stream.close() p.terminate() logging.info("录音结束...") save_recording(frames, p) def save_recording(frames, audio): wf = wave.open('temp_audio.wav', 'wb') wf.setnchannels(channels) wf.setsampwidth(audio.get_sample_size(format)) wf.setframerate(rate) wf.writeframes(b''.join(frames)) wf.close() api_key = get_api_key.get_api_key() transcription= send_to_openai_api(api_key,url,'temp_audio.wav')
next_line: paste_text(transcription)
gold_snippet_index: 1
created_at: 2023-11-11 09:28:31+00:00
level: 2k
repo_name: globality-corp/deboiler
file_path: deboiler/models/page.py
[ { "identifier": "logger", "path": "deboiler/logger.py", "snippet": "def logger(obj):\n \"\"\"\n logging decorator, assigning an object the `logger` property.\n Can be used on a Python class, e.g:\n @logger\n class MyClass:\n ...\n \"\"\"\n\n obj.logger = logging.getLogger(obj.__name__)\n return obj" }, { "identifier": "get_candidate_nodes", "path": "deboiler/lxml_query.py", "snippet": "def get_candidate_nodes(parsed_content: LxmlTree) -> list[LxmlNode]:\n \"\"\"\n Get all nodes (matching the query) from the input Element.\n These nodes are the candidate nodes that can be boilerplate.\n \"\"\"\n query = construct_query()\n return parsed_content.xpath(query)" }, { "identifier": "LxmlTree", "path": "deboiler/models/lxml_node.py", "snippet": "class LxmlTree:\n \"\"\"\n A wrapper around the LXML _Element object of a parsed page\n \"\"\"\n\n def __init__(self, tree: _Element):\n if not isinstance(tree, _Element):\n raise ValueError(\"non _Element passed\")\n\n self.tree = tree\n\n # Store a mapping of IDs to their LxmlNode wrapped objects\n self.elements: Mapping[str, LxmlNode] = {}\n\n # For each element, add a unique element\n for i, node in enumerate(self.tree.iter()):\n node_id = str(i)\n node.attrib[NODE_IDENTIFIER_KEY] = node_id\n self.elements[node_id] = LxmlNode(node, tree=self)\n\n @property\n def root(self):\n return self.lxml_to_node(self.tree)\n\n def clear_cache(self):\n for element in self.elements.values():\n element.clear_cache()\n\n def xpath(self, *args, **kwargs):\n results = self.tree.xpath(*args, **kwargs)\n return self.lxml_to_nodes(results)\n\n def lxml_to_nodes(self, elements: list[_Element]) -> list[\"LxmlNode\"]:\n \"\"\"\n Converter class to take a list of lxml elements and\n return a list of wrapper LxmlNode from our central registry.\n \"\"\"\n\n return [\n node\n for element in elements\n for node in [self.lxml_to_node(element)]\n if node is not None\n ]\n\n def lxml_to_node(self, element: _Element) -> Optional[\"LxmlNode\"]:\n # We occasionally see elements that don't have an ID set; this is often\n # due to some synthetic lxml objects like _ProcessingInstruction being\n # found in the tree but refusing to save attrib changes that are attempted\n # in the __init__ function of this tree class\n #\n # In these cases log a warning and bail out\n if NODE_IDENTIFIER_KEY not in element.attrib:\n debug(f\"Unfound element: {element}\")\n return None\n\n return self.elements[element.attrib[NODE_IDENTIFIER_KEY]]" } ]
import re from dataclasses import dataclass from io import StringIO from logging import Logger from typing import Optional, Union from lxml.etree import HTMLParser, _Element, parse as parse_html from deboiler.logger import logger from deboiler.lxml_query import get_candidate_nodes from deboiler.models.lxml_node import LxmlTree
token_num: 981
EMPTY_HTML = "<html></html>" @dataclass class RawPage: """ A crawled page with raw (string or binary) content. """ url: str content: Union[bytes, str] def __repr__(self): return f"RawPage(url={self.url}, content={self.content[:20]}...)" def parse(self): return ParsedPage(self.url, self.content) @logger class ParsedPage: """ A parsed page. It stores the parsed version (as an LxmlTree) of the given raw content. nodes attribute is a cache of string representations for all the candidate nodes (subtrees) in this page. """ logger: Logger parser = HTMLParser(remove_comments=True) def __init__(self, url: str, content: Union[bytes, str]): self.url = url self.content: LxmlTree = self.parse(content) self.nodes: set[str] = { # Set of normalized representations for all candidate nodes in the LxmlTree node.normalized_representation()
next_line: for node in get_candidate_nodes(self.content)
gold_snippet_index: 1
created_at: 2023-11-17 23:11:45+00:00
level: 2k
repo_name: solovieff/kibernikto
file_path: kibernikto/plugins/_weblink_summarizator.py
[ { "identifier": "_is_image", "path": "kibernikto/plugins/_img_summarizator.py", "snippet": "def _is_image(url):\n parsed = urlparse(url)\n path = parsed.path\n\n # Get the file extension from the path\n ext = os.path.splitext(path)[1].lower()\n\n # Check if the extension is a known image type\n return ext in ['.jpg', '.jpeg', '.png', '.gif']" }, { "identifier": "OPENAI_MAX_TOKENS", "path": "kibernikto/constants.py", "snippet": "OPENAI_MAX_TOKENS = int(os.environ.get('OPENAI_MAX_TOKENS', 800))" }, { "identifier": "get_website_as_text", "path": "kibernikto/utils/text.py", "snippet": "async def get_website_as_text(url: HttpUrl):\n to_reader_url = \"https://toolsyep.com/en/webpage-to-plain-text/\"\n async with aiohttp.ClientSession() as session:\n async with session.get(to_reader_url, params={\n \"u\": url\n }) as response:\n html = await response.text(encoding=response.charset)\n return html" }, { "identifier": "get_website_html", "path": "kibernikto/utils/text.py", "snippet": "async def get_website_html(url: HttpUrl):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n html = await response.text(encoding=response.charset)\n return html" }, { "identifier": "KiberniktoPlugin", "path": "kibernikto/plugins/_kibernikto_plugin.py", "snippet": "class KiberniktoPlugin(ABC):\n \"\"\"\n Plugins get message as input and return processed message as output or None.\n \"\"\"\n\n def __init__(self, model: str, base_url: str, api_key: str,\n base_message: str, post_process_reply=False,\n store_reply=False):\n \"\"\"\n\n :param model:\n :param base_url:\n :param api_key:\n :param base_message:\n :param post_process_reply: if plugin reply should be used as input for further actions (i.e. other plugins)\n :param store_reply: if the result should be stored in the messages storage at bot level\n \"\"\"\n self.post_process_reply = post_process_reply\n self.store_reply = store_reply\n\n self.model = model\n self.base_message = base_message\n self.client_async = AsyncOpenAI(base_url=base_url, api_key=api_key)\n\n @abstractmethod\n async def run_for_message(self, message: str) -> str:\n pass" }, { "identifier": "KiberniktoPluginException", "path": "kibernikto/plugins/_kibernikto_plugin.py", "snippet": "class KiberniktoPluginException(Exception):\n def __init__(self, plugin_name: str, error_message: str):\n self.plugin_name = plugin_name\n super().__init__(error_message)" } ]
import logging import re from kibernikto.plugins._img_summarizator import _is_image from openai.types.chat import ChatCompletion from kibernikto.constants import OPENAI_MAX_TOKENS from kibernikto.utils.text import get_website_as_text, get_website_html from ._kibernikto_plugin import KiberniktoPlugin, KiberniktoPluginException
token_num: 914
class WeblinkSummaryPlugin(KiberniktoPlugin): """ This plugin is used to get video transcript and then get text summary from it. """ def __init__(self, model: str, base_url: str, api_key: str, summarization_request: str): super().__init__(model=model, base_url=base_url, api_key=api_key, post_process_reply=False, store_reply=True, base_message=summarization_request) async def run_for_message(self, message: str): try: result = await self._run(message) return result except Exception as error: logging.error(f'failed to get webpage data from {message}: {str(error)}', ) raise KiberniktoPluginException(plugin_name=self.__class__.__name__, error_message='failed to get webpage data') async def _run(self, message: str): web_link, other_text = _extract_link(message) if web_link is None: return None
next_line: if _is_image(web_link):
gold_snippet_index: 0
created_at: 2023-11-11 18:39:28+00:00
level: 2k
repo_name: leeyuentuen/tibber_ev
file_path: custom_components/tibber_ev/sensor.py
[ { "identifier": "MAX_CHARGE_RANGE", "path": "custom_components/tibber_ev/const.py", "snippet": "MAX_CHARGE_RANGE = 375" }, { "identifier": "TibberEVEntity", "path": "custom_components/tibber_ev/entity.py", "snippet": "class TibberEVEntity(Entity):\n\n def __init__(self, device: TibberApi) -> None:\n \"\"\"Initialize the Tibber entity.\"\"\"\n self._device = device\n\n self._attr_device_info = DeviceInfo(\n identifiers={(Tibber_EV_DOMAIN, self._device.name)},\n manufacturer=\"Tibber\",\n model=None,\n name=device.name,\n sw_version=None,\n )\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Add listener for state changes.\"\"\"\n await super().async_added_to_hass()" }, { "identifier": "DOMAIN", "path": "custom_components/tibber_ev/const.py", "snippet": "DOMAIN = \"tibber_ev\"" }, { "identifier": "Tibber", "path": "custom_components/tibber_ev/tibber.py", "snippet": "POST_HEADER_JSON = {\"Content-Type\": \"application/json\"}\n_LOGGER = logging.getLogger(__name__)\n QUERY_PAYLOAD = '{\"query\": \"{ me { homes { electricVehicles {id name shortName lastSeen lastSeenText isAlive hasNoSmartChargingCapability imgUrl schedule {isEnabled isSuspended localTimeTo minBatteryLevel} batteryText chargingText consumptionText consumptionUnitText energyCostUnitText chargeRightAwayButton chargeRightAwayAlert {imgUrl title description okText cancelText}backgroundStyle energyDealCallToAction{text url redirectUrlStartsWith link action enabled} settingsScreen{settings {key value valueType valueIsArray isReadOnly inputOptions{type title description pickerOptions {values postFix} rangeOptions{max min step defaultValue displayText displayTextPlural} selectOptions {value title description imgUrl iconName isRecommendedOption} textFieldOptions{imgUrl format placeholder} timeOptions{doNotSetATimeText}}} settingsLayout{uid type title description valueText imgUrl iconName isUpdated isEnabled callToAction {text url redirectUrlStartsWith link action enabled} childItems{uid type title description valueText imgUrl iconName isUpdated isEnabled callToAction {text url redirectUrlStartsWith link action enabled} settingKey settingKeyForIsHidden} settingKey settingKeyForIsHidden}} settingsButtonText settingsButton {text url redirectUrlStartsWith link action enabled}enterPincode message {id title description style iconName iconSrc callToAction {text url redirectUrlStartsWith link action enabled} dismissButtonText} scheduleSuspendedText faqUrl battery { percent percentColor isCharging chargeLimit}}}}}\"}'\nclass Tibber:\n def __init__(self,\n hass: HomeAssistant,\n raw_data: str,\n tibber_api: TibberApi) -> None:\n async def init(self):\n def status(self) -> str:\n async def async_update(self):" } ]
import logging from typing import Final from dataclasses import dataclass from datetime import timedelta from .const import MAX_CHARGE_RANGE from .entity import TibberEVEntity from homeassistant.helpers.typing import StateType from homeassistant import const from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.components.sensor import ( SensorEntity, SensorEntityDescription, SensorStateClass, SensorDeviceClass ) from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers import entity_platform from . import DOMAIN as TIBBER_EV_DOMAIN from .tibber import Tibber, TibberApi from homeassistant.const import ( PERCENTAGE, )
token_num: 1,577
path="battery", subpath="percent", unit=PERCENTAGE, round_digits=None, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.BATTERY, ), TibberSensorDescription( key="battery_charge_limit", name="battery charge limit", icon="mdi:battery-plus-variant", path="battery", subpath="chargeLimit", unit=PERCENTAGE, round_digits=None, state_class=SensorStateClass.TOTAL, device_class=SensorDeviceClass.BATTERY, ), TibberSensorDescription( key="last_seen", name="last seen", icon="mdi:eye", path="lastSeen", subpath=None, unit=None, round_digits=None, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.TIMESTAMP, ), TibberSensorDescription( key="last_seen_text", name="last seen text", icon="mdi:eye", path="lastSeenText", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="is_charging", name="is charging", icon="mdi:battery-charging", path="battery", subpath="isCharging", unit=None, round_digits=None, ), TibberSensorDescription( key="shortName", name="shortname", icon="mdi:rename-outline", path="shortName", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="full_name", name="full name", icon="mdi:car", path="name", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="is_alive", name="Is alive", icon="mdi:shield-account", path="isAlive", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="schedule", name="schedule", icon="mdi:battery-clock", path="schedule", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="id", name="id", icon="mdi:car", path="id", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="range", name="Range", icon="mdi:map-marker-distance", path=None, subpath=None, unit="km", round_digits=0, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.DISTANCE, ), ) async def async_setup_platform( hass: HomeAssistant, config: ConfigEntry, async_add_entities: AddEntitiesCallback, discovery_info=None): pass async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback): """Set up using config_entry.""" # get the device
_LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=15) @dataclass class TibberSensorDescriptionMixin: """Define an entity description mixin for sensor entities.""" path: str subpath: str | None unit: str round_digits: int | None unit: str | None @dataclass class TibberSensorDescription( SensorEntityDescription, TibberSensorDescriptionMixin ): """Class to describe an Tibber sensor entity.""" TIBBER_SENSOR_TYPES: Final[tuple[TibberSensorDescription, ...]] = ( TibberSensorDescription( key="battery_soc", name="battery soc", path="battery", subpath="percent", unit=PERCENTAGE, round_digits=None, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.BATTERY, ), TibberSensorDescription( key="battery_charge_limit", name="battery charge limit", icon="mdi:battery-plus-variant", path="battery", subpath="chargeLimit", unit=PERCENTAGE, round_digits=None, state_class=SensorStateClass.TOTAL, device_class=SensorDeviceClass.BATTERY, ), TibberSensorDescription( key="last_seen", name="last seen", icon="mdi:eye", path="lastSeen", subpath=None, unit=None, round_digits=None, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.TIMESTAMP, ), TibberSensorDescription( key="last_seen_text", name="last seen text", icon="mdi:eye", path="lastSeenText", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="is_charging", name="is charging", icon="mdi:battery-charging", path="battery", subpath="isCharging", unit=None, round_digits=None, ), TibberSensorDescription( key="shortName", name="shortname", icon="mdi:rename-outline", path="shortName", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="full_name", name="full name", icon="mdi:car", path="name", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="is_alive", name="Is alive", icon="mdi:shield-account", path="isAlive", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="schedule", name="schedule", icon="mdi:battery-clock", path="schedule", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="id", name="id", icon="mdi:car", path="id", subpath=None, unit=None, round_digits=None, ), TibberSensorDescription( key="range", name="Range", icon="mdi:map-marker-distance", path=None, subpath=None, unit="km", round_digits=0, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.DISTANCE, ), ) async def async_setup_platform( hass: HomeAssistant, config: ConfigEntry, async_add_entities: AddEntitiesCallback, discovery_info=None): pass async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback): """Set up using config_entry.""" # get the device
next_line: tibberApi: TibberApi
gold_snippet_index: 3
created_at: 2023-11-14 18:59:47+00:00
level: 2k
repo_name: bytedance/LapNet
file_path: lapnet/configs/benzene_dimer/benzene_dimer.py
[ { "identifier": "base_config", "path": "lapnet/base_config.py", "snippet": "class SystemType(enum.IntEnum):\n MOLECULE = enum.auto()\n def has_value(cls, value):\ndef default() -> ml_collections.ConfigDict:\ndef resolve(cfg):" }, { "identifier": "system", "path": "lapnet/utils/system.py", "snippet": "class Atom:\n def _set_default_charge(self):\n def _set_default_atomic_number(self):\n def __attrs_post_init__(self):\n def coords_angstrom(self):\n def coords_array(self):\n def element(self):\ndef pyscf_mol_to_internal_representation(\n mol: pyscf.gto.Mole) -> ml_collections.ConfigDict:" }, { "identifier": "Atom", "path": "lapnet/utils/system.py", "snippet": "class Atom:\n \"\"\"Atom information for Hamiltonians.\n\n The nuclear charge is inferred from the symbol if not given, in which case the\n symbol must be the IUPAC symbol of the desired element.\n\n Attributes:\n symbol: Element symbol.\n coords: An iterable of atomic coordinates. Always a list of floats and in\n bohr after initialisation. Default: place atom at origin.\n charge: Nuclear charge. Default: nuclear charge (atomic number) of atom of\n the given name.\n atomic_number: Atomic number associated with element. Default: atomic number\n of element of the given symbol. Should match charge unless fractional\n nuclear charges are being used.\n units: String giving units of coords. Either bohr or angstrom. Default:\n bohr. If angstrom, coords are converted to be in bohr and units to the\n string 'bohr'.\n coords_angstrom: list of atomic coordinates in angstrom.\n coords_array: Numpy array of atomic coordinates in bohr.\n element: elements.Element corresponding to the symbol.\n \"\"\"\n symbol = attr.ib(type=str)\n coords = attr.ib(\n type=Sequence[float],\n converter=lambda xs: tuple(float(x) for x in xs),\n default=(0.0, 0.0, 0.0))\n charge = attr.ib(type=float, converter=float)\n atomic_number = attr.ib(type=int, converter=int)\n units = attr.ib(\n type=str,\n default='bohr',\n validator=attr.validators.in_(['bohr', 'angstrom']))\n\n @charge.default\n def _set_default_charge(self):\n return self.element.atomic_number\n\n @atomic_number.default\n def _set_default_atomic_number(self):\n return self.element.atomic_number\n\n def __attrs_post_init__(self):\n if self.units == 'angstrom':\n self.coords = [unit_conversion.angstrom2bohr(x) for x in self.coords]\n self.units = 'bohr'\n\n @property\n def coords_angstrom(self):\n return [unit_conversion.bohr2angstrom(x) for x in self.coords]\n\n @property\n def coords_array(self):\n if not hasattr(self, '_coords_arr'):\n self._coords_arr = np.array(self.coords)\n return self._coords_arr\n\n @property\n def element(self):\n return elements.SYMBOLS[self.symbol]" } ]
from lapnet import base_config from lapnet.utils import system from lapnet.utils.system import Atom
token_num: 1,044
# Copyright 2023 Bytedance Ltd. and/or its affiliate # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Settings in a a config files are loaded by executing the the get_config # function. # Geometry of Benzene sigle molecule is from https://pubs.acs.org/doi/10.1021/acs.jpclett.0c02621, # which is at the MP2/6-31G* level. def get_config(input_str): ''' Return config for benzene dimer with different bond lenth. Using input_str to set the bond length, e.g. --config lapnet/configs/benzene_dimer/benzene_dimer.py:4.95 ''' r_str= input_str r = float(r_str) # Get default options.
next_line: cfg = base_config.default()
gold_snippet_index: 0
created_at: 2023-11-13 08:19:53+00:00
level: 2k
repo_name: svetlovtech/gptize
file_path: gptize/gptizer.py
[ { "identifier": "File", "path": "gptize/models.py", "snippet": "class File:\n \"\"\"Class representing a file in the project.\"\"\"\n def __init__(self, file_name: str, directory: str):\n self.file_name = file_name\n self.directory = directory\n self.content = \"\"\n self.content_size = 0\n self.is_binary = False\n\n def __str__(self):\n return f\"File(name={self.file_name}, size={self.content_size} bytes)\"\n\n def __repr__(self):\n return f\"<File '{self.file_name}' at {self.directory}>\"" }, { "identifier": "Project", "path": "gptize/models.py", "snippet": "class Project:\n \"\"\"Class representing the project.\"\"\"\n\n def __init__(self, name: str, root_path: str):\n self.name: str = name\n self.files: List[File] = []\n self.root_path: str = root_path\n\n def __str__(self):\n file_list = ', '.join(file.file_name for file in self.files)\n return f\"Project '{self.name}' with files: {file_list}\"\n\n def __repr__(self):\n return f\"<Project '{self.name}' with {len(self.files)} files>\"" }, { "identifier": "Settings", "path": "gptize/settings.py", "snippet": "class Settings:\n DEFAULT_ENCODINGS = ['utf-8', 'latin-1', 'cp1252']\n IGNORED_DIRECTORIES = ['.git', '.svn', '__pycache__']\n GITIGNORE_PATH = '.gitignore'\n MAX_FILE_SIZE_BYTES_LIMIT = 512 * 1024 * 1024 # 512 MB\n MAX_TOKEN_COUNT_LIMIT = 2000000 # 2 million tokens\n\n @staticmethod\n def default_output_file():\n current_time = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n return f\"gptize-output-{current_time}.txt\"\n\n @staticmethod\n def custom_output_file(target: str):\n base_name = os.path.basename(target).replace(\n ' ', '_')\n if not base_name or os.path.isdir(target):\n base_name = 'folder' if os.path.isdir(target) else 'file'\n current_time = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n return f\"gptize-output-{base_name}-{current_time}.txt\"" }, { "identifier": "OutputBuilder", "path": "gptize/output_builder.py", "snippet": "class OutputBuilder:\n def __init__(self):\n self.content = \"\"\n\n def write_common_header(self):\n \"\"\"Write a common header to the content.\"\"\"\n self.content += \"This file was generated using third party tool 'gptize'. For more information, visit https://github.com/svetlovtech/gptize\\n\"\n self.content += \"=\" * 40 + \"\\n\"\n\n def write_project_header(self, project: Project):\n \"\"\"Write a header for the project.\"\"\"\n self.content += f\"Project Name: {project.name}\\n\"\n self.content += f\"Total Files: {len(project.files)}\\n\"\n self.content += \"=\" * 40 + \"\\n\"\n\n def write_file_content(self, file: File):\n if file.is_binary:\n self.content += f\"File: {file.directory} (Binary file present)\\n\"\n else:\n self.content += f\"File: {file.directory}\\n\"\n self.content += file.content + \"\\n\"\n\n def write_separator(self):\n \"\"\"Write a separator.\"\"\"\n self.content += \"=\" * 40 + \"\\n\"\n\n def get_content(self) -> str:\n \"\"\"Get the final combined content.\"\"\"\n return self.content\n\n def __str__(self):\n \"\"\"String representation of the OutputBuilder.\"\"\"\n return f\"OutputBuilder with {len(self.content)} characters of content\"\n\n def __repr__(self):\n \"\"\"Formal string representation of the OutputBuilder.\"\"\"\n return f\"<OutputBuilder with {len(self.content)} characters>\"" } ]
import logging import os import pathspec from .models import File, Project from .settings import Settings from .output_builder import OutputBuilder
token_num: 1,396
class GPTizer: def __init__(self): self._project = None self._gitignore = None def process_directory(self, root_path: str): """ Processes all the files within a given directory. This method initializes the Project object for the specified directory, loads the .gitignore patterns, and populates the project with files that are not ignored by .gitignore. The method traverses through the directory recursively and adds all relevant files to the project's file list, ensuring that binary files and files specified in .gitignore are not included. Parameters: root_path (str): The path to the root of the directory to be processed. Raises: FileNotFoundError: If the specified directory does not exist. Exception: For any other issues encountered during the directory processing. """ project_name = os.path.basename(root_path) self._project = Project(project_name, root_path) self._gitignore = self.load_gitignore(root_path) self.populate_files() def process_file(self, file_path: str): """ Processes a single file. This method creates a Project object for the file, treating the file as an individual project. It bypasses .gitignore processing, as it is assumed that the specific file is intentionally selected for processing. The method creates a File object for the specified file, reads its content, and adds it to the project's file list. It handles binary and text files accordingly. Parameters: file_path (str): The path to the file to be processed. This includes both the directory path and file name. Raises: FileNotFoundError: If the specified file does not exist. IOError: If there is an issue reading the file. Exception: For any other unexpected issues encountered during file processing. """ root_path, file_name = os.path.split(file_path) project_name = os.path.basename(root_path) if root_path else 'SingleFileProject' self._project = Project(project_name, root_path or '.') self._gitignore = pathspec.PathSpec.from_lines('gitwildmatch', [])
next_line: file_obj = File(file_name, file_path)
gold_snippet_index: 0
created_at: 2023-11-11 20:59:01+00:00
level: 2k
repo_name: civrealm/civrealm
file_path: src/civrealm/envs/freeciv_wrapper/tensor_base_wrapper.py
[ { "identifier": "Wrapper", "path": "src/civrealm/envs/freeciv_wrapper/core.py", "snippet": "class Wrapper(gymnasium.Wrapper):\n def reset(self, *, seed=None, options=None, **kwargs):\n return self.env.reset(seed=seed, options=options, **kwargs)" }, { "identifier": "onehotifier_maker", "path": "src/civrealm/envs/freeciv_wrapper/utils.py", "snippet": "def onehotifier_maker(category):\n if isinstance(category, int):\n\n def onehot(obs):\n if isinstance(obs, np.ndarray):\n shape = obs.shape\n else:\n shape = (1,)\n obs = int(obs)\n result = (\n np.zeros([*shape, category], dtype=np.int32)\n if shape != (1,)\n else np.zeros([category], dtype=np.int32)\n )\n with np.nditer(obs, op_flags=[\"readonly\"], flags=[\"multi_index\"]) as it:\n for x in it:\n if x != 255:\n index = (\n (\n *(it.multi_index),\n x,\n )\n if shape != (1,)\n else (x,)\n )\n result[index] = 1\n return result\n\n elif isinstance(category, list):\n\n def onehot(obs):\n if isinstance(obs, np.ndarray):\n shape = obs.shape\n else:\n shape = (1,)\n result = (\n np.zeros([*shape, len(category)], dtype=np.int32)\n if shape != (1,)\n else np.zeros([len(category)], dtype=np.int32)\n )\n with np.nditer(obs, op_flags=[\"readonly\"], flags=[\"multi_index\"]) as it:\n for x in it:\n index = (\n (\n *(it.multi_index),\n category.index(x),\n )\n if shape != (1,)\n else (category.index(x),)\n )\n result[index] = 1\n return result\n\n else:\n raise NotImplementedError(f\"Not implemented yet for type {type(category)}\")\n return onehot" } ]
import numpy as np from civrealm.envs import FreecivBaseEnv from civrealm.envs.freeciv_wrapper.config import default_tensor_config from .core import Wrapper from .utils import onehotifier_maker
1,273
class TensorBase(Wrapper): """ A basic wrapper that deals with config loading and entity id recording, required by all tensor-related wrappers. Parameters ---------- env: FreecivBaseEnv config: dict tensor env configuration Attributes --------- config: dict A dict that specifies all configurations related to tensor wrapper. my_player_id: int My player id. unit_ids: list A sorted list of my unit ids. city_ids: list A sorted list of my city ids. others_unit_ids: list A sorted list of others unit ids. others_city_ids: list A sorted list of others city ids. dipl_ids : list A list of others player ids. units : dict ruleset information about units. unit_types :list A list of all unit types. unit_costs : list A list of int indicating unit costs. improvements : dict Ruleset information about city improvements. impr_costs :list A list of int indicating city improvements costs. """ def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config): self.config = config self.my_player_id = -1 # mutable ids self.unit_ids = [] self.city_ids = [] self.others_unit_ids = [] self.others_city_ids = [] self.dipl_ids = [] # ruleset self.units = {} self.unit_types = [] self.unit_costs = [] self.improvements = {} self.impr_costs = [] super().__init__(env) def update_sequence_ids(self, observation): """ Use city, unit and dipl information in observation to update ids. """ self.unit_ids = sorted( list( k for k in observation.get("unit", {}).keys() if observation["unit"][k]["owner"] == self.my_player_id ) ) self.others_unit_ids = sorted( list( k for k in observation.get("unit", {}).keys() if observation["unit"][k]["owner"] != self.my_player_id ) ) self.city_ids = sorted( list( k for k in observation.get("city", {}).keys() if observation["city"][k]["owner"] == self.my_player_id ) ) self.others_city_ids = sorted( list( k for k in observation.get("city", {}).keys() if observation["city"][k]["owner"] != self.my_player_id ) ) self.dipl_ids = [ player for player in sorted(observation.get("dipl", {}).keys()) if player != self.my_player_id ] def update_config(self): """ Update config using ruleset information at the start of the turn. """ self.units = self.unwrapped.civ_controller.rule_ctrl.unit_types self.unit_types = [self.units[i]["name"] for i in range(len(self.units))] self.unit_costs = [self.units[i]["build_cost"] for i in range(len(self.units))] self.improvements = self.unwrapped.civ_controller.rule_ctrl.improvements self.impr_costs = [ self.improvements[i]["build_cost"] for i in range(len(self.improvements)) ]
self.config["obs_ops"]["unit"]["type_rule_name"] = onehotifier_maker(
1
2023-11-18 19:35:50+00:00
2k
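The gold continuation in this record wires a one-hot encoder for unit type names into the wrapper's observation config via onehotifier_maker. A self-contained, simplified sketch of what such an encoder does for a list of categories; the real onehotifier_maker in the record's context also handles array-shaped inputs and integer categories, and the unit names below are made up:

import numpy as np

def make_onehot(categories):
    # Return a function that maps one category value to a one-hot vector
    # over the positions of the given category list.
    def onehot(value):
        vec = np.zeros(len(categories), dtype=np.int32)
        vec[categories.index(value)] = 1
        return vec
    return onehot

encode_unit_type = make_onehot(["Settlers", "Warriors", "Workers"])  # hypothetical unit type names
print(encode_unit_type("Warriors"))  # [0 1 0]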
Sheppsu/discord-ext-listening
discord/ext/listening/sink.py
[ { "identifier": "RTCPMessageType", "path": "discord/ext/listening/enums.py", "snippet": "class RTCPMessageType(Enum):\n sender_report = 200\n receiver_report = 201\n source_description = 202\n goodbye = 203\n application_defined = 204" }, { "identifier": "Decoder", "path": "discord/ext/listening/opus.py", "snippet": "class Decoder(BaseDecoder):\n def packet_get_nb_channels(self, data: bytes) -> int:\n return self.CHANNELS" } ]
import asyncio import logging import os import queue import struct import subprocess import threading import wave from collections import defaultdict from dataclasses import dataclass from time import monotonic from typing import TYPE_CHECKING, Any, BinaryIO, Callable, Dict, List, Optional, Sequence, Tuple, Union from discord.errors import ClientException from discord.object import Object from discord.player import CREATE_NO_WINDOW from .enums import RTCPMessageType from .opus import Decoder as OpusDecoder from discord.member import Member
1,343
c: :class:`int` The total number of RTP data packets from source SSRC that have been lost since the beginning of reception. ehsn: :class:`int` The low 16 bits contain the highest sequence number received in an RTP data packet from source SSRC, and the most significant 16 bits extend that sequence number with the corresponding count of sequence number cycles. j: :class:`int` An estimate of the statistical variance of the RTP data packet interarrival time, measured in timestamp units and expressed as an unsigned integer. lsr: :class:`int` The middle 32 bits out of 64 in the NTP timestamp received as part of the most recent RTCP sender report (SR) packet from source SSRC. If no SR has been received yet, the field is set to zero. dlsr: :class:`int` The delay, expressed in units of 1/65536 seconds, between receiving the last SR packet from source SSRC and sending this reception report block. If no SR packet has been received yet from SSRC, the DLSR field is set to zero. """ __slots__ = ( "ssrc", "f", "c", "ehsn", "j", "lsr", "dlsr", ) ssrc: int f: int c: int ehsn: int j: int lsr: int dlsr: int @dataclass class RTCPSourceDescriptionItem: """An item of a :class:`RTCPSourceDescriptionChunk` object Attributes ---------- cname: :class:`int` Type of description. description: :class:`bytes` Description pertaining to the source of the chunk containing this item. """ __slots__ = ( "cname", "description", ) cname: int description: bytes @dataclass class RTCPSourceDescriptionChunk: """A chunk of a :class:`RTCPSourceDescriptionPacket` object. Contains items that describe a source. Attributes ---------- ssrc: :class:`int` The source which is being described. items: Sequence[:class:`RTCPSourceDescriptionItem`] A sequence of items which have a description. """ __slots__ = ( "ssrc", "items", ) ssrc: int items: Sequence[RTCPSourceDescriptionItem] class RTCPPacket: """Base class for all RTCP packet classes. Contains header attributes. Read in detail here: https://www.freesoft.org/CIE/RFC/1889/19.htm Attributes ---------- v: :class:`int` Identifies the version of RTP, which is the same in RTCP packets as in RTP data packets. p: :class:`bool` If the padding bit is set, this RTCP packet contains some additional padding octets at the end which are not part of the control information. The last octet of the padding is a count of how many padding octets should be ignored. rc: :class:`int` Indicates the number of "items" within a packet. For sender and receiver packets it indicates the number of Receiver Report Blocks. pt: :class:`RTCPMessageType` Indicates the RTCP packet type. l: :class:`int` The length of this RTCP packet in 32-bit words minus one, including the header and any padding. """ __slots__ = ( "v", "p", "rc", "pt", "l", ) if TYPE_CHECKING: v: int p: bool rc: int
if TYPE_CHECKING: __all__ = ( "AudioFrame", "AudioSink", "AudioHandlingSink", "AudioFileSink", "AudioFile", "WaveAudioFile", "MP3AudioFile", "RTCPPacket", "RTCPSenderReportPacket", "RTCPReceiverReportPacket", "RTCPSourceDescriptionPacket", "RTCPGoodbyePacket", "RTCPApplicationDefinedPacket", "RTCPReceiverReportBlock", "RTCPSourceDescriptionChunk", "RTCPSourceDescriptionItem", ) SILENT_FRAME = b"\xf8\xff\xfe" _log = logging.getLogger(__name__) @dataclass class RTCPReceiverReportBlock: """Receiver report block from :class:`RTCPSenderReportPacket` or :class:`RTCPReceiverReportPacket` Conveys statistics on the reception of RTP packets from a single synchronization source. Read in detail here: https://www.freesoft.org/CIE/RFC/1889/19.htm Attributes ---------- ssrc: :class:`int` The SSRC identifier of the source to which the information in this reception report block pertains. f: :class:`int` The fraction of RTP data packets from source SSRC lost since the previous SR or RR packet was sent. c: :class:`int` The total number of RTP data packets from source SSRC that have been lost since the beginning of reception. ehsn: :class:`int` The low 16 bits contain the highest sequence number received in an RTP data packet from source SSRC, and the most significant 16 bits extend that sequence number with the corresponding count of sequence number cycles. j: :class:`int` An estimate of the statistical variance of the RTP data packet interarrival time, measured in timestamp units and expressed as an unsigned integer. lsr: :class:`int` The middle 32 bits out of 64 in the NTP timestamp received as part of the most recent RTCP sender report (SR) packet from source SSRC. If no SR has been received yet, the field is set to zero. dlsr: :class:`int` The delay, expressed in units of 1/65536 seconds, between receiving the last SR packet from source SSRC and sending this reception report block. If no SR packet has been received yet from SSRC, the DLSR field is set to zero. """ __slots__ = ( "ssrc", "f", "c", "ehsn", "j", "lsr", "dlsr", ) ssrc: int f: int c: int ehsn: int j: int lsr: int dlsr: int @dataclass class RTCPSourceDescriptionItem: """An item of a :class:`RTCPSourceDescriptionChunk` object Attributes ---------- cname: :class:`int` Type of description. description: :class:`bytes` Description pertaining to the source of the chunk containing this item. """ __slots__ = ( "cname", "description", ) cname: int description: bytes @dataclass class RTCPSourceDescriptionChunk: """A chunk of a :class:`RTCPSourceDescriptionPacket` object. Contains items that describe a source. Attributes ---------- ssrc: :class:`int` The source which is being described. items: Sequence[:class:`RTCPSourceDescriptionItem`] A sequence of items which have a description. """ __slots__ = ( "ssrc", "items", ) ssrc: int items: Sequence[RTCPSourceDescriptionItem] class RTCPPacket: """Base class for all RTCP packet classes. Contains header attributes. Read in detail here: https://www.freesoft.org/CIE/RFC/1889/19.htm Attributes ---------- v: :class:`int` Identifies the version of RTP, which is the same in RTCP packets as in RTP data packets. p: :class:`bool` If the padding bit is set, this RTCP packet contains some additional padding octets at the end which are not part of the control information. The last octet of the padding is a count of how many padding octets should be ignored. rc: :class:`int` Indicates the number of "items" within a packet. For sender and receiver packets it indicates the number of Receiver Report Blocks. 
pt: :class:`RTCPMessageType` Indicates the RTCP packet type. l: :class:`int` The length of this RTCP packet in 32-bit words minus one, including the header and any padding. """ __slots__ = ( "v", "p", "rc", "pt", "l", ) if TYPE_CHECKING: v: int p: bool rc: int
pt: RTCPMessageType
0
2023-11-15 00:16:36+00:00
2k
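The header fields this record documents (v, p, rc, pt, l) follow the RTCP header layout from the RFC its docstrings link to: a 2-bit version, a padding flag, a 5-bit report count, an 8-bit packet type, and a 16-bit length in 32-bit words minus one. A small stand-alone sketch of unpacking those fields from the first four bytes; it is illustrative only and not the library's own parser:

import struct

def parse_rtcp_header(data: bytes):
    # First octet packs version (2 bits), padding flag (1 bit) and report count (5 bits);
    # the second octet is the packet type and the next two are the length field.
    first, packet_type, length = struct.unpack_from(">BBH", data, 0)
    version = first >> 6
    padding = bool((first >> 5) & 0x01)
    report_count = first & 0x1F
    return version, padding, report_count, packet_type, length

# A receiver report header: version 2, no padding, one report block, PT 201, length 7.
print(parse_rtcp_header(bytes([0x81, 201, 0x00, 0x07])))  # (2, False, 1, 201, 7)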
RAIVNLab/MatFormer-OLMo
olmo/data/iterable_dataset.py
[ { "identifier": "PathOrStr", "path": "olmo/aliases.py", "snippet": "" }, { "identifier": "barrier", "path": "olmo/util.py", "snippet": "def barrier() -> None:\n if dist.is_available() and dist.is_initialized():\n dist.barrier()" }, { "identifier": "get_global_rank", "path": "olmo/util.py", "snippet": "def get_global_rank() -> int:\n return int(os.environ.get(\"RANK\") or dist.get_rank())" }, { "identifier": "get_world_size", "path": "olmo/util.py", "snippet": "def get_world_size() -> int:\n if dist.is_available() and dist.is_initialized():\n return dist.get_world_size()\n else:\n return 1" } ]
import logging import math import numpy as np import torch import torch.utils.data from pathlib import Path from typing import Any, Dict, Iterator, List, Optional, Sequence, Union from ..aliases import PathOrStr from ..util import barrier, get_global_rank, get_world_size
803
__all__ = ["IterableDataset"] log = logging.getLogger(__name__) class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]): """ Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence as an IterableDataset that can be deterministically restarted at any point by setting `start_index`, which should be a multiple of your global batch size. Similarly `max_examples`, if set, should be a multiple of global batch size. """ def __init__( self, dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]], *, seed: int = 0, start_index: int = 0, max_examples: Optional[int] = None, shuffle: bool = True, drop_last: bool = False, world_size: Optional[int] = None, rank: Optional[int] = None, work_dir: Optional[PathOrStr] = None, ): self.dataset = dataset self.seed = seed self.start_index = start_index self.max_examples = max_examples self.shuffle = shuffle self.drop_last = drop_last self.rank = rank if rank is not None else get_global_rank() self.world_size = world_size if world_size is not None else get_world_size() # If the dataset length is evenly divisible by # of replicas, then there # is no need to drop any data, since the dataset will be split equally. if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type] # Split to nearest available length that is evenly divisible by world size. # This is to ensure each rank receives the same amount of data. num_samples = math.ceil( (len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type] ) else: num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type] self.total_size = num_samples * self.world_size self.global_indices_file: Optional[Path] = None if work_dir is not None: self.global_indices_file = Path(work_dir) / "global_indices.npy" if self.rank == 0: log.info("Saving global data order indices...") self.global_indices_file.parent.mkdir(parents=True, exist_ok=True) global_indices = self._build_global_indices() global_indices_mmap = np.memmap( self.global_indices_file, dtype=np.uint64, mode="w+", shape=(len(global_indices),) ) global_indices_mmap[:] = global_indices global_indices_mmap.flush() del global_indices_mmap log.info("Global data order indices saved to '%s'", self.global_indices_file)
__all__ = ["IterableDataset"] log = logging.getLogger(__name__) class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]): """ Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence as an IterableDataset that can be deterministically restarted at any point by setting `start_index`, which should be a multiple of your global batch size. Similarly `max_examples`, if set, should be a multiple of global batch size. """ def __init__( self, dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]], *, seed: int = 0, start_index: int = 0, max_examples: Optional[int] = None, shuffle: bool = True, drop_last: bool = False, world_size: Optional[int] = None, rank: Optional[int] = None, work_dir: Optional[PathOrStr] = None, ): self.dataset = dataset self.seed = seed self.start_index = start_index self.max_examples = max_examples self.shuffle = shuffle self.drop_last = drop_last self.rank = rank if rank is not None else get_global_rank() self.world_size = world_size if world_size is not None else get_world_size() # If the dataset length is evenly divisible by # of replicas, then there # is no need to drop any data, since the dataset will be split equally. if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type] # Split to nearest available length that is evenly divisible by world size. # This is to ensure each rank receives the same amount of data. num_samples = math.ceil( (len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type] ) else: num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type] self.total_size = num_samples * self.world_size self.global_indices_file: Optional[Path] = None if work_dir is not None: self.global_indices_file = Path(work_dir) / "global_indices.npy" if self.rank == 0: log.info("Saving global data order indices...") self.global_indices_file.parent.mkdir(parents=True, exist_ok=True) global_indices = self._build_global_indices() global_indices_mmap = np.memmap( self.global_indices_file, dtype=np.uint64, mode="w+", shape=(len(global_indices),) ) global_indices_mmap[:] = global_indices global_indices_mmap.flush() del global_indices_mmap log.info("Global data order indices saved to '%s'", self.global_indices_file)
barrier()
1
2023-11-14 02:24:07+00:00
2k
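The constructor in this record sizes each rank's shard so that total_size is an exact multiple of the world size: with drop_last it sheds the uneven tail, otherwise the last shard gets padded up. A tiny torch-free sketch of that arithmetic; the example lengths are made up:

import math

def shard_sizes(dataset_len: int, world_size: int, drop_last: bool):
    # Mirrors the sizing logic above: samples per rank, then the padded or truncated total.
    if drop_last and dataset_len % world_size != 0:
        num_samples = math.ceil((dataset_len - world_size) / world_size)
    else:
        num_samples = math.ceil(dataset_len / world_size)
    return num_samples, num_samples * world_size

print(shard_sizes(1000, 8, drop_last=False))  # (125, 1000) exact split
print(shard_sizes(1001, 8, drop_last=True))   # (125, 1000) one trailing sample dropped
print(shard_sizes(1001, 8, drop_last=False))  # (126, 1008) last shard padded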
1in-oos/ccplus
caringcaribou/utils/can_actions.py
[ { "identifier": "ARBITRATION_ID_MAX", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MAX = 0x7FF" }, { "identifier": "ARBITRATION_ID_MAX_EXTENDED", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MAX_EXTENDED = 0x18DAFFF1" }, { "identifier": "ARBITRATION_ID_MIN", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MIN = 0x700" }, { "identifier": "BYTE_MAX", "path": "caringcaribou/utils/constants.py", "snippet": "BYTE_MAX = 0xFF" }, { "identifier": "BYTE_MIN", "path": "caringcaribou/utils/constants.py", "snippet": "BYTE_MIN = 0x00" } ]
from caringcaribou.utils.constants import ARBITRATION_ID_MAX, ARBITRATION_ID_MAX_EXTENDED, ARBITRATION_ID_MIN, BYTE_MAX, BYTE_MIN from sys import stdout, version_info import can import time
1,521
if print_results: time_left = end_time - time.time() num_matches = len(blacklist) print("\r{0:> 5.1f} seconds left, {1} found".format(time_left, num_matches), end="") stdout.flush() # Receive message msg = bus.recv(0.1) if msg is None: continue # Classify if classifier_function(msg): # Add to blacklist blacklist.add(msg.arbitration_id) if print_results: num_matches = len(blacklist) print("\r 0.0 seconds left, {0} found".format(num_matches), end="") if len(blacklist) > 0: print("\n Detected IDs: {0}".format(" ".join(sorted(list(map(hex, blacklist)))))) else: print() return blacklist class CanActions: def __init__(self, arb_id=None, notifier_enabled=True): """ CanActions constructor :param arb_id: int default arbitration ID for object or None :param notifier_enabled: bool indicating whether a notifier for incoming message callbacks should be enabled """ self.bus = can.Bus(DEFAULT_INTERFACE) self.arb_id = arb_id self.bruteforce_running = False self.notifier = None if notifier_enabled: self.enable_notifier() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if self.notifier is not None: self.disable_notifier() self.bus.shutdown() def enable_notifier(self): self.notifier = can.Notifier(self.bus, listeners=[]) def disable_notifier(self): self.clear_listeners() # Prevent threading errors by stopping notifier gracefully self.notifier.stop(NOTIFIER_STOP_DURATION) self.notifier = None def add_listener(self, listener): self.notifier.listeners.append(listener) def clear_listeners(self): self.notifier.listeners = [] def set_listener(self, listener): self.clear_listeners() self.add_listener(listener) def send(self, data, arb_id=None, is_extended=None, is_error=False, is_remote=False): if len(data) > 8: raise IndexError("Invalid CAN message length: {0}".format(len(data))) # Fallback to default arbitration ID (self.arb_id) if no other ID is specified if arb_id is None: if self.arb_id is None: raise ValueError("Arbitration ID must be set through either 'arb_id' argument or self.arb_id") arb_id = self.arb_id # Force extended flag if it is unspecified and arbitration ID is larger than the standard format allows if is_extended is None: is_extended = arb_id > ARBITRATION_ID_MAX msg = can.Message(arbitration_id=arb_id, data=data, is_extended_id=is_extended, is_error_frame=is_error, is_remote_frame=is_remote) self.bus.send(msg) def bruteforce_arbitration_id(self, data, callback, min_id, max_id, callback_end=None): # Set limits if min_id is None: min_id = ARBITRATION_ID_MIN if max_id is None: if min_id <= ARBITRATION_ID_MAX: max_id = ARBITRATION_ID_MAX else: # If min_id is extended, use an extended default max_id as well max_id = ARBITRATION_ID_MAX_EXTENDED # Sanity checks if min_id > max_id: if callback_end: callback_end("Invalid range: min > max") return # Start bruteforce self.bruteforce_running = True for arb_id in range(min_id, max_id + 1): self.notifier.listeners = [callback(arb_id)] # Use standard addressing (11 bits arbitration ID) instead of extended (29 bits) when possible extended = False if arb_id > ARBITRATION_ID_MAX: extended = True msg = can.Message(arbitration_id=arb_id, data=data, is_extended_id=extended) self.bus.send(msg) time.sleep(MESSAGE_DELAY) # Return if stopped by calling module if not self.bruteforce_running: self.clear_listeners() return # Callback if bruteforce finished without being stopped if callback_end: self.clear_listeners() callback_end("Bruteforce of range 0x{0:x}-0x{1:x} completed".format(min_id, max_id))
from __future__ import print_function # Handle large ranges efficiently in both python 2 and 3 if version_info[0] == 2: range = xrange MESSAGE_DELAY = 0.1 DELAY_STEP = 0.02 NOTIFIER_STOP_DURATION = 0.5 # Global CAN interface setting, which can be set through the -i flag to cc.py # The value None corresponds to the default CAN interface (typically can0) DEFAULT_INTERFACE = None def auto_blacklist(bus, duration, classifier_function, print_results): """Listens for false positives on the CAN bus and generates an arbitration ID blacklist. Finds all can.Message <msg> on 'bus' where 'classifier_function(msg)' evaluates to True. Terminates after 'duration' seconds and returns a set of all matching arbitration IDs. Prints progress, time countdown and list of results if 'print_results' is True. :param bus: CAN bus instance :param duration: duration in seconds :param classifier_function: function which, when called upon a can.Message instance, returns a bool indicating if it should be blacklisted :param print_results: whether progress and results should be printed to stdout :type bus: can.Bus :type duration: float :type classifier_function: function :type print_results: bool :return set of matching arbitration IDs to blacklist :rtype set(int) """ if print_results: print("Scanning for arbitration IDs to blacklist") blacklist = set() start_time = time.time() end_time = start_time + duration while time.time() < end_time: if print_results: time_left = end_time - time.time() num_matches = len(blacklist) print("\r{0:> 5.1f} seconds left, {1} found".format(time_left, num_matches), end="") stdout.flush() # Receive message msg = bus.recv(0.1) if msg is None: continue # Classify if classifier_function(msg): # Add to blacklist blacklist.add(msg.arbitration_id) if print_results: num_matches = len(blacklist) print("\r 0.0 seconds left, {0} found".format(num_matches), end="") if len(blacklist) > 0: print("\n Detected IDs: {0}".format(" ".join(sorted(list(map(hex, blacklist)))))) else: print() return blacklist class CanActions: def __init__(self, arb_id=None, notifier_enabled=True): """ CanActions constructor :param arb_id: int default arbitration ID for object or None :param notifier_enabled: bool indicating whether a notifier for incoming message callbacks should be enabled """ self.bus = can.Bus(DEFAULT_INTERFACE) self.arb_id = arb_id self.bruteforce_running = False self.notifier = None if notifier_enabled: self.enable_notifier() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if self.notifier is not None: self.disable_notifier() self.bus.shutdown() def enable_notifier(self): self.notifier = can.Notifier(self.bus, listeners=[]) def disable_notifier(self): self.clear_listeners() # Prevent threading errors by stopping notifier gracefully self.notifier.stop(NOTIFIER_STOP_DURATION) self.notifier = None def add_listener(self, listener): self.notifier.listeners.append(listener) def clear_listeners(self): self.notifier.listeners = [] def set_listener(self, listener): self.clear_listeners() self.add_listener(listener) def send(self, data, arb_id=None, is_extended=None, is_error=False, is_remote=False): if len(data) > 8: raise IndexError("Invalid CAN message length: {0}".format(len(data))) # Fallback to default arbitration ID (self.arb_id) if no other ID is specified if arb_id is None: if self.arb_id is None: raise ValueError("Arbitration ID must be set through either 'arb_id' argument or self.arb_id") arb_id = self.arb_id # Force extended flag if it is unspecified and arbitration ID is 
larger than the standard format allows if is_extended is None: is_extended = arb_id > ARBITRATION_ID_MAX msg = can.Message(arbitration_id=arb_id, data=data, is_extended_id=is_extended, is_error_frame=is_error, is_remote_frame=is_remote) self.bus.send(msg) def bruteforce_arbitration_id(self, data, callback, min_id, max_id, callback_end=None): # Set limits if min_id is None: min_id = ARBITRATION_ID_MIN if max_id is None: if min_id <= ARBITRATION_ID_MAX: max_id = ARBITRATION_ID_MAX else: # If min_id is extended, use an extended default max_id as well max_id = ARBITRATION_ID_MAX_EXTENDED # Sanity checks if min_id > max_id: if callback_end: callback_end("Invalid range: min > max") return # Start bruteforce self.bruteforce_running = True for arb_id in range(min_id, max_id + 1): self.notifier.listeners = [callback(arb_id)] # Use standard addressing (11 bits arbitration ID) instead of extended (29 bits) when possible extended = False if arb_id > ARBITRATION_ID_MAX: extended = True msg = can.Message(arbitration_id=arb_id, data=data, is_extended_id=extended) self.bus.send(msg) time.sleep(MESSAGE_DELAY) # Return if stopped by calling module if not self.bruteforce_running: self.clear_listeners() return # Callback if bruteforce finished without being stopped if callback_end: self.clear_listeners() callback_end("Bruteforce of range 0x{0:x}-0x{1:x} completed".format(min_id, max_id))
def bruteforce_data(self, data, bruteforce_index, callback, min_value=BYTE_MIN, max_value=BYTE_MAX,
3
2023-11-13 05:05:46+00:00
2k
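Both send() and the arbitration-ID brute force in this record decide between standard and extended CAN addressing by comparing the ID against 0x7FF, the largest value that fits in 11 bits. A minimal sketch of that decision using python-can's Message type, which the record already relies on; the example IDs are arbitrary:

import can  # python-can, as imported in the record above

ARBITRATION_ID_MAX = 0x7FF  # largest standard 11-bit identifier

def build_frame(arb_id: int, data: bytes) -> can.Message:
    # Use extended 29-bit addressing only when the ID cannot fit in 11 bits,
    # mirroring the check performed in send() and the brute-force loop.
    return can.Message(arbitration_id=arb_id, data=data,
                       is_extended_id=arb_id > ARBITRATION_ID_MAX)

print(build_frame(0x7DF, b"\x02\x01\x00").is_extended_id)       # False (standard frame)
print(build_frame(0x18DAF110, b"\x02\x01\x00").is_extended_id)  # True (extended frame)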
L1bra1/WeakMotion
predict_FGBG_mask.py
[ { "identifier": "PreSegNet", "path": "weak_model.py", "snippet": "class PreSegNet(nn.Module):\n def __init__(self, FGBG_category_num=2, height_feat_size=13):\n super(PreSegNet, self).__init__()\n\n self.FGBG_classify = FGBGEstimation(motion_category_num=FGBG_category_num)\n self.stpn = STPN_Seg(height_feat_size=height_feat_size)\n\n\n def forward(self, bevs):\n bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)\n\n # Backbone network\n x = self.stpn(bevs)\n\n # FG/BG segmentation head\n FGBG_class_pred = self.FGBG_classify(x)\n\n return FGBG_class_pred" }, { "identifier": "remove_close", "path": "data/weak_utils.py", "snippet": "def remove_close(points, radius):\n points = points.T\n x_filt = np.abs(points[0, :]) < radius\n y_filt = np.abs(points[1, :]) < radius\n not_close = np.logical_not(np.logical_and(x_filt, y_filt))\n points = points[:, not_close]\n points = points.T\n return points, not_close" }, { "identifier": "filter_pc", "path": "data/weak_utils.py", "snippet": "def filter_pc(pc, extents):\n filter_idx = np.where((extents[0, 0] < pc[:, 0]) & (pc[:, 0] < extents[0, 1]) &\n (extents[1, 0] < pc[:, 1]) & (pc[:, 1] < extents[1, 1]) &\n (extents[2, 0] < pc[:, 2]) & (pc[:, 2] < extents[2, 1]))[0]\n pc = pc[filter_idx]\n return pc, filter_idx" }, { "identifier": "convert_semantic_to_FGBG", "path": "data/weak_utils.py", "snippet": "def convert_semantic_to_FGBG(cate):\n # Label ID 0: nose; Label ID 1~23: foreground classes; Label ID 24~31: background classes\n # reference https://github.com/nutonomy/nuscenes-devkit/blob/master/docs/instructions_nuscenes.md\n # and https://github.com/nutonomy/nuscenes-devkit/blob/master/docs/instructions_lidarseg.md\n\n fg_mask = (0 < cate) & (cate < 24)\n return fg_mask.astype(np.int32) + 1" }, { "identifier": "gen_voxel_indices_for_pc", "path": "data/weak_utils.py", "snippet": "def gen_voxel_indices_for_pc(pc, voxel_size, extents):\n # Convert 3D coordinate to voxel index\n discrete_pc = np.floor(pc[:, :3] / voxel_size).astype(np.int32)\n min_voxel_coord = np.floor(extents.T[0] / voxel_size)\n voxel_indices = (discrete_pc - min_voxel_coord).astype(int)\n return voxel_indices" }, { "identifier": "convert_semantic_to_FGBG_waymo", "path": "data/weak_utils.py", "snippet": "def convert_semantic_to_FGBG_waymo(cate):\n # Label ID 0: Background; 1: Vehicle; 2: Pedestrian; 3: Cyclist; 4: Sign, regarded as background\n\n fg_mask = (0 < cate) & (cate < 4)\n return fg_mask.astype(np.int32) + 1" } ]
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np import time import sys import argparse import os from weak_model import PreSegNet from data.weak_utils import remove_close, filter_pc, convert_semantic_to_FGBG, gen_voxel_indices_for_pc, convert_semantic_to_FGBG_waymo from sklearn.metrics import confusion_matrix from tqdm import tqdm
1,233
def check_folder(folder_path): if not os.path.exists(folder_path): os.mkdir(folder_path) return folder_path height_feat_size = 13 # The size along the height dimension parser = argparse.ArgumentParser() parser.add_argument('-d', '--data', default='/path_to/nuScenes/weak-data/train', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-s', '--save_FB', default='/path_to/nuScenes/FGBG-data/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes']) parser.add_argument('--pretrained', default='pretrained/nuscenes_seg_0-01.pth', type=str) parser.add_argument('--gpu', default='0') args = parser.parse_args() print(args) os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu datatype = args.datatype def main(): # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) dims = (256, 256, 13)
model = PreSegNet(FGBG_category_num=2, height_feat_size=height_feat_size)
0
2023-11-12 07:03:29+00:00
2k
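The context of this record includes gen_voxel_indices_for_pc, which quantizes metric point coordinates into integer indices on the BEV voxel grid. A self-contained sketch of that conversion with the same 0.25 m x/y and 0.4 m z voxel size the script configures; the two sample points are made up:

import numpy as np

def voxel_indices(points, voxel_size, extents):
    # Quantize each coordinate to voxel-size steps, then shift by the grid origin
    # so indices start at zero (same arithmetic as gen_voxel_indices_for_pc).
    discrete = np.floor(points[:, :3] / voxel_size).astype(np.int32)
    min_voxel_coord = np.floor(extents.T[0] / voxel_size)
    return (discrete - min_voxel_coord).astype(int)

extents = np.array([[-32.0, 32.0], [-32.0, 32.0], [-3.0, 2.0]])  # nuScenes extents from the script
voxel_size = np.array([0.25, 0.25, 0.4])
points = np.array([[0.0, 0.0, 0.0], [-31.9, 31.9, 1.9]])  # made-up sample points
print(voxel_indices(points, voxel_size, extents))  # indices inside the 256 x 256 x 13 grid: [[128 128 8], [0 255 12]]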
c3exchange/c3-smartcontracts-v1
contracts_unified/core/state_handler/global_handler.py
[ { "identifier": "InstrumentId", "path": "contracts_unified/library/c3types.py", "snippet": "class SignedInstrumentAmount(abi.NamedTuple):\nclass LiquidationFactors(abi.NamedTuple):\nclass InstrumentListElement(abi.NamedTuple):\nclass UserInstrumentData(abi.NamedTuple):\nclass OnChainOrderData(abi.NamedTuple):\nclass WormholeAddress(abi.NamedTuple):\nclass DecodedWormholePayload(abi.NamedTuple):" }, { "identifier": "ADDRESS_SIZE", "path": "contracts_unified/library/constants.py", "snippet": "ADDRESS_SIZE = 32" } ]
from typing import cast from pyteal import ( ABIReturnSubroutine, App, Assert, Btoi, Bytes, Expr, Global, Int, Len, MinBalance, Pop, Seq, abi, ) from contracts_unified.library.c3types import ( InstrumentId, InstrumentListElement, LiquidationFactors, ) from contracts_unified.library.constants import ADDRESS_SIZE
1,428
@staticmethod def set_pricecaster_id(pricecaster_id) -> Expr: """Sets the App id of the pricecaster""" return App.globalPut(KEY_PRICECASTER_ID, Btoi(pricecaster_id)) @staticmethod def get_wormhole_bridge_id() -> Expr: """Gets the App id of the wormhole bridge""" return App.globalGet(KEY_WORMHOLE_BRIDGE_ID) @staticmethod def set_wormhole_bridge_id(wormhole_bridge_id) -> Expr: """Sets the App id of the wormhole bridge""" return App.globalPut(KEY_WORMHOLE_BRIDGE_ID, Btoi(wormhole_bridge_id)) @staticmethod @ABIReturnSubroutine def set_address(key, address) -> Expr: """Sets an address in the global storage checking the length""" return Seq( Assert(Len(address) == Int(ADDRESS_SIZE)), App.globalPut(key, address) ) @staticmethod def get_signature_validator() -> Expr: """Checks the address of the signature validator""" return App.globalGet(KEY_SIGNATURE_VALIDATOR) @staticmethod def set_signature_validator(signature_validator) -> Expr: """Sets the address of the signature validator""" return cast(Expr, GlobalStateHandler.set_address(KEY_SIGNATURE_VALIDATOR, signature_validator)) @staticmethod def get_operator_address() -> Expr: """Gets the address of the operator""" return App.globalGet(KEY_OPERATOR_ADDRESS) @staticmethod def set_operator_address(operator_address) -> Expr: """Sets the address of the operator""" return cast(Expr, GlobalStateHandler.set_address(KEY_OPERATOR_ADDRESS, operator_address)) @staticmethod def get_quant_address() -> Expr: """Gets the quant address""" return App.globalGet(KEY_QUANT_ADDRESS) @staticmethod def set_quant_address(quant_address) -> Expr: """Sets the quant address""" return cast(Expr, GlobalStateHandler.set_address(KEY_QUANT_ADDRESS, quant_address)) @staticmethod def get_fee_target() -> Expr: """Gets the fee target address""" return App.globalGet(KEY_FEE_TARGET) @staticmethod def set_fee_target(fee_target_address) -> Expr: """Sets the fee target address""" return cast(Expr, GlobalStateHandler.set_address(KEY_FEE_TARGET, fee_target_address)) @staticmethod def get_withdraw_buffer() -> Expr: """Gets the withdraw buffer address""" return App.globalGet(KEY_WITHDRAW_BUFFER) @staticmethod def set_withdraw_buffer(withdraw_buffer) -> Expr: """Sets the withdraw buffer address""" return cast(Expr, GlobalStateHandler.set_address(KEY_WITHDRAW_BUFFER, withdraw_buffer)) @staticmethod @ABIReturnSubroutine def ensure_mbr_fund() -> Expr: """Ensures the current mbr is lower than the fund""" return Assert(MinBalance(Global.current_application_address()) <= App.globalGet(KEY_MBR_FUND)) @staticmethod def add_mbr_fund(mbr_fund) -> Expr: """Increments the mbr fund amount by an amount""" return App.globalPut(KEY_MBR_FUND, App.globalGet(KEY_MBR_FUND) + mbr_fund) @staticmethod def get_liquidation_factors() -> Expr: """Gets the object representing the liquidation factors""" return App.globalGet(KEY_LIQUIDATION_FACTORS) @staticmethod def set_liquidation_factors(factors) -> Expr: """Sets the global liquidation factors""" factors_size = abi.make(LiquidationFactors).type_spec().byte_length_static() return Seq( Assert(Len(factors) == Int(factors_size)), App.globalPut(KEY_LIQUIDATION_FACTORS, factors), ) @staticmethod @ABIReturnSubroutine def get_instrument(
"""Implements core contract global state handler""" KEY_INIT_TIMESTAMP = Bytes("t") KEY_INSTRUMENT_COUNT = Bytes("c") KEY_MBR_FUND = Bytes("m") KEY_PRICECASTER_ID = Bytes("p") KEY_WORMHOLE_BRIDGE_ID = Bytes("b") KEY_LIQUIDATION_FACTORS = Bytes("l") KEY_SIGNATURE_VALIDATOR = Bytes("s") KEY_WITHDRAW_BUFFER = Bytes("w") KEY_QUANT_ADDRESS = Bytes("q") KEY_OPERATOR_ADDRESS = Bytes("o") KEY_FEE_TARGET = Bytes("f") class GlobalStateHandler: """Global state handler""" instrument_size = abi.make(InstrumentListElement).type_spec().byte_length_static() max_instrument_count = 80 # NOTE: Most of these methods are not subroutines for performance reasons @staticmethod def initialize() -> Expr: """Initialize the global blob""" return Pop(App.box_create(Bytes("i"), Int(GlobalStateHandler.instrument_size * GlobalStateHandler.max_instrument_count))) @staticmethod def get_relative_timestamp() -> Expr: """Gets the relative timestamp""" return Global.latest_timestamp() - App.globalGet(KEY_INIT_TIMESTAMP) @staticmethod def set_init_timestamp() -> Expr: """Sets the initial timestamp""" return App.globalPut(KEY_INIT_TIMESTAMP, Global.latest_timestamp()) @staticmethod def get_instrument_count() -> Expr: """Gets the number of instruments""" return App.globalGet(KEY_INSTRUMENT_COUNT) @staticmethod def set_instrument_count(instrument_count) -> Expr: """Sets the number of instruments""" return App.globalPut(KEY_INSTRUMENT_COUNT, instrument_count) @staticmethod def get_pricecaster_id() -> Expr: """Gets the App id of the pricecaster""" return App.globalGet(KEY_PRICECASTER_ID) @staticmethod def set_pricecaster_id(pricecaster_id) -> Expr: """Sets the App id of the pricecaster""" return App.globalPut(KEY_PRICECASTER_ID, Btoi(pricecaster_id)) @staticmethod def get_wormhole_bridge_id() -> Expr: """Gets the App id of the wormhole bridge""" return App.globalGet(KEY_WORMHOLE_BRIDGE_ID) @staticmethod def set_wormhole_bridge_id(wormhole_bridge_id) -> Expr: """Sets the App id of the wormhole bridge""" return App.globalPut(KEY_WORMHOLE_BRIDGE_ID, Btoi(wormhole_bridge_id)) @staticmethod @ABIReturnSubroutine def set_address(key, address) -> Expr: """Sets an address in the global storage checking the length""" return Seq( Assert(Len(address) == Int(ADDRESS_SIZE)), App.globalPut(key, address) ) @staticmethod def get_signature_validator() -> Expr: """Checks the address of the signature validator""" return App.globalGet(KEY_SIGNATURE_VALIDATOR) @staticmethod def set_signature_validator(signature_validator) -> Expr: """Sets the address of the signature validator""" return cast(Expr, GlobalStateHandler.set_address(KEY_SIGNATURE_VALIDATOR, signature_validator)) @staticmethod def get_operator_address() -> Expr: """Gets the address of the operator""" return App.globalGet(KEY_OPERATOR_ADDRESS) @staticmethod def set_operator_address(operator_address) -> Expr: """Sets the address of the operator""" return cast(Expr, GlobalStateHandler.set_address(KEY_OPERATOR_ADDRESS, operator_address)) @staticmethod def get_quant_address() -> Expr: """Gets the quant address""" return App.globalGet(KEY_QUANT_ADDRESS) @staticmethod def set_quant_address(quant_address) -> Expr: """Sets the quant address""" return cast(Expr, GlobalStateHandler.set_address(KEY_QUANT_ADDRESS, quant_address)) @staticmethod def get_fee_target() -> Expr: """Gets the fee target address""" return App.globalGet(KEY_FEE_TARGET) @staticmethod def set_fee_target(fee_target_address) -> Expr: """Sets the fee target address""" return cast(Expr, GlobalStateHandler.set_address(KEY_FEE_TARGET, 
fee_target_address)) @staticmethod def get_withdraw_buffer() -> Expr: """Gets the withdraw buffer address""" return App.globalGet(KEY_WITHDRAW_BUFFER) @staticmethod def set_withdraw_buffer(withdraw_buffer) -> Expr: """Sets the withdraw buffer address""" return cast(Expr, GlobalStateHandler.set_address(KEY_WITHDRAW_BUFFER, withdraw_buffer)) @staticmethod @ABIReturnSubroutine def ensure_mbr_fund() -> Expr: """Ensures the current mbr is lower than the fund""" return Assert(MinBalance(Global.current_application_address()) <= App.globalGet(KEY_MBR_FUND)) @staticmethod def add_mbr_fund(mbr_fund) -> Expr: """Increments the mbr fund amount by an amount""" return App.globalPut(KEY_MBR_FUND, App.globalGet(KEY_MBR_FUND) + mbr_fund) @staticmethod def get_liquidation_factors() -> Expr: """Gets the object representing the liquidation factors""" return App.globalGet(KEY_LIQUIDATION_FACTORS) @staticmethod def set_liquidation_factors(factors) -> Expr: """Sets the global liquidation factors""" factors_size = abi.make(LiquidationFactors).type_spec().byte_length_static() return Seq( Assert(Len(factors) == Int(factors_size)), App.globalPut(KEY_LIQUIDATION_FACTORS, factors), ) @staticmethod @ABIReturnSubroutine def get_instrument(
instrument_id: InstrumentId,
0
2023-11-17 20:54:15+00:00
2k
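Every accessor in this record is a thin static helper around App.globalGet / App.globalPut with a one-byte key, so the rest of the contract never touches raw keys. A stripped-down sketch of that pattern in PyTeal with an unrelated, hypothetical counter key; it is not part of the contract above:

from pyteal import App, Bytes, Expr, Int

KEY_COUNTER = Bytes("n")  # hypothetical one-byte key, in the same style as the handler's keys

def set_counter(value) -> Expr:
    # Store an integer under the short global key.
    return App.globalPut(KEY_COUNTER, value)

def get_counter() -> Expr:
    # Read it back as a PyTeal expression.
    return App.globalGet(KEY_COUNTER)

def bump_counter() -> Expr:
    # Read-modify-write in one expression, like add_mbr_fund in the record.
    return App.globalPut(KEY_COUNTER, App.globalGet(KEY_COUNTER) + Int(1))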
gunderson-dettmer/CE2OCF
CE2OCF/ocf/mocks/stockholders.py
[ { "identifier": "fake_phone_number", "path": "CE2OCF/ocf/mocks/company.py", "snippet": "def fake_phone_number() -> str:\n \"\"\"\n Generates a valid US phone number with the international calling code.\n\n The format is +1 (XXX) XXX-XXXX, with the following rules for the area code:\n 1. The first digit cannot be 0 or 1.\n 2. The second digit cannot be 9.\n 3. The second and third digits cannot both be 0.\n\n Returns:\n str: A valid US phone number with international calling code.\n \"\"\"\n # Define the range for the first digit of area code (2-9)\n first_digit = random.randint(2, 9)\n\n # Define the range for the second and third digits of area code\n # The second digit cannot be 9, and the (second, third) cannot be (0, 0)\n while True:\n second_digit = random.randint(0, 8)\n third_digit = random.randint(0, 9)\n if not (second_digit == 0 and third_digit == 0):\n break\n\n # Generate the seven digits following the area code\n # The first digit of these seven digits cannot be 0 or 1 either.\n second_set_first_digit = random.randint(2, 9)\n remaining_six_digits = random.randint(0, 999999)\n\n # Combine all parts to create the phone number\n phone_number = f\"+1 ({first_digit}{second_digit}{third_digit}) {second_set_first_digit}{remaining_six_digits:06d}\"\n return phone_number" }, { "identifier": "DoubleTriggerTypesEnum", "path": "CE2OCF/types/enums.py", "snippet": "class DoubleTriggerTypesEnum(str, enum.Enum):\n NA = \"N/A\"\n TWENTY_FIVE_PERCENT_12_MONTHS = \"25% of unvested; Involuntary Termination within 12 months after CiC\"\n FIFTY_PERCENT_12_MONTHS = \"50% of unvested; Involuntary Termination within 12 months after CiC\"\n ONE_HUNDRED_PERCENT_12_MONTHS = \"100% of unvested; Involuntary Termination within 12 months after CiC\"\n TWENTY_FIVE_PERCENT_ANY_TIME = \"25% of unvested; Involuntary Termination any time after CiC\"\n FIFTY_PERCENT_ANY_TIME = \"50% of unvested; Involuntary Termination any time after CiC\"\n ONE_HUNDRED_PERCENT_ANY_TIME = \"100% of unvested; Involuntary Termination any time after CiC\"\n CUSTOM = \"Custom\"" }, { "identifier": "PaidWithOptionsEnum", "path": "CE2OCF/types/enums.py", "snippet": "class PaidWithOptionsEnum(str, enum.Enum):\n IP = \"IP\"\n CASH = \"Cash\"" }, { "identifier": "SingleTriggerTypesEnum", "path": "CE2OCF/types/enums.py", "snippet": "class SingleTriggerTypesEnum(str, enum.Enum):\n NA = \"N/A\"\n SIX_MONTHS_ALL_TIMES = \"6 months; all times after CiC\"\n TWELVE_MONTHS_ALL_TIMES = \"12 months; all times after CiC\"\n TWENTY_FOUR_MONTHS_ALL_TIMES = \"24 months; all times after CiC\"\n ONE_HUNDRED_PERCENT_ALL_TIMES = \"100%; all times after CiC\"\n SIX_MONTHS_INVOLUNTARY_TERMINATION = \"6 months; Involuntary Termination\"\n TWELVE_MONTHS_INVOLUNTARY_TERMINATION = \"12 months; Involuntary Termination\"\n TWENTY_FOUR_MONTHS_INVOLUNTARY_TERMINATION = \"24 months; Involuntary Termination\"\n ONE_HUNDRED_PERCENT_INVOLUNTARY_TERMINATION = \"100%; Involuntary Termination\"\n CUSTOM = \"Custom\"" }, { "identifier": "VestingTypesEnum", "path": "CE2OCF/types/enums.py", "snippet": "class VestingTypesEnum(str, enum.Enum):\n FOUR_YR_1_YR_CLIFF = \"4yr with 1yr Cliff\"\n FOUR_YR_NO_CLIFF = \"4yr with no Cliff\"\n FULLY_VESTED = \"Fully Vested\"\n CUSTOM = \"Custom\" # We're not going to support this via OCF" }, { "identifier": "Stockholder", "path": "CE2OCF/types/models.py", "snippet": "class Stockholder(BaseModel):\n id: str\n DoubleTrigger: DoubleTriggerTypesEnum\n # our answer will appear below the general description entered above. 
If no additional language is necessary,\n # skip this field\n DescriptionAssignedTechnology: Optional[str]\n # The description should provide clarity regarding exactly what property is being transferred while being neither\n # too narrow nor too broad.\n BroadDescriptionAssignedTechnology: str\n EmailAddress: str\n FFPreferredShares: Optional[\n int\n ] = None # If founder preferred is authorized for company AND we want to give this stockholder some,\n # how many shares do they get?\n PaidWith: PaidWithOptionsEnum\n PhoneNumber: str\n SingleTrigger: SingleTriggerTypesEnum\n Shares: int\n SSN: str\n Stockholder: str = Field(\n default_factory=lambda: uuid.uuid4().__str__()\n ) # Name of stockholder goes here BUT we're using uuid to be able filter objs by name and have guaranteed\n # uniques. Required for tests.\n StockholderCity: str\n StockholderState: str\n StockholderStreet: str\n StockholderZip: str\n VCD: str\n Vesting: VestingTypesEnum" } ]
import random import uuid from faker import Faker from CE2OCF.ocf.mocks.company import fake_phone_number from CE2OCF.types.enums import ( DoubleTriggerTypesEnum, PaidWithOptionsEnum, SingleTriggerTypesEnum, VestingTypesEnum, ) from CE2OCF.types.models import Stockholder
1,487
fake = Faker() def sum_shares(stockholder_list: list[Stockholder]) -> tuple[int, int]: total_FFPreferredShares = 0 total_Shares = 0 for stockholder in stockholder_list: if stockholder.FFPreferredShares is not None: total_FFPreferredShares += stockholder.FFPreferredShares if stockholder.Shares is not None: total_Shares += stockholder.Shares # if Shares are floats, replace with `float(stockholder.Shares)` return total_FFPreferredShares, total_Shares def mock_stockholder() -> Stockholder: return Stockholder( id=uuid.uuid4().__str__(),
DoubleTrigger=random.choice(list(DoubleTriggerTypesEnum)),
1
2023-11-13 15:50:53+00:00
2k
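The gold continuation of this record fills an enum-typed field by drawing uniformly from the enum's members with random.choice(list(...)). A tiny self-contained sketch of that idiom using a stand-in enum; the real trigger enums live in CE2OCF.types.enums and carry the values shown in the context:

import enum
import random

class TriggerDemo(str, enum.Enum):
    # Stand-in enum for illustration only; not the project's actual classes.
    NA = "N/A"
    FIFTY_PERCENT = "50% of unvested"
    ONE_HUNDRED_PERCENT = "100% of unvested"

picked = random.choice(list(TriggerDemo))  # list(...) materializes the members for indexing
print(picked.name, picked.value)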
Hellohistory/EbookDataRename.py
main.py
[ { "identifier": "queryDatabaseForFileNames", "path": "model/database_handler.py", "snippet": "def queryDatabaseForFileNames(db_folder_path, folder_path, tableWidget):\n try:\n db_files = get_files_from_directory(db_folder_path, recursive=True)\n db_files = [f for f in db_files if f.endswith('.db')]\n files = get_files_from_directory(folder_path, recursive=True)\n tableWidget.setRowCount(len(files))\n\n found_ss_codes = set()\n\n for row, file_path in enumerate(files):\n QApplication.processEvents()\n file_name = os.path.basename(file_path)\n match = re.search(r'\\d{8}', file_name)\n ss_code = match.group() if match else None\n\n if ss_code and ss_code not in found_ss_codes:\n for db_file in db_files:\n connection = sqlite3.connect(db_file)\n title = query_title_from_database(connection, ss_code)\n connection.close()\n\n if title != \"无此列\":\n tableWidget.setItem(row, 1, QTableWidgetItem(title))\n found_ss_codes.add(ss_code)\n break\n else:\n tableWidget.setItem(row, 1, QTableWidgetItem(\"无此列\"))\n else:\n message = \"已找到记录\" if ss_code in found_ss_codes else \"无效的 SS_code\"\n tableWidget.setItem(row, 1, QTableWidgetItem(message))\n\n tableWidget.setItem(row, 0, QTableWidgetItem(file_path))\n tableWidget.setItem(row, 2, QTableWidgetItem(\"待处理\"))\n\n except Exception as e:\n print(\"发生错误:\", str(e))" }, { "identifier": "get_files_from_directory", "path": "model/file_handler.py", "snippet": "def get_files_from_directory(directory_path, recursive=False):\n file_list = []\n if recursive:\n for root, dirs, files in os.walk(directory_path):\n for file in files:\n file_list.append(os.path.join(root, file))\n else:\n file_list = [os.path.join(directory_path, file) for file in os.listdir(directory_path) if\n os.path.isfile(os.path.join(directory_path, file))]\n\n return file_list" }, { "identifier": "startRenamingFiles", "path": "model/rename_handler.py", "snippet": "def startRenamingFiles(tableWidget, progressBar, changeExtensionCheckBox, traditionalSimplifiedCheckBox):\n total_files = tableWidget.rowCount()\n progressBar.setValue(0)\n cc = OpenCC('s2t')\n\n for row in range(total_files):\n original_file = tableWidget.item(row, 0).text()\n new_name = tableWidget.item(row, 1).text()\n\n if traditionalSimplifiedCheckBox.isChecked():\n new_name = cc.convert(new_name)\n\n original_extension = os.path.splitext(original_file)[1]\n\n if changeExtensionCheckBox.isChecked() and original_extension.lower() == \".uvz\":\n new_extension = \".zip\"\n else:\n new_extension = original_extension\n\n new_file = os.path.join(os.path.dirname(original_file), os.path.splitext(new_name)[0] + new_extension)\n\n try:\n os.rename(original_file, new_file)\n tableWidget.setItem(row, 2, QTableWidgetItem(\"重命名成功\"))\n except Exception as e:\n tableWidget.setItem(row, 2, QTableWidgetItem(f\"错误: {e}\"))\n\n progressBar.setValue(int((row + 1) / total_files * 100))\n\n progressBar.setValue(100)" } ]
import sys from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLineEdit, QProgressBar, QTableWidget, QRadioButton, QCheckBox, QFileDialog, QTableWidgetItem) from PyQt5.QtCore import QSize from opencc import OpenCC from model.database_handler import queryDatabaseForFileNames from model.file_handler import get_files_from_directory from model.rename_handler import startRenamingFiles
1,131
class MainGUI(QMainWindow): def __init__(self): super().__init__() self.cc = OpenCC('s2t') self.original_names = {} self.initUI() def applyTraditionalSimplifiedConversion(self): total_rows = self.tableWidget.rowCount() for row in range(total_rows): original_text_item = self.tableWidget.item(row, 1) if original_text_item: if self.traditionalSimplifiedCheckBox.isChecked(): if row not in self.original_names: self.original_names[row] = original_text_item.text() converted_text = self.cc.convert(self.original_names[row]) self.tableWidget.setItem(row, 1, QTableWidgetItem(converted_text)) else: if row in self.original_names: self.tableWidget.setItem(row, 1, QTableWidgetItem(self.original_names[row])) def initUI(self): self.setWindowTitle('EbookDataRename V0.0.1') self.setMinimumSize(QSize(800, 600)) centralWidget = QWidget(self) self.setCentralWidget(centralWidget) mainLayout = QVBoxLayout(centralWidget) self.setupLayout(mainLayout) self.applyMaterialDesignStyle() def initiateDatabaseQuery(self): db_path = self.local_db_lineedit.text() folder_path = self.targetFolderLineEdit.text()
queryDatabaseForFileNames(db_path, folder_path, self.tableWidget)
0
2023-11-10 19:42:58+00:00
2k
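Both the table view and the rename step in this record push display names through OpenCC('s2t') for simplified-to-traditional conversion, caching the originals so the checkbox can be toggled back off. A minimal sketch of that convert-and-remember pattern outside of Qt; the row/text handling is a placeholder, not the GUI's actual code:

from opencc import OpenCC  # same converter the record instantiates

cc = OpenCC("s2t")  # simplified -> traditional Chinese
originals = {}      # row index -> original text, kept so conversion stays reversible

def display_text(row: int, text: str, convert: bool) -> str:
    # Cache the first-seen original, then return either the converted or the original form.
    originals.setdefault(row, text)
    return cc.convert(originals[row]) if convert else originals[row]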
fleet-ai/code-pilot
scripts.py
[ { "identifier": "batch", "path": "utils/utils.py", "snippet": "def batch(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx : min(ndx + n, l)]" }, { "identifier": "INDEX_NAME", "path": "constants.py", "snippet": "INDEX_NAME = \"\" # TODO add" }, { "identifier": "INDEX_ENVIRONMENT", "path": "constants.py", "snippet": "INDEX_ENVIRONMENT = \"\" # TODO add" }, { "identifier": "NAMESPACE", "path": "constants.py", "snippet": "NAMESPACE = \"\" # TODO add" }, { "identifier": "PATH_TO_SRC_CODE", "path": "constants.py", "snippet": "PATH_TO_SRC_CODE = \"src_code/\" # OPTIONAL modify -- must start with src_code/" }, { "identifier": "CodeIndexer", "path": "code_indexer.py", "snippet": "class CodeIndexer:\n src_dir: str\n target_chunk_tokens: int\n max_chunk_tokens: int\n enforce_max_chunk_tokens: bool\n token_model: str\n code_splitters = {}\n hash_cache = {}\n index = None\n\n def __init__(\n self,\n src_dir: str,\n target_chunk_tokens: int = 300,\n max_chunk_tokens: int = 1000,\n enforce_max_chunk_tokens: bool = False,\n coalesce: int = 50,\n token_model: str = \"gpt-4\",\n ):\n self.src_dir = src_dir\n self.target_chunk_tokens = target_chunk_tokens\n self.max_chunk_tokens = max_chunk_tokens\n self.enforce_max_chunk_tokens = enforce_max_chunk_tokens\n self.coalesce = coalesce\n self.token_model = token_model\n self._create_index()\n self.refresh_nodes()\n\n def add_file(self, file: str):\n ext = os.path.splitext(file)[1]\n text_splitter = self._get_code_splitter(ext)\n\n with open(file, \"r\", encoding=\"utf-8\") as f:\n text = f.read()\n chunks = text_splitter.split_text(text)\n chunks = [\n {\n \"id\": str(uuid.uuid4()),\n \"text\": chunk,\n \"file\": file.split(\"/src_code/\", 1)[1]\n if \"/src_code/\" in file\n else file,\n }\n for chunk in chunks\n ]\n self.embed_and_upsert_code_chunks(chunks)\n\n def refresh_nodes(self):\n files = self._find_files(self.src_dir, EXTENSION_TO_TREE_SITTER_LANGUAGE)\n\n # For each file, split into chunks and index\n for file in files:\n self.add_file(str(file))\n\n def _find_files(self, path, include_ext={}):\n \"\"\"\n Recursively find all files in a given path.\n\n Parameters:\n path (str): The root directory to start searching from.\n include_ext (dict): A dictionary of file extensions to include\n (keys are extensions including leading period if applicable).\n\n Returns:\n list: A list of full file paths for each file found.\n \"\"\"\n # Convert path to an absolute path\n path = os.path.abspath(path)\n\n found_files = []\n\n for root, _, files in os.walk(path):\n for file in files:\n # Check if the file should be excluded based on its extension\n file_ext = os.path.splitext(file)[1]\n if file_ext in include_ext:\n # Construct the full path of the file and append to list\n full_path = Path(os.path.join(root, file)).resolve()\n found_files.append(full_path)\n\n return set(found_files)\n\n def _get_code_splitter(self, ext) -> CodeSplitter:\n if ext not in EXTENSION_TO_TREE_SITTER_LANGUAGE:\n raise ValueError(f\"Extension {ext} not supported.\")\n language = EXTENSION_TO_TREE_SITTER_LANGUAGE[ext]\n if language not in self.code_splitters:\n text_splitter = CodeSplitter(\n language=language,\n target_chunk_tokens=self.target_chunk_tokens,\n max_chunk_tokens=self.max_chunk_tokens,\n enforce_max_chunk_tokens=self.enforce_max_chunk_tokens,\n coalesce=self.coalesce,\n token_model=self.token_model,\n )\n self.code_splitters[ext] = text_splitter\n\n return self.code_splitters[ext]\n\n def _create_index(self):\n 
pinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT)\n pinecone_index = pinecone.Index(INDEX_NAME)\n self.index = pinecone_index\n\n return pinecone_index\n\n def embed_and_upsert_code_chunks(self, chunks):\n vectors = []\n embeddings = embed_code_chunks(\n chunks,\n model=EMBEDDINGS_MODEL,\n token_limit=MAX_CONTEXT_LENGTH_EMBEDDINGS,\n )\n\n for chunk, embedding in zip(chunks, embeddings):\n metadata = {\n \"id\": chunk[\"id\"],\n \"text\": chunk[\"text\"],\n \"file\": chunk[\"file\"],\n \"type\": \"code\",\n }\n vectors.append(\n {\n \"id\": str(uuid.uuid4()),\n \"values\": embedding,\n \"metadata\": metadata,\n }\n )\n\n for vec_batch in batch(vectors, 100):\n self.index.upsert(vectors=vec_batch, namespace=NAMESPACE)\n\n print(\"Finished embedding chunk(s).\")" } ]
import os import argparse import pinecone from dotenv import load_dotenv from context import download_embeddings from utils.utils import batch from constants import ( INDEX_NAME, INDEX_ENVIRONMENT, NAMESPACE, PATH_TO_SRC_CODE, ) from code_indexer import CodeIndexer
1,525
load_dotenv() PINECONE_API_KEY = os.getenv("PINECONE_API_KEY") pinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT) index = pinecone.Index(INDEX_NAME) def read_and_upsert(library_name): df = download_embeddings(library_name) def convert_row_to_dict(row): return { "id": row["id"], "values": [float(value) for value in row["dense_embeddings"]], "sparse_values": dict(row["sparse_values"]), "metadata": {**dict(row["metadata"]), "type": "documentation"}, } df["dict"] = df.apply(convert_row_to_dict, axis=1) vectors = df["dict"].tolist() vec_batches = list(batch(vectors, 100)) for idx, vec_batch in enumerate(vec_batches): print(f"Upserting batch {idx}/{len(vec_batches)}...") index.upsert(vectors=vec_batch, namespace=NAMESPACE) print("Finished upserting") def read_and_upsert_source_code():
load_dotenv() PINECONE_API_KEY = os.getenv("PINECONE_API_KEY") pinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT) index = pinecone.Index(INDEX_NAME) def read_and_upsert(library_name): df = download_embeddings(library_name) def convert_row_to_dict(row): return { "id": row["id"], "values": [float(value) for value in row["dense_embeddings"]], "sparse_values": dict(row["sparse_values"]), "metadata": {**dict(row["metadata"]), "type": "documentation"}, } df["dict"] = df.apply(convert_row_to_dict, axis=1) vectors = df["dict"].tolist() vec_batches = list(batch(vectors, 100)) for idx, vec_batch in enumerate(vec_batches): print(f"Upserting batch {idx}/{len(vec_batches)}...") index.upsert(vectors=vec_batch, namespace=NAMESPACE) print("Finished upserting") def read_and_upsert_source_code():
_ = CodeIndexer(src_dir=PATH_TO_SRC_CODE)
4
2023-11-14 01:45:16+00:00
2k
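Note on the record above: the batch helper from utils/utils.py yields fixed-size slices of a sequence, and the Pinecone upsert loops rely on it to send vectors in groups of 100. A minimal standalone sketch of that behaviour follows; the vector data is hypothetical stand-in content, not from the repository.

    def batch(iterable, n=1):
        # Yield successive slices of at most n items, exactly as in utils/utils.py above.
        l = len(iterable)
        for ndx in range(0, l, n):
            yield iterable[ndx : min(ndx + n, l)]

    vectors = [{"id": str(i), "values": [0.0, 0.1]} for i in range(250)]  # made-up placeholder vectors
    for vec_batch in batch(vectors, 100):
        print(len(vec_batch))  # prints 100, 100, 50; each chunk would be handed to an upsert call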
bithuanglq/APF_RL
DQN_variant.py
[ { "identifier": "RelativePosition", "path": "gym_examples/wrappers/relative_position.py", "snippet": "class RelativePosition(gym.ObservationWrapper):\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = spaces.Box(shape=(2+25*6,), low=-np.inf, high=np.inf)\n\n\n def observation(self, obs):\n return np.concatenate((obs[\"target\"] - obs[\"agent\"], obs[\"loc_obs\"]), axis=0) # (2+25*6,)" }, { "identifier": "Memory", "path": "prioritized_memory.py", "snippet": "class Memory: # stored as ( s, a, r, s_ ) in SumTree\n e = 0.01\n a = 0.6\n beta = 0.4\n beta_increment_per_sampling = 0.001\n\n def __init__(self, capacity):\n self.tree = SumTree(capacity)\n self.capacity = capacity\n\n def _get_priority(self, error):\n return (np.abs(error) + self.e) ** self.a\n\n def add(self, error, sample):\n p = self._get_priority(error)\n self.tree.add(p, sample)\n\n def sample(self, n):\n batch = []\n idxs = []\n segment = self.tree.total() / n\n priorities = []\n\n self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])\n\n for i in range(n):\n a = segment * i\n b = segment * (i + 1)\n\n s = random.uniform(a, b)\n (idx, p, data) = self.tree.get(s)\n priorities.append(p)\n batch.append(data)\n idxs.append(idx)\n\n sampling_probabilities = priorities / self.tree.total()\n is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)\n is_weight /= is_weight.max()\n\n return batch, idxs, is_weight\n\n def update(self, idx, error):\n p = self._get_priority(error)\n self.tree.update(idx, p)" } ]
import argparse import os import random import time import gym import numpy as np import tensorflow as tf import tensorlayer as tl from tqdm import tqdm from gym_examples.wrappers import RelativePosition from prioritized_memory import Memory
1,008
''' Debug log 1. Compatible versions: https://medium.com/mlearning-ai/how-to-install-tensorflow-2-x-with-cuda-and-cudnn-on-ubuntu-20-04-lts-b73c209d8e88 2. Use save_npz_dict to save the model instead of save_npz; the same applies when loading 3. Replacing part of the random exploration with APF works much better 4. Added PER: (https://blog.csdn.net/abcdefg90876/article/details/106270925), alternatively only the Original Replay Buffer can be used 5. For hyperparameters, see the hyper parameters module ''' ''' GridWorld-v0: @Action -- 0 right, 1 up, 2 left, 3 down @Observation -- {[x1, y1], [x2, y2], 25 vector(6,)}, agent_loc, target_loc and surrounding states. @Info -- distance between agent and target ''' parser = argparse.ArgumentParser() parser.add_argument('--mode', help='train or test', default='train') parser.add_argument( '--save_path', default='dqn_variants', help='folder to save if mode == train else model path,' 'qnet will be saved once target net update' ) parser.add_argument('--seed', help='random seed', type=int, default=0) parser.add_argument('--noisy_scale', type=float, default=1e-2) parser.add_argument('--disable_double', action='store_false', default=True) parser.add_argument('--disable_dueling', action='store_false', default=False) args = parser.parse_args() if args.mode == 'train': os.makedirs(args.save_path, exist_ok=True) random.seed(args.seed) np.random.seed(args.seed) tf.random.set_seed(args.seed) # reproducible noise_scale = args.noisy_scale double = not args.disable_double dueling = not args.disable_dueling env = gym.make('gym_examples/GridWorld-v0', render_mode='human')
''' Debug log 1. Compatible versions: https://medium.com/mlearning-ai/how-to-install-tensorflow-2-x-with-cuda-and-cudnn-on-ubuntu-20-04-lts-b73c209d8e88 2. Use save_npz_dict to save the model instead of save_npz; the same applies when loading 3. Replacing part of the random exploration with APF works much better 4. Added PER: (https://blog.csdn.net/abcdefg90876/article/details/106270925), alternatively only the Original Replay Buffer can be used 5. For hyperparameters, see the hyper parameters module ''' ''' GridWorld-v0: @Action -- 0 right, 1 up, 2 left, 3 down @Observation -- {[x1, y1], [x2, y2], 25 vector(6,)}, agent_loc, target_loc and surrounding states. @Info -- distance between agent and target ''' parser = argparse.ArgumentParser() parser.add_argument('--mode', help='train or test', default='train') parser.add_argument( '--save_path', default='dqn_variants', help='folder to save if mode == train else model path,' 'qnet will be saved once target net update' ) parser.add_argument('--seed', help='random seed', type=int, default=0) parser.add_argument('--noisy_scale', type=float, default=1e-2) parser.add_argument('--disable_double', action='store_false', default=True) parser.add_argument('--disable_dueling', action='store_false', default=False) args = parser.parse_args() if args.mode == 'train': os.makedirs(args.save_path, exist_ok=True) random.seed(args.seed) np.random.seed(args.seed) tf.random.set_seed(args.seed) # reproducible noise_scale = args.noisy_scale double = not args.disable_double dueling = not args.disable_dueling env = gym.make('gym_examples/GridWorld-v0', render_mode='human')
env = RelativePosition(env) # refer to gym_examples/wrappers/relative_position.py, observation space has changed!
0
2023-11-10 02:45:37+00:00
2k
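Note on the record above: the Memory class implements proportional prioritized experience replay on top of a SumTree. Its priority and importance-sampling weight formulas can be checked in isolation; the sketch below reproduces only that arithmetic with NumPy, using made-up TD errors and the number of samples in place of the tree's entry count.

    import numpy as np

    e, a, beta = 0.01, 0.6, 0.4                   # constants from the Memory class above

    errors = np.array([0.5, 0.05, 2.0])           # hypothetical TD errors
    p = (np.abs(errors) + e) ** a                 # p_i = (|error| + e) ** a, as in Memory._get_priority
    probs = p / p.sum()                           # sampling probabilities over the (toy) tree total
    is_weight = np.power(len(errors) * probs, -beta)
    is_weight /= is_weight.max()                  # normalized IS weights, as in Memory.sample
    print(probs, is_weight)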
ehennenfent/live_illustrate
live_illustrate/summarize.py
[ { "identifier": "AsyncThread", "path": "live_illustrate/util.py", "snippet": "class AsyncThread:\n \"\"\"Generic thread that has a work queue and a callback to run on the result\"\"\"\n\n SLEEP_TIME = 0.25\n MAX_ERRORS = 5\n\n def __init__(self, logger_name=\"AsyncThread\") -> None:\n self.queue: Queue[t.Any] = Queue()\n self._consecutive_errors: int = 0\n self.logger = logging.getLogger(logger_name)\n\n @abstractmethod\n def work(self, *args) -> t.Any:\n raise NotImplementedError()\n\n def start(self, callback) -> None:\n while True:\n if not self.queue.empty():\n try:\n callback(self.work(*self.queue.get()))\n self._consecutive_errors = 0\n except Exception as e:\n self._consecutive_errors += 1\n self.logger.error(e)\n if self._consecutive_errors > self.MAX_ERRORS:\n self.logger.critical(\"Abandoning execution after %d consecutive errors\", self.MAX_ERRORS)\n exit(-1)\n sleep(self.SLEEP_TIME)\n\n def send(self, *args) -> None:\n self.queue.put(args)" }, { "identifier": "Summary", "path": "live_illustrate/util.py", "snippet": "class Summary(Transcription):\n summary: str\n\n @classmethod\n def from_transcription(cls, transcription: Transcription, summary: str) -> \"Summary\":\n return cls(transcription.transcription, summary)" }, { "identifier": "Transcription", "path": "live_illustrate/util.py", "snippet": "class Transcription:\n transcription: str" }, { "identifier": "num_tokens_from_string", "path": "live_illustrate/util.py", "snippet": "@lru_cache(maxsize=2)\ndef num_tokens_from_string(string: str, encoding_name: str = \"cl100k_base\") -> int:\n \"\"\"Use OpenAI's tokenizer to count the number of tokens\"\"\"\n encoding = tiktoken.get_encoding(encoding_name)\n num_tokens = len(encoding.encode(string))\n return num_tokens" } ]
from datetime import datetime from openai import OpenAI from .util import AsyncThread, Summary, Transcription, num_tokens_from_string
697
SYSTEM_PROMPT = "You are a helpful assistant that describes scenes to an artist who wants to draw them. \ You will be given several lines of dialogue that contain details about the physical surroundings of the characters. \ Your job is to summarize the details of the scene in a bulleted list containing 4-7 bullet points. \ If there is more than one scene described by the dialog, summarize only the most recent one. \ Remember to be concise and not include details that cannot be seen." # Not so good about this last bit, eh? class TextSummarizer(AsyncThread): def __init__(self, model: str) -> None: super().__init__("TextSummarizer") self.openai_client: OpenAI = OpenAI() self.model: str = model def work(self, transcription: Transcription) -> Summary | None: """Sends the big buffer of provided text to ChatGPT, returns bullets describing the setting""" text = transcription.transcription
SYSTEM_PROMPT = "You are a helpful assistant that describes scenes to an artist who wants to draw them. \ You will be given several lines of dialogue that contain details about the physical surroundings of the characters. \ Your job is to summarize the details of the scene in a bulleted list containing 4-7 bullet points. \ If there is more than one scene described by the dialog, summarize only the most recent one. \ Remember to be concise and not include details that cannot be seen." # Not so good about this last bit, eh? class TextSummarizer(AsyncThread): def __init__(self, model: str) -> None: super().__init__("TextSummarizer") self.openai_client: OpenAI = OpenAI() self.model: str = model def work(self, transcription: Transcription) -> Summary | None: """Sends the big buffer of provided text to ChatGPT, returns bullets describing the setting""" text = transcription.transcription
if (token_count := num_tokens_from_string(text)) == 0:
3
2023-11-18 05:42:54+00:00
2k
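Note on the record above: AsyncThread is a small work-queue pattern, where send() enqueues arguments and start(callback) loops forever pulling items, calling work(*args) and passing the result to the callback. The subclass and threading setup below are illustrative only, assuming the project is installed so that live_illustrate.util is importable.

    import threading
    import time

    from live_illustrate.util import AsyncThread  # path as shown in the record above

    class Upper(AsyncThread):                     # hypothetical demo subclass, not part of the project
        def work(self, text: str) -> str:
            return text.upper()

    worker = Upper("UpperThread")
    # start() blocks in an infinite polling loop, so it is run on a daemon thread for this demo.
    threading.Thread(target=worker.start, args=(print,), daemon=True).start()
    worker.send("hello")                          # the worker eventually calls print(work("hello")) -> "HELLO"
    time.sleep(1)                                 # allow the 0.25 s polling loop to process the item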
cyberark/ark-sdk-python
ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py
[ { "identifier": "ArkProtocolType", "path": "ark_sdk_python/models/common/ark_protocol_type.py", "snippet": "class ArkProtocolType(str, MultiValueEnum):\n SSH = 'ssh', 'SSH'\n SCP = 'scp', 'SCP'\n SFTP = 'sftp', 'SFTP'\n RDP = 'rdp', 'RDP'\n CLI = 'cli', 'CLI'\n CONSOLE = 'console', 'Console'\n HTTPS = 'https', 'HTTPS'\n K8S = 'K8S', 'k8s'\n DB = 'Database', 'database', 'DATABASE'" }, { "identifier": "ArkWorkspaceType", "path": "ark_sdk_python/models/common/ark_workspace_type.py", "snippet": "class ArkWorkspaceType(str, MultiValueEnum):\n AWS = 'aws', 'AWS', 'Aws'\n AZURE = 'azure', 'AZURE', 'Azure'\n ONPREM = 'onprem', 'ON-PREMISE', 'OnPrem'\n DB = 'db', 'DATABASES', 'Databases'\n GCP = 'gcp', 'GCP'\n MYSQL = 'mysql', 'MySQL'\n MARIADB = 'mariadb', 'MariaDB'\n MSSQL = 'mssql', 'MSSQL'\n ORACLE = 'oracle', 'Oracle'\n POSTGRES = 'postgres', 'Postgres'\n FAULT = 'fault', 'FAULT'\n UNKNOWN = 'unknown', 'UNKNOWN', 'Unknown'" }, { "identifier": "ArkDPABaseAuthorizationRule", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_authorization_rule.py", "snippet": "class ArkDPABaseAuthorizationRule(ArkCamelizedModel):\n rule_name: str = Field(description='Name of the rule')\n user_data: ArkDPAUserData = Field(description='User data related information of the rule')" }, { "identifier": "ArkDPABaseConnectionInformation", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_connection_information.py", "snippet": "class ArkDPABaseConnectionInformation(ArkCamelizedModel):\n days_of_week: Optional[List[ArkDPADaysOfWeek]] = Field(\n description='Days of week this rule is allowed on', default=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n )\n full_days: Optional[bool] = Field(description='Whether this rule is allowed for the entirety of the week', default=False)\n hours_from: Optional[str] = Field(description='From which hours this rule is allowed')\n hours_to: Optional[str] = Field(description='To which hours this rule is allowed')\n time_zone: Optional[Union[Dict, str]] = Field(description='Timezone in which the hours apply to')\n grant_access: conint(gt=0, le=24) = Field(description='For how many hours to grant access in this rule in hours', default=2)\n idle_time: Optional[conint(gt=0, le=120)] = Field(\n description='How long the session can stay idle until stopped in minutes', default=None\n )" }, { "identifier": "ArkDPAVMProvidersConnectionDict", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_connection_data.py", "snippet": "class ArkDPAVMConnectionMethodData(ArkCamelizedModel):\nclass ArkDPAVMLocalEphemeralUserConnectionMethodData(ArkDPAVMConnectionMethodData):\nclass ArkDPAVMRDPLocalEphemeralUserConnectionData(ArkCamelizedModel):" } ]
from pydantic import Field, validator from ark_sdk_python.models.common import ArkProtocolType from ark_sdk_python.models.common.ark_workspace_type import ArkWorkspaceType from ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_authorization_rule import ArkDPABaseAuthorizationRule from ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_connection_information import ArkDPABaseConnectionInformation from ark_sdk_python.models.services.dpa.policies.vm.ark_dpa_vm_connection_data import ArkDPAVMProvidersConnectionDict
957
class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation): connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made') # pylint: disable=no-self-use,no-self-argument @validator('connect_as') def validate_connect_as(cls, val): for k, v in val.items():
class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation): connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made') # pylint: disable=no-self-use,no-self-argument @validator('connect_as') def validate_connect_as(cls, val): for k, v in val.items():
if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:
1
2023-11-13 09:24:31+00:00
2k
Infineon/pharaoh-dev
src/pharaoh/templating/second_level/template_env.py
[ { "identifier": "env_filters", "path": "src/pharaoh/templating/second_level/env_filters.py", "snippet": "DEFAULT = object()\ndef required(value):\ndef rep(value) -> str:\ndef or_default(value, default):\ndef oc_resolve(value: omegaconf.DictConfig):\ndef oc_get(cfg: omegaconf.DictConfig, key, default=DEFAULT):\ndef exists(path: str) -> bool:\ndef to_path(path: str) -> Path:\ndef hasattr_(obj, name):\ndef md2html(text):" }, { "identifier": "env_globals", "path": "src/pharaoh/templating/second_level/env_globals.py", "snippet": "def raise_helper(msg):\ndef heading(text: str, level: int) -> str:\ndef rand_id(chars: int | None = None) -> str:\ndef read_text(file) -> str:\ndef hrule():\ndef fglob(pattern: str, root: str = \".\") -> list[Path]:\ndef assert_true(statement: bool, message: str = \"\"):" }, { "identifier": "env_tests", "path": "src/pharaoh/templating/second_level/env_tests.py", "snippet": "" }, { "identifier": "asset_rel_path_from_build", "path": "src/pharaoh/templating/second_level/util.py", "snippet": "def asset_rel_path_from_build(sphinx_app: PharaohSphinx, template_file: Path, asset: Asset):\n asset.copy_to(sphinx_app.assets_dir)\n return (\n Path(os.path.relpath(sphinx_app.confdir, os.path.dirname(template_file)))\n / sphinx_app.assets_dir.name\n / asset.assetfile.name\n ).as_posix()" }, { "identifier": "asset_rel_path_from_project", "path": "src/pharaoh/templating/second_level/util.py", "snippet": "def asset_rel_path_from_project(project: PharaohProject, asset: Asset):\n return \"/\" + asset.assetfile.relative_to(project.asset_build_dir.parent).as_posix()" } ]
import copy import functools import os import pprint import shutil import uuid import jinja2 import omegaconf import pharaoh.project from functools import partial from pathlib import Path from types import ModuleType from typing import TYPE_CHECKING, Callable from jinja2_git import GitExtension from pharaoh.log import log from pharaoh.util.contextlib_chdir import chdir from .env_filters import env_filters from .env_globals import env_globals from .env_tests import env_tests from .util import asset_rel_path_from_build, asset_rel_path_from_project from collections.abc import Iterator from sphinx.config import Config from pharaoh.sphinx_app import PharaohSphinx from pharaoh.plugins.plugin_manager import PM
1,113
from __future__ import annotations if TYPE_CHECKING: class PharaohFileSystemLoader(jinja2.loaders.FileSystemLoader): def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str, Callable[[], bool]]: # Overwrite to support absolute filenames as well as relative ones that have to be looked up in the search paths for searchpath in self.searchpath: if "<>" in template: # See PharaohTemplateEnv.join_path parent, template_ = template.rsplit("<>", 1) template_path = Path(parent) / template_ if template_path.is_absolute() and template_path.exists(): filename = template_path.as_posix() else: pieces = jinja2.loaders.split_template_path(template_) filename = jinja2.loaders.posixpath.join(searchpath, *pieces) else: pieces = jinja2.loaders.split_template_path(template) filename = jinja2.loaders.posixpath.join(searchpath, *pieces) # Original code starts from here f = jinja2.loaders.open_if_exists(filename) if f is None: continue try: contents = f.read().decode(self.encoding) finally: f.close() def up_to_date() -> bool: return False # Use normpath to convert Windows altsep to sep. return contents, os.path.normpath(filename), up_to_date raise jinja2.TemplateNotFound(template) class PharaohTemplate(jinja2.Template): def render(self, *args, **kwargs) -> str: return super().render(*args, **kwargs) class PharaohTemplateEnv(jinja2.Environment): template_class = PharaohTemplate def __init__(self): super().__init__( trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True, extensions=["jinja2_ansible_filters.AnsibleCoreFiltersExtension"], ) self.default_context: dict = { "project": {}, # Project related context "local": {}, # Discovered content of context files next to the source file "assets": {}, # Discovered content of asset files registered via register_templating_context function "config": None, # Content of conf.py (Sphinx Config object) "user": None, # Content of user given dict "pharaoh_jinja_context" in conf.py } self.local_context_file_cache: dict[Path, ModuleType] = {} self.sphinx_app: PharaohSphinx | None = None self.globals.update(env_globals)
from __future__ import annotations if TYPE_CHECKING: class PharaohFileSystemLoader(jinja2.loaders.FileSystemLoader): def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str, Callable[[], bool]]: # Overwrite to support absolute filenames as well as relative ones that have to be looked up in the search paths for searchpath in self.searchpath: if "<>" in template: # See PharaohTemplateEnv.join_path parent, template_ = template.rsplit("<>", 1) template_path = Path(parent) / template_ if template_path.is_absolute() and template_path.exists(): filename = template_path.as_posix() else: pieces = jinja2.loaders.split_template_path(template_) filename = jinja2.loaders.posixpath.join(searchpath, *pieces) else: pieces = jinja2.loaders.split_template_path(template) filename = jinja2.loaders.posixpath.join(searchpath, *pieces) # Original code starts from here f = jinja2.loaders.open_if_exists(filename) if f is None: continue try: contents = f.read().decode(self.encoding) finally: f.close() def up_to_date() -> bool: return False # Use normpath to convert Windows altsep to sep. return contents, os.path.normpath(filename), up_to_date raise jinja2.TemplateNotFound(template) class PharaohTemplate(jinja2.Template): def render(self, *args, **kwargs) -> str: return super().render(*args, **kwargs) class PharaohTemplateEnv(jinja2.Environment): template_class = PharaohTemplate def __init__(self): super().__init__( trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True, extensions=["jinja2_ansible_filters.AnsibleCoreFiltersExtension"], ) self.default_context: dict = { "project": {}, # Project related context "local": {}, # Discovered content of context files next to the source file "assets": {}, # Discovered content of asset files registered via register_templating_context function "config": None, # Content of conf.py (Sphinx Config object) "user": None, # Content of user given dict "pharaoh_jinja_context" in conf.py } self.local_context_file_cache: dict[Path, ModuleType] = {} self.sphinx_app: PharaohSphinx | None = None self.globals.update(env_globals)
self.filters.update(env_filters)
0
2023-11-10 11:33:02+00:00
2k
CorentinJ/transcription-diff
transcription_diff/text_diff.py
[ { "identifier": "normalize_text", "path": "transcription_diff/text_normalization.py", "snippet": "def normalize_text(raw_text: str, lang_id: str, fault_tolerant=False) -> Tuple[str, SliceMap]:\n \"\"\"\n :param fault_tolerant: issues arising in cleaning operations will not raise an exception if True. The cleaning\n and/or mapping may then be incorrect.\n :return: the tuple\n - clean_text: the cleaned text\n - raw2clean: the mapping from raw text to clean text\n \"\"\"\n # Define the ops to apply\n text_cleaning_ops = [standardize_characters]\n if Language.get(lang_id).language == \"en\":\n text_cleaning_ops.extend([expand_abbreviations, normalize_numbers])\n text_cleaning_ops.extend([keep_pronounced_only, collapse_whitespace])\n\n return apply_text_transforms_with_mapping(raw_text, text_cleaning_ops, fault_tolerant)" }, { "identifier": "whisper_asr", "path": "transcription_diff/whisper_asr.py", "snippet": "@overload\ndef whisper_asr(\n wav: np.ndarray, sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device=\"cuda\"\n) -> Tuple[str, str]: ..." } ]
import logging import numpy as np from dataclasses import dataclass from pathlib import Path from typing import List, Iterable, overload, Union from minineedle import needle from transcription_diff.text_normalization import normalize_text from transcription_diff.whisper_asr import whisper_asr from colorama import Fore as colors
1,565
@dataclass class TextDiffRegion: reference_text: str compared_text: str pronunciation_match: bool def clean_text_diff(ref_text: str, compared: str) -> List[TextDiffRegion]: alignment = needle.NeedlemanWunsch(ref_text.split(" "), compared.split(" ")) alignment.align() # Arrange regions = [] for ref_word, compared_word in zip(*alignment.get_aligned_sequences()): regions.append(TextDiffRegion( ref_word if isinstance(ref_word, str) else "", compared_word if isinstance(compared_word, str) else "", pronunciation_match=(ref_word == compared_word) )) # Re-add the spaces between words, and prefer to add them on identical regions rather than non-identical ones for text_attr in ("reference_text", "compared_text"): last_word_region = None for region in regions: if not getattr(region, text_attr): continue if last_word_region: if last_word_region.pronunciation_match: setattr(last_word_region, text_attr, getattr(last_word_region, text_attr) + " ") else: setattr(region, text_attr, " " + getattr(region, text_attr)) last_word_region = region # Compress new_regions = [] for region in regions: if new_regions and (new_regions[-1].pronunciation_match == region.pronunciation_match): new_regions[-1].reference_text += region.reference_text new_regions[-1].compared_text += region.compared_text else: new_regions.append(region) return new_regions def text_diff( reference_texts: Iterable[str], compared_texts: Iterable[str], lang_id: str ) -> List[List[TextDiffRegion]]: raw_refs, raw_comps = list(reference_texts), list(compared_texts) # Normalize text down to characters that influence pronunciation only clean_refs, raw2clean_refs = zip(*[normalize_text(raw_ref, lang_id) for raw_ref in raw_refs]) clean_comps, raw2clean_comps = zip(*[normalize_text(raw_comp, lang_id) for raw_comp in raw_comps]) # Align clean texts and isolate errors text_diffs = [clean_text_diff(clean_ref, clean_comp) for clean_ref, clean_comp in zip(clean_refs, clean_comps)] # Bring the regions up to the unnormalized text space for raw_ref, raw2clean_ref, raw_comp, raw2clean_comp, clean_diff in zip( raw_refs, raw2clean_refs, raw_comps, raw2clean_comps, text_diffs ): clean2raw_ref = raw2clean_ref.inverse() clean2raw_comp = raw2clean_comp.inverse() clean_ref_pos, clean_comp_pos = 0, 0 raw_ref_pos, raw_comp_pos = 0, 0 for region in clean_diff: # Use slicemaps to figure out which parts of the unnormalized text this region corresponds to clean_ref_sli = slice(clean_ref_pos, clean_ref_pos + len(region.reference_text)) clean_comp_sli = slice(clean_comp_pos, clean_comp_pos + len(region.compared_text)) if region is not clean_diff[-1]: raw_ref_sli = slice(raw_ref_pos, max(clean2raw_ref[clean_ref_sli].stop, raw_ref_pos)) raw_comp_sli = slice(raw_comp_pos, max(clean2raw_comp[clean_comp_sli].stop, raw_comp_pos)) else: # Ensure we span the entirety of the unnormalized text, slicemaps are not guaranteed to be surjective # Typical example: a final punctuation that is erased in text normalization. 
raw_ref_sli = slice(raw_ref_pos, len(raw_ref)) raw_comp_sli = slice(raw_comp_pos, len(raw_comp)) # Modify the region in place with the unnormalized text region.reference_text = raw_ref[raw_ref_sli] region.compared_text = raw_comp[raw_comp_sli] # Update the positions clean_ref_pos = clean_ref_sli.stop clean_comp_pos = clean_comp_sli.stop raw_ref_pos = raw_ref_sli.stop raw_comp_pos = raw_comp_sli.stop return text_diffs @overload def transcription_diff( text: str, wav: np.ndarray, sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[TextDiffRegion]: ... @overload def transcription_diff( texts: List[str], wavs: Iterable[np.ndarray], sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[List[TextDiffRegion]]: ... @overload def transcription_diff( text: str, fpath: Union[str, Path], *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[TextDiffRegion]: ... @overload def transcription_diff( texts: List[str], fpaths: Iterable[Union[str, Path]], *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[List[TextDiffRegion]]: ... def transcription_diff( *args, lang_id: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> Union[List[TextDiffRegion], List[List[TextDiffRegion]]]: # TODO: doc # Arg parsing texts, args = args[0], args[1:] if single := isinstance(texts, str): texts = [texts] # Perform ASR
logger = logging.getLogger(__name__) @dataclass class TextDiffRegion: reference_text: str compared_text: str pronunciation_match: bool def clean_text_diff(ref_text: str, compared: str) -> List[TextDiffRegion]: alignment = needle.NeedlemanWunsch(ref_text.split(" "), compared.split(" ")) alignment.align() # Arrange regions = [] for ref_word, compared_word in zip(*alignment.get_aligned_sequences()): regions.append(TextDiffRegion( ref_word if isinstance(ref_word, str) else "", compared_word if isinstance(compared_word, str) else "", pronunciation_match=(ref_word == compared_word) )) # Re-add the spaces between words, and prefer to add them on identical regions rather than non-identical ones for text_attr in ("reference_text", "compared_text"): last_word_region = None for region in regions: if not getattr(region, text_attr): continue if last_word_region: if last_word_region.pronunciation_match: setattr(last_word_region, text_attr, getattr(last_word_region, text_attr) + " ") else: setattr(region, text_attr, " " + getattr(region, text_attr)) last_word_region = region # Compress new_regions = [] for region in regions: if new_regions and (new_regions[-1].pronunciation_match == region.pronunciation_match): new_regions[-1].reference_text += region.reference_text new_regions[-1].compared_text += region.compared_text else: new_regions.append(region) return new_regions def text_diff( reference_texts: Iterable[str], compared_texts: Iterable[str], lang_id: str ) -> List[List[TextDiffRegion]]: raw_refs, raw_comps = list(reference_texts), list(compared_texts) # Normalize text down to characters that influence pronunciation only clean_refs, raw2clean_refs = zip(*[normalize_text(raw_ref, lang_id) for raw_ref in raw_refs]) clean_comps, raw2clean_comps = zip(*[normalize_text(raw_comp, lang_id) for raw_comp in raw_comps]) # Align clean texts and isolate errors text_diffs = [clean_text_diff(clean_ref, clean_comp) for clean_ref, clean_comp in zip(clean_refs, clean_comps)] # Bring the regions up to the unnormalized text space for raw_ref, raw2clean_ref, raw_comp, raw2clean_comp, clean_diff in zip( raw_refs, raw2clean_refs, raw_comps, raw2clean_comps, text_diffs ): clean2raw_ref = raw2clean_ref.inverse() clean2raw_comp = raw2clean_comp.inverse() clean_ref_pos, clean_comp_pos = 0, 0 raw_ref_pos, raw_comp_pos = 0, 0 for region in clean_diff: # Use slicemaps to figure out which parts of the unnormalized text this region corresponds to clean_ref_sli = slice(clean_ref_pos, clean_ref_pos + len(region.reference_text)) clean_comp_sli = slice(clean_comp_pos, clean_comp_pos + len(region.compared_text)) if region is not clean_diff[-1]: raw_ref_sli = slice(raw_ref_pos, max(clean2raw_ref[clean_ref_sli].stop, raw_ref_pos)) raw_comp_sli = slice(raw_comp_pos, max(clean2raw_comp[clean_comp_sli].stop, raw_comp_pos)) else: # Ensure we span the entirety of the unnormalized text, slicemaps are not guaranteed to be surjective # Typical example: a final punctuation that is erased in text normalization. 
raw_ref_sli = slice(raw_ref_pos, len(raw_ref)) raw_comp_sli = slice(raw_comp_pos, len(raw_comp)) # Modify the region in place with the unnormalized text region.reference_text = raw_ref[raw_ref_sli] region.compared_text = raw_comp[raw_comp_sli] # Update the positions clean_ref_pos = clean_ref_sli.stop clean_comp_pos = clean_comp_sli.stop raw_ref_pos = raw_ref_sli.stop raw_comp_pos = raw_comp_sli.stop return text_diffs @overload def transcription_diff( text: str, wav: np.ndarray, sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[TextDiffRegion]: ... @overload def transcription_diff( texts: List[str], wavs: Iterable[np.ndarray], sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[List[TextDiffRegion]]: ... @overload def transcription_diff( text: str, fpath: Union[str, Path], *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[TextDiffRegion]: ... @overload def transcription_diff( texts: List[str], fpaths: Iterable[Union[str, Path]], *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> List[List[TextDiffRegion]]: ... def transcription_diff( *args, lang_id: str=None, whisper_model_size=2, custom_words=[], device="cuda" ) -> Union[List[TextDiffRegion], List[List[TextDiffRegion]]]: # TODO: doc # Arg parsing texts, args = args[0], args[1:] if single := isinstance(texts, str): texts = [texts] # Perform ASR
asr_texts, lang_id = whisper_asr(
1
2023-11-11 20:51:54+00:00
2k
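Note on the record above: clean_text_diff word-aligns two normalized strings with minineedle's Needleman-Wunsch implementation and merges adjacent matching and mismatching words into TextDiffRegion runs. A small usage sketch, assuming the package and its minineedle dependency are installed; the example sentences are invented and no particular alignment output is asserted.

    from transcription_diff.text_diff import clean_text_diff

    for region in clean_text_diff("the quick brown fox", "the quick red fox"):
        marker = "==" if region.pronunciation_match else "!="
        print(f"{region.reference_text!r} {marker} {region.compared_text!r}")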
AI4HealthUOL/ECG-MIMIC
src/clinical_ts/inception1d.py
[ { "identifier": "AdaptiveConcatPool1d", "path": "src/clinical_ts/basic_conv1d.py", "snippet": "class AdaptiveConcatPool1d(nn.Module):\n \"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`.\"\n def __init__(self, sz=None):\n \"Output will be 2*sz or 2 if sz is None\"\n super().__init__()\n sz = sz or 1\n self.ap,self.mp = nn.AdaptiveAvgPool1d(sz), nn.AdaptiveMaxPool1d(sz)\n def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)" }, { "identifier": "create_head1d", "path": "src/clinical_ts/basic_conv1d.py", "snippet": "def create_head1d(nf, nc, lin_ftrs=None, ps=0.5, bn:bool=True, act=\"relu\", concat_pooling=True):\n \"Model head that takes `nf` features, runs through `lin_ftrs`, and about `nc` classes; added bn and act here\"\n lin_ftrs = [2*nf if concat_pooling else nf, nc] if lin_ftrs is None else [2*nf if concat_pooling else nf] + lin_ftrs + [nc] #was [nf, 512,nc]\n ps = [ps] if not isinstance(ps,Iterable) else ps\n if len(ps)==1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps\n actns = [nn.ReLU(inplace=True) if act==\"relu\" else nn.ELU(inplace=True)] * (len(lin_ftrs)-2) + [None]\n layers = [AdaptiveConcatPool1d() if concat_pooling else nn.AdaptiveAvgPool1d(1), Flatten()]\n for ni,no,p,actn in zip(lin_ftrs[:-1],lin_ftrs[1:],ps,actns):\n layers += bn_drop_lin(ni,no,bn,p,actn)\n return nn.Sequential(*layers)" } ]
import torch import torch.nn as nn import torch.nn.functional as F import math from .basic_conv1d import AdaptiveConcatPool1d,create_head1d
1,342
__all__ = ['conv', 'noop', 'InceptionBlock1d', 'Shortcut1d', 'InceptionBackbone', 'Inception1d', 'inception1d'] # Cell # Cell def conv(in_planes, out_planes, kernel_size=3, stride=1): "convolution with padding" return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False) def noop(x): return x # Cell class InceptionBlock1d(nn.Module): def __init__(self, ni, nb_filters, kss, stride=1, act='linear', bottleneck_size=32): super().__init__() self.bottleneck = conv(ni, bottleneck_size, 1, stride) if (bottleneck_size>0) else noop self.convs = nn.ModuleList([conv(bottleneck_size if (bottleneck_size>0) else ni, nb_filters, ks) for ks in kss]) self.conv_bottle = nn.Sequential(nn.MaxPool1d(3, stride, padding=1), conv(ni, nb_filters, 1)) self.bn_relu = nn.Sequential(nn.BatchNorm1d((len(kss)+1)*nb_filters), nn.ReLU()) def forward(self, x): #print("block in",x.size()) bottled = self.bottleneck(x) out = self.bn_relu(torch.cat([c(bottled) for c in self.convs]+[self.conv_bottle(x)], dim=1)) return out # Cell class Shortcut1d(nn.Module): def __init__(self, ni, nf): super().__init__() self.act_fn=nn.ReLU(True) self.conv=conv(ni, nf, 1) self.bn=nn.BatchNorm1d(nf) def forward(self, inp, out): #print("sk",out.size(), inp.size(), self.conv(inp).size(), self.bn(self.conv(inp)).size) #input() return self.act_fn(out + self.bn(self.conv(inp))) # Cell class InceptionBackbone(nn.Module): def __init__(self, input_channels, kss, depth, bottleneck_size, nb_filters, use_residual): super().__init__() self.depth = depth assert((depth % 3) == 0) self.use_residual = use_residual n_ks = len(kss) + 1 self.im = nn.ModuleList([InceptionBlock1d(input_channels if d==0 else n_ks*nb_filters,nb_filters=nb_filters,kss=kss, bottleneck_size=bottleneck_size) for d in range(depth)]) self.sk = nn.ModuleList([Shortcut1d(input_channels if d==0 else n_ks*nb_filters, n_ks*nb_filters) for d in range(depth//3)]) def forward(self, x): input_res = x for d in range(self.depth): x = self.im[d](x) if self.use_residual and d % 3 == 2: x = (self.sk[d//3])(input_res, x) input_res = x.clone() return x # Cell class Inception1d(nn.Module): '''inception time architecture''' def __init__(self, num_classes=2, input_channels=8, kss=[39,19,9], depth=6, bottleneck_size=32, nb_filters=32, use_residual=True,lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True): super().__init__() layers = [InceptionBackbone(input_channels=input_channels, kss=kss, depth=depth, bottleneck_size=bottleneck_size, nb_filters=nb_filters, use_residual=use_residual)] n_ks = len(kss) + 1 #head
__all__ = ['conv', 'noop', 'InceptionBlock1d', 'Shortcut1d', 'InceptionBackbone', 'Inception1d', 'inception1d'] # Cell # Cell def conv(in_planes, out_planes, kernel_size=3, stride=1): "convolution with padding" return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False) def noop(x): return x # Cell class InceptionBlock1d(nn.Module): def __init__(self, ni, nb_filters, kss, stride=1, act='linear', bottleneck_size=32): super().__init__() self.bottleneck = conv(ni, bottleneck_size, 1, stride) if (bottleneck_size>0) else noop self.convs = nn.ModuleList([conv(bottleneck_size if (bottleneck_size>0) else ni, nb_filters, ks) for ks in kss]) self.conv_bottle = nn.Sequential(nn.MaxPool1d(3, stride, padding=1), conv(ni, nb_filters, 1)) self.bn_relu = nn.Sequential(nn.BatchNorm1d((len(kss)+1)*nb_filters), nn.ReLU()) def forward(self, x): #print("block in",x.size()) bottled = self.bottleneck(x) out = self.bn_relu(torch.cat([c(bottled) for c in self.convs]+[self.conv_bottle(x)], dim=1)) return out # Cell class Shortcut1d(nn.Module): def __init__(self, ni, nf): super().__init__() self.act_fn=nn.ReLU(True) self.conv=conv(ni, nf, 1) self.bn=nn.BatchNorm1d(nf) def forward(self, inp, out): #print("sk",out.size(), inp.size(), self.conv(inp).size(), self.bn(self.conv(inp)).size) #input() return self.act_fn(out + self.bn(self.conv(inp))) # Cell class InceptionBackbone(nn.Module): def __init__(self, input_channels, kss, depth, bottleneck_size, nb_filters, use_residual): super().__init__() self.depth = depth assert((depth % 3) == 0) self.use_residual = use_residual n_ks = len(kss) + 1 self.im = nn.ModuleList([InceptionBlock1d(input_channels if d==0 else n_ks*nb_filters,nb_filters=nb_filters,kss=kss, bottleneck_size=bottleneck_size) for d in range(depth)]) self.sk = nn.ModuleList([Shortcut1d(input_channels if d==0 else n_ks*nb_filters, n_ks*nb_filters) for d in range(depth//3)]) def forward(self, x): input_res = x for d in range(self.depth): x = self.im[d](x) if self.use_residual and d % 3 == 2: x = (self.sk[d//3])(input_res, x) input_res = x.clone() return x # Cell class Inception1d(nn.Module): '''inception time architecture''' def __init__(self, num_classes=2, input_channels=8, kss=[39,19,9], depth=6, bottleneck_size=32, nb_filters=32, use_residual=True,lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True): super().__init__() layers = [InceptionBackbone(input_channels=input_channels, kss=kss, depth=depth, bottleneck_size=bottleneck_size, nb_filters=nb_filters, use_residual=use_residual)] n_ks = len(kss) + 1 #head
head = create_head1d(n_ks*nb_filters, nc=num_classes, lin_ftrs=lin_ftrs_head, ps=ps_head, bn_final=bn_final_head, bn=bn_head, act=act_head, concat_pooling=concat_pooling)
1
2023-11-12 14:54:08+00:00
2k
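Note on the record above: InceptionBackbone concatenates len(kss)+1 branches of nb_filters channels each, so its output channel count is (len(kss)+1)*nb_filters regardless of depth, while the padded stride-1 convolutions preserve sequence length. A shape-check sketch built directly from the definitions in the record; the import path and the input tensor are assumptions for illustration.

    import torch
    from clinical_ts.inception1d import InceptionBackbone  # assumed import path; adjust to the local package layout

    backbone = InceptionBackbone(input_channels=12, kss=[39, 19, 9], depth=6,
                                 bottleneck_size=32, nb_filters=32, use_residual=True)
    x = torch.randn(2, 12, 1000)      # made-up batch of 2 twelve-channel signals, 1000 samples each
    print(backbone(x).shape)          # expected torch.Size([2, 128, 1000]), since (3 + 1) * 32 = 128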
eblume/TyperAssistant
src/typerassistant/assistant.py
[ { "identifier": "FunctionCall", "path": "src/typerassistant/spec.py", "snippet": "class FunctionCall:\n call_id: str\n function: FunctionSpec\n parameters: dict[str, Any]\n\n def dict(self) -> dict:\n return {\n \"call_id\": self.call_id,\n \"function\": self.function.name,\n \"parameters\": self.parameters,\n }" }, { "identifier": "FunctionSpec", "path": "src/typerassistant/spec.py", "snippet": "class FunctionSpec:\n name: str\n description: str\n parameters: list[ParameterSpec]\n action: Callable[..., Any]\n\n def tool(self) -> ToolAssistantToolsFunction:\n return ToolAssistantToolsFunction(\n type=\"function\",\n function=FunctionDefinition(\n name=self.name,\n description=self.description or \"None\",\n parameters=self.json_parameters(),\n ),\n )\n\n def json_parameters(self) -> FunctionParameters:\n # For some reason OpenAI doesn't continue to type this, but instead just provides dict[str, object].\n # In any case, it's supposed to be a JSONSchema object, so we'll just do that manually for now.\n # https://github.com/openai/openai-python/blob/main/src/openai/types/shared_params/function_parameters.py\n parameters = {\n \"type\": \"object\",\n \"properties\": {param.name: param.dict() for param in self.parameters},\n \"required\": [param.name for param in self.parameters if param.required],\n }\n\n # enum processing - do this in a second pass to avoid empty enums\n for param in self.parameters:\n if param.enum:\n parameters[\"properties\"][param.name][\"enum\"] = list(param.enum)\n\n return parameters" } ]
import json import time from collections.abc import Iterable from contextlib import redirect_stdout from dataclasses import KW_ONLY, dataclass, field from io import StringIO from textwrap import shorten from typing import Optional, Type, TypeVar from openai import OpenAI from openai.types.beta.assistant import Assistant as RemoteAssistant from openai.types.beta.thread import Thread from openai.types.beta.threads import RequiredActionFunctionToolCall from openai.types.beta.threads.run_submit_tool_outputs_params import ToolOutput from openai.types.beta.threads.thread_message import ThreadMessage from rich import print from rich.panel import Panel from rich.prompt import Confirm from .spec import FunctionCall, FunctionSpec
1,164
# The number of times to poll for a run to complete before giving up MAX_RUN_ITERATIONS = 20 # The number of seconds to sleep between run iterations RUN_ITERATION_SLEEP = 3 # The best usage guide for function calling seems to be: # https://cookbook.openai.com/examples/how_to_call_functions_with_chat_models AssistantT = TypeVar("AssistantT", bound="Assistant") @dataclass class Assistant: """An assistant managed remotely via OpenAI's assistant API. This class implements the basic lifecycle of an assistant, from CRUD to running a thread. It is intended to be subclassed to extend functionality. """ name: str _: KW_ONLY instructions: str = "The agent is a helpful assistant. Its behavior and capabilities can be extended via the 'typerassistant' python package's API." client: OpenAI = field(default_factory=OpenAI) replace: bool = False _assistant: Optional[RemoteAssistant] = None @classmethod def from_id(cls: Type[AssistantT], assistant_id: str, client: Optional[OpenAI] = None) -> AssistantT: """Retrieve the assistant with the given ID from OpenAI. This method will skip all assistant creation steps and simply use the remote definition.""" if client is None: client = OpenAI() assistant = client.beta.assistants.retrieve(assistant_id) return cls( client=client, name=assistant.name or "Unnamed Assistant", instructions=assistant.instructions or cls.instructions, _assistant=assistant, ) @property def assistant(self) -> RemoteAssistant: if self._assistant is None: self._assistant = self.make_assistant(self.replace) return self._assistant def ask( self, query: str, thread: Optional[Thread] = None, use_commands: bool = True, confirm_commands: bool = True, instructions: Optional[str] = None, ) -> str: """Ask the assistant a question, returning the response. This may block for the lifecycle of several API requests as well as waiting on remotely managed threads, in fact blocking for several minutes and then succeeding is not uncommon. The caller should make arrangements for multithreading, etc. should it be needed. If a thread is not provided, a new one will be made. """ if thread is None: thread = self.thread() self.add_message(query, thread) self.run_thread(thread, use_commands=use_commands, confirm_commands=confirm_commands, instructions=instructions) messages = list(self.messages(thread)) content = messages[0].content assert len(content) == 1 assert content[0].type == "text" assert len(content[0].text.annotations) == 0 return content[0].text.value
# The number of times to poll for a run to complete before giving up MAX_RUN_ITERATIONS = 20 # The number of seconds to sleep between run iterations RUN_ITERATION_SLEEP = 3 # The best usage guide for function calling seems to be: # https://cookbook.openai.com/examples/how_to_call_functions_with_chat_models AssistantT = TypeVar("AssistantT", bound="Assistant") @dataclass class Assistant: """An assistant managed remotely via OpenAI's assistant API. This class implements the basic lifecycle of an assistant, from CRUD to running a thread. It is intended to be subclassed to extend functionality. """ name: str _: KW_ONLY instructions: str = "The agent is a helpful assistant. Its behavior and capabilities can be extended via the 'typerassistant' python package's API." client: OpenAI = field(default_factory=OpenAI) replace: bool = False _assistant: Optional[RemoteAssistant] = None @classmethod def from_id(cls: Type[AssistantT], assistant_id: str, client: Optional[OpenAI] = None) -> AssistantT: """Retrieve the assistant with the given ID from OpenAI. This method will skip all assistant creation steps and simply use the remote definition.""" if client is None: client = OpenAI() assistant = client.beta.assistants.retrieve(assistant_id) return cls( client=client, name=assistant.name or "Unnamed Assistant", instructions=assistant.instructions or cls.instructions, _assistant=assistant, ) @property def assistant(self) -> RemoteAssistant: if self._assistant is None: self._assistant = self.make_assistant(self.replace) return self._assistant def ask( self, query: str, thread: Optional[Thread] = None, use_commands: bool = True, confirm_commands: bool = True, instructions: Optional[str] = None, ) -> str: """Ask the assistant a question, returning the response. This may block for the lifecycle of several API requests as well as waiting on remotely managed threads, in fact blocking for several minutes and then succeeding is not uncommon. The caller should make arrangements for multithreading, etc. should it be needed. If a thread is not provided, a new one will be made. """ if thread is None: thread = self.thread() self.add_message(query, thread) self.run_thread(thread, use_commands=use_commands, confirm_commands=confirm_commands, instructions=instructions) messages = list(self.messages(thread)) content = messages[0].content assert len(content) == 1 assert content[0].type == "text" assert len(content[0].text.annotations) == 0 return content[0].text.value
def functions(self) -> Iterable[FunctionSpec]:
1
2023-11-17 19:43:55+00:00
2k
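Note on the record above: Assistant wraps the OpenAI assistants API as a dataclass, and ask() posts the query to a thread, runs it, and returns the newest message text. A usage sketch under the assumptions that the package installs as typerassistant, OPENAI_API_KEY is set, and the helper methods referenced by ask() exist as in the full module; the assistant name and question are invented.

    from typerassistant.assistant import Assistant  # assumed import path for src/typerassistant/assistant.py

    helper = Assistant("Demo Assistant", instructions="Answer briefly.", replace=True)
    answer = helper.ask("What can you do?", use_commands=False)
    print(answer)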
Mat931/digitalstrom-homeassistant
custom_components/digitalstrom/binary_sensor.py
[ { "identifier": "CONF_DSUID", "path": "custom_components/digitalstrom/const.py", "snippet": "CONF_DSUID: str = \"dsuid\"" }, { "identifier": "DOMAIN", "path": "custom_components/digitalstrom/const.py", "snippet": "DOMAIN = \"digitalstrom\"" }, { "identifier": "DigitalstromEntity", "path": "custom_components/digitalstrom/entity.py", "snippet": "class DigitalstromEntity(Entity):\n \"\"\"Define a base digitalSTROM entity.\"\"\"\n\n def __init__(self, device: DigitalstromDevice, entity_identifier: str):\n \"\"\"Initialize the entity.\"\"\"\n self.device = device\n self._attr_unique_id: str = f\"{self.device.dsuid}_{entity_identifier}\"\n self.entity_id = f\"{DOMAIN}.{self._attr_unique_id}\"\n self._attr_should_poll = False\n self._has_state = False\n\n @property\n def device_info(self) -> DeviceInfo:\n \"\"\"Return the device info.\"\"\"\n parent_device = (\n self.device\n if self.device.parent_device is None\n else self.device.parent_device\n )\n zone_name = \"\"\n if zone := self.device.apartment.zones.get(self.device.zone_id):\n zone_name = zone.name\n return DeviceInfo(\n identifiers={(DOMAIN, parent_device.dsuid)},\n name=parent_device.name,\n manufacturer=parent_device.manufacturer,\n model=parent_device.hw_info,\n # sw_version=parent_device.sw_version,\n via_device=(DOMAIN, parent_device.meter_dsuid),\n suggested_area=zone_name,\n )\n\n @property\n def available(self) -> bool:\n return self.device.available" } ]
import logging from homeassistant.components.binary_sensor import ( BinarySensorDeviceClass, BinarySensorEntity, BinarySensorEntityDescription, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import EntityCategory from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from .const import CONF_DSUID, DOMAIN from .entity import DigitalstromEntity
1,365
name="Brightness", device_class=BinarySensorDeviceClass.LIGHT, ), 3: BinarySensorEntityDescription( key="3", name="Presence in darkness", device_class=BinarySensorDeviceClass.PRESENCE, ), 4: BinarySensorEntityDescription( key="4", name="Twilight", device_class=BinarySensorDeviceClass.LIGHT, ), 5: BinarySensorEntityDescription( key="5", name="Motion", device_class=BinarySensorDeviceClass.MOTION, ), 6: BinarySensorEntityDescription( key="6", name="Motion in darkness", device_class=BinarySensorDeviceClass.MOTION, ), 7: BinarySensorEntityDescription( key="7", name="Smoke", device_class=BinarySensorDeviceClass.SMOKE, ), 8: BinarySensorEntityDescription( key="8", name="Wind strength above limit", device_class=BinarySensorDeviceClass.SAFETY, ), 9: BinarySensorEntityDescription( key="9", name="Rain", device_class=BinarySensorDeviceClass.MOISTURE, ), 10: BinarySensorEntityDescription( key="10", name="Sun", device_class=BinarySensorDeviceClass.LIGHT, ), 11: BinarySensorEntityDescription( key="11", name="Temperature below limit", device_class=BinarySensorDeviceClass.COLD, ), 12: BinarySensorEntityDescription( key="12", name="Battery", device_class=BinarySensorDeviceClass.BATTERY, ), 13: BinarySensorEntityDescription( key="13", name="Window", device_class=BinarySensorDeviceClass.WINDOW, ), 14: BinarySensorEntityDescription( key="14", name="Door", device_class=BinarySensorDeviceClass.DOOR, ), 15: BinarySensorEntityDescription( key="15", name="Window tilt", device_class=BinarySensorDeviceClass.WINDOW, ), 16: BinarySensorEntityDescription( key="16", name="Garage door", device_class=BinarySensorDeviceClass.GARAGE_DOOR, ), 17: BinarySensorEntityDescription( key="17", name="Sun protection", device_class=BinarySensorDeviceClass.SAFETY, ), 18: BinarySensorEntityDescription( key="18", name="Frost", device_class=BinarySensorDeviceClass.COLD, ), 19: BinarySensorEntityDescription( key="19", name="Heating system", device_class=BinarySensorDeviceClass.HEAT, ), 20: BinarySensorEntityDescription( key="20", name="Warm water", device_class=BinarySensorDeviceClass.HEAT, ), 21: BinarySensorEntityDescription( key="21", name="Initialization", device_class=BinarySensorDeviceClass.RUNNING, entity_category=EntityCategory.DIAGNOSTIC, ), 22: BinarySensorEntityDescription( key="22", name="Malfunction", device_class=BinarySensorDeviceClass.PROBLEM, entity_category=EntityCategory.DIAGNOSTIC, ), 23: BinarySensorEntityDescription( key="23", name="Service required", device_class=BinarySensorDeviceClass.PROBLEM, entity_category=EntityCategory.DIAGNOSTIC, ), } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the binary sensor platform."""
_LOGGER = logging.getLogger(__name__) BINARY_SENSORS_MAP: dict[int, BinarySensorEntityDescription] = { -1: BinarySensorEntityDescription( key="unknown", name="Unknown binary input", ), 0: BinarySensorEntityDescription( key="0", name="Binary input", ), 1: BinarySensorEntityDescription( key="1", name="Presence", device_class=BinarySensorDeviceClass.PRESENCE, ), 2: BinarySensorEntityDescription( key="2", name="Brightness", device_class=BinarySensorDeviceClass.LIGHT, ), 3: BinarySensorEntityDescription( key="3", name="Presence in darkness", device_class=BinarySensorDeviceClass.PRESENCE, ), 4: BinarySensorEntityDescription( key="4", name="Twilight", device_class=BinarySensorDeviceClass.LIGHT, ), 5: BinarySensorEntityDescription( key="5", name="Motion", device_class=BinarySensorDeviceClass.MOTION, ), 6: BinarySensorEntityDescription( key="6", name="Motion in darkness", device_class=BinarySensorDeviceClass.MOTION, ), 7: BinarySensorEntityDescription( key="7", name="Smoke", device_class=BinarySensorDeviceClass.SMOKE, ), 8: BinarySensorEntityDescription( key="8", name="Wind strength above limit", device_class=BinarySensorDeviceClass.SAFETY, ), 9: BinarySensorEntityDescription( key="9", name="Rain", device_class=BinarySensorDeviceClass.MOISTURE, ), 10: BinarySensorEntityDescription( key="10", name="Sun", device_class=BinarySensorDeviceClass.LIGHT, ), 11: BinarySensorEntityDescription( key="11", name="Temperature below limit", device_class=BinarySensorDeviceClass.COLD, ), 12: BinarySensorEntityDescription( key="12", name="Battery", device_class=BinarySensorDeviceClass.BATTERY, ), 13: BinarySensorEntityDescription( key="13", name="Window", device_class=BinarySensorDeviceClass.WINDOW, ), 14: BinarySensorEntityDescription( key="14", name="Door", device_class=BinarySensorDeviceClass.DOOR, ), 15: BinarySensorEntityDescription( key="15", name="Window tilt", device_class=BinarySensorDeviceClass.WINDOW, ), 16: BinarySensorEntityDescription( key="16", name="Garage door", device_class=BinarySensorDeviceClass.GARAGE_DOOR, ), 17: BinarySensorEntityDescription( key="17", name="Sun protection", device_class=BinarySensorDeviceClass.SAFETY, ), 18: BinarySensorEntityDescription( key="18", name="Frost", device_class=BinarySensorDeviceClass.COLD, ), 19: BinarySensorEntityDescription( key="19", name="Heating system", device_class=BinarySensorDeviceClass.HEAT, ), 20: BinarySensorEntityDescription( key="20", name="Warm water", device_class=BinarySensorDeviceClass.HEAT, ), 21: BinarySensorEntityDescription( key="21", name="Initialization", device_class=BinarySensorDeviceClass.RUNNING, entity_category=EntityCategory.DIAGNOSTIC, ), 22: BinarySensorEntityDescription( key="22", name="Malfunction", device_class=BinarySensorDeviceClass.PROBLEM, entity_category=EntityCategory.DIAGNOSTIC, ), 23: BinarySensorEntityDescription( key="23", name="Service required", device_class=BinarySensorDeviceClass.PROBLEM, entity_category=EntityCategory.DIAGNOSTIC, ), } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the binary sensor platform."""
client = hass.data[DOMAIN][config_entry.data[CONF_DSUID]]["client"]
0
2023-11-10 16:42:38+00:00
2k
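Note on the record above: BINARY_SENSORS_MAP keys Home Assistant entity descriptions by the digitalSTROM binary input type, with -1 reserved as the "Unknown binary input" fallback. The lookup pattern below is an illustrative sketch run in the context of that module, not necessarily how the integration itself resolves the type; the input value is an example.

    # Hypothetical lookup against BINARY_SENSORS_MAP as defined in binary_sensor.py above.
    input_type = 7  # example: a smoke detector input class reported by a device
    description = BINARY_SENSORS_MAP.get(input_type, BINARY_SENSORS_MAP[-1])
    print(description.name, description.device_class)  # "Smoke" BinarySensorDeviceClass.SMOKE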
mohenghui/detectAuto_v8
ultralytics/models/sam/modules/encoders.py
[ { "identifier": "LayerNorm2d", "path": "ultralytics/nn/modules/transformer.py", "snippet": "class LayerNorm2d(nn.Module):\n \"\"\"\n 2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.\n\n Original implementations in\n https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py\n and\n https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py.\n \"\"\"\n\n def __init__(self, num_channels, eps=1e-6):\n \"\"\"Initialize LayerNorm2d with the given parameters.\"\"\"\n super().__init__()\n self.weight = nn.Parameter(torch.ones(num_channels))\n self.bias = nn.Parameter(torch.zeros(num_channels))\n self.eps = eps\n\n def forward(self, x):\n \"\"\"Perform forward pass for 2D layer normalization.\"\"\"\n u = x.mean(1, keepdim=True)\n s = (x - u).pow(2).mean(1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n return self.weight[:, None, None] * x + self.bias[:, None, None]" }, { "identifier": "MLPBlock", "path": "ultralytics/nn/modules/transformer.py", "snippet": "class MLPBlock(nn.Module):\n \"\"\"Implements a single block of a multi-layer perceptron.\"\"\"\n\n def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):\n \"\"\"Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function.\"\"\"\n super().__init__()\n self.lin1 = nn.Linear(embedding_dim, mlp_dim)\n self.lin2 = nn.Linear(mlp_dim, embedding_dim)\n self.act = act()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass for the MLPBlock.\"\"\"\n return self.lin2(self.act(self.lin1(x)))" } ]
from typing import Any, Optional, Tuple, Type from ultralytics.nn.modules import LayerNorm2d, MLPBlock import numpy as np import torch import torch.nn as nn import torch.nn.functional as F
1,374
# Ultralytics YOLO 🚀, AGPL-3.0 license class ImageEncoderViT(nn.Module): """ An image encoder using Vision Transformer (ViT) architecture for encoding an image into a compact latent space. The encoder takes an image, splits it into patches, and processes these patches through a series of transformer blocks. The encoded patches are then processed through a neck to generate the final encoded representation. This class and its supporting functions below lightly adapted from the ViTDet backbone available at https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py. Attributes: img_size (int): Dimension of input images, assumed to be square. patch_embed (PatchEmbed): Module for patch embedding. pos_embed (nn.Parameter, optional): Absolute positional embedding for patches. blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings. neck (nn.Sequential): Neck module to further process the output. """ def __init__( self, img_size: int = 1024, patch_size: int = 16, in_chans: int = 3, embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4.0, out_chans: int = 256, qkv_bias: bool = True, norm_layer: Type[nn.Module] = nn.LayerNorm, act_layer: Type[nn.Module] = nn.GELU, use_abs_pos: bool = True, use_rel_pos: bool = False, rel_pos_zero_init: bool = True, window_size: int = 0, global_attn_indexes: Tuple[int, ...] = (), ) -> None: """ Args: img_size (int): Input image size. patch_size (int): Patch size. in_chans (int): Number of input image channels. embed_dim (int): Patch embedding dimension. depth (int): Depth of ViT. num_heads (int): Number of attention heads in each ViT block. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool): If True, add a learnable bias to query, key, value. norm_layer (nn.Module): Normalization layer. act_layer (nn.Module): Activation layer. use_abs_pos (bool): If True, use absolute positional embeddings. use_rel_pos (bool): If True, add relative positional embeddings to the attention map. rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. window_size (int): Window size for window attention blocks. global_attn_indexes (list): Indexes for blocks using global attention. """ super().__init__() self.img_size = img_size self.patch_embed = PatchEmbed( kernel_size=(patch_size, patch_size), stride=(patch_size, patch_size), in_chans=in_chans, embed_dim=embed_dim, ) self.pos_embed: Optional[nn.Parameter] = None if use_abs_pos: # Initialize absolute positional embedding with pretrain image size. self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)) self.blocks = nn.ModuleList() for i in range(depth): block = Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, norm_layer=norm_layer, act_layer=act_layer, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, window_size=window_size if i not in global_attn_indexes else 0, input_size=(img_size // patch_size, img_size // patch_size), ) self.blocks.append(block) self.neck = nn.Sequential( nn.Conv2d( embed_dim, out_chans, kernel_size=1, bias=False, ),
# Ultralytics YOLO 🚀, AGPL-3.0 license class ImageEncoderViT(nn.Module): """ An image encoder using Vision Transformer (ViT) architecture for encoding an image into a compact latent space. The encoder takes an image, splits it into patches, and processes these patches through a series of transformer blocks. The encoded patches are then processed through a neck to generate the final encoded representation. This class and its supporting functions below lightly adapted from the ViTDet backbone available at https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py. Attributes: img_size (int): Dimension of input images, assumed to be square. patch_embed (PatchEmbed): Module for patch embedding. pos_embed (nn.Parameter, optional): Absolute positional embedding for patches. blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings. neck (nn.Sequential): Neck module to further process the output. """ def __init__( self, img_size: int = 1024, patch_size: int = 16, in_chans: int = 3, embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4.0, out_chans: int = 256, qkv_bias: bool = True, norm_layer: Type[nn.Module] = nn.LayerNorm, act_layer: Type[nn.Module] = nn.GELU, use_abs_pos: bool = True, use_rel_pos: bool = False, rel_pos_zero_init: bool = True, window_size: int = 0, global_attn_indexes: Tuple[int, ...] = (), ) -> None: """ Args: img_size (int): Input image size. patch_size (int): Patch size. in_chans (int): Number of input image channels. embed_dim (int): Patch embedding dimension. depth (int): Depth of ViT. num_heads (int): Number of attention heads in each ViT block. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool): If True, add a learnable bias to query, key, value. norm_layer (nn.Module): Normalization layer. act_layer (nn.Module): Activation layer. use_abs_pos (bool): If True, use absolute positional embeddings. use_rel_pos (bool): If True, add relative positional embeddings to the attention map. rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. window_size (int): Window size for window attention blocks. global_attn_indexes (list): Indexes for blocks using global attention. """ super().__init__() self.img_size = img_size self.patch_embed = PatchEmbed( kernel_size=(patch_size, patch_size), stride=(patch_size, patch_size), in_chans=in_chans, embed_dim=embed_dim, ) self.pos_embed: Optional[nn.Parameter] = None if use_abs_pos: # Initialize absolute positional embedding with pretrain image size. self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)) self.blocks = nn.ModuleList() for i in range(depth): block = Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, norm_layer=norm_layer, act_layer=act_layer, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, window_size=window_size if i not in global_attn_indexes else 0, input_size=(img_size // patch_size, img_size // patch_size), ) self.blocks.append(block) self.neck = nn.Sequential( nn.Conv2d( embed_dim, out_chans, kernel_size=1, bias=False, ),
LayerNorm2d(out_chans),
0
2023-11-16 12:49:59+00:00
2k
i-super/Saleor
saleor/webhook/observability/tests/conftest.py
[ { "identifier": "schema", "path": "saleor/graphql/api.py", "snippet": "API_PATH = SimpleLazyObject(lambda: reverse(\"api\"))\nclass Query(\n AccountQueries,\n AppQueries,\n AttributeQueries,\n ChannelQueries,\n CheckoutQueries,\n CoreQueries,\n CsvQueries,\n DiscountQueries,\n PluginsQueries,\n GiftCardQueries,\n MenuQueries,\n OrderQueries,\n PageQueries,\n PaymentQueries,\n ProductQueries,\n ShippingQueries,\n ShopQueries,\n StockQueries,\n TaxQueries,\n TranslationQueries,\n WarehouseQueries,\n WebhookQueries,\n):\nclass Mutation(\n AccountMutations,\n AppMutations,\n AttributeMutations,\n ChannelMutations,\n CheckoutMutations,\n CoreMutations,\n CsvMutations,\n DiscountMutations,\n ExternalNotificationMutations,\n PluginsMutations,\n GiftCardMutations,\n InvoiceMutations,\n MenuMutations,\n MetaMutations,\n OrderMutations,\n PageMutations,\n PaymentMutations,\n ProductMutations,\n ShippingMutations,\n ShopMutations,\n StockMutations,\n TaxMutations,\n WarehouseMutations,\n WebhookMutations,\n):\ndef serialize_webhook_event(value):" }, { "identifier": "RedisBuffer", "path": "saleor/webhook/observability/buffers.py", "snippet": "class RedisBuffer(BaseBuffer):\n _pools: dict[str, ConnectionPool] = {}\n _socket_connect_timeout = 0.25\n _client_name = \"observability_buffer\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._client: Optional[Redis] = None\n\n def get_connection_pool(self):\n return ConnectionPool.from_url(\n self.broker_url,\n socket_connect_timeout=self._socket_connect_timeout,\n socket_timeout=self.connection_timeout,\n client_name=self._client_name,\n )\n\n def get_or_create_connection_pool(self):\n if self.broker_url not in self._pools:\n self._pools[self.broker_url] = self.get_connection_pool()\n return self._pools[self.broker_url]\n\n def connect(self) -> Redis:\n pool = self.get_or_create_connection_pool()\n return Redis(connection_pool=pool)\n\n @property\n def client(self) -> Redis:\n if not self._client:\n self._client = self.connect()\n return self._client\n\n def _put_events(\n self, key: KEY_TYPE, events: list[Any], client: Optional[Redis] = None\n ) -> int:\n start_index = -self.max_size\n events_data = [self.encode(event) for event in events[start_index:]]\n if client is None:\n client = self.client\n client.lpush(key, *events_data)\n client.ltrim(key, 0, max(0, self.max_size - 1))\n client.expire(key, self.timeout)\n return max(0, len(events) - self.max_size)\n\n def put_events(self, events: list[Any]) -> int:\n with self.client.pipeline(transaction=False) as pipe:\n dropped = self._put_events(self.key, events, client=pipe)\n result = pipe.execute()\n return dropped + max(0, result[0] - self.max_size)\n\n def put_event(self, event: Any) -> int:\n return self.put_events([event])\n\n def put_multi_key_events(\n self, events_dict: dict[KEY_TYPE, list[Any]]\n ) -> dict[KEY_TYPE, int]:\n keys = list(events_dict.keys())\n trimmed: dict[KEY_TYPE, int] = {}\n if not keys:\n return trimmed\n with self.client.pipeline(transaction=False) as pipe:\n for key in keys:\n trimmed[key] = self._put_events(key, events_dict[key], client=pipe)\n result = pipe.execute()\n for key in keys:\n buffer_len, _, _ = result.pop(0), result.pop(0), result.pop(0)\n trimmed[key] += max(0, buffer_len - self.max_size)\n return trimmed\n\n def _pop_events(self, key: KEY_TYPE, batch_size: int) -> tuple[list[Any], int]:\n events = []\n with self.client.pipeline(transaction=False) as pipe:\n pipe.llen(key)\n for i in range(max(1, batch_size)):\n 
pipe.rpop(key)\n result = pipe.execute()\n size = result.pop(0)\n for elem in result:\n if elem is None:\n break\n events.append(self.decode(elem))\n return events, size - len(events)\n\n def pop_event(self) -> Any:\n events, _ = self._pop_events(self.key, batch_size=1)\n return events[0] if events else None\n\n def pop_events(self) -> list[Any]:\n events, _ = self._pop_events(self.key, self.batch_size)\n return events\n\n def pop_events_get_size(self) -> tuple[list[Any], int]:\n return self._pop_events(self.key, self.batch_size)\n\n def clear(self) -> int:\n with self.client.pipeline(transaction=False) as pipe:\n pipe.llen(self.key)\n pipe.delete(self.key)\n result = pipe.execute()\n return result[0]\n\n def size(self) -> int:\n return self.client.llen(self.key)" }, { "identifier": "GraphQLOperationResponse", "path": "saleor/webhook/observability/utils.py", "snippet": "class GraphQLOperationResponse:\n name: Optional[str] = None\n query: Optional[GraphQLDocument] = None\n variables: Optional[dict] = None\n result: Optional[dict] = None\n result_invalid: bool = False" }, { "identifier": "get_buffer_name", "path": "saleor/webhook/observability/utils.py", "snippet": "def get_buffer_name() -> str:\n return cache.make_key(BUFFER_KEY)" } ]
from typing import Optional from unittest.mock import patch from django.core.cache import cache from graphql import get_default_backend from redis import ConnectionPool from ....graphql.api import schema from ..buffers import RedisBuffer from ..utils import GraphQLOperationResponse, get_buffer_name import fakeredis import pytest
1,586
backend = get_default_backend() BROKER_URL_HOST = "fake-redis" BROKER_URL = f"redis://{BROKER_URL_HOST}" KEY, MAX_SIZE, BATCH_SIZE = get_buffer_name(), 10, 5 @pytest.fixture def gql_operation_factory(): def factory( query_string: str, operation_name: Optional[str] = None, variables: Optional[dict] = None, result: Optional[dict] = None, result_invalid=False, ) -> GraphQLOperationResponse:
backend = get_default_backend() BROKER_URL_HOST = "fake-redis" BROKER_URL = f"redis://{BROKER_URL_HOST}" KEY, MAX_SIZE, BATCH_SIZE = get_buffer_name(), 10, 5 @pytest.fixture def gql_operation_factory(): def factory( query_string: str, operation_name: Optional[str] = None, variables: Optional[dict] = None, result: Optional[dict] = None, result_invalid=False, ) -> GraphQLOperationResponse:
query = backend.document_from_string(schema, query_string)
0
2023-11-13 05:00:35+00:00
2k
Aues6uen11Z/Zafkiel
zafkiel/ui/switch.py
[ { "identifier": "ImageTemplate", "path": "zafkiel/device/template.py", "snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n target_pos: int = TargetPos.MID,\n resolution: tuple = (1280, 720),\n rgb: bool = False,\n scale_max: int = 800,\n scale_step: float = 0.005,\n template_path: str = 'templates'\n ):\n\n super().__init__(filename, threshold, target_pos, record_pos, resolution, rgb, scale_max, scale_step)\n\n self.template_path = template_path # under root path\n self.keyword = keyword\n if self.keyword is not None and self.keyword.name == '':\n \"\"\"\n Please note that due to the __post_init__ method of the Keyword class running before this 'name' assignment, \n its 'instances' dictionary will get a dictionary item with an empty string key.\n This means that each instance of the Keyword class that omits the 'name' parameter will be constantly \n overwritten. If you want to use Keyword().instances for special purposes, you must initialize 'name'.\n \"\"\"\n self.keyword.name = self.name\n\n @cached_property\n def filepath(self) -> str:\n if self._filepath:\n return self._filepath\n for dir_name in G.BASEDIR:\n filepath = os.path.join(dir_name, self.template_path, self.filename)\n if os.path.isfile(filepath):\n self._filepath = filepath\n return self._filepath\n return self.filename\n\n @cached_property\n def name(self) -> str:\n return Path(self.filename).stem\n\n @cached_property\n def image(self) -> ndarray:\n return self._imread()\n\n @cached_property\n def height(self) -> int:\n return self.image.shape[0]\n\n @cached_property\n def width(self) -> int:\n return self.image.shape[1]\n\n def _has_border(self) -> bool:\n \"\"\"\n If game running in a bordered process, coordinates need to be corrected.\n\n Returns:\n Whether the game running in a bordered process.\n \"\"\"\n actual_ratio = G.DEVICE.get_current_resolution()[0] / G.DEVICE.get_current_resolution()[1]\n template_ratio = self.resolution[0] / self.resolution[1]\n return actual_ratio != template_ratio\n\n def ratio(self, screen_height: float = None) -> float:\n \"\"\"\n Calculate the ratio of the current screen to the template image.\n \"\"\"\n if screen_height is None:\n if self._has_border():\n border = Config.BORDER[0] + Config.BORDER[2]\n else:\n border = 0\n screen_height = G.DEVICE.get_current_resolution()[1] - border\n\n return screen_height / self.resolution[1]\n\n @cached_property\n def area(self) -> tuple:\n \"\"\"\n Calculate the area of the template image on the current screen.\n\n Returns:\n Upper left and lower right corner coordinate.\n \"\"\"\n screen_resolution = G.DEVICE.get_current_resolution()\n\n if self._has_border():\n border = Config.BORDER\n else:\n border = (0, 0, 0)\n\n screen_width = screen_resolution[0] - border[1] * 2\n screen_height = screen_resolution[1] - border[0] - border[2]\n\n ratio = self.ratio(screen_height)\n x1 = screen_width / 2 + self.record_pos[0] * screen_width - self.width / 2 * ratio + border[1]\n y1 = screen_height / 2 + self.record_pos[1] * screen_width - self.height / 2 * ratio + border[0]\n x2 = screen_width / 2 + self.record_pos[0] * screen_width + self.width / 2 * ratio + border[1]\n y2 = screen_height / 2 + self.record_pos[1] * screen_width + self.height / 2 * ratio + border[0]\n return x1, y1, x2, y2" }, { "identifier": "ScriptError", "path": "zafkiel/exception.py", "snippet": "class ScriptError(Exception):\n pass" } ]
from zafkiel.device.template import ImageTemplate as Template from zafkiel.exception import ScriptError
1,484
class Switch: """ A wrapper to handle switches in game, switch among states with retries. Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py Examples: # Definitions submarine_hunt = Switch('Submarine_hunt', offset=120) submarine_hunt.add_state('on', check_button=Template(r"assets/ON.png")) submarine_hunt.add_state('off', check_button=Template(r"assets/OFF.png")) # Change state to ON submarine_view.set(TPL_ON) """ def __init__(self, name: str = 'Switch', is_selector: bool = False): """ Args: name: is_selector: True if this is a multi choice, click to choose one of the switches. For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] | False if this is a switch, click the switch itself, and it changed in the same position. For example: | [ON] | -> click -> | [OFF] | """ self.name = name self.is_choice = is_selector self.state_list = [] def __str__(self): return self.name __repr__ = __str__ def add_state(self, state: str, check_button: Template, click_button: Template = None): """ Args: state: Must match check_button.name check_button: click_button: """ self.state_list.append({ 'state': state, 'check_button': check_button, 'click_button': click_button if click_button is not None else check_button, }) def get_data(self, state: Template) -> dict: """ Args: state: Returns: Dictionary in add_state Raises: ScriptError: If state invalid """ for row in self.state_list: if row['state'] == state.name: return row
class Switch: """ A wrapper to handle switches in game, switch among states with retries. Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py Examples: # Definitions submarine_hunt = Switch('Submarine_hunt', offset=120) submarine_hunt.add_state('on', check_button=Template(r"assets/ON.png")) submarine_hunt.add_state('off', check_button=Template(r"assets/OFF.png")) # Change state to ON submarine_view.set(TPL_ON) """ def __init__(self, name: str = 'Switch', is_selector: bool = False): """ Args: name: is_selector: True if this is a multi choice, click to choose one of the switches. For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] | False if this is a switch, click the switch itself, and it changed in the same position. For example: | [ON] | -> click -> | [OFF] | """ self.name = name self.is_choice = is_selector self.state_list = [] def __str__(self): return self.name __repr__ = __str__ def add_state(self, state: str, check_button: Template, click_button: Template = None): """ Args: state: Must match check_button.name check_button: click_button: """ self.state_list.append({ 'state': state, 'check_button': check_button, 'click_button': click_button if click_button is not None else check_button, }) def get_data(self, state: Template) -> dict: """ Args: state: Returns: Dictionary in add_state Raises: ScriptError: If state invalid """ for row in self.state_list: if row['state'] == state.name: return row
raise ScriptError(f'Switch {self.name} received an invalid state {state}')
1
2023-11-12 09:33:35+00:00
2k
medkit-lib/medkit
tests/unit/training/dummy_context_component/dummy_component.py
[ { "identifier": "BatchData", "path": "medkit/training/utils.py", "snippet": "class BatchData(dict):\n \"\"\"A BatchData pack data allowing both column and row access\"\"\"\n\n def __getitem__(self, index: int) -> Dict[str, Union[List[Any], torch.Tensor]]:\n if isinstance(index, str):\n inner_dict = dict(self.items())\n return inner_dict[index]\n return {key: values[index] for key, values in self.items()}\n\n def to_device(self, device: torch.device) -> BatchData:\n \"\"\"\n Ensure that Tensors in the BatchData object are on the specified `device`\n\n Parameters\n ----------\n device:\n A `torch.device` object representing the device on which tensors\n will be allocated.\n\n Returns\n -------\n BatchData\n A new object with the tensors on the proper device.\n \"\"\"\n inner_batch = BatchData()\n for key, value in self.items():\n if isinstance(value, torch.Tensor):\n inner_batch[key] = value.to(device)\n else:\n inner_batch[key] = value\n return inner_batch" }, { "identifier": "DummyTextCat", "path": "tests/unit/training/dummy_context_component/dummy_model.py", "snippet": "class DummyTextCat(nn.Module):\n \"\"\"Construct a dummy model for text classification using a embedding bag architecture\"\"\"\n\n def __init__(self, config: DummyTextCatConfig):\n super().__init__()\n self.model_name = \"TextCat\"\n self.config = config\n self.loss = torch.nn.CrossEntropyLoss()\n\n self.embedding = nn.EmbeddingBag(self.config.vocab_size, self.config.embed_dim, sparse=True)\n self.fc = nn.Linear(self.config.embed_dim, self.config.num_class)\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.5\n self.embedding.weight.data.uniform_(-initrange, initrange)\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc.bias.data.zero_()\n\n def forward(self, inputs_ids: torch.FloatTensor, offsets: torch.FloatTensor) -> BatchData:\n embedded = self.embedding(inputs_ids, offsets)\n logits = self.fc(embedded)\n return logits\n\n def compute_loss(self, logits: torch.FloatTensor, labels: torch.FloatTensor):\n return self.loss(logits, labels)" }, { "identifier": "DummyTextCatConfig", "path": "tests/unit/training/dummy_context_component/dummy_model.py", "snippet": "class DummyTextCatConfig:\n vocab_size: int = 512\n embed_dim: int = 16\n num_class: int = 2" }, { "identifier": "DummyTokenizer", "path": "tests/unit/training/dummy_context_component/dummy_model.py", "snippet": "class DummyTokenizer:\n def __call__(self, text: str) -> List[int]:\n return [ord(char) for char in text]" } ]
import os import torch from typing import Optional from medkit.training import BatchData from .dummy_model import DummyTextCat, DummyTextCatConfig, DummyTokenizer
746
PYTORCH_MODEL_NAME = "pytorch_model.bin" class MockTrainableComponent: def __init__( self, model_path: Optional[str] = None, output_label: str = "category", device="cpu", ): self.tokenizer = DummyTokenizer() # load architecture
PYTORCH_MODEL_NAME = "pytorch_model.bin" class MockTrainableComponent: def __init__( self, model_path: Optional[str] = None, output_label: str = "category", device="cpu", ): self.tokenizer = DummyTokenizer() # load architecture
self.model = DummyTextCat(config=DummyTextCatConfig())
2
2023-11-13 16:28:56+00:00
2k
donahowe/VE-MLD
src_files/models/utils/factory.py
[ { "identifier": "add_ml_decoder_head", "path": "src_files/ml_decoder/ml_decoder.py", "snippet": "def add_ml_decoder_head(model, num_classes=-1, num_of_groups=-1, decoder_embedding=768, zsl=0):\n if num_classes == -1:\n num_classes = model.num_classes\n num_features = model.num_features\n if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # resnet50\n model.global_pool = nn.Identity()\n del model.fc\n model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features, num_of_groups=num_of_groups,\n decoder_embedding=decoder_embedding, zsl=zsl)\n elif hasattr(model, 'head'): # tresnet\n if hasattr(model, 'global_pool'):\n model.global_pool = nn.Identity()\n del model.head\n model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features, num_of_groups=num_of_groups,\n decoder_embedding=decoder_embedding, zsl=zsl)\n else:\n print(\"model is not suited for ml-decoder\")\n exit(-1)\n\n return model" }, { "identifier": "TResnetM", "path": "src_files/models/tresnet/tresnet.py", "snippet": "def TResnetM(model_params):\n \"\"\"Constructs a medium TResnet model.\n \"\"\"\n in_chans = 3\n num_classes = model_params['num_classes']\n model = TResNet(layers=[3, 4, 11, 3], num_classes=num_classes, in_chans=in_chans)\n return model" }, { "identifier": "TResnetL", "path": "src_files/models/tresnet/tresnet.py", "snippet": "def TResnetL(model_params):\n \"\"\"Constructs a large TResnet model.\n \"\"\"\n in_chans = 3\n num_classes = model_params['num_classes']\n layers_list = [3, 4, 23, 3]\n model = TResNet(layers=layers_list, num_classes=num_classes, in_chans=in_chans, first_two_layers=Bottleneck)\n return model" }, { "identifier": "TResnetXL", "path": "src_files/models/tresnet/tresnet.py", "snippet": "def TResnetXL(model_params):\n \"\"\"Constructs a large TResnet model.\n \"\"\"\n in_chans = 3\n num_classes = model_params['num_classes']\n layers_list = [3, 8, 34, 5]\n model = TResNet(layers=layers_list, num_classes=num_classes, in_chans=in_chans, first_two_layers=Bottleneck)\n return model" }, { "identifier": "VE", "path": "src_files/models/vit.py", "snippet": "def VE(model_params):\n\n in_chans = 3\n num_classes = model_params['num_classes']\n imagesize = model_params['image_size']\n patchsize = 32\n model = ViT(num_classes=num_classes, channels=in_chans, image_size=imagesize ,patch_size=patchsize)\n return model" } ]
import logging import os import torch from urllib import request from ...ml_decoder.ml_decoder import add_ml_decoder_head from ..tresnet import TResnetM, TResnetL, TResnetXL from ..vit import VE
835
logger = logging.getLogger(__name__) def create_model(args,load_head=False): """Create a model """ model_params = {'args': args, 'num_classes': args.num_classes, 'image_size': args.image_size} args = model_params['args'] args.model_name = args.model_name.lower() if args.model_name == 'vit': model = VE(model_params) elif args.model_name == 'tresnet_m': model = TResnetM(model_params) elif args.model_name == 'tresnet_l':
logger = logging.getLogger(__name__) def create_model(args,load_head=False): """Create a model """ model_params = {'args': args, 'num_classes': args.num_classes, 'image_size': args.image_size} args = model_params['args'] args.model_name = args.model_name.lower() if args.model_name == 'vit': model = VE(model_params) elif args.model_name == 'tresnet_m': model = TResnetM(model_params) elif args.model_name == 'tresnet_l':
model = TResnetL(model_params)
2
2023-11-13 04:12:26+00:00
2k
WindowsSov8forUs/bestdori_api
bestdori/utils/network.py
[ { "identifier": "AssetsNotExistError", "path": "bestdori/exceptions.py", "snippet": "class AssetsNotExistError(AssetsException):\n '''资源不存在'''\n # 初始化\n def __init__(self, asset_name: str) -> None:\n msg = f'资源 {asset_name} 可能不存在。'\n super().__init__(msg)" }, { "identifier": "RequestException", "path": "bestdori/exceptions.py", "snippet": "class RequestException(BaseException):\n '''请求发送错误'''\n # 初始化\n def __init__(self, api: str, msg: str='无错误代码获取。', **kwargs: Any) -> None:\n if len(kwargs) > 0:\n msg += f': {kwargs}'\n else:\n msg += '。'\n super().__init__(msg)\n self.api = api\n '''请求所使用的 API'''\n \n # 字符串化\n def __str__(self) -> str:\n '''输出字符串'''\n return f'向 Bestdori {self.api} 发送请求时出错。{self.message}'" }, { "identifier": "REQUEST_EXCEPTION", "path": "bestdori/exceptions.py", "snippet": "REQUEST_EXCEPTION: dict[str, type[RequestException]] = {\n 'REQUEST_INVALID': RequestInvalidError,\n 'LOGIN_REQUIRED': LoginRequiredError,\n 'CREDENTIALS_INVALID': CredentialsInvalidError,\n 'USER_INVALID': UserInvalidError,\n 'ALREADY_UPLOADED': AlreadyUploadedError,\n 'POST_INVALID': PostInvalidError\n}" } ]
from json import dumps from io import BufferedReader from httpx._models import Cookies from httpx import Response, Request, Client from typing import Optional, Literal, cast, Any from ..exceptions import ( AssetsNotExistError, RequestException, REQUEST_EXCEPTION )
1,202
'''`bestdori.utils.network` 向 Bestdori 发送请求相关模块''' # 向 Bestdori 发送 API 请求类 class Api: '''向 Bestdori 发送 API 请求类 参数: api (str): 请求的 API 地址 proxy (Optional[str]): 代理服务器''' api: str '''请求的 API 地址''' proxy: Optional[str]=None '''代理服务器''' headers: dict[str, str] '''请求头''' # 初始化 def __init__( self, api: str, proxy: Optional[str]=None ) -> None: '''初始化''' self.api = api self.proxy = proxy self.headers = {'Content-Type': 'application/json;charset=UTF-8'} return # 请求发送 def request( self, method: Literal['get', 'post'], *, cookies: Optional[Cookies]=None, params: Optional[dict[str, Any]]=None, data: Optional[dict[str, Any]]=None, files: Optional[dict[str, tuple[str, BufferedReader]]]=None ) -> Response: '''请求发送 参数: method (Literal[&#39;get&#39;, &#39;post&#39;]): API 调用方法 cookies (Optional[Cookies], optional): Cookies params (Optional[dict[str, Any]], optional): 调用参数 data (Optional[dict[str, Any]], optional): 调用参数,将以 `json` 字符串形式发送 files (Optional[dict[str, tuple[str, BufferedReader]]], optional): 发送文件参数 返回: Response: 收到的响应 ''' # 处理接收到的 API if self.api.startswith('http://') or self.api.startswith('https://'): self.api = self.api else: self.api = 'https://bestdori.com/api/' + self.api # 构建一个请求体 request = Request( method, self.api, cookies=cookies, params=params, data=cast(dict, dumps(data)) if data is not None else data, files=files, headers=self.headers if not self.api.endswith('/upload') else None ) # 构建代理服务器字典 if self.proxy is not None: proxies = {'http://': self.proxy, 'https://': self.proxy} else: proxies = None # 发送请求并获取响应 with Client(proxies=cast(dict, proxies)) as client: response = client.send(request) client.close() # 处理接收到的响应 response.raise_for_status() # 判断接收到的响应是否为 json 格式 if 'application/json' not in (content_type := response.headers.get('content-type', None)): if content_type is not None: return response else: raise Exception('接收到的响应没有 content-type。') if isinstance((response_data := response.json()), dict): if (result := response_data.get('result', None)) is not None: if result is False: if (code := response_data.get('code', None)) is not None: if code in REQUEST_EXCEPTION.keys(): # 若错误码已被记录 exception_class = REQUEST_EXCEPTION[code] if params is not None: raise exception_class(self.api, **params) elif data is not None: raise exception_class(self.api, **data) else: raise exception_class(self.api) else:
'''`bestdori.utils.network` 向 Bestdori 发送请求相关模块''' # 向 Bestdori 发送 API 请求类 class Api: '''向 Bestdori 发送 API 请求类 参数: api (str): 请求的 API 地址 proxy (Optional[str]): 代理服务器''' api: str '''请求的 API 地址''' proxy: Optional[str]=None '''代理服务器''' headers: dict[str, str] '''请求头''' # 初始化 def __init__( self, api: str, proxy: Optional[str]=None ) -> None: '''初始化''' self.api = api self.proxy = proxy self.headers = {'Content-Type': 'application/json;charset=UTF-8'} return # 请求发送 def request( self, method: Literal['get', 'post'], *, cookies: Optional[Cookies]=None, params: Optional[dict[str, Any]]=None, data: Optional[dict[str, Any]]=None, files: Optional[dict[str, tuple[str, BufferedReader]]]=None ) -> Response: '''请求发送 参数: method (Literal[&#39;get&#39;, &#39;post&#39;]): API 调用方法 cookies (Optional[Cookies], optional): Cookies params (Optional[dict[str, Any]], optional): 调用参数 data (Optional[dict[str, Any]], optional): 调用参数,将以 `json` 字符串形式发送 files (Optional[dict[str, tuple[str, BufferedReader]]], optional): 发送文件参数 返回: Response: 收到的响应 ''' # 处理接收到的 API if self.api.startswith('http://') or self.api.startswith('https://'): self.api = self.api else: self.api = 'https://bestdori.com/api/' + self.api # 构建一个请求体 request = Request( method, self.api, cookies=cookies, params=params, data=cast(dict, dumps(data)) if data is not None else data, files=files, headers=self.headers if not self.api.endswith('/upload') else None ) # 构建代理服务器字典 if self.proxy is not None: proxies = {'http://': self.proxy, 'https://': self.proxy} else: proxies = None # 发送请求并获取响应 with Client(proxies=cast(dict, proxies)) as client: response = client.send(request) client.close() # 处理接收到的响应 response.raise_for_status() # 判断接收到的响应是否为 json 格式 if 'application/json' not in (content_type := response.headers.get('content-type', None)): if content_type is not None: return response else: raise Exception('接收到的响应没有 content-type。') if isinstance((response_data := response.json()), dict): if (result := response_data.get('result', None)) is not None: if result is False: if (code := response_data.get('code', None)) is not None: if code in REQUEST_EXCEPTION.keys(): # 若错误码已被记录 exception_class = REQUEST_EXCEPTION[code] if params is not None: raise exception_class(self.api, **params) elif data is not None: raise exception_class(self.api, **data) else: raise exception_class(self.api) else:
raise RequestException(self.api, code)
1
2023-11-16 13:09:20+00:00
2k
jidiai/Competition_OvercookedAI-2
run_log.py
[ { "identifier": "make", "path": "env/chooseenv.py", "snippet": "def make(env_type, seed=None, conf=None):\n file_path = os.path.join(os.path.dirname(__file__), 'config.json')\n if not conf:\n with open(file_path) as f:\n conf = json.load(f)[env_type]\n class_literal = conf['class_literal']\n if env_type.split('-')[0] in [\"olympics\"]:\n return getattr(env, class_literal)(conf, seed)\n else:\n return getattr(env, class_literal)(conf)" }, { "identifier": "get_logger", "path": "utils/get_logger.py", "snippet": "def get_logger(log_path, name, save_file=False, console_out=False, json_file=False):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n\n logger = logging.getLogger(name='Jidi')\n logger.setLevel(logging.INFO)\n # 每分钟建一个文件\n rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\n log_name = log_path + rq + '_' + name+ '.log'\n json_log_name = log_path + rq + '_' + name + '.json'\n logfile = log_name\n if save_file:\n fh = logging.FileHandler(logfile, mode='a')\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(message)s\")\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # 输出到控制台\n if console_out:\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logger.addHandler(console)\n\n # 输出到json\n if json_file:\n fh_json = logging.FileHandler(json_log_name, mode='a')\n fh_json.setLevel(logging.DEBUG)\n formatter_json = logging.Formatter(\"%(message)s\")\n fh_json.setFormatter(formatter_json)\n logger.addHandler(fh_json)\n\n return logger" }, { "identifier": "obs_type", "path": "env/obs_interfaces/observation.py", "snippet": "class GridObservation(object):\nclass VectorObservation(object):\nclass DictObservation(object):\nclass CustomObservation(object):\n def get_grid_observation(self, current_state, player_id, info_before):\n def get_grid_many_observation(self, current_state, player_id_list, info_before=''):\n def get_vector_observation(self, current_state, player_id, info_before):\n def get_vector_many_observation(self, current_state, player_id_list, info_before=''):\n def get_dict_observation(self, current_state, player_id, info_before):\n def get_dict_many_observation(self, current_state, player_id_list, info_before=''):\n def get_custom_observation(self, current_state, player_id):\n def get_custom_obs_space(self, player_id):\n def get_custom_many_observation(self, current_state, player_id_list):\n def get_custom_many_obs_space(self, player_id_list):" } ]
import os import time import json import numpy as np import argparse import sys from env.chooseenv import make from utils.get_logger import get_logger from env.obs_interfaces.observation import obs_type
1,348
# -*- coding:utf-8 -*- sys.path.append("./olympics_engine") class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def get_players_and_action_space_list(g): if sum(g.agent_nums) != g.n_player: raise Exception("agent number = %d 不正确,与n_player = %d 不匹配" % (sum(g.agent_nums), g.n_player)) n_agent_num = list(g.agent_nums) for i in range(1, len(n_agent_num)): n_agent_num[i] += n_agent_num[i - 1] # 根据agent number 分配 player id players_id = [] actions_space = [] for policy_i in range(len(g.obs_type)): if policy_i == 0: players_id_list = range(n_agent_num[policy_i]) else: players_id_list = range(n_agent_num[policy_i - 1], n_agent_num[policy_i]) players_id.append(players_id_list) action_space_list = [g.get_single_action_space(player_id) for player_id in players_id_list] actions_space.append(action_space_list) return players_id, actions_space def get_joint_action_eval(game, multi_part_agent_ids, policy_list, actions_spaces, all_observes): if len(policy_list) != len(game.agent_nums): error = "模型个数%d与玩家个数%d维度不正确!" % (len(policy_list), len(game.agent_nums)) raise Exception(error) # [[[0, 0, 0, 1]], [[0, 1, 0, 0]]] joint_action = [] for policy_i in range(len(policy_list)): if game.obs_type[policy_i] not in obs_type: raise Exception("可选obs类型:%s" % str(obs_type)) agents_id_list = multi_part_agent_ids[policy_i] action_space_list = actions_spaces[policy_i] function_name = 'm%d' % policy_i for i in range(len(agents_id_list)): agent_id = agents_id_list[i] a_obs = all_observes[agent_id] each = eval(function_name)(a_obs, action_space_list[i], game.is_act_continuous) joint_action.append(each) # print(joint_action) return joint_action def set_seed(g, env_name): if env_name.split("-")[0] in ['magent']: g.reset() seed = g.create_seed() g.set_seed(seed) def run_game(g, env_name, multi_part_agent_ids, actions_spaces, policy_list, render_mode): """ This function is used to generate log for Vue rendering. Saves .json file """ log_path = os.getcwd() + '/logs/' if not os.path.exists(log_path): os.mkdir(log_path)
# -*- coding:utf-8 -*- sys.path.append("./olympics_engine") class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def get_players_and_action_space_list(g): if sum(g.agent_nums) != g.n_player: raise Exception("agent number = %d 不正确,与n_player = %d 不匹配" % (sum(g.agent_nums), g.n_player)) n_agent_num = list(g.agent_nums) for i in range(1, len(n_agent_num)): n_agent_num[i] += n_agent_num[i - 1] # 根据agent number 分配 player id players_id = [] actions_space = [] for policy_i in range(len(g.obs_type)): if policy_i == 0: players_id_list = range(n_agent_num[policy_i]) else: players_id_list = range(n_agent_num[policy_i - 1], n_agent_num[policy_i]) players_id.append(players_id_list) action_space_list = [g.get_single_action_space(player_id) for player_id in players_id_list] actions_space.append(action_space_list) return players_id, actions_space def get_joint_action_eval(game, multi_part_agent_ids, policy_list, actions_spaces, all_observes): if len(policy_list) != len(game.agent_nums): error = "模型个数%d与玩家个数%d维度不正确!" % (len(policy_list), len(game.agent_nums)) raise Exception(error) # [[[0, 0, 0, 1]], [[0, 1, 0, 0]]] joint_action = [] for policy_i in range(len(policy_list)): if game.obs_type[policy_i] not in obs_type: raise Exception("可选obs类型:%s" % str(obs_type)) agents_id_list = multi_part_agent_ids[policy_i] action_space_list = actions_spaces[policy_i] function_name = 'm%d' % policy_i for i in range(len(agents_id_list)): agent_id = agents_id_list[i] a_obs = all_observes[agent_id] each = eval(function_name)(a_obs, action_space_list[i], game.is_act_continuous) joint_action.append(each) # print(joint_action) return joint_action def set_seed(g, env_name): if env_name.split("-")[0] in ['magent']: g.reset() seed = g.create_seed() g.set_seed(seed) def run_game(g, env_name, multi_part_agent_ids, actions_spaces, policy_list, render_mode): """ This function is used to generate log for Vue rendering. Saves .json file """ log_path = os.getcwd() + '/logs/' if not os.path.exists(log_path): os.mkdir(log_path)
logger = get_logger(log_path, g.game_name, json_file=render_mode)
1
2023-11-15 09:09:01+00:00
2k
AnonymGiant/ViLaM
lavis/processors/blip_processors.py
[ { "identifier": "registry", "path": "lavis/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "BaseProcessor", "path": "lavis/processors/base_processor.py", "snippet": "class BaseProcessor:\n def __init__(self):\n self.transform = lambda x: x\n return\n\n def __call__(self, item):\n return self.transform(item)\n\n @classmethod\n def from_config(cls, cfg=None):\n return cls()\n\n def build(self, **kwargs):\n cfg = OmegaConf.create(kwargs)\n\n return self.from_config(cfg)" }, { "identifier": "RandomAugment", "path": "lavis/processors/randaugment.py", "snippet": "class RandomAugment(object):\n def __init__(self, N=2, M=10, isPIL=False, augs=[]):\n self.N = N\n self.M = M\n self.isPIL = isPIL\n if augs:\n self.augs = augs\n else:\n self.augs = list(arg_dict.keys())\n\n def get_random_ops(self):\n sampled_ops = np.random.choice(self.augs, self.N)\n return [(op, 0.5, self.M) for op in sampled_ops]\n\n def __call__(self, img):\n if self.isPIL:\n img = np.array(img)\n ops = self.get_random_ops()\n for name, prob, level in ops:\n if np.random.random() > prob:\n continue\n args = arg_dict[name](level)\n img = func_dict[name](img, *args)\n return img" } ]
import re from lavis.common.registry import registry from lavis.processors.base_processor import BaseProcessor from lavis.processors.randaugment import RandomAugment from omegaconf import OmegaConf from torchvision import transforms from torchvision.transforms.functional import InterpolationMode
832
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class BlipImageBaseProcessor(BaseProcessor): def __init__(self, mean=None, std=None): if mean is None: mean = (0.48145466, 0.4578275, 0.40821073) if std is None: std = (0.26862954, 0.26130258, 0.27577711) self.normalize = transforms.Normalize(mean, std)
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class BlipImageBaseProcessor(BaseProcessor): def __init__(self, mean=None, std=None): if mean is None: mean = (0.48145466, 0.4578275, 0.40821073) if std is None: std = (0.26862954, 0.26130258, 0.27577711) self.normalize = transforms.Normalize(mean, std)
@registry.register_processor("blip_caption")
0
2023-11-14 08:57:59+00:00
2k
MorrisNein/pecapiku
pecapiku/single_value_cache.py
[ { "identifier": "BaseCache", "path": "pecapiku/base_cache.py", "snippet": "class omnimethod(Generic[DecoratedCallable]):\nclass BaseCache(ABC):\n def __init__(self, func: DecoratedCallable):\n def __get__(self, instance, owner) -> DecoratedCallable:\n def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):\n def _get_cache_val(self, key: Hashable) -> Any:\n def _put_cache_val(self, key: Hashable, value: Any):\n def _key_func(self, *args, **kwargs) -> Hashable:\n def _read_execute_write(self, func, func_args, func_kwargs, access, key_kwargs: dict | None = None) -> Any:\n def _decorate(cls, func: DecoratedCallable, *args, **kwargs) -> Decorator | DecoratedCallable:\n def _get_default_file_path(cls):\n def decorate(self: BaseCache | Type[BaseCache],\n func: DecoratedCallable,\n *,\n file_path: os.PathLike | str | None = None,\n access: CacheAccess | None = None, **kwargs) -> Decorator | DecoratedCallable:" }, { "identifier": "CacheAccess", "path": "pecapiku/cache_access.py", "snippet": "COMP_CACHE_FILE_NAME = '_comp_cache.pkl'\ndef _resolve_filepath(file_path: os.PathLike | str) -> Path:\ndef _initialize_cache(file_path: os.PathLike) -> NoCache | Any:\ndef update_cache(cache: Any, file_path: Path):" }, { "identifier": "NoCache", "path": "pecapiku/no_cache.py", "snippet": "class NoCache:\n def __bool__(self):\n return False\n\n def __eq__(self, other) -> bool:\n return isinstance(other, NoCache)\n\n def __repr__(self):\n return '<NoCache object>'" } ]
import os from functools import partial, wraps from typing import Any, Generic, Hashable from pecapiku.base_cache import BaseCache, DecoratedCallable, Decorator, omnimethod from pecapiku.cache_access import CacheAccess, _initialize_cache, _resolve_filepath, update_cache from pecapiku.no_cache import NoCache
912
from __future__ import annotations class SingleValueCache(BaseCache, Generic[DecoratedCallable]): """ Decorator for caching of evaluation results. Creates a "pickle" file at disk space on a specified path. Wraps a function and stores its execution result in the file. To apply, use the method ``SingleValueCache.decorate()`` or ``SingleValueCache(...)()``. Args: file_path - a path to an existing or non-existent pickle file. If a relative path or a filename is given, puts it into the framework cache directory. access - cache access indicators. The string may include the following indicators: - ``r`` - read - grants access to read the cache file content - ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present) - ``w`` - write - grants access to modify the cache file content Example ------- >>> import time >>> from timeit import timeit >>> def a_heavy_function(): ... time.sleep(1) ... ... @SingleValueCache('a_heavy_function.pkl') # or @SingleValueCache.decorate(file_path='a_heavy_function.pkl') >>> def a_heavy_function_cached(): ... time.sleep(1) >>> print(timeit(a_heavy_function, number=10)) # 10.070 >>> print(timeit(a_heavy_function_cached, number=10)) # 1.015 """ @classmethod def _get_default_file_path(cls) -> None: return None def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'): super().__init__(file_path, access) self.cache_dict = None def __call__(self, func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None,
from __future__ import annotations class SingleValueCache(BaseCache, Generic[DecoratedCallable]): """ Decorator for caching of evaluation results. Creates a "pickle" file at disk space on a specified path. Wraps a function and stores its execution result in the file. To apply, use the method ``SingleValueCache.decorate()`` or ``SingleValueCache(...)()``. Args: file_path - a path to an existing or non-existent pickle file. If a relative path or a filename is given, puts it into the framework cache directory. access - cache access indicators. The string may include the following indicators: - ``r`` - read - grants access to read the cache file content - ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present) - ``w`` - write - grants access to modify the cache file content Example ------- >>> import time >>> from timeit import timeit >>> def a_heavy_function(): ... time.sleep(1) ... ... @SingleValueCache('a_heavy_function.pkl') # or @SingleValueCache.decorate(file_path='a_heavy_function.pkl') >>> def a_heavy_function_cached(): ... time.sleep(1) >>> print(timeit(a_heavy_function, number=10)) # 10.070 >>> print(timeit(a_heavy_function_cached, number=10)) # 1.015 """ @classmethod def _get_default_file_path(cls) -> None: return None def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'): super().__init__(file_path, access) self.cache_dict = None def __call__(self, func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None,
access: CacheAccess | None = None) -> DecoratedCallable | Decorator:
0
2023-11-17 12:10:01+00:00
2k
gerlaxrex/parrot
parrot1/audio/extraction/audio_extraction.py
[ { "identifier": "get_extension", "path": "parrot1/utils/file_utils.py", "snippet": "def get_extension(filename: Union[str, os.PathLike]) -> str:\n return os.path.basename(filename).rsplit(\".\", 1)[1]" }, { "identifier": "split_on_silence", "path": "parrot1/audio/utils/silence.py", "snippet": "def split_on_silence(\n segment: pydub.AudioSegment,\n min_silence_len: int = 1000,\n silence_thresh: int = -16,\n padding: int | bool = 100,\n seek_step: int = 1,\n) -> tuple[list[pydub.AudioSegment], list[tuple[float, float]]]:\n \"\"\"It splits an audio segment on silent sections\n\n Parameters\n ----------\n segment\n The original audio segment\n min_silence_len\n The minimum length of silence in millis for a split\n silence_thresh\n The silence threshold in dBFS\n padding\n The amount of silence chunks should be padded with\n\n It keeps the audio segment from sounding like it is abruptly cut off\n seek_step\n The step size in millis for iterating over the segment\n\n Returns\n -------\n The audio chunks and split ranges in millis\n \"\"\"\n T = len(segment)\n\n if isinstance(padding, bool):\n padding = T if padding else 0\n\n R = deque(detect_nonsilent(segment, min_silence_len, silence_thresh, seek_step))\n\n Q = []\n\n while R:\n x = R.popleft()\n\n s = max(x[0] - padding, 0)\n e = min(x[1] + padding, T)\n\n x = (s, e)\n\n # It merges overlapping padding\n if Q and s < Q[-1][1]:\n s = Q[-1][0]\n e = max(Q[-1][1], e)\n\n Q[-1] = (s, e)\n\n continue\n\n Q.append(x)\n\n return [segment[s:e] for s, e in Q], Q" } ]
import logging import os from typing import List, Union from pydub import AudioSegment from tqdm import tqdm from parrot1.utils.file_utils import get_extension from parrot1.audio.utils.silence import split_on_silence
653
__logger = logging.getLogger(__name__) def get_audio_from_video(video_filename: Union[str, os.PathLike]) -> AudioSegment: """ Takes the audio from the video file :param video_filename: (Union[str, os.PathLike]) path to the video :return: (io.BytesIO) Audio bytes """ if not os.path.exists(video_filename): raise FileNotFoundError(f"File at {video_filename} does not exists.")
__logger = logging.getLogger(__name__) def get_audio_from_video(video_filename: Union[str, os.PathLike]) -> AudioSegment: """ Takes the audio from the video file :param video_filename: (Union[str, os.PathLike]) path to the video :return: (io.BytesIO) Audio bytes """ if not os.path.exists(video_filename): raise FileNotFoundError(f"File at {video_filename} does not exists.")
audio = AudioSegment.from_file(video_filename, format=get_extension(video_filename))
0
2023-11-14 22:33:32+00:00
2k
chenaoxuan/UsfUtils
usfutils/config.py
[ { "identifier": "master_only", "path": "usfutils/dist.py", "snippet": "def master_only(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper" }, { "identifier": "get_time_asc", "path": "usfutils/time.py", "snippet": "def get_time_asc() -> str:\n \"\"\"\n e.g. 'Sat Jun 06 16:26:11 1998'.\n :return:\n \"\"\"\n return time.asctime()" }, { "identifier": "UsfDict", "path": "usfutils/dict.py", "snippet": "class UsfDict(dict):\n def __init__(self, d: dict = None, **kwards):\n super().__init__()\n if d is None:\n d = {}\n else:\n d.update(**kwards)\n for k, v in d.items():\n setattr(self, k, v)\n # Class attributes\n for k in self.__class__.__dict__.keys():\n if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):\n setattr(self, k, getattr(self, k))\n\n def __setattr__(self, name, value):\n if isinstance(value, (list, tuple)):\n value = [self.__class__(x)\n if isinstance(x, dict) else x for x in value]\n elif isinstance(value, dict) and not isinstance(value, UsfDict):\n value = UsfDict(value)\n super(UsfDict, self).__setitem__(name, value)\n\n __setitem__ = __setattr__\n\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n def update(self, e: Union['UsfDict', dict, argparse.Namespace] = None, verbose=True, **kwargs):\n d = e or dict()\n if isinstance(d, argparse.Namespace):\n d = vars(d)\n d.update(kwargs)\n output_msg = []\n for k in d:\n v = self.get(k, None)\n if v is not None and (not isinstance(type(v), type(d[k])) or v != d[k]):\n output_msg.append(str(k))\n setattr(self, k, d[k])\n if verbose and len(output_msg):\n print(f\"{output_msg} in UsfDict has been modified!\")\n\n def pop(self, k, d=None):\n delattr(self, k)\n return super(UsfDict, self).pop(k, d)" } ]
import io import os import sys import yaml from shutil import copyfile from typing import Union from .dist import master_only from .time import get_time_asc from .dict import UsfDict
669
__all__ = [ 'load_yaml', 'dict_to_yaml', 'copy_opt_file' ]
__all__ = [ 'load_yaml', 'dict_to_yaml', 'copy_opt_file' ]
def load_yaml(path: str) -> UsfDict:
2
2023-11-16 04:39:34+00:00
2k
ErdemOzgen/DevSecOpsBuilder
main.py
[ { "identifier": "pipeline_executer", "path": "devsecopsbuilder/pipeline_executer.py", "snippet": "def load_configuration(filepath):\ndef create_output_directory(directory):\ndef install_tools(tools):\ndef update_tools(tools):\ndef run_command(step, output_dir, **kwargs):\ndef execute_post_command(step, **kwargs):\ndef get_output_file_path(output_dir, step_name):\ndef execute_command(command):\ndef save_command_output(result, output_file, step_name, command):\ndef get_repository_languages(repository_path):\ndef main():" }, { "identifier": "convert_graph", "path": "devsecopsbuilder/convert_graph.py", "snippet": "def create_workflow_graph(steps):\ndef parse_yaml_and_create_graph(file_path):\n G = nx.DiGraph()" }, { "identifier": "convert_pipeline", "path": "devsecopsbuilder/convert_pipeline.py", "snippet": "def generate_jenkinsfile(yaml_path, jenkinsfile_path):\ndef write_step(jfile, step):" }, { "identifier": "generate_report", "path": "devsecopsbuilder/generate_report.py", "snippet": "def get_file_path(base_dir, scan_type, file_name):\ndef process_json_data(file_path, heading_title, item_processor, data_tag, addHeader=True, giveInfo=True): # noqa: E501\ndef bandit_item_processor(item):\ndef grype_item_processor(item):\ndef safety_item_processor(item):\ndef secret_item_processor(result):\ndef sbom_item_processor(component):\ndef bandit_results(bandit_file_path):\ndef grype_results(grype_file_path):\ndef safety_info(safety_file_path):\n def safety_scanned_packages(item):\ndef safety_results(safety_file_path):\ndef secret_results(secret_file_path):\n def process_secrets(item):\ndef sbom_results(sbom_file_path):\ndef generate_pdf(output_filename, **scan_files): # noqa: E501\ndef find_file_by_keyword(base_dir, scan_type, keyword):\ndef find_and_generate_report(base_dir, scan_type, output_filename):" }, { "identifier": "asciiart", "path": "devsecopsbuilder/asciiart.py", "snippet": "def print_ascii_art():" } ]
import argparse import networkx as nx import matplotlib.pyplot as plt from devsecopsbuilder import pipeline_executer from devsecopsbuilder import convert_graph from devsecopsbuilder import convert_pipeline from devsecopsbuilder import generate_report # noqa: F401 from devsecopsbuilder import asciiart
1,292
def main(): parser = argparse.ArgumentParser(description="Pipeline Execution Script") parser.add_argument("--install", action="store_true", help="Install tools") parser.add_argument("--update", action="store_true", help="Update tools") parser.add_argument( "--execute", action="store_true", help="Execute commands from playbook" ) parser.add_argument( "--config", default="./playbooks/playbook.yaml", help="Path to configuration file (optional)", ) parser.add_argument( "--output_dir", default="command_outputs/outputs", help="Path to output directory (optional)", ) parser.add_argument( "--tools_config", default="./tools/tools.yaml", help="Path to tools configuration file (optional)", ) parser.add_argument( "--report", action="store_true", help="Generates a report of the results of playbooks", ) parser.add_argument( "--generate_graph", action="store_true", help="Generate graph of defined yaml workflow", ) parser.add_argument( "--graph_yaml", default="./playbooks/playbook.yaml", help="Path to yaml file for generating graph (optional)", ) parser.add_argument( "--graph_output_dir", default="command_outputs/graphs/graph.png", help="Path to graph output directory (optional)", ) parser.add_argument( "--convert_pipeline", action="store_true", help="Convert yaml to pipeline" # noqa: E501 ) parser.add_argument( "--pipeline_yaml", default="./playbooks/playbook.yaml", help="Path to workflow yaml file to pipeline (optional)", ) parser.add_argument( "--pipeline_output_dir", default="command_outputs/jenkinsFiles/Jenkinsfile", help="Path to pipeline output directory (optional)", ) args = parser.parse_args() # Check if no actionable arguments were provided actionable_args = [ args.install, args.update, args.execute, args.report, args.generate_graph, args.convert_pipeline, ] if not any(actionable_args): asciiart.print_ascii_art() parser.print_help() return # Load configuration from specified or default path config = pipeline_executer.load_configuration(args.config) # Create specified or default output directory pipeline_executer.create_output_directory(args.output_dir) # Define default paths and other variables as a dictionary default_variables = { # Default variable values go here } if args.install or args.update: # Load tool configuration from the YAML file tools_config = pipeline_executer.load_configuration(args.tools_config) all_tools = tools_config["tools_to_install"]["tools"] default_tools = [tool for tool in all_tools if tool.get("default", False)] # noqa: E501 # Assuming 'tools' is the relevant section in the configuration for install/update # noqa: E501 # tools = config.get("tools", []) if args.install: # Install tools pipeline_executer.install_tools(default_tools) elif args.update: # Update tools pipeline_executer.update_tools(default_tools) if args.execute: # Execute configured commands commands_to_run = config.get("commands_to_run", {}).get("steps", []) for step in commands_to_run: if isinstance(step, dict): # Update default variables with step-specific ones if they exist # noqa: E501 step_variables = {**default_variables, **step.get("parameters", {})} # noqa: E501 pipeline_executer.run_command(step, args.output_dir, **step_variables) # noqa: E501 else: print(f"Invalid step format: {step}") if args.generate_graph: try:
def main(): parser = argparse.ArgumentParser(description="Pipeline Execution Script") parser.add_argument("--install", action="store_true", help="Install tools") parser.add_argument("--update", action="store_true", help="Update tools") parser.add_argument( "--execute", action="store_true", help="Execute commands from playbook" ) parser.add_argument( "--config", default="./playbooks/playbook.yaml", help="Path to configuration file (optional)", ) parser.add_argument( "--output_dir", default="command_outputs/outputs", help="Path to output directory (optional)", ) parser.add_argument( "--tools_config", default="./tools/tools.yaml", help="Path to tools configuration file (optional)", ) parser.add_argument( "--report", action="store_true", help="Generates a report of the results of playbooks", ) parser.add_argument( "--generate_graph", action="store_true", help="Generate graph of defined yaml workflow", ) parser.add_argument( "--graph_yaml", default="./playbooks/playbook.yaml", help="Path to yaml file for generating graph (optional)", ) parser.add_argument( "--graph_output_dir", default="command_outputs/graphs/graph.png", help="Path to graph output directory (optional)", ) parser.add_argument( "--convert_pipeline", action="store_true", help="Convert yaml to pipeline" # noqa: E501 ) parser.add_argument( "--pipeline_yaml", default="./playbooks/playbook.yaml", help="Path to workflow yaml file to pipeline (optional)", ) parser.add_argument( "--pipeline_output_dir", default="command_outputs/jenkinsFiles/Jenkinsfile", help="Path to pipeline output directory (optional)", ) args = parser.parse_args() # Check if no actionable arguments were provided actionable_args = [ args.install, args.update, args.execute, args.report, args.generate_graph, args.convert_pipeline, ] if not any(actionable_args): asciiart.print_ascii_art() parser.print_help() return # Load configuration from specified or default path config = pipeline_executer.load_configuration(args.config) # Create specified or default output directory pipeline_executer.create_output_directory(args.output_dir) # Define default paths and other variables as a dictionary default_variables = { # Default variable values go here } if args.install or args.update: # Load tool configuration from the YAML file tools_config = pipeline_executer.load_configuration(args.tools_config) all_tools = tools_config["tools_to_install"]["tools"] default_tools = [tool for tool in all_tools if tool.get("default", False)] # noqa: E501 # Assuming 'tools' is the relevant section in the configuration for install/update # noqa: E501 # tools = config.get("tools", []) if args.install: # Install tools pipeline_executer.install_tools(default_tools) elif args.update: # Update tools pipeline_executer.update_tools(default_tools) if args.execute: # Execute configured commands commands_to_run = config.get("commands_to_run", {}).get("steps", []) for step in commands_to_run: if isinstance(step, dict): # Update default variables with step-specific ones if they exist # noqa: E501 step_variables = {**default_variables, **step.get("parameters", {})} # noqa: E501 pipeline_executer.run_command(step, args.output_dir, **step_variables) # noqa: E501 else: print(f"Invalid step format: {step}") if args.generate_graph: try:
workflow_graph = convert_graph.parse_yaml_and_create_graph(args.graph_yaml) # noqa: E501
1
2023-11-14 07:50:52+00:00
2k
doodledood/chat-flock
chatflock/participants/user.py
[ { "identifier": "ActiveChatParticipant", "path": "chatflock/base.py", "snippet": "class ActiveChatParticipant(ChatParticipant):\n symbol: str\n messages_hidden: bool = False\n\n def __init__(self, name: str, symbol: str = \"👤\", messages_hidden: bool = False):\n super().__init__(name=name)\n\n self.symbol = symbol\n self.messages_hidden = messages_hidden\n\n @abc.abstractmethod\n def respond_to_chat(self, chat: \"Chat\") -> str:\n raise NotImplementedError()\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name}\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n return f\"{prefix}- Name: {self.name}\\n{prefix} Symbol: {self.symbol}\"" }, { "identifier": "Chat", "path": "chatflock/base.py", "snippet": "class Chat:\n backing_store: ChatDataBackingStore\n renderer: ChatRenderer\n name: Optional[str] = None\n max_total_messages: Optional[int] = None\n hide_messages: bool = False\n\n def __init__(\n self,\n backing_store: ChatDataBackingStore,\n renderer: ChatRenderer,\n initial_participants: Optional[Sequence[ChatParticipant]] = None,\n name: Optional[str] = None,\n max_total_messages: Optional[int] = None,\n hide_messages: bool = False,\n ):\n if max_total_messages is not None and max_total_messages <= 0:\n raise ValueError(\"Max total messages must be None or greater than 0.\")\n\n self.backing_store = backing_store\n self.renderer = renderer\n self.name = name\n self.hide_messages = hide_messages\n self.max_total_messages = max_total_messages\n\n for i, participant in enumerate(initial_participants or []):\n self.add_participant(participant)\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(\n participant.name\n ):\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.backing_store.add_participant(participant)\n\n all_participants = (\n self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()\n )\n for participant in all_participants:\n participant.on_participant_joined_chat(chat=self, participant=participant)\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n self.backing_store.remove_participant(participant)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_participant_left_chat(chat=self, participant=participant)\n\n def add_message(self, sender_name: str, content: str) -> None:\n sender = self.backing_store.get_active_participant_by_name(sender_name)\n if sender is None:\n raise ChatParticipantNotJoinedToChatError(sender_name)\n\n message = self.backing_store.add_message(sender_name=sender_name, content=content)\n\n self.renderer.render_new_chat_message(chat=self, message=message)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_new_chat_message(chat=self, message=message)\n\n def get_messages(self) -> List[ChatMessage]:\n return self.backing_store.get_messages()\n\n def clear_messages(self):\n self.backing_store.clear_messages()\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n 
return self.backing_store.get_active_participants()\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n return self.backing_store.get_non_active_participants()\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n return self.backing_store.get_active_participant_by_name(name=name)\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n return self.backing_store.get_non_active_participant_by_name(name=name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_active_participant_with_name(participant_name=participant_name)\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)\n\n @property\n def active_participants_str(self):\n return \"\\n\\n\".join([participant.detailed_str() for participant in self.get_active_participants()])" } ]
from typing import Any from chatflock.base import ActiveChatParticipant, Chat
1,260
class UserChatParticipant(ActiveChatParticipant): def __init__(self, name: str = "User", role: str = "User", symbol: str = "👤", **kwargs: Any): super().__init__(name, messages_hidden=True, **kwargs) self.role = role self.symbol = symbol
class UserChatParticipant(ActiveChatParticipant): def __init__(self, name: str = "User", role: str = "User", symbol: str = "👤", **kwargs: Any): super().__init__(name, messages_hidden=True, **kwargs) self.role = role self.symbol = symbol
def respond_to_chat(self, chat: Chat) -> str:
1
2023-11-12 11:10:58+00:00
2k
phidatahq/junior-de
app/pages/3_DuckGPT_S3.py
[ { "identifier": "get_openai_key", "path": "app/openai_key.py", "snippet": "def get_openai_key() -> Optional[str]:\n \"\"\"Sidebar component to get OpenAI API key\"\"\"\n\n # Get OpenAI API key from environment variable\n openai_key: Optional[str] = getenv(\"OPENAI_API_KEY\")\n # If not found, get it from user input\n if openai_key is None or openai_key == \"\" or openai_key == \"sk-***\":\n api_key = st.sidebar.text_input(\"OpenAI API key\", placeholder=\"sk-***\", key=\"api_key\")\n if api_key != \"sk-***\" or api_key != \"\" or api_key is not None:\n openai_key = api_key\n\n # Store it in session state and environment variable\n if openai_key is not None and openai_key != \"\":\n st.session_state[\"OPENAI_API_KEY\"] = openai_key\n environ[\"OPENAI_API_KEY\"] = openai_key\n\n return openai_key" }, { "identifier": "check_password", "path": "app/password.py", "snippet": "def check_password() -> bool:\n \"\"\"Component to checks if a password entered by the user is correct.\n To use this component, set the environment variable `APP_PASSWORD`.\n\n Returns:\n bool: `True` if the user had the correct password.\n \"\"\"\n\n app_password = getenv(\"APP_PASSWORD\")\n if app_password is None:\n return True\n\n def check_first_run_password():\n \"\"\"Checks whether a password entered on the first run is correct.\"\"\"\n\n if \"first_run_password\" in st.session_state:\n password_to_check = st.session_state[\"first_run_password\"]\n if password_to_check == app_password:\n st.session_state[\"password_correct\"] = True\n # don't store password\n del st.session_state[\"first_run_password\"]\n else:\n st.session_state[\"password_correct\"] = False\n\n def check_updated_password():\n \"\"\"Checks whether an updated password is correct.\"\"\"\n\n if \"updated_password\" in st.session_state:\n password_to_check = st.session_state[\"updated_password\"]\n if password_to_check == app_password:\n st.session_state[\"password_correct\"] = True\n # don't store password\n del st.session_state[\"updated_password\"]\n else:\n st.session_state[\"password_correct\"] = False\n\n # First run, show input for password.\n if \"password_correct\" not in st.session_state:\n st.text_input(\n \"Password\",\n type=\"password\",\n on_change=check_first_run_password,\n key=\"first_run_password\",\n )\n return False\n # Password incorrect, show input for updated password + error.\n elif not st.session_state[\"password_correct\"]:\n st.text_input(\n \"Password\",\n type=\"password\",\n on_change=check_updated_password,\n key=\"updated_password\",\n )\n st.error(\"😕 Password incorrect\")\n return False\n # Password correct.\n else:\n return True" }, { "identifier": "reload_button", "path": "app/reload.py", "snippet": "def reload_button():\n \"\"\"Sidebar component to show reload button\"\"\"\n\n st.sidebar.markdown(\"---\")\n if st.sidebar.button(\"Reload Session\"):\n st.session_state.clear()\n st.rerun()" }, { "identifier": "get_user_name", "path": "app/user_name.py", "snippet": "def get_user_name() -> Optional[str]:\n \"\"\"Sidebar component to get username\"\"\"\n\n # Get user_name from user if not in session state\n if \"user_name\" not in st.session_state:\n username_input_container = st.sidebar.empty()\n username = username_input_container.text_input(\":technologist: Enter username\")\n if username != \"\":\n st.session_state[\"user_name\"] = username\n username_input_container.empty()\n\n # Get user_name from session state\n user_name = st.session_state.get(\"user_name\")\n return user_name" }, { "identifier": 
"load_s3_tables", "path": "duckgpt/s3_tables.py", "snippet": "def load_s3_tables(duckdb_tools: DuckDbTools) -> None:\n \"\"\"Load S3 tables to DuckDB\"\"\"\n\n for table in s3_tables:\n duckdb_tools.create_table_from_path(path=table.path, table=table.name)\n logger.info(f\"Created table: {table.name}\")" }, { "identifier": "duckdb_s3_tools", "path": "llm/conversations/duckgpt_s3.py", "snippet": "def get_duckgpt_s3_conversation(\n user_name: Optional[str] = None,\n conversation_id: Optional[str] = None,\n debug_mode: bool = False,\n) -> Conversation:" }, { "identifier": "logger", "path": "utils/log.py", "snippet": "def build_logger(logger_name: str) -> logging.Logger:" } ]
from typing import List from phi.conversation import Conversation from app.openai_key import get_openai_key from app.password import check_password from app.reload import reload_button from app.user_name import get_user_name from duckgpt.s3_tables import load_s3_tables from llm.conversations.duckgpt_s3 import duckdb_s3_tools, get_duckgpt_s3_conversation from utils.log import logger import streamlit as st
1,278
st.title(":snowman: DuckGPT") st.markdown('<a href="https://github.com/phidatahq/phidata"><h4>by phidata</h4></a>', unsafe_allow_html=True) def restart_conversation(): st.session_state["s3_conversation"] = None st.session_state["s3_conversation_id"] = None st.rerun() def main() -> None: # Get users OpenAI API key get_openai_key() # Get user name
st.title(":snowman: DuckGPT") st.markdown('<a href="https://github.com/phidatahq/phidata"><h4>by phidata</h4></a>', unsafe_allow_html=True) def restart_conversation(): st.session_state["s3_conversation"] = None st.session_state["s3_conversation_id"] = None st.rerun() def main() -> None: # Get users OpenAI API key get_openai_key() # Get user name
user_name = get_user_name()
3
2023-11-14 10:44:20+00:00
2k
YoungJooHan/NM-FlowGAN
util/file_manager.py
[ { "identifier": "tensor2np", "path": "util/util.py", "snippet": "def tensor2np(t:torch.Tensor):\n '''\n transform torch Tensor to numpy having opencv image form.\n RGB -> BGR\n (c,h,w) -> (h,w,c)\n '''\n t = t.cpu().detach()\n\n # gray\n if len(t.shape) == 2:\n return t.permute(1,2,0).numpy()\n # RGB -> BGR\n elif len(t.shape) == 3:\n return np.flip(t.permute(1,2,0).numpy(), axis=2)\n # image batch\n elif len(t.shape) == 4:\n return np.flip(t.permute(0,2,3,1).numpy(), axis=3)\n else:\n raise RuntimeError('wrong tensor dimensions : %s'%(t.shape,))" }, { "identifier": "save_img", "path": "util/util.py", "snippet": "def save_img(dir_name, file_name, img):\n path = os.path.join(dir_name, file_name)\n if 'raw' in path[-3:]:\n os.makedirs(dir_name, exist_ok=True)\n with open(path, 'w') as fid:\n img.tofile(fid)\n else:\n if len(img.shape) == 3 and img.shape[-1] != 3 and img.shape[-1] > 1:\n cv2.imwritemulti(path, img.transpose([2,0,1])) # multi stack image, convert to CHW\n elif len(img.shape) == 4 and img.shape[0] > 1: # batch image, only grey image is available\n img = img.squeeze(-1)\n cv2.imwritemulti(path, img) \n elif len(img.shape) == 4 and img.shape[0] <= 1: # single batch image\n img = img.squeeze(0)\n cv2.imwrite(path, img)\n else:\n cv2.imwrite(path, img)" } ]
import os import cv2 import numpy as np import torch from .util import tensor2np, save_img
789
class FileManager: def __init__(self, session_name, output_path=None): if output_path is None: self.output_folder = "./output" else: self.output_folder = output_path if not os.path.isdir(self.output_folder): os.makedirs(self.output_folder) print("[WARNING] output folder is not exist, create new one") # init session self.session_name = session_name os.makedirs(os.path.join(self.output_folder, self.session_name), exist_ok=True) # mkdir for directory in ['checkpoint', 'img']: self.make_dir(directory) def is_dir_exist(self, dir_name:str) -> bool: return os.path.isdir(os.path.join(self.output_folder, self.session_name, dir_name)) def make_dir(self, dir_name:str) -> str: os.makedirs(os.path.join(self.output_folder, self.session_name, dir_name), exist_ok=True) def get_dir(self, dir_name:str) -> str: # -> './output/<session_name>/dir_name' return os.path.join(self.output_folder, self.session_name, dir_name) def save_img_tensor(self, dir_name:str, file_name:str, img:torch.Tensor, ext='png'): self.save_img_numpy(dir_name, file_name, tensor2np(img), ext) def save_img_numpy(self, dir_name:str, file_name:str, img:np.array, ext='png'): if np.shape(img)[2] == 1:
class FileManager: def __init__(self, session_name, output_path=None): if output_path is None: self.output_folder = "./output" else: self.output_folder = output_path if not os.path.isdir(self.output_folder): os.makedirs(self.output_folder) print("[WARNING] output folder is not exist, create new one") # init session self.session_name = session_name os.makedirs(os.path.join(self.output_folder, self.session_name), exist_ok=True) # mkdir for directory in ['checkpoint', 'img']: self.make_dir(directory) def is_dir_exist(self, dir_name:str) -> bool: return os.path.isdir(os.path.join(self.output_folder, self.session_name, dir_name)) def make_dir(self, dir_name:str) -> str: os.makedirs(os.path.join(self.output_folder, self.session_name, dir_name), exist_ok=True) def get_dir(self, dir_name:str) -> str: # -> './output/<session_name>/dir_name' return os.path.join(self.output_folder, self.session_name, dir_name) def save_img_tensor(self, dir_name:str, file_name:str, img:torch.Tensor, ext='png'): self.save_img_numpy(dir_name, file_name, tensor2np(img), ext) def save_img_numpy(self, dir_name:str, file_name:str, img:np.array, ext='png'): if np.shape(img)[2] == 1:
save_img(self.get_dir(dir_name), '%s.%s'%(file_name, ext), np.squeeze(img, 2))
1
2023-11-16 02:22:32+00:00
2k
VCasecnikovs/RAGAgainstTheMachine
sourcing.py
[ { "identifier": "chat_inference", "path": "chatting.py", "snippet": "def chat_inference(\n messages: list[ChatMessage],\n client: OpenAI,\n model=\"gpt-4-1106-preview\",\n):\n formatted_messages = []\n for message in messages:\n formatted_messages.append(\n {\n \"role\": message.role,\n \"content\": message.content,\n }\n )\n\n completion = client.chat.completions.create(\n response_format={\"type\": \"json_object\"},\n model=model,\n messages=[\n *formatted_messages,\n ],\n )\n\n model_answer = completion.choices[0].message.content\n return model_answer" }, { "identifier": "ChatMessage", "path": "chatting.py", "snippet": "class ChatMessage(BaseModel):\n role: Role\n content: str" }, { "identifier": "get_openAI_client", "path": "chatting.py", "snippet": "def get_openAI_client():\n load_dotenv()\n\n client = OpenAI()\n return client" }, { "identifier": "Role", "path": "chatting.py", "snippet": "class Role(str, Enum):\n SYSTEM = \"system\"\n USER = \"user\"\n ASSISTANT = \"assistant\"" } ]
import requests import os import json from dotenv import load_dotenv from newspaper import Article from chatting import chat_inference, ChatMessage, get_openAI_client, Role
1,090
YOU_HEADERS = {"X-API-Key": os.environ.get("YOUCOM_API_KEY", "")} def _get_you_search_impl( query: str, page_index: int = 0, limit: int = 20, country: str = "" ): url = "https://api.ydc-index.io/search" query_args = {"query": query} if page_index: query_args["offset"] = page_index if limit: query_args["count"] = limit if country: query_args["country"] = country response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args) results = [] for line in response.json()["hits"]: snippets = " ".join(line["snippets"]) description = ". ".join([line["title"], snippets]) results.append( { "url": line["url"], "title": line["title"], "text": description, } ) return results def _get_you_news_impl( query: str, page_index: int = 0, limit: int = 20, country: str = "" ): url = "https://api.ydc-index.io/news" query_args = {"q": query} if page_index: query_args["offset"] = page_index if limit: query_args["count"] = limit if country: query_args["country"] = country response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args) results = [] for line in response.json()["news"]["results"]: results.append( {"url": line["url"], "title": line["title"], "text": line["description"]} ) return results def get_you_search(query: str): # TODO: pass the page here somehow return _get_you_search_impl(query, page_index=0, country="") def get_you_news(query: str): # TODO: pass the page here somehow results = [] for _ in range(1): results.extend(_get_you_news_impl(query, page_index=0, country="")) return results def _get_newsapi_impl( query: str, page_index: int = 0, limit: int = 20 ): url = "https://newsapi.org/v2/everything" query_args = { "q": query, "apiKey": os.environ.get("NEWSAPI_API_KEY") } if page_index: query_args["page"] = page_index+1 if limit: query_args["pageSize"] = limit response = requests.request("GET", url, params=query_args) results = [] for line in response.json()["articles"]: results.append( {"url": line["url"], "title": line["title"], "text": line["description"] + " " + line["content"]} ) return results def get_newsapi_news(query: str): results = [] for _ in range(1): results.extend(_get_newsapi_impl(query, page_index=0)) return results SOURCES = { "you_news": get_you_news, # "you_search": get_you_search, # "news_api": get_newsapi_news, } def get_page_text(url: str) -> str: try: article = Article(url) article.download() article.parse() return article.text except Exception: return "" def scrape_data(articles_data: list[dict]): for article in articles_data: parsed_text = get_page_text(article["url"]) if parsed_text: article["text"] = article["text"] + " ." + parsed_text def filter_urls(urls):
load_dotenv() YOU_HEADERS = {"X-API-Key": os.environ.get("YOUCOM_API_KEY", "")} def _get_you_search_impl( query: str, page_index: int = 0, limit: int = 20, country: str = "" ): url = "https://api.ydc-index.io/search" query_args = {"query": query} if page_index: query_args["offset"] = page_index if limit: query_args["count"] = limit if country: query_args["country"] = country response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args) results = [] for line in response.json()["hits"]: snippets = " ".join(line["snippets"]) description = ". ".join([line["title"], snippets]) results.append( { "url": line["url"], "title": line["title"], "text": description, } ) return results def _get_you_news_impl( query: str, page_index: int = 0, limit: int = 20, country: str = "" ): url = "https://api.ydc-index.io/news" query_args = {"q": query} if page_index: query_args["offset"] = page_index if limit: query_args["count"] = limit if country: query_args["country"] = country response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args) results = [] for line in response.json()["news"]["results"]: results.append( {"url": line["url"], "title": line["title"], "text": line["description"]} ) return results def get_you_search(query: str): # TODO: pass the page here somehow return _get_you_search_impl(query, page_index=0, country="") def get_you_news(query: str): # TODO: pass the page here somehow results = [] for _ in range(1): results.extend(_get_you_news_impl(query, page_index=0, country="")) return results def _get_newsapi_impl( query: str, page_index: int = 0, limit: int = 20 ): url = "https://newsapi.org/v2/everything" query_args = { "q": query, "apiKey": os.environ.get("NEWSAPI_API_KEY") } if page_index: query_args["page"] = page_index+1 if limit: query_args["pageSize"] = limit response = requests.request("GET", url, params=query_args) results = [] for line in response.json()["articles"]: results.append( {"url": line["url"], "title": line["title"], "text": line["description"] + " " + line["content"]} ) return results def get_newsapi_news(query: str): results = [] for _ in range(1): results.extend(_get_newsapi_impl(query, page_index=0)) return results SOURCES = { "you_news": get_you_news, # "you_search": get_you_search, # "news_api": get_newsapi_news, } def get_page_text(url: str) -> str: try: article = Article(url) article.download() article.parse() return article.text except Exception: return "" def scrape_data(articles_data: list[dict]): for article in articles_data: parsed_text = get_page_text(article["url"]) if parsed_text: article["text"] = article["text"] + " ." + parsed_text def filter_urls(urls):
client = get_openAI_client()
2
2023-11-18 22:12:07+00:00
2k
TimeEnjoyed/TimeBot
core/bots.py
[ { "identifier": "config", "path": "core/config.py", "snippet": "" }, { "identifier": "MBTI_TYPES", "path": "core/constants.py", "snippet": "MBTI_TYPES: list[str] = [\n \"ESTP\",\n \"ESTJ\",\n \"ESFP\",\n \"ESFJ\",\n \"ISTP\",\n \"ISTJ\",\n \"ISFP\",\n \"ISFJ\",\n \"ENFJ\",\n \"ENTP\",\n \"ENFP\",\n \"ENTJ\",\n \"INTP\",\n \"INFJ\",\n \"INTJ\",\n \"INFP\",\n]" } ]
import asyncio import json import logging import pathlib import aiohttp import discord import twitchio import wavelink from typing import TYPE_CHECKING from urllib.parse import quote from discord.ext import commands from twitchio.ext import commands as tcommands from .config import config from .constants import MBTI_TYPES from collections.abc import Sequence from typing import Any from database import Database
1,296
if TYPE_CHECKING: logger: logging.Logger = logging.getLogger(__name__) LIVE_ROLE_ID: int = 1182206699969458226 SUBBED_ROLE_ID: int = 873044115279990836 class DiscordBot(commands.Bot): tbot: TwitchBot def __init__(self, *, database: Database) -> None: self.database = database intents: discord.Intents = discord.Intents.default() intents.message_content = True intents.members = True intents.presences = True self.loaded: bool = False super().__init__(intents=intents, command_prefix=config["DISCORD"]["prefix"]) async def on_ready(self) -> None: if self.loaded: return self.loaded = True assert self.user logger.info(f"Logged into Discord as {self.user} | {self.user.id}") if config["DEBUG"]["enabled"] is True: return guild: discord.Guild = self.get_guild(859565527343955998) # type: ignore role: discord.Role = guild.get_role(LIVE_ROLE_ID) # type: ignore subbed: discord.Role = guild.get_role(SUBBED_ROLE_ID) # type: ignore for member in guild.members: if subbed not in member.roles: continue streaming = False for activity in member.activities: if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch": streaming = True if streaming and role not in member.roles: await member.add_roles(role) await asyncio.sleep(1) elif not streaming and role in member.roles: await member.remove_roles(role) await asyncio.sleep(1) logger.info("Finished updating roles in on_ready event.") async def setup_hook(self) -> None: node: wavelink.Node = wavelink.Node(uri=config["WAVELINK"]["uri"], password=config["WAVELINK"]["password"]) await wavelink.Pool.connect(nodes=[node], client=self, cache_capacity=100) location = ("extensions/discord", "extensions.discord") extensions: list[str] = [f"{location[1]}.{f.stem}" for f in pathlib.Path(location[0]).glob("*.py")] for extension in extensions: await self.load_extension(extension) logger.info("Loaded extensions for Discord Bot.") async def on_wavelink_node_ready(self, payload: wavelink.NodeReadyEventPayload) -> None: node: wavelink.Node = payload.node logger.info("Wavelink successfully connected: %s. Resumed: %s", node.identifier, payload.resumed) async def on_command_error(self, context: commands.Context, exception: commands.CommandError) -> None: if isinstance(exception, commands.CommandNotFound): return logger.exception(exception) async def on_presence_update(self, before: discord.Member, after: discord.Member) -> None: if config["DEBUG"]["enabled"] is True: return if before.guild.id != 859565527343955998: return subbed: discord.Role | None = after.guild.get_role(SUBBED_ROLE_ID) if subbed not in after.roles: return bstream: discord.Streaming | None = None astream: discord.Streaming | None = None for activity in before.activities: if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch": bstream = activity for activity in after.activities: if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch": astream = activity if bstream is not None and astream is not None: return role: discord.Role = before.guild.get_role(LIVE_ROLE_ID) # type: ignore if not bstream and astream and role not in before.roles: await before.add_roles(role, reason="Started streaming on Twitch") elif not astream and bstream and role in after.roles: await after.remove_roles(role, reason="Stopped streaming on Twitch") def mbti_count(self) -> dict[str, int]: guild: discord.Guild | None = self.get_guild(859565527343955998) assert guild is not None roles: Sequence[discord.Role] = guild.roles
"""Copyright 2023 TimeEnjoyed <https://github.com/TimeEnjoyed/> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import annotations if TYPE_CHECKING: logger: logging.Logger = logging.getLogger(__name__) LIVE_ROLE_ID: int = 1182206699969458226 SUBBED_ROLE_ID: int = 873044115279990836 class DiscordBot(commands.Bot): tbot: TwitchBot def __init__(self, *, database: Database) -> None: self.database = database intents: discord.Intents = discord.Intents.default() intents.message_content = True intents.members = True intents.presences = True self.loaded: bool = False super().__init__(intents=intents, command_prefix=config["DISCORD"]["prefix"]) async def on_ready(self) -> None: if self.loaded: return self.loaded = True assert self.user logger.info(f"Logged into Discord as {self.user} | {self.user.id}") if config["DEBUG"]["enabled"] is True: return guild: discord.Guild = self.get_guild(859565527343955998) # type: ignore role: discord.Role = guild.get_role(LIVE_ROLE_ID) # type: ignore subbed: discord.Role = guild.get_role(SUBBED_ROLE_ID) # type: ignore for member in guild.members: if subbed not in member.roles: continue streaming = False for activity in member.activities: if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch": streaming = True if streaming and role not in member.roles: await member.add_roles(role) await asyncio.sleep(1) elif not streaming and role in member.roles: await member.remove_roles(role) await asyncio.sleep(1) logger.info("Finished updating roles in on_ready event.") async def setup_hook(self) -> None: node: wavelink.Node = wavelink.Node(uri=config["WAVELINK"]["uri"], password=config["WAVELINK"]["password"]) await wavelink.Pool.connect(nodes=[node], client=self, cache_capacity=100) location = ("extensions/discord", "extensions.discord") extensions: list[str] = [f"{location[1]}.{f.stem}" for f in pathlib.Path(location[0]).glob("*.py")] for extension in extensions: await self.load_extension(extension) logger.info("Loaded extensions for Discord Bot.") async def on_wavelink_node_ready(self, payload: wavelink.NodeReadyEventPayload) -> None: node: wavelink.Node = payload.node logger.info("Wavelink successfully connected: %s. 
Resumed: %s", node.identifier, payload.resumed) async def on_command_error(self, context: commands.Context, exception: commands.CommandError) -> None: if isinstance(exception, commands.CommandNotFound): return logger.exception(exception) async def on_presence_update(self, before: discord.Member, after: discord.Member) -> None: if config["DEBUG"]["enabled"] is True: return if before.guild.id != 859565527343955998: return subbed: discord.Role | None = after.guild.get_role(SUBBED_ROLE_ID) if subbed not in after.roles: return bstream: discord.Streaming | None = None astream: discord.Streaming | None = None for activity in before.activities: if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch": bstream = activity for activity in after.activities: if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch": astream = activity if bstream is not None and astream is not None: return role: discord.Role = before.guild.get_role(LIVE_ROLE_ID) # type: ignore if not bstream and astream and role not in before.roles: await before.add_roles(role, reason="Started streaming on Twitch") elif not astream and bstream and role in after.roles: await after.remove_roles(role, reason="Stopped streaming on Twitch") def mbti_count(self) -> dict[str, int]: guild: discord.Guild | None = self.get_guild(859565527343955998) assert guild is not None roles: Sequence[discord.Role] = guild.roles
mbti_dict: dict[str, int] = dict.fromkeys(MBTI_TYPES, 0)
1
2023-11-15 23:04:42+00:00
2k
henriquesebastiao/poupy
project/apps/app/views/transfer.py
[ { "identifier": "TransferForm", "path": "project/apps/app/forms.py", "snippet": "class TransferForm(forms.Form):\n \"\"\"Form used to transfer money between accounts.\"\"\"\n\n description = forms.CharField(\n label='Description',\n widget=forms.TextInput(\n attrs={'placeholder': 'Insert the description of transaction'}\n ),\n )\n\n account_origin = forms.ModelChoiceField(\n queryset=Account.objects.all(),\n label='Source account',\n widget=forms.Select(),\n )\n\n account_destination = forms.ModelChoiceField(\n queryset=Account.objects.all(),\n label='Target account',\n widget=forms.Select(),\n )\n\n value = forms.DecimalField(\n label='Value',\n widget=forms.NumberInput(\n attrs={'placeholder': 'Insert the value of transaction'}\n ),\n )\n\n def clean(self):\n \"\"\"Validates that the account_origin and account_destination fields are not equal\"\"\"\n cleaned_data = super().clean()\n account_origin = cleaned_data.get('account_origin')\n account_destination = cleaned_data.get('account_destination')\n\n if account_origin == account_destination:\n raise ValidationError(\n {\n 'account_destination': 'Source account and target account must be different.'\n }\n )\n\n value = cleaned_data.get('value')\n\n if value is None or value <= 0:\n raise ValidationError(\n {'value': 'Value must be greater than zero.'}\n )" }, { "identifier": "Account", "path": "project/apps/app/models.py", "snippet": "class Account(CommonInfo):\n \"\"\"Model for the Account.\"\"\"\n\n name = models.CharField(max_length=55, null=False)\n balance = models.DecimalField(\n decimal_places=2,\n null=False,\n default=0.00,\n max_digits=14,\n validators=[MinValueValidator(Decimal('0.00'))],\n )\n\n def __str__(self):\n return self.name" }, { "identifier": "Transfer", "path": "project/apps/app/models.py", "snippet": "class Transfer(CommonInfo, TransactionMixin):\n \"\"\"Model for the Transfer.\"\"\"\n\n account_origin = models.ForeignKey(\n Account, on_delete=models.CASCADE, related_name='account_origin'\n )\n account_destination = models.ForeignKey(\n Account, on_delete=models.CASCADE, related_name='account_destination'\n )\n type = models.CharField(max_length=8, default='TRANSFER', null=False)\n\n def __str__(self):\n return self.description" } ]
from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import redirect from django.views.generic import FormView from ..forms import TransferForm from ..models import Account, Transfer
643
"""Views for transfer app.""" class TransferView(LoginRequiredMixin, FormView): """Transfer view page.""" login_url = 'login' template_name = 'pages/app/new_transfer.html'
"""Views for transfer app.""" class TransferView(LoginRequiredMixin, FormView): """Transfer view page.""" login_url = 'login' template_name = 'pages/app/new_transfer.html'
form_class = TransferForm
0
2023-11-17 21:05:05+00:00
2k
AuroraNemoia/yuusei
main.py
[ { "identifier": "log", "path": "utils.py", "snippet": "def log(text, type=\"normal\"):\n types = {\n \"quiet\": \"\\x1b[33;90m\",\n \"warn\": \"\\x1b[33;20m⚠️ WARN: \",\n \"error\": \"\\x1b[31;1m❌ ERROR: \",\n \"normal\": \"\\x1b[33;0m\"\n }\n print(types.get(type, types[\"normal\"]) + text + \"\\x1b[0m\")" }, { "identifier": "basepath", "path": "utils.py", "snippet": "def basepath():\n match platform:\n case \"windows\":\n return (os.path.abspath(__file__).rsplit('\\\\', 1)[0] + \"\\\\\").replace(\"\\\\\", \"/\")\n case _:\n return os.path.dirname(os.path.abspath(__file__).rsplit('\\\\', 1)[0] + \"\\\\\").replace(\"\\\\\", \"/\")" }, { "identifier": "tokenize", "path": "utils.py", "snippet": "def tokenize(text):\n tokens = tokenizer.tokenize(text)\n return len(tokens)" } ]
import requests import json import jstyleson import os import time import random import generate import history from utils import log, basepath, tokenize
701
# Constants config = jstyleson.loads(open(basepath() + "/config.json", "r").read()) # Initialize self self_name = config["personality"]["name"] self_persona = config["personality"]["persona"] self_instruct_pre = config["personality"]["pre"] self_instruct_post = config["personality"]["post"] use_chat_completions = config["settings"]["use_chat_completions"] force_pre = config["settings"]["force_pre"] # Have self reply to the current situation. def answer(): # What is the current situation? prompt = buildPrompt() def buildPrompt(): # Build the prompt frontmatter. if use_chat_completions == True or force_pre == True: frontmatter = self_instruct_pre + self_persona + self_instruct_post else: # When using TextCompletions, we do not need to instruct the model, the response prompt does it for us. frontmatter = self_persona + self_instruct_post frontmatter_length = tokenize(frontmatter) # What is our budget for message history? history_token_budget = config["settings"]["context_size"] - config["settings"]["max_new_tokens"] - frontmatter_length # Let's query messages until we hit the token limit. message_event_stack = [] # TODO: implement checking max_history_items event_stack = history.fetchEvents(6) token_length = 0 for event in event_stack: if event["event_type"] == "message": token_length += tokenize(event["content"]) if token_length > history_token_budget: break message_event_stack.append(event) # Build the message stack as a string. message_stack = "" for message in message_event_stack: message_stack += (message["name"] + ": " + message["content"] + "\n") # Build response prompt (unused in ChatCompletions). response_prompt = self_name + ": " prompt = frontmatter + message_stack if use_chat_completions == False: prompt += response_prompt
# Constants config = jstyleson.loads(open(basepath() + "/config.json", "r").read()) # Initialize self self_name = config["personality"]["name"] self_persona = config["personality"]["persona"] self_instruct_pre = config["personality"]["pre"] self_instruct_post = config["personality"]["post"] use_chat_completions = config["settings"]["use_chat_completions"] force_pre = config["settings"]["force_pre"] # Have self reply to the current situation. def answer(): # What is the current situation? prompt = buildPrompt() def buildPrompt(): # Build the prompt frontmatter. if use_chat_completions == True or force_pre == True: frontmatter = self_instruct_pre + self_persona + self_instruct_post else: # When using TextCompletions, we do not need to instruct the model, the response prompt does it for us. frontmatter = self_persona + self_instruct_post frontmatter_length = tokenize(frontmatter) # What is our budget for message history? history_token_budget = config["settings"]["context_size"] - config["settings"]["max_new_tokens"] - frontmatter_length # Let's query messages until we hit the token limit. message_event_stack = [] # TODO: implement checking max_history_items event_stack = history.fetchEvents(6) token_length = 0 for event in event_stack: if event["event_type"] == "message": token_length += tokenize(event["content"]) if token_length > history_token_budget: break message_event_stack.append(event) # Build the message stack as a string. message_stack = "" for message in message_event_stack: message_stack += (message["name"] + ": " + message["content"] + "\n") # Build response prompt (unused in ChatCompletions). response_prompt = self_name + ": " prompt = frontmatter + message_stack if use_chat_completions == False: prompt += response_prompt
log(prompt)
0
2023-11-14 05:04:40+00:00
2k
gunyu1019/async-client-decorator
example/single_session.py
[ { "identifier": "request", "path": "async_client_decorator/request.py", "snippet": "def request(\n method: str,\n path: str,\n directly_response: bool = False,\n header_parameter: list[str] = None,\n query_parameter: list[str] = None,\n form_parameter: list[str] = None,\n path_parameter: list[str] = None,\n body_parameter: Optional[str] = None,\n response_parameter: list[str] = None,\n **request_kwargs\n):\n \"\"\"A decoration for making request.\n Create a HTTP client-request, when decorated function is called.\n\n Parameters\n ----------\n method: str\n HTTP method (example. GET, POST)\n path: str\n Request path. Path connects to the base url.\n directly_response: bool\n Returns a `aiohttp.ClientResponse` without executing the function's body statement.\n header_parameter: list[str]\n Function parameter names used in the header\n query_parameter: list[str]\n Function parameter names used in the query(parameter)\n form_parameter: list[str]\n Function parameter names used in body form.\n path_parameter: list[str]\n Function parameter names used in the path.\n body_parameter: str\n Function parameter name used in the body.\n The body parameter must take only dict, list, or aiohttp.FormData.\n response_parameter: list[str]\n Function parameter name to store the HTTP result in.\n **request_kwargs\n\n Warnings\n --------\n Form_parameter and Body Parameter can only be used with one or the other.\n \"\"\"\n return _request(\n lambda self, _path, **kwargs: self.request(method, _path, **kwargs),\n path,\n directly_response,\n header_parameter,\n query_parameter,\n form_parameter,\n path_parameter,\n body_parameter,\n response_parameter,\n **request_kwargs\n )" }, { "identifier": "Query", "path": "async_client_decorator/query.py", "snippet": "class Query:\n \"\"\"This class is used when a function's parameters are used as query in an HTTP request.\n\n Examples\n --------\n >>> def function(query: str | Query):\n ... pass\n \"\"\"\n\n DEFAULT_KEY = \"__DEFAULT_QUERY__\"\n\n @staticmethod\n def default_query(key: str, value: Any):\n def decorator(func):\n if not hasattr(func, Query.DEFAULT_KEY):\n setattr(func, Query.DEFAULT_KEY, dict())\n getattr(func, Query.DEFAULT_KEY)[key] = value\n return func\n\n return decorator" }, { "identifier": "Session", "path": "async_client_decorator/session.py", "snippet": "class Session:\n \"\"\"A class to manage session for managing decoration functions.\"\"\"\n\n def __init__(self, base_url: str, directly_response: bool = False, **kwargs):\n self.directly_response = directly_response\n self.base_url = base_url\n\n self.session = aiohttp.ClientSession(self.base_url, **kwargs)\n\n @property\n def closed(self) -> bool:\n return self.session.closed\n\n async def close(self):\n return await self.session.close()\n\n async def request(self, method: str, path: str, **kwargs):\n return await self.session.request(method, path, **kwargs)\n\n async def get(self, path: str, **kwargs):\n return await self.session.get(path, **kwargs)\n\n async def post(self, path: str, **kwargs):\n return await self.session.post(path, **kwargs)\n\n async def options(self, path: str, **kwargs):\n return await self.session.options(path, **kwargs)\n\n async def delete(self, path: str, **kwargs):\n return await self.session.delete(path, **kwargs)\n\n @classmethod\n def single_session(\n cls, base_url: str, loop: asyncio.AbstractEventLoop = None, **session_kwargs\n ):\n \"\"\"A single session for one request.\n\n Parameters\n ----------\n base_url: str\n base url of the API. 
(for example, https://api.yhs.kr)\n loop: asyncio.AbstractEventLoop\n [event loop](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio-event-loop) used for processing HTTP requests.\n\n Examples\n --------\n The session is defined through the function's decoration.\n\n >>> @Session.single_session(\"https://api.yhs.kr\")\n ... @request(\"GET\", \"/bus/station\")\n ... async def station_query(session: Session, name: Query | str) -> aiohttp.ClientResponse:\n ... pass\n\n \"\"\"\n\n def decorator(func: RequestFunction):\n if not asyncio.iscoroutinefunction(func):\n raise TypeError(\"function %s must be coroutine.\".format(func.__name__))\n\n @functools.wraps(func)\n async def wrapper(*args, **kwargs):\n client = cls(base_url, loop, **session_kwargs)\n response = await func(client, *args, **kwargs)\n if not client.closed:\n await client.close()\n return response\n\n return wrapper\n\n return decorator" } ]
import asyncio import aiohttp from typing import NamedTuple from async_client_decorator import request, Session, Query
1,277
loop = asyncio.get_event_loop() class StationInfo(NamedTuple): displayId: str id: str name: str posX: float posY: float stationId: str type: int
loop = asyncio.get_event_loop() class StationInfo(NamedTuple): displayId: str id: str name: str posX: float posY: float stationId: str type: int
@Session.single_session("https://api.yhs.kr")
2
2023-11-14 06:41:19+00:00
2k
pmutua/CodeCraftGPT
components/lang_page.py
[ { "identifier": "PROGRAMMING_LANGUAGES", "path": "data/programming_languages.py", "snippet": "PROGRAMMING_LANGUAGES = (\n \"Python\", \"JavaScript\", \"Java\", \"C++\", \"C#\", \"Ruby\", \"Swift\", \"Go\", \"PHP\", \"Rust\", \"VB.net\",\n \"Kotlin\", \"TypeScript\", \"Scala\", \"Haskell\", \"Perl\", \"Objective-C\", \"Dart\", \"R\", \"Groovy\",\n \"Elixir\", \"Lua\", \"Julia\", \"Shell\", \"HTML\", \"CSS\", \"SQL\", \"MATLAB\", \"CoffeeScript\", \"F#\",\n \"Clojure\", \"Assembly\", \"Lisp\", \"Cobol\", \"Fortran\", \"Racket\", \"Ada\", \"Prolog\"\n)" }, { "identifier": "create_translation_prompt", "path": "prompts/translate_code_prompt.py", "snippet": "def create_translation_prompt(target_language, source_code):\n \"\"\"\n Create a chat prompt for a code translation task.\n\n Parameters:\n - target_language (str): The language to which the code should be translated.\n - source_code (str): The source code that needs to be translated.\n\n Returns:\n langchain.chat_models.ChatPromptTemplate: The generated chat prompt template.\n \"\"\"\n system_template = \"You are a code translator. Your task is to translate the given source code to {target_language}.\"\n system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)\n\n human_template = \"Please translate the following source code to {target_language}: '{source_code}'.\"\n human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)\n\n chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])\n\n return chat_prompt" } ]
from typing import Type from langchain.chains import LLMChain from langchain.chat_models import ChatOpenAI from data.programming_languages import PROGRAMMING_LANGUAGES from prompts.translate_code_prompt import create_translation_prompt import streamlit as st
674
""" LangLink - Code Translation and Cross-Language Compatibility Overcome language barriers with LangLink, an AI-powered tool facilitating smooth code translation between programming languages. Developers can confidently migrate codebases, ensuring compatibility and seamless transitions across different languages. """ def show_lang_page(chat: Type[ChatOpenAI]): """ Displays the LangLink page for code translation. Parameters: - openai_api_key (str): The API key for OpenAI. Returns: None """ st.title("LangLink - Code Translation and Cross-Language Compatibility") st.markdown('Overcome language barriers with LangLink, an AI-powered tool facilitating smooth ' 'code translation between programming languages. Developers can confidently migrate ' 'codebases, ensuring compatibility and seamless transitions across different languages.') with st.form(key="lang_form"): source_code = st.text_area("Enter source code") target_language = st.selectbox("Select programming language", PROGRAMMING_LANGUAGES) submit_button = st.form_submit_button(label='Submit') if submit_button: st.text(f"Translating code snippet to {target_language}................✨")
""" LangLink - Code Translation and Cross-Language Compatibility Overcome language barriers with LangLink, an AI-powered tool facilitating smooth code translation between programming languages. Developers can confidently migrate codebases, ensuring compatibility and seamless transitions across different languages. """ def show_lang_page(chat: Type[ChatOpenAI]): """ Displays the LangLink page for code translation. Parameters: - openai_api_key (str): The API key for OpenAI. Returns: None """ st.title("LangLink - Code Translation and Cross-Language Compatibility") st.markdown('Overcome language barriers with LangLink, an AI-powered tool facilitating smooth ' 'code translation between programming languages. Developers can confidently migrate ' 'codebases, ensuring compatibility and seamless transitions across different languages.') with st.form(key="lang_form"): source_code = st.text_area("Enter source code") target_language = st.selectbox("Select programming language", PROGRAMMING_LANGUAGES) submit_button = st.form_submit_button(label='Submit') if submit_button: st.text(f"Translating code snippet to {target_language}................✨")
chat_prompt = create_translation_prompt(target_language,source_code)
1
2023-11-13 10:45:28+00:00
2k
itzshukla/STRANGER-USERBOT2.0
Zaid/modules/private/pmguard.py
[ { "identifier": "get_approved_users", "path": "Zaid/database/pmpermitdb.py", "snippet": "async def get_approved_users():\n results = await collection.find_one({\"_id\": \"Approved\"})\n if results:\n return results[\"users\"]\n else:\n return []" }, { "identifier": "pm_guard", "path": "Zaid/database/pmpermitdb.py", "snippet": "async def pm_guard():\n result = await collection.find_one({\"_id\": 1})\n if not result:\n return False\n if not result[\"pmpermit\"]:\n return False\n else:\n return True" }, { "identifier": "LOG_GROUP", "path": "config.py", "snippet": "LOG_GROUP = getenv(\"LOG_GROUP\")" }, { "identifier": "PM_LOGGER", "path": "config.py", "snippet": "PM_LOGGER = getenv(\"PM_LOGGER\")" } ]
from pyrogram import filters, Client from pyrogram.types import Message from pyrogram.methods import messages from Zaid.database.pmpermitdb import get_approved_users, pm_guard from config import LOG_GROUP, PM_LOGGER import asyncio import Zaid.database.pmpermitdb as Zaid
894
FLOOD_CTRL = 0 ALLOWED = [] USERS_AND_WARNS = {} async def denied_users(filter, client: Client, message: Message): if not await pm_guard(): return False if message.chat.id in (await get_approved_users()): return False else: return True def get_arg(message): msg = message.text msg = msg.replace(" ", "", 1) if msg[1] == " " else msg split = msg[1:].replace("\n", " \n").split(" ") if " ".join(split[1:]).strip() == "": return "" return " ".join(split[1:]) @Client.on_message(filters.command("setlimit", ["."]) & filters.me) async def pmguard(client, message): arg = get_arg(message) if not arg: await message.edit("**Set limit to what?**") return await Zaid.set_limit(int(arg)) await message.edit(f"**Limit set to {arg}**") @Client.on_message(filters.command("setblockmsg", ["."]) & filters.me) async def setpmmsg(client, message): arg = get_arg(message) if not arg: await message.edit("**What message to set**") return if arg == "default": await Zaid.set_block_message(Zaid.BLOCKED) await message.edit("**Block message set to default**.") return await Zaid.set_block_message(f"`{arg}`") await message.edit("**Custom block message set**") @Client.on_message(filters.command(["allow", "ap", "approve", "a"], ["."]) & filters.me & filters.private) async def allow(client, message): chat_id = message.chat.id pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings() await Zaid.allow_user(chat_id) await message.edit(f"**I have allowed [you](tg://user?id={chat_id}) to PM me.**") async for message in client.search_messages( chat_id=message.chat.id, query=pm_message, limit=1, from_user="me" ): await message.delete() USERS_AND_WARNS.update({chat_id: 0}) @Client.on_message(filters.command(["deny", "dap", "disapprove", "dapp"], ["."]) & filters.me & filters.private) async def deny(client, message): chat_id = message.chat.id await Zaid.deny_user(chat_id) await message.edit(f"**I have denied [you](tg://user?id={chat_id}) to PM me.**") @Client.on_message( filters.private & filters.create(denied_users) & filters.incoming & ~filters.service & ~filters.me & ~filters.bot ) async def reply_pm(app: Client, message): global FLOOD_CTRL pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings() user = message.from_user.id user_warns = 0 if user not in USERS_AND_WARNS else USERS_AND_WARNS[user]
FLOOD_CTRL = 0 ALLOWED = [] USERS_AND_WARNS = {} async def denied_users(filter, client: Client, message: Message): if not await pm_guard(): return False if message.chat.id in (await get_approved_users()): return False else: return True def get_arg(message): msg = message.text msg = msg.replace(" ", "", 1) if msg[1] == " " else msg split = msg[1:].replace("\n", " \n").split(" ") if " ".join(split[1:]).strip() == "": return "" return " ".join(split[1:]) @Client.on_message(filters.command("setlimit", ["."]) & filters.me) async def pmguard(client, message): arg = get_arg(message) if not arg: await message.edit("**Set limit to what?**") return await Zaid.set_limit(int(arg)) await message.edit(f"**Limit set to {arg}**") @Client.on_message(filters.command("setblockmsg", ["."]) & filters.me) async def setpmmsg(client, message): arg = get_arg(message) if not arg: await message.edit("**What message to set**") return if arg == "default": await Zaid.set_block_message(Zaid.BLOCKED) await message.edit("**Block message set to default**.") return await Zaid.set_block_message(f"`{arg}`") await message.edit("**Custom block message set**") @Client.on_message(filters.command(["allow", "ap", "approve", "a"], ["."]) & filters.me & filters.private) async def allow(client, message): chat_id = message.chat.id pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings() await Zaid.allow_user(chat_id) await message.edit(f"**I have allowed [you](tg://user?id={chat_id}) to PM me.**") async for message in client.search_messages( chat_id=message.chat.id, query=pm_message, limit=1, from_user="me" ): await message.delete() USERS_AND_WARNS.update({chat_id: 0}) @Client.on_message(filters.command(["deny", "dap", "disapprove", "dapp"], ["."]) & filters.me & filters.private) async def deny(client, message): chat_id = message.chat.id await Zaid.deny_user(chat_id) await message.edit(f"**I have denied [you](tg://user?id={chat_id}) to PM me.**") @Client.on_message( filters.private & filters.create(denied_users) & filters.incoming & ~filters.service & ~filters.me & ~filters.bot ) async def reply_pm(app: Client, message): global FLOOD_CTRL pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings() user = message.from_user.id user_warns = 0 if user not in USERS_AND_WARNS else USERS_AND_WARNS[user]
if PM_LOGGER:
3
2023-11-13 18:19:50+00:00
2k
UWNetworksLab/adn-compiler
compiler/element/optimize/consolidate.py
[ { "identifier": "ELEMENT_LOG", "path": "compiler/element/logger.py", "snippet": "ELEMENT_LOG = logging.getLogger(\"ir\")" }, { "identifier": "Expr", "path": "compiler/element/node.py", "snippet": "class Expr(Node):\n def __init__(self, lhs: Expr, op: Operator, rhs: Expr):\n self.lhs = lhs\n self.op = op\n self.rhs = rhs\n self.type = \"unknown\"" }, { "identifier": "Identifier", "path": "compiler/element/node.py", "snippet": "class Identifier(Node):\n def __init__(self, name: str):\n self.name = name" }, { "identifier": "Internal", "path": "compiler/element/node.py", "snippet": "class Internal(Node):\n def __init__(\n self,\n internal: List[\n Tuple[\n Identifier,\n Type,\n ConsistencyDecorator,\n CombinerDecorator,\n PersistenceDecorator,\n ]\n ],\n ):\n self.internal = internal" }, { "identifier": "MethodCall", "path": "compiler/element/node.py", "snippet": "class MethodCall(Expr):\n def __init__(self, obj: Identifier, method: MethodType, args: List[Expr]):\n self.obj = obj\n self.method = method\n self.args = args" }, { "identifier": "Procedure", "path": "compiler/element/node.py", "snippet": "class Procedure(Node):\n def __init__(self, name: str, params: List[Identifier], body: List[Statement]):\n self.name = name\n self.params = params\n self.body = body" }, { "identifier": "Visitor", "path": "compiler/element/visitor.py", "snippet": "class Visitor(ABC):\n def visitNode(self, node: Node, ctx):\n raise Exception(f\"visit function for {node.__class__.__name__} not implemented\")\n\n def visitProgram(self, node: Program, ctx):\n return self.visitNode(node)\n\n def visitInternal(self, node: Internal, ctx):\n return self.visitNode(node)\n\n def visitProcedure(self, node: Procedure, ctx):\n return self.visitNode(node)\n\n def visitStatement(self, node: Statement, ctx):\n return self.visitNode(node)\n\n def visitMatch(self, node: Match, ctx):\n return self.visitNode(node)\n\n def visitAssign(self, node: Assign, ctx):\n return self.visitNode(node)\n\n def visitPattern(self, node: Pattern, ctx):\n return self.visitNode(node)\n\n def visitExpr(self, node: Expr, ctx):\n return self.visitNode(node)\n\n def visitIdentifier(self, node: Identifier, ctx):\n return self.visitNode(node)\n\n def visitFuncCall(self, node: FuncCall, ctx):\n return self.visitNode(node)\n\n def visitMethodCall(self, node: MethodCall, ctx):\n return self.visitNode(node)\n\n def visitSend(self, node: Send, ctx):\n return self.visitNode(node)\n\n def visitLiteral(self, node: Literal, ctx):\n return self.visitNode(node)" } ]
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple, TypeVar
from compiler.element.logger import ELEMENT_LOG as LOG
from compiler.element.node import *
from compiler.element.node import Expr, Identifier, Internal, MethodCall, Procedure
from compiler.element.visitor import Visitor
952
def consolidate(irs: List[Program]) -> Program: while len(irs) > 1: left = irs.pop(0) right = irs.pop(0) new_prog = Program( Internal([]), Procedure("init", [], []), Procedure("req", [], []), Procedure("resp", [], []), ) new_prog.definition.internal = deepcopy( left.definition.internal + right.definition.internal ) InitConsolidator().visitProcedure(new_prog.init, (left.init, right.init)) ProcedureConsolidator().visitProcedure( new_prog.req, (deepcopy(left.req), deepcopy(right.req)) ) ProcedureConsolidator().visitProcedure( new_prog.resp, (deepcopy(right.resp), deepcopy(left.resp)) ) irs.append(new_prog) return irs[0] class InitConsolidator(Visitor): def __init__(self): pass def visitNode(self, node: Node, ctx) -> str:
def consolidate(irs: List[Program]) -> Program: while len(irs) > 1: left = irs.pop(0) right = irs.pop(0) new_prog = Program( Internal([]), Procedure("init", [], []), Procedure("req", [], []), Procedure("resp", [], []), ) new_prog.definition.internal = deepcopy( left.definition.internal + right.definition.internal ) InitConsolidator().visitProcedure(new_prog.init, (left.init, right.init)) ProcedureConsolidator().visitProcedure( new_prog.req, (deepcopy(left.req), deepcopy(right.req)) ) ProcedureConsolidator().visitProcedure( new_prog.resp, (deepcopy(right.resp), deepcopy(left.resp)) ) irs.append(new_prog) return irs[0] class InitConsolidator(Visitor): def __init__(self): pass def visitNode(self, node: Node, ctx) -> str:
LOG.error("InitConsolidator: visitNode not implemented")
1
2023-11-13 07:31:52+00:00
2k
sunholo-data/sunholo-py
sunholo/components/llm.py
[ { "identifier": "setup_logging", "path": "sunholo/logging.py", "snippet": "def setup_logging(self, log_level=logging.INFO, logger_name=None):\n if log_level:\n self.log_level = log_level\n if logger_name:\n self.logger_name = logger_name\n\n try:\n caller_info = self._get_caller_info()\n if not is_running_on_gcp():\n logging.basicConfig(level=self.log_level, format='%(asctime)s - %(levelname)s - %(message)s')\n logging.info(f\"Standard logging: {caller_info['file']}\")\n return logging\n \n print(f\"Cloud logging for {caller_info['file']}\")\n self.client.setup_logging(log_level=self.log_level)\n\n return self # Return the instance itself on success\n except Exception as e:\n # If there's an exception, use standard Python logging as a fallback\n logging.basicConfig(level=self.log_level, format='%(asctime)s - %(levelname)s - %(message)s')\n logging.warning(f\"Failed to set up Google Cloud Logging. Using standard logging. Error: {e}\")\n return logging" }, { "identifier": "load_config_key", "path": "sunholo/utils/config.py", "snippet": "def load_config_key(key: str, vector_name: str, filename: str=None) -> str:\n from ..logging import setup_logging\n logging = setup_logging()\n\n assert isinstance(key, str), f\"key must be a string got a {type(key)}\"\n assert isinstance(vector_name, str), f\"vector_name must be a string, got a {type(vector_name)}\"\n \n config, filename = load_config(filename)\n logging.info(f\"Fetching {key} for {vector_name}\")\n llm_config = config.get(vector_name, None)\n if llm_config is None:\n raise ValueError(f\"No config array was found for {vector_name} in {filename}\")\n \n logging.info(f'llm_config: {llm_config} for {vector_name} - fetching \"{key}\"')\n\n key_value = llm_config.get(key, None)\n \n return key_value" }, { "identifier": "load_config", "path": "sunholo/utils/config.py", "snippet": "def load_config(filename: str=None) -> (dict, str):\n from ..logging import setup_logging\n logging = setup_logging()\n if filename is None:\n filename = os.getenv(\"_CONFIG_FILE\", None)\n if filename is None:\n raise ValueError(\"No _CONFIG_FILE env value specified\")\n\n # Join the script directory with the filename\n config_path = filename\n\n logging.info(f\"Loading config file {os.getcwd()}/{config_path}\")\n\n with open(config_path, 'r') as f:\n if filename.endswith(\".json\"):\n config = json.load(f)\n elif filename.endswith(\".yaml\") or filename.endswith(\".yml\"):\n config = yaml.safe_load(f)\n else:\n raise ValueError(f\"Unsupported config file format: {config_path}. The supported formats are JSON and YAML.\")\n \n return config, filename" }, { "identifier": "get_module_filepath", "path": "sunholo/utils/config.py", "snippet": "def get_module_filepath(filepath):\n from ..logging import setup_logging\n logging = setup_logging()\n # Get the root directory of this Python script\n dir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n # Build the full filepath by joining the directory with the filename\n filepath = os.path.join(dir_path, filepath)\n\n logging.info(f\"Found filepath {filepath}\")\n return filepath" } ]
from ..logging import setup_logging
from ..utils.config import load_config_key, load_config, get_module_filepath
from langchain.chat_models import ChatOpenAI
from langchain.llms import VertexAI
from langchain.llms import VertexAI
from ..patches.langchain.vertexai import VertexAIModelGarden
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatVertexAI
from langchain.chat_models import ChatVertexAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings import VertexAIEmbeddings
from langchain_google_genai import GoogleGenerativeAIEmbeddings
1,108
# Copyright [2023] [Holosun ApS] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logging = setup_logging() def pick_llm(vector_name): logging.debug('Picking llm')
# Copyright [2023] [Holosun ApS] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logging = setup_logging() def pick_llm(vector_name): logging.debug('Picking llm')
llm_str = load_config_key("llm", vector_name, filename = "config/llm_config.yaml")
1
2023-11-14 14:53:19+00:00
2k
atlantic-quantum/Shipyard
tests/passes/semantic_analysis/test_scoped_symbol_table.py
[ { "identifier": "scoped_symbol_table", "path": "shipyard/passes/semantic_analysis/scoped_symbol_table.py", "snippet": "class ScopedSymbolTable:\nclass CalScopedSymbolTable(ScopedSymbolTable):\n def __init__(\n self,\n scope_name: str,\n enclosing_scope: \"ScopedSymbolTable\" = None,\n ) -> None:\n def _init_builtins(self):\n def __str__(self) -> str:\n def insert(self, symbol: Symbol):\n def lookup(self, name: str, current_scope_only: bool = False) -> Symbol:\n def keys(self, current_scope_only=False) -> list[str]:\n def __init__(\n self,\n scope_name: str,\n enclosing_scope: \"ScopedSymbolTable\" = None,\n init_cal: bool = False,\n ) -> None:\n def _init_cal_builtins(self):" }, { "identifier": "symbols", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "_BUILTIN_CLASSICAL_SYMBOL_NAMES = [\n \"ANGLE\",\n \"BIT\",\n \"BITSTRING\",\n \"BOOL\",\n \"COMPLEX\",\n \"DURATION\",\n \"FLOAT\",\n \"IMAGINARY\",\n \"INT\",\n \"STRETCH\",\n \"UINT\",\n \"PORT\",\n \"FRAME\",\n \"WAVEFORM\",\n \"ARRAY\",\n]\n_BUILTIN_QUANTUM_SYMBOL_NAMES = [\"QUBIT\"]\nBUILTIN_TYPES = [\n angle_type,\n array_type,\n bit_type,\n bitstring_type,\n bool_type,\n complex_type,\n duration_type,\n float_type,\n imaginary_type,\n int_type,\n qubit_type,\n stretch_type,\n uint_type,\n]\nBUILTIN_CAL_TYPES = [\n frame_type,\n port_type,\n waveform_type,\n]\n_ALLOWED_ARRAY_TYPES = [\n \"ANGLE\",\n \"BIT\",\n \"BOOL\",\n \"COMPLEX\",\n \"FLOAT\",\n \"INT\",\n \"UINT\",\n]\nclass Symbol(BaseModel):\nclass BuiltinSymbol(Symbol):\nclass BuiltinCalSymbol(Symbol):\nclass ArraySymbol(Symbol):\nclass AliasSymbol(Symbol):\nclass ClassicalSymbol(Symbol):\nclass LiteralSymbol(ClassicalSymbol):\nclass ConstantSymbol(Symbol):\nclass IOSymbol(Symbol):\nclass QuantumSymbol(Symbol):\nclass GrammarSymbol(Symbol):\nclass SubroutineSymbol(Symbol):\nclass ExternSymbol(SubroutineSymbol):\nclass GateSymbol(SubroutineSymbol):\nclass DefcalSymbol(GateSymbol):\n def force_kind_uppercase(cls, kind: str) -> str:\ndef kind_of_builtin_is_none(kind: str) -> str:\n def kind_is_array(cls, kind: str) -> str:\n def array_base_type_must_be_of_allowed_type(cls, base_type: str) -> str:\ndef kind_must_be_name_of_classical_type(kind: str) -> str:\n def kind_must_be_name_of_quantum_type(cls, kind: str) -> str:\n def return_classical_or_none(cls, return_type: str):" } ]
import pytest
from shipyard.passes.semantic_analysis import scoped_symbol_table as sst
from shipyard.passes.semantic_analysis import symbols
1,413
""" The scoped symbol table is intended to be used by the Semantic Analyser module. An 'end-to-end' use case example will be included in the tests for the Semantic Analyser ToDo update working when adding semantic analyser tests """ SYMBOL_LISTS = [sst.BUILTIN_TYPES, sst.BUILTIN_ZI_EXP] CAL_SYMBOL_LISTS = [sst.BUILTIN_CAL_TYPES, sst.BUILTIN_OPENPULSE, sst.BUILTIN_ZI_WFM] @pytest.fixture(name="main_table") def fixture_main_table() -> sst.ScopedSymbolTable: """Fixture for creating the 'main' ScopedSymbolTable this table has no enclosing scope Returns: sst.ScopedSymbolTable: symbol table with no enclosing scope """ return sst.ScopedSymbolTable("main") @pytest.fixture(name="nested_table") def fixture_nested_table(main_table: sst.ScopedSymbolTable) -> sst.ScopedSymbolTable: """Fixture for creating a nested ScopedSymbolTable the 'main' symbol table encloses this table Args: main_table (sst.ScopedSymbolTable): used as enclosing scope for this table Returns: sst.ScopedSymbolTable: symbol table with enclosing scope """ return sst.ScopedSymbolTable("nested", enclosing_scope=main_table) @pytest.fixture(name="cal_table") def fixture_cal_table(main_table: sst.ScopedSymbolTable) -> sst.CalScopedSymbolTable: """ Fixture for creating 'main' a ScopedSymbolTable for openPulse code, has the 'main' symbol table as an enclosing scope and is initialised with init_cal set to True Args: main_table (sst.ScopedSymbolTable): used as enclosing scope for this table Returns: sst.CalScopedSymbolTable: main calibration symbol table """ return sst.CalScopedSymbolTable("cal", enclosing_scope=main_table, init_cal=True) @pytest.fixture(name="defcal_table") def fixture_defcal_table( cal_table: sst.CalScopedSymbolTable, ) -> sst.CalScopedSymbolTable: """ Fixture for creating a nested ScopedSymbolTable for openPulse code, has the 'main calibration' (cal_table) as an enclosing scope Args: cal_table (sst.CalScopedSymbolTable): used as enclosing scope for this table Returns: sst.CalScopedSymbolTable: nested calibration symbol table """ return sst.CalScopedSymbolTable("defcal", enclosing_scope=cal_table) def test_scoped_symbol_table_basic(main_table: sst.ScopedSymbolTable): """Test basic insertion and lookup in table without enclosing scope""" # test that built in symbols have been inserted for symbol_list in SYMBOL_LISTS: symbol_names = [] for symbol in symbol_list: assert main_table.lookup(symbol.name) is symbol symbol_names.append(symbol.name) # test that names of builtin symbols are returned by the keys method for name in symbol_names: assert name in main_table.keys() assert name in main_table.keys(current_scope_only=True) # test inserting a symbol and lookin it up and name being returned by keys()
""" The scoped symbol table is intended to be used by the Semantic Analyser module. An 'end-to-end' use case example will be included in the tests for the Semantic Analyser ToDo update working when adding semantic analyser tests """ SYMBOL_LISTS = [sst.BUILTIN_TYPES, sst.BUILTIN_ZI_EXP] CAL_SYMBOL_LISTS = [sst.BUILTIN_CAL_TYPES, sst.BUILTIN_OPENPULSE, sst.BUILTIN_ZI_WFM] @pytest.fixture(name="main_table") def fixture_main_table() -> sst.ScopedSymbolTable: """Fixture for creating the 'main' ScopedSymbolTable this table has no enclosing scope Returns: sst.ScopedSymbolTable: symbol table with no enclosing scope """ return sst.ScopedSymbolTable("main") @pytest.fixture(name="nested_table") def fixture_nested_table(main_table: sst.ScopedSymbolTable) -> sst.ScopedSymbolTable: """Fixture for creating a nested ScopedSymbolTable the 'main' symbol table encloses this table Args: main_table (sst.ScopedSymbolTable): used as enclosing scope for this table Returns: sst.ScopedSymbolTable: symbol table with enclosing scope """ return sst.ScopedSymbolTable("nested", enclosing_scope=main_table) @pytest.fixture(name="cal_table") def fixture_cal_table(main_table: sst.ScopedSymbolTable) -> sst.CalScopedSymbolTable: """ Fixture for creating 'main' a ScopedSymbolTable for openPulse code, has the 'main' symbol table as an enclosing scope and is initialised with init_cal set to True Args: main_table (sst.ScopedSymbolTable): used as enclosing scope for this table Returns: sst.CalScopedSymbolTable: main calibration symbol table """ return sst.CalScopedSymbolTable("cal", enclosing_scope=main_table, init_cal=True) @pytest.fixture(name="defcal_table") def fixture_defcal_table( cal_table: sst.CalScopedSymbolTable, ) -> sst.CalScopedSymbolTable: """ Fixture for creating a nested ScopedSymbolTable for openPulse code, has the 'main calibration' (cal_table) as an enclosing scope Args: cal_table (sst.CalScopedSymbolTable): used as enclosing scope for this table Returns: sst.CalScopedSymbolTable: nested calibration symbol table """ return sst.CalScopedSymbolTable("defcal", enclosing_scope=cal_table) def test_scoped_symbol_table_basic(main_table: sst.ScopedSymbolTable): """Test basic insertion and lookup in table without enclosing scope""" # test that built in symbols have been inserted for symbol_list in SYMBOL_LISTS: symbol_names = [] for symbol in symbol_list: assert main_table.lookup(symbol.name) is symbol symbol_names.append(symbol.name) # test that names of builtin symbols are returned by the keys method for name in symbol_names: assert name in main_table.keys() assert name in main_table.keys(current_scope_only=True) # test inserting a symbol and lookin it up and name being returned by keys()
c_symbol = symbols.ClassicalSymbol(name="test", kind=symbols.angle_type.name)
1
2023-11-16 17:37:29+00:00
2k
PrAsAnNaRePo/LocalAgent
localagent/interpreter.py
[ { "identifier": "get_prompt_from_template", "path": "localagent/utils.py", "snippet": "def get_prompt_from_template(system, history, human_, assistant_, eos_token):\n for i in history:\n if i['role'] == 'user':\n system += f'{human_}{i[\"content\"]}{eos_token}'\n if i['role'] == 'assistant':\n system += f'{assistant_}{i[\"content\"]}{eos_token}'\n\n if history[-1]['role'] == 'user':\n system += f'{assistant_}'\n\n return system" }, { "identifier": "internal_monologue", "path": "localagent/utils.py", "snippet": "def internal_monologue(msg):\n # ANSI escape code for italic is '\\x1B[3m'\n print(f\"\\x1B[3m{Fore.LIGHTBLACK_EX}💭 {msg}{Style.RESET_ALL}\")" }, { "identifier": "run", "path": "localagent/gen.py", "snippet": "def run(uri, prompt, force_model=False):\n if force_model:\n prompt += \"\\nThought:\"\n request = {\n 'prompt': prompt,\n 'max_new_tokens': 500,\n 'auto_max_new_tokens': False,\n 'max_tokens_second': 0,\n 'do_sample': True,\n 'temperature': 0.01,\n 'repetition_penalty': 1.24,\n 'temperature': 0.1,\n 'skip_special_tokens': True,\n 'stopping_strings': ['<|end_of_turn|>', '<|im_end|>', 'Observation']\n }\n\n response = requests.post(uri, json=request)\n if response.status_code == 200:\n result = response.json()['results'][0]['text']\n return '\\nThought:'+result if force_model else result" }, { "identifier": "stream_run", "path": "localagent/gen.py", "snippet": "def stream_run(uri, prompt, force_model=False):\n return asyncio.run(print_response_stream(uri, prompt, force_model))" }, { "identifier": "ollama_generate", "path": "localagent/gen.py", "snippet": "def ollama_generate(model_name, prompt=None, system=None, template=None, stream=False, format=\"\", context=None, options=None, callback=None, force_model=False):\n try:\n if template is not None and force_model:\n template += '\\nThought:'\n url = f\"{BASE_URL}/api/generate\"\n payload = {\n \"model\": model_name, \n \"prompt\": prompt, \n \"system\": system, \n \"template\": template, \n \"context\": context, \n \"options\": options,\n \"format\": format,\n }\n \n # Remove keys with None values\n payload = {k: v for k, v in payload.items() if v is not None}\n \n with requests.post(url, json=payload, stream=True) as response:\n response.raise_for_status()\n \n # Creating a variable to hold the context history of the final chunk\n final_context = None\n \n # Variable to hold concatenated response strings if no callback is provided\n full_response = \"\"\n\n # Iterating over the response line by line and displaying the details\n for line in response.iter_lines():\n if line:\n # Parsing each line (JSON chunk) and extracting the details\n chunk = json.loads(line)\n \n # If a callback function is provided, call it with the chunk\n if callback:\n callback(chunk)\n else:\n # If this is not the last chunk, add the \"response\" field value to full_response and print it\n if not chunk.get(\"done\"):\n response_piece = chunk.get(\"response\", \"\")\n full_response += response_piece\n if 'Observation' in full_response:\n break\n if stream:\n print(response_piece, end=\"\", flush=True)\n \n # Check if it's the last chunk (done is true)\n if chunk.get(\"done\"):\n final_context = chunk.get(\"context\")\n full_response = full_response.replace('Observation', '')\n # Return the full response and the final context\n return '\\nThought:'+full_response if force_model else full_response, final_context\n \n except requests.exceptions.RequestException as e:\n print(f\"An error occurred: {e}\")\n return None, None" } ]
import subprocess
import sys
from localagent.utils import get_prompt_from_template, internal_monologue
from localagent.gen import run, stream_run, ollama_generate
from rich.console import Console
1,594
console = Console() CODE_INTERPRETER = """You are Open Interpreter, a world-class programmer that can complete any goal by executing code. First, write a plan. **Always recap the plan between each code block**. When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task. If you want to send data between programming languages, save the data to a txt or json. You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again. You can install new packages. When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in. Write messages to the user in Markdown. In general, try to **make plans** with as few steps as possible. Remember that one code block is considered as a single file and you can't able to access the variable from first code blocks in the second one. You are capable of **any** task. Don't install libraries using '!' in the python code block instead use seperate bash code block. As a open interpreter you should mostly respond with codes more than a text. Always tries to print the things up so you can know them via output. """ def extract_code(string): code_blocks = [] parts = string.split("```") for i in range(1, len(parts), 2): lines = parts[i].split("\n") lang = lines[0] code = "\n".join(lines[1:]) code_blocks.append((lang, code)) return code_blocks class Interpreter: def __init__(self, exec, max_try, human_, assistant_, eos_token, stream=False) -> None: self.history = [] self.exec = exec self.max_try = max_try self.human_ = human_ self.assistant_ = assistant_ self.eos_token = eos_token self.stream = stream def execute_code(self, lang, code, timeout=10): if lang.lower() == 'python': try: output = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, timeout=timeout) except subprocess.TimeoutExpired: print(f"Execution of Python code timed out after {timeout} seconds.") return None elif lang.lower() == 'bash': try: output = subprocess.run(code, shell=True, capture_output=True, text=True, timeout=timeout) except subprocess.TimeoutExpired: print(f"Execution of Bash code timed out after {timeout} seconds.") return None else: print('Only supported python and ') return None return output def __call__(self, task): print('\n')
console = Console() CODE_INTERPRETER = """You are Open Interpreter, a world-class programmer that can complete any goal by executing code. First, write a plan. **Always recap the plan between each code block**. When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task. If you want to send data between programming languages, save the data to a txt or json. You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again. You can install new packages. When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in. Write messages to the user in Markdown. In general, try to **make plans** with as few steps as possible. Remember that one code block is considered as a single file and you can't able to access the variable from first code blocks in the second one. You are capable of **any** task. Don't install libraries using '!' in the python code block instead use seperate bash code block. As a open interpreter you should mostly respond with codes more than a text. Always tries to print the things up so you can know them via output. """ def extract_code(string): code_blocks = [] parts = string.split("```") for i in range(1, len(parts), 2): lines = parts[i].split("\n") lang = lines[0] code = "\n".join(lines[1:]) code_blocks.append((lang, code)) return code_blocks class Interpreter: def __init__(self, exec, max_try, human_, assistant_, eos_token, stream=False) -> None: self.history = [] self.exec = exec self.max_try = max_try self.human_ = human_ self.assistant_ = assistant_ self.eos_token = eos_token self.stream = stream def execute_code(self, lang, code, timeout=10): if lang.lower() == 'python': try: output = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, timeout=timeout) except subprocess.TimeoutExpired: print(f"Execution of Python code timed out after {timeout} seconds.") return None elif lang.lower() == 'bash': try: output = subprocess.run(code, shell=True, capture_output=True, text=True, timeout=timeout) except subprocess.TimeoutExpired: print(f"Execution of Bash code timed out after {timeout} seconds.") return None else: print('Only supported python and ') return None return output def __call__(self, task): print('\n')
internal_monologue("Interpreter is executing the code...\n")
1
2023-11-10 07:47:41+00:00
2k
Cymaphore/orfodon-service
orfodon_service.py
[ { "identifier": "config", "path": "config.py", "snippet": "" }, { "identifier": "feeds", "path": "feeds.py", "snippet": "" }, { "identifier": "hashtag_replace", "path": "hashtag_modification.py", "snippet": "" }, { "identifier": "hashtag_blacklist", "path": "hashtag_modification.py", "snippet": "" }, { "identifier": "category_aliases", "path": "hashtag_modification.py", "snippet": "" }, { "identifier": "oewa_sport_aliases", "path": "hashtag_modification.py", "snippet": "" }, { "identifier": "oewa_bypass", "path": "hashtag_modification.py", "snippet": "" } ]
import re
import yaml
import copy
import feedparser
import time
import requests
import hashlib
from datetime import datetime
from bs4 import BeautifulSoup
from mastodon import Mastodon
from pprint import pprint
from config import config
from credentials import credentials
from feeds import feeds
from hashtag_modification import hashtag_replace
from hashtag_modification import hashtag_blacklist
from hashtag_modification import category_aliases
from hashtag_modification import oewa_sport_aliases
from hashtag_modification import oewa_bypass
1,155
hashtag_wordlist = [] ############################################################################# ## # Main function # Call all the stages in correct order def main(): # Load hashtag wordlists load_hashtags() # Load previous state, initialize new state load_state() # Load the configured feeds and preprocess text load_feeds() # Grab post references from other channels for boosting, keep id from oldState grab_posts() # Post newly generated articles to the channels post_feeds() # Save state for next cycle save_state() ############################################################################# ## # Load hashtag wordlists def load_hashtags(): hashtags_filename = config["files"]["global_hashtags"] if True: hashtags_file = open(hashtags_filename, "r") global hashtag_wordlist hashtag_wordlist = hashtags_file.read().splitlines() ############################################################################# ## # Load the configured feeds and preprocess text def load_state(): global state global oldState global hashtag_wordlist try: with open(config["files"]["state"]) as fh: oldState = yaml.load(fh, yaml.SafeLoader) except: oldState = {} for feed in feeds: if not feed["id"] in state: state[feed["id"]] = {} if not feed["id"] in oldState: oldState[feed["id"]] = {} ############################################################################# ## # Save state for next cycle def save_state(): with open(config["files"]["state"], 'w') as fh: fh.write(yaml.dump(state, default_flow_style=False)) ############################################################################# ## # Load the configured feeds and preprocess text def load_feeds(): global state global oldState for feed in feeds: feedStateOld = oldState[feed["id"]] feedState = state[feed["id"]] if "url" in feed: entries = feedparser.parse(feed["url"]).entries if len(entries) < 1: raise RuntimeError("No elements in feed " + feed["url"]) for entry in entries: title = entry.get('title') text = entry.get('summary') url = entry.get('link') category = entry.get('category') raw_posting = "" post_type_text = False hashtags = [] updated = entry.get('updated') boost_target = "" edited = False exists = False oldPosting = {} status_id = 0 posted = False post_text = "" boosted = False ref = "" if url in feedStateOld: exists = True oldPosting = feedStateOld[url] if "status_id" in oldPosting: status_id = oldPosting["status_id"] if "posted" in oldPosting: posted = oldPosting["posted"] if "boosted" in oldPosting: boosted = oldPosting["boosted"] first_oewa = False if "enable_oewa_sport" in feed and feed["enable_oewa_sport"]: first_oewa = True
## # @mainpage ORFodon service script # # Quick and dirty solution to turn ORF.at into a Mastodon-site # # @Warning this is tailormade for ORF.at and will not work without modification # with other RSS based news sites! # # Inspired by feediverse from Ed Summers # # Process configuration, fetch news entries and post them to different accounts # # Dependencies: # - bs4 # - feedparser # - yaml # - mastodon # # License: The MIT License (MIT) # Copyright: Martin Eitzenberger <x@cymaphore.net> # @cymaphore@i.cymaphore.net # https://cymaphore.net # # @todo Secondary urls like https://vorarlberg.orf.at/radio/stories/3231551/ https://steiermark.orf.at/magazin/stories/3232156/ # @todo Sort news in descending order by date when bulk processing <-- low prio, usually not an issue # @todo Account mentioner ("der Standard" --> @derStandard)? # @todo extract top hashtags from current posts and add them to profile # @todo ORF_Topos as channel # ############################################################################# # External components ############################################################################# # Configuration ############################################################################# # Current fetched articles / state global state # State from previous run cycle global oldState # Global hashtag wordlist global hashtag_wordlist state = {} oldState = {} hashtag_wordlist = [] ############################################################################# ## # Main function # Call all the stages in correct order def main(): # Load hashtag wordlists load_hashtags() # Load previous state, initialize new state load_state() # Load the configured feeds and preprocess text load_feeds() # Grab post references from other channels for boosting, keep id from oldState grab_posts() # Post newly generated articles to the channels post_feeds() # Save state for next cycle save_state() ############################################################################# ## # Load hashtag wordlists def load_hashtags(): hashtags_filename = config["files"]["global_hashtags"] if True: hashtags_file = open(hashtags_filename, "r") global hashtag_wordlist hashtag_wordlist = hashtags_file.read().splitlines() ############################################################################# ## # Load the configured feeds and preprocess text def load_state(): global state global oldState global hashtag_wordlist try: with open(config["files"]["state"]) as fh: oldState = yaml.load(fh, yaml.SafeLoader) except: oldState = {} for feed in feeds: if not feed["id"] in state: state[feed["id"]] = {} if not feed["id"] in oldState: oldState[feed["id"]] = {} ############################################################################# ## # Save state for next cycle def save_state(): with open(config["files"]["state"], 'w') as fh: fh.write(yaml.dump(state, default_flow_style=False)) ############################################################################# ## # Load the configured feeds and preprocess text def load_feeds(): global state global oldState for feed in feeds: feedStateOld = oldState[feed["id"]] feedState = state[feed["id"]] if "url" in feed: entries = feedparser.parse(feed["url"]).entries if len(entries) < 1: raise RuntimeError("No elements in feed " + feed["url"]) for entry in entries: title = entry.get('title') text = entry.get('summary') url = entry.get('link') category = entry.get('category') raw_posting = "" post_type_text = False hashtags = [] updated = entry.get('updated') boost_target = "" edited = False exists = 
False oldPosting = {} status_id = 0 posted = False post_text = "" boosted = False ref = "" if url in feedStateOld: exists = True oldPosting = feedStateOld[url] if "status_id" in oldPosting: status_id = oldPosting["status_id"] if "posted" in oldPosting: posted = oldPosting["posted"] if "boosted" in oldPosting: boosted = oldPosting["boosted"] first_oewa = False if "enable_oewa_sport" in feed and feed["enable_oewa_sport"]: first_oewa = True
if not category in oewa_bypass:
6
2023-11-10 10:25:43+00:00
2k
Vitesco-Technologies/ldap-password-rotation
tests/test_lambda.py
[ { "identifier": "lambda_function", "path": "src/lambda_function.py", "snippet": "SECRETS_MANAGER_KEY_USERNAME = (\n os.environ.get(\"SECRETS_MANAGER_KEY_USERNAME\") or \"username\"\n)\nSECRETS_MANAGER_KEY_PASSWORD = (\n os.environ.get(\"SECRETS_MANAGER_KEY_PASSWORD\") or \"password\"\n)\nSECRETS_MANAGER_KEY_DN = os.environ.get(\"SECRETS_MANAGER_KEY_DN\") or \"\"\nSECRETS_MANAGER_REGION = os.environ.get(\"SECRETS_MANAGER_REGION\") or \"eu-central-1\"\nEXCLUDE_CHARACTERS_USER = os.environ.get(\"EXCLUDE_CHARACTERS_USER\") or \"$/'\\\"\\\\\"\nEXCLUDE_CHARACTERS_PW = os.environ.get(\"EXCLUDE_CHARACTERS_PW\") or \"@$/`'\\\"\\\\\"\nEXCLUDE_CHARACTERS_NEW_PW = os.environ.get(\"EXCLUDE_CHARACTERS_NEW_PW\") or \"@$/`'\\\"\\\\\"\nLDAP_SERVER_LIST = (\n os.environ.get(\"LDAP_SERVER_LIST\")\n or '[\"ldaps://ex1dcsrv1001.ex1.example.com\", \"ldaps://ex1dcsrv1002.ex1.example.com\"]'\n)\nLDAP_SERVER_PORT = os.environ.get(\"LDAP_SERVER_PORT\") or \"636\"\nLDAP_BASE_DN = os.environ.get(\"LDAP_BASE_DN\") or \"dc=ex1,dc=example,dc=com\"\nLDAP_USER_AUTH_ATTRIBUTE = (\n os.environ.get(\"LDAP_USER_AUTH_ATTRIBUTE\") or \"userPrincipalName\"\n)\nLDAP_USE_SSL = True\nLDAP_BIND_CURRENT_CREDS_SUCCESSFUL = \"LDAP_BIND_USING_CURRENT_CREDS_SUCCESSFUL\"\nLDAP_BIND_PENDING_CREDS_SUCCESSFUL = \"LDAP_BIND_USING_PENDING_CREDS_SUCCESSFUL\"\ndef lambda_handler(event, context):\ndef create_secret(secrets_manager_client, arn, token, current_dict):\ndef set_secret(current_dict, pending_dict):\ndef test_secret(pending_dict):\ndef finish_secret(secrets_manager_client, arn, token):\ndef get_secret_dict(secrets_manager_client, arn, stage, token=None):\ndef execute_ldap_command(current_dict, pending_dict):\ndef check_inputs(dict_arg):\ndef get_user_dn(conn, user, base_dn=LDAP_BASE_DN):\ndef ldap_connection(dict_arg):" }, { "identifier": "lambda_util", "path": "tests/utilities/lambda_util.py", "snippet": "def get_role_name():\ndef _zip_lambda(func_str):\ndef get_lambda_zip_file():" }, { "identifier": "LdapServer", "path": "tests/utilities/ldap_test/server.py", "snippet": "class LdapServer(object):\n def __init__(\n self,\n config=None,\n java_gateway_port=DEFAULT_GATEWAY_PORT,\n python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,\n java_delay=None,\n ):\n global SERVER_PROCESS, JVM_GATEWAY\n\n if SERVER_PROCESS is None:\n SERVER_PROCESS = run_jvm_server(java_gateway_port)\n\n # Added to introduce a delay between starting the SERVER_PROCESS and the JVM_GATEWAY if desired.\n # This seems to be a problem on some MacOS systems, and without it you end up with an infinite hang.\n if java_delay:\n time.sleep(java_delay)\n\n if JVM_GATEWAY is None:\n JVM_GATEWAY = run_jvm_gateway(java_gateway_port, python_proxy_port)\n\n self.server = JVM_GATEWAY.entry_point\n self.config, self._config_obj = ConfigBuilder(JVM_GATEWAY).build_from(config)\n self.server_id = self.server.create(self._config_obj)\n\n def start(self):\n self.server.start(self.server_id)\n\n def stop(self):\n self.server.stop(self.server_id)" } ]
import json
import logging
import os
import boto3
import ldap3
import mock
import pytest
from uuid import uuid4
from moto import mock_lambda, mock_secretsmanager
from src import lambda_function
from .utilities import lambda_util
from .utilities.ldap_test import LdapServer
1,047
# Copyright 2023 Daniel Dias, Vitesco Technologies # # SPDX-License-Identifier: Apache-2.0 _region = "eu-central-1" # server is defined as global to allow us to update it when we mock # ldap3.extend.microsoft.modifyPassword.ad_modify_password with mock_ad_modify_password _server = LdapServer() logger = logging.getLogger() logger.setLevel(logging.INFO) ############ # fixtures # ############ @pytest.fixture(scope="function", autouse=True) def aws_credentials(): """Mocked AWS Credentials for moto.""" os.environ["AWS_ACCESS_KEY_ID"] = "testing" os.environ["AWS_SECRET_ACCESS_KEY"] = "testing" os.environ["AWS_SECURITY_TOKEN"] = "testing" os.environ["AWS_SESSION_TOKEN"] = "testing" os.environ["AWS_DEFAULT_REGION"] = _region @pytest.fixture(scope="function", autouse=True) def lambda_env():
# Copyright 2023 Daniel Dias, Vitesco Technologies # # SPDX-License-Identifier: Apache-2.0 _region = "eu-central-1" # server is defined as global to allow us to update it when we mock # ldap3.extend.microsoft.modifyPassword.ad_modify_password with mock_ad_modify_password _server = LdapServer() logger = logging.getLogger() logger.setLevel(logging.INFO) ############ # fixtures # ############ @pytest.fixture(scope="function", autouse=True) def aws_credentials(): """Mocked AWS Credentials for moto.""" os.environ["AWS_ACCESS_KEY_ID"] = "testing" os.environ["AWS_SECRET_ACCESS_KEY"] = "testing" os.environ["AWS_SECURITY_TOKEN"] = "testing" os.environ["AWS_SESSION_TOKEN"] = "testing" os.environ["AWS_DEFAULT_REGION"] = _region @pytest.fixture(scope="function", autouse=True) def lambda_env():
lambda_function.SECRETS_MANAGER_KEY_USERNAME = "bind_dn"
0
2023-11-17 15:03:58+00:00
2k
totallynotadi/vibrant-python
vibrant/main.py
[ { "identifier": "generate", "path": "vibrant/generator.py", "snippet": "def generate(swatches: List[Swatch]) -> Palette:\n max_poplation = find_max_population(swatches)\n\n palette: Palette = generate_variation_colors(\n swatches, max_poplation, generator_opts\n )\n generate_empty_swatches(palette, generator_opts)\n\n return palette" }, { "identifier": "VibrantImage", "path": "vibrant/image.py", "snippet": "class VibrantImage:\n def __init__(\n self,\n src: Union[\n bytes,\n str,\n io.BytesIO,\n io.BufferedReader,\n PILImage,\n \"VibrantImage\",\n ],\n props: Optional[Props] = Props(),\n ) -> None:\n self.image: Image = None\n self.props: Props = props\n\n if isinstance(src, str):\n if src.startswith(\"http\"):\n src = requests.get(src).content\n if not os.path.exists(src):\n raise FileNotFoundError(\"Image doesn't exist at given path - %s.\" % src)\n\n if isinstance(src, bytes):\n src = io.BytesIO(src)\n\n if isinstance(src, PILImage):\n self.image = src\n else:\n self.image = Image.open(src)\n\n @classmethod\n def from_url(cls, src: str) -> \"VibrantImage\":\n src = requests.get(src).content\n src = io.BytesIO(src)\n return cls(Image.open(src))\n\n @classmethod\n def from_path(cls, src: str) -> \"VibrantImage\":\n if os.path.exists(src):\n return cls(Image.open(src))\n raise FileNotFoundError(\"Image doesn't exist at given path - %s.\" % src)\n\n @classmethod\n def from_bytes(cls, src: bytes) -> \"VibrantImage\":\n src = io.BytesIO(src)\n return cls(Image.open(src))\n\n @classmethod\n def from_fp(cls, fp: io.BufferedReader) -> \"VibrantImage\":\n return cls(Image.open(fp))\n\n def scale_down(self):\n ...\n\n def _swatch_filter(self, swatch: List[int]) -> bool:\n r, g, b = swatch.rgb\n return not (r > 250 and g > 250 and b > 250)\n\n def _parse_swatches(\n self,\n raw_swatches: List,\n swatch_populations: List[Tuple[int, int]],\n ) -> List[Swatch]:\n swatches = []\n curr_idx = 0\n for idx in range(0, len(raw_swatches), 3):\n if idx + 2 <= (len(raw_swatches) - 1):\n swatches.append(\n Swatch(\n rgb=[\n raw_swatches[idx],\n raw_swatches[idx + 1],\n raw_swatches[idx + 2],\n ],\n population=swatch_populations[curr_idx][0],\n )\n )\n curr_idx += 1\n return swatches\n\n def quantize(self) -> List[Swatch]:\n self.image = self.image.quantize(self.props.color_count)\n raw_swatches = self.image.getpalette()\n raw_swatches = list(filter(lambda x: x != 0, raw_swatches))\n swatch_populations = self.image.getcolors(self.props.color_count)\n swatches = self._parse_swatches(\n raw_swatches=raw_swatches,\n swatch_populations=swatch_populations,\n )\n return swatches" }, { "identifier": "Palette", "path": "vibrant/models.py", "snippet": "class Palette:\n vibrant: Swatch = None\n dark_vibrant: Swatch = None\n light_vibrant: Swatch = None\n muted: Swatch = None\n dark_muted: Swatch = None\n light_muted: Swatch = None" }, { "identifier": "Props", "path": "vibrant/models.py", "snippet": "class Props:\n color_count: int = 64\n quality: int = 5" } ]
import io
from typing import Union
from PIL.Image import Image as PILImage
from vibrant.generator import generate
from vibrant.image import VibrantImage
from vibrant.models import Palette, Props
1,052
class Vibrant: props: Props def __init__(self, color_count=64, quality=5) -> None: self.props = Props(color_count=color_count, quality=quality) def get_palette( self, src: Union[ bytes, str, io.BytesIO, io.BufferedReader, PILImage,
class Vibrant: props: Props def __init__(self, color_count=64, quality=5) -> None: self.props = Props(color_count=color_count, quality=quality) def get_palette( self, src: Union[ bytes, str, io.BytesIO, io.BufferedReader, PILImage,
VibrantImage,
1
2023-11-13 10:05:11+00:00
2k
MAGICS-LAB/SparseModernHopfield
layers.py
[ { "identifier": "Sparsemax", "path": "utils/sparse_max.py", "snippet": "class Sparsemax(nn.Module):\n __constants__ = [\"dim\"]\n\n def __init__(self, dim=-1):\n \"\"\"\n Sparsemax class as seen in https://arxiv.org/pdf/1602.02068.pdf\n Parameters\n ----------\n dim: The dimension we want to cast the operation over. Default -1\n \"\"\"\n super(Sparsemax, self).__init__()\n self.dim = dim\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n if not hasattr(self, \"dim\"):\n self.dim = None\n\n def forward(self, input):\n a = SparsemaxFunction.apply(input, self.dim)\n return a\n\n def extra_repr(self):\n return f\"dim={self.dim}\"" }, { "identifier": "Entmax15", "path": "utils/entmax.py", "snippet": "class Entmax15(nn.Module):\n def __init__(self, dim=-1, k=None):\n \"\"\"1.5-entmax: normalizing sparse transform (a la softmax).\n Solves the optimization problem:\n max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.\n where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.\n Parameters\n ----------\n dim : int\n The dimension along which to apply 1.5-entmax.\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n \"\"\"\n self.dim = dim\n self.k = k\n super(Entmax15, self).__init__()\n\n def forward(self, X):\n return entmax15(X, dim=self.dim, k=self.k)" }, { "identifier": "EntmaxAlpha", "path": "utils/general_entmax.py", "snippet": "class EntmaxAlpha(nn.Module):\n def __init__(self, head_count=4, dim=-1):\n super(EntmaxAlpha, self).__init__()\n self.dim = dim\n # self.alpha_chooser = nn.Parameter(AlphaChooser(1)())\n self.alpha = nn.Parameter(torch.randn(head_count))\n\n def forward(self, att_scores):\n batch_size, head_count, query_len, key_len = att_scores.size()\n\n expanded_alpha = (\n self.alpha.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n ) # [1,nb_heads,1,1]\n # expanded_alpha = expanded_alpha.repeat(1, head_count, 1, 1)\n\n expanded_alpha = expanded_alpha.expand(\n (batch_size, -1, query_len, 1)\n ) # [bs, nb_heads, query_len,1]\n \n # expanded_alpha += 1\n expanded_alpha = 1 + torch.sigmoid(expanded_alpha)\n\n # torch.clamp(1+expanded_alpha, min=1.0, max=4.0)\n\n p_star = entmax_bisect(att_scores, expanded_alpha)\n return p_star" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from einops import rearrange, repeat
from math import sqrt
from utils.sparse_max import Sparsemax
from utils.entmax import Entmax15
from utils.general_entmax import EntmaxAlpha
1,511
class FullAttention(nn.Module): ''' The Attention operation ''' def __init__(self, scale=None, attention_dropout=0.0): super(FullAttention, self).__init__() self.scale = scale self.dropout = nn.Dropout(attention_dropout) def forward(self, queries, keys, values, mask=None): B, L, H, E = queries.shape _, S, _, D = values.shape scale = self.scale or 1. / sqrt(E) scores = torch.einsum("blhe,bshe->bhls", queries, keys) if mask is not None: mask = mask.unsqueeze(1).unsqueeze(1).repeat(1, H, scores.size(-2), 1) scores = scores.masked_fill_(mask, float('-inf')) A = self.dropout(torch.softmax(scale * scores, dim=-1)) V = torch.einsum("bhls,bshd->blhd", A, values) return V.contiguous() class AttentionLayer(nn.Module): ''' The Multi-head Self-Attention (MSA) Layer ''' def __init__( self, d_model, n_heads, d_keys=None, d_values=None, mix=True, dropout=0.1, scale=None): super(AttentionLayer, self).__init__() d_keys = d_keys or (d_model // n_heads) d_values = d_values or (d_model // n_heads) self.d_model = d_model self.inner_attention = FullAttention( scale=scale, attention_dropout=dropout) self.query_projection = nn.Linear(d_model, d_keys * n_heads) self.key_projection = nn.Linear(d_model, d_keys * n_heads) self.value_projection = nn.Linear(d_model, d_values * n_heads) self.out_projection = nn.Linear(d_values * n_heads, d_model) self.n_heads = n_heads self.mix = mix def forward(self, inputs): queries = inputs keys = inputs values = inputs B, L, _ = queries.shape _, S, _ = keys.shape H = self.n_heads queries = self.query_projection(queries).view(B, L, H, -1) keys = self.key_projection(keys).view(B, S, H, -1) values = self.value_projection(values).view(B, S, H, -1) out = self.inner_attention( queries, keys, values, ) out = out.view(B, L, -1) out = out.mean(1) return self.out_projection(out) class HopfieldCore(nn.Module): ''' The Hopfield operation ''' def __init__(self, scale=None, attention_dropout=0.0, mode='sparsemax', norm=False): super(HopfieldCore, self).__init__() self.scale = scale self.norm = norm self.dropout = nn.Dropout(attention_dropout) if mode == 'sparsemax': self.softmax = Sparsemax(dim=-1) elif mode == 'entmax':
class FullAttention(nn.Module): ''' The Attention operation ''' def __init__(self, scale=None, attention_dropout=0.0): super(FullAttention, self).__init__() self.scale = scale self.dropout = nn.Dropout(attention_dropout) def forward(self, queries, keys, values, mask=None): B, L, H, E = queries.shape _, S, _, D = values.shape scale = self.scale or 1. / sqrt(E) scores = torch.einsum("blhe,bshe->bhls", queries, keys) if mask is not None: mask = mask.unsqueeze(1).unsqueeze(1).repeat(1, H, scores.size(-2), 1) scores = scores.masked_fill_(mask, float('-inf')) A = self.dropout(torch.softmax(scale * scores, dim=-1)) V = torch.einsum("bhls,bshd->blhd", A, values) return V.contiguous() class AttentionLayer(nn.Module): ''' The Multi-head Self-Attention (MSA) Layer ''' def __init__( self, d_model, n_heads, d_keys=None, d_values=None, mix=True, dropout=0.1, scale=None): super(AttentionLayer, self).__init__() d_keys = d_keys or (d_model // n_heads) d_values = d_values or (d_model // n_heads) self.d_model = d_model self.inner_attention = FullAttention( scale=scale, attention_dropout=dropout) self.query_projection = nn.Linear(d_model, d_keys * n_heads) self.key_projection = nn.Linear(d_model, d_keys * n_heads) self.value_projection = nn.Linear(d_model, d_values * n_heads) self.out_projection = nn.Linear(d_values * n_heads, d_model) self.n_heads = n_heads self.mix = mix def forward(self, inputs): queries = inputs keys = inputs values = inputs B, L, _ = queries.shape _, S, _ = keys.shape H = self.n_heads queries = self.query_projection(queries).view(B, L, H, -1) keys = self.key_projection(keys).view(B, S, H, -1) values = self.value_projection(values).view(B, S, H, -1) out = self.inner_attention( queries, keys, values, ) out = out.view(B, L, -1) out = out.mean(1) return self.out_projection(out) class HopfieldCore(nn.Module): ''' The Hopfield operation ''' def __init__(self, scale=None, attention_dropout=0.0, mode='sparsemax', norm=False): super(HopfieldCore, self).__init__() self.scale = scale self.norm = norm self.dropout = nn.Dropout(attention_dropout) if mode == 'sparsemax': self.softmax = Sparsemax(dim=-1) elif mode == 'entmax':
self.softmax = Entmax15(dim=-1)
1
2023-11-12 06:36:52+00:00
2k
Kuba314/arcparse
arcparse/_partial_arguments.py
[ { "identifier": "InvalidArgument", "path": "arcparse/errors.py", "snippet": "class InvalidArgument(InvalidParser):\n pass" }, { "identifier": "InvalidTypehint", "path": "arcparse/errors.py", "snippet": "class InvalidTypehint(InvalidArgument):\n pass" }, { "identifier": "MissingConverter", "path": "arcparse/errors.py", "snippet": "class MissingConverter(InvalidArgument):\n pass" }, { "identifier": "extract_collection_type", "path": "arcparse/_typehints.py", "snippet": "def extract_collection_type(typehint: type) -> type | None:\n origin = get_origin(typehint)\n if origin == list:\n return get_args(typehint)[0]\n return None" }, { "identifier": "extract_literal_strings", "path": "arcparse/_typehints.py", "snippet": "def extract_literal_strings(typehint: type) -> list[str] | None:\n origin = get_origin(typehint)\n if origin != Literal:\n return None\n\n args = get_args(typehint)\n if not all(isinstance(arg, str) for arg in args):\n return None\n\n return list(args)" }, { "identifier": "extract_optional_type", "path": "arcparse/_typehints.py", "snippet": "def extract_optional_type(typehint: type) -> type | None:\n origin = get_origin(typehint)\n if origin == Optional:\n return get_args(typehint)[0]\n elif origin in {Union, UnionType}:\n args = get_args(typehint)\n if len(args) == 2:\n if args[0] == NoneType:\n return args[1]\n elif args[1] == NoneType:\n return args[0]\n return None" }, { "identifier": "extract_type_from_typehint", "path": "arcparse/_typehints.py", "snippet": "def extract_type_from_typehint(typehint: type) -> type:\n if optional_type := extract_optional_type(typehint):\n return optional_type\n elif collection_type := extract_collection_type(typehint):\n return collection_type\n return typehint" }, { "identifier": "BaseValueArgument", "path": "arcparse/arguments.py", "snippet": "class Void:\nclass ContainerApplicable(Protocol):\nclass BaseArgument(ABC, ContainerApplicable):\nclass Flag(BaseArgument):\nclass NoFlag(BaseArgument):\nclass TriFlag(ContainerApplicable):\nclass BaseValueArgument[T](BaseArgument):\nclass Positional[T](BaseValueArgument[T]):\nclass Option[T](BaseValueArgument[T]):\nclass MxGroup:\nclass Subparsers:\n def apply(self, actions_container: _ActionsContainer, name: str) -> Action:\n def apply(self, actions_container: _ActionsContainer, name: str) -> Action:\n def get_argparse_args(self, name: str) -> list[str]:\n def get_argparse_kwargs(self, name: str) -> dict[str, Any]:\n def get_argparse_args(self, name: str) -> list[str]:\n def get_argparse_kwargs(self, name: str) -> dict[str, Any]:\n def get_argparse_args(self, name: str) -> list[str]:\n def get_argparse_kwargs(self, name: str) -> dict[str, Any]:\n def apply(self, actions_container: _ActionsContainer, name: str) -> None:\n def get_argparse_kwargs(self, name: str) -> dict[str, Any]:\n def get_argparse_args(self, name: str) -> list[str]:\n def get_argparse_kwargs(self, name: str) -> dict[str, Any]:\n def get_argparse_args(self, name: str) -> list[str]:\n def get_argparse_kwargs(self, name: str) -> dict[str, Any]:" }, { "identifier": "itemwise", "path": "arcparse/converters.py", "snippet": "class itemwise[T]:\n \"\"\"Mark converter as itemwise\n\n This changes its return-type signature to wrap T in list. This is used in\n argument converter declaration. Argument converters returning T make the\n argument also return T. 
However if an itemwise conversion is desired on\n arguments accepting multiple values (nargs=\"*\"), the return type should\n always be wrapped in a list.\n \"\"\"\n def __init__(self, converter: Callable[[str], T]) -> None:\n self._converter = converter\n\n def __call__(self, string: str) -> list[T]:\n return self._converter(string) # type: ignore\n\n def __repr__(self) -> str:\n return f\"itemwise({self._converter})\"" } ]
from abc import ABC, abstractmethod
from collections.abc import Callable, Collection
from dataclasses import dataclass
from typing import Any, Literal, get_origin
from arcparse.errors import InvalidArgument, InvalidTypehint, MissingConverter
from ._typehints import (
    extract_collection_type,
    extract_literal_strings,
    extract_optional_type,
    extract_type_from_typehint,
)
from .arguments import (
    BaseValueArgument,
    ContainerApplicable,
    Flag,
    NoFlag,
    Option,
    Positional,
    TriFlag,
    Void,
    void,
)
from .converters import itemwise
import re
1,109
@dataclass(kw_only=True, eq=False) class PartialMxGroup: required: bool = False @dataclass(kw_only=True)
@dataclass(kw_only=True, eq=False) class PartialMxGroup: required: bool = False @dataclass(kw_only=True)
class BasePartialArgument[R: ContainerApplicable](ABC):
7
2023-11-15 08:58:37+00:00
2k
rohitsinghlab/sceodesic
sceodesic/sceo_main/estimate_covariances.py
[ { "identifier": "fn_timer", "path": "sceodesic/utils/fn_timer.py", "snippet": "def fn_timer(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n # run and time function\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n print(f\"{func.__name__} took {elapsed_time:.3f} seconds to run.\")\n return result\n return wrapper" }, { "identifier": "compute_covariance_and_ncomps_pct_variance", "path": "sceodesic/helper/compute_covariance.py", "snippet": "def compute_covariance_and_ncomps_pct_variance(data, max_condition_number, pvd_pct):\n \"\"\" Computes a symmetric positive definite sample covariance matrix.\n - `data` is a cell x gene 2D numpy array.\n \"\"\"\n # Compute raw covariance.\n matrix = np.cov(data, rowvar=False)\n\n S,U = np.linalg.eigh(matrix)\n ncomps_pct_variance = np.argmax(np.cumsum(S[::-1]) / np.sum(S) >= pvd_pct) + 1\n \n # normalize by condition-volume \n matrix = _normalize_condition_volume(S, U, max_condition_number, log=False)\n \n return matrix, ncomps_pct_variance" } ]
import scipy
import pickle
import sys
from ..utils import fn_timer
from ..helper import compute_covariance_and_ncomps_pct_variance
from .default_keys import *
1,070
# package-specific modules @fn_timer def estimate_covariances(adata, max_condition_number, pvd_pct=0.9, copy=False, return_results=False, top_genes=None, cohort_assn=None, uns_key=None): if uns_key is None: uns_key = UNS_KEY # not able to be passed in hvg_key = HVG_KEY # top_genes can either be passed in anew or be precomputed using get_locally_variable_genes if top_genes is None: try: top_genes = adata.uns[uns_key][hvg_key] except Exception as e: message = ("Error: must either specify a set of genes to consider or " "have run sceodesic.get_locally_variable_genes beforehand.") print(message, file=sys.stderr) raise e else: adata.uns[uns_key][hvg_key] = top_genes # can either pass in a cell cohort assignment (array cohort_assn with cell[i] having cluster assn cohort_assn[i]) # or the cluster_key clustering_results = None if cohort_assn is None: try: clustering_results = adata.uns[uns_key] except: message = ("Error: must either specify a cell cohort assignment or " "have run sceodesic.get_cell_cohorts beforehand.") print(message, file=sys.stderr) raise e else: c2c = {} for i, c in enumerate(cohort_assn): c2c[c] = c2c.get(c, []) + [i] clustering_results = {'cell2cluster': c2c, 'stratify_cols': '***NOT SPECIFIED***'} adata.uns[uns_key].update(clustering_results) return _estimate_covariances(adata, max_condition_number, pvd_pct, copy, return_results, top_genes=top_genes, results_clustering=clustering_results, uns_key=uns_key) def _estimate_covariances(adata, max_condition_number, pvd_pct=0.9, copy=False, return_results=False, coexpression_filename=None, top_genes=None, results_clustering=None, uns_key=None, cluster_covar_key=None, cluster_var_ct_key=None): if uns_key is None: uns_key = UNS_KEY if cluster_covar_key is None: cluster_covar_key = CLUSTER_COVAR_KEY if cluster_var_ct_key is None: cluster_var_ct_key = CLUSTER_VAR_CT_KEY if copy: adata = adata.copy() # change later top_genes = top_genes results_clustering = results_clustering cell2cluster = results_clustering["cell2cluster"] filtered_data = adata[:,top_genes] # Get the clusters from the reduced data. clusters = {} processed_data = None if scipy.sparse.issparse(filtered_data.X): processed_data = filtered_data.X.A else: processed_data = filtered_data.X for key in cell2cluster.keys(): cluster_indices = cell2cluster[key] clusters[key] = processed_data[cluster_indices,:] cluster_covariances = {} cluster_var_count = {} for i,cluster in clusters.items():
# package-specific modules @fn_timer def estimate_covariances(adata, max_condition_number, pvd_pct=0.9, copy=False, return_results=False, top_genes=None, cohort_assn=None, uns_key=None): if uns_key is None: uns_key = UNS_KEY # not able to be passed in hvg_key = HVG_KEY # top_genes can either be passed in anew or be precomputed using get_locally_variable_genes if top_genes is None: try: top_genes = adata.uns[uns_key][hvg_key] except Exception as e: message = ("Error: must either specify a set of genes to consider or " "have run sceodesic.get_locally_variable_genes beforehand.") print(message, file=sys.stderr) raise e else: adata.uns[uns_key][hvg_key] = top_genes # can either pass in a cell cohort assignment (array cohort_assn with cell[i] having cluster assn cohort_assn[i]) # or the cluster_key clustering_results = None if cohort_assn is None: try: clustering_results = adata.uns[uns_key] except: message = ("Error: must either specify a cell cohort assignment or " "have run sceodesic.get_cell_cohorts beforehand.") print(message, file=sys.stderr) raise e else: c2c = {} for i, c in enumerate(cohort_assn): c2c[c] = c2c.get(c, []) + [i] clustering_results = {'cell2cluster': c2c, 'stratify_cols': '***NOT SPECIFIED***'} adata.uns[uns_key].update(clustering_results) return _estimate_covariances(adata, max_condition_number, pvd_pct, copy, return_results, top_genes=top_genes, results_clustering=clustering_results, uns_key=uns_key) def _estimate_covariances(adata, max_condition_number, pvd_pct=0.9, copy=False, return_results=False, coexpression_filename=None, top_genes=None, results_clustering=None, uns_key=None, cluster_covar_key=None, cluster_var_ct_key=None): if uns_key is None: uns_key = UNS_KEY if cluster_covar_key is None: cluster_covar_key = CLUSTER_COVAR_KEY if cluster_var_ct_key is None: cluster_var_ct_key = CLUSTER_VAR_CT_KEY if copy: adata = adata.copy() # change later top_genes = top_genes results_clustering = results_clustering cell2cluster = results_clustering["cell2cluster"] filtered_data = adata[:,top_genes] # Get the clusters from the reduced data. clusters = {} processed_data = None if scipy.sparse.issparse(filtered_data.X): processed_data = filtered_data.X.A else: processed_data = filtered_data.X for key in cell2cluster.keys(): cluster_indices = cell2cluster[key] clusters[key] = processed_data[cluster_indices,:] cluster_covariances = {} cluster_var_count = {} for i,cluster in clusters.items():
cluster_covar, var_count = compute_covariance_and_ncomps_pct_variance(cluster, max_condition_number, pvd_pct)
1
2023-11-10 12:28:33+00:00
2k
dacx/fcd-community
fcd_community/users/tests/test_views.py
[ { "identifier": "UserAdminChangeForm", "path": "fcd_community/users/forms.py", "snippet": "class UserAdminChangeForm(admin_forms.UserChangeForm):\n class Meta(admin_forms.UserChangeForm.Meta):\n model = User\n field_classes = {\"email\": EmailField}" }, { "identifier": "User", "path": "fcd_community/users/models.py", "snippet": "class User(AbstractUser):\n \"\"\"\n Default custom user model for Full Cycle Dev Community Product.\n If adding fields that need to be filled at user signup,\n check forms.SignupForm and forms.SocialSignupForms accordingly.\n \"\"\"\n\n # First and last name do not cover name patterns around the globe\n name = CharField(_(\"Name of User\"), blank=True, max_length=255)\n first_name = None # type: ignore\n last_name = None # type: ignore\n email = EmailField(_(\"email address\"), unique=True)\n username = None # type: ignore\n stripe_customer_id = CharField(max_length=255, blank=True, null=True)\n\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = []\n\n objects = UserManager()\n\n def get_absolute_url(self) -> str:\n \"\"\"Get URL for user's detail view.\n\n Returns:\n str: URL for user detail.\n\n \"\"\"\n return reverse(\"users:detail\", kwargs={\"pk\": self.id})" }, { "identifier": "UserFactory", "path": "fcd_community/users/tests/factories.py", "snippet": "class UserFactory(DjangoModelFactory):\n email = Faker(\"email\")\n name = Faker(\"name\")\n\n @post_generation\n def password(self, create: bool, extracted: Sequence[Any], **kwargs):\n password = (\n extracted\n if extracted\n else Faker(\n \"password\",\n length=42,\n special_chars=True,\n digits=True,\n upper_case=True,\n lower_case=True,\n ).evaluate(None, None, extra={\"locale\": None})\n )\n self.set_password(password)\n\n @classmethod\n def _after_postgeneration(cls, instance, create, results=None):\n \"\"\"Save again the instance if creating and at least one hook ran.\"\"\"\n if create and results and not cls._meta.skip_postgeneration_save:\n # Some post-generation hooks ran, and may have modified us.\n instance.save()\n\n class Meta:\n model = get_user_model()\n django_get_or_create = [\"email\"]" }, { "identifier": "UserRedirectView", "path": "fcd_community/users/views.py", "snippet": "class UserDetailView(LoginRequiredMixin, DetailView):\nclass UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):\nclass UserRedirectView(LoginRequiredMixin, RedirectView):\n def get_success_url(self):\n def get_object(self):\n def get_redirect_url(self):" } ]
import pytest from django.conf import settings from django.contrib import messages from django.contrib.auth.models import AnonymousUser from django.contrib.messages.middleware import MessageMiddleware from django.contrib.sessions.middleware import SessionMiddleware from django.http import HttpRequest, HttpResponseRedirect from django.test import RequestFactory from django.urls import reverse from django.utils.translation import gettext_lazy as _ from fcd_community.users.forms import UserAdminChangeForm from fcd_community.users.models import User from fcd_community.users.tests.factories import UserFactory from fcd_community.users.views import ( UserRedirectView, UserUpdateView, user_detail_view, )
863
pytestmark = pytest.mark.django_db class TestUserUpdateView: """ TODO: extracting view initialization code as class-scoped fixture would be great if only pytest-django supported non-function-scoped fixture db access -- this is a work-in-progress for now: https://github.com/pytest-dev/pytest-django/pull/258 """ def dummy_get_response(self, request: HttpRequest): return None def test_get_success_url(self, user: User, rf: RequestFactory):
pytestmark = pytest.mark.django_db class TestUserUpdateView: """ TODO: extracting view initialization code as class-scoped fixture would be great if only pytest-django supported non-function-scoped fixture db access -- this is a work-in-progress for now: https://github.com/pytest-dev/pytest-django/pull/258 """ def dummy_get_response(self, request: HttpRequest): return None def test_get_success_url(self, user: User, rf: RequestFactory):
view = UserUpdateView()
3
2023-11-10 08:23:29+00:00
2k
fepegar/jvol
src/jvol/jvol.py
[ { "identifier": "open_jvol", "path": "src/jvol/io.py", "snippet": "def open_jvol(path: Path) -> Tuple[np.ndarray, np.ndarray]:\n loaded = np.load(path)\n ijk_to_ras = fill_ijk_to_ras(loaded[FormatKeys.IJK_TO_RAS.value])\n quantization_block = loaded[FormatKeys.QUANTIZATION_BLOCK.value]\n array = decode_array(\n dc_rle_values=loaded[FormatKeys.DC_RLE_VALUES],\n dc_rle_counts=loaded[FormatKeys.DC_RLE_COUNTS],\n ac_rle_values=loaded[FormatKeys.AC_RLE_VALUES],\n ac_rle_counts=loaded[FormatKeys.AC_RLE_COUNTS],\n quantization_block=quantization_block,\n target_shape=loaded[FormatKeys.SHAPE],\n intercept=loaded[FormatKeys.INTERCEPT],\n slope=loaded[FormatKeys.SLOPE],\n dtype=loaded[FormatKeys.DTYPE].dtype,\n )\n return array, ijk_to_ras" }, { "identifier": "save_jvol", "path": "src/jvol/io.py", "snippet": "def save_jvol(\n array: np.ndarray,\n ijk_to_ras: np.ndarray,\n path: Path,\n block_size: int = 4,\n quality: int = 60,\n) -> None:\n block_shape = block_size, block_size, block_size\n quantization_table = get_quantization_table(block_shape, quality)\n dtype = array.dtype\n intercept = array.min()\n slope = array.max() - intercept\n dc_rle_values, dc_rle_counts, ac_rle_values, ac_rle_counts = encode_array(\n array,\n quantization_table,\n )\n\n dc_rle_values = dc_rle_values.astype(np.min_scalar_type(dc_rle_values))\n dc_rle_counts = dc_rle_counts.astype(np.min_scalar_type(dc_rle_counts))\n ac_rle_values = ac_rle_values.astype(np.min_scalar_type(ac_rle_values))\n ac_rle_counts = ac_rle_counts.astype(np.min_scalar_type(ac_rle_counts))\n\n save_dict = {\n FormatKeys.IJK_TO_RAS.value: ijk_to_ras[:3],\n FormatKeys.QUANTIZATION_BLOCK.value: quantization_table,\n FormatKeys.DC_RLE_VALUES.value: dc_rle_values,\n FormatKeys.DC_RLE_COUNTS.value: dc_rle_counts,\n FormatKeys.AC_RLE_VALUES.value: ac_rle_values,\n FormatKeys.AC_RLE_COUNTS.value: ac_rle_counts,\n FormatKeys.DTYPE.value: np.empty((), dtype=dtype),\n FormatKeys.INTERCEPT.value: intercept,\n FormatKeys.SLOPE.value: slope,\n FormatKeys.SHAPE.value: np.array(array.shape, dtype=np.uint16),\n }\n\n with open(path, \"wb\") as f:\n np.savez_compressed(f, **save_dict)" } ]
import os import numpy as np import numpy.typing as npt from pathlib import Path from typing import Any from typing import TypeAlias from typing import Union from .io import open_jvol from .io import save_jvol
1,265
from __future__ import annotations TypePath: TypeAlias = Union[str, os.PathLike] class JpegVolume: """Base class for saving and loading JPEG-encoded volumes. Args: array: 3D NumPy array. ijk_to_ras: 4×4 affine transformation matrix containing the mapping from voxel indices to RAS+ (left → right, posterior → anterior, inferior → superior) coordinates. If not specified, the identity matrix is used. Tip: To learn more about coordinates systems, check the following resources: - [NiBabel](https://nipy.org/nibabel/)'s [Coordinate systems and affines](https://nipy.org/nibabel/coordinate_systems.html), - [3D Slicer](https://www.slicer.org/)'s [Coordinate systems](https://slicer.readthedocs.io/en/latest/user_guide/coordinate_systems.html), - [FSL](https://fsl.fmrib.ox.ac.uk/)'s [docs (see "Background information on NIfTI Orientation")](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained) """ # noqa: E501 def __init__( self, array: npt.ArrayLike, ijk_to_ras: npt.ArrayLike | None = None, ): self.array = np.array(array) if ijk_to_ras is None: ijk_to_ras = np.eye(4) self.ijk_to_ras = np.array(ijk_to_ras, dtype=np.float64) if self.array.ndim != 3: raise ValueError( f"Array must have 3 dimensions, got shape {self.array.shape}" ) if self.ijk_to_ras.shape != (4, 4): raise ValueError( f"ijk_to_ras must have shape (4, 4), got {self.ijk_to_ras.shape}" ) assert self.ijk_to_ras.shape == (4, 4) @classmethod def open(cls, path: TypePath) -> JpegVolume: """Open a JVol file. Args: path: Path to a file with `'.jvol'` extension. """ path = Path(path) if not path.is_file(): raise FileNotFoundError(f'File not found: "{path}"') if path.suffix != ".jvol": raise ValueError(f'File must have .jvol extension, got "{path}"')
from __future__ import annotations TypePath: TypeAlias = Union[str, os.PathLike] class JpegVolume: """Base class for saving and loading JPEG-encoded volumes. Args: array: 3D NumPy array. ijk_to_ras: 4×4 affine transformation matrix containing the mapping from voxel indices to RAS+ (left → right, posterior → anterior, inferior → superior) coordinates. If not specified, the identity matrix is used. Tip: To learn more about coordinates systems, check the following resources: - [NiBabel](https://nipy.org/nibabel/)'s [Coordinate systems and affines](https://nipy.org/nibabel/coordinate_systems.html), - [3D Slicer](https://www.slicer.org/)'s [Coordinate systems](https://slicer.readthedocs.io/en/latest/user_guide/coordinate_systems.html), - [FSL](https://fsl.fmrib.ox.ac.uk/)'s [docs (see "Background information on NIfTI Orientation")](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained) """ # noqa: E501 def __init__( self, array: npt.ArrayLike, ijk_to_ras: npt.ArrayLike | None = None, ): self.array = np.array(array) if ijk_to_ras is None: ijk_to_ras = np.eye(4) self.ijk_to_ras = np.array(ijk_to_ras, dtype=np.float64) if self.array.ndim != 3: raise ValueError( f"Array must have 3 dimensions, got shape {self.array.shape}" ) if self.ijk_to_ras.shape != (4, 4): raise ValueError( f"ijk_to_ras must have shape (4, 4), got {self.ijk_to_ras.shape}" ) assert self.ijk_to_ras.shape == (4, 4) @classmethod def open(cls, path: TypePath) -> JpegVolume: """Open a JVol file. Args: path: Path to a file with `'.jvol'` extension. """ path = Path(path) if not path.is_file(): raise FileNotFoundError(f'File not found: "{path}"') if path.suffix != ".jvol": raise ValueError(f'File must have .jvol extension, got "{path}"')
return cls(*open_jvol(path))
0
2023-11-12 18:41:36+00:00
2k
iramluism/basel
tests/unit_tests/components/component_test.py
[ { "identifier": "Component", "path": "basel/components/components.py", "snippet": "class Component(metaclass=abc.ABCMeta):\n def __init__(\n self,\n name: str,\n nodes: List[Node] = None,\n instability: Optional[float] = 1,\n abstraction: Optional[float] = 1,\n error: Optional[float] = 1,\n ) -> None:\n self.name = name\n self.nodes = {}\n self.instability = instability\n self.abstraction = abstraction\n self.error = error\n\n for node in nodes or []:\n self.add_node(node)\n\n def set_error(self, error):\n self.error = error\n\n def set_instability(self, instability):\n self.instability = instability\n\n def set_abstraction(self, abstraction):\n self.abstraction = abstraction\n\n def get_classes(self):\n classes = []\n nodes = list(self.nodes.values())\n\n while nodes:\n node = nodes.pop(0)\n\n if not node:\n break\n\n children = node.get_children()\n nodes.extend(children)\n\n if isinstance(node, ClassNode):\n classes.append(node)\n\n return classes\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}:{self.name}>\"\n\n def has_node(self, node_name):\n return node_name in self.nodes\n\n def add_node(self, node: Node):\n self.nodes[node.name] = node\n\n def get_node(self, node_name):\n return self.nodes.get(node_name)\n\n def __iter__(self):\n for node in self.nodes.values():\n yield node\n\n def __eq__(self, component):\n if not component:\n return False\n\n equal_names = self.name == component.name\n\n for other_node in component:\n self_node = self.get_node(other_node.name)\n if other_node != self_node:\n return False\n\n return equal_names\n\n def __ne__(self, component):\n return not self.__eq__(component)" }, { "identifier": "ClassNode", "path": "basel/components/classes.py", "snippet": "class ClassNode(Node):\n def __init__(\n self,\n name: str,\n subclasses: Optional[List] = None,\n keywords: Optional[Dict] = None,\n **kwargs,\n ):\n super().__init__(name, **kwargs)\n self.subclasses = subclasses or []\n self.keywords = keywords or {}\n\n def __eq__(self, other_node):\n if not other_node:\n return False\n\n match_names = other_node.name == self.name\n match_subclasses = other_node.subclasses == self.subclasses\n match_keywords = other_node.keywords == self.keywords\n match_children = self.has_children(other_node)\n\n return match_names and match_children and match_keywords and match_subclasses" }, { "identifier": "ModuleNode", "path": "basel/components/modules.py", "snippet": "class ModuleNode(Node):\n pass" } ]
from basel.components import Component from basel.components.classes import ClassNode from basel.components.modules import ModuleNode import pytest
777
@pytest.mark.parametrize( "component,expected_classes", [ ( Component( name="Componant_A", nodes=[
@pytest.mark.parametrize( "component,expected_classes", [ ( Component( name="Componant_A", nodes=[
ModuleNode(
2
2023-11-18 13:47:55+00:00
2k
Gr-1m/AWD-Frame-ByGr1m
modules/Attack.py
[ { "identifier": "FRAME_DIR", "path": "Configs/frame_config.py", "snippet": "FRAME_DIR = _os.path.dirname(_os.path.dirname(__file__))" }, { "identifier": "FlagRegular", "path": "Configs/config.py", "snippet": "API_URL = 'http://kaming/awduse/submit.php'" }, { "identifier": "printX", "path": "func/CmdColors.py", "snippet": "def printX(context=None, *args, logtime=True, **kwargs) -> None:\n try:\n if context[0] == '[' and context[2] == ']':\n prompt = context[1].lower()\n main_text = context[3:].lstrip()\n if prompt == '0':\n context = '\\x1b[01;30;30m[0]\\x1b[0m ' + main_text\n elif prompt == '-':\n context = '\\x1b[01;30;31m[-]\\x1b[0m ' + main_text\n elif prompt.lower() == 'i':\n logtime = logtime or False\n context = '\\x1b[01;30;32m[i]\\x1b[0m ' + main_text\n elif prompt.lower() == 'w' or prompt.lower() == '!':\n logtime = logtime or False\n context = '\\x1b[01;30;33m[W]\\x1b[0m ' + main_text\n elif prompt == '+':\n context = '\\x1b[01;30;34m[+]\\x1b[0m ' + main_text\n elif prompt == '*':\n context = '\\x1b[01;30;35m[*]\\x1b[0m ' + main_text\n elif prompt.upper() == 'F':\n context = '\\x1b[01;30;36m[F]\\x1b[0m ' + main_text\n else:\n logtime = logtime or False\n context = '\\x1b[01;30;37m[!]\\x1b[0m ' + main_text\n elif context:\n context = '\\x1b[01;30;38m[?]\\x1b[0m ' + context.lstrip()\n else:\n pass\n except IndexError:\n context = '\\x1b[01;30;37m[E]\\x1b[0m ' + f\"Log Input Error:{context.lstrip()}\"\n else:\n pass\n finally:\n if logtime:\n context = f\"[\\x1b[01;30;32m{time.asctime().split()[3]}\\x1b[0m] \" + context\n print(f\"{context}\", *args, **kwargs)\n return None" } ]
from Configs.frame_config import FRAME_DIR from Configs.config import FlagRegular from func.CmdColors import printX from modules.ReplaceStr import * from urllib.parse import urlparse as URL import requests, pymysql, paramiko, socket import hashlib, base64 import os as _os import re
1,329
#!/usr/bin/python3 # -*- coding: UTF-8 -*- """ @project : customGr1m @file : Attack.py @Author : Gr%1m @Date : 14/11/2023 10:56 am """ # from pwn import * # About Flag Flags = set() FlagPath = '/flag' FlagLen = 41 # Payload INFO Payloads = { f"http://POST@{HostReplaceStr}:80/awdtest/testback.php?submit=submit&bb={RceReplaceStr}", } WebRootDir = '/var/www/html' LoginCookie = 'security=low; PHPSESSID=e16f5c982733368120234560b9cb5625' BDFileName = 'a10uN7yA_1' BDcmdPass = 'x2aom1ng_20231114' BDRceParam = 'kAt3l1na' MemShell = set() # todo: attack # Enemy INFO X = 'x' def _up_payloads(data): Payloads.add(data) def submit_flag(submitAPI, token, flag): try: if submitAPI[-1] == 'GET': url = f'{submitAPI[0]}?{submitAPI[1]}={token}&{submitAPI[2]}={flag}' res = requests.get(url=url) elif submitAPI[-1] == 'POST': res = requests.post(url=submitAPI[0], data={submitAPI[1]: token, submitAPI[2]: flag}) else: printX("[!] please set SubmitAPI method") return "No", 400 return res.text, res.status_code except KeyboardInterrupt: printX('[-] Interrupt Submit Flag') return 0, 0 except Exception: return 0, 0 def _attack_vul(hostname, payload, cmd): purl = URL(payload) method, payload = purl.username, payload.split(f'@{HostReplaceStr}')[-1] payload = payload.replace(RceReplaceStr, cmd) url = f'http://{hostname}{payload}' try: if method == 'GET': res = requests.get(url=url, headers={'Cookie': LoginCookie}) elif method == 'POST': params = payload.split('?', maxsplit=1)[-1] data = {_.split('=', maxsplit=1)[0]: _.split('=', maxsplit=1)[1] for _ in params.split('&')} res = requests.post(url, data=data, headers={'Cookie': LoginCookie}) else: printX(f'[-] Not Allow Method in payload {payload}') raise NameError except: class _X: def __init__(self): self.text = None self.status_code = 400 res = _X() return res, purl def get_flag(ey_hosts, rce="system('cat /flag');"): def extract_flag(text): try:
#!/usr/bin/python3 # -*- coding: UTF-8 -*- """ @project : customGr1m @file : Attack.py @Author : Gr%1m @Date : 14/11/2023 10:56 am """ # from pwn import * # About Flag Flags = set() FlagPath = '/flag' FlagLen = 41 # Payload INFO Payloads = { f"http://POST@{HostReplaceStr}:80/awdtest/testback.php?submit=submit&bb={RceReplaceStr}", } WebRootDir = '/var/www/html' LoginCookie = 'security=low; PHPSESSID=e16f5c982733368120234560b9cb5625' BDFileName = 'a10uN7yA_1' BDcmdPass = 'x2aom1ng_20231114' BDRceParam = 'kAt3l1na' MemShell = set() # todo: attack # Enemy INFO X = 'x' def _up_payloads(data): Payloads.add(data) def submit_flag(submitAPI, token, flag): try: if submitAPI[-1] == 'GET': url = f'{submitAPI[0]}?{submitAPI[1]}={token}&{submitAPI[2]}={flag}' res = requests.get(url=url) elif submitAPI[-1] == 'POST': res = requests.post(url=submitAPI[0], data={submitAPI[1]: token, submitAPI[2]: flag}) else: printX("[!] please set SubmitAPI method") return "No", 400 return res.text, res.status_code except KeyboardInterrupt: printX('[-] Interrupt Submit Flag') return 0, 0 except Exception: return 0, 0 def _attack_vul(hostname, payload, cmd): purl = URL(payload) method, payload = purl.username, payload.split(f'@{HostReplaceStr}')[-1] payload = payload.replace(RceReplaceStr, cmd) url = f'http://{hostname}{payload}' try: if method == 'GET': res = requests.get(url=url, headers={'Cookie': LoginCookie}) elif method == 'POST': params = payload.split('?', maxsplit=1)[-1] data = {_.split('=', maxsplit=1)[0]: _.split('=', maxsplit=1)[1] for _ in params.split('&')} res = requests.post(url, data=data, headers={'Cookie': LoginCookie}) else: printX(f'[-] Not Allow Method in payload {payload}') raise NameError except: class _X: def __init__(self): self.text = None self.status_code = 400 res = _X() return res, purl def get_flag(ey_hosts, rce="system('cat /flag');"): def extract_flag(text): try:
flag = re.search(FlagRegular, text).group()
1
2023-11-17 09:12:03+00:00
2k
Wolfsauge/async_summarize
async_helpers.py
[ { "identifier": "get_length_of_chunk_in_tokens", "path": "sync_helpers.py", "snippet": "def get_length_of_chunk_in_tokens(my_chunk: str, buck_slip: dict) -> int:\n my_result = buck_slip[\"tokenizer\"](my_chunk)\n input_ids = my_result.input_ids\n length_of_chunk_in_tokens = len(input_ids)\n\n return length_of_chunk_in_tokens" }, { "identifier": "get_text_splitter", "path": "sync_helpers.py", "snippet": "def get_text_splitter(\n buck_slip: dict, custom_chunk_size: int, custom_chunk_overlap: int\n) -> TextSplitter:\n batched_tokenization = buck_slip[\"use_batched_tokenization\"]\n\n if batched_tokenization is True:\n text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(\n tokenizer=buck_slip[\"tokenizer\"],\n chunk_size=custom_chunk_size,\n chunk_overlap=custom_chunk_overlap,\n )\n else:\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size=custom_chunk_size,\n chunk_overlap=custom_chunk_overlap,\n length_function=lambda x: get_length_of_chunk_in_tokens(x, buck_slip),\n )\n\n return text_splitter" }, { "identifier": "grouped", "path": "sync_helpers.py", "snippet": "def grouped(iterable: Iterable[T], number_of_elements=2) -> Iterable[Tuple[T, ...]]:\n \"\"\"https://stackoverflow.com/a/5389547\"\"\"\n return zip(*[iter(iterable)] * number_of_elements)" }, { "identifier": "find_chunk_pair_with_minimal_size", "path": "sync_helpers.py", "snippet": "def find_chunk_pair_with_minimal_size(elements) -> tuple[int, int]:\n last_index = len(elements) - 1\n min_length = len(elements[0])\n min_index = 0\n for i, result in enumerate(elements):\n if i < last_index:\n sum_of_chars = len(result) + len(elements[i + 1])\n if sum_of_chars < min_length:\n min_length = sum_of_chars\n min_index = i\n return min_index, min_index + 1" }, { "identifier": "find_longest_element_index", "path": "sync_helpers.py", "snippet": "def find_longest_element_index(elements) -> int:\n max_length = 0\n max_index = 0\n for i, result in enumerate(elements):\n if len(result) > max_length:\n max_length = len(result)\n max_index = i\n return max_index" }, { "identifier": "calc_custom_chunking_parameters", "path": "sync_helpers.py", "snippet": "def calc_custom_chunking_parameters(\n length_of_chunk_in_tokens: int, buck_slip: dict\n) -> tuple[int, int]:\n my_divisor = math.ceil(length_of_chunk_in_tokens / buck_slip[\"chunk_size\"])\n my_divisor = power_log(my_divisor)\n my_custom_chunk_size = math.ceil(length_of_chunk_in_tokens / my_divisor)\n my_custom_chunk_size = math.ceil(my_custom_chunk_size * 1.10)\n\n my_custom_chunk_overlap = math.ceil(my_custom_chunk_size * 0.1)\n\n return my_custom_chunk_size, my_custom_chunk_overlap" } ]
import sys import asyncio import math from tqdm.asyncio import tqdm # type: ignore from icecream import ic # type: ignore from sync_helpers import ( get_length_of_chunk_in_tokens, get_text_splitter, grouped, find_chunk_pair_with_minimal_size, find_longest_element_index, calc_custom_chunking_parameters, )
1,590
async def get_completion(buck_slip: dict, task: str, **kwargs) -> str: template = buck_slip["jinja2_env"].from_string(buck_slip["prompt_templates"][task]) if task == "summarize": chunk = kwargs["chunk"] if isinstance(chunk, str): my_prompt = template.render(prompt=chunk) else: ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.") sys.exit(1) elif task == "merge": first_element = kwargs["first_element"] second_element = kwargs["second_element"] if isinstance(first_element, str) and isinstance(second_element, str): my_prompt = template.render( first_element=first_element, second_element=second_element ) else: ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.") sys.exit(1) else: ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.") sys.exit(1) bad_counter = 0 attempt_counter = 0 while attempt_counter <= buck_slip["max_completion_retries"]: my_temperature = buck_slip["temperature"] + attempt_counter * 0.1 completion = await buck_slip["api_client"].completions.create( model=buck_slip["model_local_identifier"], prompt=my_prompt, max_tokens=buck_slip["max_tokens"], temperature=my_temperature, ) attempt_counter += 1 finish_reason = completion.choices[0].finish_reason if finish_reason == "stop": break bad_counter += 1 ic(completion) ic(attempt_counter) ic(bad_counter) ic(finish_reason) ic("ERROR: finish_reason != 'stop', retrying.") if bad_counter >= buck_slip["max_completion_retries"]: ic(completion) ic(attempt_counter) ic(bad_counter) ic(finish_reason) ic("ERROR: aborting after multiple failed attempts.") sys.exit(1) return completion.choices[0].text async def do_chunking_step(my_chunk: str, buck_slip: dict) -> list: lock = buck_slip["lock"] tqdm.write(f"Acquired {lock}.") async with lock: chunks = buck_slip["text_splitter"].split_text(my_chunk) tqdm.write(f"Released {lock}.") return chunks async def merge_elements(elements, buck_slip: dict, pindex: int) -> tuple[str, int]: first_element, second_element = elements intermediate_merge_result = await get_completion( buck_slip, "merge", first_element=first_element, second_element=second_element ) intermediate_merge_result = str(intermediate_merge_result).strip() return intermediate_merge_result, pindex async def summarize_element(chunk, buck_slip: dict, pindex: int) -> tuple[str, int]: intermediate_merge_result = await get_completion( buck_slip, "summarize", chunk=chunk ) intermediate_merge_result = str(intermediate_merge_result).strip() return intermediate_merge_result, pindex async def split_further(partial_results: list, my_pos: int, buck_slip: dict) -> list: ic("Split further.") ic(my_pos) ic(len(partial_results)) my_len_list = [len(_) for _ in partial_results] ic(my_len_list) my_chunk = partial_results[my_pos] lock = buck_slip["lock"] tqdm.write(f"Acquired {lock}.") async with lock: length_of_chunk_in_tokens = get_length_of_chunk_in_tokens(my_chunk, buck_slip) tqdm.write(f"Released {lock}.") my_custom_chunk_size = length_of_chunk_in_tokens my_custom_chunk_overlap = 0
async def get_completion(buck_slip: dict, task: str, **kwargs) -> str: template = buck_slip["jinja2_env"].from_string(buck_slip["prompt_templates"][task]) if task == "summarize": chunk = kwargs["chunk"] if isinstance(chunk, str): my_prompt = template.render(prompt=chunk) else: ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.") sys.exit(1) elif task == "merge": first_element = kwargs["first_element"] second_element = kwargs["second_element"] if isinstance(first_element, str) and isinstance(second_element, str): my_prompt = template.render( first_element=first_element, second_element=second_element ) else: ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.") sys.exit(1) else: ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.") sys.exit(1) bad_counter = 0 attempt_counter = 0 while attempt_counter <= buck_slip["max_completion_retries"]: my_temperature = buck_slip["temperature"] + attempt_counter * 0.1 completion = await buck_slip["api_client"].completions.create( model=buck_slip["model_local_identifier"], prompt=my_prompt, max_tokens=buck_slip["max_tokens"], temperature=my_temperature, ) attempt_counter += 1 finish_reason = completion.choices[0].finish_reason if finish_reason == "stop": break bad_counter += 1 ic(completion) ic(attempt_counter) ic(bad_counter) ic(finish_reason) ic("ERROR: finish_reason != 'stop', retrying.") if bad_counter >= buck_slip["max_completion_retries"]: ic(completion) ic(attempt_counter) ic(bad_counter) ic(finish_reason) ic("ERROR: aborting after multiple failed attempts.") sys.exit(1) return completion.choices[0].text async def do_chunking_step(my_chunk: str, buck_slip: dict) -> list: lock = buck_slip["lock"] tqdm.write(f"Acquired {lock}.") async with lock: chunks = buck_slip["text_splitter"].split_text(my_chunk) tqdm.write(f"Released {lock}.") return chunks async def merge_elements(elements, buck_slip: dict, pindex: int) -> tuple[str, int]: first_element, second_element = elements intermediate_merge_result = await get_completion( buck_slip, "merge", first_element=first_element, second_element=second_element ) intermediate_merge_result = str(intermediate_merge_result).strip() return intermediate_merge_result, pindex async def summarize_element(chunk, buck_slip: dict, pindex: int) -> tuple[str, int]: intermediate_merge_result = await get_completion( buck_slip, "summarize", chunk=chunk ) intermediate_merge_result = str(intermediate_merge_result).strip() return intermediate_merge_result, pindex async def split_further(partial_results: list, my_pos: int, buck_slip: dict) -> list: ic("Split further.") ic(my_pos) ic(len(partial_results)) my_len_list = [len(_) for _ in partial_results] ic(my_len_list) my_chunk = partial_results[my_pos] lock = buck_slip["lock"] tqdm.write(f"Acquired {lock}.") async with lock: length_of_chunk_in_tokens = get_length_of_chunk_in_tokens(my_chunk, buck_slip) tqdm.write(f"Released {lock}.") my_custom_chunk_size = length_of_chunk_in_tokens my_custom_chunk_overlap = 0
buck_slip["text_splitter"] = get_text_splitter(
1
2023-11-16 01:51:17+00:00
2k
balazsborsos/dae_postprocessing
main.py
[ { "identifier": "ConfigurationParser", "path": "utils/parser.py", "snippet": "class ConfigurationParser:\n def __init__(self):\n self.parser = argparse.ArgumentParser(description='Script for training or evaluation with configuration.')\n\n # Argument to specify mode (train or evaluation)\n self.parser.add_argument('mode', choices=['train', 'evaluation'], help='Mode: train or evaluation')\n # Argument to specify the path to the configuration YAML file\n self.parser.add_argument('-t', '--config', type=str, required=True, help='Path to the configuration file')\n # Argument to specify the path to the data\n self.parser.add_argument('-i', '--input', type=str, required=True, help='Path to the input data')\n\n def parse_args(self):\n return self.parser.parse_args()" }, { "identifier": "parse_yaml_config", "path": "utils/parser.py", "snippet": "def parse_yaml_config(file_path: Path) -> dict:\n with open(file_path, 'r') as config_file:\n config_data = yaml.load(config_file, Loader=yaml.FullLoader)\n return config_data" }, { "identifier": "train_model", "path": "train.py", "snippet": "def train_model(config: dict, data_path: Path):\n model_dir = config[\"store_model_to\"]\n print('Model directory: ', model_dir)\n\n # custom parameters set in yaml file\n training_params = config[\"training_configuration\"]\n\n # callback and logger\n callbacks = [\n ModelCheckpoint( # saves weights for every n epochs\n dirpath=Path(model_dir, \"checkpoint\"),\n filename='weights.epoch{epoch:03}-val_denoise_dice_{val_denoise_dice:.4f}',\n save_top_k=-1,\n auto_insert_metric_name=False,\n save_weights_only=False,\n every_n_epochs=5,\n )\n ]\n\n loggers = [\n TensorBoardLogger(\n save_dir=model_dir,\n name='board',\n version=''\n ),\n ]\n # model\n model = get_model(**training_params)\n\n # data\n train_dataloader = get_dataloader(\n data_path,\n mlset=\"training\",\n **training_params\n )\n val_dataloader = get_dataloader(\n data_path,\n mlset=\"validation\",\n **training_params\n )\n\n # training\n trainer = pl.Trainer(\n logger=loggers,\n callbacks=callbacks,\n max_epochs=training_params[\"max_epochs\"],\n gpus=training_params.get(\"gpus\", 1),\n auto_select_gpus=True,\n strategy=training_params.get(\"strategy\", None),\n gradient_clip_val=0.5,\n log_every_n_steps=5\n )\n\n trainer.fit(\n model,\n train_dataloaders=train_dataloader,\n val_dataloaders=val_dataloader,\n )" } ]
from utils.parser import ConfigurationParser, parse_yaml_config from train import train_model
690
if __name__ == "__main__": config_parser = ConfigurationParser() args = config_parser.parse_args()
if __name__ == "__main__": config_parser = ConfigurationParser() args = config_parser.parse_args()
config = parse_yaml_config(args.config)
1
2023-11-18 13:57:25+00:00
2k
htyao89/Textual-based_Class-aware_prompt_tuning
clip/clip.py
[ { "identifier": "build_model", "path": "clip/model.py", "snippet": "def build_model(state_dict: dict):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len([k for k in state_dict.keys() if k.startswith(\"visual.\") and k.endswith(\".attn.in_proj_weight\")])\n vision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n grid_size = round((state_dict[\"visual.positional_embedding\"].shape[0] - 1) ** 0.5)\n image_resolution = vision_patch_size * grid_size\n else:\n counts: list = [len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"visual.layer{b}\"))) for b in [1, 2, 3, 4]]\n vision_layers = tuple(counts)\n vision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n output_width = round((state_dict[\"visual.attnpool.positional_embedding\"].shape[0] - 1) ** 0.5)\n vision_patch_size = None\n assert output_width ** 2 + 1 == state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n image_resolution = output_width * 32\n\n embed_dim = state_dict[\"text_projection\"].shape[1]\n context_length = state_dict[\"positional_embedding\"].shape[0]\n vocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = state_dict[\"ln_final.weight\"].shape[0]\n transformer_heads = transformer_width // 64\n transformer_layers = len(set(k.split(\".\")[2] for k in state_dict if k.startswith(\"transformer.resblocks\")))\n\n model = CLIP(\n embed_dim,\n image_resolution, vision_layers, vision_width, vision_patch_size,\n context_length, vocab_size, transformer_width, transformer_heads, transformer_layers\n )\n\n for key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n if key in state_dict:\n del state_dict[key]\n\n convert_weights(model)\n model.load_state_dict(state_dict)\n return model.eval()" }, { "identifier": "SimpleTokenizer", "path": "clip/simple_tokenizer.py", "snippet": "class SimpleTokenizer(object):\n def __init__(self, bpe_path: str = default_bpe()):\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split('\\n')\n merges = merges[1:49152-256-2+1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v+'</w>' for v in vocab]\n for merge in merges:\n vocab.append(''.join(merge))\n vocab.extend(['<|startoftext|>', '<|endoftext|>'])\n self.encoder = dict(zip(vocab, range(len(vocab))))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}\n self.pat = re.compile(r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\", re.IGNORECASE)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + ( token[-1] + '</w>',)\n pairs = get_pairs(word)\n\n if not pairs:\n return token+'</w>'\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n else:\n 
new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))\n bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def decode(self, tokens):\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=\"replace\").replace('</w>', ' ')\n return text" } ]
import hashlib import os import urllib import warnings import torch from typing import Any, Union, List from pkg_resources import packaging from PIL import Image from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize from tqdm import tqdm from .model import build_model from .simple_tokenizer import SimpleTokenizer as _Tokenizer from torchvision.transforms import InterpolationMode
1,515
try: BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): warnings.warn("PyTorch version 1.7.1 or higher is recommended") __all__ = ["available_models", "load", "tokenize"]
try: BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): warnings.warn("PyTorch version 1.7.1 or higher is recommended") __all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
0
2023-11-14 03:50:33+00:00
2k
Veridise/vanguard-aleo
vanguard/aleo/detectors/infoleak.py
[ { "identifier": "get_ifg_edges", "path": "vanguard/aleo/common.py", "snippet": "def get_ifg_edges(prog, func, hash=False, call=False, inline=False):\n \"\"\"Get information flow graph edges.\n Args:\n - prog: \n - func\n - hash (default: False): whether to treat a hash function call directly as an edge\n - call (default: False): whether to treat a call directly as an edge\n - inline (default: False): whether to inline call invocations recursively to generate edges;\n if `call` is True, this argument is then overridden and no inlining will take place.\n Rets: A list of pairs of strings\n \"\"\"\n node = prog.functions[func]\n assert_node_field(node, \"instructions\")\n\n edges = []\n # process instructions\n for inst in node[\"instructions\"] + node[\"outputs\"]:\n tokens = trim_inst(inst[\"str\"]).split()\n match tokens:\n\n case [\"is.eq\", o1, o2, \"into\", r]:\n edges.append((o1, r))\n edges.append((o2, r))\n case [\"is.neq\", o1, o2, \"into\", r]:\n edges.append((o1, r))\n edges.append((o2, r))\n \n case [\"assert.eq\", o1, o2]:\n edges.append((o1, o2))\n edges.append((o2, o1))\n case [\"assert.neq\", o1, o2]:\n edges.append((o1, o2))\n edges.append((o2, o1))\n\n case [\"cast\", o, \"into\", r, \"as\", d]:\n edges.append((o, r))\n\n case [\"call\", *ts]:\n # manualy match the call component since there are two sequences of varying lengths\n idx_into = tokens.index(\"into\")\n f = tokens[1]\n os = tokens[2:idx_into]\n rs = tokens[idx_into+1:]\n if call:\n for o in os:\n for r in rs:\n # overapproximated edges from every o to every r\n edges.append((o, r))\n elif inline:\n # TODO: add impl\n raise NotImplementedError\n else:\n # no inline, no call, then no edge\n pass\n \n case [\"async\", *ts]:\n # FIXME: can't find official documentation for now, treated as call\n # manualy match the call component since there are two sequences of varying lengths\n idx_into = tokens.index(\"into\")\n f = tokens[1]\n os = tokens[2:idx_into]\n rs = tokens[idx_into+1:]\n if call:\n for o in os:\n for r in rs:\n # overapproximated edges from every o to every r\n edges.append((o, r))\n elif inline:\n # TODO: add impl\n raise NotImplementedError\n else:\n # no inline, no call, then no edge\n pass\n\n case [cop, o1, o2, \"into\", r, \"as\", t] if cop.startswith(\"commit\"):\n # no edge in commitment computation\n pass\n\n case [hop, o1, \"into\", r, \"as\", t] if hop.startswith(\"hash\"):\n # no edge in hash computation\n pass\n\n case [binop, o1, o2, \"into\", r]:\n edges.append((o1, r))\n edges.append((o2, r))\n \n case [unop, o, \"into\", r]:\n edges.append((o, r))\n \n case [terop, o1, o2, o3, \"into\", r]:\n edges.append((o1, r))\n edges.append((o2, r))\n edges.append((o3, r))\n\n case [\"cast\", *os, \"into\", dst, \"as\", typ]:\n for o in os:\n edges.append((o, dst))\n \n case [\"output\", o, \"as\", typ]:\n # no edge in output command\n pass\n\n case _:\n raise NotImplementedError(f\"Unknown instruction pattern, got: {inst['str']}\")\n\n return edges" }, { "identifier": "trim_inst", "path": "vanguard/aleo/common.py", "snippet": "def trim_inst(inst: str):\n # remove space in \"; \" in array literals\n # remove tailing semi-colon \";\"\n return inst.replace(\"; \", \";\").strip(\";\")" } ]
import networkx as nx from ..common import get_ifg_edges, trim_inst
1,134
def detector_infoleak(prog, func): """Detect for information leak Args: - prog: - func: Rets: (result, info) """
def detector_infoleak(prog, func): """Detect for information leak Args: - prog: - func: Rets: (result, info) """
edges = get_ifg_edges(prog, func, hash=False, call=True, inline=False)
0
2023-11-10 02:57:03+00:00
2k
winrey/x-following
check_following.py
[ { "identifier": "client", "path": "client.py", "snippet": "class MyUser(TypedDict):\nclass TimelineUserEntitiesDescription(TypedDict):\nclass TimelineUserEntitiesURL(TypedDict):\nclass TimelineUserEntities(TypedDict):\nclass TimelineUserLegacy(TypedDict):\nclass TimelineUser(TypedDict):\nclass FollowingUser(TimelineUser, TimelineUserLegacy, TypedDict):\nclass TwitterClient:\n def __init__(self, authorization_token, cookie_value, csrf_token):\n def get_auth_headers(self, referer='https://twitter.com/'):\n def get_multi_user_info(self) -> List[MyUser]:\n def set_current_user_info(self, user: MyUser):\n def get_current_user_info(self) -> MyUser:\n def _get_user_list_by_graphql(self, url, referer, max, cursor):\n def user_valid(user) -> bool:\n def map_entry_to_user(user) -> FollowingUser:\n def get_following_by_graphql(self, max=20, cursor=\"\") -> Tuple[List[FollowingUser], str]:\n def get_all_following_by_graphql(self, singe_max=50) -> List[FollowingUser]:\n def get_followers_by_graphql(self, max=20, cursor=\"\"):\n def get_all_followers_by_graphql(self, singe_max=50) -> List[FollowingUser]:\n def unfollow(self, following: FollowingUser):" }, { "identifier": "select_account", "path": "common_cli.py", "snippet": "def select_account():\n users = client.get_multi_user_info()\n choice = 0\n if len(users) > 1:\n print(\"Select Account:\")\n for idx, user in enumerate(users):\n print(f\"{idx+1}. {user['screen_name']}\")\n choice = input(\"Which Account? Please input the number: \")\n choice = int(choice) - 1\n client.set_current_user_info(users[choice])" }, { "identifier": "trials", "path": "common_cli.py", "snippet": "def trials(subjects: List[FollowingUser]):\n length = len(subjects)\n for idx, subject in enumerate(subjects):\n # clear screen\n os.system('clear')\n print(f\"\\n\\t\\t\\t{f'Here is the {idx+1}/{length} subject:'}\")\n print(f\"\\n\\n{center_text(LINE_STR)}\\n\\n\")\n trial_single(subject)" }, { "identifier": "filter_not_in_whitelist", "path": "back_white_list.py", "snippet": "WHITELIST_PATH = 'cache/whitelist.json'\nBLACKLIST_PATH = 'cache/blacklist.json'\ndef use_list(path: str):\n def load_list() -> List[FollowingUser]:\n def save_list(following: FollowingUser):\n def filter_not_in_list(followings: List[FollowingUser]):\n def is_in_whitelist(following: FollowingUser):" } ]
import json from typing import List from client import client, FollowingUser from common_cli import select_account, trials from back_white_list import filter_not_in_whitelist, filter_not_in_blacklist
1,039
FOLLOWING_CACHE_PATH = 'cache/followings.json' def load_followings(): try: with open(FOLLOWING_CACHE_PATH, 'r') as f: return json.load(f) except FileNotFoundError: return False def get_all_followings(force_update=False): followings = load_followings() if followings and not force_update: return followings followings = client.get_all_following_by_graphql(50) print("saving followings...") with open('cache/followings.json', 'w') as f: json.dump(followings, f) return followings def filter_one_way_followings(followings: List[FollowingUser]): one_way_followings = [] for following in followings: if "followed_by" not in following or not following["followed_by"]: one_way_followings.append(following) return one_way_followings def is_public_account(following: FollowingUser): if following["verified"]: return True followers_count = following.get("followers_count", 0) following_count = following.get("following_count", 0) if following_count < 100 and followers_count > 2000: return True if following_count == 0: return False return followers_count / following_count > 30 def filter_not_public_accounts(followings: List[FollowingUser]): return [following for following in followings if not is_public_account(following)] def main_trails(): select_account() followings = get_all_followings() subjects = filter_one_way_followings(followings) subjects = filter_not_public_accounts(subjects) subjects = filter_not_in_whitelist(subjects) subjects = filter_not_in_blacklist(subjects)
FOLLOWING_CACHE_PATH = 'cache/followings.json' def load_followings(): try: with open(FOLLOWING_CACHE_PATH, 'r') as f: return json.load(f) except FileNotFoundError: return False def get_all_followings(force_update=False): followings = load_followings() if followings and not force_update: return followings followings = client.get_all_following_by_graphql(50) print("saving followings...") with open('cache/followings.json', 'w') as f: json.dump(followings, f) return followings def filter_one_way_followings(followings: List[FollowingUser]): one_way_followings = [] for following in followings: if "followed_by" not in following or not following["followed_by"]: one_way_followings.append(following) return one_way_followings def is_public_account(following: FollowingUser): if following["verified"]: return True followers_count = following.get("followers_count", 0) following_count = following.get("following_count", 0) if following_count < 100 and followers_count > 2000: return True if following_count == 0: return False return followers_count / following_count > 30 def filter_not_public_accounts(followings: List[FollowingUser]): return [following for following in followings if not is_public_account(following)] def main_trails(): select_account() followings = get_all_followings() subjects = filter_one_way_followings(followings) subjects = filter_not_public_accounts(subjects) subjects = filter_not_in_whitelist(subjects) subjects = filter_not_in_blacklist(subjects)
trials(subjects)
2
2023-11-11 18:54:25+00:00
2k
Shritesh99/strawberry-django-social-auth
gql_social_auth/mixins.py
[ { "identifier": "social_auth", "path": "gql_social_auth/decorators.py", "snippet": "def social_auth(f):\n \"\"\"\n Decorator for Getting social User. Use this decorator if you want to customize the SocialAuthMixin.\n :param f: Input: SocialAuthInput(provider, accessToken)\n :return: function with two additional arguments\n user: Entire User Object (Get your social data using user.social_user)\n errors: Any error occurred in the process of getting the Social User\n \"\"\"\n @psa\n @wraps(f)\n def wrapper(cls, info, _input, social_user, errors, **kwargs):\n def on_resolve(payload):\n payload.social_user = social_user\n payload.errors = errors\n return payload\n\n result = f(cls, info, _input, social_user, errors, **kwargs)\n\n if is_thenable(result):\n return Promise.resolve(result).then(on_resolve)\n return on_resolve(result)\n\n return wrapper" }, { "identifier": "SocialAuthInput", "path": "gql_social_auth/types.py", "snippet": "class SocialAuthInput:\n provider: str\n access_token: str" }, { "identifier": "SocialType", "path": "gql_social_auth/types.py", "snippet": "class SocialType(ObtainJSONWebTokenType):\n uid: Optional[str] = strawberry.field(\n description=\"User's uid\", default=None)\n avatar: Optional[str] = strawberry.field(\n description=\"User's Avarar's URL\", default=None)\n provider: Optional[str] = strawberry.field(\n description=\"OAUTH provider\", default=None)\n extra_data: Optional[SocialJSON] = strawberry.field(\n description=\"Extra data requested from user\",\n resolver=resolve_extra_data)\n\n @classmethod\n def from_social_user(cls, social_user) -> \"SocialType\":\n \"\"\"\n Creates a new token and possibly a new refresh token based on the user.\n \"\"\"\n ret = SocialType(\n success=True,\n user=cast(UserType, social_user),\n token=TokenType.from_user(social_user),\n uid=social_user.social_user.uid,\n provider=social_user.social_user.provider,\n )\n if hasattr(settings, 'SOCIAL_AUTH_PIPELINE') and 'gql_social_auth.pipeline.get_avatar' in settings.SOCIAL_AUTH_PIPELINE:\n ret.avatar = social_user.avatar\n if app_settings.JWT_LONG_RUNNING_REFRESH_TOKEN:\n ret.refresh_token = cast(RefreshTokenType, RefreshToken.from_user(social_user))\n return ret" } ]
from strawberry.types import Info from gqlauth.user.resolvers import BaseMixin from .decorators import social_auth from .types import SocialAuthInput from .types import SocialType
673
class SocialAuthMixin(BaseMixin): """Social Auth takes OAuth Provider and OAuth Access Token Allow user to perform social auth for the given OAuth provider and OAuth Access token :returns user: Entire User Object (Get your social data using user.social_user) errors: Any error occurred in the process of getting the Social User """ @classmethod
class SocialAuthMixin(BaseMixin): """Social Auth takes OAuth Provider and OAuth Access Token Allow user to perform social auth for the given OAuth provider and OAuth Access token :returns user: Entire User Object (Get your social data using user.social_user) errors: Any error occurred in the process of getting the Social User """ @classmethod
@social_auth
0
2023-11-12 23:27:04+00:00
2k
Scholar01/ComfyUI-Keyframe
keyframe/samples.py
[ { "identifier": "is_injected_model", "path": "keyframe/util.py", "snippet": "def is_injected_model(model):\n return hasattr(model, KEYFRAME_INJECTED_ATTR)" }, { "identifier": "get_injected_model", "path": "keyframe/util.py", "snippet": "def get_injected_model(model):\n return getattr(model, KEYFRAME_INJECTED_ATTR)" }, { "identifier": "generate_sigmas", "path": "keyframe/util.py", "snippet": "def generate_sigmas(real_model, x, origin_sigmas, scheduler, steps, part_group, device):\n batch_size = x.shape[0]\n new_sigmas = origin_sigmas.unsqueeze(0).repeat(batch_size, 1)\n\n for part in part_group:\n if part.denoise is None or part.denoise > 0.9999:\n new_sigmas[part.batch_index] = calculate_sigmas_scheduler(real_model, scheduler, steps).to(device)\n else:\n new_steps = int(steps / part.denoise)\n sigmas = calculate_sigmas_scheduler(real_model, scheduler, new_steps).to(device)\n new_sigmas[part.batch_index] = sigmas[-(steps + 1):]\n return new_sigmas" }, { "identifier": "generate_noise", "path": "keyframe/util.py", "snippet": "def generate_noise(model_wrap, sigmas, noise):\n if max_denoise(model_wrap, sigmas):\n n = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0)\n else:\n n = noise * sigmas[0]\n return n" }, { "identifier": "get_ancestral_step", "path": "keyframe/util.py", "snippet": "def get_ancestral_step(sigma_from: torch.Tensor, sigma_to: torch.Tensor, eta: float = 1.) -> (\n torch.Tensor, torch.Tensor):\n if not eta:\n return sigma_to, torch.zeros_like(sigma_to)\n sigma_up = torch.min(sigma_to,\n eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)\n sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5\n\n return sigma_down, sigma_up" } ]
import torch import comfy.samplers from tqdm.auto import trange from comfy.k_diffusion import sampling as k_diffusion_sampling from comfy.k_diffusion.sampling import to_d, default_noise_sampler from .util import is_injected_model, get_injected_model, generate_sigmas, generate_noise, get_ancestral_step
665
CUSTOM_SAMPLERS = [ 'k_euler', 'k_euler_a', 'k_lcm' ] def inject_samples(): comfy.samplers.SAMPLER_NAMES.extend(CUSTOM_SAMPLERS) k_diffusion_sampling.sample_k_euler = sample_k_euler k_diffusion_sampling.sample_k_euler_a = sample_k_euler_a k_diffusion_sampling.sample_k_lcm = sample_k_lcm print(f'Injected samplers: {CUSTOM_SAMPLERS}') def get_sigmas_noise(model_wrap, x, noise, latent_image, sigmas, scheduler, steps, part_group):
CUSTOM_SAMPLERS = [ 'k_euler', 'k_euler_a', 'k_lcm' ] def inject_samples(): comfy.samplers.SAMPLER_NAMES.extend(CUSTOM_SAMPLERS) k_diffusion_sampling.sample_k_euler = sample_k_euler k_diffusion_sampling.sample_k_euler_a = sample_k_euler_a k_diffusion_sampling.sample_k_lcm = sample_k_lcm print(f'Injected samplers: {CUSTOM_SAMPLERS}') def get_sigmas_noise(model_wrap, x, noise, latent_image, sigmas, scheduler, steps, part_group):
sigmas = generate_sigmas(model_wrap.inner_model, x, sigmas, scheduler, steps, part_group, sigmas.device)
2
2023-11-10 13:15:08+00:00
2k
Hamidrezaostadabbas/FOSS4G_Asia_2023
03_Exercise_2/exercise_2/layout_generator/layout_generator.py
[ { "identifier": "LayoutGeneratorDialog", "path": "03_Exercise_2/exercise_2/layout_generator/layout_generator_dialog.py", "snippet": "class LayoutGeneratorDialog(QtWidgets.QDialog, FORM_CLASS):\n def __init__(self, parent=None):\n \"\"\"Constructor.\"\"\"\n super(LayoutGeneratorDialog, self).__init__(parent)\n # Set up the user interface from Designer through FORM_CLASS.\n # After self.setupUi() you can access any designer object by doing\n # self.<objectname>, and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)" }, { "identifier": "import_vector_layer", "path": "03_Exercise_2/exercise_2/layout_generator/core_functions.py", "snippet": "def import_vector_layer(layer_path, layer_name):\n layer = QgsVectorLayer(layer_path, layer_name)\n return layer if layer.isValid() else None" }, { "identifier": "display_vector_layer", "path": "03_Exercise_2/exercise_2/layout_generator/core_functions.py", "snippet": "def display_vector_layer(layer, name=None):\n displayed_layer = QgsProject.instance().addMapLayer(layer)\n if name:\n displayed_layer.setName(name)" }, { "identifier": "zoom_to_layer", "path": "03_Exercise_2/exercise_2/layout_generator/core_functions.py", "snippet": "def zoom_to_layer(layer):\n canvas = iface.mapCanvas()\n extent = layer.extent()\n canvas.setExtent(extent)\n canvas.refresh()" }, { "identifier": "qml_loader", "path": "03_Exercise_2/exercise_2/layout_generator/core_functions.py", "snippet": "def qml_loader(layer, layer_symbol_path):\n layer.loadNamedStyle(layer_symbol_path)\n layer.triggerRepaint()\n iface.layerTreeView().refreshLayerSymbology(layer.id())\n QgsProject.instance().addMapLayers([layer], False)" }, { "identifier": "get_script_path_plugin", "path": "03_Exercise_2/exercise_2/layout_generator/core_functions.py", "snippet": "def get_script_path_plugin():\n return os.path.dirname(__file__)" }, { "identifier": "layout_executor", "path": "03_Exercise_2/exercise_2/layout_generator/layout.py", "snippet": "def layout_executor(\n layers_name, layout_title, city_name, pdf_path\n):\n legend_layers = []\n for layer_name in layers_name:\n layer = QgsProject.instance().mapLayersByName(layer_name)[0]\n legend_layers.append(layer)\n layout = __layout_creator(layout_title)\n __layout_map_window_creator(layer, layout, 20, 7, 294, 285)\n __layout_label_creator(layout, 23, 266, 82, 24, background_color=QColor(255, 255, 255))\n __layout_label_creator(\n layout, 320, 5, 96, 6, layout_title, QColor(0, 182, 228),\n 14, 'bold', 'Arial'\n )\n __layout_legend_creator(layout, legend_layers, 319, 12, 98, 186)\n __layout_label_creator(\n layout, 320, 220, 96, 20, 'City Name', QColor(0, 182, 228), 20,\n 'bold', 'Arial'\n )\n __layout_label_creator(layout, 320, 230, 96, 20, city_name, QColor(), 16)\n __layout_pdf_exporter(layout, pdf_path)" } ]
from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication from qgis.PyQt.QtGui import QIcon from qgis.PyQt.QtWidgets import QAction from .resources import * from .layout_generator_dialog import LayoutGeneratorDialog from .core_functions import ( import_vector_layer, display_vector_layer, zoom_to_layer, qml_loader, get_script_path_plugin ) from .layout import layout_executor import os.path
1,113
# -*- coding: utf-8 -*- """ /*************************************************************************** LayoutGenerator A QGIS plugin auto layout generator Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/ ------------------- begin : 2023-11-24 git sha : $Format:%H$ copyright : (C) 2023 by foss4g-asia email : info@foss4g-asia.com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ class LayoutGenerator: """QGIS Plugin Implementation.""" def __init__(self, iface): """Constructor. :param iface: An interface instance that will be passed to this class which provides the hook by which you can manipulate the QGIS application at run time. :type iface: QgsInterface """ # Save reference to the QGIS interface self.iface = iface # new variables
# -*- coding: utf-8 -*- """ /*************************************************************************** LayoutGenerator A QGIS plugin auto layout generator Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/ ------------------- begin : 2023-11-24 git sha : $Format:%H$ copyright : (C) 2023 by foss4g-asia email : info@foss4g-asia.com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ class LayoutGenerator: """QGIS Plugin Implementation.""" def __init__(self, iface): """Constructor. :param iface: An interface instance that will be passed to this class which provides the hook by which you can manipulate the QGIS application at run time. :type iface: QgsInterface """ # Save reference to the QGIS interface self.iface = iface # new variables
self.layout_generator_dialog = LayoutGeneratorDialog()
0
2023-11-17 09:40:49+00:00
2k
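Each row in this dump pairs a list of cross-file snippets (keyed by "identifier", "path", and "snippet"), an import block, a truncated file body, and the single gold line that completes it — for example, the row above ends with the gold continuation `self.layout_generator_dialog = LayoutGeneratorDialog()`. The sketch below shows one way such a row could be assembled into a next-line completion prompt and scored once the dump is parsed back into dictionaries; the record keys used here and the model call are assumptions for illustration, not part of the dataset itself.

```python
# Minimal sketch, assuming a parsed row is a dict with the keys named below
# (the key names and the stand-in model object are assumptions, not the
# dataset's official loader or API).

def build_prompt(row: dict) -> str:
    """Concatenate cross-file snippets, the import block, and the cropped file body."""
    cross_file = "\n\n".join(
        f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}"
        for entry in row["context"]
    )
    return f"{cross_file}\n\n{row['import_statement']}\n\n{row['cropped_code']}"

def exact_match(prediction: str, row: dict) -> bool:
    """Whitespace-insensitive comparison against the gold next line."""
    return " ".join(prediction.split()) == " ".join(row["next_line"].split())

# Hypothetical usage (my_model is a placeholder, not a real API):
# prompt = build_prompt(row)
# predicted = my_model.complete(prompt, max_lines=1)
# print(exact_match(predicted, row))
```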
micheltlutz/Winged-Python
winged/HTML/table.py
[ { "identifier": "String", "path": "winged/HTML/string.py", "snippet": "class String(GenericElement):\n text = \"\"\n\n def __init__(self, str):\n super().__init__()\n self.text = str\n\n def get_string(self):\n return self.text\n\n def generate(self):\n print(self.get_string())" }, { "identifier": "GenericElement", "path": "winged/core/generic_element.py", "snippet": "class GenericElement(ElementAbstract):\n _elements: [ElementAbstract]\n\n def __init__(self):\n self._elements = []\n\n # This method add a new element\n def add(self, element: ElementAbstract):\n self._elements.append(element)\n\n # This method return tag and all elements\n def get_string(self):\n string = \"\"\n\n for element in self._elements:\n string += element.get_string()\n\n return string\n\n # This method print tag and all elements\n def generate(self):\n print(self.get_string())" }, { "identifier": "Tag", "path": "winged/core/tag.py", "snippet": "class Tag(ElementAbstract):\n _tag: str = \"\"\n _attributes: Attribute\n _container: bool = False\n _form_element: bool = False\n _elements: [ElementAbstract]\n\n def __init__(self, *attributes: AttributeType):\n self._attributes = Attribute()\n self._elements = []\n\n for att in attributes:\n self._attributes.add_attribute(att)\n\n # This method add a new n attributes\n def add_attributes(self, *attributes: AttributeType):\n for att in attributes:\n self._attributes.add_attribute(att)\n\n # This method add a new element\n def add(self, element: ElementAbstract):\n self._elements.append(element)\n\n # This method return open tag and all attributes\n def _get_open_tag(self):\n attr = self._attributes.get_string()\n\n if len(attr) > 0:\n return f\"<{self._tag} {attr}>\"\n else:\n return f\"<{self._tag}>\"\n\n # This method return close tag\n def _get_close_tag(self):\n return f\"</{self._tag}>\"\n\n def get_tag(self):\n return self._tag\n\n def get_attributes(self):\n return self._attributes\n\n def is_container(self):\n return self._container\n\n def is_form_element(self):\n return self._form_element\n\n # This method return tag and all elements\n def get_string(self):\n string = self._get_open_tag()\n\n if self._container:\n for element in self._elements:\n string += element.get_string()\n string += self._get_close_tag()\n return string\n\n # This method print tag and all elements\n def generate(self):\n print(self.get_string())" }, { "identifier": "THead", "path": "winged/HTML/thead.py", "snippet": "class THead(Tag):\n _tag = \"thead\" # Specifies the name of the tag\n _container = True # Specifies that THead can contain other HTML elements\n _form_element = False # Specifies that THead is not a form element" }, { "identifier": "TBody", "path": "winged/HTML/tbody.py", "snippet": "class TBody(Tag):\n _tag = \"tbody\" # Specifies the name of the tag\n _container = True # Specifies that TBody can contain other HTML elements\n _form_element = False # Specifies that TBody is not a form element" }, { "identifier": "Tr", "path": "winged/HTML/tr.py", "snippet": "class Tr(Tag):\n _tag = \"tr\" # Specifies the name of the tag\n _container = True # Specifies that TR can contain other HTML elements\n _form_element = False # Specifies that TR is not a form element" }, { "identifier": "Th", "path": "winged/HTML/th.py", "snippet": "class Th(Tag):\n _tag = \"th\" # Specifies the name of the tag\n _container = True # Specifies that TH can contain other HTML elements\n _form_element = False # Specifies that TH is not a form element" }, { "identifier": "Td", "path": "winged/HTML/td.py", 
"snippet": "class Td(Tag):\n _tag = \"td\" # Specifies the name of the tag\n _container = True # Specifies that TD can encapsulate other HTML elements\n _form_element = False # Specifies that TD is not a form element" } ]
from winged.HTML.string import String from winged.core.generic_element import GenericElement from winged.core.tag import Tag from winged.HTML.thead import THead from winged.HTML.tbody import TBody from winged.HTML.tr import Tr from winged.HTML.th import Th from winged.HTML.td import Td
1,315
""" The Table class is a specific implementation of the HTML 'table' tag in the Winged-Python library. It provides helper methods to generate table structures. Table creation involves creating headers (th), rows (tr), and data cells (td). # Example Usage: ```python table = Table() table.add_table_headers(["Name", "Age", "Height", "Location"]) # Define headers table.add_row() table.add_in_row(String("John")) table.add_in_row(String("25")) table.add_in_row(String("1.80")) table.add_in_row(String("New York")) ``` This would generate a table with mentioned headers and one row of data. """ class Table(Tag): _tag = "table" _container = True _form_element = False def __init__(self): super().__init__() self.tbody = TBody() self.thead = None self.rows = [] def add_table_headers(self, titles, aligns=None, classes=None):
""" The Table class is a specific implementation of the HTML 'table' tag in the Winged-Python library. It provides helper methods to generate table structures. Table creation involves creating headers (th), rows (tr), and data cells (td). # Example Usage: ```python table = Table() table.add_table_headers(["Name", "Age", "Height", "Location"]) # Define headers table.add_row() table.add_in_row(String("John")) table.add_in_row(String("25")) table.add_in_row(String("1.80")) table.add_in_row(String("New York")) ``` This would generate a table with mentioned headers and one row of data. """ class Table(Tag): _tag = "table" _container = True _form_element = False def __init__(self): super().__init__() self.tbody = TBody() self.thead = None self.rows = [] def add_table_headers(self, titles, aligns=None, classes=None):
self.thead = THead()
3
2023-11-18 17:40:48+00:00
2k
davidhozic/TkClassWizard
tkclasswiz/object_frame/frame_string.py
[ { "identifier": "extendable", "path": "tkclasswiz/extensions.py", "snippet": "@doc_category(\"Extensions\")\r\ndef extendable(obj: Union[T, list]) -> T:\r\n \"\"\"\r\n Decorator that makes the obj extendable.\r\n\r\n It wraps the ``obj``, which is a class or a function, into an extension object.\r\n The extension object will adds 3 methods to the original class or function:\r\n\r\n - ``register_pre_extension``\r\n - ``register_post_extension``\r\n - ``get_extensions``\r\n \r\n The ``get_extensions`` method just returns the list of registered \r\n extensions (:class:`tkclasswiz.extensions.Extension`).\r\n\r\n The ``register_pre_extension`` and ``register_post_extension`` methods allow users to extend\r\n the functionality of original tkclass wiz classes or functions.\r\n They accept the extension (:class:`tkclasswiz.extensions.Extension`) parameter.\r\n\r\n Pre-extensions (``register_pre_extension``) get activated / called before the original ``__init__`` method / \r\n before the original function and accept the ``loader`` of the extension must accept the same arguments\r\n as the original ``__init__`` method / original function.\r\n\r\n Post-extensions differ a bit if the thing being extended is a class or a function.\r\n They both have in common that they get activated after the original ``__init__`` method call / original function\r\n call, but they differ in the arguments they receive:\r\n\r\n - In the case of the extended is a class,\r\n the extension ``loader`` accepts the same arguments as the ``__init__`` method receives.\r\n - In the case of the extended is a function,\r\n the extension ``loader`` accepts the same arguments as the original function and an additional parameter,\r\n which is the result of the original function call. The result parameter is passed to the ``loader`` as the\r\n last positional argument.\r\n\r\n\r\n Parameters\r\n ---------------\r\n obj: T\r\n Function or a class that can be extended.\r\n \"\"\"\r\n\r\n if DOCUMENTATION_MODE:\r\n return obj\r\n\r\n if isclass(obj):\r\n @wraps(obj, updated=[])\r\n class ExtendableClass(obj):\r\n __reg_post_ext__ = []\r\n __reg_pre_ext__ = []\r\n\r\n def __init__(self, *args, **kwargs):\r\n for extension in ExtendableClass.__reg_pre_ext__:\r\n extension(self, *args, **kwargs)\r\n\r\n super().__init__(*args, **kwargs)\r\n\r\n extension: Extension\r\n for extension in ExtendableClass.__reg_post_ext__:\r\n extension(self, *args, **kwargs)\r\n\r\n @classmethod\r\n def register_pre_extension(cls, extension: Extension):\r\n cls.__reg_pre_ext__.append(extension)\r\n\r\n @classmethod\r\n def register_post_extension(obj, extension: Extension):\r\n obj.__reg_post_ext__.append(extension)\r\n\r\n @classmethod\r\n def get_extensions(obj):\r\n return obj.__reg_pre_ext__, obj.__reg_post_ext__[:]\r\n\r\n return ExtendableClass\r\n else:\r\n class ExtendableFunction:\r\n __reg_post_ext__ = []\r\n __reg_pre_ext__ = []\r\n\r\n def __init__(self, bind: object = None) -> None:\r\n self.bind = bind\r\n\r\n def __call__(self, *args, **kwargs):\r\n if self.bind is not None:\r\n extra_args = (self.bind,) # self reference\r\n else:\r\n extra_args = ()\r\n\r\n for ext in ExtendableFunction.__reg_pre_ext__:\r\n ext(*extra_args, *args, **kwargs)\r\n\r\n r = obj(*extra_args, *args, **kwargs)\r\n \r\n for ext in ExtendableFunction.__reg_post_ext__:\r\n r = ext(*extra_args, *args, r, **kwargs)\r\n\r\n return r\r\n\r\n def __get__(self, instance, cls):\r\n # Bind the wrapper callable object into a callable object \"instance\"\r\n return 
ExtendableFunction(instance)\r\n\r\n @classmethod\r\n def register_pre_extension(cls, extension: Extension):\r\n cls.__reg_pre_ext__.append(extension)\r\n\r\n @classmethod\r\n def register_post_extension(cls, extension: Extension):\r\n cls.__reg_post_ext__.append(extension)\r\n\r\n @classmethod\r\n def get_extensions(obj):\r\n return obj.__reg_pre_ext__, obj.__reg_post_ext__[:]\r\n\r\n return ExtendableFunction()\r" }, { "identifier": "doc_category", "path": "tkclasswiz/doc.py", "snippet": "def doc_category(\n cat: str,\n manual: Optional[bool] = False,\n path: Optional[str] = None,\n api_type: Literal[\"Program\", \"HTTP\"] = \"Program\"\n):\n \"\"\"\n Used to mark the object for documentation.\n Objects marked with this decorator function will\n have :mod:`sphinx.ext.autodoc` directives generated automatically.\n\n Parameters\n ------------\n cat: str\n The name of the category to put this in.\n manual: Optional[bool]\n Generate ``function`` directives instead of ``autofunction``.\n Should be used when dealing with overloads.\n path: Optional[str]\n Custom path to the object.\n api_type: Literal[\"Program\", \"HTTP\"]\n The type of API, the documented item belongs to.\n Defaults to 'Program'\n \"\"\"\n def _category(item): # item == class or function\n if DOCUMENTATION_MODE:\n cat_map[api_type][cat].append((item, manual, path))\n return item\n\n if DOCUMENTATION_MODE:\n if cat not in cat_map[api_type]:\n cat_map[api_type][cat] = []\n\n return _category" } ]
from typing import Any from ..storage import * from .frame_base import * from ..extensions import extendable from ..doc import doc_category import tkinter as tk
1,400
TEXT_MAX_UNDO = 20 __all__ = ( "NewObjectFrameString", )
TEXT_MAX_UNDO = 20 __all__ = ( "NewObjectFrameString", )
@extendable
0
2023-11-14 09:26:01+00:00
2k
har777/snek-evm
test.py
[ { "identifier": "EVM", "path": "vm.py", "snippet": "class EVM:\n def __init__(self):\n self.address_to_contract = {}\n\n def create_contract(self, bytecode, address):\n contract = Contract(bytecode=bytecode, address=address)\n self.address_to_contract[address] = contract\n return contract\n\n def execute_transaction(self, address, transaction_metadata, operation_metadata=None, debug=False):\n if not operation_metadata:\n operation_metadata = OperationMetadata()\n\n operation = Operation(\n evm=self,\n address=address,\n transaction_metadata=transaction_metadata,\n operation_metadata=operation_metadata,\n )\n operation.execute(debug=debug)\n return operation\n\n def __str__(self):\n return f\"EVM(address_to_contract={self.address_to_contract})\"" }, { "identifier": "TransactionMetadata", "path": "vm.py", "snippet": "class TransactionMetadata:\n def __init__(self, from_address, value=\"0\", data=\"0x\"):\n # calldata has to be even length if present\n if len(data) % 2 != 0:\n raise Exception(\"Invalid calldata length\")\n\n self.from_address = from_address.lower()\n self.value = value\n self.data = data.lower()\n\n def __str__(self):\n return f\"TransactionMetadata(from={self.from_address} value={self.value}, data={self.data})\"" }, { "identifier": "get_create_contract_address", "path": "vm.py", "snippet": "def get_create_contract_address(sender_address: str, sender_nonce: int):\n sender = bytes.fromhex(sender_address[2:])\n contract_address = \"0x\" + keccak.new(digest_bits=256, data=rlp.encode([sender, sender_nonce])).hexdigest()[-40:]\n return contract_address" }, { "identifier": "get_create2_contract_address", "path": "vm.py", "snippet": "def get_create2_contract_address(origin_address: str, salt: int, initialisation_code: str):\n contract_address = \"0x\" + keccak.new(digest_bits=256, data=(\n bytes.fromhex(\"ff\") +\n bytes.fromhex(origin_address[2:]) +\n bytes.fromhex(hex(salt)[2:].rjust(64, \"0\")) +\n bytes.fromhex(keccak.new(digest_bits=256, data=bytes.fromhex(initialisation_code)).hexdigest())\n )).hexdigest()[-40:]\n return contract_address" } ]
import unittest from vm import EVM, TransactionMetadata, get_create_contract_address, get_create2_contract_address
1,417
class UtilTestCase(unittest.TestCase): def test_get_create_contract_address(self): sender_address = "0x6ac7ea33f8831ea9dcc53393aaa88b25a785dbf0" self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=0), "0xcd234a471b72ba2f1ccf0a70fcaba648a5eecd8d") self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=1), "0x343c43a37d37dff08ae8c4a11544c718abb4fcf8") self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=2), "0xf778b86fa74e846c4f0a1fbd1335fe81c00a0c91") self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=3), "0xfffd933a0bc612844eaf0c6fe3e5b8e9b6c1d19c") def test_get_create2_contract_address(self): # https://eips.ethereum.org/EIPS/eip-1014 self.assertEqual( get_create2_contract_address( origin_address="0x0000000000000000000000000000000000000000", salt=0, initialisation_code="00" ), "0x4d1a2e2bb4f88f0250f26ffff098b0b30b26bf38" ) self.assertEqual( get_create2_contract_address( origin_address="0xdeadbeef00000000000000000000000000000000", salt=0, initialisation_code="00" ), "0xb928f69bb1d91cd65274e3c79d8986362984fda3" ) self.assertEqual( get_create2_contract_address( origin_address="0xdeadbeef00000000000000000000000000000000", salt=1455368932401306996839762510191304720241787928576, initialisation_code="00" ), "0xd04116cdd17bebe565eb2422f2497e06cc1c9833" ) self.assertEqual( get_create2_contract_address( origin_address="0x0000000000000000000000000000000000000000", salt=0, initialisation_code="deadbeef" ), "0x70f2b2914a2a4b783faefb75f459a580616fcb5e" ) self.assertEqual( get_create2_contract_address( origin_address="0x00000000000000000000000000000000deadbeef", salt=3405691582, initialisation_code="deadbeef" ), "0x60f3f640a8508fc6a86d45df051962668e1e8ac7" ) self.assertEqual( get_create2_contract_address( origin_address="0x00000000000000000000000000000000deadbeef", salt=3405691582, initialisation_code="deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" ), "0x1d8bfdc5d46dc4f61d6b6115972536ebe6a8854c" ) self.assertEqual( get_create2_contract_address( origin_address="0x0000000000000000000000000000000000000000", salt=0, initialisation_code="" ), "0xe33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0" ) class OpcodeTestCase(unittest.TestCase): def setUp(self):
class UtilTestCase(unittest.TestCase): def test_get_create_contract_address(self): sender_address = "0x6ac7ea33f8831ea9dcc53393aaa88b25a785dbf0" self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=0), "0xcd234a471b72ba2f1ccf0a70fcaba648a5eecd8d") self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=1), "0x343c43a37d37dff08ae8c4a11544c718abb4fcf8") self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=2), "0xf778b86fa74e846c4f0a1fbd1335fe81c00a0c91") self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=3), "0xfffd933a0bc612844eaf0c6fe3e5b8e9b6c1d19c") def test_get_create2_contract_address(self): # https://eips.ethereum.org/EIPS/eip-1014 self.assertEqual( get_create2_contract_address( origin_address="0x0000000000000000000000000000000000000000", salt=0, initialisation_code="00" ), "0x4d1a2e2bb4f88f0250f26ffff098b0b30b26bf38" ) self.assertEqual( get_create2_contract_address( origin_address="0xdeadbeef00000000000000000000000000000000", salt=0, initialisation_code="00" ), "0xb928f69bb1d91cd65274e3c79d8986362984fda3" ) self.assertEqual( get_create2_contract_address( origin_address="0xdeadbeef00000000000000000000000000000000", salt=1455368932401306996839762510191304720241787928576, initialisation_code="00" ), "0xd04116cdd17bebe565eb2422f2497e06cc1c9833" ) self.assertEqual( get_create2_contract_address( origin_address="0x0000000000000000000000000000000000000000", salt=0, initialisation_code="deadbeef" ), "0x70f2b2914a2a4b783faefb75f459a580616fcb5e" ) self.assertEqual( get_create2_contract_address( origin_address="0x00000000000000000000000000000000deadbeef", salt=3405691582, initialisation_code="deadbeef" ), "0x60f3f640a8508fc6a86d45df051962668e1e8ac7" ) self.assertEqual( get_create2_contract_address( origin_address="0x00000000000000000000000000000000deadbeef", salt=3405691582, initialisation_code="deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" ), "0x1d8bfdc5d46dc4f61d6b6115972536ebe6a8854c" ) self.assertEqual( get_create2_contract_address( origin_address="0x0000000000000000000000000000000000000000", salt=0, initialisation_code="" ), "0xe33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0" ) class OpcodeTestCase(unittest.TestCase): def setUp(self):
self.evm = EVM()
0
2023-11-10 14:13:05+00:00
2k
AvaterClasher/eli
tests/middlewares/test_mindsdb.py
[ { "identifier": "CredentialsError", "path": "eli/exceptions/auth.py", "snippet": "class CredentialsError(Exception): ..." }, { "identifier": "NetworkError", "path": "eli/exceptions/connection.py", "snippet": "class NetworkError(Exception): ..." }, { "identifier": "MINDSDB_HOST", "path": "eli/constants/service.py", "snippet": "MINDSDB_HOST = 'https://cloud.mindsdb.com'" }, { "identifier": "MindsDB", "path": "eli/middlewares/mindsdb.py", "snippet": "class MindsDB:\n \"\"\"\n MindsDB manager class\n \"\"\"\n\n def __init__(self, email: str, password: str) -> None:\n \"\"\"\n initializer class.\n Args:\n email: MindsDB account email address (that is stored as an env var)\n password: MindsDB account password\n \"\"\"\n self.email = email\n self.password = password\n\n self.is_authenticated: bool = False\n self.database: Database\n\n def authenticate(self) -> None:\n \"\"\"\n authorizes the email and password with MindsDB's host\n \"\"\"\n\n try:\n server = mindsdb_sdk.connect(\n MINDSDB_HOST,\n login=self.email,\n password=self.password,\n )\n except HTTPError:\n raise CredentialsError('Email or password is incorrect. Make sure to enter the right credentials.')\n except ConnectionError:\n raise NetworkError('Make sure you have access to the internet and try again.')\n\n self.is_authenticated = True\n self.database = self.collect_database(server)\n\n @staticmethod\n def collect_database(server: Server) -> Database:\n return server.list_databases()[0]\n\n def answer(self, question: str) -> Markdown:\n \"\"\"\n takes the question and queries then converts the response into `rich.Markdown`\n Args:\n question: the value from `ask` positional argument\n\n Returns:\n response from MindsDB in Markdown format\n \"\"\"\n\n return Markdown(to_data(\n self.database.query(\n SQL_ASK_QUERY.substitute(\n ask=question,\n user=getuser(),\n )\n ).fetch()\n ))" } ]
import pytest from pandas import DataFrame from unittest.mock import patch, MagicMock from eli.exceptions.auth import CredentialsError from eli.exceptions.connection import NetworkError from requests.exceptions import HTTPError, ConnectionError from eli.constants.service import MINDSDB_HOST from eli.middlewares.mindsdb import MindsDB
726
@patch('mindsdb_sdk.connect') def test_authenticate(mock_connect): email = 'test@test.com' password = 'testpassword' mock_server = MagicMock() mock_connect.return_value = mock_server mindsdb = MindsDB(email, password) mindsdb.authenticate() mock_connect.assert_called_once_with(MINDSDB_HOST, login=email, password=password) mock_server.list_databases.assert_called_once() assert mindsdb.is_authenticated is True def test_authenticate_incorrect_password(): mindsdb = MindsDB('test@test.com', 'testpassword') with pytest.raises(CredentialsError): with patch('mindsdb_sdk.connect', side_effect=HTTPError): mindsdb.authenticate() def test_authenticate_network_error(): mindsdb = MindsDB('test@test.com', 'testpassword')
@patch('mindsdb_sdk.connect') def test_authenticate(mock_connect): email = 'test@test.com' password = 'testpassword' mock_server = MagicMock() mock_connect.return_value = mock_server mindsdb = MindsDB(email, password) mindsdb.authenticate() mock_connect.assert_called_once_with(MINDSDB_HOST, login=email, password=password) mock_server.list_databases.assert_called_once() assert mindsdb.is_authenticated is True def test_authenticate_incorrect_password(): mindsdb = MindsDB('test@test.com', 'testpassword') with pytest.raises(CredentialsError): with patch('mindsdb_sdk.connect', side_effect=HTTPError): mindsdb.authenticate() def test_authenticate_network_error(): mindsdb = MindsDB('test@test.com', 'testpassword')
with pytest.raises(NetworkError):
1
2023-11-16 13:31:55+00:00
2k
xduck7/AI_Spam_checker
start.py
[ { "identifier": "do_prediction", "path": "predict.py", "snippet": "def do_prediction(message):\n\n #подгрузка модели\n loaded_model = load_model('./Model/your_model.h5')\n loaded_label_encoder = joblib.load('./Model/label_encoder.pkl')\n loaded_vectorizer = joblib.load('./Model/vectorizer.pkl')\n\n #получение сообщения\n input_text = [message]\n X_input = loaded_vectorizer.transform(input_text).toarray()\n\n\n y_pred_prob = loaded_model.predict(X_input)\n #print(y_pred_prob) #вывод шанса \\ отладка\n\n #преобразование шанса в 1, если он >0.515\n threshold = 0.515\n y_pred = (y_pred_prob > threshold).astype(int)\n predicted_class = loaded_label_encoder.inverse_transform(y_pred)\n\n #вывод результата\n print(f'Предсказанный класс: {predicted_class[0]}') #отладка\n return y_pred" }, { "identifier": "add_report", "path": "rqst.py", "snippet": "def add_report(msg, pred):\n\n id = gen_id()\n author = names.get_first_name()\n date_time = datetime.datetime.now()\n\n conn = psycopg2.connect(dbname='postgres',\n user='postgres', \n password='postgres', \n host='localhost',\n port='5432')\n cursor = conn.cursor()\n\n cursor.execute(\"INSERT INTO forspam (id, date_time, author, content, class) values \"+\n f\"('{id}','{date_time}','{author}','{msg}','{pred}')\")\n conn.commit()\n\n cursor.close()\n conn.close()" }, { "identifier": "first_start", "path": "rqst.py", "snippet": "def first_start():\n conn = psycopg2.connect(dbname='postgres',\n user='postgres', \n password='postgres', \n host='localhost',\n port='5432')\n cursor = conn.cursor()\n\n cursor.execute(\"CREATE TABLE IF NOT EXISTS forspam (\"+\n \"ID VARCHAR, \" +\n \"DATE_TIME VARCHAR, \" +\n \"AUTHOR VARCHAR, \" +\n \"CONTENT VARCHAR, \" +\n \"CLASS VARCHAR )\")\n conn.commit()\n\n cursor.close()\n conn.close()" } ]
import tkinter as tk from predict import do_prediction from rqst import add_report from rqst import first_start
899
root= tk.Tk() root.title("SPAM CHECKER") root.geometry("500x600") root.resizable(width=True, height=True) def get_input(): inputValue=textBox.get("1.0","end-1c") print(inputValue) textBox.delete('1.0', 'end') return inputValue def union(): msg = get_input() result = do_prediction(msg) if (result == 1): final_opinion = "✅" else: final_opinion = "❌" #final_opinion = ("Spam result is " + str(result)) label_result.configure(text=final_opinion) label_result.pack() add_report(str(msg), str(result[0][0])) image = tk.PhotoImage(file='./Image/logo.png') smaller_image = image.subsample(5, 5) panel = tk.Label(root, image = smaller_image) textBox= tk.Text(root, height=3, width=80, borderwidth=5, font="Arial 18") panel_text = tk.Label(text="Spam checker", font="Arial 16") panel_values = tk.Label(text="✅ = spam \n ❌ = NOT spam", font="Arial 16") buttonCommit= tk.Button(root, height=1, width=10, text="Check spam",font='Arial 20', command=lambda: union(), borderwidth=5) label_result = tk.Label(text="Loading...", font="Arial 20") filler = tk.Label(text=' ')
root= tk.Tk() root.title("SPAM CHECKER") root.geometry("500x600") root.resizable(width=True, height=True) def get_input(): inputValue=textBox.get("1.0","end-1c") print(inputValue) textBox.delete('1.0', 'end') return inputValue def union(): msg = get_input() result = do_prediction(msg) if (result == 1): final_opinion = "✅" else: final_opinion = "❌" #final_opinion = ("Spam result is " + str(result)) label_result.configure(text=final_opinion) label_result.pack() add_report(str(msg), str(result[0][0])) image = tk.PhotoImage(file='./Image/logo.png') smaller_image = image.subsample(5, 5) panel = tk.Label(root, image = smaller_image) textBox= tk.Text(root, height=3, width=80, borderwidth=5, font="Arial 18") panel_text = tk.Label(text="Spam checker", font="Arial 16") panel_values = tk.Label(text="✅ = spam \n ❌ = NOT spam", font="Arial 16") buttonCommit= tk.Button(root, height=1, width=10, text="Check spam",font='Arial 20', command=lambda: union(), borderwidth=5) label_result = tk.Label(text="Loading...", font="Arial 20") filler = tk.Label(text=' ')
first_start()
2
2023-11-18 17:11:44+00:00
2k
TheJacksonLaboratory/geneweaver-boolean-algebra
tests/unit/test_boolean_algebra_tool.py
[ { "identifier": "BOOLEAN_GENESET_GENES_0", "path": "tests/unit/const.py", "snippet": "BOOLEAN_GENESET_GENES_0 = {\n GeneValue(symbol=\"A\", value=1),\n GeneValue(symbol=\"B\", value=1),\n GeneValue(symbol=\"C\", value=1),\n GeneValue(symbol=\"D\", value=1),\n}" }, { "identifier": "BOOLEAN_GENESET_GENES_1", "path": "tests/unit/const.py", "snippet": "BOOLEAN_GENESET_GENES_1 = {\n GeneValue(symbol=\"A\", value=1),\n GeneValue(symbol=\"E\", value=1),\n GeneValue(symbol=\"F\", value=1),\n GeneValue(symbol=\"G\", value=1),\n GeneValue(symbol=\"H\", value=1),\n}" }, { "identifier": "BOOLEAN_GENESET_GENES_2", "path": "tests/unit/const.py", "snippet": "BOOLEAN_GENESET_GENES_2 = {\n GeneValue(symbol=\"A\", value=1),\n GeneValue(symbol=\"E\", value=1),\n GeneValue(symbol=\"F\", value=1),\n GeneValue(symbol=\"G\", value=1),\n GeneValue(symbol=\"H\", value=1),\n GeneValue(symbol=\"I\", value=1),\n}" }, { "identifier": "DIFF_BOOLEAN_GENESET_GENES_0_1_2", "path": "tests/unit/const.py", "snippet": "DIFF_BOOLEAN_GENESET_GENES_0_1_2 = {\n GeneValue(symbol=\"B\", value=1),\n GeneValue(symbol=\"C\", value=1),\n GeneValue(symbol=\"D\", value=1),\n GeneValue(symbol=\"I\", value=1),\n}" }, { "identifier": "INT_BOOLEAN_GENESET_GENES_0_1", "path": "tests/unit/const.py", "snippet": "INT_BOOLEAN_GENESET_GENES_0_1 = {GeneValue(symbol=\"A\", value=1)}" }, { "identifier": "INT_BOOLEAN_GENESET_GENES_0_1_2", "path": "tests/unit/const.py", "snippet": "INT_BOOLEAN_GENESET_GENES_0_1_2 = {GeneValue(symbol=\"A\", value=1)}" }, { "identifier": "INT_BOOLEAN_GENESET_GENES_0_2", "path": "tests/unit/const.py", "snippet": "INT_BOOLEAN_GENESET_GENES_0_2 = {GeneValue(symbol=\"A\", value=1)}" }, { "identifier": "INT_BOOLEAN_GENESET_GENES_1_2", "path": "tests/unit/const.py", "snippet": "INT_BOOLEAN_GENESET_GENES_1_2 = {\n GeneValue(symbol=\"A\", value=1),\n GeneValue(symbol=\"E\", value=1),\n GeneValue(symbol=\"F\", value=1),\n GeneValue(symbol=\"G\", value=1),\n GeneValue(symbol=\"H\", value=1),\n}" }, { "identifier": "UNION_BOOLEAN_GENESET_GENES_0_1", "path": "tests/unit/const.py", "snippet": "UNION_BOOLEAN_GENESET_GENES_0_1 = {\n GeneValue(symbol=\"A\", value=1),\n GeneValue(symbol=\"B\", value=1),\n GeneValue(symbol=\"C\", value=1),\n GeneValue(symbol=\"D\", value=1),\n GeneValue(symbol=\"E\", value=1),\n GeneValue(symbol=\"F\", value=1),\n GeneValue(symbol=\"G\", value=1),\n GeneValue(symbol=\"H\", value=1),\n}" } ]
from pathlib import Path from geneweaver.tools.boolean_algebra.tool import ( BooleanAlgebra, BooleanAlgebraInput, BooleanAlgebraOutput, BooleanAlgebraType, WorkflowType, ) from tests.unit.const import ( BOOLEAN_GENESET_GENES_0, BOOLEAN_GENESET_GENES_1, BOOLEAN_GENESET_GENES_2, DIFF_BOOLEAN_GENESET_GENES_0_1_2, INT_BOOLEAN_GENESET_GENES_0_1, INT_BOOLEAN_GENESET_GENES_0_1_2, INT_BOOLEAN_GENESET_GENES_0_2, INT_BOOLEAN_GENESET_GENES_1_2, UNION_BOOLEAN_GENESET_GENES_0_1, ) import pytest
850
"""Test the boolean algebra tool class.""" @pytest.mark.parametrize( ("input_value", "expected"), [ # Union ( BooleanAlgebraInput( type=BooleanAlgebraType.UNION,
"""Test the boolean algebra tool class.""" @pytest.mark.parametrize( ("input_value", "expected"), [ # Union ( BooleanAlgebraInput( type=BooleanAlgebraType.UNION,
input_genesets=[BOOLEAN_GENESET_GENES_0, BOOLEAN_GENESET_GENES_1],
1
2023-11-15 17:53:26+00:00
2k
jpcadena/fastapi-boilerplate
app/core/lifecycle.py
[ { "identifier": "RedisConnectionManager", "path": "app/api/deps.py", "snippet": "class RedisConnectionManager:\n \"\"\"\n Redis connection manager class\n \"\"\"\n\n def __init__(self, auth_settings: AuthSettings):\n self.url: str = f\"{auth_settings.REDIS_DATABASE_URI}\"\n self.pool: Optional[Redis] = None # type: ignore\n\n async def start(self) -> None:\n \"\"\"\n Start the redis pool connection\n :return: None\n :rtype: NoneType\n \"\"\"\n self.pool = Redis.from_url(self.url, decode_responses=True)\n await self.pool.ping()\n logger.info(\"Redis Database initialized\")\n\n async def stop(self) -> None:\n \"\"\"\n Stops the redis connection\n :return: None\n :rtype: NoneType\n \"\"\"\n await self.pool.close() # type: ignore\n\n async def get_connection(self) -> Optional[Redis]: # type: ignore\n \"\"\"\n Get the connection\n :return: The redis connection\n :rtype: Optional[Redis]\n \"\"\"\n return self.pool\n\n @asynccontextmanager\n async def connection(self) -> AsyncGenerator[Redis, Any]: # type: ignore\n \"\"\"\n Asynchronously get the connection from the pool context manager\n :return: Yields the generator object\n :rtype: AsyncGenerator[Redis, Any]\n \"\"\"\n await self.start()\n yield self.pool # type: ignore\n await self.stop()" }, { "identifier": "get_auth_settings", "path": "app/config/config.py", "snippet": "@lru_cache()\ndef get_auth_settings() -> AuthSettings:\n \"\"\"\n Get auth settings cached\n :return: Auth settings instance\n :rtype: AuthSettings\n \"\"\"\n return AuthSettings()" }, { "identifier": "get_init_settings", "path": "app/config/config.py", "snippet": "@lru_cache()\ndef get_init_settings() -> InitSettings:\n \"\"\"\n Get init settings cached\n :return: The init settings instance\n :rtype: InitSettings\n \"\"\"\n return init_setting" }, { "identifier": "get_settings", "path": "app/config/config.py", "snippet": "@lru_cache()\ndef get_settings() -> Settings:\n \"\"\"\n Get settings cached\n :return: The settings instance\n :rtype: Settings\n \"\"\"\n return Settings()" }, { "identifier": "get_user_repository", "path": "app/crud/user.py", "snippet": "async def get_user_repository() -> UserRepository:\n \"\"\"\n Create a UserRepository with an async database session, an index\n filter, and a unique filter.\n :return: A UserRepository instance\n :rtype: UserRepository\n \"\"\"\n return UserRepository(\n await get_session(),\n await get_index_filter(),\n await get_unique_filter(),\n )" }, { "identifier": "init_db", "path": "app/db/init_db.py", "snippet": "async def init_db(\n user_repo: UserRepository,\n settings: Settings,\n init_settings: InitSettings,\n auth_settings: AuthSettings,\n) -> None:\n \"\"\"\n Initialize the database connection and create the necessary tables.\n :param user_repo: The user repository dependency.\n :type user_repo: UserRepository\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: None\n :rtype: NoneType\n \"\"\"\n await create_db_and_tables()\n await create_superuser(user_repo, settings, init_settings, auth_settings)" }, { "identifier": "get_ip_blacklist_service", "path": "app/services/infrastructure/ip_blacklist.py", "snippet": "def get_ip_blacklist_service(\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n auth_settings: Annotated[AuthSettings, 
Depends(get_auth_settings)],\n) -> IPBlacklistService:\n \"\"\"\n Get an instance of the IP Blacklist service\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: IPBlacklistService instance\n :rtype: IPBlacklistService\n \"\"\"\n return IPBlacklistService(redis, auth_settings.BLACKLIST_EXPIRATION_SECONDS)" } ]
import logging from contextlib import asynccontextmanager from typing import Any, AsyncGenerator from fastapi import FastAPI from app.api.deps import RedisConnectionManager from app.config.config import get_auth_settings, get_init_settings, get_settings from app.crud.user import get_user_repository from app.db.init_db import init_db from app.services.infrastructure.ip_blacklist import get_ip_blacklist_service
1,182
""" A module for lifecycle in the app-core package. """ logger: logging.Logger = logging.getLogger(__name__) @asynccontextmanager async def lifespan(application: FastAPI) -> AsyncGenerator[Any, None]: """ The lifespan of the application :param application: The FastAPI application :type application: FastAPI :return: An asynchronous generator for the application :rtype: AsyncGenerator[Any, None] """ logger.info("Starting API...") try:
""" A module for lifecycle in the app-core package. """ logger: logging.Logger = logging.getLogger(__name__) @asynccontextmanager async def lifespan(application: FastAPI) -> AsyncGenerator[Any, None]: """ The lifespan of the application :param application: The FastAPI application :type application: FastAPI :return: An asynchronous generator for the application :rtype: AsyncGenerator[Any, None] """ logger.info("Starting API...") try:
application.state.settings = get_settings()
3
2023-11-17 00:32:32+00:00
2k
juliusmarkwei/auth-system
backend/accounts/views.py
[ { "identifier": "UserSerializer", "path": "backend/accounts/serializers.py", "snippet": "class UserSerializer(serializers.ModelSerializer):\n date_joined = serializers.ReadOnlyField()\n password = serializers.CharField(write_only=True)\n class Meta(object):\n model = User\n fields = (\n \"id\",\n \"username\",\n \"is_verified\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"address\",\n \"phone\",\n \"date_joined\",\n \"updated_at\",\n \"password\"\n )\n extra_kwargs = {\"password\": {\"write_only\": True}}\n \n def create(self, validated_data):\n password = validated_data.pop('password', None)\n instance = self.Meta.model(**validated_data)\n \n # Adding the below line made it work for me.\n instance.is_active = True\n if password is not None:\n # Set password does the hash, so you don't need to call make_password \n instance.set_password(password)\n instance.save()\n return instance" }, { "identifier": "User", "path": "backend/accounts/models.py", "snippet": "class User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(max_length=40, unique=True)\n is_verified = models.BooleanField(default=False)\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n username = models.CharField(max_length=100, unique=True)\n is_staff = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n address = models.CharField(max_length=100, blank=True, null=True)\n phone = models.CharField(max_length=20, blank=True, null=True)\n date_joined = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)\n\n objects = UserManager()\n\n USERNAME_FIELD = \"username\"\n REQUIRED_FIELDS = [\"email\", \"phone\", \"first_name\", \"last_name\"]\n\n def save(self, *args, **kwargs):\n super(User, self).save(*args, **kwargs)\n return self\n \n def __str__(self):\n return self.username\n\n class Meta:\n ordering = (\"-date_joined\",)\n verbose_name = \"User\"\n verbose_name_plural = \"Users\"" }, { "identifier": "EmailConfirmationToken", "path": "backend/accounts/models.py", "snippet": "class EmailConfirmationToken(models.Model):\n user = models.OneToOneField(User, unique=True, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)\n \n def __str__(self):\n return self.user\n\n class Meta:\n verbose_name = \"Email Confirmation Token\"\n verbose_name_plural = \"Email Confirmation Tokens\"" }, { "identifier": "send_confirmation_email", "path": "backend/accounts/utils.py", "snippet": "def send_confirmation_email(email, token_id, user_id):\n data = {\n \"token_id\": str(token_id),\n \"user_id\": str(user_id)\n }\n \n message = get_template(\"accounts/confirmation_email.txt\").render(data)\n send_mail(\n subject=\"Email Confirmation\",\n message=message,\n from_email=\"juliusmarkwei2000@gmail.com\",\n recipient_list=[email],\n fail_silently=True\n )\n print(\"Email confirmation sent!\")" } ]
from rest_framework.views import APIView from rest_framework.permissions import AllowAny, IsAuthenticated from rest_framework.response import Response from rest_framework import status from .serializers import UserSerializer from .models import User, EmailConfirmationToken from .utils import send_confirmation_email
1,123
class UserAPIView(APIView): permission_classes = [AllowAny,] def post(self, request): user = request.data serializer = UserSerializer(data=user) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data, status=status.HTTP_201_CREATED) def get_queryset(self): return User.objects.all() def get(self, request, *args, **kwargs): users = self.get_queryset() serializer = UserSerializer(users, many=True) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, *args, **kwargs): serializer_data = request.data.get("user", {}) serializer = UserSerializer(request.user, data=serializer_data, partial=True) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data, status=status.HTTP_200_OK) class UserInformationAPIView(APIView): permission_classes = [IsAuthenticated] def get(self, request, *args, **kwargs): user = request.user email = user.email is_verified = user.is_verified payload = {"email": email, "is_verified": is_verified} return Response(data=payload, status=status.HTTP_200_OK) class SendEmailConfirmationTokenAPIView(APIView): permission_classes = [IsAuthenticated] def post(self, request, format=None): user = request.user token = EmailConfirmationToken.objects.create(user=user)
class UserAPIView(APIView): permission_classes = [AllowAny,] def post(self, request): user = request.data serializer = UserSerializer(data=user) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data, status=status.HTTP_201_CREATED) def get_queryset(self): return User.objects.all() def get(self, request, *args, **kwargs): users = self.get_queryset() serializer = UserSerializer(users, many=True) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, *args, **kwargs): serializer_data = request.data.get("user", {}) serializer = UserSerializer(request.user, data=serializer_data, partial=True) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data, status=status.HTTP_200_OK) class UserInformationAPIView(APIView): permission_classes = [IsAuthenticated] def get(self, request, *args, **kwargs): user = request.user email = user.email is_verified = user.is_verified payload = {"email": email, "is_verified": is_verified} return Response(data=payload, status=status.HTTP_200_OK) class SendEmailConfirmationTokenAPIView(APIView): permission_classes = [IsAuthenticated] def post(self, request, format=None): user = request.user token = EmailConfirmationToken.objects.create(user=user)
send_confirmation_email(email=user.email, token_id=token.pk, user_id=user.pk)
3
2023-11-17 17:55:59+00:00
2k
vitant-lang/CBAM-ASPP
nets/deeplabv3_plus.py
[ { "identifier": "xception", "path": "nets/xception.py", "snippet": "def xception(pretrained=True, downsample_factor=16):\n model = Xception(downsample_factor=downsample_factor)\n if pretrained:\n model.load_state_dict(load_url('https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/xception_pytorch_imagenet.pth'), strict=False)\n return model" }, { "identifier": "mobilenetv2", "path": "nets/mobilenetv2.py", "snippet": "def mobilenetv2(pretrained=False, **kwargs):\n model = MobileNetV2(n_class=1000, **kwargs)\n if pretrained:\n model.load_state_dict(load_url('https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/mobilenet_v2.pth.tar'), strict=False)\n return model" }, { "identifier": "se_block", "path": "nets/attention.py", "snippet": "class se_block(nn.Module):\n def __init__(self, channel, ratio=16):\n super(se_block, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Conv2d(channel, channel // ratio, kernel_size=1, bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // ratio, channel, kernel_size=1, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n y = self.avg_pool(x)\n y = self.fc(y)\n return x * y" }, { "identifier": "CBAM", "path": "nets/attention.py", "snippet": "class CBAM(nn.Module):\n def __init__(self, in_channels, ratio=16, kernel_size=7):\n super(CBAM, self).__init__()\n self.channel_attention = ChannelAttention(in_channels, ratio)\n self.spatial_attention = SpatialAttention(in_channels, kernel_size) # 传入 in_channels 参数\n\n def forward(self, x):\n x = self.channel_attention(x)\n x = self.spatial_attention(x)\n return x" }, { "identifier": "eca_block", "path": "nets/attention.py", "snippet": "class eca_block(nn.Module):\n def __init__(self, channel, b=1, gamma=2):\n super(eca_block, self).__init__()\n kernel_size = int(abs((math.log(channel, 2) + b) / gamma))\n kernel_size = kernel_size if kernel_size % 2 else kernel_size + 1\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n y = self.avg_pool(x)\n y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)\n y = self.sigmoid(y)\n return x * y.expand_as(x)" } ]
import torch import torch.nn as nn import torch.nn.functional as F from .xception import xception from .mobilenetv2 import mobilenetv2 from .attention import se_block,CBAM,eca_block from functools import partial
795
atteionb=[se_block,CBAM,eca_block] class MobileNetV2(nn.Module): def __init__(self, downsample_factor=8, pretrained=True): super(MobileNetV2, self).__init__()
atteionb=[se_block,CBAM,eca_block] class MobileNetV2(nn.Module): def __init__(self, downsample_factor=8, pretrained=True): super(MobileNetV2, self).__init__()
model = mobilenetv2(pretrained)
1
2023-11-17 13:25:28+00:00
2k
JiNanPiWang/apple_health_export_gpx_add_heartrate
src/strava_gpx_uploader.py
[ { "identifier": "RateLimitException", "path": "utils/exceptions.py", "snippet": "class RateLimitException(Exception):\n def __init__(self, message=\"API rate limit exceeded\"):\n self.message = message\n super().__init__(self.message)" }, { "identifier": "NoInternetException", "path": "utils/exceptions.py", "snippet": "class NoInternetException(Exception):\n def __init__(self, message=\"No Internet connection\"):\n self.message = message\n super().__init__(self.message)" } ]
import json import os import time from stravalib.util.limiter import RateLimiter, XRateLimitRule from stravalib.client import Client, exc from utils.exceptions import RateLimitException, NoInternetException
830
def get_strava_client(access_token): token = access_token rate_limiter = RateLimiter() rate_limiter.rules.append(XRateLimitRule( {'short': {'usageFieldIndex': 0, 'usage': 0, # 60s * 15 = 15 min 'limit': 100, 'time': (60 * 15), 'lastExceeded': None, }, 'long': {'usageFieldIndex': 1, 'usage': 0, # 60s * 60m * 24 = 1 day 'limit': 1000, 'time': (60 * 60 * 24), 'lastExceeded': None}})) client = Client(rate_limiter=rate_limiter) client.access_token = token return client class StravaGpxUploader: def __init__(self, file_path: str, activity_type): with open("config/strava_config.json", 'r') as f: strava_config = json.load(f) # Edit access_token in the strava_config.json or edit here # like access_token = '***' self.file_path = file_path self.access_token = strava_config["access_token"] self.activity_type = activity_type self.client = get_strava_client(self.access_token) def get_athlete_name(self): athlete = None for i in range(2): try: athlete = self.client.get_athlete() except exc.RateLimitExceeded as err: if i > 0: raise RateLimitException("Daily Rate limit exceeded") print("Rate limit exceeded in connecting - Retrying strava connection in 15 minutes") time.sleep(900) continue break print("Now authenticated for " + athlete.firstname + " " + athlete.lastname) # client, gpxfile, strava_activity_type, notes def upload_gpx(self): gpxfile = self.file_path if not os.path.isfile(gpxfile): print("No file found for " + gpxfile + "!") return False print("Uploading " + gpxfile) for i in range(2): try: # 如果上传成功,则会直接到底下break upload = self.client.upload_activity( activity_file=open(gpxfile, 'r'), data_type='gpx', description='', activity_type=self.activity_type ) except exc.RateLimitExceeded as err: # 第二次循环才会直接到这里 # 这里是说今天已经超过了限制,退出程序 if i > 0: raise RateLimitException("Daily Rate limit exceeded, please try tomorrow") # 第一次循环会直接到这里 # 这里是说这一次超过了限制,等待15分钟 print("Rate limit exceeded in uploading - auto pausing uploads for 15 minutes to avoid rate-limit") time.sleep(900) continue except ConnectionError as err:
def get_strava_client(access_token): token = access_token rate_limiter = RateLimiter() rate_limiter.rules.append(XRateLimitRule( {'short': {'usageFieldIndex': 0, 'usage': 0, # 60s * 15 = 15 min 'limit': 100, 'time': (60 * 15), 'lastExceeded': None, }, 'long': {'usageFieldIndex': 1, 'usage': 0, # 60s * 60m * 24 = 1 day 'limit': 1000, 'time': (60 * 60 * 24), 'lastExceeded': None}})) client = Client(rate_limiter=rate_limiter) client.access_token = token return client class StravaGpxUploader: def __init__(self, file_path: str, activity_type): with open("config/strava_config.json", 'r') as f: strava_config = json.load(f) # Edit access_token in the strava_config.json or edit here # like access_token = '***' self.file_path = file_path self.access_token = strava_config["access_token"] self.activity_type = activity_type self.client = get_strava_client(self.access_token) def get_athlete_name(self): athlete = None for i in range(2): try: athlete = self.client.get_athlete() except exc.RateLimitExceeded as err: if i > 0: raise RateLimitException("Daily Rate limit exceeded") print("Rate limit exceeded in connecting - Retrying strava connection in 15 minutes") time.sleep(900) continue break print("Now authenticated for " + athlete.firstname + " " + athlete.lastname) # client, gpxfile, strava_activity_type, notes def upload_gpx(self): gpxfile = self.file_path if not os.path.isfile(gpxfile): print("No file found for " + gpxfile + "!") return False print("Uploading " + gpxfile) for i in range(2): try: # 如果上传成功,则会直接到底下break upload = self.client.upload_activity( activity_file=open(gpxfile, 'r'), data_type='gpx', description='', activity_type=self.activity_type ) except exc.RateLimitExceeded as err: # 第二次循环才会直接到这里 # 这里是说今天已经超过了限制,退出程序 if i > 0: raise RateLimitException("Daily Rate limit exceeded, please try tomorrow") # 第一次循环会直接到这里 # 这里是说这一次超过了限制,等待15分钟 print("Rate limit exceeded in uploading - auto pausing uploads for 15 minutes to avoid rate-limit") time.sleep(900) continue except ConnectionError as err:
raise NoInternetException("No Internet connection: {}".format(err))
1
2023-11-14 01:50:02+00:00
2k
rgrizzell/CircuitPython_LILYGO_T-Deck
examples/lilygo_tdeck_custom_keyboard.py
[ { "identifier": "Keyboard", "path": "lilygo_tdeck.py", "snippet": "class Keyboard:\n \"\"\"Controls the keyboard peripheral. This class can be extended to support additional\n functionality if the keyboard is utilizing custom firmware.\n\n :param i2c: Object representing the I2C interface used to communicate with the keyboard.\n :type i2c: I2C\n :param int device_address: The I2C address of the keyboard device. Default is 0x55 (85).\n \"\"\"\n\n def __init__(self, i2c: I2C, device_address: int = None) -> None:\n self._i2c = i2c\n self._i2c_addr = device_address or _KEYBOARD_I2C_ADDR\n\n def get_keypress(self) -> str | None:\n \"\"\"Get the last keypress.\n\n :return: character representing the key that was pressed\n \"\"\"\n buf = bytearray(1)\n self._i2c.try_lock()\n self._i2c.readfrom_into(self._i2c_addr, buffer=buf)\n self._i2c.unlock()\n\n if buf != b\"\\x00\":\n return buf.decode()\n return None" }, { "identifier": "TDeck", "path": "lilygo_tdeck.py", "snippet": "class TDeck:\n \"\"\"Class representing the LILYGO T-Deck.\n\n :param keyboard: Object representing the keyboard. If none is provided, one is created.\n :type keyboard: Keyboard\n :param trackball: Object representing the trackball. If none is provided, one is created.\n :type trackball: Trackball\n :param bool debug: Print extra debug statements during initialization.\n \"\"\"\n\n def __init__(\n self,\n keyboard: Keyboard = None,\n trackball: Trackball = None,\n debug: bool = False,\n ) -> None:\n self.debug = debug\n if sys.implementation.version[0] < 9:\n raise NotImplementedError(\n \"LILYGO T-Deck only supports CircuitPython version 9.0.0 or greater\"\n )\n\n self._i2c = board.I2C()\n self._spi = board.SPI()\n\n # Touchscreen\n self._debug(\"Init touchscreen\")\n # TODO: Create driver: https://github.com/rgrizzell/CircuitPython_GT911\n # int_pin = DigitalInOut(board.TOUCH_INT)\n # self.touchscreen = GT911(self._i2c, _TOUCHSCREEN_I2C_ADDR, int_pin=int_pin)\n\n # Keyboard\n self._debug(\"Init keyboard\")\n self.keyboard = keyboard or Keyboard(self._i2c)\n self.get_keypress = self.keyboard.get_keypress\n\n # Trackball\n self._debug(\"Init Trackball\")\n self.trackball = trackball or Trackball(\n board.TRACKBALL_UP,\n board.TRACKBALL_RIGHT,\n board.TRACKBALL_DOWN,\n board.TRACKBALL_LEFT,\n board.TRACKBALL_CLICK,\n )\n self.get_trackball = self.trackball.get_trackball\n self.get_click = self.trackball.get_click\n\n # SD Card\n self._debug(\"Init SD Card\")\n self.sdcard = None\n try:\n self.sdcard = SDCard(self._spi, board.SDCARD_CS)\n vfs = storage.VfsFat(self.sdcard)\n storage.mount(vfs, \"/sd\")\n except OSError as error:\n print(\"SD Card disabled:\", error)\n\n # Speaker\n self._debug(\"Init Speaker\")\n self.speaker = None\n try:\n self.speaker = audiobusio.I2SOut(\n board.SPEAKER_SCK, board.SPEAKER_WS, board.SPEAKER_DOUT\n )\n except RuntimeError:\n pass\n\n # Microphone\n self._debug(\"Init Microphone\")\n self.microphone = None\n if hasattr(audiobusio, \"I2SIn\"):\n self.microphone = audiobusio.I2SIn(\n board.MICROPHONE_SCK,\n board.MICROPHONE_WS,\n board.MICROPHONE_DIN,\n board.MICROPHONE_MCK,\n )\n else:\n print(\"Microphone disabled: audiobusio does not support I2S input\")\n\n # LoRa - Optional\n # self._debug(\"Init LoRa\")\n\n def _debug(self, msg):\n if self.debug:\n print(msg)" } ]
import time import board from lilygo_tdeck import Keyboard, TDeck
1,310
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries # SPDX-FileCopyrightText: Copyright (c) 2023 Robert Grizzell # # SPDX-License-Identifier: Unlicense class MyCustomKeyboard(Keyboard): def __init__(self, backlight: bool = True): super().__init__(board.I2C()) self.backlight(backlight) def backlight(self, state: bool = None, register: int = 0x1): """Send an I2C command to control the keyboard backlight. Custom keyboard firmware is required for this to work. """ if state is None: buf = bytearray(1) else: buf = bytearray(2) buf[1] = int(state) buf[0] = register self._i2c.try_lock() self._i2c.writeto(self._i2c_addr, buffer=buf) self._i2c.unlock() k = MyCustomKeyboard()
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries # SPDX-FileCopyrightText: Copyright (c) 2023 Robert Grizzell # # SPDX-License-Identifier: Unlicense class MyCustomKeyboard(Keyboard): def __init__(self, backlight: bool = True): super().__init__(board.I2C()) self.backlight(backlight) def backlight(self, state: bool = None, register: int = 0x1): """Send an I2C command to control the keyboard backlight. Custom keyboard firmware is required for this to work. """ if state is None: buf = bytearray(1) else: buf = bytearray(2) buf[1] = int(state) buf[0] = register self._i2c.try_lock() self._i2c.writeto(self._i2c_addr, buffer=buf) self._i2c.unlock() k = MyCustomKeyboard()
t = TDeck(keyboard=k)
1
2023-11-11 15:13:00+00:00
2k
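The bare integer that follows each gold line appears to index into that row's snippet list, marking which cross-file definition the completion depends on; in the T-Deck row above, index 1 selects the "TDeck" entry and the gold line `t = TDeck(keyboard=k)` indeed references it. The sketch below illustrates that reading under the same assumed key names as before; it is a heuristic check for illustration, not part of the dataset.

```python
# Sketch only: interpret the integer after the gold line as an index into the
# row's list of {"identifier", "path", "snippet"} entries. Key names are
# assumed, and the index is assumed to have been parsed as an int.

def gold_context_entry(row: dict) -> dict:
    """Return the cross-file entry referenced by the gold snippet index."""
    return row["context"][row["gold_snippet_index"]]

def gold_line_mentions_gold_identifier(row: dict) -> bool:
    """Heuristic sanity check: the gold next line should use that identifier."""
    return gold_context_entry(row)["identifier"] in row["next_line"]
```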
dataaug/open-interpreter-free
tests/test_interpreter.py
[ { "identifier": "count_messages_tokens", "path": "interpreter/utils/count_tokens.py", "snippet": "def count_messages_tokens(messages=[], model=None):\n \"\"\"\n Count the number of tokens in a list of messages\n \"\"\"\n\n tokens_used = 0\n\n for message in messages:\n if isinstance(message, str):\n tokens_used += count_tokens(message, model=model)\n elif \"message\" in message:\n tokens_used += count_tokens(message[\"message\"], model=model)\n\n if \"code\" in message:\n tokens_used += count_tokens(message[\"code\"], model=model)\n\n if \"output\" in message:\n tokens_used += count_tokens(message[\"output\"], model=model)\n\n prompt_cost = token_cost(tokens_used, model=model)\n\n return (tokens_used, prompt_cost)" }, { "identifier": "count_tokens", "path": "interpreter/utils/count_tokens.py", "snippet": "def count_tokens(text=\"\", model=\"gpt-4\"):\n \"\"\"\n Count the number of tokens in a string\n \"\"\"\n\n encoder = tiktoken.encoding_for_model(model)\n\n return len(encoder.encode(text))" } ]
import os import re import time import interpreter from random import randint from interpreter.utils.count_tokens import count_messages_tokens, count_tokens
1,581
Round to 2 decimal places. """.strip() messages = interpreter.chat(order_of_operations_message) assert str(round(test_result, 2)) in messages[-1]["message"] def test_delayed_exec(): interpreter.chat( """Can you write a single block of code and execute it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!""" ) def test_nested_loops_and_multiple_newlines(): interpreter.chat( """Can you write a nested for loop in python and shell and run them? Don't forget to properly format your shell script and use semicolons where necessary. Also put 1-3 newlines between each line in the code. Only generate and execute the code. No explanations. Thanks!""" ) def test_write_to_file(): interpreter.chat("""Write the word 'Washington' to a .txt file called file.txt""") assert os.path.exists("file.txt") interpreter.messages = [] # Just reset message history, nothing else for this test messages = interpreter.chat( """Read file.txt in the current directory and tell me what's in it.""" ) assert "Washington" in messages[-1]["message"] def test_markdown(): interpreter.chat( """Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. DO NOT write the markdown inside a markdown code block, just write it raw.""" ) def test_generator(): start_of_message_emitted = False end_of_message_emitted = False start_of_code_emitted = False end_of_code_emitted = False executing_emitted = False end_of_execution_emitted = False for chunk in interpreter.chat("What's 38023*40334?", stream=True, display=False): print(chunk) if "start_of_message" in chunk: start_of_message_emitted = True if "end_of_message" in chunk: end_of_message_emitted = True if "start_of_code" in chunk: start_of_code_emitted = True if "end_of_code" in chunk: end_of_code_emitted = True if "executing" in chunk: executing_emitted = True if "end_of_execution" in chunk: end_of_execution_emitted = True assert start_of_message_emitted assert end_of_message_emitted assert start_of_code_emitted assert end_of_code_emitted assert executing_emitted assert end_of_execution_emitted def test_config_loading(): # because our test is running from the root directory, we need to do some # path manipulation to get the actual path to the config file or our config # loader will try to load from the wrong directory and fail currentPath = os.path.dirname(os.path.abspath(__file__)) config_path = os.path.join(currentPath, "./config.test.yaml") interpreter.extend_config(config_path=config_path) # check the settings we configured in our config.test.yaml file temperature_ok = interpreter.temperature == 0.25 model_ok = interpreter.model == "gpt-3.5-turbo" debug_mode_ok = interpreter.debug_mode == True assert temperature_ok and model_ok and debug_mode_ok def test_system_message_appending(): ping_system_message = ( "Respond to a `ping` with a `pong`. No code. No explanations. Just `pong`." ) ping_request = "ping" pong_response = "pong" interpreter.system_message += ping_system_message messages = interpreter.chat(ping_request) assert messages == [ {"role": "user", "message": ping_request}, {"role": "assistant", "message": pong_response}, ] def test_reset(): # make sure that interpreter.reset() clears out the messages Array assert interpreter.messages == [] def test_token_counter(): system_tokens = count_tokens( text=interpreter.system_message, model=interpreter.model ) prompt = "How many tokens is this?" 
prompt_tokens = count_tokens(text=prompt, model=interpreter.model) messages = [ {"role": "system", "message": interpreter.system_message} ] + interpreter.messages
# this function will run before each test # we're clearing out the messages Array so we can start fresh and reduce token usage def setup_function(): interpreter.reset() interpreter.temperature = 0 interpreter.auto_run = True interpreter.model = "gpt-4" interpreter.debug_mode = False # this function will run after each test # we're introducing some sleep to help avoid timeout issues with the OpenAI API def teardown_function(): time.sleep(5) def test_hello_world(): hello_world_response = "Hello, World!" hello_world_message = f"Please reply with just the words {hello_world_response} and nothing else. Do not run code. No confirmation just the text." messages = interpreter.chat(hello_world_message) assert messages == [ {"role": "user", "message": hello_world_message}, {"role": "assistant", "message": hello_world_response}, ] def test_math(): # we'll generate random integers between this min and max in our math tests min_number = randint(1, 99) max_number = randint(1001, 9999) n1 = randint(min_number, max_number) n2 = randint(min_number, max_number) test_result = n1 + n2 * (n1 - n2) / (n2 + n1) order_of_operations_message = f""" Please perform the calculation `{n1} + {n2} * ({n1} - {n2}) / ({n2} + {n1})` then reply with just the answer, nothing else. No confirmation. No explanation. No words. Do not use commas. Do not show your work. Just return the result of the calculation. Do not introduce the results with a phrase like \"The result of the calculation is...\" or \"The answer is...\" Round to 2 decimal places. """.strip() messages = interpreter.chat(order_of_operations_message) assert str(round(test_result, 2)) in messages[-1]["message"] def test_delayed_exec(): interpreter.chat( """Can you write a single block of code and execute it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!""" ) def test_nested_loops_and_multiple_newlines(): interpreter.chat( """Can you write a nested for loop in python and shell and run them? Don't forget to properly format your shell script and use semicolons where necessary. Also put 1-3 newlines between each line in the code. Only generate and execute the code. No explanations. Thanks!""" ) def test_write_to_file(): interpreter.chat("""Write the word 'Washington' to a .txt file called file.txt""") assert os.path.exists("file.txt") interpreter.messages = [] # Just reset message history, nothing else for this test messages = interpreter.chat( """Read file.txt in the current directory and tell me what's in it.""" ) assert "Washington" in messages[-1]["message"] def test_markdown(): interpreter.chat( """Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. 
DO NOT write the markdown inside a markdown code block, just write it raw.""" ) def test_generator(): start_of_message_emitted = False end_of_message_emitted = False start_of_code_emitted = False end_of_code_emitted = False executing_emitted = False end_of_execution_emitted = False for chunk in interpreter.chat("What's 38023*40334?", stream=True, display=False): print(chunk) if "start_of_message" in chunk: start_of_message_emitted = True if "end_of_message" in chunk: end_of_message_emitted = True if "start_of_code" in chunk: start_of_code_emitted = True if "end_of_code" in chunk: end_of_code_emitted = True if "executing" in chunk: executing_emitted = True if "end_of_execution" in chunk: end_of_execution_emitted = True assert start_of_message_emitted assert end_of_message_emitted assert start_of_code_emitted assert end_of_code_emitted assert executing_emitted assert end_of_execution_emitted def test_config_loading(): # because our test is running from the root directory, we need to do some # path manipulation to get the actual path to the config file or our config # loader will try to load from the wrong directory and fail currentPath = os.path.dirname(os.path.abspath(__file__)) config_path = os.path.join(currentPath, "./config.test.yaml") interpreter.extend_config(config_path=config_path) # check the settings we configured in our config.test.yaml file temperature_ok = interpreter.temperature == 0.25 model_ok = interpreter.model == "gpt-3.5-turbo" debug_mode_ok = interpreter.debug_mode == True assert temperature_ok and model_ok and debug_mode_ok def test_system_message_appending(): ping_system_message = ( "Respond to a `ping` with a `pong`. No code. No explanations. Just `pong`." ) ping_request = "ping" pong_response = "pong" interpreter.system_message += ping_system_message messages = interpreter.chat(ping_request) assert messages == [ {"role": "user", "message": ping_request}, {"role": "assistant", "message": pong_response}, ] def test_reset(): # make sure that interpreter.reset() clears out the messages Array assert interpreter.messages == [] def test_token_counter(): system_tokens = count_tokens( text=interpreter.system_message, model=interpreter.model ) prompt = "How many tokens is this?" prompt_tokens = count_tokens(text=prompt, model=interpreter.model) messages = [ {"role": "system", "message": interpreter.system_message} ] + interpreter.messages
system_token_test = count_messages_tokens(
0
2023-11-16 03:10:42+00:00
2k
TheJacksonLaboratory/geneweaver-client
tests/unit/utils/cli/prompt/pydantic/test_prompt_for_missing_fields.py
[ { "identifier": "MOCK_EXISTING_COMBINATIONS", "path": "tests/unit/utils/cli/prompt/pydantic/conftest.py", "snippet": "MOCK_EXISTING_COMBINATIONS = [\n dict(e)\n for e in chain.from_iterable(\n combinations(MOCK_EXISTING_FIELDS, r)\n for r in range(len(MOCK_EXISTING_FIELDS) + 1)\n )\n]" }, { "identifier": "MOCK_MODEL_FIELD_COMBINATIONS", "path": "tests/unit/utils/cli/prompt/pydantic/conftest.py", "snippet": "MOCK_MODEL_FIELD_COMBINATIONS = [\n set(s)\n for s in chain.from_iterable(\n combinations(MOCK_MODEL_FIELDS, r) for r in range(len(MOCK_MODEL_FIELDS) + 1)\n )\n]" }, { "identifier": "MOCK_MODEL_FIELDS", "path": "tests/unit/utils/cli/prompt/pydantic/conftest.py", "snippet": "MOCK_MODEL_FIELDS = [field_name for field_name in MockModel.__fields__.keys()]" }, { "identifier": "MockModel", "path": "tests/unit/utils/cli/prompt/pydantic/conftest.py", "snippet": "class MockModel(MockInternalModel):\n \"\"\"Mock model for testing.\"\"\"\n\n sub_model: MockInternalModel\n sub_model_optional: Optional[MockInternalModel]" } ]
from unittest.mock import Mock from geneweaver.client.utils.cli.prompt.pydantic import prompt_for_missing_fields from tests.unit.utils.cli.prompt.pydantic.conftest import ( MOCK_EXISTING_COMBINATIONS, MOCK_MODEL_FIELD_COMBINATIONS, MOCK_MODEL_FIELDS, MockModel, ) import pytest
656
"""Test the prompt_for_missing_fields function.""" # We can't use every combination of fields because the number of combinations # grows much too large to be practical. # Instead, we use the first 25 and last 25 combinations. @pytest.mark.parametrize( "existing", MOCK_EXISTING_COMBINATIONS[:25] + MOCK_EXISTING_COMBINATIONS[-25:] ) @pytest.mark.parametrize( "exclude", MOCK_MODEL_FIELD_COMBINATIONS[:25] + MOCK_MODEL_FIELD_COMBINATIONS[-25:] ) @pytest.mark.parametrize("prompt_to_keep_existing", [True, False]) def test_prompt_for_missing(existing, exclude, prompt_to_keep_existing, monkeypatch): """Test the prompt_for_missing_fields function.""" mock_prompt_to_keep = Mock() mock_prompt_for_field_by_type = Mock() monkeypatch.setattr( "geneweaver.client.utils.cli.prompt.pydantic.prompt_to_keep_field", mock_prompt_to_keep, ) monkeypatch.setattr( "geneweaver.client.utils.cli.prompt.pydantic.prompt_for_field_by_type", mock_prompt_for_field_by_type, ) prompt_for_missing_fields(MockModel, existing, exclude, prompt_to_keep_existing) # We should prompt for every field in `existing` that is not in `exclude`. if prompt_to_keep_existing and len(existing) > 0: assert mock_prompt_to_keep.call_count == len(set(existing.keys()) - exclude) # We should prompt for every field in `MockModel` that is not in # `existing` or `exclude`. assert mock_prompt_for_field_by_type.call_count == len(
"""Test the prompt_for_missing_fields function.""" # We can't use every combination of fields because the number of combinations # grows much too large to be practical. # Instead, we use the first 25 and last 25 combinations. @pytest.mark.parametrize( "existing", MOCK_EXISTING_COMBINATIONS[:25] + MOCK_EXISTING_COMBINATIONS[-25:] ) @pytest.mark.parametrize( "exclude", MOCK_MODEL_FIELD_COMBINATIONS[:25] + MOCK_MODEL_FIELD_COMBINATIONS[-25:] ) @pytest.mark.parametrize("prompt_to_keep_existing", [True, False]) def test_prompt_for_missing(existing, exclude, prompt_to_keep_existing, monkeypatch): """Test the prompt_for_missing_fields function.""" mock_prompt_to_keep = Mock() mock_prompt_for_field_by_type = Mock() monkeypatch.setattr( "geneweaver.client.utils.cli.prompt.pydantic.prompt_to_keep_field", mock_prompt_to_keep, ) monkeypatch.setattr( "geneweaver.client.utils.cli.prompt.pydantic.prompt_for_field_by_type", mock_prompt_for_field_by_type, ) prompt_for_missing_fields(MockModel, existing, exclude, prompt_to_keep_existing) # We should prompt for every field in `existing` that is not in `exclude`. if prompt_to_keep_existing and len(existing) > 0: assert mock_prompt_to_keep.call_count == len(set(existing.keys()) - exclude) # We should prompt for every field in `MockModel` that is not in # `existing` or `exclude`. assert mock_prompt_for_field_by_type.call_count == len(
set(MOCK_MODEL_FIELDS) - set(existing.keys()) - exclude
2
2023-11-10 19:28:53+00:00
2k
hmmbug/pythaidate
pythaidate/lsyear.py
[ { "identifier": "DAYS_IN_800_YEARS", "path": "pythaidate/constants.py", "snippet": "DAYS_IN_800_YEARS = 292207" }, { "identifier": "TIME_UNITS_IN_1_DAY", "path": "pythaidate/constants.py", "snippet": "TIME_UNITS_IN_1_DAY = 800" }, { "identifier": "EPOCH_OFFSET", "path": "pythaidate/constants.py", "snippet": "EPOCH_OFFSET = 373" }, { "identifier": "UCCAPON_CONSTANT", "path": "pythaidate/constants.py", "snippet": "UCCAPON_CONSTANT = 2611" }, { "identifier": "APOGEE_ROTATION_DAYS", "path": "pythaidate/constants.py", "snippet": "APOGEE_ROTATION_DAYS = 3232" }, { "identifier": "CAL_TYPE_DAY_COUNTS", "path": "pythaidate/constants.py", "snippet": "CAL_TYPE_DAY_COUNTS = {\n \"A\": 354,\n \"B\": 355,\n \"C\": 384,\n \"c\": 384,\n}" } ]
from .constants import ( DAYS_IN_800_YEARS, TIME_UNITS_IN_1_DAY, EPOCH_OFFSET, UCCAPON_CONSTANT, APOGEE_ROTATION_DAYS, CAL_TYPE_DAY_COUNTS, )
1,012
class LSYear: """ A lightweight class representing a lunisolar year on new year's day. """ def __init__(self, year: int): self.offset = False # adjusted later self.year = year # this year self.horakhun = (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1 self.kammacapon = TIME_UNITS_IN_1_DAY - (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY # ucc_i = (2611 + self.ahargana) // APOGEE_ROTATION_DAYS self.uccapon = (UCCAPON_CONSTANT + self.horakhun) % APOGEE_ROTATION_DAYS avo_quot = (self.horakhun * 11 + 650) // 692 self.avoman = (self.horakhun * 11 + 650) % 692 if self.avoman == 0: self.avoman = 692 self.masaken = (avo_quot + self.horakhun) // 30 self.tithi = (avo_quot + self.horakhun) % 30 if self.avoman == 692: self.tithi -= 1 # rest_quot = self.horakhun // 7 self.weekday = self.horakhun % 7 # next year horakhun1 = ((year + 1) * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1 quot1 = (horakhun1 * 11 + 650) // 692 # avo1 = (ahargana1 * 11 + 650) % 692 # mas1 = (quot1 + ahargana1) // 30 tithi1 = (quot1 + horakhun1) % 30 # Faraut, pg 28 self.langsak = max(1, self.tithi) self.nyd = self.langsak if self.nyd < 6: self.nyd += 29 self.nyd = (self.weekday - self.nyd + 1 + 35) % 7 # is there a solar year leap day? self.leapday = self.kammacapon <= 207 # A: normal year, 354 days; B: leap day, 355 days; C: leap month, 384 days self.cal_type = 'A' # normal year if self.tithi > 24 or self.tithi < 6: self.cal_type = 'C' # leap month if self.tithi == 25 and tithi1 == 5: self.cal_type = 'A' if (self.leapday and self.avoman <= 126) or (not self.leapday and self.avoman <= 137): self.cal_type = 'B' if self.cal_type != 'C' else 'c' # start of next year if self.cal_type == 'A': self.next_nyd = (self.nyd + 4) % 7 elif self.cal_type == 'B': self.next_nyd = (self.nyd + 5) % 7 elif self.cal_type == 'C' or self.cal_type == 'c': self.next_nyd = (self.nyd + 6) % 7
class LSYear: """ A lightweight class representing a lunisolar year on new year's day. """ def __init__(self, year: int): self.offset = False # adjusted later self.year = year # this year self.horakhun = (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1 self.kammacapon = TIME_UNITS_IN_1_DAY - (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY # ucc_i = (2611 + self.ahargana) // APOGEE_ROTATION_DAYS self.uccapon = (UCCAPON_CONSTANT + self.horakhun) % APOGEE_ROTATION_DAYS avo_quot = (self.horakhun * 11 + 650) // 692 self.avoman = (self.horakhun * 11 + 650) % 692 if self.avoman == 0: self.avoman = 692 self.masaken = (avo_quot + self.horakhun) // 30 self.tithi = (avo_quot + self.horakhun) % 30 if self.avoman == 692: self.tithi -= 1 # rest_quot = self.horakhun // 7 self.weekday = self.horakhun % 7 # next year horakhun1 = ((year + 1) * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1 quot1 = (horakhun1 * 11 + 650) // 692 # avo1 = (ahargana1 * 11 + 650) % 692 # mas1 = (quot1 + ahargana1) // 30 tithi1 = (quot1 + horakhun1) % 30 # Faraut, pg 28 self.langsak = max(1, self.tithi) self.nyd = self.langsak if self.nyd < 6: self.nyd += 29 self.nyd = (self.weekday - self.nyd + 1 + 35) % 7 # is there a solar year leap day? self.leapday = self.kammacapon <= 207 # A: normal year, 354 days; B: leap day, 355 days; C: leap month, 384 days self.cal_type = 'A' # normal year if self.tithi > 24 or self.tithi < 6: self.cal_type = 'C' # leap month if self.tithi == 25 and tithi1 == 5: self.cal_type = 'A' if (self.leapday and self.avoman <= 126) or (not self.leapday and self.avoman <= 137): self.cal_type = 'B' if self.cal_type != 'C' else 'c' # start of next year if self.cal_type == 'A': self.next_nyd = (self.nyd + 4) % 7 elif self.cal_type == 'B': self.next_nyd = (self.nyd + 5) % 7 elif self.cal_type == 'C' or self.cal_type == 'c': self.next_nyd = (self.nyd + 6) % 7
self.caldays = CAL_TYPE_DAY_COUNTS[self.cal_type]
5
2023-11-18 21:14:01+00:00
2k
finalparanoia/Bert-VITS2-Preprocess
main.py
[ { "identifier": "create", "path": "utils/create.py", "snippet": "def create(dataset_name: str):\n raw_files = ls(f\"{raw_dir}/*.wav\")\n current_dataset_path = f\"{dataset_dir}/{dataset_name}\"\n i = 0\n\n if exist(current_dataset_path):\n mv(current_dataset_path, current_dataset_path+\".old\")\n\n mk_dataset_dir(current_dataset_path)\n\n tasks = []\n for raw_file in raw_files:\n tasks.append(delayed(cut_long_silences)(dataset_name, raw_file, i))\n i += 1\n multi_work = Parallel(n_jobs=16, backend='multiprocessing')\n multi_work(tasks)\n\n for tmp in ls(f\"{tmp_dir}/*.wav\"):\n mv(tmp, f\"{current_dataset_path}/audios/Raw\")" }, { "identifier": "tag", "path": "utils/tag.py", "snippet": "def tag(dataset_name: str):\n current_dataset_dir = f\"{dataset_dir}/{dataset_name}\"\n audio_dir = f\"{current_dataset_dir}/audios/wavs\"\n filelist = ls(f\"{audio_dir}/*.wav\")\n file_list_path = f\"{current_dataset_dir}/filelists/{dataset_name}.list\"\n\n complete_list = read_breakpoint(file_list_path)\n\n inference_pipeline = pipeline(\n task=Tasks.auto_speech_recognition,\n model=modelscope_model\n )\n\n for file in filelist:\n if file[-3:] != 'wav':\n continue\n\n # todo 支持单数据集多角色\n\n if file in complete_list:\n continue\n\n rec_result = inference_pipeline(file)\n\n if 'text' not in rec_result:\n continue\n\n line = file + \"|\" + dataset_name + \"|ZH|\" + rec_result['text'] + \"\\n\"\n\n with open(file_list_path, 'a', encoding='utf-8') as f:\n f.write(line)" }, { "identifier": "resample", "path": "utils/resample.py", "snippet": "def resample(dataset_name: str):\n current_dataset_dir = f\"{dataset_dir}/{dataset_name}\"\n processes = cpu_count() - 2 if cpu_count() > 4 else 1\n\n pool = Pool(processes=processes)\n\n tasks = []\n\n in_dir = f\"{current_dataset_dir}/audios/raw\"\n out_dir = f\"{current_dataset_dir}/audios/wavs\"\n for dir_path, _, filenames in os.walk(in_dir):\n # 子级目录\n spk_dir = os.path.relpath(dir_path, in_dir)\n spk_dir_out = os.path.join(out_dir, spk_dir)\n if not os.path.isdir(spk_dir_out):\n os.makedirs(spk_dir_out, exist_ok=True)\n for filename in filenames:\n if filename.endswith(\".wav\"):\n tasks.append((spk_dir, filename, in_dir, out_dir))\n\n for _ in tqdm(\n pool.imap_unordered(process, tasks),\n ):\n pass\n\n pool.close()\n pool.join()\n\n print(\"音频重采样完毕!\")" }, { "identifier": "clean", "path": "utils/clean.py", "snippet": "def clean(dataset_name: str):\n\n for raw_wav in ls(f\"{dataset_dir}/{dataset_name}/audios/raw/*.wav\"):\n rm(raw_wav)" }, { "identifier": "gen_config", "path": "utils/model_conf.py", "snippet": "def gen_config(dataset_name: str):\n with open(\"./config/config.json\", \"r\") as f:\n conf = loads(f.read())\n conf[\"data\"][\"spk2id\"][dataset_name] = 0\n with open(f\"{dataset_dir}/{dataset_name}/config.json\", \"w\") as f:\n f.write(dumps(conf))" } ]
from utils.create import create from utils.tag import tag from utils.resample import resample from utils.clean import clean from utils.model_conf import gen_config
935
if __name__ == "__main__": pass dataset_name = input("请为数据集命名:") create(dataset_name) resample(dataset_name) tag(dataset_name) clean(dataset_name)
if __name__ == "__main__": pass dataset_name = input("请为数据集命名:") create(dataset_name) resample(dataset_name) tag(dataset_name) clean(dataset_name)
gen_config(dataset_name)
4
2023-11-12 09:42:20+00:00
2k
itzshukla/STRANGER-SPAM
TheXSpam/extra.py
[ { "identifier": "SUDO_USERS", "path": "config.py", "snippet": "SUDO_USERS = list(map(lambda x: int(x), getenv(\"SUDO_USERS\", \"6163010926\").split(\" \")))" }, { "identifier": "ALIVE_PIC", "path": "config.py", "snippet": "ALIVE_PIC = getenv(\"ALIVE_PIC\", \"https://telegra.ph/file/aa4bf1e57d11fb75b602e.jpg\")" }, { "identifier": "OWNER_ID", "path": "config.py", "snippet": "OWNER_ID = int(getenv(\"OWNER_ID\", \"5518687442\"))" }, { "identifier": "HEROKU_APP_NAME", "path": "config.py", "snippet": "HEROKU_APP_NAME = getenv(\"HEROKU_APP_NAME\")" }, { "identifier": "HEROKU_API_KEY", "path": "config.py", "snippet": "HEROKU_API_KEY = getenv(\"HEROKU_API_KEY\")" } ]
import heroku3 from os import getenv from config import SUDO_USERS, ALIVE_PIC, OWNER_ID, HEROKU_APP_NAME, HEROKU_API_KEY from pyrogram import Client, filters from pyrogram.types import Message
724
# © @shiva_ansh_op FIRST_TEXT = f"""★ 𝗦𝘁𝗿𝗮𝗻𝗴𝗲𝗿-𝙎𝙥𝙖𝙢 𝙃𝙚𝙡𝙥 𝙈𝙚𝙣𝙪 ★ **» ʙᴏᴛ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/5) **» ʀᴀɪᴅ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/6) **» ꜱᴘᴀᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/7) **» ᴅᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/8)""" @Client.on_message(filters.user(SUDO_USERS) & filters.command(["help"], [".", "!", "/"])) async def help(client: Client, message: Message): await client.send_photo( chat_id=message.chat.id, photo=ALIVE_PIC, caption=FIRST_TEXT ) @Client.on_message(filters.user(OWNER_ID) & filters.command(["sudo"], ["/", ".", "!"])) async def add_sudo(_, message: Message): if not message.reply_to_message: await message.reply_text("» ʀᴇᴘʟʏ ᴛᴏ ᴀ ᴜꜱᴇʀ !!") return
# © @shiva_ansh_op FIRST_TEXT = f"""★ 𝗦𝘁𝗿𝗮𝗻𝗴𝗲𝗿-𝙎𝙥𝙖𝙢 𝙃𝙚𝙡𝙥 𝙈𝙚𝙣𝙪 ★ **» ʙᴏᴛ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/5) **» ʀᴀɪᴅ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/6) **» ꜱᴘᴀᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/7) **» ᴅᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/8)""" @Client.on_message(filters.user(SUDO_USERS) & filters.command(["help"], [".", "!", "/"])) async def help(client: Client, message: Message): await client.send_photo( chat_id=message.chat.id, photo=ALIVE_PIC, caption=FIRST_TEXT ) @Client.on_message(filters.user(OWNER_ID) & filters.command(["sudo"], ["/", ".", "!"])) async def add_sudo(_, message: Message): if not message.reply_to_message: await message.reply_text("» ʀᴇᴘʟʏ ᴛᴏ ᴀ ᴜꜱᴇʀ !!") return
elif HEROKU_APP_NAME is None:
3
2023-11-14 05:14:00+00:00
2k
fg320/DEASC
deasc/wf_model.py
[ { "identifier": "floris_input_handler", "path": "deasc/utils_floris.py", "snippet": "def floris_input_handler(input_file, path):\n \"\"\"Convert input file into a FLORIS interface object.\"\"\"\n # No input file\n if input_file == None:\n err_msg = \"Input file required\"\n raise Exception(err_msg)\n\n # Multiple input files\n elif isinstance(input_file, list) is True:\n err_msg = \"Required a single input file, multiple are provided\"\n raise Exception(err_msg)\n\n # Initialize single floris object\n else:\n fi = FI(path+input_file)\n print(\"Successfull single file import!\")\n\n return fi" }, { "identifier": "floris_properties", "path": "deasc/utils_floris.py", "snippet": "def floris_properties(wf_model):\n \"\"\"Extract wind farm model information from FLORIS object.\"\"\"\n fi = wf_model.interface\n D = (fi.floris.farm.rotor_diameters).flatten()[0] # Flatten over wd and ws\n H_hub = (fi.floris.farm.hub_heights).flatten()[0] # Flatten over wd and ws\n n_turbs = len(wf_model.interface.get_turbine_layout()[0])\n\n return D, H_hub, n_turbs" }, { "identifier": "floris_current_yaw", "path": "deasc/utils_floris.py", "snippet": "def floris_current_yaw(wf_model):\n \"\"\"Extract and returns the current wind farm yaw angles.\"\"\"\n return wf_model.interface.floris.farm.yaw_angles[0][0]" }, { "identifier": "floris_reinitialise_layout", "path": "deasc/utils_floris.py", "snippet": "def floris_reinitialise_layout(wf_model, layout_x, layout_y):\n \"\"\"\n Modify wind farm layout based on the coordinates provided and also\n reinitialises the flow field. If the number of turbines is unchanged,\n yaw angles are stored in the farm object. Limited to a FLORIS interface\n object.\n \"\"\"\n # Extract FLORIS interface object\n fi = wf_model.interface\n\n # As floris reinitializes, it sets all yaw angles to 0 deg.\n yaw_temp = fi.floris.farm.yaw_angles[0][0]\n fi.reinitialize(layout_x=layout_x, layout_y=layout_y)\n # Update number of turbines\n wf_model.n_turbs = len(wf_model.interface.get_turbine_layout()[0])\n # Keep old yaw angles only if the number of turbines is the same\n if len(yaw_temp) == len(layout_x):\n fi.calculate_wake(yaw_angles=np.array([[yaw_temp]]))" }, { "identifier": "floris_farm_eval", "path": "deasc/utils_floris.py", "snippet": "def floris_farm_eval(wf_model, yaw, ws, wd, ti, shear):\n \"\"\"\n Calculate wind farm power and wind turbine powers given an\n atmopheric and yaw condition. 
Farm layout is unchanged and information such\n as yaw angles preserved even if not explicitly specified.\n \"\"\"\n # Extract FLORIS interface object\n fi = wf_model.interface\n\n # Start floris farm computational time\n start = time.time()\n\n # Get yaw angles before reinitializing - set to 0 when reinitializing flow\n yaw = fi.floris.farm.yaw_angles[0][0] if yaw is None else yaw\n\n # Get wd and ws as None is not an option in reinitialize flow\n ws = fi.floris.flow_field.wind_speeds[0] if ws is None else ws\n wd = fi.floris.flow_field.wind_directions[0] if wd is None else wd\n\n # Error if yaw angles don't match turbine number\n if len(yaw) != len(fi.floris.farm.yaw_angles[0][0]):\n err_msg = \"Yaw prescribed not matching turbine number\"\n raise Exception(err_msg)\n\n # Reinitialize flow field and set previous yaw angles\n fi.reinitialize(wind_speeds=[ws],\n wind_directions=[wd],\n turbulence_intensity=ti,\n wind_shear=shear)\n yaw = np.array([float(item) for item in yaw])\n fi.calculate_wake(yaw_angles=np.array([[yaw]]))\n\n # Calculate wf power, wt powers, wt turbulence intensities, wt yaw angles\n wf_pow = (fi.get_farm_power()*10**(-6))[0][0]\n wt_pow = (np.array(fi.get_turbine_powers())*10**(-6))[0][0]\n wt_ti = (fi.get_turbine_TIs())[0][0]\n wt_yaw = np.array(fi.floris.farm.yaw_angles[0][0])\n\n # Report CPU time\n cpu_time = time.time()-start\n return (wf_pow, wt_pow, wt_ti, wt_yaw, cpu_time)" } ]
import warnings import numpy as np from .utils_floris import ( floris_input_handler, floris_properties, floris_current_yaw, floris_reinitialise_layout, floris_farm_eval )
1,390
# Copyright 2023 Filippo Gori # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. class WfModel: """ Class for wind farm modelling (Interface setup but not limited to FLORIS framework). """ def __init__(self, input_file, path): """ Initialise wind farm object by pointing towards an input file. (FLORIS interface object). Args ---- input file:(FLORIS .json input file). """ # Read and initialize input file self.input_file = input_file self.interface = floris_input_handler(self.input_file, path) # Assign wind farm model proporties
# Copyright 2023 Filippo Gori # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. class WfModel: """ Class for wind farm modelling (Interface setup but not limited to FLORIS framework). """ def __init__(self, input_file, path): """ Initialise wind farm object by pointing towards an input file. (FLORIS interface object). Args ---- input file:(FLORIS .json input file). """ # Read and initialize input file self.input_file = input_file self.interface = floris_input_handler(self.input_file, path) # Assign wind farm model proporties
self.D, self.H_hub, self.n_turbs = floris_properties(self)
1
2023-11-10 18:13:27+00:00
2k
CPES-Power-and-Energy-Systems/interoperable-recommender-tso
energy_app/src/energy_app_client/Controller.py
[ { "identifier": "Endpoint", "path": "energy_app/src/energy_app_client/Endpoint.py", "snippet": "class Endpoint:" }, { "identifier": "RequestController", "path": "energy_app/src/energy_app_client/RequestController.py", "snippet": "class RequestController:\n \"\"\"\n Manages api calls to remote endpoint using Python *requests* package\n \"\"\"\n\n def __init__(self):\n self.retries = settings.ENERGYAPP[\"n_retries\"]\n self.remote_host = settings.ENERGYAPP[\"host\"]\n self.remote_port = settings.ENERGYAPP[\"port\"]\n self.remote_uri = f\"http://{self.remote_host}:{self.remote_port}\"\n self.headers = {\n 'content-type': 'application/json'\n }\n\n # note the endpoint is forced to follow the standard Endpoint class\n def request(self,\n endpoint: Endpoint,\n data=None,\n params=None,\n url_params=None,\n auth_token=None) -> Response:\n \"\"\"\n :param endpoint:\n :param data:\n :param params:\n :param url_params:\n :param auth_token:\n :return:\n \"\"\"\n\n url = self.remote_uri + endpoint.uri\n if url_params is not None:\n if url[-1] != \"/\":\n url += \"/\"\n for p in url_params:\n url += f\"{p}\"\n logger.debug(f\"[{endpoint.http_method}]Request to: {url}\")\n\n data = None if data is None else json.dumps(data)\n headers_ = self.headers\n if auth_token:\n headers_['Authorization'] = f'Bearer {auth_token}'\n try:\n response = self.__requests_retry_session().request(\n method=endpoint.http_method,\n url=url,\n data=data,\n params=params,\n headers=headers_\n )\n\n except (requests.HTTPError, requests.exceptions.ConnectionError,\n requests.exceptions.InvalidURL) as e:\n raise e\n\n return response\n\n def __requests_retry_session(self,\n back_off_factor=0.3,\n status_force_list=(500, 502, 504),\n session=None\n ):\n \"\"\"\n https://www.peterbe.com/plog/best-practice-with-retries-with-requests\n :param back_off_factor:\n :param status_force_list:\n :param session:\n :return:\n \"\"\"\n session = session or requests.Session()\n\n retry = Retry(\n total=self.retries,\n read=self.retries,\n connect=self.retries,\n backoff_factor=back_off_factor,\n status_forcelist=status_force_list,\n )\n\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session" }, { "identifier": "LoginException", "path": "energy_app/src/energy_app_client/exception/APIException.py", "snippet": "class LoginException(Exception):\n def __init__(self, message, errors):\n # Call the base class constructor with the parameters it needs\n super().__init__(message)\n\n # Now for your custom code...\n self.errors = errors" }, { "identifier": "PostActionsException", "path": "energy_app/src/energy_app_client/exception/APIException.py", "snippet": "class PostActionsException(Exception):\n def __init__(self, message, errors):\n # Call the base class constructor with the parameters it needs\n super().__init__(message)\n\n # Now for your custom code...\n self.errors = errors" } ]
from time import time from loguru import logger from http import HTTPStatus from .Endpoint import Endpoint, post_actions from .RequestController import RequestController from .exception import LoginException, PostActionsException
965
class Controller(RequestController): def __init__(self): RequestController.__init__(self) self.access_token = "" def __check_if_token_exists(self): if self.access_token is None: e_msg = "Access token is not yet available. Login first." logger.error(e_msg) raise ValueError(e_msg) def set_access_token(self, token): self.access_token = token def login(self, email: str, password: str): raise NotImplementedError("Method not implemented.") def __request_template(self,
class Controller(RequestController): def __init__(self): RequestController.__init__(self) self.access_token = "" def __check_if_token_exists(self): if self.access_token is None: e_msg = "Access token is not yet available. Login first." logger.error(e_msg) raise ValueError(e_msg) def set_access_token(self, token): self.access_token = token def login(self, email: str, password: str): raise NotImplementedError("Method not implemented.") def __request_template(self,
endpoint_cls: Endpoint,
0
2023-11-17 09:23:38+00:00
2k
PlaxtonFlarion/NexaFlow
nexaflow/hook.py
[ { "identifier": "toolbox", "path": "nexaflow/toolbox.py", "snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None\n) -> typing.List[float]:\ndef get_current_frame_id(video_cap: cv2.VideoCapture) -> int:\ndef get_current_frame_time(video_cap: cv2.VideoCapture) -> float:\ndef imread(img_path: str, *_, **__) -> np.ndarray:\ndef get_frame_time(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> float:\ndef get_frame_count(video_cap: cv2.VideoCapture) -> int:\ndef get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:\ndef get_frame(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> np.ndarray:\ndef turn_grey(old: np.ndarray) -> np.ndarray:\ndef turn_binary(old: np.ndarray) -> np.ndarray:\ndef turn_hog_desc(old: np.ndarray) -> np.ndarray:\ndef turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:\ndef turn_blur(old: np.ndarray) -> np.ndarray:\ndef sharpen_frame(old: np.ndarray) -> np.ndarray:\ndef calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef compress_frame(\n old: np.ndarray,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n not_grey: bool = None,\n interpolation: int = None,\n *_,\n **__,\n) -> np.ndarray:\ndef get_timestamp_str() -> str:\ndef np2b64str(frame: np.ndarray) -> str:\ndef fps_convert(\n target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None\n) -> int:\ndef match_template_with_object(\n template: np.ndarray,\n target: np.ndarray,\n engine_template_cv_method_name: str = None,\n **kwargs,\n) -> typing.Dict[str, typing.Any]:\ndef match_template_with_path(\n template: str, target: np.ndarray, **kwargs\n) -> typing.Dict[str, typing.Any]:\ndef show_progress(total: int, color: int, title: str) -> tqdm:\ndef draw_line(image_path: str, save_path: str = None):" }, { "identifier": "VideoFrame", "path": "nexaflow/video.py", "snippet": "class VideoFrame(Frame):\n\n def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):\n super().__init__(frame_id, timestamp, data)\n\n def __str__(self):\n return f\"<VideoFrame id={self.frame_id} timestamp={self.timestamp}>\"\n\n @staticmethod\n def initial(cap: cv2.VideoCapture, frame: np.ndarray) -> \"VideoFrame\":\n frame_id = toolbox.get_current_frame_id(cap)\n timestamp = toolbox.get_current_frame_time(cap)\n new_frame = toolbox.compress_frame(frame, 0.5, (350, 700), False)\n return VideoFrame(frame_id, timestamp, new_frame)\n\n def copy(self) -> \"VideoFrame\":\n return VideoFrame(self.frame_id, self.timestamp, self.data[:])\n\n def contain_image(\n self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"\n 检查给定图像(通过路径或numpy对象)是否存在于当前帧中,并返回匹配的字典\n \"\"\"\n assert image_path or (\n image_object is not None\n ), \"should fill image_path or image_object\"\n\n if image_path:\n logger.debug(f\"found image path, use it first: {image_path}\")\n return toolbox.match_template_with_path(image_path, self.data, **kwargs)\n image_object = toolbox.turn_grey(image_object)\n return toolbox.match_template_with_object(image_object, self.data, **kwargs)" } ]
import os import cv2 import typing from loguru import logger from nexaflow import toolbox from nexaflow.video import VideoFrame
1,228
class BaseHook(object): def __init__(self, *_, **__): # logger.debug(f"start initialing: {self.__class__.__name__} ...") logger.info(f"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...") self.result = dict() def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]: # info = f"execute hook: {self.__class__.__name__}" frame_id = frame.frame_id if frame_id != -1: # logger.debug(f"{info}, frame id: {frame_id}") pass return frame class ExampleHook(BaseHook): def __init__(self, *_, **__): super().__init__(*_, **__) def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]: super().do(frame, *_, **__)
class BaseHook(object): def __init__(self, *_, **__): # logger.debug(f"start initialing: {self.__class__.__name__} ...") logger.info(f"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...") self.result = dict() def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]: # info = f"execute hook: {self.__class__.__name__}" frame_id = frame.frame_id if frame_id != -1: # logger.debug(f"{info}, frame id: {frame_id}") pass return frame class ExampleHook(BaseHook): def __init__(self, *_, **__): super().__init__(*_, **__) def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]: super().do(frame, *_, **__)
frame.data = toolbox.turn_grey(frame.data)
0
2023-11-13 05:27:34+00:00
2k
OpenBMB/XAgent
tests/test_run.py
[ { "identifier": "parse_args", "path": "run.py", "snippet": "def parse_args() -> argparse.Namespace:\n \"\"\"\n Parse the command line arguments and return them as an argparse.Namespace object.\n\n Returns:\n argparse.Namespace: An object containing command line arguments and their values.\n \"\"\"\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--task\", type=str, required=True, help=\"The task description.\")\n parser.add_argument(\"--upload-files\", nargs='+', dest=\"upload_files\", help=\"List of files to upload.\")\n parser.add_argument(\"--model\", type=str, help=\"Model identifier for the task.\")\n parser.add_argument(\"--record-dir\", type=str, dest=\"record_dir\", help=\"Directory to record task execution logs.\")\n parser.add_argument(\"--mode\", type=str, default=\"auto\", help=\"Operational mode: 'auto' or 'manual'.\")\n parser.add_argument(\"--quiet\", action=\"store_true\", default=False, help=\"Run in quiet mode; minimal output.\")\n parser.add_argument(\"--max-subtask-chain-length\", type=int, dest=\"max_subtask_chain_length\",\n help=\"Maximum length of subtask chain.\")\n parser.add_argument(\"--enable-ask-human-for-help\", action=\"store_true\", dest=\"enable_ask_human_for_help\",\n help=\"Flag to enable asking for human assistance.\")\n parser.add_argument(\"--max-plan-refine-chain-length\", type=int, dest=\"max_plan_refine_chain_length\",\n help=\"Maximum length of plan refinement chain.\")\n parser.add_argument(\"--max-plan-tree-depth\", type=int, dest=\"max_plan_tree_depth\",\n help=\"Maximum depth of the plan tree.\")\n parser.add_argument(\"--max-plan-tree-width\", type=int, dest=\"max_plan_tree_width\",\n help=\"Maximum width of the plan tree.\")\n parser.add_argument(\"--max-retry-times\", type=int, dest=\"max_retry_times\", help=\"Maximum number of retry attempts.\")\n parser.add_argument(\"--config-file\", type=str, default=os.getenv('CONFIG_FILE', 'assets/config.yml'),\n dest=\"config_file\", help=\"Path to the configuration file.\")\n\n return parser.parse_args()" }, { "identifier": "execute_command_line_process", "path": "run.py", "snippet": "def execute_command_line_process(args: argparse.Namespace, quiet_mode: bool = False) -> None:\n \"\"\"\n Execute the command line process based on the parsed arguments. 
If quiet mode is enabled,\n redirect stdout to a file specified by the recorder's record_root_dir.\n\n Args:\n args (argparse.Namespace): Parsed command line arguments.\n quiet_mode (bool): Whether to run in quiet mode, outputting to a file instead of the terminal.\n \"\"\"\n args_dict = vars(args)\n for key, value in args_dict.items():\n if value is not None:\n if key == 'model':\n ARGS['default_completion_kwargs'] = deepcopy(CONFIG['default_completion_kwargs'])\n ARGS['default_completion_kwargs']['model'] = value\n else:\n ARGS[key] = value\n\n # Redirect stdout to a file if quiet mode is true\n if quiet_mode:\n from XAgent.running_recorder import recorder\n record_file_path = os.path.join(recorder.record_root_dir, \"command_line.ansi\")\n with open(record_file_path, \"w\", encoding=\"utf-8\") as file, redirect_stdout(file):\n start_command_line(args_dict)\n else:\n start_command_line(args_dict)" }, { "identifier": "start_command_line", "path": "run.py", "snippet": "def start_command_line(args_dict: dict) -> None:\n \"\"\"\n Start the command line interface with the provided arguments.\n\n Args:\n args_dict (dict): A dictionary of command line arguments.\n \"\"\"\n param = CommandLineParam(\n task=args_dict['task'],\n upload_files=args_dict.get('upload_files'),\n role=\"Assistant\",\n mode=args_dict[\"mode\"],\n )\n cmd = CommandLine(param)\n cmd.start()" } ]
import pytest import sys from run import parse_args, execute_command_line_process, start_command_line from unittest.mock import patch
1,008
@pytest.fixture def mock_argv(monkeypatch): """ A pytest fixture to mock the command line arguments. It sets the sys.argv to mimic command line input for testing. """ test_args = ["--task", "example_task", "--upload-files", "file1", "file2", "--model", "model1"] monkeypatch.setattr(sys, 'argv', ['test_script.py'] + test_args) def test_parse_args(mock_argv): """ Test to ensure that the parse_args function correctly parses command line arguments. """
@pytest.fixture def mock_argv(monkeypatch): """ A pytest fixture to mock the command line arguments. It sets the sys.argv to mimic command line input for testing. """ test_args = ["--task", "example_task", "--upload-files", "file1", "file2", "--model", "model1"] monkeypatch.setattr(sys, 'argv', ['test_script.py'] + test_args) def test_parse_args(mock_argv): """ Test to ensure that the parse_args function correctly parses command line arguments. """
args = parse_args()
0
2023-10-16 03:44:57+00:00
2k
pytorch-labs/gpt-fast
GPTQ.py
[ { "identifier": "setup_cache_padded_seq_input_pos_max_seq_length_for_prefill", "path": "eval.py", "snippet": "def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(\n model: LLaMA,\n prompt: torch.Tensor,\n max_new_tokens: int,\n max_seq_length: Optional[int] = None,\n):\n \"\"\"\n Sets up model cache and does some bookkeeping calculations for prompt, input_pos and max_seq_length\n that are needed for prefill or model_forward\n\n Args:\n model (LLaMA): The model whose cache gets set up\n prompt (torch.Tensor): Tensor of shape (T) with indices of the prompt sequence.\n max_new_tokens (int): The desired maximum number of new tokens that can be generated.\n max_seq_length (Optional[int], optional): The maximum sequence length allowed.\n\n Returns:\n seq (torch.Tensor): prompt but padded with zeros to size max_seq_length\n input_pos (torch.Tensor): tensor of integers in increasing order\n max_seq_length (int): The maximum sequence length allowed, updated based on other numbers\n \"\"\"\n T = prompt.size(0)\n T_new = T + max_new_tokens\n if max_seq_length is None:\n max_seq_length = min(T_new, model.config.block_size)\n\n device, dtype = prompt.device, prompt.dtype\n # create an empty tensor of the expected final shape and fill in the current tokens\n empty = torch.empty(T_new, dtype=dtype, device=device)\n empty[:T] = prompt\n seq = empty\n input_pos = torch.arange(0, T, device=device)\n\n with torch.device(device):\n model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length)\n\n return seq, input_pos, max_seq_length" }, { "identifier": "encode_tokens", "path": "generate.py", "snippet": "def encode_tokens(tokenizer, string, bos=True, device='cuda'):\n tokens = tokenizer.encode(string)\n if bos:\n tokens = [tokenizer.bos_id()] + tokens\n return torch.tensor(tokens, dtype=torch.int, device=device)" } ]
import os import sys import torch import main as lm_evaluation_harness_main import torch.fx as fx import torch.nn as nn import torch.nn.functional as F import lm_eval from torch.utils._pytree import tree_flatten, tree_unflatten from eval import setup_cache_padded_seq_input_pos_max_seq_length_for_prefill from generate import encode_tokens
1,471
aten = torch.ops.aten try: class InputRecorder(lm_eval.base.BaseLM): """ This is a fake evaluation wrapper that just records the inputs so that they can be used in calibration. If pad_calibration_inputs is enabled, the input recorder will take each input and pad/truncate it down to the calibration_seq_length. It will also edit the model embeddings to be zero for the 0 token used in padding and avoid any inputs with the 0 token. If not, it will only truncate inputs to the desired length. """ def __init__( self, model, tokenizer, calibration_seq_length, pad_calibration_inputs=False, ): super().__init__() self._model = model self._tokenizer = tokenizer self._device = torch.device("cpu") self.vocab_size = model.config.vocab_size self.calibration_seq_length = calibration_seq_length self.pad_calibration_inputs = pad_calibration_inputs self.inputs = None if self.pad_calibration_inputs: # This is needed for the pad_calibration_inputs option # to work properly, the 0 token's embeddings are set to 0 so that # the padded inputs will not affect the model numerics. This token isn't used # commonly in the eval tasks for the meta-llama tokenizer and we skip any inputs # where it appears try: if isinstance(self._model.transformer.wte, nn.Embedding): self.mod.transformer.wte.weight.data[0, :] *= 0 except: print( "Did not find embeddings in model.transformer.wte, disabling padding" ) self.pad_calibration_inputs = False @property def eot_token_id(self): return self._tokenizer.eos_id() @property def max_length(self): return self.calibration_seq_length @property def max_gen_toks(self): return 50 @property def batch_size(self): return 1 @property def device(self): return self._device def tok_encode(self, string: str): encoded = encode_tokens( self._tokenizer, string, bos=True, eos=False, device=self._device ) # encoded is a pytorch tensor, but some internal logic in the # eval harness expects it to be a list instead # TODO: verify this for multi-batch as well encoded = encoded.tolist() return encoded def tok_decode(self, tokens): decoded = self._tokenizer.decode(tokens) return decoded def add_input(self, args): if self.inputs is None: self.inputs = [MultiInput([arg]) for arg in args] else: self.inputs = [ multi.add_input(arg) for (multi, arg) in zip(self.inputs, args) ] def get_recorded_inputs(self): return self.inputs def _model_call(self, inps): inps = inps.squeeze(0) T = len(inps) if ( # can't use inputs that are too short when padding disabled (T < self.calibration_seq_length and not self.pad_calibration_inputs) or # can't use inputs that actually use token we use for padding (self.pad_calibration_inputs and 0 in inps) ): # give random output return torch.randn( (1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device ) # pad or truncate to the right size if T >= self.calibration_seq_length: inps = inps[: self.calibration_seq_length] else: inps = F.pad(inps, (0, self.calibration_seq_length - T)) max_new_tokens = 1 ( seq, input_pos, max_seq_length,
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. lm_evaluation_harness_path = "/".join( os.getcwd().split("/")[:-1] + ["lm-evaluation-harness"] ) sys.path.insert(0, lm_evaluation_harness_path) aten = torch.ops.aten try: class InputRecorder(lm_eval.base.BaseLM): """ This is a fake evaluation wrapper that just records the inputs so that they can be used in calibration. If pad_calibration_inputs is enabled, the input recorder will take each input and pad/truncate it down to the calibration_seq_length. It will also edit the model embeddings to be zero for the 0 token used in padding and avoid any inputs with the 0 token. If not, it will only truncate inputs to the desired length. """ def __init__( self, model, tokenizer, calibration_seq_length, pad_calibration_inputs=False, ): super().__init__() self._model = model self._tokenizer = tokenizer self._device = torch.device("cpu") self.vocab_size = model.config.vocab_size self.calibration_seq_length = calibration_seq_length self.pad_calibration_inputs = pad_calibration_inputs self.inputs = None if self.pad_calibration_inputs: # This is needed for the pad_calibration_inputs option # to work properly, the 0 token's embeddings are set to 0 so that # the padded inputs will not affect the model numerics. This token isn't used # commonly in the eval tasks for the meta-llama tokenizer and we skip any inputs # where it appears try: if isinstance(self._model.transformer.wte, nn.Embedding): self.mod.transformer.wte.weight.data[0, :] *= 0 except: print( "Did not find embeddings in model.transformer.wte, disabling padding" ) self.pad_calibration_inputs = False @property def eot_token_id(self): return self._tokenizer.eos_id() @property def max_length(self): return self.calibration_seq_length @property def max_gen_toks(self): return 50 @property def batch_size(self): return 1 @property def device(self): return self._device def tok_encode(self, string: str): encoded = encode_tokens( self._tokenizer, string, bos=True, eos=False, device=self._device ) # encoded is a pytorch tensor, but some internal logic in the # eval harness expects it to be a list instead # TODO: verify this for multi-batch as well encoded = encoded.tolist() return encoded def tok_decode(self, tokens): decoded = self._tokenizer.decode(tokens) return decoded def add_input(self, args): if self.inputs is None: self.inputs = [MultiInput([arg]) for arg in args] else: self.inputs = [ multi.add_input(arg) for (multi, arg) in zip(self.inputs, args) ] def get_recorded_inputs(self): return self.inputs def _model_call(self, inps): inps = inps.squeeze(0) T = len(inps) if ( # can't use inputs that are too short when padding disabled (T < self.calibration_seq_length and not self.pad_calibration_inputs) or # can't use inputs that actually use token we use for padding (self.pad_calibration_inputs and 0 in inps) ): # give random output return torch.randn( (1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device ) # pad or truncate to the right size if T >= self.calibration_seq_length: inps = inps[: self.calibration_seq_length] else: inps = F.pad(inps, (0, self.calibration_seq_length - T)) max_new_tokens = 1 ( seq, input_pos, max_seq_length,
) = setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
0
2023-10-17 05:30:32+00:00
2k
deepseek-ai/DeepSeek-Coder
Evaluation/MBPP/human_eval/evaluate_functional_correctness.py
[ { "identifier": "HUMAN_EVAL", "path": "Evaluation/MBPP/human_eval/data.py", "snippet": "HUMAN_EVAL = os.path.join(ROOT, \"..\", \"data\", \"HumanEval.jsonl.gz\")" }, { "identifier": "evaluate_functional_correctness", "path": "Evaluation/MBPP/human_eval/evaluation.py", "snippet": "def evaluate_functional_correctness(\n input_file: str = None,\n tmp_dir: str = \"./\",\n n_workers: int = 32,\n timeout: float = 10.0,\n problem_file: str = \"../data/humaneval_python.jsonl.gz\",\n out_dir: str = None,\n k: List[int] = [1, 10, 100],\n test_groundtruth: bool = False,\n example_test: bool = False,\n is_mbpp: bool = False,\n language: str = \"python\",\n):\n \"\"\"\n Evaluates the functional correctness of a model.\n \"\"\"\n if example_test:\n print(\"Example test...\")\n\n problems = read_dataset(problem_file,\n dataset_type=\"humaneval\")\n sample_jsonl = stream_jsonl_all(input_file)\n\n\n with ThreadPoolExecutor(max_workers=n_workers) as executor:\n\n futures = []\n completion_id = Counter()\n n_samples = 0\n results = defaultdict(list)\n\n if test_groundtruth:\n print(\"Testing ground truth...\")\n for sample in tqdm(problems.values()):\n task_id = sample[\"task_id\"]\n lang = task_id.split(\"/\")[0].lower()\n if lang == \"javascript\":\n lang = \"js\"\n tmp_dir_ = os.path.join(tmp_dir, lang, \"evaluation\")\n sample[\"generation\"] = sample[\"canonical_solution\"]\n sample[\"test_code\"] = process_humaneval_test(sample, problems, example_test, language)\n if sample[\"test_code\"] is None:\n continue\n args = (task_id, sample, lang, timeout, tmp_dir_, completion_id[task_id])\n future = executor.submit(check_correctness, *args)\n futures.append(future)\n completion_id[task_id] += 1\n n_samples += 1\n else:\n print(\"Reading samples...\")\n for sample in tqdm(sample_jsonl):\n task_id = sample[\"task_id\"]\n if not is_mbpp:\n lang = language\n if not is_mbpp and lang == \"javascript\":\n lang = \"js\"\n if is_mbpp:\n lang = \"python\"\n tmp_dir_ = os.path.join(tmp_dir, lang, \"evaluation\")\n sample[\"task_id\"] = task_id\n sample[\"test_code\"] = process_humaneval_test(sample, problems, example_test, is_mbpp, language)\n if sample[\"test_code\"] is None:\n continue\n if \"completion_id\" in sample:\n completion_id_ = sample[\"completion_id\"]\n else:\n completion_id_ = completion_id[task_id]\n args = (task_id, sample, lang, timeout, tmp_dir_, completion_id_)\n future = executor.submit(check_correctness, *args)\n futures.append(future)\n completion_id[task_id] += 1\n n_samples += 1\n\n if len(completion_id) == len(problems):\n evaluate_pass_at_k = True\n else:\n evaluate_pass_at_k = False\n\n print(\"Running test suites...\")\n for future in tqdm(as_completed(futures), total=len(futures)):\n result = future.result()\n results[result[\"task_id\"]].append((result[\"completion_id\"], result))\n\n # Calculate pass@k.\n total, correct = [], []\n for result in results.values():\n passed = [r[1][\"passed\"] for r in result]\n total.append(len(passed))\n correct.append(sum(passed))\n total = np.array(total)\n correct = np.array(correct)\n if evaluate_pass_at_k:\n ks = k\n pass_at_k = {f\"pass@{k}\": estimate_pass_at_k(total, correct, k).mean()\n for k in ks if (total >= k).all()}\n print(pass_at_k)\n else:\n print(\"Total:\", np.sum(total))\n print(\"Correct:\", np.sum(correct))\n return pass_at_k" } ]
import fire import sys from .data import HUMAN_EVAL from .evaluation import evaluate_functional_correctness
1,125
def entry_point( sample_file: str, k: str = "1,10,100", n_workers: int = 4, timeout: float = 3.0, problem_file: str = "", is_mbpp: bool = False, ): """ Evaluates the functional correctness of generated samples, and writes results to f"{sample_file}_results.jsonl.gz" """ k = list(map(int, k.split(",")))
def entry_point( sample_file: str, k: str = "1,10,100", n_workers: int = 4, timeout: float = 3.0, problem_file: str = "", is_mbpp: bool = False, ): """ Evaluates the functional correctness of generated samples, and writes results to f"{sample_file}_results.jsonl.gz" """ k = list(map(int, k.split(",")))
results = evaluate_functional_correctness(sample_file, k, n_workers, timeout, problem_file, is_mbpp)
1
2023-10-20 06:38:01+00:00
2k
PKU-YuanGroup/Video-LLaVA
llava/model/llava_arch.py
[ { "identifier": "build_image_tower", "path": "llava/model/multimodal_encoder/builder.py", "snippet": "def build_image_tower(image_tower_cfg, **kwargs):\n image_tower = getattr(image_tower_cfg, 'mm_image_tower', getattr(image_tower_cfg, 'image_tower', None))\n is_absolute_path_exists = os.path.exists(image_tower)\n if is_absolute_path_exists or image_tower.startswith(\"openai\") or image_tower.startswith(\"laion\"):\n return CLIPVisionTower(image_tower, args=image_tower_cfg, **kwargs)\n if image_tower.endswith('LanguageBind_Image'):\n return LanguageBindImageTower(image_tower, args=image_tower_cfg, cache_dir='./cache_dir', **kwargs)\n if 'mae' in image_tower:\n print('maemaemaemaemaemaemaemae')\n print('maemaemaemaemaemaemaemae')\n print('maemaemaemaemaemaemaemae')\n print('maemaemaemaemaemaemaemae')\n print('maemaemaemaemaemaemaemae')\n return MAEVisionTower(image_tower, args=image_tower_cfg, cache_dir='./cache_dir', **kwargs)\n raise ValueError(f'Unknown image tower: {image_tower}')" }, { "identifier": "build_video_tower", "path": "llava/model/multimodal_encoder/builder.py", "snippet": "def build_video_tower(video_tower_cfg, **kwargs):\n video_tower = getattr(video_tower_cfg, 'mm_video_tower', getattr(video_tower_cfg, 'video_tower', None))\n if video_tower.endswith('LanguageBind_Video_merge'):\n return LanguageBindVideoTower(video_tower, args=video_tower_cfg, cache_dir='./cache_dir', **kwargs)\n raise ValueError(f'Unknown video tower: {video_tower}')" }, { "identifier": "build_vision_projector", "path": "llava/model/multimodal_projector/builder.py", "snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n elif projector_type == 'identity':\n return IdentityMap()\n\n elif projector_type.startswith('qformer'): # qformer2_64\n qformer_config = qformer_config_template(config, projector_type)\n return Blip2Model(qformer_config)\n else:\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n raise ValueError(f'Unknown projector type: {projector_type}')" }, { "identifier": "IGNORE_INDEX", "path": "llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "X_TOKEN_INDEX", "path": "llava/constants.py", "snippet": "X_TOKEN_INDEX = {'IMAGE': -200, 'VIDEO': -201, 'AUDIO': -202, 'THERMAL': -203, 'DEPTH': -204}" }, { "identifier": "DEFAULT_X_PATCH_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_X_PATCH_TOKEN = {'IMAGE': \"<im_patch>\", 'VIDEO': \"<vi_patch>\", 'AUDIO': \"<au_patch>\", 'THERMAL': \"<th_patch>\", 'DEPTH': \"<de_patch>\"}" }, { "identifier": "DEFAULT_X_START_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_X_START_TOKEN = {'IMAGE': \"<im_start>\", 'VIDEO': \"<vi_start>\", 'AUDIO': \"<au_start>\", 'THERMAL': \"<th_start>\", 'DEPTH': \"<de_start>\"}" }, { "identifier": "DEFAULT_X_END_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_X_END_TOKEN = {'IMAGE': \"<im_end>\", 'VIDEO': \"<vi_end>\", 'AUDIO': \"<au_end>\", 'THERMAL': \"<th_end>\", 'DEPTH': \"<de_end>\"}" } ]
from abc import ABC, abstractmethod from .multimodal_encoder.builder import build_image_tower, build_video_tower from .multimodal_projector.builder import build_vision_projector from llava.constants import IGNORE_INDEX, X_TOKEN_INDEX, DEFAULT_X_PATCH_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN import torch import torch.nn as nn
1,164
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LlavaMetaModel: def __init__(self, config): super(LlavaMetaModel, self).__init__(config) if hasattr(config, "mm_image_tower"): self.image_tower = build_image_tower(config, delay_load=True)
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LlavaMetaModel: def __init__(self, config): super(LlavaMetaModel, self).__init__(config) if hasattr(config, "mm_image_tower"): self.image_tower = build_image_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
2
2023-10-23 05:43:54+00:00
2k
deepseek-ai/DreamCraft3D
extern/ldm_zero123/models/diffusion/ddim.py
[ { "identifier": "norm_thresholding", "path": "extern/ldm_zero123/models/diffusion/sampling_util.py", "snippet": "def norm_thresholding(x0, value):\n s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)\n return x0 * (value / s)" }, { "identifier": "renorm_thresholding", "path": "extern/ldm_zero123/models/diffusion/sampling_util.py", "snippet": "def renorm_thresholding(x0, value):\n # renorm\n pred_max = x0.max()\n pred_min = x0.min()\n pred_x0 = (x0 - pred_min) / (pred_max - pred_min) # 0 ... 1\n pred_x0 = 2 * pred_x0 - 1.0 # -1 ... 1\n\n s = torch.quantile(rearrange(pred_x0, \"b ... -> b (...)\").abs(), value, dim=-1)\n s.clamp_(min=1.0)\n s = s.view(-1, *((1,) * (pred_x0.ndim - 1)))\n\n # clip by threshold\n # pred_x0 = pred_x0.clamp(-s, s) / s # needs newer pytorch # TODO bring back to pure-gpu with min/max\n\n # temporary hack: numpy on cpu\n pred_x0 = (\n np.clip(pred_x0.cpu().numpy(), -s.cpu().numpy(), s.cpu().numpy())\n / s.cpu().numpy()\n )\n pred_x0 = torch.tensor(pred_x0).to(self.model.device)\n\n # re.renorm\n pred_x0 = (pred_x0 + 1.0) / 2.0 # 0 ... 1\n pred_x0 = (pred_max - pred_min) * pred_x0 + pred_min # orig range\n return pred_x0" }, { "identifier": "spatial_norm_thresholding", "path": "extern/ldm_zero123/models/diffusion/sampling_util.py", "snippet": "def spatial_norm_thresholding(x0, value):\n # b c h w\n s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)\n return x0 * (value / s)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_ddim_sampling_parameters", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt(\n (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)\n )\n if verbose:\n print(\n f\"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}\"\n )\n print(\n f\"For the chosen value of eta, which is {eta}, \"\n f\"this results in the following sigma_t schedule for ddim sampler {sigmas}\"\n )\n return sigmas, alphas, alphas_prev" }, { "identifier": "make_ddim_timesteps", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_ddim_timesteps(\n ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True\n):\n if ddim_discr_method == \"uniform\":\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == \"quad\":\n ddim_timesteps = (\n (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2\n ).astype(int)\n else:\n raise NotImplementedError(\n f'There is no ddim discretization method called \"{ddim_discr_method}\"'\n )\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f\"Selected timesteps for ddim sampler: {steps_out}\")\n return steps_out" }, { "identifier": "noise_like", "path": 
"extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" } ]
from functools import partial from tqdm import tqdm from extern.ldm_zero123.models.diffusion.sampling_util import ( norm_thresholding, renorm_thresholding, spatial_norm_thresholding, ) from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, ) import numpy as np import torch
1,515
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def to(self, device): """Same as to in torch module Don't really underestand why this isn't a module in the first place""" for k, v in self.__dict__.items(): if isinstance(v, torch.Tensor): new_v = getattr(self, k).to(device) setattr(self, k, new_v) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule( self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True ):
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def to(self, device): """Same as to in torch module Don't really underestand why this isn't a module in the first place""" for k, v in self.__dict__.items(): if isinstance(v, torch.Tensor): new_v = getattr(self, k).to(device) setattr(self, k, new_v) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule( self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True ):
self.ddim_timesteps = make_ddim_timesteps(
5
2023-10-23 07:40:20+00:00
2k
YORG-AI/Open-Assistant
package/src/yorgassistant/core/nodes/github/github_search.py
[ { "identifier": "BaseNode", "path": "package/src/yorgassistant/core/nodes/base_node.py", "snippet": "class BaseNode(ABC):\n config: NodeConfig\n func_mapping: dict[str, Callable]\n\n def __init__(self):\n # initialize func_mapping\n self.func_mapping = {}\n avail_funcs = [\n func_name for func_name in dir(self) if not func_name.startswith(\"_\")\n ]\n for func_name in self.config.functions.keys():\n if func_name not in avail_funcs:\n raise Exception(\n f\"Node {self.config.name} does not contain {func_name} method.\"\n )\n else:\n self.func_mapping[func_name] = getattr(self, func_name)\n\n def run(self, input: NodeInput):\n if input.func_name not in self.func_mapping.keys():\n raise Exception(\n f\"Node {self.config.name} does not contain {input.func_name} method.\"\n )\n else:\n return self.func_mapping[input.func_name](input.func_input)" }, { "identifier": "NodeConfig", "path": "package/src/yorgassistant/core/nodes/base_node.py", "snippet": "class NodeConfig(BaseModel):\n name: str = Field(description=\"Node 名称\")\n description: str = Field(default=\"\", description=\"Node 描述\")\n functions: dict[str, str] = Field(default={}, description=\"Node 所有功能描述\")" }, { "identifier": "GithubNode", "path": "package/src/yorgassistant/core/nodes/github/github_node.py", "snippet": "class GithubNode(BaseNode):\n def __init__(self):\n self.token = os.environ.get(\"GITHUB_TOKEN\") # Retrieving the token from the environment\n if not self.token:\n raise ValueError(\"GITHUB_TOKEN is not set in the environment.\")\n self.g = Github(self.token) # Initializing the GitHub instance with the token\n super().__init__()" }, { "identifier": "SearchCodeInput", "path": "package/src/yorgassistant/core/nodes/github/github_model.py", "snippet": "class SearchCodeInput(BaseSearchInput):\n sort: str = None\n order: str = None" }, { "identifier": "SearchCommitsInput", "path": "package/src/yorgassistant/core/nodes/github/github_model.py", "snippet": "class SearchCommitsInput(BaseSearchInput):\n sort: str = None\n order: str = None" }, { "identifier": "SearchIssuesAndPRsInput", "path": "package/src/yorgassistant/core/nodes/github/github_model.py", "snippet": "class SearchIssuesAndPRsInput(BaseSearchInput):\n sort: str = None\n order: str = None" }, { "identifier": "SearchLabelsInput", "path": "package/src/yorgassistant/core/nodes/github/github_model.py", "snippet": "class SearchLabelsInput(BaseSearchInput):\n repository_id: int\n sort: str = None\n order: str = None" }, { "identifier": "SearchRepositoriesInput", "path": "package/src/yorgassistant/core/nodes/github/github_model.py", "snippet": "class SearchRepositoriesInput(BaseSearchInput):\n sort: str = None\n order: str = None" }, { "identifier": "SearchTopicsInput", "path": "package/src/yorgassistant/core/nodes/github/github_model.py", "snippet": "class SearchTopicsInput(BaseSearchInput):\n sort: str = None\n order: str = None" }, { "identifier": "SearchUsersInput", "path": "package/src/yorgassistant/core/nodes/github/github_model.py", "snippet": "class SearchUsersInput(BaseSearchInput):\n sort: str = None\n order: str = None" } ]
from ..base_node import BaseNode, NodeConfig from .github_node import GithubNode from .github_model import ( SearchCodeInput, SearchCommitsInput, SearchIssuesAndPRsInput, SearchLabelsInput, SearchRepositoriesInput, SearchTopicsInput, SearchUsersInput, )
889
github_search_node_config = { "name": "github_search", "description": "A node for searching various entities on GitHub.", "functions": { "search_code": "Search code.", "search_commits": "Search commits.", "search_issues_and_prs": "Search issues and pull requests.", "search_labels": "Search labels.", "search_repositories": "Search repositories.", "search_topics": "Search topics.", "search_users": "Search users.", }, }
github_search_node_config = { "name": "github_search", "description": "A node for searching various entities on GitHub.", "functions": { "search_code": "Search code.", "search_commits": "Search commits.", "search_issues_and_prs": "Search issues and pull requests.", "search_labels": "Search labels.", "search_repositories": "Search repositories.", "search_topics": "Search topics.", "search_users": "Search users.", }, }
class GithubSearchNode(GithubNode):
2
2023-10-24 15:15:48+00:00
2k
zju3dv/4K4D
scripts/realtime4dv/charger.py
[ { "identifier": "to_numpy", "path": "easyvolcap/utils/data_utils.py", "snippet": "def to_numpy(batch, non_blocking=False, ignore_list: bool = False) -> Union[List, Dict, np.ndarray]: # almost always exporting, should block\n if isinstance(batch, (tuple, list)) and not ignore_list:\n batch = [to_numpy(b, non_blocking, ignore_list) for b in batch]\n elif isinstance(batch, dict):\n batch = dotdict({k: to_numpy(v, non_blocking, ignore_list) for k, v in batch.items()})\n elif isinstance(batch, torch.Tensor):\n batch = batch.detach().to('cpu', non_blocking=non_blocking).numpy()\n else: # numpy and others\n batch = np.asarray(batch)\n return batch" }, { "identifier": "save_npz", "path": "easyvolcap/utils/net_utils.py", "snippet": "def save_npz(model: nn.Module,\n model_dir: str = '',\n epoch: int = -1,\n latest: int = True,\n ):\n from easyvolcap.utils.data_utils import to_numpy\n npz_path = join(model_dir, 'latest.npz' if latest else f'{epoch}.npz')\n state_dict = model.state_dict() if not isinstance(model, DDP) else model.module.state_dict()\n param_dict = to_numpy(state_dict) # a shallow dict\n os.makedirs(dirname(npz_path), exist_ok=True)\n np.savez_compressed(npz_path, **param_dict)\n log(yellow(f'Saved model {blue(npz_path)} at epoch {blue(epoch)}'))" } ]
from os.path import join from easyvolcap.utils.console_utils import * from easyvolcap.utils.data_utils import to_numpy from easyvolcap.utils.net_utils import save_npz from easyvolcap.scripts.main import test # will do everything a normal user would do from easyvolcap.engine import cfg from easyvolcap.engine import SAMPLERS from easyvolcap.runners.volumetric_video_runner import VolumetricVideoRunner import sys import torch import argparse
690
# This function will try to invoke evc programmatically @catch_throw def main(): # fmt: off sys.path.append('.') sep_ind = sys.argv.index('--') our_args = sys.argv[1:sep_ind] evv_args = sys.argv[sep_ind + 1:] sys.argv = [sys.argv[0]] + ['-t','test'] + evv_args parser = argparse.ArgumentParser() parser.add_argument('--sampler', type=str, default='SuperChargedR4DVB') parser.add_argument('--sub_sampler', type=str, default='SuperChargedR4DV') parser.add_argument('--exp_name', type=str, default='scr4dvb_dance3') parser.add_argument('--save_fp32', action='store_true') parser.add_argument('--save_pt', action='store_true') parser.add_argument('--no_save_npz', action='store_false', dest='save_npz') args = parser.parse_args(our_args) # You have to save at least one type of model
# This function will try to invoke evc programmatically @catch_throw def main(): # fmt: off sys.path.append('.') sep_ind = sys.argv.index('--') our_args = sys.argv[1:sep_ind] evv_args = sys.argv[sep_ind + 1:] sys.argv = [sys.argv[0]] + ['-t','test'] + evv_args parser = argparse.ArgumentParser() parser.add_argument('--sampler', type=str, default='SuperChargedR4DVB') parser.add_argument('--sub_sampler', type=str, default='SuperChargedR4DV') parser.add_argument('--exp_name', type=str, default='scr4dvb_dance3') parser.add_argument('--save_fp32', action='store_true') parser.add_argument('--save_pt', action='store_true') parser.add_argument('--no_save_npz', action='store_false', dest='save_npz') args = parser.parse_args(our_args) # You have to save at least one type of model
assert args.save_pt or args.save_npz
1
2023-10-17 04:48:46+00:00
2k
pchunduri6/rag-demystified
complex_qa.py
[ { "identifier": "generate_subquestions", "path": "subquestion_generator.py", "snippet": "def generate_subquestions(\n question,\n file_names: List[str] = None,\n system_prompt=DEFAULT_SUBQUESTION_GENERATOR_PROMPT,\n user_task=DEFAULT_USER_TASK,\n llm_model=\"gpt-4-0613\",\n):\n \"\"\"Generates a list of subquestions from a user question along with the\n file name and the function to use to answer the question using OpenAI LLM.\n \"\"\"\n FilenameEnum = Enum(\"FilenameEnum\", {x.upper(): x for x in file_names})\n FilenameEnum.__doc__ = f\"The names of the file to use to answer the corresponding subquestion - e.g. {file_names[0]}\"\n\n # Create pydantic class dynamically\n QuestionBundle = create_model(\n \"QuestionBundle\",\n question=(\n str,\n Field(\n None, description=\"The subquestion extracted from the user's question\"\n ),\n ),\n function=(FunctionEnum, Field(None)),\n file_name=(FilenameEnum, Field(None)),\n )\n\n SubQuestionBundleList = create_model(\n \"SubQuestionBundleList\",\n subquestion_bundle_list=(\n List[QuestionBundle],\n Field(\n None,\n description=\"A list of subquestions - each item in the list contains a question, a function, and a file name\",\n ),\n ),\n __base__=OpenAISchema,\n )\n\n user_prompt = f\"{user_task}\\n Here is the user question: {question}\"\n\n few_shot_examples = [\n {\n \"role\": \"user\",\n \"content\": \"Compare the population of Atlanta and Toronto?\",\n },\n {\n \"role\": \"function\",\n \"name\": \"SubQuestionBundleList\",\n \"content\": \"\"\"\n {\n \"subquestion_bundle_list\": [\n {\n \"question\": \"What is the population of Atlanta?\",\n \"function\": \"vector_retrieval\",\n \"file_name\": \"Atlanta\"\n },\n {\n \"question\": \"What is the population of Toronto?\"\n \"function\": \"vector_retrieval\",\n \"file_name\": \"Toronto\"\n }\n ]\n }\"\"\",\n },\n {\n \"role\": \"user\",\n \"content\": \"Summarize the history of Chicago and Houston.\",\n },\n {\n \"role\": \"function\",\n \"name\": \"SubQuestionBundleList\",\n \"content\": \"\"\"\n {\n \"subquestion_bundle_list\": [\n {\n \"question\": \"What is the history of Chicago?\",\n \"function\": \"llm_retrieval\",\n \"file_name\": \"Chicago\"\n },\n {\n \"question\": \"What is the history of Houston?\",\n \"function\": \"llm_retrieval\",\n \"file_name\": \"Houston\"\n }\n ]\n }\"\"\",\n },\n ]\n\n response, cost = llm_call(\n model=llm_model,\n function_schema=[SubQuestionBundleList.openai_schema],\n output_schema={\"name\": SubQuestionBundleList.openai_schema[\"name\"]},\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n few_shot_examples=few_shot_examples,\n )\n\n subquestions_list = json.loads(response.choices[0].message.function_call.arguments)\n\n subquestions_pydantic_obj = SubQuestionBundleList(**subquestions_list)\n subquestions_list = subquestions_pydantic_obj.subquestion_bundle_list\n return subquestions_list, cost" }, { "identifier": "llm_call", "path": "openai_utils.py", "snippet": "def llm_call(\n model,\n function_schema=None,\n output_schema=None,\n system_prompt=\"You are an AI assistant that answers user questions using the context provided.\",\n user_prompt=\"Please help me answer the following question:\",\n few_shot_examples=None,\n):\n kwargs = {}\n if function_schema is not None:\n kwargs[\"functions\"] = function_schema\n if output_schema is not None:\n kwargs[\"function_call\"] = output_schema\n\n messages = []\n if system_prompt is not None:\n messages.append({\"role\": \"system\", \"content\": system_prompt})\n if few_shot_examples is not None:\n 
messages.extend(few_shot_examples)\n if user_prompt is not None:\n messages.append({\"role\": \"user\", \"content\": user_prompt})\n\n response = completion_with_backoff(\n model=model,\n temperature=0,\n messages=messages,\n **kwargs\n )\n\n # print cost of call\n call_cost = llm_call_cost(response)\n print(f\"🤑 LLM call cost: ${call_cost:.4f}\")\n return response, call_cost" } ]
import os import requests import warnings import evadb from dotenv import load_dotenv from pathlib import Path from subquestion_generator import generate_subquestions from openai_utils import llm_call
1,593
warnings.filterwarnings("ignore") if not load_dotenv(): print( "Could not load .env file or it is empty. Please check if it exists and is readable." ) exit(1) def generate_vector_stores(cursor, docs): """Generate a vector store for the docs using evadb. """ for doc in docs: print(f"Creating vector store for {doc}...") cursor.query(f"DROP TABLE IF EXISTS {doc};").df() cursor.query(f"LOAD DOCUMENT 'data/{doc}.txt' INTO {doc};").df() evadb_path = os.path.dirname(evadb.__file__) cursor.query( f"""CREATE FUNCTION IF NOT EXISTS SentenceFeatureExtractor IMPL '{evadb_path}/functions/sentence_feature_extractor.py'; """).df() cursor.query( f"""CREATE TABLE IF NOT EXISTS {doc}_features AS SELECT SentenceFeatureExtractor(data), data FROM {doc};""" ).df() cursor.query( f"CREATE INDEX IF NOT EXISTS {doc}_index ON {doc}_features (features) USING FAISS;" ).df() print(f"Successfully created vector store for {doc}.") def vector_retrieval(cursor, llm_model, question, doc_name): """Returns the answer to a factoid question using vector retrieval. """ res_batch = cursor.query( f"""SELECT data FROM {doc_name}_features ORDER BY Similarity(SentenceFeatureExtractor('{question}'),features) LIMIT 3;""" ).df() context_list = [] for i in range(len(res_batch)): context_list.append(res_batch["data"][i]) context = "\n".join(context_list) user_prompt = f"""You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise. Question: {question} Context: {context} Answer:"""
warnings.filterwarnings("ignore") if not load_dotenv(): print( "Could not load .env file or it is empty. Please check if it exists and is readable." ) exit(1) def generate_vector_stores(cursor, docs): """Generate a vector store for the docs using evadb. """ for doc in docs: print(f"Creating vector store for {doc}...") cursor.query(f"DROP TABLE IF EXISTS {doc};").df() cursor.query(f"LOAD DOCUMENT 'data/{doc}.txt' INTO {doc};").df() evadb_path = os.path.dirname(evadb.__file__) cursor.query( f"""CREATE FUNCTION IF NOT EXISTS SentenceFeatureExtractor IMPL '{evadb_path}/functions/sentence_feature_extractor.py'; """).df() cursor.query( f"""CREATE TABLE IF NOT EXISTS {doc}_features AS SELECT SentenceFeatureExtractor(data), data FROM {doc};""" ).df() cursor.query( f"CREATE INDEX IF NOT EXISTS {doc}_index ON {doc}_features (features) USING FAISS;" ).df() print(f"Successfully created vector store for {doc}.") def vector_retrieval(cursor, llm_model, question, doc_name): """Returns the answer to a factoid question using vector retrieval. """ res_batch = cursor.query( f"""SELECT data FROM {doc_name}_features ORDER BY Similarity(SentenceFeatureExtractor('{question}'),features) LIMIT 3;""" ).df() context_list = [] for i in range(len(res_batch)): context_list.append(res_batch["data"][i]) context = "\n".join(context_list) user_prompt = f"""You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise. Question: {question} Context: {context} Answer:"""
response, cost = llm_call(model=llm_model, user_prompt=user_prompt)
1
2023-10-18 16:32:51+00:00
2k
predibase/lorax
server/lorax_server/utils/sources/hub.py
[ { "identifier": "BaseModelSource", "path": "server/lorax_server/utils/sources/source.py", "snippet": "class BaseModelSource:\n def remote_weight_files(self, extension: str = None):\n raise NotImplementedError\n\n def weight_files(self, extension: str = None):\n raise NotImplementedError\n \n def download_weights(self, filenames: List[str]):\n raise NotImplementedError\n \n def download_model_assets(self):\n \"\"\" The reason we need this function is that for s3 \n we need to download all the model files whereas for \n hub we only need to download the weight files. And maybe \n for other future sources we might need something different. \n So this function will take the necessary steps to download\n the needed files for any source \"\"\"\n raise NotImplementedError" }, { "identifier": "try_to_load_from_cache", "path": "server/lorax_server/utils/sources/source.py", "snippet": "def try_to_load_from_cache(\n repo_cache: Path, revision: Optional[str], filename: str\n) -> Optional[Path]:\n \"\"\"Try to load a file from the Hugging Face cache\"\"\"\n if revision is None:\n revision = \"main\"\n\n if not repo_cache.is_dir():\n # No cache for this model\n return None\n\n refs_dir = repo_cache / \"refs\"\n snapshots_dir = repo_cache / \"snapshots\"\n\n # Resolve refs (for instance to convert main to the associated commit sha)\n if refs_dir.is_dir():\n revision_file = refs_dir / revision\n if revision_file.exists():\n with revision_file.open() as f:\n revision = f.read()\n\n # Check if revision folder exists\n if not snapshots_dir.exists():\n return None\n cached_shas = os.listdir(snapshots_dir)\n if revision and revision not in cached_shas:\n # No cache for this revision and we won't try to return a random revision\n return None\n\n # Check if file exists in cache\n cached_file = snapshots_dir / revision / filename\n return cached_file if cached_file.is_file() else None" } ]
import time import os from datetime import timedelta from loguru import logger from pathlib import Path from typing import Optional, List from huggingface_hub import HfApi, hf_hub_download from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from huggingface_hub.utils import ( LocalEntryNotFoundError, EntryNotFoundError, RevisionNotFoundError, # Import here to ease try/except in other part of the lib ) from .source import BaseModelSource, try_to_load_from_cache
1,180
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None) def get_hub_model_local_dir(model_id: str) -> Path: object_id = model_id.replace("/", "--") repo_cache = Path(HUGGINGFACE_HUB_CACHE) / f"models--{object_id}" return repo_cache def weight_hub_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[str]: """Get the weights filenames on the hub""" api = HfApi() info = api.model_info(model_id, revision=revision) filenames = [ s.rfilename for s in info.siblings if s.rfilename.endswith(extension) and len(s.rfilename.split("/")) == 1 and "arguments" not in s.rfilename and "args" not in s.rfilename and "training" not in s.rfilename ] if not filenames: raise EntryNotFoundError( f"No {extension} weights found for model {model_id} and revision {revision}.", None, ) return filenames def weight_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[Path]: """Get the local files""" # Local model if Path(model_id).exists() and Path(model_id).is_dir(): local_files = list(Path(model_id).glob(f"*{extension}")) if not local_files: raise FileNotFoundError( f"No local weights found in {model_id} with extension {extension}" ) return local_files try: filenames = weight_hub_files(model_id, revision, extension) except EntryNotFoundError as e: if extension != ".safetensors": raise e # Try to see if there are pytorch weights pt_filenames = weight_hub_files(model_id, revision, extension=".bin") # Change pytorch extension to safetensors extension # It is possible that we have safetensors weights locally even though they are not on the # hub if we converted weights locally without pushing them filenames = [ f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames ] if WEIGHTS_CACHE_OVERRIDE is not None: files = [] for filename in filenames: p = Path(WEIGHTS_CACHE_OVERRIDE) / filename if not p.exists(): raise FileNotFoundError( f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}." ) files.append(p) return files repo_cache = get_hub_model_local_dir(model_id) files = [] for filename in filenames:
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None) def get_hub_model_local_dir(model_id: str) -> Path: object_id = model_id.replace("/", "--") repo_cache = Path(HUGGINGFACE_HUB_CACHE) / f"models--{object_id}" return repo_cache def weight_hub_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[str]: """Get the weights filenames on the hub""" api = HfApi() info = api.model_info(model_id, revision=revision) filenames = [ s.rfilename for s in info.siblings if s.rfilename.endswith(extension) and len(s.rfilename.split("/")) == 1 and "arguments" not in s.rfilename and "args" not in s.rfilename and "training" not in s.rfilename ] if not filenames: raise EntryNotFoundError( f"No {extension} weights found for model {model_id} and revision {revision}.", None, ) return filenames def weight_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[Path]: """Get the local files""" # Local model if Path(model_id).exists() and Path(model_id).is_dir(): local_files = list(Path(model_id).glob(f"*{extension}")) if not local_files: raise FileNotFoundError( f"No local weights found in {model_id} with extension {extension}" ) return local_files try: filenames = weight_hub_files(model_id, revision, extension) except EntryNotFoundError as e: if extension != ".safetensors": raise e # Try to see if there are pytorch weights pt_filenames = weight_hub_files(model_id, revision, extension=".bin") # Change pytorch extension to safetensors extension # It is possible that we have safetensors weights locally even though they are not on the # hub if we converted weights locally without pushing them filenames = [ f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames ] if WEIGHTS_CACHE_OVERRIDE is not None: files = [] for filename in filenames: p = Path(WEIGHTS_CACHE_OVERRIDE) / filename if not p.exists(): raise FileNotFoundError( f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}." ) files.append(p) return files repo_cache = get_hub_model_local_dir(model_id) files = [] for filename in filenames:
cache_file = try_to_load_from_cache(
1
2023-10-20 18:19:49+00:00
2k
codefuse-ai/Test-Agent
chat/server/monitor/clean_chat_data.py
[ { "identifier": "NUM_SERVERS", "path": "chat/server/monitor/basic_stats.py", "snippet": "NUM_SERVERS = 14" }, { "identifier": "to_openai_format", "path": "chat/server/monitor/clean_battle_data.py", "snippet": "def to_openai_format(messages):\n roles = [\"user\", \"assistant\"]\n ret = []\n for i, x in enumerate(messages):\n ret.append({\"role\": roles[i % 2], \"content\": x[1]})\n return ret" }, { "identifier": "replace_model_name", "path": "chat/server/monitor/clean_battle_data.py", "snippet": "def replace_model_name(old_name):\n return (\n old_name.replace(\"bard\", \"palm-2\")\n .replace(\"claude-v1\", \"claude-1\")\n .replace(\"claude-instant-v1\", \"claude-instant-1\")\n .replace(\"oasst-sft-1-pythia-12b\", \"oasst-pythia-12b\")\n )" }, { "identifier": "detect_language", "path": "chat/utils.py", "snippet": "def detect_language(text: str) -> str:\n \"\"\"Detect the langauge of a string.\"\"\"\n import polyglot # pip3 install polyglot pyicu pycld2\n from polyglot.detect import Detector\n from polyglot.detect.base import logger as polyglot_logger\n import pycld2\n\n polyglot_logger.setLevel(\"ERROR\")\n\n try:\n lang_code = Detector(text).language.name\n except (pycld2.error, polyglot.detect.base.UnknownLanguage):\n lang_code = \"unknown\"\n return lang_code" } ]
import argparse import datetime import json import os import time from pytz import timezone from tqdm import tqdm from chat.server.monitor.basic_stats import NUM_SERVERS from chat.server.monitor.clean_battle_data import ( to_openai_format, replace_model_name, ) from chat.utils import detect_language
854
""" Clean chatbot arena chat log. Usage: python3 clean_chat_data.py --mode conv_release """ NETWORK_ERROR_MSG = ( "NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.".lower() ) def get_log_files(max_num_files=None): dates = [] for month in [4, 5, 6, 7]: for day in range(1, 32): dates.append(f"2023-{month:02d}-{day:02d}") for month in [8]: for day in range(1, 32): dates.append(f"2023-{month:02d}-{day:02d}") filenames = [] for d in dates: for i in range(NUM_SERVERS): name = os.path.expanduser(f"~/fastchat_logs/server{i}/{d}-conv.json") if os.path.exists(name): filenames.append(name) max_num_files = max_num_files or len(filenames) # filenames = list(reversed(filenames)) filenames = filenames[-max_num_files:] return filenames def clean_chat_data(log_files): raw_data = [] for filename in tqdm(log_files, desc="read files"): for retry in range(5): try: lines = open(filename).readlines() break except FileNotFoundError: time.sleep(2) for l in lines: row = json.loads(l) if row["type"] == "chat": raw_data.append(row) all_models = set() all_ips = dict() chats = [] ct_invalid_conv_id = 0 ct_invalid = 0 ct_network_error = 0 for row in raw_data: if "conv_id" not in row["state"]: ct_invalid_conv_id += 1 continue conversation_id = row["state"]["conv_id"] if conversation_id is None: ct_invalid_conv_id += 1 continue state = row["state"]
""" Clean chatbot arena chat log. Usage: python3 clean_chat_data.py --mode conv_release """ NETWORK_ERROR_MSG = ( "NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.".lower() ) def get_log_files(max_num_files=None): dates = [] for month in [4, 5, 6, 7]: for day in range(1, 32): dates.append(f"2023-{month:02d}-{day:02d}") for month in [8]: for day in range(1, 32): dates.append(f"2023-{month:02d}-{day:02d}") filenames = [] for d in dates: for i in range(NUM_SERVERS): name = os.path.expanduser(f"~/fastchat_logs/server{i}/{d}-conv.json") if os.path.exists(name): filenames.append(name) max_num_files = max_num_files or len(filenames) # filenames = list(reversed(filenames)) filenames = filenames[-max_num_files:] return filenames def clean_chat_data(log_files): raw_data = [] for filename in tqdm(log_files, desc="read files"): for retry in range(5): try: lines = open(filename).readlines() break except FileNotFoundError: time.sleep(2) for l in lines: row = json.loads(l) if row["type"] == "chat": raw_data.append(row) all_models = set() all_ips = dict() chats = [] ct_invalid_conv_id = 0 ct_invalid = 0 ct_network_error = 0 for row in raw_data: if "conv_id" not in row["state"]: ct_invalid_conv_id += 1 continue conversation_id = row["state"]["conv_id"] if conversation_id is None: ct_invalid_conv_id += 1 continue state = row["state"]
conversation = to_openai_format(state["messages"][state["offset"] :])
1
2023-10-20 08:56:20+00:00
2k