id | text | dataset_id
---|---|---|
9745155 | # USAGE
# python /home/nmorales/cxgn/DroneImageScripts/ImageProcess/RemoveBackground.py --image_path /folder/mypic.png --outfile_path /export/mychoppedimages/outimage.png
# import the necessary packages
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image_path", required=True, help="image path")
ap.add_argument("-o", "--outfile_path", required=True, help="file path directory where the output will be saved")
ap.add_argument("-t", "--lower_threshold", required=True, help="lower threshold value to remove from image")
ap.add_argument("-l", "--upper_threshold", required=True, help="upper threshold value to remove from image")
args = vars(ap.parse_args())
input_image = args["image_path"]
outfile_path = args["outfile_path"]
upper_thresh = args["upper_threshold"]
lower_thresh = args["lower_threshold"]
src = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
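# Note: with cv2.THRESH_TOZERO, pixels at or below the threshold are set to 0 and
# the maxval argument (upper_thresh here) is ignored by OpenCV, so it has no effect.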
th, dst = cv2.threshold(src, int(float(lower_thresh)), int(float(upper_thresh)), cv2.THRESH_TOZERO)
#cv2.imshow("Result", dst)
cv2.imwrite(outfile_path, dst)
#cv2.waitKey(0)
| StarcoderdataPython |
11367794 | <reponame>tdiprima/code
class itemproperty(object):
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
if doc is None and fget is not None and hasattr(fget, "__doc__"):
doc = fget.__doc__
self._get = fget
self._set = fset
self._del = fdel
self.__doc__ = doc
def __get__(self, instance, owner):
if instance is None:
return self
else:
return bounditemproperty(self, instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def getter(self, fget):
return itemproperty(fget, self._set, self._del, self.__doc__)
def setter(self, fset):
return itemproperty(self._get, fset, self._del, self.__doc__)
def deleter(self, fdel):
return itemproperty(self._get, self._set, fdel, self.__doc__)
class bounditemproperty(object):
def __init__(self, item_property, instance):
self.__item_property = item_property
self.__instance = instance
def __getitem__(self, key):
fget = self.__item_property._get
if fget is None:
raise AttributeError("unreadable attribute item")
return fget(self.__instance, key)
def __setitem__(self, key, value):
fset = self.__item_property._set
if fset is None:
raise AttributeError("can't set attribute item")
fset(self.__instance, key, value)
def __delitem__(self, key):
fdel = self.__item_property._del
if fdel is None:
raise AttributeError("can't delete attribute item")
fdel(self.__instance, key)
if __name__ == "__main__":
class Element(object):
def __init__(self, tag, value=None):
self.tag = tag
self.value = value
self.children = {}
@itemproperty
def xpath(self, path):
"""Get or set the value at a relative path."""
path = path.split('/')
element = self
for tag in path:
if tag in element.children:
element = element.children[tag]
else:
raise KeyError('path does not exist')
return element.value
@xpath.setter
def xpath(self, path, value):
path = path.split('/')
element = self
for tag in path:
element = element.children.setdefault(tag, Element(tag))
element.value = value
@xpath.deleter
def xpath(self, path):
path = path.split('/')
element = self
for tag in path[:-1]:
if tag in element.children:
element = element.children[tag]
else:
raise KeyError('path does not exist')
tag = path[-1]
if tag in element.children:
del element.children[tag]
else:
raise KeyError('path does not exist')
tree = Element('root')
tree.xpath['unladen/swallow'] = 'african'
assert tree.xpath['unladen/swallow'] == 'african'
assert tree.children['unladen'].xpath['swallow'] == 'african'
assert tree.children['unladen'].children['swallow'].value == 'african'
tree.xpath['unladen/swallow'] = 'european'
assert tree.xpath['unladen/swallow'] == 'european'
assert len(tree.children) == 1
assert len(tree.children['unladen'].children) == 1
tree.xpath['unladen/swallow/airspeed'] = 42
assert tree.xpath['unladen/swallow'] == 'european'
assert tree.xpath['unladen/swallow/airspeed'] == 42
del tree.xpath['unladen/swallow']
assert 'swallow' not in tree.children['unladen'].children
try:
tree.xpath['unladen/swallow/airspeed']
except KeyError:
pass
else:
assert False
| StarcoderdataPython |
6610117 | <filename>bbcprc/old/files.py
import contextlib
import os
def with_suffix(root, suffix=None):
for f in os.listdir(root):
if not suffix or f.endswith(suffix):
yield os.path.join(root, f)
@contextlib.contextmanager
def delete_on_fail(fname, mode='wb', open=open, delete=True):
with open(fname, mode) as fp:
try:
yield fp
except Exception:
if delete:
try:
os.remove(fname)
except Exception:
pass
raise
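# --- Editor's addition: a hedged usage sketch of delete_on_fail (the path is
# illustrative). If the with-body raises, the partially written file is removed
# before the exception propagates.
if __name__ == "__main__":
    with delete_on_fail("/tmp/out.bin") as fp:
        fp.write(b"partial data")
        # raising an exception here would remove /tmp/out.bin and re-raise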
| StarcoderdataPython |
11207087 | # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the different models available in the lightweightMMM lib.
Currently this file contains a main model with three possible options for
processing the media data, which essentially allows building three different
models:
- Adstock
- Hill-Adstock
- Carryover
"""
from typing import Any, Callable, Mapping, Optional
import frozendict
import jax.numpy as jnp
import numpyro
from numpyro import distributions as dist
from lightweight_mmm import media_transforms
def transform_adstock(media_data: jnp.ndarray,
normalise: bool = True) -> jnp.ndarray:
"""Transforms the input data with the adstock function and exponent.
Args:
media_data: Media data to be transformed.
normalise: Whether to normalise the output values.
Returns:
The transformed media data.
"""
with numpyro.plate("lag_weight_plate", media_data.shape[1]):
lag_weight = numpyro.sample("lag_weight",
dist.Beta(concentration1=2., concentration0=1.))
with numpyro.plate("exponent_plate", media_data.shape[1]):
exponent = numpyro.sample("exponent",
dist.Beta(concentration1=9., concentration0=1.))
adstock = media_transforms.adstock(
data=media_data, lag_weight=lag_weight, normalise=normalise)
return media_transforms.apply_exponent_safe(data=adstock, exponent=exponent)
def transform_hill_adstock(media_data: jnp.ndarray,
normalise: bool = True) -> jnp.ndarray:
"""Transforms the input data with the adstock and hill functions.
Args:
media_data: Media data to be transformed.
normalise: Whether to normalise the output values.
Returns:
The transformed media data.
"""
with numpyro.plate("lag_weight_plate", media_data.shape[1]):
lag_weight = numpyro.sample("lag_weight",
dist.Beta(concentration1=2., concentration0=1.))
with numpyro.plate("half_max_effective_concentration_plate",
media_data.shape[1]):
half_max_effective_concentration = numpyro.sample(
"half_max_effective_concentration",
dist.Gamma(concentration=1., rate=1.))
with numpyro.plate("slope_plate", media_data.shape[1]):
slope = numpyro.sample("slope", dist.Gamma(concentration=1., rate=1.))
return media_transforms.hill(
data=media_transforms.adstock(
data=media_data, lag_weight=lag_weight, normalise=normalise),
half_max_effective_concentration=half_max_effective_concentration,
slope=slope)
def transform_carryover(media_data: jnp.ndarray,
number_lags: int = 13) -> jnp.ndarray:
"""Transforms the input data with the carryover function and exponent.
Args:
media_data: Media data to be transformed.
number_lags: Number of lags for the carryover function.
Returns:
The transformed media data.
"""
with numpyro.plate("ad_effect_retention_rate_plate", media_data.shape[1]):
ad_effect_retention_rate = numpyro.sample(
"ad_effect_retention_rate",
dist.Beta(concentration1=1., concentration0=1.))
with numpyro.plate("peak_effect_delay_plate", media_data.shape[1]):
peak_effect_delay = numpyro.sample("peak_effect_delay",
dist.HalfNormal(scale=2.))
with numpyro.plate("exponent_plate", media_data.shape[1]):
exponent = numpyro.sample("exponent",
dist.Beta(concentration1=9., concentration0=1.))
carryover = media_transforms.carryover(
data=media_data,
ad_effect_retention_rate=ad_effect_retention_rate,
peak_effect_delay=peak_effect_delay,
number_lags=number_lags)
return media_transforms.apply_exponent_safe(data=carryover, exponent=exponent)
def media_mix_model(media_data: jnp.ndarray,
target_data: jnp.ndarray,
cost_prior: jnp.ndarray,
degrees_seasonality: int,
frequency: int,
transform_function: Callable[[jnp.array], jnp.array],
transform_kwargs: Mapping[str,
Any] = frozendict.frozendict(),
weekday_seasonality: bool = False,
extra_features: Optional[jnp.array] = None) -> None:
"""Media mix model.
Args:
media_data: Media data to be used in the model.
target_data: Target data for the model.
cost_prior: Cost prior for each of the media channels.
degrees_seasonality: Number of degrees of seasonality to use.
frequency: Frequency of the time span which was used to aggregate the data.
E.g. for weekly data the frequency is 52.
transform_function: Function to use to transform the media data in the
model. Currently the following are supported: 'transform_adstock',
'transform_carryover' and 'transform_hill_adstock'.
transform_kwargs: Any extra keyword arguments to pass to the transform
function. For example, the adstock function can take a boolean indicating
whether to normalise the output.
weekday_seasonality: In case of daily data you can estimate a weekday (7)
parameter.
extra_features: Extra features data to include in the model.
"""
data_size = media_data.shape[0]
intercept = numpyro.sample("intercept", dist.Normal(loc=0., scale=2.))
sigma = numpyro.sample("sigma", dist.Gamma(concentration=1., rate=1.))
beta_trend = numpyro.sample("beta_trend", dist.Normal(loc=0., scale=1.))
expo_trend = numpyro.sample("expo_trend",
dist.Beta(concentration1=1., concentration0=1.))
with numpyro.plate("media_plate", media_data.shape[1]) as i:
beta_media = numpyro.sample("beta_media",
dist.HalfNormal(scale=cost_prior[i]))
with numpyro.plate("gamma_seasonality_plate", 2):
with numpyro.plate("seasonality_plate", degrees_seasonality):
gamma_seasonality = numpyro.sample("gamma_seasonality",
dist.Normal(loc=0., scale=1.))
if weekday_seasonality:
with numpyro.plate("weekday_plate", 7):
weekday = numpyro.sample("weekday", dist.Normal(loc=0., scale=.5))
weekday_series = weekday[jnp.arange(data_size) % 7]
media_transformed = numpyro.deterministic(
name="media_transformed",
value=transform_function(media_data, **transform_kwargs))
seasonality = media_transforms.calculate_seasonality(
number_periods=data_size,
degrees=degrees_seasonality,
frequency=frequency,
gamma_seasonality=gamma_seasonality)
# expo_trend is B(1, 1) so that the exponent on time is in [.5, 1.5].
prediction = (
intercept + beta_trend * jnp.arange(data_size) ** (expo_trend + 0.5) +
seasonality + media_transformed.dot(beta_media))
if extra_features is not None:
with numpyro.plate("extra_features_plate", extra_features.shape[1]):
beta_extra_features = numpyro.sample("beta_extra_features",
dist.Normal(loc=0., scale=1.))
prediction += extra_features.dot(beta_extra_features)
if weekday_seasonality:
prediction += weekday_series
mu = numpyro.deterministic(name="mu", value=prediction)
numpyro.sample(
name="target", fn=dist.Normal(loc=mu, scale=sigma), obs=target_data)
| StarcoderdataPython |
24685 | <gh_stars>1-10
import gzip
import numpy as np
import os
import pandas as pd
import shutil
import sys
import tarfile
import urllib
import zipfile
from scipy.sparse import vstack
from sklearn import datasets
from joblib import Memory  # sklearn.externals.joblib was removed; joblib is now a standalone package
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
mem = Memory("./mycache")
@mem.cache
def get_higgs(num_rows=None):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz'
filename = 'HIGGS.csv'
if not os.path.isfile(filename):
urlretrieve(url, filename + '.gz')
with gzip.open(filename + '.gz', 'rb') as f_in:
with open(filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
higgs = pd.read_csv(filename)
X = higgs.iloc[:, 1:].values
y = higgs.iloc[:, 0].values
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
@mem.cache
def get_cover_type(num_rows=None):
data = datasets.fetch_covtype()
X = data.data
y = data.target
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
@mem.cache
def get_synthetic_regression(num_rows=None):
if num_rows is None:
num_rows = 10000000
return datasets.make_regression(n_samples=num_rows, bias=100, noise=1.0)
@mem.cache
def get_year(num_rows=None):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip'
filename = 'YearPredictionMSD.txt'
if not os.path.isfile(filename):
urlretrieve(url, filename + '.zip')
zip_ref = zipfile.ZipFile(filename + '.zip', 'r')
zip_ref.extractall()
zip_ref.close()
year = pd.read_csv('YearPredictionMSD.txt', header=None)
X = year.iloc[:, 1:].values
y = year.iloc[:, 0].values
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
@mem.cache
def get_url(num_rows=None):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/url/url_svmlight.tar.gz'
filename = 'url_svmlight.tar.gz'
if not os.path.isfile(filename):
urlretrieve(url, filename)
tar = tarfile.open(filename, "r:gz")
tar.extractall()
tar.close()
num_files = 120
files = ['url_svmlight/Day{}.svm'.format(day) for day in range(num_files)]
data = datasets.load_svmlight_files(files)
X = vstack(data[::2])
y = np.concatenate(data[1::2])
y[y < 0.0] = 0.0
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
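# --- Editor's addition: a hedged usage sketch. Each loader above caches its
# result via joblib's Memory; the synthetic loader avoids the large downloads.
if __name__ == "__main__":
    X, y = get_synthetic_regression(num_rows=1000)
    print(X.shape, y.shape)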
| StarcoderdataPython |
4925091 | from typing import Optional
from .event import Event
from .event import NONAME
from .output import Output, ConsoleOutput, FileOutput
class Core(Output):
project: str
env: str
console_output: Optional[ConsoleOutput]
file_output: Optional[FileOutput]
"""
Core maintains the logging system's outputs (the console output and the file output) and holds the global configuration.
"""
def __init__(self, opts: Optional[dict]):
self.project = NONAME
self.env = NONAME
self.console_output = None
self.file_output = None
if opts is None:
return
if "project" in opts:
self.project = str(opts["project"])
if "env" in opts:
self.env = str(opts["env"])
if "console" in opts:
self.console_output = ConsoleOutput(opts["console"])
if "file" in opts:
self.file_output = FileOutput(opts["file"])
def append_event(self, event: Event) -> None:
if self.console_output is not None:
self.console_output.append_event(event)
if self.file_output is not None:
self.file_output.append_event(event)
def create_event(self) -> Event:
e = Event()
e.project = self.project
e.env = self.env
e.output = self
return e
class CoreProvider(object):
"""
CoreProvider wraps a Core so that a Logger can switch Cores.
"""
core: Core
def get_core(self) -> Core:
return self.core
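# --- Editor's addition: a hedged usage sketch relying only on the code above.
# With neither "console" nor "file" configured, append_event is a no-op.
if __name__ == "__main__":
    core = Core({"project": "demo", "env": "dev"})
    event = core.create_event()  # carries project/env and a back-reference to core
    core.append_event(event)     # no outputs configured, so nothing is written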
| StarcoderdataPython |
306285 | <gh_stars>10-100
class TestDemo:
print('testing')
| StarcoderdataPython |
16705 | from systems.plugins.index import BaseProvider
import os
class Provider(BaseProvider('task', 'upload')):
def execute(self, results, params):
file_path = self.get_path(self.field_file)
if not os.path.exists(file_path):
self.command.error("Upload task provider file {} does not exist".format(file_path))
ssh = self._get_ssh()
ssh.upload(file_path, self.field_remote_path,
mode = self.field_mode,
owner = self.field_owner,
group = self.field_group
)
| StarcoderdataPython |
3241909 | <reponame>shantanusharma/bigmler<filename>bigmler/whizzml/dispatcher.py
# -*- coding: utf-8 -*-
#
# Copyright 2016-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer whizzml main processing
Functions to process the whizzml options
"""
import sys
import os
import bigmler.utils as u
from bigmler.whizzml.package import create_package
from bigmler.dispatcher import SESSIONS_LOG, clear_log_files
from bigmler.command import get_context
COMMAND_LOG = ".bigmler_whizzml"
DIRS_LOG = ".bigmler_whizzml_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
SETTINGS = {
"command_log": COMMAND_LOG,
"sessions_log": SESSIONS_LOG,
"dirs_log": DIRS_LOG}
def whizzml_dispatcher(args=sys.argv[1:]):
"""Main processing of the parsed options for BigMLer whizzml
"""
# If --clear-logs the log files are cleared
if "--clear-logs" in args:
clear_log_files(LOG_FILES)
command_args, command, api, _, resume = get_context(args, SETTINGS)
# package_dir
if command_args.package_dir is not None:
command_args.package_dir = os.path.expanduser(command_args.package_dir)
create_package(command_args, api, command,
resume=resume)
else:
sys.exit("You must use the --package-dir flag pointing to the"
" directory where the metadata.json file is. Type\n"
" bigmler whizzml --help\n"
" to see all the available options.")
| StarcoderdataPython |
6626215 | # Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'test-compile-as-managed',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'true',
'ExceptionHandling': '0' # /clr is incompatible with /EHs
}
},
'sources': ['compile-as-managed.cc'],
},
{
'target_name': 'test-compile-as-unmanaged',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'false',
}
},
'sources': ['compile-as-managed.cc'],
},
]
}
| StarcoderdataPython |
3251813 | # See https://github.com/confluentinc/confluent-kafka-python
from confluent_kafka.admin import AdminClient, NewTopic
app_settings = {
"bootstrap.servers": "TODO",
"topics": [
"topic1",
"topic2",
],
}
a = AdminClient({"bootstrap.servers": app_settings["bootstrap.servers"]})
# Note: In a multi-cluster production scenario, it is more typical to use a replication_factor of 3 for durability.
new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in app_settings["topics"]]
# Call create_topics to asynchronously create topics. A dict of <topic,future> is returned.
fs = a.create_topics(new_topics)
# Wait for each operation to finish.
for topic, f in fs.items():
try:
f.result() # The result itself is None
print(f"Topic {topic} created")
except Exception as e:
print(f"Failed to create topic {topic}: {e}")
| StarcoderdataPython |
4884236 | <reponame>ezekielkibiego/projects254
# Generated by Django 2.2.24 on 2022-02-12 12:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='images')),
('description', models.TextField(max_length=600)),
('techs_used', models.TextField(max_length=100, null=True)),
('url', models.URLField(null=True)),
('link', models.URLField(null=True)),
('date', models.DateTimeField(auto_now_add=True, null=True)),
],
),
]
| StarcoderdataPython |
6422045 | <reponame>gembcior/FortressTools<filename>src/fortresstools/command/__init__.py<gh_stars>0
from .base import UnsupportedExecutor
from .dir import *
from .git import *
from .cmake import *
from .pip import *
from .venv import *
from .rsync import *
from .svn import *
from .test import *
| StarcoderdataPython |
6618548 | <reponame>baggakunal/learning-python<filename>src/prime_number.py
from math import sqrt
def is_prime(num: int) -> bool:
if num < 2:
return False
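    # Trial division only needs to run up to sqrt(num): e.g. for num = 97,
    # int(sqrt(97)) = 9, so the loop below tests i = 2..9.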
for i in range(2, int(sqrt(num)) + 1):
if num % i == 0:
return False
return True
def main():
print([n for n in range(101) if is_prime(n)])
if __name__ == '__main__':
main()
| StarcoderdataPython |
3458579 | from svbench.io_tools import *
from svbench.quant_tools import *
from svbench.loaders import *
| StarcoderdataPython |
9659937 | <reponame>MaciejTe/integration
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"define factories from where to create namespaces"
from .docker_compose_manager import (
DockerComposeStandardSetup,
DockerComposeMonitorCommercialSetup,
DockerComposeDockerClientSetup,
DockerComposeRofsClientSetup,
DockerComposeLegacyClientSetup,
DockerComposeSignedArtifactClientSetup,
DockerComposeShortLivedTokenSetup,
DockerComposeFailoverServerSetup,
DockerComposeEnterpriseSetup,
DockerComposeCustomSetup,
DockerComposeCompatibilitySetup,
DockerComposeMTLSSetup,
DockerComposeMenderClient_2_5,
)
from .kubernetes_manager import KubernetesEnterpriseSetup, isK8S
class ContainerManagerFactory:
def getStandardSetup(self, name=None, num_clients=1):
"""Standard setup consisting on all core backend services and optionally clients
The num_clients define how many QEMU Mender clients will be spawn.
"""
pass
def getMonitorCommercialSetup(self, name=None, num_clients=1):
"""Monitor client setup consisting on all core backend services and monitor-client
The num_clients define how many QEMU Mender clients will be spawn.
"""
pass
def getDockerClientSetup(self, name=None):
"""Standard setup with one Docker client instead of QEMU one"""
pass
def getRofsClientSetup(self, name=None):
"""Standard setup with one QEMU Read-Only FS client instead of standard R/W"""
pass
def getLegacyClientSetup(self, name=None):
"""Setup with one Mender client v1.7"""
pass
def getSignedArtifactClientSetup(self, name=None):
"""Standard setup with pre-installed verification key in the client"""
pass
def getShortLivedTokenSetup(self, name=None):
"""Standard setup on which deviceauth has a short lived token (expire timeout = 0)"""
pass
def getFailoverServerSetup(self, name=None):
"""Setup with two servers and one client.
First server (A) behaves as usual, whereas the second server (B) should
not expect any clients. Client is initially set up against server A.
"""
pass
def getEnterpriseSetup(self, name=None, num_clients=0):
"""Setup with enterprise versions for the applicable services"""
pass
def getEnterpriseSMTPSetup(self, name=None):
"""Enterprise setup with SMTP enabled"""
pass
def getCustomSetup(self, name=None):
"""A noop setup for tests that use custom setups
It only implements teardown() for these tests to still have a way
for the framework to clean after them (most importantly on errors).
"""
pass
class DockerComposeManagerFactory(ContainerManagerFactory):
def getStandardSetup(self, name=None, num_clients=1):
return DockerComposeStandardSetup(name, num_clients)
def getMonitorCommercialSetup(self, name=None, num_clients=0):
return DockerComposeMonitorCommercialSetup(name, num_clients)
def getDockerClientSetup(self, name=None):
return DockerComposeDockerClientSetup(name)
def getRofsClientSetup(self, name=None):
return DockerComposeRofsClientSetup(name)
def getLegacyClientSetup(self, name=None):
return DockerComposeLegacyClientSetup(name)
def getSignedArtifactClientSetup(self, name=None):
return DockerComposeSignedArtifactClientSetup(name)
def getShortLivedTokenSetup(self, name=None):
return DockerComposeShortLivedTokenSetup(name)
def getFailoverServerSetup(self, name=None):
return DockerComposeFailoverServerSetup(name)
def getEnterpriseSetup(self, name=None, num_clients=0):
return DockerComposeEnterpriseSetup(name, num_clients)
def getCompatibilitySetup(self, name=None, **kwargs):
return DockerComposeCompatibilitySetup(name, **kwargs)
def getMTLSSetup(self, name=None, **kwargs):
return DockerComposeMTLSSetup(name, **kwargs)
def getMenderClient_2_5(self, name=None, **kwargs):
return DockerComposeMenderClient_2_5(name, **kwargs)
def getCustomSetup(self, name=None):
return DockerComposeCustomSetup(name)
class KubernetesManagerFactory(ContainerManagerFactory):
def getEnterpriseSetup(self, name=None, num_clients=0):
return KubernetesEnterpriseSetup(name, num_clients)
def getMonitorCommercialSetup(self, name=None, num_clients=0):
return KubernetesEnterpriseSetup(name, num_clients)
def get_factory():
if isK8S():
return KubernetesManagerFactory()
else:
return DockerComposeManagerFactory()
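# --- Editor's addition: a hedged usage sketch based only on the code above.
# get_factory() selects the Kubernetes or docker-compose factory depending on
# the environment; the setup name and client count are illustrative.
if __name__ == "__main__":
    factory = get_factory()
    setup = factory.getEnterpriseSetup(name="demo-setup", num_clients=0)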
| StarcoderdataPython |
5128469 | from .alexnet import AlexNetV1, AlexNetV2, AlexNetV3
from .resnet import ResNet
from .resnet2plus1d import ResNet2Plus1d
from .resnet3d import ResNet3d
from .resnet3d_csn import ResNet3dCSN
from .resnet3d_slowfast import ResNet3dSlowFast
from .resnet3d_slowonly import ResNet3dSlowOnly
from .resnet_tin import ResNetTIN
from .resnet_tsm import ResNetTSM
__all__ = [
'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', 'ResNet3dSlowFast',
'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'AlexNetV1', 'AlexNetV2',
'AlexNetV3'
]
| StarcoderdataPython |
8034011 | <reponame>marici/recipebook
# -*- coding: utf-8 -*-
'''
The MIT License
Copyright (c) 2009 Marici, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from datetime import datetime
from django.conf import settings
from django.core import urlresolvers, serializers
from django.http import HttpResponse, Http404, HttpResponseForbidden
from django.template import loader, Context, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from maricilib.django.decorators import postmethod
from maricilib.django.shortcuts import render_to_response_of_class
from maricilib.django.core.paginator import Paginator
from maricilib.django.apps.taskqueue.queue import get_taskqueue
from maricilib.django.apps.taskqueue.tasks import SendEmailTask
from recipes.models import Contest, Recipe
per_page = 10
def show_current_contest_list(request, page=1):
'''
Displays contests that are currently accepting recipes, meaning:
* contest.published_at is earlier than the current time
* contest.closed_at is later than the current time
@param page: page number (default: 1)
@context page_obj: Page object whose object_list holds the queryset
@return: 200 response (success)
'''
contests = Contest.objects.get_current_contests()
page_obj = Paginator(contests, per_page).page(page)
d = {'current': True,
'title': u'レシピを募集中のお題',
'page_obj': page_obj}
return render_to_response('recipes/contests.html',
d, RequestContext(request))
def show_closed_contest_list(request, page=1):
'''
Displays contests whose submission period has ended, meaning:
* contest.published_at is earlier than the current time
* contest.closed_at is earlier than the current time
@param page: page number (default: 1)
@context page_obj: Page object whose object_list holds the queryset
@return: 200 response (success)
'''
contests = Contest.objects.get_closed_contests_qs()
page_obj = Paginator(contests, per_page).page(page)
d = {'current': False,
'title': u'募集終了したお題',
'page_obj': page_obj}
return render_to_response('recipes/contests.html',
d, RequestContext(request))
def show_contest(request, contest_id=None):
'''
Displays the details of a contest.
A contest whose published_at is in the future cannot be displayed.
@param contest_id: contest ID
@context contest: Contest instance
@context contests: all contests currently accepting recipes
@return: 404 response (if the contest does not exist or published_at is in the future)
@return: 200 response (success)
'''
contest = get_object_or_404(Contest, pk=contest_id)
if not contest.is_published():
raise Http404
contests = Contest.objects.get_current_contests()
d = {'contest': contest,
'contests': contests}
if contest.is_really_finished():
award_recipes = contest.get_awarded_recipes()
d['top_award_recipes'] = award_recipes[:2]
d['award_recipes'] = award_recipes[2:]
return render_to_response('recipes/contest.html',
d, RequestContext(request))
def show_recipes(request, contest_id=None, page=1):
'''
Displays the recipes submitted to a contest, newest first.
The following recipes are included:
* recipe.contest is the given contest
* is_draft is False
@param contest_id: contest ID
@param page: page number (default: 1)
@context page_obj: Page object whose object_list holds Recipe instances
@return: 404 response (if the contest does not exist or, TODO: published_at is in the future)
@return: 200 response (success)
'''
contest = get_object_or_404(Contest, pk=contest_id)
recipes = contest.recipe_set.filter(is_draft=False)
page_obj = Paginator(recipes, per_page).page(page)
links = [{'url': urlresolvers.reverse('recipes-contests-show',
kwargs={'contest_id': contest.id}),
'name': contest.name}]
d = {'title': u'%s に投稿されたレシピ一覧' % contest.name,
'page_obj': page_obj, 'links': links}
return render_to_response('recipes/recipes.html',
d, RequestContext(request))
@postmethod
@login_required
def submit_recipe(request, contest_id=None, recipe_id=None):
'''
Submits the recipe with the given ID to the contest with the given ID.
The submitted recipe gets recipe.contest = contest.
Only the recipe's author may submit it.
A recipe that is already tied to a contest cannot be resubmitted.
@param contest_id: ID of the Contest instance
@param recipe_id: ID of the Recipe instance
@return: 200 response (success; returns JSON)
@return: 302 response (to the login page if not logged in)
@return: 403 response (if recipe.contest != None or
request.user != recipe.user)
@return: 404 response (if the given Recipe or Contest instance does not exist)
'''
contest = get_object_or_404(Contest, pk=contest_id)
recipe = get_object_or_404(Recipe, pk=recipe_id)
if recipe.user != request.user or recipe.contest:
return render_to_response_of_class(HttpResponseForbidden, '403.html')
recipe.contest = contest
recipe.save()
data = serializers.serialize('json', [recipe])
return HttpResponse(data, mimetype='application/javascript')
def search_contests(request, query=None, page=1):
'''
Searches contests.
@param query: search string
@param page: page to display (default: 1)
@context page_obj: object whose object_list holds the results
@return: 200 response (success)
'''
query = query or request.GET['query']
title = u'%s のコンテスト検索結果' % query
queries = query.split()
contests = Contest.objects.search(queries, page=page, per_page=per_page)
page_obj = Paginator(contests.get('object_list'), per_page).page(page)
links = [{'name': u'全体から検索',
'url': urlresolvers.reverse('gp-search',
kwargs={'query': query})}]
return render_to_response('recipes/contests.html',
{'page_obj': page_obj,
'title': title,
'links': links},
RequestContext(request))
@postmethod
@login_required
def mail_recipe_template(request, contest_id=None):
'''
Emails the recipe template to the address given by alter_email in the POST data.
Only logged-in users may do this.
If the alter_email value differs from profile.alter_email, profile.alter_email is updated.
@param contest_id: contest ID
@return: 302 response (to the login page if not logged in)
@return: 404 response (if the contest with the given ID does not exist)
@return: 200 response (returns the recipe JSON data on success)
'''
site = Site.objects.get_current()
profile = request.user.get_profile()
contest = get_object_or_404(Contest, pk=contest_id) if contest_id else None
email = request.POST.get('alter_email', profile.alter_email)
if email != profile.alter_email:
profile.alter_email = email
if profile.has_available_token():
profile.token_issued_at = datetime.now()
else:
profile.issue_recipe_token()
profile.save()
c = Context({'user': request.user, 'contest': contest,
'token': profile.recipe_token})
t = loader.get_template('recipes/email/recipe_template.txt')
if contest:
subject = u'[%s] %s へのレシピ投稿' % (site.name, contest.name)
else:
subject = u'[%s] レシピ投稿' % site.name
body = t.render(c)
task = SendEmailTask(dict(subject=subject, body=body,
from_address=settings.EMAIL_FROM,
to_list=[email]))
get_taskqueue().send_task(task, queue_name=settings.QUEUENAME_EMAIL)
json = serializers.serialize('json', [])
return HttpResponse(json, mimetype='application/json')
| StarcoderdataPython |
6665419 | <reponame>sbruch/xe-ndcg-experiments<filename>lib.py<gh_stars>1-10
import math
import numpy as np
import random
import lightgbm as gbm
class SplitConfig(object):
def __init__(self, population_pct, sample_size, transformations=None):
"""Creates a split configuration.
Args:
population_pct: (float) The percentage of the original dataset
to use as the population.
sample_size: (int) The number of queries to sample from the population
to form the split.
transformations: list of `Transformation` objects to apply to
sampled queries.
"""
self.population_pct = population_pct
self.sample_size = sample_size
self.transformations = transformations
if self.transformations is None:
self.transformations = []
class Collection(object):
"""Data structure that holds a collection of queries."""
def __init__(self, paths):
self.features = {}
self.relevances = {}
for path in paths:
for line in open(path, "r"):
items = line.split()
rel = int(items[0])
qid = int(items[1].split(":")[1])
if qid not in self.features:
self.features[qid] = []
self.relevances[qid] = []
self.features[qid].append(
np.array([float(s.split(':')[1]) for s in items[2:]]))
self.relevances[qid].append(rel)
self.qids = [x for x, _ in self.features.items()]
@property
def num_queries(self):
return len(self.qids)
def generate_splits(self, configs, params=None):
"""Generates splits for training and evaluation.
Args:
configs: list of `SplitConfig` objects.
params: (dict) Parameters to pass to LightGBM.Dataset.
Returns:
List of `lightgbm.Dataset` objects.
"""
# Randomly shuffle the query IDs.
random.shuffle(self.qids)
# Gather query IDs for each split population.
population_qids = []
lower = 0
for pct in [c.population_pct for c in configs]:
upper = int(lower + pct * self.num_queries + 1)
if upper >= self.num_queries:
upper = self.num_queries
population_qids.append(self.qids[lower:upper])
lower = upper
# Sample queries to form each split.
split_qids = []
for sample_size in [c.sample_size for c in configs]:
split_qids.append(np.random.choice(
population_qids[len(split_qids)], sample_size))
# List of datasets to return
datasets = []
for qids in split_qids:
# Create a deep copy of features and relevances.
relevances = [np.copy(self.relevances[qid]) for qid in qids]
features = [np.copy(self.features[qid]) for qid in qids]
for transform in configs[len(datasets)].transformations:
features, relevances = transform.apply(features, relevances)
groups = [len(rels) for rels in relevances]
relevances = np.concatenate(relevances)
features = np.concatenate(features).reshape([len(relevances), -1])
if len(datasets) == 0:
dataset = gbm.Dataset(data=features, label=relevances,
group=groups, params=params,
silent=True, free_raw_data=False)
else:
dataset = gbm.Dataset(data=features, label=relevances,
group=groups, reference=datasets[0],
silent=True, free_raw_data=False)
datasets.append(dataset)
return datasets
class Transformation(object):
def apply(self, features, relevances):
"""Applies a transformation.
Args:
features: A 3D ndarray.
relevances: A 2D ndarray.
Returns:
A tuple consisting of new features and relevances.
"""
raise NotImplementedError
class PerturbLabels(Transformation):
def __init__(self, factor, dist):
"""Creates a `Transformation` to perturb labels.
Args:
factor: (float) Percentage of labels to perturb per query.
dist: list of floats. The probabilities associated with each label.
"""
self.factor = factor
self.dist = dist
def apply(self, features, relevances):
for idx, rels in enumerate(relevances):
labels = np.random.choice(len(self.dist), len(rels), p=self.dist)
v = np.random.rand(len(rels))
relevances[idx] = np.where(np.less(v, self.factor), labels, rels)
return features, relevances
class AugmentListByExternalNegativeSamples(Transformation):
def __init__(self, factor):
"""
Creates a `Transformation` to augment lists by sampling negative
examples from other queries.
Args:
factor: (float) Factor by which each list will be augmented.
"""
self.factor = factor
def apply(self, features, relevances):
extra_features = []
for idx in range(len(features)):
size = int(self.factor * len(features[idx]))
v = np.random.randint(0, len(features) - 1, size)
indices = np.where(np.less(v, idx), v, v + 1)
extras = []
for r in indices:
b = np.random.randint(0, len(features[r]))
extras.append(np.copy(features[r][b]))
extra_features.append(extras)
for idx in range(len(features)):
features[idx] = np.append(features[idx], extra_features[idx])
relevances[idx] = np.append(
relevances[idx], np.zeros(len(extra_features[idx])))
return features, relevances
class GenerateClicks(Transformation):
def __init__(self, impressions, click_prob):
"""
Creates a `Transformation` to generate clicks using a random ranker.
Args:
impressions: (int) Number of impressions per query.
click_prob: list of floats. Click probability given relevance.
"""
self.impressions = impressions
self.click_prob = click_prob
def apply(self, features, relevances):
_features = []
_relevances = []
for idx in range(len(features)):
indices = np.arange(len(features[idx]))
for _ in range(self.impressions):
np.random.shuffle(indices)
v = np.random.rand(len(indices))
f = []
clicked = False
for i in indices:
f.append(np.copy(features[idx][i]))
if v[i] <= self.click_prob[relevances[idx][i]]:
clicked = True
break
r = np.zeros(len(f))
if clicked:
r[-1] = 1
_features.append(f)
_relevances.append(r)
return _features, _relevances
class NDCG(object):
def __init__(self, cutoffs):
self.cutoffs = cutoffs
def eval(self, preds, data):
"""Computes NDCG at rank cutoff.
Args:
preds: list of floats.
data: A `lightgbm.Dataset` object.
"""
# Transform the relevance labels and predictions to the correct shape.
relevances = []
scores = []
idx = 0
for group in data.group:
relevances.append(data.label[idx:idx + group])
scores.append(preds[idx:idx + group])
idx += group
ndcg_at = {}
count = 0
for s, r in zip(scores, relevances):
# Skip queries with no relevant documents.
if sum(r) == 0:
continue
count += 1
sorted_by_scores = [i for _,i in sorted(zip(s,r), key=lambda p: p[0], reverse=True)]
gains_scores = [pow(2, i) - 1. for i in sorted_by_scores]
gains_rels = sorted(gains_scores, reverse=True)
discounts = [1./math.log(i+2, 2) for i, _ in enumerate(sorted_by_scores)]
for cutoff in self.cutoffs:
dcg = sum([g*d for g, d in zip(gains_scores[:cutoff], discounts[:cutoff])])
max_dcg = sum([g*d for g, d in zip(gains_rels[:cutoff], discounts[:cutoff])])
if cutoff not in ndcg_at:
ndcg_at[cutoff] = 0.
ndcg_at[cutoff] += dcg / max_dcg
results = []
for cutoff in self.cutoffs:
results.append(('ndcg@{}'.format(cutoff), ndcg_at[cutoff]/count, True))
return results
| StarcoderdataPython |
372799 | #!/usr/bin/env python3
# pylint: disable=missing-docstring,too-many-public-methods
import pathlib
import shutil
import tempfile
import time
import unittest
import uuid
from typing import List, Optional # pylint: disable=unused-import
import zmq
import persizmq
import persizmq.filter
class TestContext:
def __init__(self, base_url: str = "inproc://persizmq_test") -> None:
self.url = base_url + str(uuid.uuid4())
self.context = zmq.Context()
self.publisher = self.context.socket(zmq.PUB) # pylint: disable=no-member
self.subscribers = [] # type: List[zmq.Socket]
self.tmp_dir = None # type: Optional[pathlib.Path]
def subscriber(self) -> zmq.Socket:
"""
Creates a new subscriber that listens to whatever the publisher of this instance
publishes.
The subscriber will be closed by this instance.
:return: zmq subscriber
"""
subscriber = self.context.socket(zmq.SUB) # pylint: disable=no-member
self.subscribers.append(subscriber)
subscriber.setsockopt_string(zmq.SUBSCRIBE, "") # pylint: disable=no-member
subscriber.connect(self.url)
return subscriber
def __enter__(self):
self.tmp_dir = pathlib.Path(tempfile.mkdtemp())
self.publisher.bind(self.url)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for subscriber in self.subscribers:
subscriber.close()
shutil.rmtree(self.tmp_dir.as_posix())
self.publisher.close()
self.context.term()
class TestThreadedSubscriber(unittest.TestCase):
def test_operational(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
thread_sub = persizmq.ThreadedSubscriber(
callback=lambda msg: None, subscriber=subscriber, on_exception=lambda exc: None)
# Threaded subscriber is already operational after the constructor.
self.assertTrue(thread_sub.operational)
with thread_sub:
self.assertTrue(thread_sub.operational)
# Threaded subscriber is not operational after exiting the context.
self.assertFalse(thread_sub.operational)
def test_a_message(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
class Helper:
def __init__(self):
self.msg_received = None
def callback(self, msg: bytes):
self.msg_received = msg
helper = Helper()
thread_sub = persizmq.ThreadedSubscriber(
callback=helper.callback, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
ctx.publisher.send(b"0001")
time.sleep(0.01)
self.assertEqual(b"0001", helper.msg_received)
def test_exception(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
def callback(msg: bytes) -> None: # pylint: disable=unused-argument
raise Exception("Here I come!")
exception = None
def on_exception(exc):
nonlocal exception
exception = exc
thread_sub = persizmq.ThreadedSubscriber(
callback=callback, subscriber=subscriber, on_exception=on_exception)
with thread_sub:
ctx.publisher.send(b"0002")
time.sleep(0.01)
self.assertIsNotNone(exception)
self.assertEqual("Here I come!", str(exception))
class TestPersistentSubscriber(unittest.TestCase):
def test_no_message_received(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
def test_a_message(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
ctx.publisher.send(b"1984")
time.sleep(0.01)
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1984", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
def test_multiple_messages(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# publish a message
ctx.publisher.send(b"1985")
time.sleep(0.01)
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1985", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
# publish two in a row
ctx.publisher.send(b"1986")
ctx.publisher.send(b"1987")
time.sleep(0.01)
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1986", msg)
# ask for the same front
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1986", msg)
self.assertTrue(storage.pop_front())
# publish a third one
ctx.publisher.send(b"1988")
time.sleep(0.01)
# check the second one
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1987", msg)
self.assertTrue(storage.pop_front())
# check the third one
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1988", msg)
self.assertTrue(storage.pop_front())
def test_persistency(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# publish a message
ctx.publisher.send(b"1985")
time.sleep(0.01)
# simulate a restart
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1985", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
def test_order(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# Make sure the correct order is kept even for a lot of messages.
for i in range(2000, 2020):
ctx.publisher.send("{}".format(i).encode())
time.sleep(0.01)
# simulate a restart
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
for i in range(2000, 2020):
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual("{}".format(i).encode(), msg)
self.assertTrue(storage.pop_front())
class TestFilters(unittest.TestCase):
def test_that_it_works(self):
# pylint: disable=too-many-statements
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
pers_dir_filter = ctx.tmp_dir / 'filter'
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir)
thread_sub = persizmq.ThreadedSubscriber(
subscriber=subscriber, callback=lambda msg: None, on_exception=lambda exc: None)
thread_sub.callback = \
lambda msg: storage.add_message(
persizmq.filter.MinPeriod(min_period=1, persistent_dir=pers_dir_filter)(msg))
with thread_sub:
# Send two messages.
ctx.publisher.send(b"3000")
ctx.publisher.send(b"3001")
time.sleep(0.01)
# Make sure only one arrived.
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"3000", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
# Rebuild the persistent subscriber.
del storage
del thread_sub
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir)
thread_sub = persizmq.ThreadedSubscriber(
subscriber=subscriber, callback=lambda msg: None, on_exception=lambda exc: None)
thread_sub.callback = lambda msg: storage.add_message(
persizmq.filter.MinPeriod(min_period=10, persistent_dir=pers_dir_filter)(msg))
with thread_sub:
# Send one message and make sure that the last timestamp was correctly loaded
# (the new message must be rejected).
ctx.publisher.send(b"3002")
time.sleep(0.01)
msg = storage.front()
self.assertIsNone(msg)
thread_sub.callback = lambda msg: storage.add_message(persizmq.filter.MaxSize(max_size=1000)(msg))
# Generate a too large message and check that it is rejected.
ctx.publisher.send(b"x" * 1001)
time.sleep(0.01)
msg = storage.front()
self.assertIsNone(msg)
class TestPersistentLatest(unittest.TestCase):
def test_that_it_works(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
persi_latest = persizmq.PersistentLatestStorage(persistent_dir=ctx.tmp_dir)
thread_sub = persizmq.ThreadedSubscriber(
callback=persi_latest.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# Make sure only the newest one is kept.
self.assertFalse(persi_latest.new_message)
ctx.publisher.send(b"4000")
time.sleep(0.01)
self.assertTrue(persi_latest.new_message)
ctx.publisher.send(b"4001")
time.sleep(0.01)
self.assertTrue(persi_latest.new_message)
msg = persi_latest.message()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"4001", msg)
self.assertFalse(persi_latest.new_message)
# The same for lots of messages.
for i in range(4010, 4020):
ctx.publisher.send("{}".format(i).encode())
time.sleep(0.01)
msg = persi_latest.message()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"4019", msg)
self.assertFalse(persi_latest.new_message)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
9696920 | # ai.py
#
# Author: <NAME>
# Created On: 21 Feb 2019
import numpy as np
from . import astar
SEARCH_TARGET = 0
MOVE = 1
class AI:
def __init__(self, player):
self.player = player
self.path = []
self.state = SEARCH_TARGET
self.weight_self = 3
self.weight_enemy = 6
self.weight_crossroad = 3
self.map_positions = np.empty((0, 0))
self.bomb_times = np.empty((0, 0))
def __update_map_positions(self, map):
if map.size != self.map_positions.shape:
width, height = map.size
self.map_positions = np.empty((width, height, 2))
self.map_positions[:, :, 0] = np.arange(width) \
.reshape(1, width).repeat(height, 0)
self.map_positions[:, :, 1] = np.arange(height) \
.reshape(height, 1).repeat(width, 1)
def __update_bomb_times(self, bombs, map):
if map.size != self.bomb_times.shape:
self.bomb_times = np.empty(map.size, dtype=np.int64)  # np.int was removed in NumPy 1.24; use a concrete dtype
self.bomb_times[:, :] = 1e16
# define the four diections west, east, south, north
directions = np.array([(1, 0), (-1, 0), (0, 1), (0, -1)])
for bomb in bombs:
pos = bomb.pos
self.bomb_times[pos[0], pos[1]] = bomb.time
for dir in directions:
# try to spread the explosions as far as possible
for delta in range(1, bomb.range):
npos = pos + dir * delta
# check if the position is valid, if not stop explosion
# spread here
if not map.is_valid(npos) or map.is_blocked(npos) or \
map.has_explosion(npos):
break
self.bomb_times[npos[0], npos[1]] = bomb.time  # record the time at the spread position (npos), not the bomb's own tile
def update(self, world):
self.player.drop_bomb = False
self.player.move[:] = 0
if self.state == MOVE:
if self.path:
next_pos = self.path.pop(0)
if world.map.is_blocked(next_pos) or world.map.has_explosion(next_pos):
self.path = []
self.state = SEARCH_TARGET
next_pos = np.array(next_pos, dtype=np.int)
self.player.move = next_pos - self.player.pos
else:
self.player.drop_bomb = True
self.state = SEARCH_TARGET
if self.state == SEARCH_TARGET:
# init score board, each tile gets a score the maximum is chosen as
# target
score = np.zeros(world.map.size)
# get mask of tiles which are not blocked
unblock = ~world.map.blocked
width, height = score.shape
# create array of tile positions, create lazily
self.__update_map_positions(world.map)
self.__update_bomb_times(world.bombs, world.map)
# calculate distances of this player to all other tiles (manhatten)
self_dist = np.abs(self.map_positions - self.player.pos).sum(2)
# normalize distances into interval [0,1]
self_dist /= self_dist.max()
# make shortest distances have greates value
self_dist -= 1
self_dist *= -1
# check if there are any other players than this one
if len(world.players) > 1:
# calculate distances of all enemies to all other tiles
enemy_dist = []
for enemy in world.players:
# check if this player is not the one controlled by ai
if enemy.id != self.player.id:
diff = self.map_positions - enemy.pos
dist = np.abs(diff).sum(2)
enemy_dist.append(dist)
# convert distance to numpy array
enemy_dist = np.array(enemy_dist)
# find element wise minimum of all player distances
enemy_dist = np.min(enemy_dist, axis=0)
# normalize distances into interval [0,1]
enemy_dist /= enemy_dist.max()
# make shortest distances have greates value
enemy_dist -= 1
enemy_dist *= -1
else:
# no enemies, distances are zero
enemy_dist = np.zeros((width, height))
# detect how many neighbouring unblocked tiles each tile has
crossroads = np.zeros((width, height))
# add +1 if left neighbour is not blocked
crossroads[1:, :] += unblock[:-1, :] * 1
# add +1 if right neighbour is not blocked
crossroads[:-1, :] += unblock[1:, :] * 1
# add +1 if upper neighbour is not blocked
crossroads[:, 1:] += unblock[:, :-1] * 1
# add +1 if lower neighbour is not blocked
crossroads[:, :-1] += unblock[:, 1:] * 1
# normalize into interval [0,1]
crossroads /= 4
# calculate score as weighted sum
score += self.weight_self * self_dist
score += self.weight_enemy * enemy_dist
score += self.weight_crossroad * crossroads
# set all blocked tiles to zero
score[world.map.blocked] = 0
def is_valid(node, path):
return world.map.is_valid(node) and \
not world.map.is_blocked(node) and \
not world.map.has_explosion(node) and \
self.bomb_times[node[0], node[1]] - len(path) - 1 > 0
found = False
iterations = 0
while not found and iterations < 10:
# retrieve tile with maximum score
target = np.unravel_index(np.argmax(score), score.shape)
# set score to 0
score[target[0], target[1]] = 0
# search path with astar
self.path = astar.search(self.player.pos, target,
is_valid=is_valid)
if self.path:
self.state = MOVE
found = True
iterations += 1
if not found:
print('No path found!')
| StarcoderdataPython |
6537889 | #!/usr/bin/env python2
import random
import math
import copy
from Spell import *
class Pokemon:
def __init__(self, name, baseHp, lifePerLevel, attack, attackPerLevel, baseDef, defencePerLevel, spells, elements):
self.level = 1
self.exp = 0
self.name = name
self.baseHp = baseHp
self.lifePerLevel = lifePerLevel
self.defencePerLevel = defencePerLevel
self.spells = spells
self.attackPerLevel = attackPerLevel
self.baseDef = baseDef
self.pokeid = 0
self.attack = attack
self.active_pokemon = self.pokeid
self.battle = None
self.life = self.getMaxLife()
self.username = self.name
self.elements = elements
def getActivePokemon(self):
return self
def hasAlivePokemon(self):
return False
def removePokemon(self, pokemon):
self.active_pokemon = None
def addSpell(self, spell):
self.spells.append(spell)
def getSpells(self):
return self.spells
def getSpell(self, name):
for e in self.spells:
if e.name == name:
return e
return False
def expForNextLevel(self):
# return 1.2 * (self.level**3) - 15 * (self.level**2) + 100 * self.level - 140
return 1.25 * (self.level**3) + 50
def gainExp(self, fromPokemon):
self.addExp(self.calcGainedExp(fromPokemon))
def calcGainedExp(self, fromPokemon):
return 200 * fromPokemon.level / 7
def addExp(self, exp):
self.exp += exp
needed = self.expForNextLevel()
while self.exp >= needed:
self.exp = self.exp - needed
self.level += 1
self.life = self.getMaxLife()
def getMaxLife(self):
return self.lifePerLevel * self.level + self.baseHp
def getAttack(self):
return self.attackPerLevel * self.level + self.attack
def getDefence(self):
return self.defencePerLevel * self.level + self.baseDef
def str(self):
spells = ""
for elem in self.spells:
if len(spells) != 0:
spells += ", "
spells += elem.name
elements = ""
for elem in self.elements:
if len(elements) != 0:
elements += ", "
elements += elem.name
return "(pokeId(" + str(self.pokeid) + "), Nom(" + self.name + "), Niveau(" + str(self.level) + "), Attaque(" + str(self.getAttack()) + "), VieMax(" + str(self.getMaxLife()) + "), Defense(" + str(self.getDefence()) + "), Sorts: (" + spells + "), Exp: (" + str(self.exp) + " / " + str(self.expForNextLevel()) + "), Elements(" + elements + "))"
def fight(self, spellName, defencer):
spell = self.getSpell(spellName)
if spell == False:
return "Sort '" + spellName + "' introuvable."
rep = spell.use(self, defencer)
return rep[0] + self.name + " utilise " + spell.name + " (" + spell.element.name + ") et fait " + str(rep[1]) + " dommages a " + defencer.name + " (pv: " + str(defencer.life) + " / " + str(defencer.getMaxLife()) + ")"
class PokemonsManager:
def __init__(self):
self.pokemons = [
Pokemon("ZeratoR", 140, 6, 10, 1, 5, 0.1, [
Spell("Son_Pere", 10, 90, elementsManager.get("Feu")),
Spell("Mute", 15, 50, elementsManager.get("Feu")),
Spell("Rend_l'argent", 50, 100, elementsManager.get("Feu")),
Spell("Dailymotion_drop", 100, 100, elementsManager.get("Feu"))
], [
elementsManager.get("Feu")
]),
Pokemon("Noxer", 80, 6, 5, 1, 0.5, 0.2, [
Spell("Ventre_devoreur", 30, 80, elementsManager.get("Terre")),
Spell("Millenium", 50, 80, elementsManager.get("Terre"))
], [
elementsManager.get("Terre")
]),
Pokemon("Furiie", 100, 4, 10, 1, 2, 0.05, [
Spell("Cri_strident", 20, 100, elementsManager.get("Eau")),
Spell("League_of_legends", 100, 20, elementsManager.get("Terre")),
Spell("Bisous", 20, 50, elementsManager.get("Eau"))
], [
elementsManager.get("Eau")
]),
Pokemon("MisterMV", 140, 6, 9, 1, 0.1, 0.1, [
Spell("SAUCISSON", 10, 100, elementsManager.get("Terre")),
Spell("Speedrun", 20, 80, elementsManager.get("Feu")),
Spell("Jeu_a_la_pisse", 100, 30, elementsManager.get("Terre"))
], [
elementsManager.get("Terre")
]),
Pokemon("<NAME>", 100, 5, 20, 1, 0.1, 0.1, [
Spell("LEEEEEROY_JENKINS", 5000, 10, elementsManager.get("Feu"))
], [
elementsManager.get("Feu")
]),
Pokemon("AlexMog", 180, 5, 20, 1, 3, 0.5, [
Spell("Tardbecile", 30, 100, elementsManager.get("Eau")),
Spell("Equilibrage_ratte", 70, 10, elementsManager.get("Eau")),
Spell("Blague_de_merde", 50, 30, elementsManager.get("Eau"))
], [
elementsManager.get("Eau")
]),
Pokemon("Demoneth", 160, 5, 10, 1, 4, 0.2, [
Spell("Molotov_sur_orange", 20, 50, elementsManager.get("Feu")),
Spell("Live_o_maniaque", 15, 100, elementsManager.get("Feu")),
Spell("La_co_marche", 100, 10, elementsManager.get("Feu"))
], [
elementsManager.get("Feu")
])
]
def getRandom(self):
ret = copy.copy(self.pokemons[random.randint(0, len(self.pokemons) - 1)])
return ret
def getFromName(self, name):
for elem in self.pokemons:
if elem.name == name:
return elem
return False
global pokemonsManager
pokemonsManager = PokemonsManager()
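if __name__ == "__main__":
    # A minimal battle sketch (relies on the Spell/elementsManager module
    # imported above; the printed strings follow the formats built in fight()):
    attacker = pokemonsManager.getRandom()
    defender = pokemonsManager.getRandom()
    print(attacker.str())
    print(attacker.fight(attacker.getSpells()[0].name, defender))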
| StarcoderdataPython |
6502011 | from attr import Factory, NOTHING
from prettyprinter.prettyprinter import pretty_call_alt, register_pretty
def is_instance_of_attrs_class(value):
cls = type(value)
try:
cls.__attrs_attrs__
except AttributeError:
return False
return True
def pretty_attrs(value, ctx):
cls = type(value)
attributes = cls.__attrs_attrs__
kwargs = []
for attribute in attributes:
if not attribute.repr:
continue
display_attr = False
if attribute.default == NOTHING:
display_attr = True
elif isinstance(attribute.default, Factory):
default_value = (
attribute.default.factory(value)
if attribute.default.takes_self
else attribute.default.factory()
)
if default_value != getattr(value, attribute.name):
display_attr = True
else:
if attribute.default != getattr(value, attribute.name):
display_attr = True
if display_attr:
kwargs.append((attribute.name, getattr(value, attribute.name)))
return pretty_call_alt(ctx, cls, kwargs=kwargs)
def install():
register_pretty(predicate=is_instance_of_attrs_class)(pretty_attrs)
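if __name__ == "__main__":
    # A small sanity check (assumes the `attr` and `prettyprinter` packages are
    # installed; Point is a throwaway example class):
    import attr
    from prettyprinter import pprint

    @attr.s
    class Point:
        x = attr.ib(default=0)
        y = attr.ib(default=0)

    install()
    pprint(Point(x=3))  # y equals its default, so it is omitted from the output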
| StarcoderdataPython |
11287236 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2021 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, platform
if sys.version_info >= (3,8,0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import opengeode
import opengeode_io_py_model as model_io
def test_brep_cube(brep):
# Number of components
if brep.nb_corners() != 8:
raise ValueError("[Test] Number of corners is not correct" )
if brep.nb_lines() != 12:
raise ValueError("[Test] Number of lines is not correct" )
if brep.nb_surfaces() != 6:
raise ValueError("[Test] Number of surfaces is not correct" )
if brep.nb_blocks() != 1:
raise ValueError("[Test] Number of blocks is not correct" )
# Number of vertices and elements in components
for c in brep.corners():
if c.mesh().nb_vertices() != 1:
raise ValueError("[Test] Number of vertices in corners should be 1" )
for l in brep.lines():
if l.mesh().nb_vertices() != 5:
raise ValueError("[Test] Number of vertices in lines should be 5" )
if l.mesh().nb_edges() != 4:
raise ValueError("[Test] Number of edges in lines should be 4" )
for s in brep.surfaces():
if s.mesh().nb_vertices() != 29:
raise ValueError("[Test] Number of vertices in surfaces should be 29" )
if s.mesh().nb_polygons() != 40:
raise ValueError("[Test] Number of polygons in surfaces should be 40" )
for b in brep.blocks():
if b.mesh().nb_vertices() != 131:
raise ValueError("[Test] Number of vertices in blocks should be 131" )
if b.mesh().nb_polyhedra() != 364:
raise ValueError("[Test] Number of polyhedra in blocks should be 364" )
# Number of component boundaries and incidences
for c in brep.corners():
if brep.nb_boundaries( c.id() ) != 0:
raise ValueError("[Test] Number of corner boundary should be 0" )
if brep.nb_incidences( c.id() ) != 3:
raise ValueError("[Test] Number of corner incidences should be 3" )
for l in brep.lines():
if brep.nb_boundaries( l.id() ) != 2:
raise ValueError("[Test] Number of line boundary should be 2" )
if brep.nb_incidences( l.id() ) != 2:
raise ValueError("[Test] Number of line incidences should be 2" )
for s in brep.surfaces():
if brep.nb_boundaries( s.id() ) != 4:
raise ValueError("[Test] Number of surface boundary should be 4" )
if brep.nb_incidences( s.id() ) != 1:
raise ValueError("[Test] Number of surface incidences should be 1" )
for b in brep.blocks():
if brep.nb_boundaries( b.id() ) != 6:
raise ValueError("[Test] Number of block boundary should be 6" )
if brep.nb_incidences( b.id() ) != 0:
raise ValueError("[Test] Number of block incidences should be 0" )
def test_brep_cone(brep):
# Number of components
if brep.nb_corners() != 6:
raise ValueError("[Test] Number of corners is not correct")
if brep.nb_lines() != 13:
raise ValueError("[Test] Number of lines is not correct")
if brep.nb_surfaces() != 12:
raise ValueError("[Test] Number of surfaces is not correct")
if brep.nb_blocks() != 4:
raise ValueError("[Test] Number of blocks is not correct")
# Number of vertices and elements in components
for c in brep.corners():
if c.mesh().nb_vertices() != 1:
raise ValueError("[Test] Number of vertices in corners should be 1")
for l in brep.lines():
if l.mesh().nb_vertices() == 0:
raise ValueError("[Test] Number of vertices in lines should not be null")
if l.mesh().nb_edges() == 0:
raise ValueError("[Test] Number of edges in lines should not be null")
for s in brep.surfaces():
if s.mesh().nb_vertices() == 0:
raise ValueError("[Test] Number of vertices in surfaces should not be null")
if s.mesh().nb_polygons() == 0:
raise ValueError("[Test] Number of polygons in surfaces should not be null")
for b in brep.blocks():
if b.mesh().nb_vertices() == 0:
raise ValueError("[Test] Number of vertices in blocks should not be null")
if b.mesh().nb_polyhedra() == 0:
raise ValueError("[Test] Number of polyhedra in blocks should not be null")
# Number of component boundaries and incidences
for c in brep.corners():
if brep.nb_boundaries( c.id() ) != 0:
raise ValueError("[Test] Number of corner boundary should be 0" )
if brep.nb_incidences( c.id() ) != 4 and brep.nb_incidences( c.id() ) != 5:
raise ValueError("[Test] Number of corner incidences should be 4 or 5" )
for l in brep.lines():
if brep.nb_boundaries( l.id() ) != 2:
raise ValueError("[Test] Number of line boundary should be 2" )
if brep.nb_incidences( l.id() ) < 2 or brep.nb_incidences( l.id() ) > 4:
raise ValueError("[Test] Number of line incidences should be 2, 3 or 4" )
for s in brep.surfaces():
if brep.nb_boundaries( s.id() ) != 3:
raise ValueError("[Test] Number of surface boundary should be 3" )
if brep.nb_incidences( s.id() ) != 1 and brep.nb_incidences( s.id() ) != 2:
raise ValueError("[Test] Number of surface incidences should be 1 or 2" )
for b in brep.blocks():
if brep.nb_boundaries( b.id() ) != 4:
raise ValueError("[Test] Number of block boundary should be 4" )
if brep.nb_incidences( b.id() ) != 0:
raise ValueError("[Test] Number of block incidences should be 0" )
if __name__ == '__main__':
model_io.initialize_model_io()
test_dir = os.path.dirname(__file__)
data_dir = os.path.abspath(os.path.join(test_dir, "../../../../tests/data"))
brep_cube = opengeode.load_brep( os.path.join(data_dir, "cube_v22.msh" ))
test_brep_cube(brep_cube)
opengeode.save_brep(brep_cube, "cube_v22.og_brep")
reloaded_brep_cube = opengeode.load_brep("cube_v22.og_brep")
test_brep_cube(reloaded_brep_cube)
brep_cone = opengeode.load_brep(os.path.join(data_dir, "cone_v4.msh" ))
test_brep_cone(brep_cone)
opengeode.save_brep(brep_cone, "cone_v4.og_brep")
reloaded_brep_cone = opengeode.load_brep("cone_v4.og_brep")
test_brep_cone(reloaded_brep_cone)
| StarcoderdataPython |
6649675 | <reponame>ethansaxenian/RosettaDecode
from datetime import date

# START and STOP are not defined in this fragment; the Rosetta Code
# "Five weekends" task uses 1900-01-01 to 2101-01-01 (assumed here).
START, STOP = date(1900, 1, 1), date(2101, 1, 1)
LONGMONTHS = (1, 3, 5, 7, 8, 10, 12)  # Jan Mar May Jul Aug Oct Dec
def fiveweekendspermonth2(start=START, stop=STOP):
    return [date(yr, month, 31)
            for yr in range(start.year, stop.year)
            for month in LONGMONTHS
            if date(yr, month, 31).timetuple()[6] == 6  # the 31st is a Sunday
            ]
dates2 = fiveweekendspermonth2()
# `dates` comes from the task's first implementation (not in this fragment):
# assert dates2 == dates
| StarcoderdataPython |
328889 | from manim import *
class s08b_Algorithms_Activity(Scene):
def construct(self):
# Actors.
title = Text("Algorithms")
subtitle = Text("(Activity)").scale(0.75)
# Positioning.
title.shift(0.50*UP)
subtitle.next_to(title, DOWN)
# Animations.
actors = [title, subtitle]
for actor in actors:
self.play(Write(actor))
self.wait(0.5)
# Cleanup.
self.wait(0.5)
self.play(*[FadeOut(actor) for actor in actors])
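        # To render this scene from a shell (assuming the file is saved as
        # s08b_algorithms_activity.py):
        #   manim -pql s08b_algorithms_activity.py s08b_Algorithms_Activity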
| StarcoderdataPython |
6656577 | # Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Elastic File System"
prefix = "elasticfilesystem"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
Backup = Action("Backup")
ClientMount = Action("ClientMount")
ClientRootAccess = Action("ClientRootAccess")
ClientWrite = Action("ClientWrite")
CreateAccessPoint = Action("CreateAccessPoint")
CreateFileSystem = Action("CreateFileSystem")
CreateMountTarget = Action("CreateMountTarget")
CreateTags = Action("CreateTags")
DeleteAccessPoint = Action("DeleteAccessPoint")
DeleteFileSystem = Action("DeleteFileSystem")
DeleteFileSystemPolicy = Action("DeleteFileSystemPolicy")
DeleteMountTarget = Action("DeleteMountTarget")
DeleteTags = Action("DeleteTags")
DescribeAccessPoints = Action("DescribeAccessPoints")
DescribeBackupPolicy = Action("DescribeBackupPolicy")
DescribeFileSystemPolicy = Action("DescribeFileSystemPolicy")
DescribeFileSystems = Action("DescribeFileSystems")
DescribeLifecycleConfiguration = Action("DescribeLifecycleConfiguration")
DescribeMountTargetSecurityGroups = Action("DescribeMountTargetSecurityGroups")
DescribeMountTargets = Action("DescribeMountTargets")
DescribeTags = Action("DescribeTags")
ListTagsForResource = Action("ListTagsForResource")
ModifyMountTargetSecurityGroups = Action("ModifyMountTargetSecurityGroups")
PutBackupPolicy = Action("PutBackupPolicy")
PutFileSystemPolicy = Action("PutFileSystemPolicy")
PutLifecycleConfiguration = Action("PutLifecycleConfiguration")
Restore = Action("Restore")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateFileSystem = Action("UpdateFileSystem")
| StarcoderdataPython |
1850660 | <filename>libs/helpers.py
from ncclient import manager
from lxml import etree
def get_running_config(ip, port, uname, pw, device_params):
session = manager.connect(host=ip, port=port, username=uname, password=pw, device_params=device_params, hostkey_verify=False)
config = session.get_config(source='running').data_xml
config_tree = etree.fromstring(config.encode('UTF-8'))
    return config_tree
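
# A minimal usage sketch (hypothetical host and credentials; device_params
# follows ncclient's convention, e.g. {'name': 'csr'} for Cisco IOS-XE):
# tree = get_running_config('192.0.2.1', 830, 'admin', 'secret', {'name': 'csr'})
# print(etree.tostring(tree, pretty_print=True).decode())
 | StarcoderdataPython |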
3519367 | # -*- coding:utf-8 -*-
from conf import *
from utils import *
import abc
class CNNModel(metaclass=abc.ABCMeta):
def __init__(self, param):
# input_shape = x_train.shape[1:]
self.param = param
self.train_poison = None
self.test_poison = None
self.classifier = None
def init(self, data):
self.input_shape = data.x_train.shape[1:]
self.min_ = data.min_
self.max_ = data.max_
def set_learning_phase(self, learning_phase):
K.set_learning_phase(learning_phase)
@abc.abstractmethod
def init_model(self):
pass
def predict_acc(self, x, y, is_poison, type_str):
# Evaluate the classifier on the test set
self.test_preds = np.argmax(self.classifier.predict(x), axis=1)
self.test_acc = np.sum(self.test_preds == np.argmax(y, axis=1)) / y.shape[0]
print("\n%s accuracy: %.2f%%" % (type_str, self.test_acc * 100))
# Evaluate the classifier on poisonous data in test set
# self.poison_preds = np.argmax(self.classifier.predict(x[is_poison]), axis=1)
self.poison_preds = self.test_preds[is_poison]
self.poison_acc = np.sum(self.poison_preds == np.argmax(y[is_poison], axis=1)) / max(is_poison.sum(),1)
print("\nPoisonous %s set accuracy (i.e. effectiveness of poison): %.2f%%" % (type_str, self.poison_acc * 100))
# Evaluate the classifier on clean data
# self.clean_preds = np.argmax(self.classifier.predict(x[is_poison == 0]), axis=1)
self.clean_preds = self.test_preds[is_poison==0]
self.clean_acc = np.sum(self.clean_preds == np.argmax(y[is_poison == 0], axis=1)) / y[is_poison == 0].shape[0]
print("\nClean %s set accuracy: %.2f%%" % (type_str, self.clean_acc * 100))
    # When result_dict is not empty, start recording experiment results
    # to validate the effectiveness of the backdoor insertion: check whether
    # backdoor data carrying the poison label is predicted by the model
    # as the poison label.
def predict(self, data):
# Evaluate the classifier on the train set
self.predict_acc(data.x_train, data.y_train, data.is_poison_train, 'train')
# visualize predict
# for i in range(3):
# data.visiualize_img_by_idx(np.where(np.array(data.is_poison_train) == 1)[0][i], self.poison_preds[i])
# Evaluate the classifier on the test set
self.predict_acc(data.x_test, data.y_test, data.is_poison_test, 'test')
'''
# visualize predict
for i in range(3):
print(np.where(np.array(data.is_poison_test) == 1)[0][i])
data.visiualize_img_by_idx(np.where(np.array(data.is_poison_test) == 1)[0][i], self.poison_preds[i], False)
'''
def predict_robust(self, x, y, is_poison, type_str=''):
self.test_preds = np.argmax(self.classifier.predict(x), axis=1)
self.test_acc = np.sum(self.test_preds == np.argmax(y, axis=1)) / y.shape[0]
print("\n%s accuracy: %.2f%%" % (type_str, self.test_acc * 100))
# Evaluate the classifier on poisonous data in test set
# self.poison_preds = np.argmax(self.classifier.predict(x[is_poison]), axis=1)
self.poison_preds = self.test_preds[is_poison]
self.poison_acc = np.sum(self.poison_preds == np.argmax(y[is_poison], axis=1)) / max(is_poison.sum(),1)
print("\nPoisonous %s set accuracy (i.e. effectiveness of poison): %.2f%%" % (type_str, self.poison_acc * 100))
# Evaluate the classifier on clean data
# self.clean_preds = np.argmax(self.classifier.predict(x[is_poison == 0]), axis=1)
self.clean_preds = self.test_preds[is_poison==0]
self.clean_acc = np.sum(self.clean_preds == np.argmax(y[is_poison == 0], axis=1)) / y[is_poison == 0].shape[0]
print("\nClean %s set accuracy: %.2f%%" % (type_str, self.clean_acc * 100))
def set_param(self, param):
self.classifier.param = param
self.param = param
def get_train_poison(self):
return self.train_poison
def set_train_poison(self, poison):
self.train_poison = poison
def get_test_poison(self):
return self.test_poison
def set_test_poison(self, poison):
self.test_poison = poison
def predict_instance(self, x):
return self.classifier.predict(x)[0]
def get_input_shape(self):
return self.input_shape
def set_input_shape(self, input_shape):
self.input_shape = input_shape
def get_classifier(self):
return self.classifier
def set_classifier(self, classifier):
self.classifier = classifier
def get_input_tensor(self):
return self.classifier.get_input_tensor()
def get_output_tensor(self):
return self.classifier.get_output_tensor()
@abc.abstractmethod
def get_dense_tensor(self):
pass
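# A minimal concrete-subclass sketch (hypothetical; `param`, the layers, and
# the `classifier` wrapper come from the surrounding project):
# class SimpleCNN(CNNModel):
#     def init_model(self):
#         ...  # build the network and wrap it as self.classifier
#     def get_dense_tensor(self):
#         ...  # return the tensor of the last dense layer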
| StarcoderdataPython |
115214 | <filename>yj_anova_test.py
#coding:utf-8
from scipy import stats
import numpy as np
from pandas import Series,DataFrame
from openpyxl import load_workbook
import math
import uuid
import os
def chart(data_ws,result_ws):
pass
def _produc_random_value(mean,stdrange):
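    # For the symmetric pair x1 = mean - a, x2 = mean + a, the sample standard
    # deviation (ddof=1) is a * sqrt(2); choosing a = b / sqrt(2) therefore
    # makes the pair's stdev exactly the drawn value b.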
b = np.random.uniform(*stdrange)
a = b/math.sqrt(2)
x1,x2 = mean-a, mean+a
return x1,x2,b
def _set_od_value(ws,row,x1,x2):
if row % 2 == 1:
ws['F'+str(row)]=x1
ws['F'+str(row+1)]=x2
def _get_mean_value(ws,row):
if row % 2 == 1:
return ws['G'+str(row)].value
else:
return ws['G'+str(row-1)].value
def _get_stdev_value(ws,row):
if row % 2 == 1:
return ws['H'+str(row)].value
else:
return ws['H'+str(row-1)].value
def _set_stdev_value(ws,row,stdev):
if row % 2 == 1:
ws['H'+str(row)] = stdev
def _get_one_row(ws,row):
time = ws['A'+str(row)].value
organ = ws['B'+str(row)].value
sp = ws['C'+str(row)].value
c = ws['D'+str(row)].value
rep = ws['E'+str(row)].value
od = ws['F'+str(row)].value
mean = _get_mean_value(ws,row)
stdev = _get_stdev_value(ws,row)
return Series([time,organ,sp,c,rep,float(od),float(mean),stdev],\
index=['time','organ','sp','c','rep','od','mean','stdev'])
def get_whole_dataframe(ws):
data={}
for i in range(3,ws.max_row+1):
data[i]=_get_one_row(ws,i)
return DataFrame(data).T
def _fill_data_ws(ws,stdrange):
for i in range(3,ws.max_row+1,2):
mean = _get_mean_value(ws,i)
x1,x2,b=_produc_random_value(mean,stdrange)
_set_od_value(ws,i,x1,x2)
_set_stdev_value(ws,i,b)
def _set_p_talbe_header(ws,result_ws):
for i in range(3,ws.max_row+1,10):
group = []
for j in range(i,i+10,2):
gname=ws['A'+str(j)].value+'_'+\
ws['B'+str(j)].value+'_'+\
ws['C'+str(j)].value+'_'+\
str(ws['D'+str(j)].value)
group.append(gname)
for k in range(5):
result_ws['B'+str(i+k+1)]=group[k]
result_ws[chr(ord('C')+k)+str(i)]=group[k]
# for i in range(3,ws.max_row+1,20):
# group = []
# for j in range(i,i+10,2):
# gname=ws['A'+str(j)].value+'_'+\
# ws['B'+str(j)].value+'_'+\
# ws['C'+str(j)].value+'_'+\
# ws['C'+str(j+10)].value+'_'+\
# str(ws['D'+str(j)].value)
# group.append(gname)
# for k in range(5):
# result_ws['J'+str(i+2*k+6)] = group[k]
def produce_p_table(ws,result_ws):
df = get_whole_dataframe(ws)
_set_p_talbe_header(ws,result_ws)
for (time,organ,sp),group_l1 in df.groupby(['time','organ','sp']):
group_l2 = [g for c,g in group_l1.groupby(['c'])]
i = group_l2[0].index[0]
for m in range(5):
for n in range(m+1,5):
g1 = group_l2[m]
g2 = group_l2[n]
f,p = stats.f_oneway(g1['od'],g2['od'])
result_ws[chr(ord('C')+m)+str(i+1+n)]=p
# for (time,organ,c),group_l1 in df.groupby(['time','organ','c']):
# group_l2 = [g for c,g in group_l1.groupby(['sp'])]
# i = group_l2[0].index[0]
# g1 = group_l2[0]
# g2 = group_l2[1]
# f,p = stats.f_oneway(g1['od'],g2['od'])
# result_ws['K'+str(i+6)]=p
def calc(data_ws,result_ws):
_fill_data_ws(data_ws,(0.1,0.6))
for i in range(3,data_ws.max_row+1,10):
group=[]
for j in range(i,i+10,2):
gname=data_ws['A'+str(j)].value+'_'+\
data_ws['B'+str(j)].value+'_'+\
data_ws['C'+str(j)].value+'_'+\
str(data_ws['D'+str(j)].value)
group.append([gname,Series([data_ws['F'+str(j)].value,\
data_ws['F'+str(j+1)].value])])
for k in range(5):
result_ws['B'+str(i+k+1)]=group[k][0]
result_ws[chr(ord('C')+k)+str(i)]=group[k][0]
for m in range(5):
for n in range(m,5):
args = [group[m][1],group[n][1]]
f,p = stats.f_oneway(*args)
result_ws[chr(ord('C')+m)+str(i+1+n)]=p
def main():
wb = load_workbook(filename = 'data/PODz.xlsx')
salt = wb.get_sheet_by_name('salt')
alkali = wb.get_sheet_by_name('alkali')
salt_result = wb.create_sheet(title="salt_result")
alkali_result = wb.create_sheet(title="alkali_result")
calc(salt,salt_result)
calc(alkali,alkali_result)
wb.save(filename = 'data/PODz_result.xlsx')
    print('Processing complete!')
def test(data_file,result_file):
wb = load_workbook(data_file)
sheetnames = wb.get_sheet_names()
for name in sheetnames:
sheet = wb.get_sheet_by_name(name)
result_sheet = wb.create_sheet(title='result_'+name)
        r = input(name + '-> enter the standard deviation range (comma-separated): ')
x,y = r.split(',')
x,y = float(x),float(y)
_fill_data_ws(sheet, (x,y))
print(name+"->填充随机值完成!")
produce_p_table(sheet, result_sheet)
print(name+"->计算P值完成!")
# salt = wb.get_sheet_by_name('salt')
# alkali = wb.get_sheet_by_name('alkali')
# salt_result = wb.create_sheet(title='salt_result')
# alkali_result = wb.create_sheet(title="alkali_result")
# _fill_data_ws(salt, stdrange)
# _fill_data_ws(alkali, stdrange)
# produce_p_table(salt, salt_result)
# produce_p_table(alkali, alkali_result)
wb.save(result_file)
def add_tags(result_file):
wb = load_workbook(result_file)
if __name__ == "__main__":
# main()
data_file = 'data2/ggb (copy).xlsx'
result_file = data_file.split('.')[0]+'_result('\
+str(uuid.uuid1())[:8]+').xlsx'
test(data_file,result_file)
    print(data_file + ': processing complete!')
| StarcoderdataPython |
4886512 | <gh_stars>0
""" Swagger documentation. """
INDEX = {
"responses": {
"200": {
"description": "A greeting."
}
},
}
| StarcoderdataPython |
8060658 | # -*- coding: utf-8 -*-
from django.shortcuts import HttpResponse, render_to_response
from django.http import HttpResponseRedirect
from django.contrib.admin.views.decorators import staff_member_required
from django.utils.translation import ugettext as _
from grappelli.models.bookmarks import Bookmark, BookmarkItem
from grappelli.settings import ADMIN_TITLE, ADMIN_URL
def add_bookmark(request):
"""
Add Site to Bookmarks.
"""
if request.method == 'POST':
if request.POST.get('path') and request.POST.get('title'):
next = request.POST.get('path')
try:
bookmark = Bookmark.objects.get(user=request.user)
except Bookmark.DoesNotExist:
bookmark = Bookmark(user=request.user)
bookmark.save()
try:
bookmarkitem = BookmarkItem.objects.get(bookmark=bookmark, link=request.POST.get('path'))
msg = _('Site is already bookmarked.')
except BookmarkItem.DoesNotExist:
try:
bookmarkitem = BookmarkItem(bookmark=bookmark, title=request.POST.get('title'), link=request.POST.get('path'))
bookmarkitem.save()
msg = _('Site was added to Bookmarks.')
except:
msg = _('Error: Site could not be added to Bookmarks.')
else:
msg = _('Error: Site could not be added to Bookmarks.')
next = request.POST.get('path')
else:
msg = _('Error: Site could not be added to Bookmarks.')
next = ADMIN_URL
# MESSAGE & REDIRECT
request.user.message_set.create(message=msg)
return HttpResponseRedirect(next)
add_bookmark = staff_member_required(add_bookmark)
def remove_bookmark(request):
"""
Remove Site from Bookmarks.
"""
if request.GET:
if request.GET.get('path'):
next = request.GET.get('path')
try:
bookmarkitem = BookmarkItem.objects.get(bookmark__user=request.user, link=request.GET.get('path'))
bookmarkitem.delete()
msg = _('Site was removed from Bookmarks.')
except BookmarkItem.DoesNotExist:
msg = _('Error: Site could not be removed from Bookmarks.')
else:
msg = _('Error: Site could not be removed from Bookmarks.')
next = ADMIN_URL
else:
msg = _('Error: Site could not be removed from Bookmarks.')
# MESSAGE & REDIRECT
request.user.message_set.create(message=msg)
return HttpResponseRedirect(next)
remove_bookmark = staff_member_required(remove_bookmark)
def get_bookmark(request):
"""
Get Bookmarks for the currently logged-in User (AJAX request).
"""
if request.method == 'GET':
if request.GET.get('path'):
object_list = BookmarkItem.objects.filter(bookmark__user=request.user).order_by('order')
try:
bookmark = Bookmark.objects.get(user=request.user)
except Bookmark.DoesNotExist:
bookmark = Bookmark(user=request.user)
bookmark.save()
try:
BookmarkItem.objects.get(bookmark__user=request.user, link=request.GET.get('path'))
is_bookmark = True
except BookmarkItem.DoesNotExist:
is_bookmark = False
        else:
            object_list = ""
            is_bookmark = ""
            bookmark = ""  # avoid a NameError: `bookmark` is passed to the template below
    else:
        object_list = ""
        is_bookmark = ""
        bookmark = ""
return render_to_response('admin/includes_grappelli/bookmarks.html', {
'object_list': object_list,
'bookmark': bookmark,
'is_bookmark': is_bookmark,
'admin_title': ADMIN_TITLE,
'path': request.GET.get('path', ''),
})
get_bookmark = staff_member_required(get_bookmark)
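# A minimal urlconf sketch (hypothetical wiring in the idiom of this Django
# era; the real grappelli package ships its own URL configuration):
# from django.conf.urls.defaults import patterns, url
# urlpatterns = patterns('',
#     url(r'^bookmark/add/$', add_bookmark),
#     url(r'^bookmark/remove/$', remove_bookmark),
#     url(r'^bookmark/get/$', get_bookmark),
# )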
| StarcoderdataPython |
9605754 | <reponame>mohibeyki/remoteAPI<filename>remoteAPI/exceptions.py
#!/usr/bin/env python3
from rest_framework import status
class ServiceError(Exception):
"""
Base class for microservice errors
Typically a Http response is generated from this.
"""
def __init__(self, type, message, suggested_http_status=None):
super().__init__(message)
self.type = type
self.message = message
self.suggested_http_status = suggested_http_status
class BadRequestError(ServiceError):
"""
Is raised when an invalid request comes from client
"""
def __init__(self, type, message, suggested_http_status=None):
super().__init__(type, message, suggested_http_status or status.HTTP_400_BAD_REQUEST)
class NotFoundError(ServiceError):
"""
Is raised when a requested entity does not exist
"""
def __init__(self, type, message):
super().__init__(type, message, status.HTTP_404_NOT_FOUND)
class ServerError(ServiceError):
"""
Is raised when an internal server error occurs
"""
def __init__(self,
type='server_error',
message='Unknown error; please try again later',
suggested_http_status=None):
super().__init__(type, message, suggested_http_status or status.HTTP_500_INTERNAL_SERVER_ERROR)
class ApiCallError(ServiceError):
"""
Is raised when a valid (expected) error status is returned from a remote API call.
"""
def __init__(self, type, message, status):
super().__init__(type, message, status)
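if __name__ == "__main__":
    # Quick demonstration of the hierarchy (illustrative type/message values):
    try:
        raise NotFoundError("user_not_found", "User does not exist")
    except ServiceError as err:
        print(err.type, err.message, err.suggested_http_status)  # -> 404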
| StarcoderdataPython |
389815 | <filename>DD/IP/TEMPLATES/Session 3/propContours.py
############################################
## PROJECT CELL
## Image Processing Workshop
############################################
## Import OpenCV
import numpy
import cv2
############################################
## Read the image
img = cv2.imread('map.png')
## Do the processing
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,170,255,0)
##find the contours
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
##select any contour
i = 0  # contour index to draw; 0 is an arbitrary example choice for this template
##find the co-ordinates of centroid of the contour
#M = cv2.moments(contours[i])
#cx = int(M['m10']/M['m00'])
#cy = int(M['m01']/M['m00'])
##find the area of the contour
cv2.drawContours(img,contours,i,(0,0,255),3)
cv2.imshow('contours',img)
############################################
############################################
## Close and exit
cv2.waitKey(0)
cv2.destroyAllWindows()
############################################
| StarcoderdataPython |
6544032 | import pandas as pd
import numpy as np
import altair as alt
import streamlit as st
import sys, argparse, logging
import json
def spell(spell_inputs):
mana = spell_inputs
x_col = st.selectbox("Select x axis for line chart", mana.columns)
xcol_string = x_col + ":O"
if st.checkbox("Show as continuous?", key="line_chart_x_is_cont"):
xcol_string = x_col + ":Q"
y_col = st.selectbox("Select y axis for line chart", mana.columns)
z_col = st.selectbox("Select z axis for line chart", mana.columns)
if st.checkbox("Show chart?", key="line_chart_show"):
chart = (
alt.Chart(mana)
.mark_line(point=True)
.encode(x=xcol_string, y=y_col, color=z_col, tooltip=list(mana.columns))
.interactive()
.properties(title="Line Chart for " + x_col + "," + y_col)
.configure_title(fontSize=20,)
.configure_axis(labelFontSize=20, titleFontSize=20)
.configure_legend(labelFontSize=20, titleFontSize=20)
)
st.altair_chart(chart, use_container_width=True)
return None, mana
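# A minimal invocation sketch (hypothetical host setup; this "spell" plugin is
# normally called by its host app with a pandas DataFrame):
# if __name__ == "__main__":
#     df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6], "z": ["a", "b", "a"]})
#     spell(df)  # run with: streamlit run this_file.py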
| StarcoderdataPython |
1819465 | #!/usr/bin/env Python3
'''
TypeLoader backend functionality
'''
| StarcoderdataPython |
3460864 | <reponame>danmar3/twodlearn<gh_stars>0
# ***********************************************************************
# General purpose optimizer
#
# Wrote by: <NAME> (<EMAIL>)
# Modern Heuristics Research Group (MHRG)
# Virginia Commonwealth University (VCU), Richmond, VA
# http://www.people.vcu.edu/~mmanic/
#
# ***********************************************************************
from __future__ import division
from __future__ import print_function
import os
try:
import queue
except ImportError:
import Queue as queue
import shutil
import warnings
import threading
import collections
import numpy as np
from time import time
import tensorflow as tf
import twodlearn as tdl
from twodlearn import monitoring
from tqdm import tqdm
try:
from types import SimpleNamespace
except ImportError:
from argparse import Namespace as SimpleNamespace
class DataFeeder:
def __init__(self, feed_train, feed_valid=None):
self.train_feeder = feed_train
if feed_valid is None:
self.valid_feeder = None
else:
self.valid_feeder = feed_valid
def stop(self):
# self.train_feeder.stop()
# if self.valid_feeder is not None:
# self.valid_feeder.stop()
return
def __del__(self):
self.stop()
def feed_train(self):
return self.train_feeder()
def feed_valid(self):
return self.valid_feeder()
class ConstantLr(object):
def __init__(self, value):
self.placeholder = tf.placeholder(tf.float32)
self.value = value
def __call__(self, step, global_step):
return self.value
class OptimizationManager:
''' Performs a standard mini-batch training with validation evaluation '''
def _init_options(self, options):
default = {'progress/window_size': 50,
'progress/reset_multiplier': 10,
'progress/max_trials': 20}
options = tdl.core.check_defaults(options, default)
return options
def __init__(self, session, optimizer=None, step_op=None, monitor_manager=None,
n_logging=100, saver=None, options=None, optimizer_op=None):
self.session = session
self.optimizer = optimizer
self.step_op = step_op
if optimizer_op is not None:
warnings.warn('optimizer_op is deprecated, specify optimizer and '
'step_op instead')
self.optimizer = None
self.step_op = optimizer_op
self.monitor_manager = monitor_manager
self.n_logging = n_logging
self.n_steps = 0
self.saver = saver
self.options = self._init_options(options)
def check_progress(self, step, xp):
"""Check if progress was made in the last call to the optimizer
Args:
step (int): current optimizer step.
xp (list): list of outputs from the training monitors.
Returns:
bool: variables were reset.
"""
if (self.monitor_manager is None) or (self.saver is None):
return False
if len(self.monitor_manager.train.monitors) == 1:
monitor = self.monitor_manager.train.monitors[0]
xp = xp[0]
else:
# TODO: add a way to specify which monitor is measuring performance
# of the optimization process
return False
if ((self.options['progress/window_size'] < step) and
(monitor.min is not np.inf) and
(len(self.saver.checkpoints) > 1)):
mean = monitor.mean(self.options['progress/window_size'])
if (self.options['progress/reset_multiplier']*(mean - monitor.min)
< (xp - monitor.min)):
print('Optimizer seems to have diverged from previous '
'sub-optimal region ({}). Resetting...'
''.format(xp))
self.saver.restore()
return True
return False
def check_nan(self, step, xp):
"""Check if the result from the optimizer includes Nan values.
Args:
step (int): current step of the optimizer.
xp (list): list of outputs from the optimizer
Returns:
bool: True if variables were reset.
"""
if any([np.isnan(oi).any() for oi in xp
if oi is not None]):
if self.saver is None:
raise ValueError(
'Optimization returned NaN at step {}.'
'No checkpoint saver to restore state.'.format(step))
else:
print('Optimization returned NaN at step {}.'
'Restoring last checkpoint'.format(step))
self.saver.restore()
return True
return False
def run_step(self, step, ops, feed_dict):
"""Run a step of the optimizer.
Args:
step (type): Description of parameter `step`.
ops (type): Description of parameter `ops`.
feed_dict (type): Description of parameter `feed_dict`.
Returns:
type: Description of returned object.
"""
step_op, train_ops, monitor_ops = ops
n_trials = 0
while True:
out = self.session.run([step_op] + train_ops + monitor_ops,
feed_dict=feed_dict)
# check number of trials
n_trials += 1
if n_trials > self.options['progress/max_trials']:
return out
# Check for NaN
if self.check_nan(step, xp=out):
continue
# check for progress
if self.check_progress(step=step, xp=out[1:1 + len(train_ops)]):
continue
break
return out
def run(self,
n_train_steps, feed_train=None,
n_valid_steps=1, valid_eval_freq=1, feed_valid=None,
monitor_training=True):
if feed_train is None:
def feed_train(): return None
if feed_valid is None:
def feed_valid(): return None
data_feeder = DataFeeder(feed_train, feed_valid)
if self.monitor_manager:
train_monitors = self.monitor_manager.train.tf_monitors
train_ops = [m.op for m in train_monitors]
valid_monitors = self.monitor_manager.valid.tf_monitors
valid_ops = [m.op for m in valid_monitors]
else:
train_monitors = []
train_ops = []
valid_monitors = []
valid_ops = []
if monitor_training and self.monitor_manager:
monitor_monitors = self.monitor_manager.monitoring.tf_monitors
monitor_ops = [m.op for m in monitor_monitors]
else:
monitor_monitors = []
monitor_ops = []
# safer function
if self.saver is not None:
self.saver.reset()
# run optimizer
try:
for step in range(1, n_train_steps):
# Run optimization step
out = self.run_step(
step=step,
ops=(self.step_op, train_ops, monitor_ops),
feed_dict=data_feeder.feed_train())
self.n_steps += 1
# feed data to monitors
if train_ops:
train_output = out[1:1 + len(train_ops)]
for i, monitor in enumerate(train_monitors):
monitor.feed(train_output[i], self.n_steps)
if monitor_ops:
monitor_output = out[1 + len(train_ops):]
for i, monitor in enumerate(monitor_monitors):
monitor.feed(monitor_output[i], self.n_steps)
# file loggers
self.monitor_manager.train.write_data()
self.monitor_manager.monitoring.write_data()
# run validation evaluation
if valid_ops and (step % valid_eval_freq == 0):
for step_valid in range(0, n_valid_steps):
valid_output = self.session.run(
valid_ops,
feed_dict=data_feeder.feed_valid())
for i, monitor in enumerate(valid_monitors):
monitor.feed(valid_output[i], self.n_steps)
# file loggers
self.monitor_manager.valid.write_data()
# saver function
if (self.saver is not None):
self.saver.add_checkpoint(step)
# log
if (step % self.n_logging == 0) and self.monitor_manager:
# print information
train_info = [(m.name, m.mean()) for m in train_monitors]
valid_info = [(m.name, m.mean()) for m in valid_monitors]
# log information in files
# self.monitor_manager.train.write_stats()
# self.monitor_manager.valid.write_stats()
# self.monitor_manager.monitoring.write_stats()
print("{} | {} | {}".format(step, train_info, valid_info))
finally:
# clean up
data_feeder.stop()
self.monitor_manager.flush()
if self.saver is not None:
self.saver.restore()
self.saver.save()
class Optimizer(tdl.core.TdlModel):
_submodels = ['learning_rate', 'monitor_manager', 'optimizer', 'saver']
def _init_options(self, options):
default = {'progress/window_size': 50,
'progress/reset_multiplier': 10,
'progress/max_trials': 20}
options = tdl.core.check_defaults(options, default)
return options
@tdl.core.InputArgument
def session(self, value):
return (value if value is not None
else tf.get_default_session()
if tf.get_default_session() is not None
else tf.InteractiveSession())
@tdl.core.InputArgument
def log_folder(self, value):
if value is None:
if tdl.core.is_property_set(self, 'monitor_manager'):
value = self.monitor_manager.log_folder
else:
value = 'tmp/monitors/'
return value
def _monitor_from_dict(self, value):
train = (value if 'train' not in value
else value['train'] if isinstance(value['train'], dict)
else {'train': value['train']})
        valid = (None if 'valid' not in value
                 else value['valid'] if isinstance(value['valid'], dict)
                 else {'valid': value['valid']})
monitor = (None if 'monitoring' not in value
else value['monitoring']
if isinstance(value['monitoring'], dict)
else {'monitoring': value['monitoring']})
return monitoring.SimpleTrainingMonitor(
train_vars=train, valid_vars=valid, monitoring_vars=monitor,
log_folder=self.log_folder)
@tdl.core.Submodel
def monitor_manager(self, value):
tdl.core.assert_initialized_if_available(
self, 'monitor_manager', ['log_folder'])
if value is None:
value = {'train': {'train/loss': self.loss}}
monitor_manager = (self._monitor_from_dict(value)
if isinstance(value, dict)
else value)
        # list() so the emptiness check below works under Python 3, where
        # filter() returns a lazy (always-truthy) iterator.
        loss_monitor = list(filter(
            lambda monitor: (tf.convert_to_tensor(monitor.op) ==
                             tf.convert_to_tensor(self.loss)),
            monitor_manager.train.monitors))
if not loss_monitor:
monitor_manager.train.add_monitor(
monitoring.OpMonitor(self.loss, name=self.loss.name))
return monitor_manager
@tdl.core.LazzyProperty
def loss_monitor(self):
return list(filter(lambda monitor: monitor.op == self.loss,
self.monitor_manager.train.monitors))[0]
@tdl.core.Submodel
def learning_rate(self, value):
if value is None:
return ConstantLr(0.02)
else:
return value
@tdl.core.Submodel
def optimizer(self, value):
if value is None:
Optimizer = tf.train.AdamOptimizer
elif callable(value):
Optimizer = value
else:
return value
if hasattr(self.learning_rate, 'placeholder'):
optimizer = Optimizer(learning_rate=self.learning_rate.placeholder)
else:
optimizer = Optimizer(learning_rate=self.learning_rate)
return optimizer
@tdl.core.Submodel
def step_op(self, _):
step_op = self.optimizer.minimize(tf.convert_to_tensor(self.loss),
var_list=self.var_list)
self.reset()
return step_op
@property
def var_optim(self):
'''Variables created by the optimizer'''
vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=self.scope)
for var in self.var_list:
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=var.name.split(':')[0])
vars += [vi for vi in var_list
if vi is not var]
return vars
def reset(self):
'''Reset optimizer variables (var_optim)'''
list(map(lambda x: x.initializer.run(), self.var_optim))
@tdl.core.Submodel
def saver(self, value):
tdl.core.assert_initialized(self, 'saver', ['monitor_manager'])
if value != 'default':
return value
if self.monitor_manager is None:
return None
monitor = (self.monitor_manager.valid.monitors[0]
if self.monitor_manager.valid.monitors
else self.monitor_manager.train.monitors[0])
return EarlyStopping(
monitor=monitor,
var_list=self.var_list,
logger_path=self.monitor_manager.log_folder,
session=self.session)
def __init__(self, loss, var_list, session=None, metrics=None,
n_logging=100, log_folder=None, options=None,
**kargs):
self.loss = tf.convert_to_tensor(loss)
self.var_list = (var_list if var_list is not None
else tf.trainable_variables())
self.n_logging = n_logging
self.n_steps = 0
if 'saver' not in kargs:
kargs['saver'] = 'default'
if metrics is not None and 'monitor_manager' in kargs:
raise ValueError('cannot specify metrics and monitor_manager '
'at the same time')
metrics = (kargs['monitor_manager'] if 'monitor_manager' in kargs
else metrics)
        kargs = {key: val for key, val in kargs.items()
                 if key != 'monitor_manager'}
if log_folder is not None:
kargs['log_folder'] = log_folder
super(Optimizer, self).__init__(session=session,
monitor_manager=metrics,
options=options, **kargs)
def feed_train(self):
return dict()
def check_progress(self, step, xp):
"""Check if progress was made in the last call to the optimizer
Args:
step (int): current optimizer step.
xp (list): list of outputs from the training monitors.
Returns:
bool: variables were reset.
"""
if (self.monitor_manager is None) or (self.saver is None):
return False
monitor = self.loss_monitor
xp = xp[self.loss]
if ((self.options['progress/window_size'] < step) and
(monitor.min is not np.inf) and self.saver.checkpoints):
mean = monitor.mean(self.options['progress/window_size'])
if (self.options['progress/reset_multiplier']*(mean - monitor.min)
< (xp - monitor.min)):
print('Optimizer seems to have diverged from previous '
'sub-optimal region ({}). Resetting...'
''.format(xp))
self.saver.restore()
return True
return False
def check_nan(self, step, xp):
"""Check if the result from the optimizer includes Nan values.
Args:
step (int): current step of the optimizer.
xp (list): list of outputs from the optimizer
Returns:
bool: True if variables were reset.
"""
if any([np.isnan(oi).any() for oi in xp
if oi is not None]):
if self.saver is None:
print('Optimization returned NaN at step {}.'
'Re-initializing variables'.format(step))
self.session.run([v.initializer for v in self.var_list])
else:
print('Optimization returned NaN at step {}.'
'Restoring last checkpoint'.format(step))
if self.saver.checkpoints:
self.saver.restore()
else:
self.session.run([v.initializer for v in self.var_list])
return True
return False
def run_step(self, step, ops, feed_dict):
"""Run a step of the optimizer.
Args:
step (type): Description of parameter `step`.
ops (type): Description of parameter `ops`.
feed_dict (type): Description of parameter `feed_dict`.
Returns:
type: Description of returned object.
"""
if isinstance(self.learning_rate, ConstantLr):
feed_dict[self.learning_rate.placeholder] = \
self.learning_rate(step, self.n_steps)
n_trials = 0
while True:
output = self.session.run(ops, feed_dict=feed_dict)
output = {op: output[idx] for idx, op in enumerate(ops)}
# check number of trials
n_trials += 1
if n_trials > self.options['progress/max_trials']:
self.session.run([v.initializer for v in self.var_list])
output = self.session.run(ops, feed_dict=feed_dict)
output = {op: output[idx] for idx, op in enumerate(ops)}
return output
# Check for NaN
if self.check_nan(step, xp=output.values()):
continue
# check for progress
if self.check_progress(step=step, xp=output):
continue
break
return output
def run(self,
n_train_steps, feed_train=None,
n_valid_steps=1, valid_eval_freq=1, feed_valid=None,
monitor_training=True):
if feed_train is None:
def feed_train(): return dict()
if feed_valid is None:
def feed_valid(): return dict()
data_feeder = DataFeeder(feed_train, feed_valid)
if self.monitor_manager:
train_monitors = self.monitor_manager.train.tf_monitors
train_ops = [m.op for m in train_monitors]
valid_monitors = self.monitor_manager.valid.tf_monitors
valid_ops = [m.op for m in valid_monitors]
else:
train_monitors = []
train_ops = []
valid_monitors = []
valid_ops = []
if monitor_training and self.monitor_manager:
monitor_monitors = self.monitor_manager.monitoring.tf_monitors
monitor_ops = [m.op for m in monitor_monitors]
else:
monitor_monitors = []
monitor_ops = []
# saver function
if self.saver is not None:
self.saver.reset()
# run optimizer
try:
for step in tqdm(range(1, n_train_steps)):
# Run optimization step
xp = self.run_step(
step=step,
ops=[self.step_op] + train_ops + monitor_ops,
feed_dict=data_feeder.feed_train())
self.n_steps += 1
# feed data to monitors
if train_ops:
for i, monitor in enumerate(train_monitors):
monitor.feed(xp[monitor.op], self.n_steps)
if monitor_ops:
for i, monitor in enumerate(monitor_monitors):
monitor.feed(xp[monitor.op], self.n_steps)
# file loggers
self.monitor_manager.train.write_data()
self.monitor_manager.monitoring.write_data()
# run validation evaluation
if valid_ops and (step % valid_eval_freq == 0):
for step_valid in range(0, n_valid_steps):
valid_output = self.session.run(
valid_ops,
feed_dict=data_feeder.feed_valid())
for i, monitor in enumerate(valid_monitors):
monitor.feed(valid_output[i], self.n_steps)
# file loggers
self.monitor_manager.valid.write_data()
# saver function
if (self.saver is not None):
self.saver.add_checkpoint(step)
# log
if (step % self.n_logging == 0) and self.monitor_manager:
# print information
train_info = [(m.name, m.mean()) for m in train_monitors]
valid_info = [(m.name, m.mean()) for m in valid_monitors]
# log information in files
print("{} | {} | {}".format(step, train_info, valid_info))
finally:
# clean up
data_feeder.stop()
self.monitor_manager.flush()
if self.saver is not None:
if self.saver.checkpoints:
self.saver.restore()
self.saver.save()
class SimpleSaver(tdl.core.TdlObject):
@property
def checkpoints(self):
return None
def __init__(self, var_list, logger_path, session):
self.session = session
self.var_list = var_list
super(SimpleSaver, self).__init__(save={'logger_path': logger_path})
@tdl.core.EncapsulatedMethod
def save(self, locals, value):
self._saver = tf.train.Saver(var_list=self.var_list)
self._saver_id = 0
self._logger_path = os.path.join(value['logger_path'], 'optimizer')
if os.path.exists(self._logger_path):
shutil.rmtree(self._logger_path)
os.makedirs(self._logger_path)
@save.eval
def save(self, locals):
print('saving weights in {}'.format(self._logger_path))
saver_path = os.path.join(self._logger_path, 'var_checkpoint')
self._saver.save(
sess=self.session,
save_path=saver_path,
global_step=self._saver_id)
self._saver_id += 1
def add_checkpoint(self, step):
return
def reset(self):
return
def restore_file(self):
saver_path = os.path.join(self._logger_path,
'var_checkpoint-{}'.format(self._saver_id-1))
self._saver.restore(self.session, saver_path)
class EarlyStoppingV2(tdl.core.TdlObject):
@property
def checkpoints(self):
return self._checkpoints
@property
def optimizer(self):
return self._optimizer
@property
def session(self):
return self.optimizer.session
@tdl.core.Submodel
def objective(self, value):
if isinstance(value, monitoring.TrainingMonitor):
return value
else:
            valid_match = list(filter(lambda x: x.op == value,
                                      self.optimizer.monitor_manager.valid.monitors))
            if valid_match:
                return valid_match[0]
else:
raise ValueError('{} not found in set of valid monitors.'
''.format(value))
@tdl.core.EncapsulatedMethod
def restore(self, local, value):
local.placeholders = {var: tf.placeholder(tf.float32)
for var in self.optimizer.var_list}
assign_vars = [var.assign(local.placeholders[var])
for var in self.optimizer.var_list]
local.assign_vars = tf.group(assign_vars)
@restore.eval
def restore(self, local):
ckpt = self.checkpoints[-1]
feed_dict = {local.placeholders[var]: ckpt[var]
for var in self.optimizer.var_list}
self.session.run(local.assign_vars, feed_dict=feed_dict)
def reset(self):
self.check_progress.local.best_value = np.nan
    def __init__(self, optimizer, objective, minimize=True):
        self._optimizer = optimizer
        self._checkpoints = []  # backs the `checkpoints` property and __bool__
        # super(type(self), ...) recurses infinitely in subclasses; name the class.
        super(EarlyStoppingV2, self).__init__(objective=objective)
def __bool__(self):
return len(self.checkpoints) > 0
class EarlyStopping(tdl.core.TdlObject):
@property
def checkpoints(self):
return self._ckpts
def _init_options(self, options):
default = {'start_steps': 300,
'ckpts_dt': 5.0,
'window_size': 50}
options = tdl.core.check_defaults(options, default)
return options
def __init__(self, monitor, var_list, logger_path,
session, check_func=None, options=None):
self.monitor = monitor
self.session = session
self.var_list = var_list
self.options = self._init_options(options)
super(EarlyStopping, self).__init__(save={'logger_path': logger_path})
if check_func is not None:
raise NotImplementedError('Custom check_func not yet implemented.'
'Use None for the moment.')
self.check_func = (check_func if check_func is not None
else self.check_lower)
def check_progress(self, step, monitor):
if ((self.options['window_size'] < step) and
(monitor.min is not np.inf) and
(len(self._ckpts) > 1)):
mean = monitor.mean(self.options['window_size'])
current_value = monitor.current_value
if 10*(mean - monitor.min) < (current_value - monitor.min):
# pdb.set_trace()
print('Optimizer seems to have diverged from previous '
'sub-optimal region ({}). Resetting...'
''.format(current_value))
self.restore()
@tdl.core.EncapsulatedMethod
def check_lower(self, local, value):
local.time_last_ckpt = time()
local.best_value = np.nan
@check_lower.eval
def check_lower(self, local, step):
current_value = self.monitor.mean(self.options['window_size'])
save = (True if local.best_value is np.nan
else current_value < local.best_value)
save = ((time() - local.time_last_ckpt) > self.options['ckpts_dt']
and (step > self.options['start_steps'])
and save)
if save:
print(np.abs(local.best_value - current_value) /
(self.monitor.max - self.monitor.min))
local.time_last_ckpt = time()
local.best_value = current_value
return save
def check_greather(self, local):
current_value = self.monitor.mean(self.options['window_size'])
save = (True if local.best_value is np.nan
else local.best_value < current_value)
save = ((time() - local.time_last_ckpt) > self.options['ckpts_dt']
and save)
if save:
print(np.abs(local.best_value - current_value) /
(self.monitor.max - self.monitor.min))
local.time_last_ckpt = time()
local.best_value = current_value
return save
@tdl.core.EncapsulatedMethod
def add_checkpoint(self, local, value):
self._ckpts = collections.deque(maxlen=10)
@add_checkpoint.eval
def add_checkpoint(self, local, step):
if self.check_func(step):
print('checkpoint created')
values = self.session.run(self.var_list)
vars = {var: value for var, value in zip(self.var_list, values)}
self._ckpts.append(vars)
@tdl.core.EncapsulatedMethod
def restore(self, local, value):
local.placeholders = {var: tf.placeholder(tf.float32)
for var in self.var_list}
set_vars = [var.assign(local.placeholders[var])
for var in self.var_list]
local.set_vars = tf.group(set_vars)
@restore.eval
def restore(self, local):
ckpt = self._ckpts[-1]
feed_dict = {local.placeholders[var]: ckpt[var]
for var in self.var_list}
self.session.run(local.set_vars, feed_dict=feed_dict)
def reset(self):
self.check_lower.local.best_value = np.nan
@tdl.core.EncapsulatedMethod
def save(self, locals, value):
self._saver = tf.train.Saver(var_list=self.var_list)
self._saver_id = 0
self._logger_path = os.path.join(value['logger_path'], 'optimizer')
if os.path.exists(self._logger_path):
shutil.rmtree(self._logger_path)
os.makedirs(self._logger_path)
self._save_time = time()
@save.eval
def save(self, locals):
print('saving weights in {}'.format(self._logger_path))
saver_path = os.path.join(self._logger_path, 'var_checkpoint')
self._saver.save(
sess=self.session,
save_path=saver_path,
global_step=self._saver_id)
self._saver_id += 1
def restore_file(self):
saver_path = os.path.join(self._logger_path,
'var_checkpoint-{}'.format(self._saver_id-1))
self._saver.restore(self.session, saver_path)
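# A minimal usage sketch (hypothetical loss and variables; real projects build
# these from twodlearn/TensorFlow models):
# loss = tf.reduce_mean(tf.square(prediction - target))
# optim = Optimizer(loss, var_list=tf.trainable_variables())
# optim.run(n_train_steps=1000)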
| StarcoderdataPython |
178866 | def _longest_common_subsequence(s1: str, s2: str) -> int:
"""
Let m and n be the lengths of two strings.
Build L[m+1][n+1] from the bottom up.
Note: L[i][j] contains length of LCS of X[0..i-1] and Y[0..j-1]
Runtime: O(mn)
Space Complexity: O(mn)
"""
m, n = len(s1), len(s2)
L = [[0] * (n + 1) for i in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if s1[i - 1] == s2[j - 1]:
L[i][j] = L[i - 1][j - 1] + 1
else:
L[i][j] = max(L[i - 1][j], L[i][j - 1])
return L[m][n]
def longest_common_subsequence(s1: str, s2: str) -> int:
"""
Space-optimized version of LCS.
Let m and n be the lengths of two strings.
Runtime: O(mn)
Space Complexity: O(min(m, n))
"""
m, n = len(s1), len(s2)
if m < n:
s1, s2 = s2, s1
L = [0] * (n + 1)
for a in s1:
prev_row, prev_row_col = 0, 0
for j, b in enumerate(s2):
prev_row, prev_row_col = L[j + 1], prev_row
if a == b:
L[j + 1] = prev_row_col + 1
else:
L[j + 1] = max(L[j], prev_row)
return L[-1]
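if __name__ == "__main__":
    # Sanity check on the classic CLRS example: LCS("ABCBDAB", "BDCAB") is
    # "BCAB", length 4, and both implementations should agree.
    assert _longest_common_subsequence("ABCBDAB", "BDCAB") == 4
    assert longest_common_subsequence("ABCBDAB", "BDCAB") == 4
    print("OK")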
| StarcoderdataPython |
1845176 | <gh_stars>0
import dendropy  # used throughout below; the original file omitted this import
import asterid as ad
def asterid_dm_to_dendropy_dm(D, ts):
pdm = dendropy.PhylogeneticDistanceMatrix()
pdm.taxon_namespace = dendropy.TaxonNamespace()
pdm._mapped_taxa = set()
for i in range(len(ts)):
        for j in range(len(ts)):  # enumerate(ts) would yield (index, taxon) tuples
si = ts[i]
sj = ts[j]
dij = D[i, j]
xi = pdm.taxon_namespace.get_taxon(si)
if not xi:
xi = dendropy.Taxon(si)
pdm.taxon_namespace.add_taxon(xi)
pdm._mapped_taxa.add(xi)
pdm._taxon_phylogenetic_distances[xi] = {}
xj = pdm.taxon_namespace.get_taxon(sj)
if not xj:
xj = dendropy.Taxon(sj)
pdm.taxon_namespace.add_taxon(xj)
pdm._mapped_taxa.add(xj)
pdm._taxon_phylogenetic_distances[xj] = {}
dij = float(dij)
pdm._taxon_phylogenetic_distances[xi][xj] = dij
return pdm
| StarcoderdataPython |
3254314 | <filename>cogs/misc.py
import datetime
import asyncio
import strawpy
import random
import re
import sys
import subprocess
from PythonGists import PythonGists
from appuselfbot import bot_prefix
from discord.ext import commands
from cogs.utils.checks import *
'''Module for miscellaneous commands'''
class Misc:
def __init__(self, bot):
self.bot = bot
self.regionals = {'a': '\N{REGIONAL INDICATOR SYMBOL LETTER A}', 'b': '\N{REGIONAL INDICATOR SYMBOL LETTER B}', 'c': '\N{REGIONAL INDICATOR SYMBOL LETTER C}',
'd': '\N{REGIONAL INDICATOR SYMBOL LETTER D}', 'e': '\N{REGIONAL INDICATOR SYMBOL LETTER E}', 'f': '\N{REGIONAL INDICATOR SYMBOL LETTER F}',
'g': '\N{REGIONAL INDICATOR SYMBOL LETTER G}', 'h': '\N{REGIONAL INDICATOR SYMBOL LETTER H}', 'i': '\N{REGIONAL INDICATOR SYMBOL LETTER I}',
'j': '\N{REGIONAL INDICATOR SYMBOL LETTER J}', 'k': '\N{REGIONAL INDICATOR SYMBOL LETTER K}', 'l': '\N{REGIONAL INDICATOR SYMBOL LETTER L}',
'm': '\N{REGIONAL INDICATOR SYMBOL LETTER M}', 'n': '\N{REGIONAL INDICATOR SYMBOL LETTER N}', 'o': '\N{REGIONAL INDICATOR SYMBOL LETTER O}',
'p': '\N{REGIONAL INDICATOR SYMBOL LETTER P}', 'q': '\N{REGIONAL INDICATOR SYMBOL LETTER Q}', 'r': '\N{REGIONAL INDICATOR SYMBOL LETTER R}',
's': '\N{REGIONAL INDICATOR SYMBOL LETTER S}', 't': '\N{REGIONAL INDICATOR SYMBOL LETTER T}', 'u': '\N{REGIONAL INDICATOR SYMBOL LETTER U}',
'v': '\N{REGIONAL INDICATOR SYMBOL LETTER V}', 'w': '\N{REGIONAL INDICATOR SYMBOL LETTER W}', 'x': '\N{REGIONAL INDICATOR SYMBOL LETTER X}',
'y': '\N{REGIONAL INDICATOR SYMBOL LETTER Y}', 'z': '\N{REGIONAL INDICATOR SYMBOL LETTER Z}', '0': '0⃣', '1': '1⃣', '2': '2⃣', '3': '3⃣',
'4': '4⃣', '5': '5⃣', '6': '6⃣', '7': '7⃣', '8': '8⃣', '9': '9⃣'}
@commands.command(pass_context=True)
async def about(self, ctx):
"""Links to the bot's github page."""
if embed_perms(ctx.message) and ctx.message.content[7:] != 'short':
em = discord.Embed(color=0xad2929, title='\ud83e\udd16 Appu\'s Discord Selfbot', description='**Features:**\n- Custom commands/reactions\n- Save last x images in a channel to your computer\n- Keyword notifier\n'
'- Set/cycle your game status and your avatar\n- Google web and image search\n- MyAnimeList search\n- Spoiler tagging\n'
'- Server info commands\n- Quoting, calculator, creating polls, and much more')
em.add_field(name='\ud83d\udd17 Link to download', value='[Github link](https://github.com/appu1232/Discord-Selfbot/tree/master)')
em.add_field(name='\ud83c\udfa5Quick examples:', value='[Simple commands](http://i.imgur.com/3H9zpop.gif)')
em.set_footer(text='Made by appu1232#2569', icon_url='https://i.imgur.com/RHagTDg.png')
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
await self.bot.send_message(ctx.message.channel, 'https://github.com/appu1232/Selfbot-for-Discord')
await self.bot.delete_message(ctx.message)
@commands.group(aliases=['status'], pass_context=True)
async def stats(self, ctx):
"""Bot stats."""
uptime = (datetime.datetime.now() - self.bot.uptime)
hours, rem = divmod(int(uptime.total_seconds()), 3600)
minutes, seconds = divmod(rem, 60)
days, hours = divmod(hours, 24)
if days:
time = '%s days, %s hours, %s minutes, and %s seconds' % (days, hours, minutes, seconds)
else:
time = '%s hours, %s minutes, and %s seconds' % (hours, minutes, seconds)
try:
game = self.bot.game
except:
game = 'None'
if embed_perms(ctx.message):
em = discord.Embed(title='Bot Stats', color=0x32441c)
em.add_field(name=u'\U0001F553 Uptime', value=time, inline=False)
em.add_field(name=u'\U0001F4E4 Messages sent', value=str(self.bot.icount))
            em.add_field(name=u'\U0001F4E5 Messages received', value=str(self.bot.message_count))
em.add_field(name=u'\u2757 Mentions', value=str(self.bot.mention_count))
em.add_field(name=u'\u2694 Servers', value=str(len(self.bot.servers)))
em.add_field(name=u'\u270F Keywords logged', value=str(self.bot.keyword_log))
em.add_field(name=u'\U0001F3AE Game', value=game)
mem_usage = '{:.2f} MiB'.format(__import__('psutil').Process().memory_full_info().uss / 1024**2)
em.add_field(name=u'\U0001F4BE Memory usage:', value=mem_usage)
em.set_footer(text='Selfbot made by appu1232#2569')
try:
g = git.cmd.Git(working_dir=os.getcwd())
g.execute(["git", "fetch", "origin", "master"])
version = g.execute(["git", "rev-list", "--right-only", "--count", "master...origin/master"])
commits = g.execute(["git", "rev-list", "--max-count=%s" % version, "origin/master"])
if version == '0':
status = 'Up to date.'
else:
latest = g.execute(["git", "log", "--pretty=oneline", "--abbrev-commit", "--stat", "--pretty", "-%s" % version, "origin/master"])
gist_latest = PythonGists.Gist(description='Latest changes for the selfbot.', content=latest, name='latest.txt')
if version == '1':
status = 'Behind by 1 release. [Latest update.](%s)' % gist_latest
else:
status = '%s releases behind. [Latest updates.](%s)' % (version, gist_latest)
em.add_field(name=u'\U0001f4bb Update status:', value=status)
except:
raise
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
msg = '**Bot Stats:** ```Uptime: %s\nMessages Sent: %s\nMessages Received: %s\nMentions: %s\nServers: %s\nKeywords logged: %s\nGame: %s```' % (time, str(self.bot.icount), str(self.bot.message_count), str(self.bot.mention_count), str(len(self.bot.servers)), str(self.bot.keyword_log), game)
await self.bot.send_message(ctx.message.channel, bot_prefix + msg)
await self.bot.delete_message(ctx.message)
# Embeds the message
@commands.command(pass_context=True)
async def embed(self, ctx):
"""Embed given text. Ex: Do >embed for more help"""
if ctx.message.content[6:].strip():
if embed_perms(ctx.message):
msg = ctx.message.content[6:].strip()
title = description = image = thumbnail = color = footer = author = None
embed_values = msg.split('|')
for i in embed_values:
if i.strip().lower().startswith('title='):
title = i.strip()[6:].strip()
elif i.strip().lower().startswith('description='):
description = i.strip()[12:].strip()
elif i.strip().lower().startswith('desc='):
description = i.strip()[5:].strip()
elif i.strip().lower().startswith('image='):
image = i.strip()[6:].strip()
elif i.strip().lower().startswith('thumbnail='):
thumbnail = i.strip()[10:].strip()
elif i.strip().lower().startswith('colour='):
color = i.strip()[7:].strip()
elif i.strip().lower().startswith('color='):
color = i.strip()[6:].strip()
elif i.strip().lower().startswith('footer='):
footer = i.strip()[7:].strip()
elif i.strip().lower().startswith('author='):
author = i.strip()[7:].strip()
if color:
if not color.startswith('0x'):
color = '0x' + color
if color:
em = discord.Embed(title=title, description=description, color=int(color, 16))
else:
em = discord.Embed(title=title, description=description)
for i in embed_values:
if i.strip().lower().startswith('field='):
field_inline = True
field = i.strip()[6:]  # slice off the 'field=' prefix; lstrip('field=') strips characters, not a prefix
field_name, field_value = field.split('value=')
if 'inline=' in field_value:
field_value, field_inline = field_value.split('inline=')
if 'false' in field_inline.lower() or 'no' in field_inline.lower():
field_inline = False
field_name = field_name.strip()[5:]  # slice off the 'name=' prefix
em.add_field(name=field_name, value=field_value.strip(), inline=field_inline)
if author:
if 'icon=' in author:
text, icon = author.split('icon=')
em.set_author(name=text.strip()[5:], icon_url=icon)
else:
em.set_author(name=author)
if image:
em.set_image(url=image)
if thumbnail:
em.set_thumbnail(url=thumbnail)
if footer:
if 'icon=' in footer:
text, icon = footer.split('icon=')
em.set_footer(text=text.strip()[5:], icon_url=icon)
else:
em.set_footer(text=footer)
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No embed permissions in this channel.')
else:
msg = '**How to use the >embed command:**\n**Example:** >embed title=test this | description=some words | color=3AB35E | field=name=test value=test\n\n**You do NOT need to specify every property, only the ones you want.**\n**All properties and the syntax:**\ntitle=words\ndescription=words\ncolor=hexvalue\nimage=url_to_image (must be https)\nthumbnail=url_to_image\nauthor=words **OR** author=name=words icon=url_to_image\nfooter=words **OR** footer=name=words icon=url_to_image\nfield=name=words value=words (you can add as many fields as you want)\n\n**NOTE:** After the command is sent, the bot will delete your message and replace it with the embed. Make sure you have it saved or else you\'ll have to type it all again if the embed isn\'t how you want it.'
await self.bot.send_message(ctx.message.channel, bot_prefix + msg)
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def game(self, ctx):
"""Set playing status. Ex: >game napping >help game for more info
Your game status will not show for yourself; only other people can see it. This is a limitation of how the client works and how the API interacts with the client.
To set a rotating game status, do >game game1 | game2 | game3 | etc.
It will then prompt you for an interval in seconds to wait before changing the game, and after that the order in which to change (in order or random).
Ex: >game with matches | sleeping | watching anime"""
if ctx.message.content[6:]:
game = str(ctx.message.clean_content[6:])
# Cycle games if more than one game is given.
if ' | ' in ctx.message.content[6:]:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Input interval in seconds to wait before changing to the next game (``n`` to cancel):')
def check(msg):
return msg.content.isdigit() or msg.content.lower().strip() == 'n'
def check2(msg):
return msg.content == 'random' or msg.content.lower().strip() == 'r' or msg.content.lower().strip() == 'order' or msg.content.lower().strip() == 'o'
reply = await self.bot.wait_for_message(author=ctx.message.author, check=check, timeout=60)
if not reply:
return
if reply.content.lower().strip() == 'n':
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled')
elif reply.content.strip().isdigit():
interval = int(reply.content.strip())
if interval >= 10:
self.bot.game_interval = interval
games = game.split(' | ')
if len(games) != 2:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Change game in order or randomly? Input ``o`` for order or ``r`` for random:')
s = await self.bot.wait_for_message(author=ctx.message.author, check=check2, timeout=60)
if not s:
return
if s.content.strip() == 'r' or s.content.strip() == 'random':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Game set. Game will randomly change every ``%s`` seconds' % reply.content.strip())
loop_type = 'random'
else:
loop_type = 'ordered'
else:
loop_type = 'ordered'
if loop_type == 'ordered':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Game set. Game will change every ``%s`` seconds' % reply.content.strip())
games = {'games': game.split(' | '), 'interval': interval, 'type': loop_type}
with open('settings/games.json', 'w') as g:
json.dump(games, g, indent=4)
self.bot.game = game.split(' | ')[0]
else:
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled. Interval is too short. Must be at least 10 seconds.')
# Set game if only one game is given.
else:
self.bot.game_interval = None
self.bot.game = game
games = {'games': str(self.bot.game), 'interval': '0', 'type': 'none'}
with open('settings/games.json', 'w') as g:
json.dump(games, g, indent=4)
await self.bot.change_presence(game=discord.Game(name=game))
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Game set as: ``Playing %s``' % ctx.message.content[6:])
# Remove game status.
else:
self.bot.game_interval = None
self.bot.game = None
await self.bot.change_presence(game=None)
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Set playing status off')
if os.path.isfile('settings/games.json'):
os.remove('settings/games.json')
@commands.group(aliases=['avatars'], pass_context=True)
async def avatar(self, ctx):
"""Rotate avatars. See README for more info."""
if ctx.invoked_subcommand is None:
with open('settings/avatars.json', 'r+') as a:
avi_config = json.load(a)
if avi_config['password'] == '':
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cycling avatars requires you to input your password. Your password will not be sent anywhere and no one will have access to it. Enter your password with ``>avatar password <password>``. Make sure you are in a private channel where no one can see!')
if avi_config['interval'] != '0':
self.bot.avatar = None
self.bot.avatar_interval = None
avi_config['interval'] = '0'
with open('settings/avatars.json', 'w') as avi:
json.dump(avi_config, avi, indent=4)
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Disabled cycling of avatars.')
else:
if os.listdir('avatars'):
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Enabled cycling of avatars. Input interval in seconds to wait before changing avatars (``n`` to cancel):')
def check(msg):
return msg.content.isdigit() or msg.content.lower().strip() == 'n'
def check2(msg):
return msg.content == 'random' or msg.content.lower().strip() == 'r' or msg.content.lower().strip() == 'order' or msg.content.lower().strip() == 'o'
interval = await self.bot.wait_for_message(author=ctx.message.author, check=check, timeout=60)
if not interval:
return
if interval.content.lower().strip() == 'n':
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled.')
elif int(interval.content) < 1800:
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled. Interval is too short. Must be at least 1800 seconds (30 minutes).')
else:
avi_config['interval'] = int(interval.content)
if len(os.listdir('avatars')) != 2:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Change avatars in order or randomly? Input ``o`` for order or ``r`` for random:')
cycle_type = await self.bot.wait_for_message(author=ctx.message.author, check=check2, timeout=60)
if not cycle_type:
return
if cycle_type.content.strip() == 'r' or cycle_type.content.strip() == 'random':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Avatar cycling enabled. Avatar will randomly change every ``%s`` seconds' % interval.content.strip())
loop_type = 'random'
else:
loop_type = 'ordered'
else:
loop_type = 'ordered'
avi_config['type'] = loop_type
if loop_type == 'ordered':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Avatar cycling enabled. Avatar will change every ``%s`` seconds' % interval.content.strip())
with open('settings/avatars.json', 'r+') as avi:
avi.seek(0)
avi.truncate()
json.dump(avi_config, avi, indent=4)
self.bot.avatar_interval = interval.content
self.bot.avatar = random.choice(os.listdir('avatars'))
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No images found under ``avatars``. Please add images (.jpg .jpeg and .png types only) to that folder and try again.')
@avatar.command(aliases=['pass', 'pw'], pass_context=True)
async def password(self, ctx, *, msg):
"""Set your discord acc password to rotate avatars. See README for more info."""
with open('settings/avatars.json', 'r+') as a:
avi_config = json.load(a)
avi_config['password'] = msg.strip().strip('"').lstrip('<').rstrip('>')
a.seek(0)
a.truncate()
json.dump(avi_config, a, indent=4)
await self.bot.delete_message(ctx.message)
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Password set. Do ``>avatar`` to toggle cycling avatars.')
@commands.command(pass_context=True)
async def choose(self, ctx, *, choices: str):
"""Choose randomly from the options you give. >choose this | that"""
await self.bot.send_message(ctx.message.channel, bot_prefix + 'I choose: ``{}``'.format(random.choice(choices.split("|"))))
@commands.command(pass_context=True)
async def emoji(self, ctx, *, msg):
"""Get url of emoji (across any server). Ex: >emoji :smug:"""
url = None
exact_match = False
for server in self.bot.servers:
for emoji in server.emojis:
if msg.strip().lower() in str(emoji):
url = emoji.url
if msg.strip() == str(emoji).split(':')[1]:
url = emoji.url
exact_match = True
break
if exact_match:
break
if embed_perms(ctx.message) and url:
em = discord.Embed()
em.set_image(url=url)
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
elif not embed_perms(ctx.message) and url:
await self.bot.send_message(ctx.message.channel, url)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Could not find emoji.')
return await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def ping(self, ctx):
"""Get response time."""
msgtime = ctx.message.timestamp.now()
await self.bot.send_message(ctx.message.channel, bot_prefix + ' pong')
now = datetime.datetime.now()
ping = now - msgtime
if embed_perms(ctx.message):
pong = discord.Embed(title='Response Time:', description=str(ping), color=0x7A0000)
pong.set_thumbnail(url='http://odysseedupixel.fr/wp-content/gallery/pong/pong.jpg')
await self.bot.send_message(ctx.message.channel, content=None, embed=pong)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + '``Response Time: %s``' % str(ping))
@commands.command(pass_context=True)
async def quote(self, ctx, *, msg: str = None):
"""Quote the last message sent in the channel. >help quote for more info.
>quote - quotes the last message sent in the channel.
>quote <words> - tries to search for a message sent recently that contains the given words and quotes it.
>quote <message_id> - quotes the given message. (Enable developer mode to copy message ids)."""
result = None
if msg:
length = len(self.bot.all_log[ctx.message.channel.id + ' ' + ctx.message.server.id])
if length < 201:
size = length
else:
size = 200
for i in range(length-2, length-size, -1):
search = self.bot.all_log[ctx.message.channel.id + ' ' + ctx.message.server.id][i]
if ctx.message.clean_content[6:].lower().strip() in search[0].clean_content.lower() and (search[0].author != ctx.message.author or search[0].content[:7] != '>quote '):
result = [search[0], search[0].author, search[0].timestamp]
break
if ctx.message.clean_content[6:].strip() == search[0].id:
result = [search[0], search[0].author, search[0].timestamp]
break
else:
search = self.bot.all_log[ctx.message.channel.id + ' ' + ctx.message.server.id][-2]
result = [search[0], search[0].author, search[0].timestamp]
if result:
await self.bot.delete_message(ctx.message)
if embed_perms(ctx.message) and result[0].content:
em = discord.Embed(description=result[0].content, timestamp=result[2], color=0xbc0b0b)
em.set_author(name=result[1].name, icon_url=result[1].avatar_url)
await self.bot.send_message(ctx.message.channel, embed=em)
else:
await self.bot.send_message(ctx.message.channel, '%s - %s```%s```' % (result[1].name, result[2], result[0].content))
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No quote found.')
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def poll(self, ctx, *, msg):
"""Create a strawpoll. Ex: >poll Favorite color = Blue | Red | Green"""
try:
options = [op.strip() for op in msg.split('|')]
if '=' in options[0]:
title, options[0] = options[0].split('=')
options[0] = options[0].strip()
else:
title = 'Poll by %s' % ctx.message.author.name
except:
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Invalid Syntax. Example use: ``>poll Favorite color = Blue | Red | Green | Purple``')
poll = strawpy.create_poll(title.strip(), options)
await self.bot.send_message(ctx.message.channel, bot_prefix + poll.url)
@commands.command(pass_context=True)
async def calc(self, ctx, *, msg):
"""Simple calculator. Ex: >calc 2+2"""
equation = msg.strip().replace('^', '**')
if '=' in equation:
left = eval(equation.split('=')[0])
right = eval(equation.split('=')[1])
answer = str(left == right)
else:
answer = str(eval(equation))
if embed_perms(ctx.message):
em = discord.Embed(color=0xD3D3D3, title='Calculator')
em.add_field(name='Input:', value=msg.replace('**', '^'), inline=False)
em.add_field(name='Output:', value=answer, inline=False)
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
await self.bot.delete_message(ctx.message)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + answer)
@commands.command(pass_context=True)
async def l2g(self, ctx, *, msg: str):
"""Creates a googleitfor.me link. Ex: >l2g how do i become cool."""
lmgtfy = 'http://googleitfor.me/?q='
words = msg.lower().strip().split(' ')
for word in words:
lmgtfy += word + '+'
await self.bot.send_message(ctx.message.channel, bot_prefix + lmgtfy[:-1])
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def d(self, ctx):
"""Deletes the last message sent or n messages sent. Ex: >d 5"""
# If number of seconds/messages are specified
if len(ctx.message.content.lower().strip()) > 2:
if ctx.message.content[3] == '!':
killmsg = self.bot.self_log[ctx.message.channel.id][len(self.bot.self_log[ctx.message.channel.id]) - 2]
timer = int(ctx.message.content[4:].lower().strip())
# Animated countdown because screw rate limit amirite
destroy = await self.bot.edit_message(ctx.message, bot_prefix + 'The above message will self-destruct in:')
msg = await self.bot.send_message(ctx.message.channel, '``%s |``' % timer)
for i in range(0, timer, 4):
if timer - 1 - i == 0:
await self.bot.delete_message(destroy)
msg = await self.bot.edit_message(msg, '``0``')
break
else:
msg = await self.bot.edit_message(msg, '``%s |``' % int(timer - 1 - i))
await asyncio.sleep(1)
if timer - 1 - i != 0:
if timer - 2 - i == 0:
await self.bot.delete_message(destroy)
msg = await self.bot.edit_message(msg, '``0``')
break
else:
msg = await self.bot.edit_message(msg, '``%s /``' % int(timer - 2 - i))
await asyncio.sleep(1)
if timer - 2 - i != 0:
if timer - 3 - i == 0:
await self.bot.delete_message(destroy)
msg = await self.bot.edit_message(msg, '``0``')
break
else:
msg = await self.bot.edit_message(msg, '``%s -``' % int(timer - 3 - i))
await asyncio.sleep(1)
if timer - 3 - i != 0:
if timer - 4 - i == 0:
await self.bot.delete_message(destroy)
msg = await self.bot.edit_message(msg, '``0``')
break
else:
msg = await self.bot.edit_message(msg, '``%s \ ``' % int(timer - 4 - i))
await asyncio.sleep(1)
await self.bot.edit_message(msg, ':bomb:')
await asyncio.sleep(.5)
await self.bot.edit_message(msg, ':fire:')
await self.bot.edit_message(killmsg, ':fire:')
await asyncio.sleep(.5)
await self.bot.delete_message(msg)
await self.bot.delete_message(killmsg)
else:
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
for i in range(0, int(ctx.message.content[3:])):
try:
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
except:
pass
# If no number specified, delete message immediately
else:
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
@commands.command(pass_context=True)
async def spoiler(self, ctx, *, msg : str):
"""Spoiler tag. Ex: >spoiler Some book | They get married."""
try:
if " | " in msg:
spoiled_work, spoiler = msg.lower().split(" | ", 1)
else:
spoiled_work, _, spoiler = msg.lower().partition(" ")
await self.bot.edit_message(ctx.message, bot_prefix + 'Spoiler for `' + spoiled_work + '`: \n`'
+ ''.join(map(lambda c: chr(ord('a') + (((ord(c) - ord('a')) + 13) % 26)) if c >= 'a' and c <= 'z' else c, spoiler))
+ '`\n' + bot_prefix + 'Use http://rot13.com to decode')
except:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Could not encrypt spoiler.')
@commands.group(pass_context=True)
async def gist(self, ctx):
"""Posts to gist"""
if ctx.invoked_subcommand is None:
url = PythonGists.Gist(description='Created in channel: {} in server: {}'.format(ctx.message.channel, ctx.message.server), content=ctx.message.content[6:], name='Output')
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Gist output: ' + url)
await self.bot.delete_message(ctx.message)
@gist.command(pass_context=True)
async def file(self, ctx, *, msg):
"""Create gist of file"""
try:
with open(msg) as fp:
output = fp.read()
url = PythonGists.Gist(description='Created in channel: {} in server: {}'.format(ctx.message.channel, ctx.message.server), content=output, name=msg.replace('/', '.'))
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Gist output: ' + url)
except:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'File not found.')
finally:
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def regional(self, ctx, *, msg):
"""Replace letters with regional indicator emojis"""
await self.bot.delete_message(ctx.message)
msg = list(msg)
regional_list = [self.regionals[x.lower()] if x.isalnum() else x for x in msg]
regional_output = ' '.join(regional_list)
await self.bot.send_message(ctx.message.channel, regional_output)
@commands.command(pass_context=True)
async def space(self, ctx, *, msg):
"""Add n spaces between each letter. Ex: >space 2 thicc"""
await self.bot.delete_message(ctx.message)
if msg.split(' ', 1)[0].isdigit():
spaces = int(msg.split(' ', 1)[0]) * ' '
msg = msg.split(' ', 1)[1].strip()
else:
spaces = ' '
msg = list(msg)
spaced_message = '{}'.format(spaces).join(msg)
await self.bot.send_message(ctx.message.channel, spaced_message)
@commands.command(pass_context=True)
async def react(self, ctx, msg: str, id: int = None):
"""Add letter(s) as reaction to previous message. Ex: >react hot"""
await self.bot.delete_message(ctx.message)
reactions = []
if id:
limit = 25
else:
limit = 1
for i in msg:
if i.isalnum():
reactions.append(self.regionals[i.lower()])
else:
reactions.append(i)
async for message in self.bot.logs_from(ctx.message.channel, limit=limit):
if (not id and message.id != ctx.message.id) or (str(id) == message.id):
for i in reactions:
await self.bot.add_reaction(message, i)
def setup(bot):
bot.add_cog(Misc(bot))
| StarcoderdataPython |
1786879 | <filename>polyA/fill_consensus_position_matrix.py
from typing import Dict, List, Tuple
from .matrices import ConsensusMatrixContainer
from .performance import timeit
@timeit()
def fill_consensus_position_matrix(
row_count: int,
column_count: int,
start_all: int,
subfams: List[str],
chroms: List[str],
starts: List[int],
stops: List[int],
consensus_starts: List[int],
strands: List[str],
) -> ConsensusMatrixContainer:
"""
Fills matrix that holds the consensus position for each subfam at that
position in the alignment. Walks along the alignments one nucleotide at a time adding
the consensus position to the matrix.
At same time, fills ActiveCells.
input:
column_count: number of columns in alignment matrix - will be same number of
columns in consensus_matrix
row_count: number of rows in matrices
start_all: min start position on chromosome/target sequences for whole alignment
subfams: actual subfamily/consensus sequences from alignment
chroms: actual target/chromosome sequences from alignment
starts: start positions for all competing alignments (on target)
stops: stop positions for all competing alignments (on target)
consensus_starts: where alignment starts in the subfam/consensus sequence
strands: what strand each of the alignments are on - reverse strand will count down instead of up
output:
ConsensusMatrixContainer
>>> subs = ["", ".AA", "TT-"]
>>> chrs = ["", ".AA", "TTT"]
>>> strts = [0, 1, 0]
>>> stps = [0, 2, 2]
>>> con_strts = [-1, 0, 10]
>>> strandss = ["", "+", "-"]
>>> active, con_mat = fill_consensus_position_matrix(3, 3, 0, subs, chrs, strts, stps, con_strts, strandss)
>>> con_mat
{(1, 2): 0, (1, 3): 1, (2, 1): 10, (2, 2): 9, (2, 3): 9, (0, 0): 0, (0, 1): 0, (0, 2): 0}
>>> active
{2: [0, 1, 2], 3: [0, 1, 2], 1: [0, 2], 0: [0]}
"""
active_cells: Dict[int, List[int]] = {}
consensus_matrix: Dict[Tuple[int, int], int] = {}
# start at 1 to ignore 'skip state'
for row_index in range(1, row_count):
if strands[row_index] == "+":
consensus_pos = consensus_starts[row_index] - 1
col_index: int = starts[row_index] - start_all + 1
seq_index: int = 0
while col_index < stops[row_index] + 1 - start_all + 1:
# consensus pos only advances when there is not a gap in the subfam seq
if subfams[row_index][seq_index] != "-":
consensus_pos += 1
consensus_matrix[row_index, col_index] = consensus_pos
# matrix position only advances when there is not a gap in the chrom seq
if chroms[row_index][seq_index] != "-":
if col_index in active_cells:
active_cells[col_index].append(row_index)
else:
active_cells[col_index] = [0, row_index]
col_index += 1
seq_index += 1
else: # reverse strand
consensus_pos2 = consensus_starts[row_index] + 1
col_index2: int = starts[row_index] - start_all + 1
seq_index2: int = 0
while col_index2 < stops[row_index] + 1 - start_all + 1:
if subfams[row_index][seq_index2] != "-":
consensus_pos2 -= 1
consensus_matrix[row_index, col_index2] = consensus_pos2
if chroms[row_index][seq_index2] != "-":
if col_index2 in active_cells:
active_cells[col_index2].append(row_index)
else:
active_cells[col_index2] = [0, row_index]
col_index2 += 1
seq_index2 += 1
for i in range(column_count):
consensus_matrix[0, i] = 0
if i not in active_cells:
active_cells[i] = [0]
return ConsensusMatrixContainer(active_cells, consensus_matrix)
| StarcoderdataPython |
5089534 | <reponame>lelechen63/idinvert_pytorch
import numpy as np
import cv2, PIL.Image
# show image in Jupyter Notebook (works inside a loop)
from io import BytesIO
from IPython.display import display, Image
def show_img_arr(arr, bgr_mode = False):
if bgr_mode is True:
arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
im = PIL.Image.fromarray(arr)
bio = BytesIO()
im.save(bio, format='png')
display(Image(bio.getvalue(), format='png'))
# show depth array in Jupyter Notebook (works inside a loop)
def show_depth_arr(depth_map):
depth_max = np.max(depth_map)
depth_min = np.min(depth_map)
depth_map = (depth_map - depth_min)/(depth_max - depth_min)*255
show_img_arr(depth_map.astype(np.uint8))
# rotate verts along y axis
def rotate_verts_y(verts, y):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
angle = y*np.math.pi/180
R = np.array([[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
return verts
# rotate verts along x axis
def rotate_verts_x(verts, x):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
angle = x*np.math.pi/180
R = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
return verts
# rotate verts along z axis
def rotate_verts_z(verts, z):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
angle = z*np.math.pi/180
R = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
return verts
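# Minimal usage sketch (illustrative values only, not part of the original
# module): each helper rotates the vertex cloud about its centroid, so the
# array shape is preserved while only the orientation changes; chaining
# calls composes rotations.
if __name__ == '__main__':
    demo_verts = np.array([[1.0, 0.0, 0.0],
                           [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]])
    rotated = rotate_verts_y(demo_verts, 90)   # 90 degrees about the y axis
    rotated = rotate_verts_x(rotated, 45)      # then 45 degrees about x
    print(rotated.shape)                       # (3, 3): shape is unchanged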
| StarcoderdataPython |
8016443 | <filename>caldavclientlibrary/protocol/url.py
##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import urllib
class URL(object):
eAbsolute = 0
eRelative = 1
eLastPath = 2
URLEscape = '%'
URLReserved = "/?:@&="
URLUnreserved = ( # Allowable URL chars
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, # 48 - 63
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLCharacter = ( # Allowable URL chars -- all
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 48 - 63
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLXCharacter = ( # Allowable URL chars (all)
# RFC2732 uses '[...]' for IPv6 addressing - [] are now allowed
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, # 48 - 63
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLSchemeDoubleSlash = ("http", "https", "webcal",)
def __init__(self, url=None, scheme=None, server=None, path=None, extended=None, decode=False):
self.scheme = ""
self.server = ""
self.path = ""
self.extended = ""
if not url:
self.scheme = scheme
self.server = server
self.path = path
if self.path and decode:
self.path = urllib.unquote(self.path)
self.extended = extended
if self.extended and decode:
self.extended = urllib.unquote_plus(self.extended)
else:
self._parse(url, decode)
def __str__(self):
return "URL: %s" % (self.toString(),)
def __repr__(self):
return "URL: %s" % (self.toString(),)
def __cmp__(self, other):
return cmp(self.toString(), other.toString())
def absoluteURL(self):
return self.toString()
def relativeURL(self):
return self.toString(conversion=URL.eRelative)
def toString(self, conversion=eAbsolute, encode=True):
result = ""
# Add scheme & host if not relative
if conversion == URL.eAbsolute and self.scheme and self.server:
result += self.scheme + ":"
if self.scheme in URL.URLSchemeDoubleSlash:
result += "//"
result += self.server
# Get path (or last part of it if required)
if self.path and conversion == URL.eLastPath:
path = self.path[self.path.rfind("/"):]
else:
path = self.path
# Now encode if required
if path:
result += (urllib.quote(path) if encode else path)
if self.extended:
result += (urllib.quote_plus(self.extended, "?&=") if encode else self.extended)
return result
def equal(self, comp):
# Compare each component
if self.scheme != comp.scheme:
return False
if self.server != comp.server:
return False
# Ignore trailing slash
if self.path.rstrip("/") != comp.path.rstrip("/"):
return False
return True
def equalRelative(self, comp):
# Must be relative
if comp.server:
return False
if not self.path and not comp.path:
return True
if not self.path or not comp.path:
return False
# Just compare paths, ignore trailing slash
return self.path.rstrip("/") == comp.path.rstrip("/")
def dirname(self):
if self.path:
newpath = os.path.dirname(self.path.rstrip("/")) + "/"
return URL(scheme=self.scheme, server=self.server, path=newpath)
def _parse(self, url, decode=False):
# Strip off main scheme
if url.lower().startswith("url:"):
url = url[4:]
# Special - if it starts with "/" its a relative HTTP url
if url[0] == '/':
self.scheme = "http"
self.server = None
self._parsePath(url, decode)
else:
# Get protocol scheme
self.scheme = url[:url.find(":")].lower()
url = url[len(self.scheme):]
if self.scheme in URL.URLSchemeDoubleSlash:
assert(url.startswith("://"))
# Look for server
splits = url[3:].split("/", 1)
self.server = splits[0]
if len(splits) == 2:
self._parsePath("/" + splits[1], decode)
elif self.scheme in ("mailto", "urn",):
assert(url.startswith(":"))
# Look for server
self.server = url[1:]
def _parsePath(self, path, decode=False):
# Look for extended bits
splits = path.split("?", 1)
self.path = splits[0]
if decode:
self.path = urllib.unquote(self.path)
if len(splits) == 2:
self.extended = "?" + splits[1]
if decode:
self.extended = urllib.unquote_plus(self.extended)
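if __name__ == "__main__":
    # Minimal round-trip sketch; the URL below is an illustrative example,
    # not part of the library:
    u = URL(url="http://example.com/cal%20endar/?a=1", decode=True)
    print u.scheme          # http
    print u.server          # example.com
    print u.path            # /cal endar/ (decoded)
    print u.relativeURL()   # /cal%20endar/?a=1 (re-encoded)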
| StarcoderdataPython |
3525205 | <filename>prepare_verbs.py
import jsonpickle as jp
from utils import open_file, write_file, collator
jp.set_encoder_options('simplejson', sort_keys=True, indent=4, ensure_ascii=False)
content = open_file('input/monlam_verbs.json')
json = jp.decode(content)
dadrag = open_file('input/dadrag_syllables.txt').strip().split('\n')
entries = []
for inflected, context in json.items():
# a few entries don't have any content in monlam_verbs.json and are filtered here
# like : ལྷོགས་ | ༡བྱ་ཚིག 1. ༡བརྡ་རྙིང་། རློགས། 2. ཀློགས། that parses into "ལྷོགས": []
if context == []:
continue
possible_verbs = []
for verb in context:
# inflected verbs
if 'བྱ་ཚིག' in verb.keys():
possible_verbs.append(verb['བྱ་ཚིག'])
# non-inflected verbs (གཟུགས་མི་འགྱུར་བ།)
else:
possible_verbs.append(inflected)
# de-duplicate the verbs
possible_verbs = list(set(possible_verbs))
# add an entry for every possible verb
if inflected in dadrag:
for verb in possible_verbs:
entries.append((inflected+'ད', '/'+verb))
else:
for verb in possible_verbs:
if inflected == verb:
entries.append((inflected, '='))
else:
entries.append((inflected, '/'+verb))
tib_sorted = sorted(entries, key=lambda x: collator.getSortKey(x[0]))
lines = ['{} {}'.format(inflected, lemma) for inflected, lemma in tib_sorted]
write_file('output/parsed_verbs.txt', '\n'.join(lines))
| StarcoderdataPython |
1744126 | import io
from typing import BinaryIO, Optional
from .base import CIOType, CFixedType
from .buffer import c_buffer
from ..encoding import auto_decode
__all__ = [
'CStringType',
'c_str',
'CSizedStringType',
'c_sized_str',
]
def _auto_encode(s: str, encoding) -> bytes:
return s.encode(encoding if encoding is not None else 'utf-8')
class CStringType(CIOType):
"""
Overview:
Simple string type.
It should end with a single ``\\x00``, which is quite common in C language.
"""
def __init__(self, encoding=None):
"""
Constructor of :class:`CStringType`.
:param encoding: Encoding type, default is ``None`` which means auto-detect the encodings.
"""
self.__encoding = encoding
@property
def encoding(self) -> Optional[str]:
"""
Encoding type.
"""
return self.__encoding
def read(self, file: BinaryIO) -> str:
"""
Read simple string value.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:return: String value.
"""
b = bytearray()
while True:
bt = file.read(1)[0]
if bt:
b.append(bt)
else:
break
return auto_decode(bytes(b), self.__encoding)
def write(self, file: BinaryIO, val: str):
"""
Write simple string value to binary IO object.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:param val: String to write.
"""
if not isinstance(val, str):
raise TypeError(f'String value expected, but {repr(val)} found.')
file.write(_auto_encode(val, self.__encoding) + b'\x00')
c_str = CStringType()
"""
Overview:
Reading and writing a simple string, which ends with a single ``\\x00``.
Examples::
>>> import io
>>> from hbutils.binary import c_str
>>>
>>> with io.BytesIO(
... b'kdsfjldsjflkdsmgds\\x00'
... b'\\xd0\\x94\\xd0\\xbe\\xd0\\xb1\\xd1\\x80\\xd1\\x8b\\xd0\\xb9 \\xd0'
... b'\\xb2\\xd0\\xb5\\xd1\\x87\\xd0\\xb5\\xd1\\x80\\x00'
... b'\\xa4\\xb3\\xa4\\xf3\\xa4\\xd0\\xa4\\xf3\\xa4\\xcf\\x00'
... b'\\xcd\\xed\\xc9\\xcf\\xba\\xc3\\x00'
... ) as file:
... print(c_str.read(file))
... print(c_str.read(file))
... print(c_str.read(file))
... print(c_str.read(file))
kdsfjldsjflkdsmgds
Добрый вечер
こんばんは
晚上好
>>> with io.BytesIO() as file:
... c_str.write(file, "kdsfjld")
... c_str.write(file, "Добрый")
... print(file.getvalue())
b'kdsfjld\\x00\\xd0\\x94\\xd0\\xbe\\xd0\\xb1\\xd1\\x80\\xd1\\x8b\\xd0\\xb9\\x00'
"""
class CSizedStringType(CFixedType):
"""
Overview:
Sized string type.
It should have a fixed size, which is defined like ``char s[size]`` in C language.
"""
def __init__(self, size: int, encoding=None):
"""
Constructor of :class:`CStringType`.
:param size: Size of the string's space.
:param encoding: Encoding type, default is ``None`` which means auto-detect the encodings.
"""
CFixedType.__init__(self, size)
self.__encoding = encoding
self._buffer = c_buffer(size)
@property
def encoding(self) -> Optional[str]:
"""
Encoding type.
"""
return self.__encoding
def read(self, file: BinaryIO) -> str:
"""
Read sized string value.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:return: String value.
"""
bytes_ = self._buffer.read(file)
with io.BytesIO(bytes_ + b'\x00') as bf:
return c_str.read(bf)
def write(self, file: BinaryIO, val: str):
"""
Write sized string value to binary IO object.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:param val: String to write.
"""
if not isinstance(val, str):
raise TypeError(f'String value expected, but {repr(val)} found.')
self._buffer.write(file, _auto_encode(val, self.__encoding))
def c_sized_str(size: int) -> CSizedStringType:
"""
Overview:
Reading and writing a sized string, which occupies a fixed space.
:param size: Size of the string's space.
Examples::
>>> import io
>>> from hbutils.binary import c_sized_str
>>>
>>> with io.BytesIO(
... b'kdsfjld\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
... b'\\xd0\\x94\\xd0\\xbe\\xd0\\xb1\\xd1\\x80\\xd1\\x8b\\xd0\\xb9\\x00\\x00\\x00'
... ) as file:
... print(c_sized_str(15).read(file))
... print(c_sized_str(15).read(file))
kdsfjld
Добрый
>>> with io.BytesIO() as file:
... c_sized_str(15).write(file, "kdsfjld")
... c_sized_str(15).write(file, "Добрый")
... print(file.getvalue())
b'kdsfjld\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xd0\\x94\\xd0\\xbe\\xd0\\xb1\\xd1\\x80\\xd1\\x8b\\xd0\\xb9\\x00\\x00\\x00'
"""
return CSizedStringType(size)
| StarcoderdataPython |
6703249 | # Copyright (c) 2015-2021 <NAME> and contributors.
# mc3 is open-source software under the MIT license (see LICENSE).
__all__ = [
'ROOT',
'ignore_system_exit',
'parray',
'saveascii', 'loadascii',
'savebin', 'loadbin',
'isfile',
'burn',
'default_parnames',
]
import os
import sys
import functools
import numpy as np
from .log import Log
ROOT = os.path.realpath(os.path.dirname(__file__) + '/../..') + '/'
def ignore_system_exit(func):
"""Decorator to ignore SystemExit exceptions."""
@functools.wraps(func)
def new_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except SystemExit:
return None
return new_func
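# Example (a doctest-style sketch): a SystemExit raised inside the wrapped
# function is swallowed and None is returned instead of terminating:
#
# >>> @ignore_system_exit
# ... def quitter():
# ...     raise SystemExit
# >>> quitter() is None
# True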
def parray(string):
"""
Convert a string containing a list of white-space-separated (and/or
newline-separated) values into a numpy array.
"""
if string == 'None':
return None
try: # If they can be converted into doubles, do it:
return np.asarray(string.split(), np.double)
except: # Else, return a string array:
return string.split()
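# Example (sketch): numeric strings become float arrays, other strings a
# list of tokens, and the literal 'None' maps to None:
#
# >>> parray('1 2 3.5')
# array([1. , 2. , 3.5])
# >>> parray('a b')
# ['a', 'b']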
def saveascii(data, filename, precision=8):
"""
Write (numeric) data to ASCII file.
Parameters
----------
data: 1D/2D numeric iterable (ndarray, list, tuple, or combination)
Data to be stored in file.
filename: String
File where to store the arrlist.
precision: Integer
Maximum number of significant digits of values.
Example
-------
>>> import numpy as np
>>> import mc3.utils as mu
>>> a = np.arange(4) * np.pi
>>> b = np.arange(4)
>>> c = np.logspace(0, 12, 4)
>>> outfile = 'delete.me'
>>> mu.saveascii([a,b,c], outfile)
>>> # This will produce this file:
>>> with open(outfile) as f:
>>> print(f.read())
0 0 1
3.1415927 1 10000
6.2831853 2 1e+08
9.424778 3 1e+12
"""
# Force it to be a 2D ndarray:
data = np.array(data, ndmin=2).T
# Save arrays to ASCII file:
with open(filename, 'w') as f:
for parvals in data:
f.write(' '.join(f'{v:9.{precision:d}g}' for v in parvals) + '\n')
def loadascii(filename):
"""
Extract data from file and store in a 2D ndarray (or list of arrays
if not square). Blank or comment lines are ignored.
Parameters
----------
filename: String
Name of file containing the data to read.
Returns
-------
array: 2D ndarray or list
See parameters description.
"""
# Open and read the file:
lines = []
for line in open(filename, 'r'):
if not line.startswith('#') and line.strip() != '':
lines.append(line)
# Count number of lines:
npars = len(lines)
# Extract values:
ncolumns = len(lines[0].split())
array = np.zeros((npars, ncolumns), np.double)
for i, line in enumerate(lines):
array[i] = line.strip().split()
array = np.transpose(array)
return array
def savebin(data, filename):
"""
Write data variables into a numpy npz file.
Parameters
----------
data: List of data objects
Data to be stored in file. Each array must have the same length.
filename: String
File where to store the arrlist.
Note
----
This wrapper around np.savez() preserves the data type of list and
tuple variables when the file is opened with loadbin().
Example
-------
>>> import mc3.utils as mu
>>> import numpy as np
>>> # Save list of data variables to file:
>>> datafile = 'datafile.npz'
>>> indata = [np.arange(4), 'one', np.ones((2,2)), True, [42], (42, 42)]
>>> mu.savebin(indata, datafile)
>>> # Now load the file:
>>> outdata = mu.loadbin(datafile)
>>> for data in outdata:
>>> print(repr(data))
array([0, 1, 2, 3])
'one'
array([[ 1., 1.],
[ 1., 1.]])
True
[42]
(42, 42)
"""
# Get the number of elements to determine the key's fmt:
ndata = len(data)
fmt = len(str(ndata))
key = []
for i, datum in enumerate(data):
dkey = 'file{:{}d}'.format(i, fmt)
# Encode in the key if a variable is a list or tuple:
if isinstance(datum, list):
dkey += '_list'
elif isinstance(datum, tuple):
dkey += '_tuple'
elif isinstance(datum, str):
dkey += '_str'
elif isinstance(datum, bool):
dkey += '_bool'
key.append(dkey)
# Use a dictionary so savez() include the keys for each item:
d = dict(zip(key, data))
np.savez(filename, **d)
def loadbin(filename):
"""
Read a binary npz array, casting list and tuple variables into
their original data types.
Parameters
----------
filename: String
Path to file containing the data to be read.
Return
------
data: List
List of objects stored in the file.
Example
-------
See example in savebin().
"""
# Unpack data:
npz = np.load(filename)
data = []
for key, val in sorted(npz.items()):
data.append(val[()])
# Check if val is a str, bool, list, or tuple:
if '_' in key:
exec('data[-1] = ' + key[key.find('_')+1:] + '(data[-1])')
return data
def isfile(input, iname, log, dtype, unpack=True, not_none=False):
"""
Check if an input is a file name; if it is, read it.
Generate error messages if that is the case.
Parameters
----------
input: Iterable or String
The input variable.
iname: String
Input-variable name.
log: File pointer
If not None, print message to the given file pointer.
dtype: String
File data type, choose between 'bin' or 'ascii'.
unpack: Bool
If True, return the first element of a read file.
not_none: Bool
If True, throw an error if input is None.
"""
# Set the loading function depending on the data type:
if dtype == 'bin':
load = loadbin
elif dtype == 'ascii':
load = loadascii
else:
log.error(
f"Invalid data type '{dtype}', must be either 'bin' or 'ascii'.",
tracklev=-3)
# Check if the input is None, throw error if requested:
if input is None:
if not_none:
log.error(f"'{iname}' is a required argument.", tracklev=-3)
return None
# Check that it is an iterable:
if not np.iterable(input):
log.error(f'{iname} must be an iterable or a file name.', tracklev=-3)
# Check if it is a string, a string in a list, or an array:
if isinstance(input, str):
ifile = input
elif isinstance(input[0], str):
ifile = input[0]
else:
return input
# It is a file name:
if not os.path.isfile(ifile):
log.error(f"{iname} file '{ifile}' not found.", tracklev=-3)
if unpack: # Unpack (remove outer dimension) if necessary
return load(ifile)[0]
return load(ifile)
def burn(Zdict=None, burnin=None, Z=None, zchain=None, sort=True):
"""
Return a posterior distribution removing the burnin initial iterations
of each chain from the input distribution.
Parameters
----------
Zdict: dict
A dictionary (as in mc3's output) containing a posterior distribution
(Z) and number of iterations to burn (burnin).
burnin: Integer
Number of iterations to remove from the start of each chain.
If specified, it overrides value from Zdict.
Z: 2D float ndarray
Posterior distribution (of shape [nsamples,npars]) to consider
if Zdict is None.
zchain: 1D integer ndarray
Chain indices for the samples in Z (used only of Zdict is None).
sort: Bool
If True, sort the outputs by chain index.
Returns
-------
posterior: 2D float ndarray
Burned posterior distribution.
zchain: 1D integer ndarray
Burned zchain array.
zmask: 1D integer ndarray
Indices that transform Z into posterior.
Examples
--------
>>> import mc3.utils as mu
>>> import numpy as np
>>> # Mock a posterior-distribution output:
>>> Z = np.expand_dims([0., 1, 10, 20, 30, 11, 31, 21, 12, 22, 32], axis=1)
>>> zchain = np.array([-1, -1, 0, 1, 2, 0, 2, 1, 0, 1, 2])
>>> Zdict = {'posterior':Z, 'zchain':zchain, 'burnin':1}
>>> # Simply apply burn() into the dict:
>>> posterior, zchain, zmask = mu.burn(Zdict)
>>> print(posterior[:,0])
[11. 12. 21. 22. 31. 32.]
>>> print(zchain)
[0 0 1 1 2 2]
>>> print(zmask)
[ 5 8 7 9 6 10]
>>> # Samples were sorted by chain index, but one can prevent with:
>>> posterior, zchain, zmask = mu.burn(Zdict, sort=False)
>>> print(posterior[:,0])
[11. 31. 21. 12. 22. 32.]
>>> # One can also override the burn-in samples:
>>> posterior, zchain, zmask = mu.burn(Zdict, burnin=0)
>>> print(posterior[:,0])
[10. 11. 12. 20. 21. 22. 30. 31. 32.]
>>> # Or apply directly to arrays:
>>> posterior, zchain, zmask = mu.burn(Z=Z, zchain=zchain, burnin=1)
>>> print(posterior[:,0])
[11. 12. 21. 22. 31. 32.]
"""
if Zdict is None and (Z is None or zchain is None or burnin is None):
raise ValueError(
'Need to input either Zdict or all three of burnin, Z, and zchain')
if Zdict is not None:
Z = Zdict['posterior']
zchain = Zdict['zchain']
if burnin is None:
burnin = Zdict['burnin']
mask = np.zeros_like(zchain, bool)
nchains = np.amax(zchain) + 1
for c in range(nchains):
mask[np.where(zchain == c)[0][burnin:]] = True
if sort:
zsort = np.lexsort([zchain])
zmask = zsort[np.where(mask[zsort])]
else:
zmask = np.where(mask)[0]
# Values accepted for posterior stats:
posterior = Z[zmask]
zchain = zchain[zmask]
return posterior, zchain, zmask
def default_parnames(npars):
"""
Create an array of parameter names with sequential indices.
Parameters
----------
npars: Integer
Number of parameters.
Results
-------
1D string ndarray of parameter names.
"""
namelen = len(str(npars))
return np.array([f'Param {i+1:0{namelen}d}' for i in range(npars)])
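# Example (sketch): indices are zero-padded to a common width:
#
# >>> default_parnames(11)[0]
# 'Param 01'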
| StarcoderdataPython |
8148429 | import sys
# ######################
if len(sys.argv) != 2:
print "need an input file"
exit(1)
f = open(sys.argv[1])
transitions = {}
starting = None
for line in f:
line = line.strip()
if "=>" in line:
line = line.split("=>")
start = line[0].strip()
end = line[1].strip()
if start not in transitions:
transitions[start] = []
transitions[start].append(end)
else:
starting = line
molecules = set()
for i in range(0, len(starting)):
for base in transitions:
if starting[i:i+len(base)] == base:
for transition in transitions[base]:
new = starting[:i] + transition + starting[i+len(base):]
molecules.add(new)
print molecules, len(molecules)
| StarcoderdataPython |
11395308 | <reponame>ali493/pyro
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 15:27:04 2016
@author: alex
"""
import numpy as np
###########################
# Load libs
###########################
from AlexRobotics.dynamic import Manipulator
from AlexRobotics.control import linear
from AlexRobotics.control import ComputedTorque
from AlexRobotics.planning import RandomTree
from AlexRobotics.control import DPO
###########################
# Objectives
###########################
x_start = np.array([-3.0, 0.0])
x_goal = np.array([ 0.0, 0.0])
###########################
# Create objects
###########################
Robot = Manipulator.OneLinkManipulator()
PD = linear.PD( kp = 5 , kd = 2 )
PID = linear.PID( kp = 5 , kd = 2 , ki = 4 )
CTC = ComputedTorque.ComputedTorqueController( Robot )
SLD = ComputedTorque.SlidingModeController( Robot )
RRT = RandomTree.RRT( Robot , x_start )
VI = DPO.ValueIteration1DOF( Robot , 'quadratic' )
############################
# Params
############################
tmax = 8 # max motor torque
Robot.u_ub = np.array([ tmax]) # Control Upper Bounds
Robot.u_lb = np.array([-tmax]) # Control Lower Bounds
RRT.x_start = x_start
RRT.discretizeactions( 3 )
RRT.dt = 0.1
RRT.goal_radius = 0.3
RRT.max_nodes = 5000
RRT.max_solution_time = 5
RRT.dyna_plot = True
RRT.dyna_node_no_update = 10
RRT.traj_ctl_kp = 25
RRT.traj_ctl_kd = 10
PID.dt = 0.001
CTC.w0 = 2
SLD.lam = 1
SLD.nab = 0
SLD.D = 5
###########################
# Offline Planning
###########################
#RRT.find_path_to_goal( x_goal )
#RRT.plot_2D_Tree()
###########################
# Offline Optimization
###########################
#VI.first_step()
#VI.load_data( 'data/' + 'R1' + 'quadratic' )
#VI.compute_steps(1)
#
## Plot Value Iteration Results
#ValueIterationAlgo.plot_raw()
#ValueIterationAlgo.plot_J_nice( 2 )
###########################
# Assign controller
###########################
#Robot.ctl = PD.ctl
#Robot.ctl = PID.ctl
#Robot.ctl = CTC.ctl
Robot.ctl = SLD.ctl
#Robot.ctl = RRT.trajectory_controller
#VI.assign_interpol_controller()
###########################
# Simulation
###########################
Robot.plotAnimation( x_start , tf=10, n=10001, solver='euler' )
###########################
# Plots
###########################
Robot.Sim.phase_plane_trajectory()
Robot.Sim.phase_plane_trajectory( PP_OL = False , PP_CL = True )
Robot.Sim.plot_CL()
###########################
# and more
###########################
#from AlexRobotics.dynamic import CustomManipulator
#BoeingArm = CustomManipulator.BoeingArm()
#BoeingArm.plot3DAnimation( x0 = np.array([0.2,0,0,0,0,0]) )
# Hold script in console
import matplotlib.pyplot as plt
plt.show() | StarcoderdataPython |
1634639 | <filename>crnn/metrics/__init__.py
# -*- coding: utf-8 -*-
# @Time : 2021/2/22 17:17
# @Author : JianjinL
# @eMail : <EMAIL>
# @File : __init__
# @Software : PyCharm
# @Description: accuracy metrics
from crnn.metrics.sequenceAcc import SequenceAccuracy
from crnn.metrics.editDistanceAcc import EditDistance
from configs.config import params
if params['accuracy'] == 'EditDistance':
Accuracy = EditDistance
else:
Accuracy = SequenceAccuracy
| StarcoderdataPython |
1727202 | <filename>src/dashView/initializeData.py
from src.kMerAlignmentData import KMerAlignmentData
from src.kMerPCAData import KMerPCAData
from src.kMerScatterPlotData import KMerScatterPlotData
from src.processing import Processing
import src.layout.plot_theme_templates as ptt
import plotly.express as px
from src.secStructure import SecStructure
# starts preprocessing to calculate k-mer frequencies, etc.
# data: file list
# selected: two files, which are processed
# k: k-mer length
# peak: peak position where sequences should be aligned
# top: number of best values
# feature: number of T or k-mer frequency for PCAs
def initData(data, selected, k, peak, top, feature, sec_struct_data, no_sec_peak):
process = Processing(data, selected, k, peak, top, feature, False, sec_struct_data, no_sec_peak)
return process
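# Minimal call sketch (file names and values below are illustrative
# assumptions, not app defaults):
#
# process = initData(data=uploaded_files, selected=['f1.fa', 'f2.fa'],
#                    k=5, peak=None, top=10, feature="1",
#                    sec_struct_data=None, no_sec_peak=-1)
# fig = getScatterPlot(process)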
# gets RNA-structure template(s), dotbracket-string(s) and color-scale/-vector data
# process: object containing all information about settings and files
# norm_vector: normalization vector for element-string 2-mer
# normalization_option: status (-1= no normalization, 0= for A.thaliana, 1= custom rates) for normalization
# no_seq_peak: status (-1= no data,0= False,1= True) if peak position in RNA-Structure should be considered
def getTemplateSecondaryStructure(process, norm_vector, normalization_option, no_seq_peak):
color_scale = px.colors.sequential.Viridis # used color-scale
# checks if and how normalization should be done
if normalization_option == 1:
process.setNormVector(norm_vector)
elif normalization_option == 0:
at_norm_vector = process.getATnormVector()
process.setNormVector(at_norm_vector)
# initialize SecStructure object to calculate RNA-Structure data
# list containing template(s) and dotbracket-string representation(s)
templates_dotbrs = SecStructure.processData(process)
# method is called if at least one structural data file is available
file1_t_d = templates_dotbrs[0]
file2_t_d = None
file1_template = file1_t_d[0]
file2_template = None
# check if more than one template was generated (=> more than one structural data file available)
if len(templates_dotbrs) > 1:
file2_t_d = templates_dotbrs[1]
file2_template = file2_t_d[0]
# get color data based on given template
heat_map_coloring = SecStructure.createHeatMapColoring(process, file1_template, file2_template,
no_seq_peak)
# color-vector, highest value in color-vector, not matched 2-mers (should be empty for 2-mer)
color1, color_domain_max1, not_matched1 = heat_map_coloring[0]
color2, color_domain_max2, not_matched2 = [None, None, None]
if len(heat_map_coloring) > 1:
color2, color_domain_max2, not_matched2 = heat_map_coloring[1]
return file1_t_d, file2_t_d, color1, color2, color_domain_max1, color_domain_max2, color_scale
# gets alignment data
# process: object, which contains information for further calculation-processes
def getAlignmentData(process):
alignment_lists, f1_name, f2_name = KMerAlignmentData.processData(process)
algn1 = alignment_lists[0]
algn2 = alignment_lists[1]
# if peak position is not given, record-type alignment must be casted to strings
if process.getSettings().getPeak() is None:
algn1 = [str(e.seq) for e in algn1]
algn2 = [str(e.seq) for e in algn2]
return algn1, algn2, f1_name, f2_name
# gets data for scatterplot and creates scatterplot-figure
# process: object, which contains information for further calculation-processes
def getScatterPlot(process):
result = KMerScatterPlotData.processData(process)
df = result[0]
# list of k-mers
label = result[1]
file_names = result[2]
fig = px.scatter(df, x=file_names[0], y=file_names[1], hover_name=label,
color='highlight',
color_discrete_map={"TOP {}-mer".format(process.getSettings().getK()): "red",
"{}-mer".format(process.getSettings().getK()): "black"},
title='Scatterplot of k-Mer occurrences (#)',
opacity=0.55,
size="size_score",
hover_data={'highlight': False, file_names[0]: True, file_names[1]: True, 'size_score': False},
)
fig.update_layout(dict(template=ptt.custom_plot_template, legend=dict(title=None)),
title=dict(font_size=20))
fig.update_xaxes(title="#k-Mer of " + file_names[0], title_font=dict(size=15))
fig.update_yaxes(title="#k-Mer of " + file_names[1], title_font=dict(size=15))
return fig
# gets PCA data and creates two PCA figures
# process: object, which contains information for further calculation-processes
def getPCA(process):
pca_dfs = KMerPCAData.processData(process)
pca_df1 = pca_dfs[0]
pca_df2 = pca_dfs[1]
file_name1 = pca_dfs[2]
file_name2 = pca_dfs[3]
top_list1 = pca_dfs[4]
top_list2 = pca_dfs[5]
# if feature was changed, name for color-scale and highlighting must also be changed
feature = process.getSettings().getFeature()
if feature is "1":
feature_name = 'Frequency'
colorscale_feat_name = feature_name
elif feature is "2":
feature_name = 'T'
colorscale_feat_name = '#T'
elif feature is "3":
feature_name = 'A'
colorscale_feat_name = '#A'
elif feature is "4":
feature_name = 'C'
colorscale_feat_name = '#C'
else:
feature_name = 'G'
colorscale_feat_name = '#G'
feature_df1 = top_list1[feature_name]
feature_df2 = top_list2[feature_name]
pca_df1 = pca_df1.join(feature_df1)
pca_df2 = pca_df2.join(feature_df2)
figures = []
for p in [pca_df1, pca_df2]:
fig = px.scatter(p, x='PC1', y='PC2', hover_name=p.index.tolist(),
color=feature_name,
opacity=0.6,
color_continuous_scale='plasma',
hover_data={"PC1": False, "PC2": False})
fig.update_layout(template=ptt.custom_plot_template, xaxis=dict(zeroline=False, showline=True),
yaxis=dict(zeroline=False, showline=True), coloraxis_colorbar=dict(
title=colorscale_feat_name))
fig.update_xaxes(title_font=dict(size=15))
fig.update_yaxes(title_font=dict(size=15))
fig.update_traces(marker=dict(size=12, line=dict(width=2,
color='DarkSlateGrey')))
figures.append(fig)
return figures, file_name1, file_name2
| StarcoderdataPython |
3228869 | from django.db import models
class LiveQuerySet(models.query.QuerySet):
def delete(self):
# Override Django's built-in default.
self.soft_delete()
def soft_delete(self):
self.update(live=False)
def hard_delete(self):
# Default Django behavior.
super(LiveQuerySet, self).delete()
def live(self):
return self.filter(live=True)
def non_dead(self):
return self.live()
    def dead(self):
        # soft_delete() sets live=False, so dead rows are live=False, not NULL
        return self.filter(live=False)
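
# Usage sketch (the model and field names are assumptions, not part of this
# file): attach the queryset as a manager so model code picks up the
# soft-delete behavior. QuerySet.as_manager() is standard Django.
#
#   class Post(models.Model):
#       live = models.BooleanField(default=True)
#       objects = LiveQuerySet.as_manager()
#
# Post.objects.live() then returns only non-deleted rows, and
# Post.objects.filter(...).delete() soft-deletes instead of removing rows.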
| StarcoderdataPython |
6514485 | import kabusapi
url = "localhost"
port = "18081" # 検証用, 本番用は18080
# 初期設定 PUSH配信にトークン・パスワードは不要
api = kabusapi.Context(url, port, )
# 受信用関数 情報が受信される度にここが呼ばれる
@api.websocket
def recieve(msg):
print(msg)
# ここで処理を行う
# 受信開始
api.websocket.run()
| StarcoderdataPython |
6599444 | <filename>cola/amt_connector/publish_hits.py
'''publish batches of HITs on MTurk'''
import time
import json
import configparser
import os
import aws_config
from slurk_link_generator import insert_names_and_tokens
RESULTS = []
SLIDES = ['https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_001.jpeg',
'https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_002.jpeg',
'https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_003.jpeg',
'https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_004.jpeg',
'https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_005.jpeg',
'https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_006.jpeg',
'https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_007.jpeg',
'https://raw.githubusercontent.com/nattari/cola_instructions/master/cola_inst_008.jpeg']
HTML = open('./CoLA.html', 'r').read()
QUESTION_XML = """
<HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd">
<HTMLContent><![CDATA[{}]]></HTMLContent>
<FrameHeight>650</FrameHeight>
</HTMLQuestion>"""
QUESTION = QUESTION_XML.format(HTML)
Q_ATTR = {
# Amount of assignments per HIT
'MaxAssignments': 1,
    # How long the task is available on MTurk (1 hour)
'LifetimeInSeconds': 60*60*1,
# How much time Workers have in order to complete each task (20 minutes)
'AssignmentDurationInSeconds': 60*20,
    # the HIT is automatically approved after this many seconds (0.5 day)
'AutoApprovalDelayInSeconds': 60*720,
# The reward we offer Workers for each task
'Reward': '0.10',
'Title': 'Play our Chat Game for 2 workers and earn up to 0.85$ in 3 minutes!',
'Keywords': 'dialogue, game',
    'Description': 'You and your partner need to discuss and reason \
together. It is important in this game that both \
of you must reach a common agreement.'
}
def publish(number_of_hits):
    '''publish HITs with generated login URLs inserted into the predefined HTML template'''
link = insert_names_and_tokens(number_of_hits)
for login_url in link:
create(login_url)
def create(login_url):
    '''fill the HIT template with the login URL and instruction slides, then create the HIT on MTurk'''
print(login_url)
question = QUESTION.replace('${Link}', login_url).\
replace('${Image1}', SLIDES[0]).\
replace('${Image2}', SLIDES[1]).\
replace('${Image3}', SLIDES[2]).\
replace('${Image4}', SLIDES[3]).\
replace('${Image5}', SLIDES[4]).\
replace('${Image6}', SLIDES[5]).\
replace('${Image7}', SLIDES[6]).\
replace('${Image8}', SLIDES[7])
#print(question)
mturk_connector = aws_config.ConnectToMTurk()
#mturk_connector.create_command_qualification()
mturk = mturk_connector.mturk
new_hit = mturk.create_hit(
**Q_ATTR,
Question=question,
QualificationRequirements=[
#{
# 'QualificationTypeId' : '3ETJLUMS0DM8X13DGYGLAJ6V7SNU3X',
# 'Comparator' : 'NotIn',
# 'IntegerValues' :
# [
# 6, 7, 8, 9, 10
# ],
# 'ActionsGuarded' : 'PreviewAndAccept'
#},
{
'QualificationTypeId' : '00000000000000000071',
'Comparator' : 'In',
'LocaleValues' : [
{'Country':'GB'}, {'Country':'US'},
{'Country':'AU'}, {'Country':'CA'},
{'Country':'IE'}, {'Country':'DE'}
],
'ActionsGuarded': 'PreviewAndAccept'
},
{
'QualificationTypeId' : '00000000000000000040',
'Comparator' : 'GreaterThanOrEqualTo',
'IntegerValues' : [
2000
],
'ActionsGuarded': 'PreviewAndAccept'
}
#{
# 'QualificationTypeId': '3X8OU3XHWD1ZRF1SJZ3XJDGXPEXDUV',
# 'Comparator': 'EqualTo',
# 'IntegerValues': [100]
#}
]
)
RESULTS.append({
'link': login_url,
'hit_id': new_hit['HIT']['HITId']
})
print('A new HIT has been created. You can preview it here:')
print('https://worker.mturk.com/mturk/preview?groupId=' + new_hit['HIT']['HITGroupId'])
print('HITID = ' + new_hit['HIT']['HITId'] + ' (Use to Get Results)')
if __name__ == "__main__":
CONFIG = configparser.ConfigParser()
CONFIG.read('config.ini')
SESSION = CONFIG['session']['name']
    HITS = CONFIG.getint('session', 'hits')
publish(HITS)
if not os.path.isdir('./published/' + SESSION):
os.mkdir('./published/' + SESSION)
MOMENT = time.strftime("%Y-%b-%d__%H_%M_%S", time.localtime())
with open('./published/' + SESSION + '/data_'+ MOMENT +'.json', 'w') as outfile:
json.dump(RESULTS, outfile)
| StarcoderdataPython |
1829195 | <filename>setup.py
#!/usr/bin/env python
import sys
import os
from setuptools import setup
from tempita_lite import __version__ as version
setup(name='Tempita-lite',
version=version,
description="A very small text templating language",
long_description="""\
Tempita-lite is a small templating language for text substitution.
Based on Tempita but a reduced set of functionality and bundled as
only one file usable as python module. Easy to embedded in your own
project.
It's just a handy little templating language for when your project outgrows
``string.Template`` or ``%`` substitution or ``.format()``.
It's small, simple and extendable.
""",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords='templating template language html',
author='<NAME>',
author_email='<EMAIL>',
url='https://bitbucket.org/tds/tempita-lite',
license='MIT',
packages=['tempita_lite'],
tests_require=['pytest'],
test_suite='pytest',
include_package_data=True,
zip_safe=True,
)
| StarcoderdataPython |
6667061 | from PIL import Image
import math
import sys
def getbytes(bitData):
    # pad to a multiple of four 2-bit values so the last byte can be assembled
    bitData = list(bitData) + [0] * (-len(bitData) % 4)
    byteLength = math.ceil(len(bitData) / 4)
byteData = bytearray(byteLength)
bitConvert = [0] * 4
for i in range(byteLength):
bitConvert[0] = bitData[4*i]
bitConvert[1] = (bitData[4*i + 1])<<2
bitConvert[2] = (bitData[4*i + 2])<<4
bitConvert[3] = (bitData[4*i + 3])<<6
byteData[i] = bitConvert[0] + bitConvert[1] + bitConvert[2] + bitConvert[3]
byteDataOutput = bytes(byteData)
return byteDataOutput
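
# Packing example: bitData holds 2-bit values and four of them form one byte,
# least-significant pair first, e.g. [1, 2, 3, 0] -> 1 + (2<<2) + (3<<4) + (0<<6) = 0x39.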
def getMaxSize(imgData):
maxSize = imgData.size[0]*imgData.size[1]*6
return maxSize
def fileSizeStoreBits(maxSize):
storeBit = math.log2(maxSize)
storePixels = math.ceil(storeBit/6)
storeBits = storePixels*3
return storeBits
imageFileName = sys.argv[1]
messageFileName = sys.argv[2]
img = Image.open(imageFileName)
pixels = img.load()
maxSize = getMaxSize(img)
storeBitsSize = fileSizeStoreBits(maxSize)
bitFileSize = [0] * storeBitsSize
for i in range(math.ceil(storeBitsSize/3)):
bitFileSize[3*i] = (pixels[img.size[0]-1,img.size[1]-1-i][0])&3
bitFileSize[3*i+1] = (pixels[img.size[0]-1,img.size[1]-1-i][1])&3
bitFileSize[3*i+2] = (pixels[img.size[0]-1,img.size[1]-1-i][2])&3
bitLength = 0
for i in range(storeBitsSize):
bitLength = bitLength + (bitFileSize[i]<<(2*i))
messageBits = [0] * bitLength
position = 0
finish = False
for i in range(img.size[0]):
for j in range(img.size[1]):
currentBitsInt = [0]*3
currentBitsInt[0] = pixels[i,j][0]&3
currentBitsInt[1] = pixels[i,j][1]&3
currentBitsInt[2] = pixels[i,j][2]&3
for k in range(3):
if position < bitLength:
messageBits[position] = currentBitsInt[k]
position += 1
else:
finish = True
if finish:
break
if finish:
break
message = open(messageFileName,"wb")
messageBytes = getbytes(messageBits)
message.write(messageBytes)
message.close() | StarcoderdataPython |
9713342 | from theplease.specific.git import git_support
@git_support
def match(command):
return (' rm ' in command.script and
'error: the following file has local modifications' in command.output and
'use --cached to keep the file, or -f to force removal' in command.output)
@git_support
def get_new_command(command):
command_parts = command.script_parts[:]
index = command_parts.index('rm') + 1
command_parts.insert(index, '--cached')
command_list = [u' '.join(command_parts)]
command_parts[index] = '-f'
command_list.append(u' '.join(command_parts))
return command_list
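
# Illustration (the file name is hypothetical): for a failing `git rm foo.txt`,
# get_new_command suggests ['git rm --cached foo.txt', 'git rm -f foo.txt'].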
| StarcoderdataPython |
3389102 | <gh_stars>1-10
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.RunScriptAction import RunScriptAction
import os
class PassExtension(Extension):
def __init__(self):
super(PassExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
items = []
pipe = os.popen("if tmux ls > /dev/null 2>&1; then tmux ls; else echo none; fi")
terminal_binary = extension.preferences['terminal_binary']
console_parameters_attach = extension.preferences['console_parameters_attach']
console_parameters_new = extension.preferences['console_parameters_new']
output = pipe.read()
if output.splitlines()[0] != "none":
for line in output.splitlines():
items.append(
ExtensionResultItem(
icon='images/tmux.png',
name='Attach session %s' % line.split(' ')[0],
description=line,
on_enter=RunScriptAction(terminal_binary + ' ' + console_parameters_attach % line.split(' ')[0], None)
)
)
else:
items.append(
ExtensionResultItem(
icon='images/tmux.png',
name='Create a new tmux session',
description='No active tmux sessions found',
on_enter=RunScriptAction(terminal_binary + ' ' + console_parameters_new, None)
)
)
return RenderResultListAction(items)
if __name__ == '__main__':
PassExtension().run()
| StarcoderdataPython |
113659 | from geneal.genetic_algorithms._binary import BinaryGenAlgSolver
from geneal.genetic_algorithms._continuous import ContinuousGenAlgSolver
| StarcoderdataPython |
1771868 | <reponame>FreibergVlad/port-scanner<filename>test/core/layers/inet/ip/test_ip_packet.py
from unittest import TestCase
from nally.core.layers.inet.ip.ip_diff_service_values import IpDiffServiceValues
from nally.core.layers.inet.ip.ip_ecn_values import IpEcnValues
from nally.core.layers.inet.ip.ip_fragmentation_flags import IpFragmentationFlags
from nally.core.layers.inet.ip.ip_packet import IpPacket
#
# DSCP = 0
# total length = 25 bytes (20 + 5)
# identification = 39434
# flags = 0 (no flags set)
# ttl = 64
# protocol = Test (253)
# source IP = 192.168.1.8
# destination IP = 172.16.58.3
# payload = 5 * 0x58 bytes
#
PACKET_DUMP_1 = "450000199a0a000040fd91dec0a801087e0c0e435858585858"
#
# DSCP = 0xb8 (EF PHB + Non-ECN)
# total length = 20 bytes
# identification = 29320
# flags = 0
# ttl = 64
# protocol = TCP (6)
# source IP = 192.168.1.8
# destination IP = 8.8.8.8
#
PACKET_DUMP_2 = "45b8001472880000400635e4c0a8010808080808"
#
# DSCP = 0xbb (EF PHB + CE)
# total length = 20 bytes
# identification = 55463
# flags = 0
# ttl = 64
# protocol = TCP (6)
# source IP = 192.168.1.8
# destination IP = 8.8.8.8
#
PACKET_DUMP_3 = "45bb0014d8a700004006cfc1c0a8010808080808"
# test protocol type according to
# https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
TEST_PROTO_TYPE = 253
class TestIpv4Packet(TestCase):
def test_to_bytes(self):
ip_packet_1 = IpPacket(
source_addr_str="192.168.1.8",
dest_addr_str="172.16.58.3",
flags=IpFragmentationFlags(),
identification=39434,
protocol=TEST_PROTO_TYPE
) / bytes([0x58] * 5)
hex_dump_1 = ip_packet_1.to_bytes().hex()
self.assertEqual(PACKET_DUMP_1, hex_dump_1)
self.assertEqual(ip_packet_1, IpPacket.from_bytes(ip_packet_1.to_bytes()))
ip_packet_2 = IpPacket(
source_addr_str="192.168.1.8",
dest_addr_str="8.8.8.8",
dscp=IpDiffServiceValues.EF,
flags=IpFragmentationFlags(),
identification=29320
)
hex_dump_2 = ip_packet_2.to_bytes().hex()
self.assertEqual(PACKET_DUMP_2, hex_dump_2)
self.assertEqual(ip_packet_2, IpPacket.from_bytes(ip_packet_2.to_bytes()))
ip_packet_3 = IpPacket(
source_addr_str="192.168.1.8",
dest_addr_str="8.8.8.8",
dscp=IpDiffServiceValues.EF,
ecn=IpEcnValues.CE,
flags=IpFragmentationFlags(),
identification=55463
)
hex_dump_3 = ip_packet_3.to_bytes().hex()
self.assertEqual(PACKET_DUMP_3, hex_dump_3)
self.assertEqual(ip_packet_3, IpPacket.from_bytes(ip_packet_3.to_bytes()))
def test_packet_creation_with_invalid_fields(self):
# pass too long payload
invalid_ip_packet = IpPacket(
source_addr_str="10.10.128.44",
dest_addr_str="192.168.127.12",
) / bytearray(65535)
self.assertRaises(ValueError, invalid_ip_packet.to_bytes)
# pass too long Identification field
self.assertRaises(
ValueError,
IpPacket,
source_addr_str="10.10.128.44",
dest_addr_str="192.168.127.12",
identification=pow(2, 16)
)
# pass too long Fragment Offset field
self.assertRaises(
ValueError,
IpPacket,
source_addr_str="10.10.128.44",
dest_addr_str="192.168.127.12",
fragment_offset=pow(2, 13)
)
| StarcoderdataPython |
8061460 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import zbar
import requests
from PIL import Image
from io import BytesIO
from numpy import array, uint8
import base64
import urllib
import json
import logging
scanner = zbar.Scanner()
def decode(string):
try:
return str(
base64.urlsafe_b64decode(
bytes(
string.strip('/') + (4 - len(string.strip('/')) % 4) * '=' + '====',
'utf-8')), 'utf-8')
except Exception as e:
print(e, string)
raise Exception(e, string)
def encode(decoded):
return base64.urlsafe_b64encode(
bytes(str(decoded), 'utf-8')).decode('utf-8').replace('=', '')
def parse(uri, default_title='untitled'):
server = dict()
stripped = re.sub('ssr?://', '', uri)
if uri[2] == ':':
# ss
if '#' in uri:
stripped, remarks = stripped.split('#')[:2]
server['remarks'] = urllib.parse.unquote(remarks)
else:
server['remarks'] = default_title
decoded = decode(stripped)
data = decoded.split('@', maxsplit=1)
server['method'], server['password'] = data[0].split(':', maxsplit=1)
server['server'], server['server_port'] = data[1].rsplit(':', maxsplit=1)
elif uri[2] == 'r':
# ssr
decoded = decode(stripped)
data = decoded.split('/?')
[
server['server'],
server['server_port'],
server['ssr_protocol'],
server['method'],
server['obfs'],
password_enc,
] = data[0].rsplit(':', maxsplit=5)
server['password'] = decode(password_enc)
server['remarks'] = default_title
if len(data) > 1:
appendix = data[1].split('&')
content = {i.split('=')[0]: i.split('=')[1] for i in appendix}
for key in content:
server[key] = decode(content[key])
if server['ssr_protocol'] != 'origin' and server['obfs'] != 'plain':
server['remarks'] += ' SSR'
return server
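
# Illustrative call (values hypothetical):
#   parse('ss://<base64 of "method:password@host:port">#name')
# returns a dict with keys 'method', 'password', 'server', 'server_port'
# and 'remarks' == 'name'.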
def scanNetQR(img_url, headers=None):
if img_url.startswith('http'):
img_bytes = requests.get(img_url, headers=headers).content
elif img_url.startswith('data:image'):
img_bytes = base64.decodebytes(bytes(img_url.split(',')[1], 'utf-8'))
img = array(Image.open(BytesIO(img_bytes)))
info = scanner.scan(img.astype(uint8) * 255) + scanner.scan((1 - img).astype(uint8) * 255)
if len(info) == 0:
raise ValueError('scanner fail to identify qr code')
return info[0].data.decode('utf-8')
def get_href(string, pattern='.*'):
found = re.findall(r'(?<=<a\s+href=")[^"]+(?=">%s</a>)' % pattern, string)
if found:
return found[0]
def gen_uri(servers):
'''{
"server": server['server'],
"server_ipv6": "::",
"server_port": int(server['server_port']),
"local_address": "127.0.0.1",
"local_port": 1080,
"password": server['password'],
"timeout": 300,
"udp_timeout": 60,
"method": method,
"protocol": ssr_protocol,
"protocol_param": "",
"obfs": obfs,
"obfs_param": "",
"fast_open": False,
"workers": 1,
"group": "ss.pythonic.life"
},'''
result_servers = list()
for server in servers:
if 'password' not in server:
server['password'] = ''
try:
for key in ['method', 'password', 'server', 'server_port']:
assert key in server, '{key} not in server data'.format(key)
for k, v in (('ssr_protocol', 'origin'), ('obfs', 'plain')):
if k in server and server[k] == v:
server.pop(k)
is_ss = 'ssr_protocol' not in server and 'obfs' not in server
if is_ss:
# if not completed, it's ss
decoded = '{method}:{password}@{hostname}:{port}'.format(
method=server['method'],
password=server['password'],
hostname=server['server'],
port=server['server_port'],
)
ss_uri = 'ss://{}#{}'.format(
str(base64.urlsafe_b64encode(bytes(decoded, encoding='utf8')), encoding='utf-8'),
urllib.parse.quote(server['remarks'])
)
# ssr formatted account info
ssr_decoded = ':'.join([
server['server'],
server['server_port'],
'origin',
server['method'],
'plain',
encode(server['password']),
])
ssr_decoded += '/?remarks={remarks}&group={group}'.format(
remarks=encode(server['remarks']),
group=encode("ssr_luck"),
)
                ssr_uri = 'ssr://{encoded}'.format(
                    encoded=encode(ssr_decoded)
                )
else:
decoded_head = ':'.join([str(i) for i in [
server['server'],
server['server_port'],
server.get('ssr_protocol', 'origin'),
server['method'],
server.get('obfs', 'plain'),
encode(server['password'])
]])
appendix = [(key, server[key]) for key in ['obfsparam', 'protoparam', 'remarks'] if key in server]
appendix.append(('group', 'ssr_luck'))
appendix_str = '&'.join(['{key}={val}'.format(
key=item[0],
val=encode(item[1])
) for item in appendix])
decoded = '/?'.join([decoded_head, appendix_str])
                ss_uri = 'ssr://{encoded}'.format(encoded=encode(decoded))
ssr_uri = ss_uri
server['uri'] = ss_uri
server['ssr_uri'] = ssr_uri
server['decoded_url'] = urllib.parse.unquote(ss_uri)
server_data_to_json = {
"server": server['server'],
"server_ipv6": "::",
"server_port": int(server['server_port']),
"local_address": "127.0.0.1",
"local_port": 1080,
"password": server['password'],
"group": "ssr_luck"
}
if 'ssr_protocol' in server:
server['protocol'] = server['ssr_protocol']
for key in ['obfs', 'method', 'protocol', 'obfsparam', 'protoparam', 'udpport', 'uot']:
if key in server:
server_data_to_json[key] = server.get(key)
server['json'] = json.dumps(
server_data_to_json,
ensure_ascii=False,
indent=2,
)
result_servers.append(server)
except (KeyError, EOFError, ValueError) as e:
logging.exception(e, stack_info=True)
return result_servers
| StarcoderdataPython |
1989811 | import argparse
import os
import time
import warnings
import data_generator
from rpy2.robjects.packages import importr
import rpy2.robjects as robjects
from threading import Thread
R = robjects.r
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
import numpy as np
parser = argparse.ArgumentParser(description='Description of your program')
parser.add_argument('-n','--n_samples', help='Number of training samples', default=1000, type=int)
parser.add_argument('-s','--seed', help='Random seed', default=1, type=int)
parser.add_argument('--endo', help='Endogeneity', default=0.5, type=float)
parser.add_argument('--heartbeat', help='Use philly heartbeat', action='store_true')
parser.add_argument('--results', help='Results file', default='nonpar_iv.csv')
args = parser.parse_args()
DONE = False # global variable for the heartbeat
def dummy_heartbeat(freq = 20, timeout=100):
'''
Heartbeat function that outputs a dummy progress bar.
Only necessary for our cluster.
'''
i = 0
global DONE
while not DONE:
print("PROGRESS: %1.2f%%" % (100*float(i) / timeout))
        for _ in range(freq):
# check if we're done every second
if DONE:
break
time.sleep(1)
i += 1
if args.heartbeat:
t = Thread(target=lambda:dummy_heartbeat(60, 10))
t.start()
def test_points(data_fn, ntest=5000, has_latent=False, debug=False):
'''
Generate and return test set points with their true values.
'''
seed = np.random.randint(1e9)
try:
# test = True ensures we draw test set images
x, z, t, y, g_true = data_fn(ntest, seed, test=True)
except ValueError:
warnings.warn("Too few images, reducing test set size")
ntest = int(ntest * 0.7)
# test = True ensures we draw test set images
x, z, t, y, g_true = data_fn(ntest, seed, test=True)
## re-draw to get new independent treatment and implied response
t = np.linspace(np.percentile(t, 2.5), np.percentile(t, 97.5), ntest).reshape(-1, 1)
## we need to make sure z _never_ does anything in these g functions (fitted and true)
## above is necesary so that reduced form doesn't win
if has_latent:
x_latent, _, _, _, _ = data_fn(ntest, seed, images=False)
y = g_true(x_latent, z, t)
else:
y = g_true(x, z, t)
y_true = y.flatten()
return (x,t), y_true
def to_array(x):
'''
Convert r vector to numpy array
'''
return np.array(list(x))
def fit_and_evaluate(x,z,t,y,df):
'''
Fit and evaluate non-parametric regression using <NAME> and Renault (2011)
Implemented in the `np` package in R.
See [the np package documation](https://cran.r-project.org/web/packages/np/np.pdf) for details.
'''
npr=importr('np')
y_R = robjects.FloatVector(list(y.flatten()))
(x_eval, t_eval), y_true = test_points(df, 10000)
mod = npr.npregiv(y_R, t, z, x=x, zeval=t_eval, xeval=x_eval,
method="Tikhonov", p=0, optim_method ="BFGS")
return ((y_true - to_array(mod.rx2('phi.eval')))**2).mean()
def prepare_file(filename):
if not os.path.exists(filename):
with open(filename, 'w') as f:
f.write('n,seed,endo,mse\n')
df = lambda n, s, test: data_generator.demand(n, s, ypcor=args.endo, test=test)
x,z,t,y,g = df(args.n_samples, args.seed, False)
mse = fit_and_evaluate(x,z,t,y,df)
DONE = True # turn off the heartbeat
prepare_file(args.results)
with open(args.results, 'a') as f:
f.write('%d,%d,%f,%f\n' % (args.n_samples, args.seed, args.endo, mse))
| StarcoderdataPython |
267139 | <gh_stars>0
# Rinobot-plugin python helpers
# API docs at http://github.com/rinocloud/rinobot-plugin
# Authors:
# <NAME> <<EMAIL>>
from .plugin import *
| StarcoderdataPython |
6440160 | # Generated by Django 3.2.7 on 2021-10-24 00:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('room', '0010_auto_20211023_2132'),
('asks', '0007_auto_20211021_2149'),
]
operations = [
migrations.AlterModelOptions(
name='question',
options={'verbose_name': 'Pergunta', 'verbose_name_plural': 'Perguntas'},
),
migrations.AlterField(
model_name='question',
name='answered',
field=models.BooleanField(default=False, verbose_name='Respondida'),
),
migrations.AlterField(
model_name='question',
name='creation',
field=models.DateTimeField(auto_now_add=True, verbose_name='Data de criação'),
),
migrations.AlterField(
model_name='question',
name='creator',
field=models.CharField(max_length=128, verbose_name='Criador'),
),
migrations.AlterField(
model_name='question',
name='down_votes',
field=models.PositiveIntegerField(default=0, verbose_name='Votos Negativos'),
),
migrations.AlterField(
model_name='question',
name='text',
field=models.TextField(max_length=512, verbose_name='Texto'),
),
migrations.AlterField(
model_name='question',
name='theme',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='room.theme', verbose_name='Tema'),
),
migrations.AlterField(
model_name='question',
name='up_votes',
field=models.PositiveIntegerField(default=0, verbose_name='Votos Positivos'),
),
]
| StarcoderdataPython |
8128309 | x = None
y = None
z = None
import time
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
x = 10
y = 20
z = 12
mc.player.setPos(x, y, z)
while True:
x, y, z = mc.player.getPos()
mc.setBlock(x, y, z, 35.1)
time.sleep(.2)
| StarcoderdataPython |
328230 | <reponame>ShawYN/StyleGAN_Image_Detecting
import torch
import torch.nn as nn
import torchvision
import numpy as np
from .BasicModule import BasicModule
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
__all__ = ['ResNet152']
def Conv1(in_planes, places, stride=2):
return nn.Sequential(
nn.Conv2d(in_channels=in_planes,out_channels=places,kernel_size=7,stride=stride,padding=3, bias=False),
nn.BatchNorm2d(places),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
class Bottleneck(nn.Module):
def __init__(self,in_places,places, stride=1,downsampling=False, expansion = 4):
super(Bottleneck,self).__init__()
self.expansion = expansion
self.downsampling = downsampling
self.bottleneck = nn.Sequential(
nn.Conv2d(in_channels=in_places,out_channels=places,kernel_size=1,stride=1, bias=False),
nn.BatchNorm2d(places),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(places),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=places, out_channels=places*self.expansion, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(places*self.expansion),
)
if self.downsampling:
self.downsample = nn.Sequential(
nn.Conv2d(in_channels=in_places, out_channels=places*self.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(places*self.expansion)
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
out = self.bottleneck(x)
if self.downsampling:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet152(BasicModule):
def __init__(self,blocks=[3, 8, 36, 3], num_classes=1000, expansion = 4):
super(ResNet152,self).__init__()
self.expansion = expansion
self.conv1 = Conv1(in_planes = 3, places= 64)
self.layer1 = self.make_layer(in_places = 64, places= 64, block=blocks[0], stride=1)
self.layer2 = self.make_layer(in_places = 256,places=128, block=blocks[1], stride=2)
self.layer3 = self.make_layer(in_places=512,places=256, block=blocks[2], stride=2)
self.layer4 = self.make_layer(in_places=1024,places=512, block=blocks[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(2048,num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def make_layer(self, in_places, places, block, stride):
layers = []
layers.append(Bottleneck(in_places, places,stride, downsampling =True))
for i in range(1, block):
layers.append(Bottleneck(places*self.expansion, places))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
'''
def ResNet50():
return ResNet152([3, 4, 6, 3])
def ResNet101():
return ResNet152([3, 4, 23, 3])
def ResNet152():
return ResNet152([3, 8, 36, 3])
if __name__=='__main__':
#model = torchvision.models.resnet50()
model = ResNet152()
print(model)
input = torch.randn(1, 3, 224, 224)
out = model(input)
print(out.shape)
''' | StarcoderdataPython |
5195037 | <gh_stars>0
try:
from urllib.parse import urlparse, ParseResult
except ImportError:
from urlparse import urlparse, ParseResult
from twilio.rest import Client as TwilioClient
from twilio.rest.api import Api as TwilioApi
from twilio.base.exceptions import TwilioRestException
from twilio.base import deserialize, values
from twilio.rest.api.v2010.account.application import ApplicationInstance
from twilio.rest.api.v2010.account import AccountInstance
from twilio.rest.api.v2010.account.call import CallInstance
from twilio.rest.api.v2010.account.recording import RecordingInstance
from twilio.rest.api.v2010.account.transcription import TranscriptionInstance
from twilio.rest.api.v2010.account.available_phone_number.local import LocalInstance, LocalList, LocalPage
from twilio.rest.api.v2010.account.available_phone_number.toll_free import TollFreeInstance
from twilio.rest.api.v2010.account.incoming_phone_number import IncomingPhoneNumberInstance
from twilio.rest.fax import Fax as TwilioFax
from twilio.rest.fax.v1 import V1 as TwilioV1
import sys
from six import u
import os
def patched_str(self):
""" Try to pretty-print the exception, if this is going on screen. """
def red(words):
return u("\033[31m\033[49m%s\033[0m") % words
def white(words):
return u("\033[37m\033[49m%s\033[0m") % words
def blue(words):
return u("\033[34m\033[49m%s\033[0m") % words
def teal(words):
return u("\033[36m\033[49m%s\033[0m") % words
def get_uri(code):
return "https://www.signalwire.com/docs/errors/{0}".format(code)
# If it makes sense to print a human readable error message, try to
# do it. The one problem is that someone might catch this error and
# try to display the message from it to an end user.
if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
msg = (
"\n{red_error} {request_was}\n\n{http_line}"
"\n\n{sw_returned}\n\n{message}\n".format(
red_error=red("HTTP Error"),
request_was=white("Your request was:"),
http_line=teal("%s %s" % (self.method, self.uri)),
sw_returned=white(
"Signalwire returned the following information:"),
message=blue(str(self.msg))
))
if self.code:
msg = "".join([msg, "\n{more_info}\n\n{uri}\n\n".format(
more_info=white("More information may be available here:"),
uri=blue(get_uri(self.code))),
])
return msg
else:
return "HTTP {0} error: {1}".format(self.status, self.msg)
def patched_applicationinstance_init(self, version, payload, account_sid, sid=None):
"""
Initialize the ApplicationInstance
:returns: twilio.rest.api.v2010.account.application.ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
super(ApplicationInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'friendly_name': payload['friendly_name'],
'message_status_callback': payload.get('message_status_callback', ''), #missing
'sid': payload['sid'],
'sms_fallback_method': payload['sms_fallback_method'],
'sms_fallback_url': payload['sms_fallback_url'],
'sms_method': payload['sms_method'],
'sms_status_callback': payload['sms_status_callback'],
'sms_url': payload['sms_url'],
'status_callback': payload['status_callback'],
'status_callback_method': payload['status_callback_method'],
'uri': payload['uri'],
'voice_caller_id_lookup': payload['voice_caller_id_lookup'],
'voice_fallback_method': payload['voice_fallback_method'],
'voice_fallback_url': payload['voice_fallback_url'],
'voice_method': payload['voice_method'],
'voice_url': payload['voice_url'],
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'sid': sid or self._properties['sid'], }
def patched_accountinstance_init(self, version, payload, sid=None):
"""
Initialize the AccountInstance
:returns: twilio.rest.api.v2010.account.AccountInstance
:rtype: twilio.rest.api.v2010.account.AccountInstance
"""
super(AccountInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'auth_token': payload['auth_token'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'friendly_name': payload['friendly_name'],
'owner_account_sid': payload.get('owner_account_sid', ''),
'sid': payload['sid'],
'status': payload['status'],
'subresource_uris': payload['subresource_uris'],
'type': payload['type'],
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
def patched_localinstance_init(self, version, payload, account_sid, country_code):
"""
Initialize the LocalInstance
:returns: twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance
"""
super(LocalInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'friendly_name': payload['friendly_name'],
'phone_number': payload['phone_number'],
'lata': payload['lata'],
'locality': payload.get('locality', ''), #missing
'rate_center': payload['rate_center'],
'latitude': deserialize.decimal(payload['latitude']),
'longitude': deserialize.decimal(payload['longitude']),
'region': payload['region'],
'postal_code': payload['postal_code'],
'iso_country': payload['iso_country'],
'beta': payload['beta'],
'capabilities': payload['capabilities'],
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'country_code': country_code, }
def patched_locallist_stream(self, area_code=values.unset, contains=values.unset,
starts_with=values.unset, ends_with=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset,
exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, limit=None, page_size=None):
"""
Streams LocalInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode area_code: Find phone numbers in the specified area code.
:param unicode contains: A pattern on which to match phone numbers.
:param bool sms_enabled: This indicates whether the phone numbers can receive text messages.
:param bool mms_enabled: This indicates whether the phone numbers can receive MMS messages.
:param bool voice_enabled: This indicates whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Indicates whether the response includes phone numbers which require any Address.
:param bool exclude_local_address_required: Indicates whether the response includes phone numbers which require a local Address.
:param bool exclude_foreign_address_required: Indicates whether the response includes phone numbers which require a foreign Address.
:param bool beta: Include phone numbers new to the Twilio platform.
:param unicode near_number: Given a phone number, find a geographically close number within Distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within Distance miles. (US/Canada only)
:param unicode distance: Specifies the search radius for a Near- query in miles. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific Local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality. (US/Canada only)
:param bool fax_enabled: This indicates whether the phone numbers can receive faxes.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
area_code=area_code,
contains=contains,
starts_with=starts_with,
ends_with=ends_with,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
in_locality=in_locality,
fax_enabled=fax_enabled,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def patched_locallist_list(self, area_code=values.unset, contains=values.unset,
starts_with=values.unset, ends_with=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, limit=None, page_size=None):
"""
Lists LocalInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode area_code: Find phone numbers in the specified area code.
:param unicode contains: A pattern on which to match phone numbers.
:param bool sms_enabled: This indicates whether the phone numbers can receive text messages.
:param bool mms_enabled: This indicates whether the phone numbers can receive MMS messages.
:param bool voice_enabled: This indicates whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Indicates whether the response includes phone numbers which require any Address.
:param bool exclude_local_address_required: Indicates whether the response includes phone numbers which require a local Address.
:param bool exclude_foreign_address_required: Indicates whether the response includes phone numbers which require a foreign Address.
:param bool beta: Include phone numbers new to the Twilio platform.
:param unicode near_number: Given a phone number, find a geographically close number within Distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within Distance miles. (US/Canada only)
:param unicode distance: Specifies the search radius for a Near- query in miles. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific Local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality. (US/Canada only)
:param bool fax_enabled: This indicates whether the phone numbers can receive faxes.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance]
"""
return list(self.stream(
area_code=area_code,
contains=contains,
starts_with=starts_with,
ends_with=ends_with,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
in_locality=in_locality,
fax_enabled=fax_enabled,
limit=limit,
page_size=page_size,
))
def patched_locallist_page(self, area_code=values.unset, contains=values.unset,
starts_with=values.unset, ends_with=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of LocalInstance records from the API.
Request is executed immediately
:param unicode area_code: Find phone numbers in the specified area code.
:param unicode contains: A pattern on which to match phone numbers.
:param bool sms_enabled: This indicates whether the phone numbers can receive text messages.
:param bool mms_enabled: This indicates whether the phone numbers can receive MMS messages.
:param bool voice_enabled: This indicates whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Indicates whether the response includes phone numbers which require any Address.
:param bool exclude_local_address_required: Indicates whether the response includes phone numbers which require a local Address.
:param bool exclude_foreign_address_required: Indicates whether the response includes phone numbers which require a foreign Address.
:param bool beta: Include phone numbers new to the Twilio platform.
:param unicode near_number: Given a phone number, find a geographically close number within Distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within Distance miles. (US/Canada only)
:param unicode distance: Specifies the search radius for a Near- query in miles. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific Local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality. (US/Canada only)
:param bool fax_enabled: This indicates whether the phone numbers can receive faxes.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of LocalInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalPage
"""
params = values.of({
'AreaCode': area_code,
'Contains': contains,
'StartsWith': starts_with,
'EndsWith': ends_with,
'SmsEnabled': sms_enabled,
'MmsEnabled': mms_enabled,
'VoiceEnabled': voice_enabled,
'ExcludeAllAddressRequired': exclude_all_address_required,
'ExcludeLocalAddressRequired': exclude_local_address_required,
'ExcludeForeignAddressRequired': exclude_foreign_address_required,
'Beta': beta,
'NearNumber': near_number,
'NearLatLong': near_lat_long,
'Distance': distance,
'InPostalCode': in_postal_code,
'InRegion': in_region,
'InRateCenter': in_rate_center,
'InLata': in_lata,
'InLocality': in_locality,
'FaxEnabled': fax_enabled,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return LocalPage(self._version, response, self._solution)
def patched_incomingphonenumberinstance_init(self, version, payload, account_sid, sid=None):
"""
Initialize the IncomingPhoneNumberInstance
:returns: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberInstance
:rtype: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberInstance
"""
super(IncomingPhoneNumberInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid', ''), #missing
'address_sid': payload.get('address_sid', ''), #missing
'address_requirements': payload.get('address_requirements', ''), #missing
'api_version': payload['api_version'],
'beta': payload['beta'],
'capabilities': payload['capabilities'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'friendly_name': payload['friendly_name'],
'identity_sid': payload.get('identity_sid', ''), #missing,
'phone_number': payload['phone_number'],
'origin': payload.get('origin', ''), #missing,
'sid': payload['sid'],
'sms_application_sid': payload['sms_application_sid'],
'sms_fallback_method': payload['sms_fallback_method'],
'sms_fallback_url': payload['sms_fallback_url'],
'sms_method': payload['sms_method'],
'sms_url': payload['sms_url'],
'status_callback': payload['status_callback'],
'status_callback_method': payload['status_callback_method'],
'trunk_sid': payload.get('trunk_sid', ''), #missing,
'uri': payload['uri'],
'voice_application_sid': payload['voice_application_sid'],
'voice_caller_id_lookup': payload['voice_caller_id_lookup'],
'voice_fallback_method': payload['voice_fallback_method'],
'voice_fallback_url': payload['voice_fallback_url'],
'voice_method': payload['voice_method'],
'voice_url': payload['voice_url'],
'emergency_status': payload.get('emergency_status', ''), #missing,
'emergency_address_sid': payload.get('emergency_address_sid', ''), #missing,
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'sid': sid or self._properties['sid'], }
def patched_tollfreeinstance_init(self, version, payload, account_sid, country_code):
"""
Initialize the TollFreeInstance
:returns: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
"""
super(TollFreeInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'friendly_name': payload['friendly_name'],
'phone_number': payload['phone_number'],
'lata': payload['lata'],
'locality': payload.get('locality', ''), #missing
'rate_center': payload['rate_center'],
'latitude': deserialize.decimal(payload['latitude']),
'longitude': deserialize.decimal(payload['longitude']),
'region': payload['region'],
'postal_code': payload['postal_code'],
'iso_country': payload['iso_country'],
'beta': payload['beta'],
'capabilities': payload['capabilities'],
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'country_code': country_code, }
def patched_recordinginstance_init(self, version, payload, account_sid, sid=None):
"""
Initialize the RecordingInstance
:returns: twilio.rest.api.v2010.account.call.recording.RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance
"""
super(RecordingInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'call_sid': payload['call_sid'],
'conference_sid': payload['conference_sid'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'start_time': deserialize.rfc2822_datetime(payload['start_time']),
'duration': payload['duration'],
'sid': payload['sid'],
'price': deserialize.decimal(payload['price']),
'uri': payload['uri'],
'encryption_details': payload.get('encryption_details', ''), #missing
# 'encryption_details': payload['encryption_details'],
'price_unit': payload['price_unit'],
'status': payload['status'],
'channels': deserialize.integer(payload.get('channels', 1)), #missing
# 'channels': deserialize.integer(payload['channels']),
'source': payload['source'],
'error_code': deserialize.integer(payload['error_code']),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'sid': sid or self._properties['sid'], }
def patched_transcriptioninstance_init(self, version, payload, account_sid, sid=None):
"""
Initialize the TranscriptionInstance
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
super(TranscriptionInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'duration': payload['duration'],
'price': deserialize.decimal(payload['price']),
'price_unit': payload['price_unit'],
'recording_sid': payload['recording_sid'],
'sid': payload['sid'],
'status': payload['status'],
'transcription_text': payload['transcription_text'],
'type': payload.get('type', ''), #missing parameter
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'sid': sid or self._properties['sid'], }
def patched_fax_init(self, twilio):
"""
Initialize the Fax Domain
:returns: Domain for Fax
:rtype: twilio.rest.fax.Fax
"""
super(TwilioFax, self).__init__(twilio)
self.base_url = ''
self.account_sid = twilio.account_sid
# Versions
self._v1 = None
def patched_fax_v1_init(self, domain):
"""
Initialize the V1 version of Fax
:returns: V1 version of Fax
:rtype: twilio.rest.fax.v1.V1.V1
"""
super(TwilioV1, self).__init__(domain)
self.version = "2010-04-01/Accounts/" + domain.account_sid
self._faxes = None
class Client(TwilioClient):
def __init__(self, *args, **kwargs):
if 'signalwire_space_url' in kwargs:
signalwire_space_url = kwargs.pop('signalwire_space_url', "api.signalwire.com")
else:
signalwire_space_url = os.environ['SIGNALWIRE_SPACE_URL']
p = urlparse(signalwire_space_url, 'http')
netloc = p.netloc or p.path
path = p.path if p.netloc else ''
p = ParseResult('https', netloc, path, *p[3:])
super(Client, self).__init__(*args, **kwargs)
self._api = TwilioApi(self)
self._api.base_url = p.geturl()
TwilioFax.__init__ = patched_fax_init
TwilioV1.__init__ = patched_fax_v1_init
self._fax = TwilioFax(self)
self._fax.base_url = p.geturl()
TwilioRestException.__str__ = patched_str
AccountInstance.__init__ = patched_accountinstance_init
LocalInstance.__init__ = patched_localinstance_init
LocalList.list = patched_locallist_list
LocalList.page = patched_locallist_page
LocalList.stream = patched_locallist_stream
TollFreeInstance.__init__ = patched_tollfreeinstance_init
ApplicationInstance.__init__ = patched_applicationinstance_init
IncomingPhoneNumberInstance.__init__ = patched_incomingphonenumberinstance_init
RecordingInstance.__init__ = patched_recordinginstance_init
TranscriptionInstance.__init__ = patched_transcriptioninstance_init
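
# Minimal usage sketch (the project ID, token and space URL below are
# placeholders):
#
#   client = Client('your-project-id', 'your-api-token',
#                   signalwire_space_url='example.signalwire.com')
#   calls = client.calls.list()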
| StarcoderdataPython |
3495668 | import random
import wsnsimpy.wsnsimpy_tk as wsp
SOURCE = 35
###########################################################
class MyNode(wsp.Node):
tx_range = 100
##################
def init(self):
super().init()
self.recv = False
##################
def run(self):
if self.id == SOURCE:
self.scene.nodecolor(self.id,0,0,0)
self.recv = True
yield self.timeout(2)
self.broadcast()
else:
self.scene.nodecolor(self.id,.7,.7,.7)
##################
def broadcast(self):
self.scene.nodewidth(self.id, 3)
self.log(f"Broadcast message")
self.send(wsp.BROADCAST_ADDR)
##################
def on_receive(self, sender, **kwargs):
self.log(f"Receive message from {sender}")
if self.recv:
self.log(f"Message seen; reject")
return
self.log(f"New message; prepare to rebroadcast")
self.recv = True
self.scene.nodecolor(self.id,1,0,0)
yield self.timeout(random.uniform(0.5,1.0))
self.broadcast()
###########################################################
sim = wsp.Simulator(
until=100,
timescale=1,
visual=True,
terrain_size=(700,700),
title="Flooding Demo")
for x in range(10):
for y in range(10):
px = 50 + x*60 + random.uniform(-20,20)
py = 50 + y*60 + random.uniform(-20,20)
node = sim.add_node(MyNode, (px,py))
node.tx_range = 75
node.logging = True
sim.run()
| StarcoderdataPython |
11299147 | from django.urls import include, path
from django.views.generic import ListView, DetailView
from places.models import Chain, Place
app_name = "places"
urlpatterns = [
path("chains/", ListView.as_view(model=Chain), name="chain_list"),
path("chain/<slug:slug>", DetailView.as_view(model=Chain), name="chain_detail"),
path("places/", ListView.as_view(model=Place), name="index"),
path(
"place/<int:pk>/<slug:slug>/",
DetailView.as_view(model=Place, query_pk_and_slug=True),
name="detail",
),
]
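
# Note: query_pk_and_slug=True makes DetailView look the object up by both pk
# and slug, which guards against enumerating objects through the pk alone.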
| StarcoderdataPython |
374591 | import logging
import yfinance
class StockDataProvider:
def download_between_dates(self, ticker, interval, start, end):
logging.debug("Requesting ticker {}".format(ticker))
opts = dict(
tickers=ticker, interval=interval, start=start, end=end, progress=False
)
return yfinance.download(**opts)
def download_for_period(self, ticker, period, interval):
logging.debug("Requesting ticker {}".format(ticker))
opts = dict(tickers=ticker, interval=interval, period=period, progress=False)
return yfinance.download(**opts)
def check(self, ticker):
return self.download_for_period(ticker, "1d", "1d")
stock_data_provider = StockDataProvider()
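
# Example usage ("AAPL" is an arbitrary ticker; requires network access):
#   df = stock_data_provider.download_for_period("AAPL", period="5d", interval="1d")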
| StarcoderdataPython |
3532962 | # 幅優先探索
H, W = map(int, input().split())
S = [input() for _ in range(H)]
def f(i, j):
t = [[-1] * W for _ in range(H)]
t[i][j] = 0
q = [(i, j)]
while q:
y, x = q.pop(0)
if y - 1 >= 0 and S[y - 1][x] != '#' and t[y - 1][x] == -1:
t[y - 1][x] = t[y][x] + 1
q.append((y - 1, x))
if y + 1 < H and S[y + 1][x] != '#' and t[y + 1][x] == -1:
t[y + 1][x] = t[y][x] + 1
q.append((y + 1, x))
if x - 1 >= 0 and S[y][x - 1] != '#' and t[y][x - 1] == -1:
t[y][x - 1] = t[y][x] + 1
q.append((y, x - 1))
if x + 1 < W and S[y][x + 1] != '#' and t[y][x + 1] == -1:
t[y][x + 1] = t[y][x] + 1
q.append((y, x + 1))
return max(max(tt) for tt in t)
result = 0
for i in range(H):
for j in range(W):
if S[i][j] != '#':
result = max(result, f(i, j))
print(result)
| StarcoderdataPython |
8146253 | <filename>refactor_csp.py
# -*- coding: utf-8 -*-
import os
import shutil
import glob
from bs4 import BeautifulSoup
# beer emoji
created_marc = u"\U0001F37A"
# sushi emoji
completed_marc = u"\U0001F363"
def main():
current_dir = os.getcwd()
target_htmls = []
for f in get_all_html_files(current_dir):
if(f.split(".")[-1] == "html"):
target_htmls.append(f)
for target in target_htmls:
refactor_csp_main(target)
print(completed_marc)
def get_all_html_files(cwd):
for root, dirs, files in os.walk(cwd):
if '.git' in dirs:
dirs.remove('.git')
if '.cvs' in dirs:
dirs.remove('.cvs')
yield root
for file in files:
yield os.path.join(root, file)
def refactor_csp_main(target_file):
    # store the target HTML file name
    htmlfile = target_file
    html = open(htmlfile, "r")
soup = BeautifulSoup(html)
    # get the list of embedded script tags that need handling
scripts = getEmbedScriptTags(soup)
if len(scripts) > 0:
        # create a copy of the original HTML file
        createHtmlCopy(htmlfile)
        # write the embedded scripts out to external files
        generated_js_names = createScriptFiles(scripts, htmlfile)
        # rewrite the script paths inside the HTML
        editHtmlScriptTag(generated_js_names, scripts, soup, htmlfile)
    # close the HTML file
html.close()
def getEmbedScriptTags(soup):
'''
    Check whether the HTML file contains embedded (inline) script tags.
    - If it does: return the list of those script tags
    - If not: return an empty list
'''
scripts = soup.find_all("script")
embed_scripts = []
for script in scripts:
src = script.get('src')
        if src is None:
embed_scripts.append(script)
return embed_scripts
def createHtmlCopy(filename):
'''
    Create a backup copy of the original HTML file.
    Naming convention: index.html.pre_csp
'''
copyfile = "{}.pre_csp".format(filename)
shutil.copy(filename, copyfile)
print(u"{} {}".format(created_marc, copyfile))
return 1
def createScriptFiles(scripts, filename):
    '''
    Write the contents of each inline script out as an external file.
    Naming convention: index.html.0.js, index.html.1.js, ..., index.html.N.js
    '''
reg = "{}.*.js".format(filename)
jss = glob.glob(reg)
    # Find the largest existing N
maxN = -1
for js in jss:
js = js.split('.')
n = int(js[-2])
if n > maxN:
maxN = n
    # Generate the external scripts
    # and return the list of generated file names
nextN = maxN + 1
created_js_name = []
for script in scripts:
js_name = "{}.{}.js".format(filename, nextN)
f = open(js_name, "w")
f.write(script.string.encode('utf8'))
f.close()
print(u"{} {}".format(created_marc, js_name))
        # Keep only the base file name
js_name = js_name.split("/")[-1]
created_js_name.append(js_name)
nextN += 1
return created_js_name
def editHtmlScriptTag(generated_jss, scripts_soup, soup, filename):
    '''
    Edit each script tag: add a src attribute pointing at its generated file.
    '''
for i, script in enumerate(scripts_soup):
script['src'] = generated_jss[i]
script.string = ''
    # Overwrite the HTML file
f = open(filename, "w")
f.write((soup.prettify()).encode('utf8'))
print(u"{} {}".format(created_marc, filename))
f.close()
return scripts_soup
if __name__ == '__main__':
main()
| StarcoderdataPython |
1659284 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Base provider tests."""
import pytest
from flask_babelex import lazy_gettext as _
from invenio_pidstore.errors import PIDDoesNotExistError
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_rdm_records.services.pids.providers import BasePIDProvider
def test_base_provider_create(app, db):
provider = BasePIDProvider()
created_pid = provider.create_by_pid(pid_value="1234", pid_type="testid")
db_pid = PersistentIdentifier.get(pid_value="1234", pid_type="testid")
assert created_pid == db_pid
assert created_pid.pid_value == "1234"
assert created_pid.pid_type == "testid"
assert created_pid.status == PIDStatus.NEW
@pytest.fixture(scope='function')
def base_provider():
"""Application factory fixture."""
return BasePIDProvider(pid_type="testid")
def test_base_provider_create_default_pid_type(app, db, base_provider):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234")
# NOTE: DB level requires pid_type
db_pid = PersistentIdentifier.get(pid_value="1234", pid_type="testid")
assert created_pid == db_pid
assert db_pid.pid_value == "1234"
assert created_pid.pid_type == "testid"
assert created_pid.status == PIDStatus.NEW
def test_base_provider_get_existing_different_pid_type(
app, db, base_provider
):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234", pid_type="diffid")
get_pid = provider.get(pid_value="1234", pid_type="diffid")
assert created_pid == get_pid
assert get_pid.pid_value == "1234"
assert get_pid.pid_type == "diffid"
assert get_pid.status == PIDStatus.NEW
def test_base_provider_get_existing_different_status(
app, db, base_provider
):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234",
status=PIDStatus.RESERVED)
get_pid = provider.get(pid_value="1234")
assert created_pid == get_pid
assert get_pid.pid_value == "1234"
assert get_pid.pid_type == "testid"
assert get_pid.status == PIDStatus.RESERVED
def test_base_provider_reserve(app, db, base_provider):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234")
assert provider.reserve(created_pid, {})
# NOTE: DB level requires pid_type
db_pid = PersistentIdentifier.get(pid_value="1234", pid_type="testid")
assert db_pid.status == PIDStatus.RESERVED
assert db_pid.pid_value == "1234"
def test_base_provider_register(app, db, base_provider):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234")
assert provider.register(created_pid, {})
# NOTE: DB level requires pid_type
db_pid = PersistentIdentifier.get(pid_value="1234", pid_type="testid")
assert db_pid.status == PIDStatus.REGISTERED
assert db_pid.pid_value == "1234"
def test_base_provider_hard_delete(app, db, base_provider):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234")
assert provider.delete(created_pid, {})
# NOTE: DB level requires pid_type
with pytest.raises(PIDDoesNotExistError):
PersistentIdentifier.get(pid_value="1234", pid_type="testid")
def test_base_provider_soft_delete(app, db, base_provider):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234")
assert provider.reserve(created_pid, {})
assert provider.delete(created_pid, {})
# NOTE: DB level requires pid_type
db_pid = PersistentIdentifier.get(pid_value="1234", pid_type="testid")
assert db_pid.status == PIDStatus.DELETED
assert db_pid.pid_value == "1234"
def test_base_provider_get_status(app, db, base_provider):
provider = base_provider
created_pid = provider.create_by_pid(pid_value="1234")
assert provider.get_status(created_pid.pid_value) == PIDStatus.NEW
def test_base_provider_validate_no_values_given(
running_app, db, base_provider, record
):
provider = base_provider
# base has name set to None
success, errors = provider.validate(
record=record, identifier=None, client=None, provider=None)
assert success
assert not errors
def test_base_provider_validate_failure(
running_app, db, base_provider, record
):
provider = base_provider
with pytest.raises(Exception):
provider.validate(
record=record, identifier=None, client=None, provider="fail")
| StarcoderdataPython |
3262349 | <filename>py/gps_building_blocks/cloud/workflows/futures_test.py
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.third_party.gps_building_blocks.py.cloud.workflows.futures."""
import unittest
from gps_building_blocks.cloud.workflows import futures
class TasksTest(unittest.TestCase):
def test_bq_future_should_parse_bq_success_logs(self):
# a fake bq message for job complete
bq_message = {
'protoPayload': {
'status': {},
'serviceData': {
'jobCompletedEvent': {
'job': {
'jobName': {
'projectId': 'test-project',
'jobId': 'test-bq-job-id',
}
}
}
}
},
'resource': {
'type': 'bigquery_resource'
}
}
result = futures.BigQueryFuture.handle_message(bq_message)
self.assertTrue(result.is_success)
self.assertEqual(result.trigger_id, 'test-bq-job-id')
def test_bq_future_should_parse_bq_fail_logs(self):
# a fake bq message for job complete with failed status
bq_message = {
'protoPayload': {
'status': {
'code': 1,
'message': 'test error message'
},
'serviceData': {
'jobCompletedEvent': {
'job': {
'jobName': {
'projectId': 'test-project',
'jobId': 'test-bq-job-id',
}
}
}
}
},
'resource': {
'type': 'bigquery_resource'
}
}
result = futures.BigQueryFuture.handle_message(bq_message)
self.assertFalse(result.is_success)
self.assertEqual(result.trigger_id, 'test-bq-job-id')
self.assertEqual(result.error, 'test error message')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
11283613 | <gh_stars>0
# Read in an Apple II ROM, do some optional modifications,
# then write out the bytes as an Arduino header file
# <NAME>, May 2016
from convert_roms_arduino import *
dir = '/Users/chris/AppleII/ROMs/'
inputFile = 'AppleIIPlus-341-0020-ApplesoftBasicAutostartMonitorF800-2716.bin'
# inputFile = 'Apple IIe CD Enhanced - 342-0304-A - 2764.bin'
f = open(dir + inputFile, 'rb')
data = f.read()
f.close()
data = np.frombuffer(data, dtype=np.uint8).copy()  # frombuffer replaces the deprecated fromstring; copy() makes the array writable
print(dir + inputFile)
# Hack adapted from the Videx Enhancer ][ manual, p. A-4
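# The ROM image is based at $F800, so each patch offset below is (address - 0xF800).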
# FD83: AND #$DF --> AND #$FF
data[0xFD83 - 0xF800] = 0xFF
#
# FD11: AND #$3F --> JSR $FBB4
# FD13: ORA #$40 --> NOP
i = 0xFD11 - 0xF800
data[i:i+4] = [0x20, 0xB4, 0xFB, 0xEA]
#
# FBB4: AND #$7F
# CMP #$20 ; <space> character
# BNE SKIP
# ORA #$40 ; turn on flashing for <space>
# SKIP: RTS
i = 0xFBB4 - 0xF800
new = [0x29, 0x7F, 0xC9, 0x20, 0xD0, 0x02, 0x09, 0x40, 0x60]
data[i:i+len(new)] = new
writeHeaderFile(inputFile, 'ProgData.h', data)
| StarcoderdataPython |
6407328 | import shutil
from pathlib import Path
import gdown
import tqdm
URLS = [
'https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ', # CNN stories
'https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs', # Daily Mail stories
]
DOWNLOAD_PATH = Path('downloaded')
TARGET_PATH = Path('raw_stories')
DOWNLOAD_PATH.mkdir(exist_ok=True)
TARGET_PATH.mkdir(exist_ok=True)
def download_data():
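    """Download the CNN/Daily Mail story archives, extract them, and merge all stories into TARGET_PATH."""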
for url in URLS:
url_id = url.split('id=')[1]
if not (DOWNLOAD_PATH / url_id).exists():
print(f'Downloading {url}...')
gdown.download(url, str(DOWNLOAD_PATH / url_id), quiet=True)
for archive_path in DOWNLOAD_PATH.iterdir():
print(f'Extracting {archive_path}...')
shutil.unpack_archive(archive_path, str(DOWNLOAD_PATH), format='gztar')
story_files = list(DOWNLOAD_PATH.rglob('*.story'))
for story_file in tqdm.tqdm(story_files, desc='Merging folders'):
shutil.move(str(story_file), str(TARGET_PATH))
print(f"Removing '{DOWNLOAD_PATH}'...")
shutil.rmtree(str(DOWNLOAD_PATH), ignore_errors=True)
print('Done.')
if __name__ == '__main__':
download_data()
| StarcoderdataPython |
352706 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 01:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20160413_2209'),
]
operations = [
migrations.AlterModelOptions(
name='provider',
options={'ordering': ['name'], 'verbose_name': 'Provedor', 'verbose_name_plural': 'Provedores'},
),
]
| StarcoderdataPython |
6600184 | <filename>tests/integration/conftest.py
import pytest
from dotlock import resolve
@pytest.fixture(name='aiohttp_resolved_requirements')
async def resolve_aiohttp_requirements(event_loop):
requirements = [
resolve.Requirement(
info=resolve.RequirementInfo.from_specifier_str('aiohttp', '==3.1.2'),
parent=None,
),
]
await resolve.resolve_requirements_list(
requirements=requirements,
package_types=[
resolve.PackageType.bdist_wheel,
resolve.PackageType.sdist,
],
sources=[
'https://pypi.org/pypi',
],
update=False,
)
return requirements
| StarcoderdataPython |
8169289 | #!/usr/bin/python
#coding:utf-8
# ***************************************************************
# Plot a normal distribution curve
# author: pruce
# email: <EMAIL>
# date: 20180919
# ***************************************************************
import numpy as np
import matplotlib.pyplot as plt
def normDistribution():
    mu, sigma, num_bins = 0, 1, 50
    x = mu + sigma * np.random.randn(1000000)
    # Normally distributed data ("normed" was removed from matplotlib; use density)
    n, bins, patches = plt.hist(x, num_bins, density=True, facecolor='black', alpha=0.5)
    # Fitted curve (mlab.normpdf was removed from matplotlib; compute the pdf directly)
    y = np.exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    plt.plot(bins, y, 'r--')
    plt.xlabel('Expectation')
    plt.ylabel('Probability')
    plt.title('$N(0,1)$')
    plt.subplots_adjust(left=0.15)
    plt.show()
normDistribution() | StarcoderdataPython |
202550 | #MenuTitle: Make bottom left node first
# -*- coding: utf-8 -*-
__doc__="""
Makes the bottom left node in each path the first node in all masters
"""
def left(x):
return x.position.x
def bottom(x):
return x.position.y
layers = Glyphs.font.selectedLayers
for aLayer in layers:
for idx, thisLayer in enumerate(aLayer.parent.layers):
for p in thisLayer.paths:
oncurves = filter(lambda n: n.type != "offcurve", list(p.nodes))
n = sorted(sorted(oncurves, key = bottom),key=left)[0]
n.makeNodeFirst() | StarcoderdataPython |
6549123 | """Faça um programa que leia o ano de nascimento de um jovem e informe,
de acordo com a sua idade:
- Se ele ainda vai se alistar ao serviço militar
- Se é a hora de se alistar
- Se já passou do tempo do alistamento
Seu programa também deverá mostrar o tempo que falta ou que passou do prazo.
"""
import datetime
import time
nasc = int(input('Enter your year of birth: '))
hoje = datetime.datetime.today()
alist = hoje.year - nasc
print('Please wait...')
time.sleep(2)
print('Processing...')
time.sleep(2)
if alist == 17:
    print('It is time to enlist')
elif alist < 17:
    falta = 17 - alist
    print('You still have to enlist: {} year(s) to go'.format(falta))
elif alist > 17:
    passou = alist - 17
    print('The enlistment deadline passed {} year(s) ago'.format(passou))
| StarcoderdataPython |
6657613 | <reponame>M155K4R4/Tensorflow
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for builtin_functions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from tensorflow.contrib.py2tf.converters import builtin_functions
from tensorflow.contrib.py2tf.converters import converter_test_base
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BuiltinFunctionsTest(converter_test_base.TestCase):
def test_len(self):
def test_fn(a):
return len(a)
node = self.parse_and_analyze(test_fn, {'len': len})
node = builtin_functions.transform(node, self.ctx)
with self.compiled(node, array_ops.shape) as result:
with self.test_session() as sess:
self.assertEqual(3,
sess.run(
result.test_fn(constant_op.constant([0, 0, 0]))))
def test_print(self):
def test_fn(a):
print(a)
node = self.parse_and_analyze(test_fn, {'print': print})
node = builtin_functions.transform(node, self.ctx)
with self.compiled(node) as result:
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
result.test_fn('a')
self.assertEqual(out_capturer.getvalue(), 'a\n')
finally:
sys.stdout = sys.__stdout__
def test_print_tuple(self):
def test_fn(a, b, c):
print(a, b, c)
node = self.parse_and_analyze(test_fn, {'print': print})
node = builtin_functions.transform(node, self.ctx)
with self.compiled(node) as result:
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
result.test_fn('a', 1, [2, 3])
# It appears that the print output looks odd only under Python 2.
if six.PY2:
self.assertEqual(out_capturer.getvalue(), "('a', 1, [2, 3])\n")
else:
self.assertEqual(out_capturer.getvalue(), 'a 1 [2, 3]\n')
finally:
sys.stdout = sys.__stdout__
if __name__ == '__main__':
test.main()
| StarcoderdataPython |
4979848 | from typing import Pattern
from recognizers_text import RegExpUtility
from ...resources.chinese_date_time import ChineseDateTime
from ..parsers import DateTimeParser
from ..base_merged import MergedParserConfiguration
from .duration_parser import ChineseDurationParser
from .date_parser import ChineseDateParser
from .time_parser import ChineseTimeParser
from .dateperiod_parser import ChineseDatePeriodParser
from .timeperiod_parser import ChineseTimePeriodParser
from .datetime_parser import ChineseDateTimeParser
from .datetimeperiod_parser import ChineseDateTimePeriodParser
from .holiday_parser import ChineseHolidayParser
from .set_parser import ChineseSetParser
class ChineseMergedParserConfiguration(MergedParserConfiguration):
@property
def before_regex(self) -> Pattern:
return self._before_regex
@property
def after_regex(self) -> Pattern:
return self._after_regex
@property
def since_regex(self) -> Pattern:
return self._since_regex
@property
def date_parser(self) -> DateTimeParser:
return self._date_parser
@property
def holiday_parser(self) -> DateTimeParser:
return self._holiday_parser
@property
def time_parser(self) -> DateTimeParser:
return self._time_parser
@property
def date_time_parser(self) -> DateTimeParser:
return self._date_time_parser
@property
def date_period_parser(self) -> DateTimeParser:
return self._date_period_parser
@property
def time_period_parser(self) -> DateTimeParser:
return self._time_period_parser
@property
def date_time_period_parser(self) -> DateTimeParser:
return self._date_time_period_parser
@property
def duration_parser(self) -> DateTimeParser:
return self._duration_parser
@property
def set_parser(self) -> DateTimeParser:
return self._set_parser
def __init__(self):
self._before_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.MergedBeforeRegex)
self._after_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.MergedAfterRegex)
self._since_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.MergedAfterRegex)
self._date_parser = ChineseDateParser()
self._holiday_parser = ChineseHolidayParser()
self._time_parser = ChineseTimeParser()
self._date_time_parser = ChineseDateTimeParser()
self._date_period_parser = ChineseDatePeriodParser()
self._time_period_parser = ChineseTimePeriodParser()
self._date_time_period_parser = ChineseDateTimePeriodParser()
self._duration_parser = ChineseDurationParser()
self._set_parser = ChineseSetParser()
| StarcoderdataPython |
5096573 | <filename>Core/BehaviorImports.py
from Core.Behavior.SimpleBehavior.SimpleBehavior import SimpleBehavior
from Core.Behavior.SimpleBehavior.BlinkBehavior.BlinkBehavior import BlinkBehavior
from Core.Behavior.SimpleBehavior.BlinkBehavior.BlinkBehaviorEaseIn import BlinkBehaviorEaseIn
from Core.Behavior.SimpleBehavior.BlinkBehavior.BlinkBehaviorEaseOut import BlinkBehaviorEaseOut
from Core.Behavior.SimpleBehavior.BlinkBehavior.BlinkBehaviorEaseInOut import BlinkBehaviorEaseInOut
from Core.Behavior.SimpleBehavior.BlinkBehavior.BlinkBehaviorInstant import BlinkBehaviorInstant
from Core.Behavior.SimpleBehavior.BlinkBehavior.BlinkBehaviorLinear import BlinkBehaviorLinear
from Core.Behavior.SimpleBehavior.MoveBehavior.MoveBehaviorCircle import MoveBehaviorCircle
from Core.Behavior.SimpleBehavior.MoveBehavior.MoveBehaviorCurved import MoveBehaviorCurved
from Core.Behavior.SimpleBehavior.MoveBehavior.MoveBehaviorForwardAndBack import MoveBehaviorForwardAndBack
from Core.Behavior.SimpleBehavior.MoveBehavior.MoveBehaviorLoops import MoveBehaviorLoops
from Core.Behavior.SimpleBehavior.MoveBehavior.MoveBehaviorRect import MoveBehaviorRect
from Core.Behavior.SimpleBehavior.MoveBehavior.MoveBehaviorSpikes import MoveBehaviorSpikes
from Core.Behavior.SimpleBehavior.MoveBehavior.MoveBehaviorStraight import MoveBehaviorStraight
from Core.Behavior.ComposedBehavior.ComposedBehavior import ComposedBehavior
from Core.Behavior.ComposedBehavior.HelloBehavior import HelloBehavior
from Core.Behavior.ComposedBehavior.PuppeteerBehavior import PuppeteerBehavior | StarcoderdataPython |
93672 | <reponame>andrewt-cville/fb_recruiting<gh_stars>0
import requests
import json
def get_defYears():
return ['2002', '2003', '2004', '2005', '2006', '2007', '2008','2009','2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020']
def get_header():
return {'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
def get_schoolsList():
return json.loads(open('..//config//schools.json', "r").read())
def create_url247(level, school_id, year):
if (level == 'team'):
return 'https://247sports.com/college/{}/Season/{}-Football/Commits/'.format(school_id,year)
else:
print('ERROR: appropriate levels are team, prospect and recruit')
def get_htmlDir(source, conference, level):
return "..//html//{}//{}//{}//".format(source, conference, level)
def get_availableConferences():
return ['bigten', 'bigtwelve', 'acc', 'sec', 'pactwelve', 'american', 'independents', 'cusa', 'mac', 'mwc', 'sunbelt']
def save_files(filePath, filePersist):
with open(filePath, "w") as write_file:
json.dump(filePersist, write_file)
def save_html(filePath, reqText):
with open(filePath, "w") as write_file:
write_file.write(reqText)
def save_records(folder, filename, listPersist):
with open("..//{}//{}.json".format(folder, filename), "w") as write_file:
json.dump(listPersist, write_file)
databaseName = 'fb_recruiting.db'
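# Field sets used for fuzzy matching of records across sources; "blockers" restrict which records get compared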
sports247FuzzyFields = ['ID', 'IDYR', 'College', 'Year', 'PlayerName', 'HighSchool', 'City', 'State', 'Position']
rivalsFuzzyFields = ['IDYR', 'College', 'Year', 'PlayerName', 'HighSchool', 'City', 'State', 'Position']
nflFuzzyFields = ['ID', 'College', 'Year', 'PlayerName', 'Position']
allconfFuzzyFields = ['ID', 'College', 'PlayerName']
ncaaFuzzyFields = ['ID', 'College', 'PlayerName', 'Position']
allamericanFuzzyFields = ['ID', 'College', 'Year', 'PlayerName']
sports247Blockers = ['College', 'Year']
rivalsBlockers = ['College', 'Year']
nflBlockers = ['College']
allconfBlockers = ['College']
ncaaBlockers = ['College']
allamericanBlockers = ['College']
fuzzyFields = {'Rivals': rivalsFuzzyFields, 'NFL': nflFuzzyFields, 'AllConference': allconfFuzzyFields, 'NCAA': ncaaFuzzyFields, 'AllAmerican': allamericanFuzzyFields, 'Sports247': sports247FuzzyFields}
blockers = {'Rivals': rivalsBlockers, 'NFL': nflBlockers, 'AllConference': allconfBlockers, 'NCAA': ncaaBlockers, 'AllAmerican': allamericanBlockers, 'Sports247': sports247Blockers} | StarcoderdataPython |
3503043 | <filename>francoralite/apps/francoralite_front/views/errors.py
from django.shortcuts import render
from django.utils.translation import gettext as _
def handler403(request, exception=None):
return render(request, 'error.html', {
'exception': exception or _('Accès interdit.'),
}, status=403)
def handler404(request, exception=None):
return render(request, 'error.html', {
'exception': exception or _('Cette fiche n’existe pas.'),
}, status=404)
def handler500(request, exception=None):
return render(request, 'error.html', {
'exception': _('Erreur indéterminée'),
}, status=500)
| StarcoderdataPython |
6698741 | import pytest
import torch
from blackhc.project.utils import cpu_memory
@pytest.mark.forked
def test_cpu_mem_limit():
# 128 MB (128/4M float32)
tensor = torch.empty((128, 1024, 1024 // 4), dtype=torch.float32)
tensor.resize_(1)
cpu_memory.set_cpu_memory_limit(0.25)
    # 512 MB (512/4M float32)
with pytest.raises(RuntimeError):
torch.empty((512, 1024, 1024 // 4), dtype=torch.float32)
| StarcoderdataPython |
5083473 | <gh_stars>0
import logging
from decimal import Decimal
from textwrap import dedent
from dbnd import log_duration, log_metrics
from dbnd_snowflake.snowflake_values import SnowflakeController
logger = logging.getLogger(__name__)
# TODO: Add support for QUERY_TAG
# I.e. Subclass SnowflakeOperator and set session param QUERY_TAG to "dbnd.{dag/task_name/task_id}"
# Then use pass this QUERY_TAG to UI for easier navigation between
# See https://community.snowflake.com/s/article/How-We-Controlled-and-Reduced-Snowflake-Compute-Cost
# https://github.com/snowflakedb/snowflake-connector-python/issues/203
def log_snowflake_resource_usage(
query_text, database, user, connection_string, session_id=None
):
"""
get and log cpu time, run time, disk read, and processed rows.
connection or connection_string is required. supports only psycopg2 connections.
"""
try:
with log_duration("log_snowflake_resource_usage__time_seconds", "system"):
_log_snowflake_resource_usage(
query_text, database, user, connection_string, session_id
)
except Exception as exc:
conn_without_pass = _censor_password(connection_string)
logger.exception(
"Failed to log_redshift_resource_usage (query_text=%s, connection_string=%s)",
query_text,
conn_without_pass,
)
def _log_snowflake_resource_usage(
query_text, database, user, connection_string, session_id=None,
):
# Quick and dirty way to handle optional clause element.
# Might be better to use SQLAlchemy expression language here
if session_id:
query_history = dedent(
"""\
select *
from table({}.information_schema.query_history(dateadd('minutes',-15,current_timestamp()),current_timestamp()))
where LOWER(query_text)=LOWER(%s) and LOWER(user_name)=LOWER(%s) and session_id=%s
order by start_time desc limit 1;"""
).format(database, session_id)
query_params = (query_text, user, session_id)
else:
query_history = dedent(
"""\
select *
from table({}.information_schema.query_history(dateadd('minutes',-15,current_timestamp()),current_timestamp()))
where LOWER(query_text)=LOWER(%s) and LOWER(user_name)=LOWER(%s)
order by start_time desc limit 1;"""
).format(database)
query_params = (query_text, user)
result = _connect_and_query(connection_string, query_history, *query_params)
if not result:
logger.info(
"resource metrics were not found for query '%s', query_params=%s",
query_text,
query_params,
)
log_metrics(
{
"snowflake_query_warning": "No resources info found",
"snowflake_query_text": query_text,
},
source="system",
)
return
metrics = result[0]
key = "snowflake_query_{}".format(
metrics["QUERY_TAG"] if metrics["QUERY_TAG"] else metrics["QUERY_ID"]
)
snowflake_metric_to_ui_name = {
"BYTES_SCANNED": "bytes_scanned",
"COMPILATION_TIME": "compilation_time_milliseconds",
"CREDITS_USED_CLOUD_SERVICES": "credits_used_cloud_services",
"EXECUTION_TIME": "execution_time_milliseconds",
"QUERY_TEXT": "query_text",
"ROWS_PRODUCED": "rows_produced",
"TOTAL_ELAPSED_TIME": "total_elapsed_time_milliseconds",
}
metrics_to_log = {}
for metric, ui_name in snowflake_metric_to_ui_name.items():
if metric in metrics:
value = metrics[metric]
# Quick hack to track decimal values. probably should be handled on a serialization level
if isinstance(value, Decimal):
value = float(value)
metrics_to_log[key + "." + ui_name] = value
log_metrics(metrics_to_log, source="system")
def _connect_and_query(connection_string, query, *params):
""" connect if needed, then query. """
# if (connection is None) and (connection_string is None):
if connection_string is None:
logger.error(
"connection and connection string are None, one of them is required to query redshift"
)
return
with SnowflakeController(connection_string) as snowflake:
return snowflake._query(query, params)
def _censor_password(connection_string):
"""
example connection string:
postgres://user:<EMAIL>:5439/dev
returns:
postgres://user:*****<EMAIL>:5439/dev
"""
if (not connection_string) or ("@" not in connection_string):
return connection_string
split1 = connection_string.split("@")
split2 = split1[0].split(":")
if len(split2) != 3:
return connection_string
split2[-1] = "*****"
split2_join = ":".join(split2)
split1[0] = split2_join
split1_join = "@".join(split1)
return split1_join
| StarcoderdataPython |
3277658 | from collections import defaultdict
from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
from .models import Website
from .models import Category
# Create your views here.
@require_http_methods(["GET"])
def add_website(request):
pass
@require_http_methods(["GET"])
def index(request):
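    # Group sites by category and return the grouped list as JSON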
categorys = Category.objects.filter()
websites = Website.objects.filter()
websites_dict = defaultdict(list)
for x in websites:
if x:
websites_dict[x.website_category].append({
'name': x.website_name,
'url': x.website_url,
# 'logo': f"http://favicon.byi.pw/?url={x.website_url}",
# 'logo': f"https://www.baidu.com/favicon.ico",
'logo': x.website_logo,
'desc': x.website_desc,
'create_time': x.website_create_time,
})
group_websites = [
{
'name': category.name,
'sites': websites_dict.get(category.id),
} for category in categorys if category
]
filter_group_websites = [x for x in group_websites if x.get('sites')]
result = {
'msg': 'ok',
'code': '200',
'data': filter_group_websites,
}
return JsonResponse(data=result)
| StarcoderdataPython |
389440 | <filename>Lib/site-packages/pygments/lexers/nimrod.py
"""
pygments.lexers.nimrod
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Nim language (formerly known as Nimrod).
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['NimrodLexer']
class NimrodLexer(RegexLexer):
"""
For Nim source code.
.. versionadded:: 1.5
"""
name = 'Nimrod'
url = 'http://nim-lang.org/'
aliases = ['nimrod', 'nim']
filenames = ['*.nim', '*.nimrod']
mimetypes = ['text/x-nim']
flags = re.MULTILINE | re.IGNORECASE
def underscorize(words):
newWords = []
new = ""
for word in words:
for ch in word:
new += (ch + "_?")
newWords.append(new)
new = ""
return "|".join(newWords)
keywords = [
'addr', 'and', 'as', 'asm', 'bind', 'block', 'break', 'case',
'cast', 'concept', 'const', 'continue', 'converter', 'defer', 'discard',
'distinct', 'div', 'do', 'elif', 'else', 'end', 'enum', 'except',
'export', 'finally', 'for', 'func', 'if', 'in', 'yield', 'interface',
'is', 'isnot', 'iterator', 'let', 'macro', 'method', 'mixin', 'mod',
'not', 'notin', 'object', 'of', 'or', 'out', 'proc', 'ptr', 'raise',
'ref', 'return', 'shl', 'shr', 'static', 'template', 'try',
'tuple', 'type', 'using', 'when', 'while', 'xor'
]
keywordsPseudo = [
'nil', 'true', 'false'
]
opWords = [
'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
'notin', 'is', 'isnot'
]
types = [
'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
]
tokens = {
'root': [
(r'##.*$', String.Doc),
(r'#.*$', Comment),
(r'[*=><+\-/@$~&%!?|\\\[\]]', Operator),
(r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;',
Punctuation),
# Strings
(r'(?:[\w]+)"', String, 'rdqs'),
(r'"""', String, 'tdqs'),
('"', String, 'dqs'),
# Char
("'", String.Char, 'chars'),
# Keywords
(r'(%s)\b' % underscorize(opWords), Operator.Word),
(r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'),
(r'(%s)\b' % underscorize(keywords), Keyword),
(r'(%s)\b' % underscorize(['from', 'import', 'include']),
Keyword.Namespace),
(r'(v_?a_?r)\b', Keyword.Declaration),
(r'(%s)\b' % underscorize(types), Keyword.Type),
(r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),
# Identifiers
(r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
# Numbers
(r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))',
Number.Float, ('float-suffix', 'float-number')),
(r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'),
(r'0b[01][01_]*', Number.Bin, 'int-suffix'),
(r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
(r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
# Whitespace
(r'\s+', Text),
(r'.+$', Error),
],
'chars': [
(r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape),
(r"'", String.Char, '#pop'),
(r".", String.Char)
],
'strings': [
(r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
(r'[^\\\'"$\n]+', String),
# quotes, dollars and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'\$', String)
# newlines are an error (use "nl" state)
],
'dqs': [
(r'\\([\\abcefnrtvl"\']|\n|x[a-f0-9]{2}|[0-9]{1,3})',
String.Escape),
(r'"', String, '#pop'),
include('strings')
],
'rdqs': [
(r'"(?!")', String, '#pop'),
(r'""', String.Escape),
include('strings')
],
'tdqs': [
(r'"""(?!")', String, '#pop'),
include('strings'),
include('nl')
],
'funcname': [
(r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
(r'`.+`', Name.Function, '#pop')
],
'nl': [
(r'\n', String)
],
'float-number': [
(r'\.(?!\.)[0-9_]*', Number.Float),
(r'e[+-]?[0-9][0-9_]*', Number.Float),
default('#pop')
],
'float-suffix': [
(r'\'f(32|64)', Number.Float),
default('#pop')
],
'int-suffix': [
(r'\'i(32|64)', Number.Integer.Long),
(r'\'i(8|16)', Number.Integer),
default('#pop')
],
}
| StarcoderdataPython |
279423 | import math
math.exp(99)
| StarcoderdataPython |
3313312 | # <NAME> (<EMAIL>)
# A (slightly modified) implementation of the Recurrent Convolutional Neural Network (RCNN) found in [1].
# [1] <NAME>., <NAME>., <NAME>., and <NAME>. 2015. Recurrent convolutional
# neural networks for text classification. In AAAI, pp. 2267-2273.
# http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9745
import gensim
import numpy as np
import string
from keras import backend
from keras.layers import Dense, Input, Lambda, LSTM, TimeDistributed
from keras.layers.merge import concatenate
from keras.layers.embeddings import Embedding
from keras.models import Model
word2vec = gensim.models.Word2Vec.load("word2vec.gensim")
# We add an additional row of zeros to the embeddings matrix to represent unseen words and the NULL token.
embeddings = np.zeros((word2vec.syn0.shape[0] + 1, word2vec.syn0.shape[1]), dtype = "float32")
embeddings[:word2vec.syn0.shape[0]] = word2vec.syn0
MAX_TOKENS = word2vec.syn0.shape[0]
embedding_dim = word2vec.syn0.shape[1]
hidden_dim_1 = 200
hidden_dim_2 = 100
NUM_CLASSES = 10
document = Input(shape = (None, ), dtype = "int32")
left_context = Input(shape = (None, ), dtype = "int32")
right_context = Input(shape = (None, ), dtype = "int32")
embedder = Embedding(MAX_TOKENS + 1, embedding_dim, weights = [embeddings], trainable = False)
doc_embedding = embedder(document)
l_embedding = embedder(left_context)
r_embedding = embedder(right_context)
# I use LSTM RNNs instead of vanilla RNNs as described in the paper.
forward = LSTM(hidden_dim_1, return_sequences = True)(l_embedding) # See equation (1).
backward = LSTM(hidden_dim_1, return_sequences = True, go_backwards = True)(r_embedding) # See equation (2).
together = concatenate([forward, doc_embedding, backward], axis = 2) # See equation (3).
semantic = TimeDistributed(Dense(hidden_dim_2, activation = "tanh"))(together) # See equation (4).
# Keras provides its own max-pooling layers, but they cannot handle variable length input
# (as far as I can tell). As a result, I define my own max-pooling layer here.
pool_rnn = Lambda(lambda x: backend.max(x, axis = 1), output_shape = (hidden_dim_2, ))(semantic) # See equation (5).
output = Dense(NUM_CLASSES, input_dim = hidden_dim_2, activation = "softmax")(pool_rnn) # See equations (6) and (7).
model = Model(inputs = [document, left_context, right_context], outputs = output)
model.compile(optimizer = "adadelta", loss = "categorical_crossentropy", metrics = ["accuracy"])
text = "This is some example text."
text = text.strip().lower().translate(str.maketrans({key: " {0} ".format(key) for key in string.punctuation}))
tokens = text.split()
tokens = [word2vec.vocab[token].index if token in word2vec.vocab else MAX_TOKENS for token in tokens]
doc_as_array = np.array([tokens])
# We shift the document to the right to obtain the left-side contexts.
left_context_as_array = np.array([[MAX_TOKENS] + tokens[:-1]])
# We shift the document to the left to obtain the right-side contexts.
right_context_as_array = np.array([tokens[1:] + [MAX_TOKENS]])
target = np.array([NUM_CLASSES * [0]])
target[0][3] = 1
history = model.fit([doc_as_array, left_context_as_array, right_context_as_array], target, epochs = 1, verbose = 0)
loss = history.history["loss"][0]
| StarcoderdataPython |
116775 | <gh_stars>10-100
from functools import wraps
from graphql_jwt.utils import get_credentials
from graphql_jwt.shortcuts import get_user_by_token
def jwt_token_decorator(view_func):
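    """Resolve the user from the request's JWT credentials (if present) and attach it before calling the view."""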
@wraps(view_func)
def wrapped_view(request, *args, **kwargs):
token = get_credentials(request, **kwargs)
if token is not None:
request.user = get_user_by_token(token, request)
return view_func(request, *args, **kwargs)
return wrapped_view
| StarcoderdataPython |
11225283 | from flask import Flask, request, jsonify
from pymongo import MongoClient
from os import getenv
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
app.secret_key = getenv('APP_SECRET_KEY')
client = MongoClient(getenv('MONGO_URI'))
users = client.linksbase.users
@app.route('/')
def index():
return 'LinksBase API'
@app.route('/user/')
def user():
return jsonify({
'path': request.path,
'_error': True,
'_error_message': 'No user provided'
})
@app.route('/user/<username>/')
def user_data(username):
u = users.find_one({'username': username})
if not u:
return jsonify({
'_error': True,
'_error_message': f'User {username} not found'
})
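    # Strip private/internal fields before returning the public profile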
del u['password']
del u['email']
del u['_id']
del u['visits_monthly']
del u['visits_weekly']
u['avatar'] = f'https://cdn.linksb.me/avatars/{username}'
u['qr_code'] = f'https://cdn.linksb.me/qrcodes/{username}'
return jsonify({
'_error': False,
'_error_message': '',
**u
})
@app.route('/qrcode/')
def qrcode():
return jsonify({
'path': request.path,
'_error': True,
'_error_message': 'No user provided'
})
@app.route('/qrcode/<username>/')
def qrcode_data(username):
u = users.find_one({'username': username})
if not u:
return jsonify({
'_error': True,
'_error_message': f'User {username} not found'
})
d = {}
d['qr_code'] = f'https://cdn.linksb.me/qrcodes/{username}'
return jsonify({
'_error': False,
'_error_message': '',
**d
})
@app.route('/avatar/')
def avatar():
return jsonify({
'path': request.path,
'_error': True,
'_error_message': 'No user provided'
})
@app.route('/avatar/<username>/')
def avatar_data(username):
u = users.find_one({'username': username})
if not u:
return jsonify({
'_error': True,
'_error_message': f'User {username} not found'
})
del u['password']
del u['email']
del u['_id']
del u['qr_code']
del u['data']
del u['registered_in']
u['avatar'] = f'https://cdn.linksb.me/avatars/{username}'
return jsonify({
'_error': False,
'_error_message': '',
**u
})
if __name__ == '__main__':
app.run(port=9090, debug=True) | StarcoderdataPython |
5092915 | import sys
from ifind.search import Query
from ifind.search.engines.whooshtrec import Whooshtrec
from whoosh.index import open_dir
from whoosh.qparser import QueryParser
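# Run the same query/page through ifind's Whooshtrec engine and through raw Whoosh paging, printing ids and ranks from both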
whoosh_path = sys.argv[1]
stopwords_path = sys.argv[2]
page = 3
page_len = 10
search_engine = Whooshtrec(whoosh_index_dir=whoosh_path,
stopwords_file=stopwords_path,
model=1,
newschema=True)
query = Query('wildlife extinction')
query.skip = page
query.top = page_len
response = search_engine.search(query)
for result in response:
print '{0} {1}'.format(result.whooshid, result.rank)
print response.result_total
print response.results_on_page
print response.actual_page
########
print
print
index = open_dir(whoosh_path)
searcher = index.searcher()
parser = QueryParser('content', index.schema)
parsed_terms = parser.parse('wildlife extinction')
res_page = searcher.search_page(parsed_terms, page, pagelen=page_len)
print dir(res_page)
print res_page.offset
#offsetlist = res_page.results[res_page.offset:]
for res in res_page.results:
print res.docnum, res.rank
| StarcoderdataPython |
6702042 | <reponame>ReanGD/rofi-proxy
#!/bin/python
import sys
import json
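# Minimal rofi-proxy script: print a help line, then respond to Alt+1 / Alt+2 key-press events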
req = {"help": "Press <span foreground=\"red\">Alt+1</span> or <span foreground=\"red\">Alt+2</span>"}
sys.stdout.write(json.dumps(req) + "\n")
sys.stdout.flush()
for line in sys.stdin:
j = json.loads(line)
if j["name"] != "key_press":
continue
key_code = j["value"]["key"]
if key_code == "custom_1":
req = {"lines": [{"text": "You press Alt+1", "filtering": False}]}
elif key_code == "custom_2":
req = {"lines": [{"text": "You press Alt+2", "filtering": False}]}
else:
req = {"lines": [{"text": "You press unknown key", "filtering": False}]}
sys.stdout.write(json.dumps(req) + "\n")
sys.stdout.flush()
| StarcoderdataPython |
232275 | class Solution:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
dp = [[0] * len(matrix[0]) for i in range(len(matrix))]
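        # dp[r][c] = number of consecutive "1"s starting at (r, c) and extending to the right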
for row_i, row in enumerate(matrix):
for col_i, col in enumerate(row):
if col == "0":
dp[row_i][col_i] = 0
else:
dp[row_i][col_i] = self.find(col_i, row)
mx = 0
for i in range(len(dp)):
for j in range(len(dp[i])):
mx = max(mx, self.search(dp, i, j))
return mx
def search(self, dp, start_row, start_col):
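        # Fix the width dp[start_row][start_col] and grow up/down while each row is at least that wide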
start, up, down = dp[start_row][start_col], 0, 0
if start_row > 0:
for i in range(start_row - 1, -1, -1):
if start <= dp[i][start_col]:
up += 1
else:
break
if start_row < len(dp) - 1:
for i in range(start_row + 1, len(dp)):
if start <= dp[i][start_col]:
down += 1
else:
break
return start * (up + down + 1)
def find(self, index, row):
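        # Count consecutive "1"s in row starting at index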
count = 0
for i in range(index, len(row)):
if row[i] == "0":
break
count += 1
return count | StarcoderdataPython |
6448459 | from django.db import models
from django.utils.translation import gettext_lazy as _
# Contact entity
class Contact(models.Model):
name = models.CharField(default='NO_NAME', max_length=50, error_messages={
'max_length': _("Your name is too long"),
'blank': _("You need to fill your name"),
'null': _("You need to fill your name")
})
email = models.CharField(default='NO_EMAIL', max_length=100, error_messages={
'max_length': _("Your email is too long"),
'blank': _("You need to fill your email"),
'null': _("You need to fill your email")
})
message = models.TextField(default='NO_MESSAGE', error_messages={
'blank': _("You need to fill the message"),
'null': _("You need to fill the message")
})
def __str__(self):
return self.email
| StarcoderdataPython |
5034577 | <reponame>chathu1996/hacktoberfest2020<filename>Python/AIspeech.py<gh_stars>10-100
import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
import datetime
import wikipedia #pip install wikipedia
import webbrowser #pip install webbrowser
import os
from playsound import playsound #pip install playsound
engine=pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice',voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning sir!")
elif hour>=12 and hour<18:
speak("Good Afternoon sir! ")
else:
speak("Good Evening sir!")
speak("This is our Artificial intelligent project, Is there any thing with which i can help you?")
def takeCommand(): #It takes microphone input from the user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source, phrase_time_limit = 5)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
print(e)
print("Say that again please...")
speak("Say that again please...")
return "None"
return query
if __name__ == "__main__":
wishMe()
while True:
# if 1:
query = takeCommand().lower()
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(results)
speak(results)
elif 'how are you doing' in query or 'hows going on' in query or 'how are you' in query:
speak("oh! i am doing great and always ready to help you...")
elif 'search' in query:
speak('searching it...')
query=query.replace("search","")
if query=='search':
continue
webbrowser.open(query)
speak("showing result from google")
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open u m s' in query or 'login u m s' in query:
webbrowser.open("https://ums.lpu.in/lpuums/")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
elif 'play music' in query:
playsound("C:\\Users\\Shivanshu\\Desktop\\Steg.mp3")
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
| StarcoderdataPython |
11315959 | import numpy as np
from generic.tf_utils.evaluator import Evaluator
class GuesserWrapper(object):
def __init__(self, guesser):
self.guesser = guesser
self.evaluator = None
def initialize(self, sess):
self.evaluator = Evaluator(self.guesser.get_sources(sess), self.guesser.scope_name)
def find_object(self, sess, dialogues, seq_length, game_data):
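        # Run the guesser network over the dialogue and compare its pick with the target object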
game_data["dialogues"] = dialogues
game_data["seq_length"] = seq_length
# sample
selected_object, softmax = self.evaluator.execute(sess, output=[self.guesser.selected_object, self.guesser.softmax], batch=game_data)
found = (selected_object == game_data["targets_index"])
return found, softmax, selected_object
class GuesserUserWrapper(object):
def __init__(self, tokenizer, img_raw_dir=None):
self.tokenizer = tokenizer
self.img_raw_dir = img_raw_dir
def initialize(self, sess):
pass
def find_object(self, _, dialogues, __, game_data):
# Step 1 : Display dialogue and objects
print()
print("Final dialogue:")
qas = self.tokenizer.split_questions(dialogues[0])
for qa in qas:
print(" -", self.tokenizer.decode(qa))
print()
print("Select one of the following objects")
game = game_data["raw"][0]
objects = game.objects
for i, obj in enumerate(objects):
print(" -", i, obj.category, "\t", obj.bbox)
# Step 2 : Ask for guess
while True:
selected_object = input('What is your guess id? (S)how image. --> ')
if selected_object == "S" or selected_object.lower() == "show":
game.show(self.img_raw_dir, display_index=True)
elif 0 <= int(selected_object) < len(objects):
break
# Step 3 : Check guess
found = (selected_object == game_data["targets_index"])
softmax = np.zeros(len(objects))
softmax[selected_object] = 1
if found:
print("Success!")
else:
print("Failure :(")
print("The correct object was: {}".format(game_data["targets_index"][0]))
print()
return [found], [softmax], [selected_object]
| StarcoderdataPython |
11308499 | <reponame>daviferreira/defprogramming
# coding: utf-8
import uuid
from django.db.models import signals
from django.template.defaultfilters import slugify
from .models import Author, Quote, Tag
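# pre_save handlers: give each object a short uuid and a slug derived from its main text field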
def author_pre_save(signal, instance, sender, **kwargs):
if not instance.uuid:
instance.uuid = uuid.uuid4().hex[:12]
instance.slug = slugify(instance.name[:99])
def quote_pre_save(signal, instance, sender, **kwargs):
if not instance.uuid:
instance.uuid = uuid.uuid4().hex[:12]
instance.slug = slugify(instance.body[:99])
def tag_pre_save(signal, instance, sender, **kwargs):
if not instance.uuid:
instance.uuid = uuid.uuid4().hex[:12]
instance.slug = slugify(instance.name[:99])
signals.pre_save.connect(author_pre_save, sender=Author)
signals.pre_save.connect(quote_pre_save, sender=Quote)
signals.pre_save.connect(tag_pre_save, sender=Tag) | StarcoderdataPython |