id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---|
3453293 | from spaceone.repository.manager.repository_manager import RepositoryManager
class LocalRepositoryManager(RepositoryManager):
def register_repository(self, params):
# Assume there is only one local repository
return self.repo_model.create(params)
| StarcoderdataPython |
12849110 | import types
from tf.advanced.helpers import dh
from tf.advanced.find import loadModule
from tf.advanced.app import App
def transform_prime(app, n, p):
return ("'" * int(p)) if p else ""
def transform_ctype(app, n, t):
if t == "uncertain":
return "?"
elif t == "properName":
return "="
elif t == "supplied":
return ">"
else:
return ""
def transform_atf(app, n, a):
return app.atfFromSign(n, flags=True)
class TfApp(App):
def __init__(app, *args, silent=False, **kwargs):
app.transform_ctype = types.MethodType(transform_ctype, app)
app.transform_prime = types.MethodType(transform_prime, app)
app.transform_atf = types.MethodType(transform_atf, app)
atf = loadModule("atf", *args)
atf.atfApi(app)
app.atf = atf
super().__init__(*args, silent=silent, **kwargs)
app.image = loadModule("image", *args)
app.image.getImagery(app, silent, checkout=kwargs.get("checkout", ""))
app.reinit()
def reinit(app):
customMethods = app.customMethods
customMethods.afterChild.clear()
customMethods.afterChild.update(quad=app.getOp)
customMethods.plainCustom.clear()
customMethods.plainCustom.update(
sign=app.plainAtfType, quad=app.plainAtfType, cluster=app.plainAtfType,
)
customMethods.prettyCustom.clear()
customMethods.prettyCustom.update(
case=app.caseDir, cluster=app.clusterBoundaries, comments=app.commentsCls
)
def cdli(app, n, linkText=None, asString=False):
(nType, objectType, identifier) = app.image.imageCls(app, n)
if linkText is None:
linkText = identifier
result = app.image.wrapLink(linkText, objectType, "main", identifier)
if asString:
return result
else:
dh(result)
# PRETTY HELPERS
def getGraphics(app, isPretty, n, nType, outer):
api = app.api
F = api.F
E = api.E
result = ""
isOuter = outer or (all(F.otype.v(parent) != "quad" for parent in E.sub.t(n)))
if isOuter:
width = "2em" if nType == "sign" else "4em"
height = "4em" if nType == "quad" else "6em"
theGraphics = app.image.getImages(
app,
n,
kind="lineart",
width=width,
height=height,
_asString=True,
withCaption=False,
warning=False,
)
if theGraphics:
result = f"<div>{theGraphics}</div>" if isPretty else f" {theGraphics}"
return result
def lineart(app, ns, key=None, asLink=False, withCaption=None, **options):
return app.image.getImages(
app,
ns,
kind="lineart",
key=key,
asLink=asLink,
withCaption=withCaption,
**options,
)
def photo(app, ns, key=None, asLink=False, withCaption=None, **options):
return app.image.getImages(
app,
ns,
kind="photo",
key=key,
asLink=asLink,
withCaption=withCaption,
**options,
)
def imagery(app, objectType, kind):
return set(app._imagery.get(objectType, {}).get(kind, {}))
| StarcoderdataPython |
3286016 | import random
TAG_MASC = "Masculine"
TAG_FEMME = "Feminine"
class Gender(object):
def __init__(self, noun="person", adjective="nonbinary", subject_pronoun="ze", object_pronoun="zem",
possessive_determiner="zir", absolute_pronoun="zirs", card_pattern="card_*_*.png",
tags=(TAG_MASC, TAG_FEMME)):
self.noun = noun
self.adjective = adjective
self.subject_pronoun = subject_pronoun
self.object_pronoun = object_pronoun
self.possessive_determiner = possessive_determiner
self.absolute_pronoun = absolute_pronoun
self.card_pattern = card_pattern
self.tags = list(tags)
@classmethod
def get_default_female(cls):
return cls("woman", "female", "she", "her", "her", "hers", "card_f_*.png", (TAG_FEMME,))
@classmethod
def get_default_male(cls):
return cls("man", "male", "he", "him", "his", "his", "card_m_*.png", (TAG_MASC,))
@classmethod
def get_default_nonbinary(cls):
return cls()
@classmethod
def random_gender(cls):
if random.randint(1, 20) == 20:
# Nonbinary it is.
return cls()
elif random.randint(1, 2) == 1:
return cls.get_default_female()
else:
return cls.get_default_male()
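# Hedged usage sketch (not part of the original module): drawing a random
# gender and formatting a sentence with its fields.
# g = Gender.random_gender()
# print("The {} tipped {} hat.".format(g.noun, g.possessive_determiner))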
| StarcoderdataPython |
354598 | # -*- coding: utf-8 -*-
from os import path as op
from nose.tools import assert_raises, assert_true, assert_equal
import numpy as np
from mne._hdf5 import write_hdf5, read_hdf5
from mne.utils import requires_pytables, _TempDir, object_diff
tempdir = _TempDir()
@requires_pytables()
def test_hdf5():
"""Test HDF5 IO
"""
test_file = op.join(tempdir, 'test.hdf5')
x = dict(a=dict(b=np.zeros(3)), c=np.zeros(2, np.complex128),
d=[dict(e=(1, -2., 'hello', u'goodbyeu\u2764')), None])
write_hdf5(test_file, 1)
assert_equal(read_hdf5(test_file), 1)
assert_raises(IOError, write_hdf5, test_file, x) # file exists
write_hdf5(test_file, x, overwrite=True)
assert_raises(IOError, read_hdf5, test_file + 'FOO') # not found
xx = read_hdf5(test_file)
assert_true(object_diff(x, xx) == '') # no assert_equal, ugly output
| StarcoderdataPython |
4836310 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ, wildcard-import
"Vision transforms."
import warnings
import random
from ....block import Block, HybridBlock
from ....nn import Sequential, HybridSequential
from .....util import is_np_array
from . image import *
from .image import _append_return
class Compose(Sequential):
"""Sequentially composes multiple transforms.
Parameters
----------
transforms : list of transform Blocks.
The list of transforms to be composed.
Inputs:
- **data**: input tensor with shape of the first transform Block requires.
Outputs:
- **out**: output tensor with shape of the last transform Block produces.
Examples
--------
>>> transformer = transforms.Compose([transforms.Resize(300),
... transforms.CenterCrop(256),
... transforms.ToTensor()])
>>> image = mx.nd.random.uniform(0, 255, (224, 224, 3)).astype(dtype=np.uint8)
>>> transformer(image)
<NDArray 3x256x256 @cpu(0)>
"""
def __init__(self, transforms):
super(Compose, self).__init__()
transforms.append(None)
hybrid = []
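# Group runs of consecutive HybridBlocks: a single one is added as-is, while
# two or more are fused into a hybridized HybridSequential; the None sentinel
# appended above flushes any run still open at the end of the list.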
for i in transforms:
if isinstance(i, HybridBlock):
hybrid.append(i)
continue
elif len(hybrid) == 1:
self.add(hybrid[0])
hybrid = []
elif len(hybrid) > 1:
hblock = HybridSequential()
for j in hybrid:
hblock.add(j)
hblock.hybridize()
self.add(hblock)
hybrid = []
if i is not None:
self.add(i)
class HybridCompose(HybridSequential):
"""Sequentially composes multiple transforms. This is the Hybrid version of Compose.
Parameters
----------
transforms : list of transform Blocks.
The list of transforms to be composed.
Inputs:
- **data**: input tensor with shape of the first transform Block requires.
Outputs:
- **out**: output tensor with shape of the last transform Block produces.
Examples
--------
>>> transformer = transforms.HybridCompose([transforms.Resize(300),
... transforms.CenterCrop(256),
... transforms.ToTensor()])
>>> image = mx.nd.random.uniform(0, 255, (224, 224, 3)).astype(dtype=np.uint8)
>>> transformer(image)
<NDArray 3x256x256 @cpu(0)>
"""
def __init__(self, transforms):
super(HybridCompose, self).__init__()
for i in transforms:
if not isinstance(i, HybridBlock):
raise ValueError("{} is not a HybridBlock, try use `Compose` instead".format(i))
self.add(i)
self.hybridize()
class Cast(HybridBlock):
"""Cast inputs to a specific data type
Parameters
----------
dtype : str, default 'float32'
The target data type, in string or `numpy.dtype`.
Inputs:
- **data**: input tensor with arbitrary shape and dtype.
Outputs:
- **out**: output tensor with the same shape as `data` and data type as dtype.
"""
def __init__(self, dtype='float32'):
super(Cast, self).__init__()
self._dtype = dtype
def hybrid_forward(self, F, *args):
if is_np_array():
F = F.npx
return tuple([F.cast(x, self._dtype) for x in args])
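# Hedged usage sketch (names are illustrative, not from this file): casting a
# batch of images to half precision before feeding a float16 network.
# cast = Cast('float16')
# outputs = cast(images)  # every positional input is cast to float16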
class RandomApply(Sequential):
"""Apply a list of transformations randomly given probability
Parameters
----------
transforms
List of transformations.
p : float
Probability of applying the transformations.
Inputs:
- **data**: input tensor.
Outputs:
- **out**: transformed image.
"""
def __init__(self, transforms, p=0.5):
super(RandomApply, self).__init__()
self.transforms = transforms
self.p = p
def forward(self, x, *args):
if self.p < random.random():
return x
x = self.transforms(x)
return _append_return(x, *args)
class HybridRandomApply(HybridSequential):
"""Apply a list of transformations randomly given probability
Parameters
----------
transforms
List of transformations which must be HybridBlocks.
p : float
Probability of applying the transformations.
Inputs:
- **data**: input tensor.
Outputs:
- **out**: transformed image.
"""
def __init__(self, transforms, p=0.5):
super(HybridRandomApply, self).__init__()
assert isinstance(transforms, HybridBlock)
self.transforms = transforms
self.p = p
def hybrid_forward(self, F, x, *args):
if is_np_array():
cond = self.p < F.random.uniform(low=0, high=1, size=1)
return F.npx.cond(cond, x, self.transforms(x))
cond = self.p < F.random.uniform(low=0, high=1, shape=1)
return _append_return(F.contrib.cond(cond, x, self.transforms(x)), *args)
| StarcoderdataPython |
1913399 | class MockController:
def returns_instance_of(self, obj):
pass
| StarcoderdataPython |
6438189 | <gh_stars>0
#
# Copyright (C) 2001-2004 <NAME> and Rational Discovery LLC
# All Rights Reserved
#
""" The "parser" for compound descriptors.
I almost hesitate to document this, because it's not the prettiest
thing the world has ever seen... but it does work (for at least some
definitions of the word).
Rather than getting into the whole mess of writing a parser for the
compound descriptor expressions, I'm just using string substitutions
and python's wonderful ability to *eval* code.
It would probably be a good idea at some point to replace this with a
real parser, if only for the flexibility and intelligent error
messages that would become possible.
The general idea is that we're going to deal with expressions where
atomic descriptors have some kind of method applied to them which
reduces them to a single number for the entire composition. Compound
descriptors (those applicable to the compound as a whole) are not
operated on by anything in particular (except for standard math stuff).
Here's the general flow of things:
1) Composition descriptor references ($a, $b, etc.) are replaced with the
corresponding descriptor names using string substitution.
(*_SubForCompoundDescriptors*)
2) Atomic descriptor references ($1, $2, etc) are replaced with lookups
into the atomic dict with "DEADBEEF" in place of the atom name.
(*_SubForAtomicVars*)
3) Calls to Calculator Functions are augmented with a reference to
the composition and atomic dictionary
(*_SubMethodArgs*)
**NOTE:**
anytime we don't know the answer for a descriptor, rather than
throwing a (completely incomprehensible) exception, we just return
-666. So bad descriptor values should stand out like sore thumbs.
"""
from __future__ import print_function
__DEBUG = 0
from rdkit import RDConfig
# we do this to allow the use of stuff in the math module
from math import *
#----------------------
# atomic descriptor section
#----------------------
# these are the methods which can be applied to ATOMIC descriptors.
knownMethods = ['SUM', 'MIN', 'MAX', 'MEAN', 'AVG', 'DEV', 'HAS']
def HAS(strArg, composList, atomDict):
""" *Calculator Method*
does a string search
**Arguments**
- strArg: the arguments in string form
- composList: the composition vector
- atomDict: the atomic dictionary
**Returns**
1 or 0
"""
splitArgs = strArg.split(',')
if len(splitArgs) > 1:
for atom, num in composList:
tStr = splitArgs[0].replace('DEADBEEF', atom)
where = eval(tStr)
what = eval(splitArgs[1])
if where.find(what) != -1:
return 1
return 0
else:
return -666
def SUM(strArg, composList, atomDict):
""" *Calculator Method*
calculates the sum of a descriptor across a composition
**Arguments**
- strArg: the arguments in string form
- compos: the composition vector
- atomDict: the atomic dictionary
**Returns**
a float
"""
accum = 0.0
for atom, num in composList:
tStr = strArg.replace('DEADBEEF', atom)
accum = accum + eval(tStr) * num
return accum
def MEAN(strArg, composList, atomDict):
""" *Calculator Method*
calculates the average of a descriptor across a composition
**Arguments**
- strArg: the arguments in string form
- compos: the composition vector
- atomDict: the atomic dictionary
**Returns**
a float
"""
accum = 0.0
nSoFar = 0
for atom, num in composList:
tStr = strArg.replace('DEADBEEF', atom)
accum = accum + eval(tStr) * num
nSoFar = nSoFar + num
return accum / nSoFar
AVG = MEAN
def DEV(strArg, composList, atomDict):
""" *Calculator Method*
calculates the average deviation of a descriptor across a composition
**Arguments**
- strArg: the arguments in string form
- compos: the composition vector
- atomDict: the atomic dictionary
**Returns**
a float
"""
avg = MEAN(strArg, composList, atomDict)
accum = 0.0
nSoFar = 0.0
for atom, num in composList:
tStr = strArg.replace('DEADBEEF', atom)
accum = accum + abs(eval(tStr) - avg) * num
nSoFar = nSoFar + num
return accum / nSoFar
def MIN(strArg, composList, atomDict):
""" *Calculator Method*
calculates the minimum value of a descriptor across a composition
**Arguments**
- strArg: the arguments in string form
- compos: the composition vector
- atomDict: the atomic dictionary
**Returns**
a float
"""
accum = []
for atom, num in composList:
tStr = strArg.replace('DEADBEEF', atom)
accum.append(eval(tStr))
return min(accum)
def MAX(strArg, composList, atomDict):
""" *Calculator Method*
calculates the maximum value of a descriptor across a composition
**Arguments**
- strArg: the arguments in string form
- compos: the composition vector
- atomDict: the atomic dictionary
**Returns**
a float
"""
accum = []
for atom, num in composList:
tStr = strArg.replace('DEADBEEF', atom)
accum.append(eval(tStr))
return max(accum)
#------------------
# string replacement routines
# these are not intended to be called by clients
#------------------
def _SubForAtomicVars(cExpr, varList, dictName):
""" replace atomic variables with the appropriate dictionary lookup
*Not intended for client use*
"""
for i in range(len(varList)):
cExpr = cExpr.replace('$%d' % (i + 1), '%s["DEADBEEF"]["%s"]' % (dictName, varList[i]))
return cExpr
def _SubForCompoundDescriptors(cExpr, varList, dictName):
""" replace compound variables with the appropriate list index
*Not intended for client use*
"""
for i in range(len(varList)):
cExpr = cExpr.replace('$%s' % chr(ord('a') + i), '%s["%s"]' % (dictName, varList[i]))
return cExpr
def _SubMethodArgs(cExpr, knownMethods):
""" alters the arguments of calls to calculator methods
*Not intended for client use*
This is kind of putrid (and the code ain't so pretty either)
The general idea is that the various special methods for atomic
descriptors need two extra arguments (the composition and the atomic
dict). Rather than make the user type those in, we just find
invocations of these methods and fill out the function calls using
string replacements.
"""
res = cExpr
for method in knownMethods:
p = 0
while p != -1 and p < len(res):
p = res.find(method, p)
if p != -1:
p = p + len(method) + 1
start = p
parenCount = 1
while parenCount and p < len(res):
if res[p] == ')':
parenCount = parenCount - 1
elif res[p] == '(':
parenCount = parenCount + 1
p = p + 1
if p <= len(res):
res = res[0:start] + "'%s',compos,atomDict" % (res[start:p - 1]) + res[p - 1:]
return res
def CalcSingleCompoundDescriptor(compos, argVect, atomDict, propDict):
""" calculates the value of the descriptor for a single compound
**ARGUMENTS:**
- compos: a vector/tuple containing the composition
information... in the form:
'[("Fe",1.),("Pt",2.),("Rh",0.02)]'
- argVect: a vector/tuple with three elements:
1) AtomicDescriptorNames: a list/tuple of the names of the
atomic descriptors being used. These determine the
meaning of $1, $2, etc. in the expression
2) CompoundDescriptorNames: a list/tuple of the names of the
compound descriptors being used. These determine the
meaning of $a, $b, etc. in the expression
3) Expr: a string containing the expression to be used to
evaluate the final result.
- atomDict:
a dictionary of atomic descriptors. Each atomic entry is
another dictionary containing the individual descriptors
and their values
- propDict:
a list of descriptors for the composition.
**RETURNS:**
the value of the descriptor, -666 if a problem was encountered
**NOTE:**
- because it takes rather a lot of work to get everything set
up to calculate a descriptor, if you are calculating the
same descriptor for multiple compounds, you probably want to
be calling _CalcMultipleCompoundsDescriptor()_.
"""
try:
atomVarNames = argVect[0]
compositionVarNames = argVect[1]
formula = argVect[2]
formula = _SubForCompoundDescriptors(formula, compositionVarNames, 'propDict')
formula = _SubForAtomicVars(formula, atomVarNames, 'atomDict')
evalTarget = _SubMethodArgs(formula, knownMethods)
except Exception:
if __DEBUG:
import sys, traceback
print('Sub Failure!')
traceback.print_exc()
print(evalTarget)
print(propDict)
raise RuntimeError('Failure 1')
else:
return -666
try:
v = eval(evalTarget)
except Exception:
if __DEBUG:
import sys, traceback
outF = open(RDConfig.RDCodeDir + '/ml/descriptors/log.txt', 'a+')
outF.write('#------------------------------\n')
outF.write('formula: %s\n' % repr(formula))
outF.write('target: %s\n' % repr(evalTarget))
outF.write('propDict: %s\n' % (repr(propDict)))
outF.write('keys: %s\n' % (repr(sorted(atomDict))))
outF.close()
print('ick!')
print('formula:', formula)
print('target:', evalTarget)
print('propDict:', propDict)
print('keys:', atomDict.keys())
traceback.print_exc()
raise RuntimeError('Failure 2')
else:
v = -666
return v
def CalcMultipleCompoundsDescriptor(composVect, argVect, atomDict, propDictList):
""" calculates the value of the descriptor for a list of compounds
**ARGUMENTS:**
- composVect: a vector of vector/tuple containing the composition
information.
See _CalcSingleCompoundDescriptor()_ for an explanation of the elements.
- argVect: a vector/tuple with three elements:
1) AtomicDescriptorNames: a list/tuple of the names of the
atomic descriptors being used. These determine the
meaning of $1, $2, etc. in the expression
2) CompoundDescriptorNames: a list/tuple of the names of the
compound descriptors being used. These determine the
meaning of $a, $b, etc. in the expression
3) Expr: a string containing the expression to be used to
evaluate the final result.
- atomDict:
a dictionary of atomic descriptors. Each atomic entry is
another dictionary containing the individual descriptors
and their values
- propDictList:
a vector of vectors of descriptors for the composition.
**RETURNS:**
a vector containing the values of the descriptor for each
compound. Any given entry will be -666 if problems were
encountered
"""
res = [-666] * len(composVect)
try:
atomVarNames = argVect[0]
compositionVarNames = argVect[1]
formula = argVect[2]
formula = _SubForCompoundDescriptors(formula, compositionVarNames, 'propDict')
formula = _SubForAtomicVars(formula, atomVarNames, 'atomDict')
evalTarget = _SubMethodArgs(formula, knownMethods)
except Exception:
return res
for i in range(len(composVect)):
propDict = propDictList[i]
compos = composVect[i]
try:
v = eval(evalTarget)
except Exception:
v = -666
res[i] = v
return res
#------------
# Demo/testing code
#------------
if __name__ == '__main__':
piece1 = [['d1', 'd2'], ['d1', 'd2']]
aDict = {'Fe': {'d1': 1., 'd2': 2.}, 'Pt': {'d1': 10., 'd2': 20.}}
pDict = {'d1': 100., 'd2': 200.}
compos = [('Fe', 1), ('Pt', 1)]
cExprs = ["SUM($1)", "SUM($1)+SUM($2)", "SUM($1)+SUM($1)", "MEAN($1)", "DEV($2)", "MAX($1)",
"MIN($1)/MAX($1)", "MIN($2)", "SUM($1)/$a", "sqrt($a+$b)", "SUM((3.*$1)/($2))", "foo"]
for cExpr in cExprs:
argVect = piece1 + [cExpr]
print(cExpr)
print(CalcSingleCompoundDescriptor(compos, argVect, aDict, pDict))
print(CalcMultipleCompoundsDescriptor([compos, compos], argVect, aDict, [pDict, pDict]))
| StarcoderdataPython |
11293354 | # Copyright 2021 Zuru Tech HK Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import tensorflow as tf
import tensorflow.keras as keras
from anomaly_toolbox.models.ganomaly import GANomalyGenerator
class GANomalyPredictor:
generator: keras.Model
discriminator: keras.Model
def load_from_savedmodel(self, generator_dir: str, discriminator_dir: str):
self.generator: GANomalyGenerator = tf.keras.models.load_model(generator_dir)
self.discriminator = tf.keras.models.load_model(discriminator_dir)
def evaluate(self, dataset: tf.data.Dataset) -> Tuple[tf.Tensor, tf.Tensor]:
"""Evaluate for benchmark."""
anomaly_scores, labels = [], []
for batch in dataset:
# a_score: [batch, 1, latent dimension]
# y: [batch, 1]
a_score, y = self.evaluate_step(batch)
anomaly_scores.append(a_score)
labels.append(y)
anomaly_scores = tf.reshape(anomaly_scores, -1)
labels = tf.reshape(labels, -1)
tf.assert_equal(anomaly_scores.shape, labels.shape)
return anomaly_scores, labels
@tf.function
def evaluate_step(
self, inputs: Tuple[tf.Tensor, tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
# x: [batch, height, width, channels]
# y: [batch, 1]
x, y = inputs
# z: [batch, 1, 1, latent dimension]
# x_hat: [batch, height, width, channels]
# z_hat: [batch, 1, 1, latent dimension]
z, x_hat, z_hat = self.generator(x)
# z: [batch, latent dimension]
# z_hat: [batch, latent dimension]
z, z_hat = tf.squeeze(z), tf.squeeze(z_hat)
# a_score: [batch, 1]
a_score = self.compute_anomaly_score(z, z_hat)
return a_score, y
@staticmethod
@tf.function
def predict(
generator: keras.Model, x: tf.Tensor, return_score_only: bool = True
) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor, tf.Tensor]]:
# x: [batch, height, width, channels]
# z: [batch, 1, 1, latent dimension]
# x_hat: [batch, height, width, channels]
# z_hat: [batch, 1, 1, latent dimension]
z, x_hat, z_hat = generator(x)
# z: [batch, latent dimension]
# z_hat: [batch, latent dimension]
z, z_hat = tf.squeeze(z), tf.squeeze(z_hat)
# a_score: [batch, 1]
a_score = GANomalyPredictor.compute_anomaly_score(z, z_hat)
if return_score_only:
return a_score
else:
return x_hat, z_hat, z, a_score
@staticmethod
def compute_anomaly_score(
encoded_input: tf.Tensor, encoded_generated: tf.Tensor
) -> tf.Tensor:
anomaly_score = tf.reduce_mean(
tf.math.squared_difference(encoded_input, encoded_generated), axis=1
)
return anomaly_score
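# Hedged usage sketch (directory names are hypothetical, not from this file):
# predictor = GANomalyPredictor()
# predictor.load_from_savedmodel("ckpt/generator", "ckpt/discriminator")
# anomaly_scores, labels = predictor.evaluate(test_dataset)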
| StarcoderdataPython |
4891874 | from pathlib import Path
import click
import pandas as pd
import functions as f
current_directory = Path(__file__).absolute().parent
default_data_directory = current_directory.joinpath('..', '..', 'data')
@click.command()
@click.option('--data-path', default=None, help='Directory for the CSV files')
def main(data_path):
# calculate path to files
data_directory = Path(data_path) if data_path else default_data_directory
train_csv = data_directory.joinpath('train.csv')
test_csv = data_directory.joinpath('test.csv')
subm_csv = data_directory.joinpath('submission_popular.csv')
print(f"Reading {train_csv} ...")
df_train = pd.read_csv(train_csv, header=0)
print(f"Reading {test_csv} ...")
df_test = pd.read_csv(test_csv, header=0)
print("Get popular items...")
df_popular = f.get_popularity(df_train)
print("Identify target rows...")
df_target = f.get_submission_target(df_test)
print("Get recommendations...")
df_expl = f.explode(df_target, "impressions")
df_out = f.calc_recommendation(df_expl, df_popular)
print(f"Writing {subm_csv}...")
df_out.to_csv(subm_csv, index=False)
print("Finished calculating recommendations.")
if __name__ == '__main__':
main()
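# Hedged invocation sketch (the script filename is hypothetical):
#   python popularity_baseline.py --data-path ./data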
| StarcoderdataPython |
11327839 | <reponame>rpSebastian/AutoCFR<gh_stars>1-10
import pandas as pd
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from autocfr.utils import load_df, remove_border, png_to_pdf
plt.rc("pdf", fonttype=42)
plt.rc("ps", fonttype=42)
class PlotAlgorithmCompare:
def __init__(
self,
game_name,
legend=True,
iterations=20000,
print_freq=20,
save_dir="performance",
):
print("Plot {}".format(game_name))
self.save_dir = save_dir
self.legend = legend
if self.legend:
self.save_name += "_legend"
self.ymin = self.tick_min
self.ymax = self.tick_max
self.format = "png"
self.game_name = game_name
self.algorithm_names = [
"CFR",
"CFRPlus",
"LinearCFR",
"DCFR",
# "DDCFR",
"DCFRPlus",
# "DCFRPlusTwo",
# "AutoCFR4",
# "AutoCFRS",
]
self.print_freq = print_freq
self.iterations = iterations
self.transparent = False
self.chinese = False
self.dpi = 1000
self.xlabel_unit = None
self.font = matplotlib.font_manager.FontProperties(
fname="C:\\Windows\\Fonts\\SimHei.ttf"
)
def run(self):
self.set_font_size()
df = self.load_algorithm()
# df = self.filter_algorithm(df)
df = self.filter_freq(df)
df = self.filter_iterations(df)
df = df.reset_index()
self.plot_exp_compare_in_one_game(df, self.game_name)
def plot_exp_compare_in_one_game(self, df, game_name):
fig = plt.figure(figsize=(6, 4))
df = self.filter_game(df, [game_name])
sns.set_style("darkgrid")
for algorithm_name in self.algorithm_names:
self.plot_one_algorithm_exp(df, algorithm_name)
if self.legend:
plt.legend()
plt.title(self.title_name)
self.set_xticks()
self.set_yticks()
if self.chinese:
plt.xlabel("迭代次数", fontproperties=self.font)
plt.ylabel("可利用度(与纳什均衡策略的距离)", fontproperties=self.font)
else:
xlabel = "Iterations"
if self.xlabel_unit:
xlabel += f" ({self.xlabel_unit})"
plt.xlabel(xlabel)
plt.ylabel("Exploitability")
self.set_lim()
path = Path("images/{}/{}.{}".format(self.save_dir, self.save_name, self.format))
path.parent.mkdir(exist_ok=True, parents=True)
# plt.show()
plt.savefig(
path,
format=self.format,
bbox_inches="tight",
pad_inches=0,
dpi=self.dpi,
transparent=self.transparent,
)
plt.close(fig)
remove_border(path)
png_to_pdf(path)
def plot_one_algorithm_exp(self, df, algorithm_name):
legend_name = self.get_legend_name_by_algorithm(algorithm_name)
color = self.get_color_by_algorithm(algorithm_name)
df = self.filter_algorithm(df, [algorithm_name])
step = df["step"].values
log_10_exp = df["log_10_exp"].values
plt.plot(step, log_10_exp, label=legend_name, color=color, linewidth=2.5)
def get_color_by_algorithm(self, algorithm_name):
color_dict = {
"CFR": "#0C8599",
"CFRPlus": "#7FC41D",
"LinearCFR": "#F99D00",
"DCFR": "#7D92EE",
"DCFRPlus": "#F16D85",
}
return color_dict[algorithm_name]
def get_legend_name_by_algorithm(self, algorithm_name):
legend_name_dict = {
"CFR": "CFR",
"CFRPlus": "CFR+",
"LinearCFR": "Linear CFR",
"DCFR": "DCFR",
"DCFRPlus": "DCFR+",
"AutoCFR4": "AutoCFR4",
"AutoCFRS": "AutoCFRS",
}
return legend_name_dict[algorithm_name]
def load_baseline(self):
df = load_df("../baseline")
df["log_10_exp"] = np.log(df["exp"]) / np.log(10)
return df
def load_algorithm(self):
# bigleduc_df = load_df("../bigleduc")
df_list = []
game_name = self.game_name
for algo_name in self.algorithm_names:
df = load_df("../games/{}/{}_{}".format(game_name, algo_name, game_name))
if df is not None:
df_list.append(df)
df = pd.concat(df_list)
df["log_10_exp"] = np.log(df["exp"]) / np.log(10)
return df
def filter_algorithm(self, df, algorithm_names=None):
if algorithm_names is None:
algorithm_names = self.algorithm_names
result_df = df[df.algorithm_name.isin(algorithm_names)]
return result_df
def filter_freq(self, df):
result_df = df[df.step % self.print_freq == 0]
return result_df
def filter_iterations(self, df):
result_df = df[df.step <= self.iterations]
return result_df
def filter_game(self, df, game_name_list):
result_df = df[df.game_name.isin(game_name_list)]
return result_df
def set_font_size(self):
import matplotlib.pyplot as plt
# plt.rc('font', size=20) # controls default text sizes
plt.rc("axes", titlesize=20) # fontsize of the axes title
plt.rc("axes", labelsize=20) # fontsize of the x and y labels
plt.rc("xtick", labelsize=20) # fontsize of the tick labels
plt.rc("ytick", labelsize=20) # fontsize of the tick labels
plt.rc("legend", fontsize=20) # legend fontsize
# plt.rc('figure', titlesize=10) # fontsize of the figure title
def set_yticks(self):
tick_dict = {
-12: "1e-12",
-11: "1e-11",
-10: "1e-10",
-9: "1e-9",
-8: "1e-8",
-7: "1e-7",
-6: "1e-6",
-5: "1e-5",
-4: "1e-4",
-3: "1e-3",
-2: "0.01",
-1: "0.1",
0: "1",
1: "10",
2: "100",
3: "1e3",
4: "1e4",
5: "1e5",
}
tick_len = self.tick_max - self.tick_min + 1
if tick_len > 6 and tick_len % 2 == 0:
self.tick_max += 1
tick_len = self.tick_max - self.tick_min + 1
if tick_len > 6:
self.tick_list = list(range(self.tick_min, self.tick_max + 1, 2))
else:
self.tick_list = list(range(self.tick_min, self.tick_max + 1))
tick_range_before = [i for i in self.tick_list if i in tick_dict]
tick_range_after = [tick_dict[tick] for tick in tick_range_before]
plt.yticks(tick_range_before, tick_range_after)
def set_xticks(self):
tick_list = list(range(0, self.iterations + 1, self.iterations // 4))
tick_range_before = [i for i in tick_list]
tick_range_after = [i for i in tick_list]
if tick_range_after[-1] > 1000:
tick_range_after = [i // 1000 for i in tick_range_after]
self.xlabel_unit = "$\\times10^3$"
plt.xticks(tick_range_before, tick_range_after)
def set_lim(self):
plt.ylim(self.ymin, self.ymax)
train_class_list = []
test_class_list = []
plot_class_list = []
def train(cls):
train_class_list.append(cls)
return cls
def test(cls):
test_class_list.append(cls)
return cls
def plot(cls):
plot_class_list.append(cls)
return cls
@train
class PlotDCFRExampleCompare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "NFG-1"
self.save_name = "NFG-1"
self.tick_min = -3
self.tick_max = 5
super().__init__("NFG-1", legend=True, iterations=1000, print_freq=20)
def get_legend_name_by_algorithm(self, algorithm_name):
legend_name_dict = {
"CFR": "CFR",
"CFRPlus": "CFR+",
"LinearCFR": "Linear CFR",
"DCFR": "DCFR",
# "DDCFR": "DDCFR",
# "DCFRPlus": "DCFR+",
"DCFRPlus": "Learned CFR\nVariant",
}
return legend_name_dict[algorithm_name]
@train
class PlotRPS3Compare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "NFG-2"
self.save_name = "NFG-2"
self.tick_min = -4
self.tick_max = 4
super().__init__("NFG-2", legend=False, iterations=1000, print_freq=20)
@train
class PlotSmallValueCompare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "NFG-3"
self.save_name = "NFG-3"
self.tick_min = -10
self.tick_max = 0
super().__init__("NFG-3", legend=True, iterations=1000, print_freq=20)
@train
class PlotMoreActionCompare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "NFG-4"
self.save_name = "NFG-4"
self.tick_min = -5
self.tick_max = 3
super().__init__("NFG-4", legend=False, iterations=1000, print_freq=20)
@train
class PlotKuhnPokerCompare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "Kuhn Poker"
self.save_name = "Kuhn_Poker"
self.tick_min = -5
self.tick_max = 0
super().__init__("kuhn_poker", legend=False, iterations=1000, print_freq=20)
@train
class PlotLiarsDice13Compare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "Liar's Dice (3)"
self.save_name = "Liars_Dice_3"
self.tick_min = -8
self.tick_max = 0
super().__init__("liars_dice_1n_3s", legend=False, iterations=1000, print_freq=20)
@train
class PlotLiarsDice14T100Compare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "Liar's Dice (4)"
self.save_name = "Liars_Dice_4"
self.tick_min = -4
self.tick_max = 0
super().__init__("liars_dice_1n_4s", legend=False, iterations=100, print_freq=2)
@train
class PlotGoofspiel3ImpDecCompare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "Goofspiel (3)"
self.save_name = "Goofspiel_3"
self.tick_min = -9
self.tick_max = 1
super().__init__("goofspiel_3", legend=False, iterations=1000, print_freq=20)
# @plot
@test
class PlotGoofspiel4ImpDecCompare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "Goofspiel (4)"
self.save_name = "Goofspiel_4"
self.tick_min = -7
self.tick_max = 1
super().__init__(
"goofspiel_4", legend=True, iterations=20000, print_freq=100
)
@test
class PlotLeducPokerCompare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "Leduc Poker"
self.save_name = "Leduc_Poker"
self.tick_min = -6
self.tick_max = 2
super().__init__("leduc_poker", legend=False, iterations=20000, print_freq=100)
@test
class PlotSubgame3Compare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "HUNL Subgame (3)"
self.save_name = "Subgame3"
self.tick_min = -5
self.tick_max = 3
super().__init__("subgame3", legend=False, iterations=20000, print_freq=100)
@test
class PlotSubgame4Compare(PlotAlgorithmCompare):
def __init__(self):
self.title_name = "HUNL Subgame (4)"
self.save_name = "Subgame4"
self.tick_min = -6
self.tick_max = 2
super().__init__("subgame4", legend=False, iterations=20000, print_freq=100)
for run_class in train_class_list:
run_class().run()
for run_class in test_class_list:
run_class().run()
# for run_class in plot_class_list:
# run_class().run()
| StarcoderdataPython |
6455594 | <filename>statscraper/scrapers/work_injury_scraper.py
# encoding: utf-8
""" A scraper to fetch Swedish work injury stats from
http://webbstat.av.se
This is an example of a scraper using Selenium.
TODO: Move some useful functionality to a SeleniumFirefoxScraper
To change download location:
export STATSCRAPER_TEMPDIR="/path/to/temp/dir"
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
from statscraper import BaseScraper, Collection, Dataset, Result, Dimension
import os
from glob import iglob
from time import sleep
from uuid import uuid4
from xlrd import open_workbook
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
DEFAULT_TEMPDIR = "./tmp"
TEMPDIR_ENVVAR = "STATSCRAPER_TEMPDIR"
PAGELOAD_TIMEOUT = 90 # seconds
class WorkInjuries(BaseScraper):
tempdir = "./tmp"
@BaseScraper.on("init")
def initiate_browser(self):
# Create a unique tempdir for downloaded files
tempdir = os.getenv(TEMPDIR_ENVVAR, DEFAULT_TEMPDIR)
tempsubdir = uuid4().hex
# TODO: Remove this directory when finished!
self.tempdir = os.path.join(tempdir, tempsubdir)
try:
# Try and create directory before checking if it exists,
# to avoid race condition
os.makedirs(self.tempdir)
except OSError:
if not os.path.isdir(self.tempdir):
raise
profile = webdriver.FirefoxProfile()
# Set download location, avoid download dialogues if possible
# Different settings needed for different Firefox versions
# This will be a long list...
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.manager.closeWhenDone', True)
profile.set_preference('browser.download.dir', self.tempdir)
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/octet-stream;application/vnd.ms-excel")
profile.set_preference("browser.helperApps.alwaysAsk.force", False)
profile.set_preference("browser.download.manager.useWindow", False)
self.browser = webdriver.Firefox(profile)
self.browser.get('http://webbstat.av.se')
detailed_cls = "Document_TX_GOTOTAB_Avancerad"
""" The button for expanded detailed options. This
also happens to be a good indicator as to whether
all content is loaded.
"""
# Wait for a content element, and 3 extra seconds just in case
WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
.until(EC.presence_of_element_located((By.CLASS_NAME,
detailed_cls)))
self.browser.implicitly_wait(3)
self.browser\
.find_element_by_class_name(detailed_cls)\
.find_element_by_tag_name("td")\
.click()
# Wait for a content element, and 3 extra seconds just in case
WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
.until(EC.presence_of_element_located((By.CLASS_NAME,
detailed_cls)))
self.browser.implicitly_wait(3)
@BaseScraper.on("select")
def switch_dataset(self, id_):
(c, r, p) = self.current_item.blob
# Select collection
xpath = "//div[@title='%s']" % c
# `c` can be either "Arbetsolycka" or "Arbetssjukdom"
button = self.browser.find_element_by_xpath(xpath)
button.click()
# select Kommun or Län
xpath = '//div[@class="QvContent"]/div[@class="QvGrid"]//div[@title="Visa tabell per:"]'
self.browser\
.find_element_by_xpath(xpath)\
.click()
region = "Kommun" if r == "kommun" else "Län"
xpath = "//div[@class='QvListbox']//div[@title='%s']" % region
self.browser\
.find_element_by_xpath(xpath)\
.click()
# select Månad or År
xpath = '//div[@class="QvContent"]/div[@class="QvGrid"]//div[@title="Tidsenhet:"]'
self.browser\
.find_element_by_xpath(xpath)\
.click()
period = "Månad" if p == u"månad" else "År och månad"
xpath = "//div[@class='QvListbox']//div[@title='%s']" % period
self.browser\
.find_element_by_xpath(xpath)\
.click()
def _fetch_dimensions(self, dataset):
""" Declaring available dimensions like this is not mandatory,
but nice, especially if they differ from dataset to dataset.
If you are using a built in datatype, you can specify the dialect
you are expecting, to have values normalized. This scraper will
look for Swedish month names (e.g. 'Januari'), but return them
according to the Statscraper standard ('january').
"""
yield Dimension(u"region",
label="municipality or county",
datatype="region",
dialect="arbetsmiljoverket")
yield Dimension(u"period",
label="Year or month")
def _fetch_itemslist(self, item):
""" We define two collection:
- Number of work injuries ("Arbetsolycka")
- Number of workrelated diseases ("Arbetssjukdom")
Each contains four datasets:
- Per municipality and year
- Per county and year
- Per municipality and month
- Per municipality and year
"""
if item.is_root:
for c in ["Arbetsolycka", "Arbetssjukdom"]:
yield Collection(c, blob=(c, None, None))
else:
c = item.id
for r in [u"kommun", u"län"]:
for p in [u"år", u"månad"]:
yield Dataset(u"%s-%s-%s" % (c, r, p),
blob=(c, r, p),
label=u"%s, antal per %s och %s" % (c, r, p))
def _fetch_data(self, dataset, query=None):
(c, r, p) = dataset.blob
self.browser\
.find_element_by_xpath("//div[@title='Skicka till Excel']")\
.click()
# Press enter trice in case of any prompts
actions = ActionChains(self.browser)
actions.send_keys(Keys.RETURN)
actions.send_keys(Keys.RETURN)
actions.send_keys(Keys.RETURN)
actions.perform()
# Wait for download
i = 0
while not os.listdir(self.tempdir):
sleep(1)
i += 1
if i > PAGELOAD_TIMEOUT:
# TODO: Use a suitable basescraper exception
raise Exception("Download timed out")
sleep(20) # TODO: We need to check that the file is complete.
# Something like this:
# https://stackoverflow.com/questions/35891393/how-to-get-file-download-complete-status-using-selenium-web-driver-c-sharp#35892347
# WARNING: Assuming the latest downloaded xls to be our file.
# This is obviously not 100 % water proof.
latest_download = max(iglob(os.path.join(self.tempdir, "*.xls")),
key=os.path.getctime)
workbook = open_workbook(latest_download)
sheet = workbook.sheet_by_index(0)
periods = sheet.row_values(0)[2:-1]
periods = [int(x) for x in periods]
for n in range(1, sheet.nrows):
row = sheet.row_values(n)
region = row.pop(0)
row.pop(0) # empty due to merged cells
if region == "Total":
break
i = 0
for col in row[:-1]:
yield Result(
int(col),
{
"region": region,
"period": periods[i],
}
)
| StarcoderdataPython |
12863017 | <gh_stars>1-10
"""
Distributed under the MIT License. See LICENSE.txt for more info.
"""
from django import template
register = template.Library()
@register.filter
def get_item(dictionary, key):
"""
Returns the object for that key from a dictionary.
:param dictionary: A dictionary object
:param key: Key to search for
:return: Object that corresponds to the key in the dictionary
"""
return dictionary.get(key, None)
@register.filter
def field_type(field):
"""
Returns the field type of an input
:param field: input field
:return: string representing the class name
"""
return field.field.widget.__class__.__name__
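# Hedged template usage sketch (variable names are illustrative):
#   {{ my_dict|get_item:some_key }}
#   {{ form.my_field|field_type }}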
| StarcoderdataPython |
9778189 | import unittest
from datetime import datetime
import tempfile
import netCDF4 as nc
import os
from ncagg.attributes import (
StratFirst,
StratLast,
StratUniqueList,
StratIntSum,
StratFloatSum,
StratAssertConst,
)
from ncagg.attributes import (
StratDateCreated,
StratStatic,
StratTimeCoverageStart,
StratTimeCoverageEnd,
)
from ncagg.attributes import (
StartFirstInputFilename,
StartLastInputFilename,
StratCountInputFiles,
)
from ncagg import Config
from ncagg.attributes import datetime_format
test_dir = os.path.dirname(os.path.realpath(__file__))
test_input_file = os.path.join(
test_dir,
"data/OR_MAG-L1b-GEOF_G16_s20170431500000_e20170431500599_c20170431501005.nc",
)
class TestAttributeStrategies(unittest.TestCase):
def setUp(self):
# having two seconds is on purpose to test the unique list
self.mock_str_attributes = ["first", "second", "second", "third"]
self.mock_int_attributes = [1, 2, 2, 3]
self.mock_float_attributes = [1.1, 2.2, 2.3, 3.3]
self.test_nc = nc.Dataset(test_input_file)
self.handler_kwargs = {"config": Config.from_nc(test_input_file)}
def test_strat_first_gives_first(self):
process, finalize = StratFirst.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "first")
def test_strat_last_gives_last(self):
process, finalize = StratLast.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "third")
def test_strat_unique_list(self):
process, finalize = StratUniqueList.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "first, second, third")
def test_int_sum(self):
process, finalize = StratIntSum.setup_handler(**self.handler_kwargs)
for attr in self.mock_int_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), sum(self.mock_int_attributes))
def test_float_sum(self):
process, finalize = StratFloatSum.setup_handler(**self.handler_kwargs)
for attr in self.mock_float_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), sum(self.mock_float_attributes))
def test_assert_const_fails_nonconst(self):
process, finalize = StratAssertConst.setup_handler(**self.handler_kwargs)
with self.assertRaises(AssertionError):
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "first")
def test_assert_const_pass_consts(self):
process, finalize = StratAssertConst.setup_handler(**self.handler_kwargs)
for attr in ["const", "const", "const"]:
process(attr)
self.assertEqual(finalize(self.test_nc), "const")
def test_date_created_close(self):
process, finalize = StratDateCreated.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
# since both of these date time strings may not be created exactly at the same time,
# only check to make sure they are mostly the same, it's ok if there is some difference
# in the last milliseconds piece.
self.assertEqual(
finalize(self.test_nc)[:-3], datetime_format(datetime.now())[:-3]
)
def test_strat_first_filename(self):
process, finalize = StartFirstInputFilename.setup_handler(**self.handler_kwargs)
process("test", self.test_nc)
self.assertIn(".nc", finalize(self.test_nc))
def test_strat_static(self):
# set the config for a static "license" attribute...
value = "Hello world"
self.handler_kwargs["config"].attrs["license"] = {
"name": "license",
"strategy": "static",
"value": value,
}
process, finalize = StratStatic.setup_handler(
name="license", **self.handler_kwargs
)
process("test", self.test_nc)
self.assertEqual(value, finalize(self.test_nc))
| StarcoderdataPython |
20217 | <filename>gamestate-changes/change_statistics/other/rectangleAnimation.py<gh_stars>1-10
# https://stackoverflow.com/questions/31921313/matplotlib-animation-moving-square
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import animation
x = [0, 1, 2]
y = [0, 10, 20]
y2 = [40, 30, 20]
colors = ['r','b','g','orange']
fig = plt.figure()
plt.axis('equal')
plt.grid()
ax = fig.add_subplot(111)
ax.set_xlim(-100, 100)
ax.set_ylim(-100, 100)
patch1 = patches.Rectangle((0, 0), 0, 0, fill=False, edgecolor=colors[0])
patch1.set_width(21)
patch1.set_height(21)
patch2 = patches.Rectangle((0, 0), 0, 0, fill=False, edgecolor=colors[1])
patch2.set_width(21)
patch2.set_height(21)
def init():
ax.add_patch(patch1)
ax.add_patch(patch2)
return patch1, patch2,
def animate(i):
patch1.set_xy([x[i], y[i]])
patch2.set_xy([x[i], y2[i]])
return patch1, patch2,
anim = animation.FuncAnimation(fig, animate,
init_func=init,
frames=len(x),
interval=500,
blit=True)
plt.show()
| StarcoderdataPython |
9680799 | <filename>tests/integration/test_charm.py
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
import base64
import json
import logging
import re
from pathlib import Path
import pytest
import requests
import tenacity
import yaml
from lightkube import Client
from lightkube.resources.core_v1 import Service
from pytest_operator.plugin import OpsTest
logger = logging.getLogger(__name__)
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
APP_NAME = METADATA["name"]
async def _get_password(ops_test: OpsTest) -> str:
unit = ops_test.model.applications[APP_NAME].units[0]
action = await unit.run_action("get-admin-password")
action = await action.wait()
return action.results["admin-password"]
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test: OpsTest):
"""Build the charm-under-test and deploy it together with related charms.
Assert on the unit status before any relations/configurations take place.
"""
# build and deploy charm from local source folder
charm = await ops_test.build_charm(".")
resources = {"zinc-image": METADATA["resources"]["zinc-image"]["upstream-source"]}
await ops_test.model.deploy(charm, resources=resources, application_name=APP_NAME)
# issuing dummy update_status just to trigger an event
await ops_test.model.set_config({"update-status-hook-interval": "10s"})
await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1000)
assert ops_test.model.applications[APP_NAME].units[0].workload_status == "active"
# effectively disable the update status from firing
await ops_test.model.set_config({"update-status-hook-interval": "60m"})
@pytest.mark.abort_on_fail
async def test_application_is_up(ops_test: OpsTest):
status = await ops_test.model.get_status() # noqa: F821
address = status["applications"][APP_NAME]["units"][f"{APP_NAME}/0"]["address"]
url = f"http://{address}:4080"
logger.info("querying unit address: %s", url)
response = requests.get(url)
assert response.status_code == 200
@pytest.mark.abort_on_fail
async def test_get_admin_password_action(ops_test: OpsTest):
password = await _get_password(ops_test)
assert re.match("[A-Za-z0-9]{24}", password)
@tenacity.retry(
wait=tenacity.wait_exponential(multiplier=2, min=1, max=30),
stop=tenacity.stop_after_attempt(10),
reraise=True,
)
async def test_application_service_port_patch(ops_test: OpsTest):
# Check the port has actually been patched
client = Client()
svc = client.get(Service, name=APP_NAME, namespace=ops_test.model_name)
assert svc.spec.ports[0].port == 4080
# Now try to actually hit the service
status = await ops_test.model.get_status() # noqa: F821
address = status["applications"][APP_NAME].public_address
url = f"http://{address}:4080"
logger.info("querying app address: %s", url)
response = requests.get(url)
assert response.status_code == 200
async def test_can_auth_with_zinc(ops_test: OpsTest):
# Now try to actually hit the service
status = await ops_test.model.get_status() # noqa: F821
address = status["applications"][APP_NAME].public_address
# Some data to populate
data = {
"Athlete": "DEMTSCHENKO, Albert",
"City": "Turin",
"Country": "RUS",
"Discipline": "Luge",
"Event": "Singles",
"Gender": "Men",
"Medal": "Silver",
"Season": "winter",
"Sport": "Luge",
"Year": 2006,
}
# Encode the credentials for the API using the password from the charm action
password = await _get_password(ops_test)
creds = base64.b64encode(bytes(f"admin:{password}", "utf-8")).decode("utf-8")
# We're going to send some data to the "games" index
res = requests.put(
url=f"http://{address}:4080/api/games/document",
headers={"Content-type": "application/json", "Authorization": f"Basic {creds}"},
data=json.dumps(data),
)
results = res.json()
assert res.status_code == 200
assert "id" in results.keys()
logger.info("successfully queried the Zinc API, got response: '%s'", str(res.json()))
| StarcoderdataPython |
40500 | <reponame>jbargu/imagetagger
from django import forms
from imagetagger.annotations.models import AnnotationType
class AnnotationTypeCreationForm(forms.ModelForm):
class Meta:
model = AnnotationType
fields = [
'name',
'active',
'node_count',
'vector_type',
'enable_concealed',
'enable_blurred',
]
class AnnotationTypeEditForm(forms.ModelForm):
class Meta:
model = AnnotationType
fields = [
'name',
'active',
'enable_concealed',
'enable_blurred',
]
| StarcoderdataPython |
375412 | import math
import pygame
from utils import *
from sprite import MySprite
class Story():
def __init__(self, screen):
self.screen = screen
# Load all background images
self.bg_dragon_castle = MySprite('pictures/dragon_castle.jpg')
self.bg_lock_sword = MySprite('pictures/sword_lockpick.jpg')
self.bg_goblin = MySprite('pictures/goblin.jpg')
self.bg_tower = MySprite('pictures/tower.jpg', scale_box=(0,0,math.floor(B_WIDTH/2), B_HEIGHT))
self.bg_dungeon = MySprite('pictures/dungeon.jpg', scale_box=(math.floor(B_WIDTH/2),0,math.floor(B_WIDTH/2), B_HEIGHT))
#self.bg_tower_dungeon = MySprite('pictures/tower_dungeon.jpg') #
self.bg_princess_door = MySprite('pictures/princess_door.jpg') #
self.bg_princess_room = MySprite('pictures/dragon_fight.jpg')
self.bg_castle_princess = MySprite('pictures/castle_princess.jpg')
self.bg_victory_castle = MySprite('pictures/lonely_castle.jpg')
self.bg_victory_princess = MySprite('pictures/lonely_princess.jpg')
self.reset_choices()
self.story_loop('')
self.valid_text = False
# Rectangle to cover up text for each story
self.text_rect = pygame.Rect(0, TEXT_Y, S_WIDTH, 300)
def reset_choices(self):
self.lockpick = False
self.sword = False
self.pinky = True
self.tower = False
self.dungeon = False
self.fight = False
self.sneak = False
self.castle = False
self.princess = False
self.story_generator = self._story_generator()
def story_loop(self, txt):
"""
Wrapper for story generator
:param txt:
:return: True if story continues. False when finished
"""
self.input_text = txt.lower()
self.valid_text = False
try:
next(self.story_generator)
except StopIteration:
return False
return True
def _story_generator(self):
"""
Story loop of game, a generator that requires valid text to continue
Input text is read from self.input_text, which story_loop() sets before advancing.
"""
# Music
pygame.mixer.music.load('audio/Magistar.mp3')
pygame.mixer.music.play(-1)
##############################################################
# Quest stage
##############################################################
self.valid_text = True
self.input_text = ''
self.screen.fill((0, 0, 0))
self.bg_dragon_castle.draw(self.screen)
txt = 'A princess has been captured by a dragon. The king has offered a castle as a reward for rescuing her.\n' +\
'This is a typing game, type with keyboard and hit enter to continue. Escape will quit'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
##############################################################
# Equipment stage
##############################################################
self.valid_text = True
self.input_text = ''
self.screen.fill((0,0,0))
self.bg_lock_sword.draw(self.screen)
txt = 'Choose your equipment for the adventure (lockpick[l] or sword[s])'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
valid_text = False
while not valid_text:
if not self.input_text:
yield
elif self.input_text[0] == 'l':
self.lockpick = True
pygame.mixer.Channel(0).play(pygame.mixer.Sound('audio/lockpick.mp3'))
valid_text = True
elif self.input_text[0] == 's':
pygame.mixer.Channel(0).play(pygame.mixer.Sound('audio/sword.mp3'))
self.sword = True
valid_text = True
else:
#print('You must enter "lockpick" or "sword"')
yield
##############################################################
# Goblin stage
##############################################################
self.valid_text = True
self.input_text = ''
self.screen.fill((0,0,0))
self.bg_goblin.draw(self.screen)
pygame.mixer.music.load('audio/Darkling.mp3')
pygame.mixer.music.play(-1)
txt = "You approach the dragon's castle, a goblin guards the entrance.\n" + \
"Do you fight the goblin or sneak past? (fight[f] or sneak[s]): "
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
valid_text = False
while not valid_text:
if not self.input_text:
yield
elif self.input_text[0] == 'f':
self.fight = True
valid_text = True
elif self.input_text[0] == 's':
self.sneak = True
valid_text = True
else:
#print('You must enter "fight" or "sneak"')
yield
self.valid_text = True
# Clear text
pygame.draw.rect(self.screen, (0,0,0), self.text_rect)
if self.fight:
if self.sword:
txt = 'You defeat the goblin with your sword'
else:
txt = 'You have no sword! While fighting, the goblin manages to nibble your pinky off'
pygame.mixer.Channel(0).play(pygame.mixer.Sound('audio/pinky.mp3'))
self.pinky = False
if self.sneak:
if self.sword:
txt = 'The goblin catches a glimpse of your big shiny sword! While fighting, the goblin manages to nibble your pinky off'
pygame.mixer.Channel(0).play(pygame.mixer.Sound('audio/pinky.mp3'))
self.pinky = False
else:
txt = 'You manage to sneak past the goblin'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
##############################################################
# Tower Dungeon stage
##############################################################
self.valid_text = True
self.input_text = ''
self.screen.fill((0,0,0))
self.bg_tower.draw(self.screen)
self.bg_dungeon.draw(self.screen)
txt = 'You enter the castle, do you think the princess is in the tower or the dungeon? (tower[t] or dungeon[d]):'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
valid_text = False
while not valid_text:
if not self.input_text:
yield
elif self.input_text[0] == 't':
self.tower = True
valid_text = True
elif self.input_text[0] == 'd':
self.dungeon = True
valid_text = True
else:
yield
##############################################################
# Princess door stage
##############################################################
self.valid_text = True
self.input_text = ''
self.screen.fill((0,0,0))
self.bg_princess_door.draw(self.screen)
txt = 'Hooray! You found the princess door, but there is a code:'
give_up = False
if self.lockpick:
txt = txt + '\nThankfully you got through the door with the lockpick!'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
else:
txt = txt + '\n6 x Pineapple = 24\nPineapple x Apple = 32\nWhat does apple = ?'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
valid_text = False
tries = 0
while not valid_text:
if not self.input_text:
yield
elif self.input_text == '8' or self.input_text == 'eight':
valid_text = True
else:
if tries < 3:
render_text_centered(txt + '\nIncorrect! Try again', self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
elif tries < 10:
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
render_text_centered(txt + '\nIncorrect! Try again (Hint: Pineapple = 4)', self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
else:
# Let them through after 10 tries
print('You give up and break the door with your sword. Your sword breaks')
valid_text = True
give_up = True
tries = tries + 1
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
if give_up:
txt = 'You give up and break the door with your sword. Your sword breaks'
self.sword = False
else:
txt = ''
self.valid_text = True
self.input_text = ''
self.screen.fill((0,0,0))
self.bg_princess_room.draw(self.screen)
pygame.mixer.music.load('audio/BurntSpirit.mp3')
pygame.mixer.music.play(-1)
txt = txt + "\nYou enter the room and it turns out that the dragon is guarding the princess himself!"
if self.sword:
txt = txt + '\nThe dragon breathes fire and melts your sword!'
self.sword = False
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
##############################################################
# Princess Room stage
##############################################################
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
txt = 'The dragon whips his tail at you. The tail is 5 meters long and you are 3 meters away.\n' + \
'How far back do you walk (in meters)?'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
valid_text = False
while not valid_text:
            if not self.input_text:
                yield
                continue
            try:
number = float(self.input_text)
if number > 3:
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
render_text_centered(txt + '\nYou stepped too far back and fell onto the stairs. Try again', self.screen, TEXT_X,
TEXT_Y, S_WIDTH)
pygame.mixer.Channel(0).play(pygame.mixer.Sound('audio/down_stairs.mp3'))
yield
elif number > 2:
txt = 'You dodged it, great job!'
valid_text = True
elif number == 2:
# Clear text
txt = 'Wow, that was close, but you dodged it, great job!'
valid_text = True
else:
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
render_text_centered(txt + '\nOh no! You got smacked to the ground!', self.screen, TEXT_X,
TEXT_Y, S_WIDTH)
yield
except:
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
render_text_centered(txt + '\nIs that even a number??', self.screen, TEXT_X,
TEXT_Y, S_WIDTH)
yield
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
txt = txt + '\nThe dragon was about to step on you when you sneak under him!'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
##############################################################
# Castle Princess stage
##############################################################
self.valid_text = True
self.input_text = ''
self.screen.fill((0,0,0))
self.bg_castle_princess.draw(self.screen)
pygame.mixer.music.load('audio/MidnightTale.mp3')
pygame.mixer.music.play(-1)
        txt = 'You get to the princess and she says "thank you for saving me" with a sparkle in her eye\nDo you wish to return her to the king for a castle or run away and fall in love with her? (castle[c] or princess[p]): '
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
valid_text = False
while not valid_text:
if not self.input_text:
yield
elif self.input_text[0] == 'c':
self.castle = True
valid_text = True
elif self.input_text[0] == 'p':
self.princess = True
valid_text = True
else:
yield
##############################################################
# Final stage
##############################################################
self.valid_text = True
self.input_text = ''
self.screen.fill((0,0,0))
if self.castle:
self.bg_victory_castle.draw(self.screen)
txt = 'You take the princess to the king and he gives you a castle, but it is VERY lonely'
else:
self.bg_victory_princess.draw(self.screen)
txt = 'You get married to the princess, but she complains that you do not have a castle'
if not self.pinky:
txt = txt + '\nAND you do not have a pinky'
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield
# Clear text
pygame.draw.rect(self.screen, (0, 0, 0), self.text_rect)
txt = 'Thanks for playing!\n'
if self.lockpick:
txt = txt + 'See if you can win with the sword\n'
self.reset_choices()
else:
txt = txt + 'See if you can win with the lockpicks\n'
self.reset_choices()
render_text_centered(txt, self.screen, TEXT_X, TEXT_Y, S_WIDTH)
yield | StarcoderdataPython |
240233 | # -*- encoding: utf-8 -*-
'''
@Time : 2018-3-19
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc : ROI and flood fill
'''
# here put the import lib
import cv2 as cv
import numpy as np
def roi_demo(src):
    # ROI operation ----------- ROI (Region of Interest)
    face = src[100:310, 170:400]  # Crop out the face region; these coordinates were picked by hand
    # Slice format: [row start:row end, column start:column end]
    cv.imshow('select_face', face)  # Show the cropped region
    # Grayscale the face; the colour information is lost, leaving a single black-and-white channel.
    gray_face = cv.cvtColor(face, cv.COLOR_BGR2GRAY)
    gray_face = cv.cvtColor(gray_face, cv.COLOR_GRAY2BGR)  # Convert the image back to 3 colour channels.
    # Why convert back to 3 channels:
    # after grayscaling only one channel remains, which cannot be assigned directly into a 3-channel
    # image, so it has to be converted back to 3 channels before overwriting the region.
src[100:310, 170:400] = gray_face
cv.imshow('after_src', src)
def fill_color_demo(image):
copyImage = image.copy()
h, w = image.shape[:2]
    mask = np.zeros([h+2, w+2], np.uint8)  # The +2 padding is required by floodFill's mask convention.
cv.floodFill(copyImage, mask, (30, 30), (0, 255, 255),
(100, 100, 100), (30, 30, 30), cv.FLOODFILL_FIXED_RANGE)
    # (30, 30): seed point - flooding starts here and checks whether neighbouring pixels qualify
    # (0, 255, 255): the fill colour (yellow here)
    # (100, 100, 100): loDiff - maximal lower brightness/colour difference used when selecting pixels
    # (30, 30, 30): upDiff - maximal upper brightness/colour difference used when selecting pixels
    # cv.FLOODFILL_FIXED_RANGE: choice of fill mode
cv.imshow('fill_color_demo', copyImage)
src = cv.imread(
'example/0_Basic_usage_of_the_library/openCV/picture/angle2.jpg')
cv.imshow('src1', src)
roi_demo(src)
fill_color_demo(src)
cv.waitKey(0)
cv.destroyAllWindows()
| StarcoderdataPython |
3420590 | <reponame>Wolodija/aucote
from unittest.mock import patch, MagicMock, call
from cpe import CPE
from os import path
from tornado.concurrent import Future
from tornado.httpclient import HTTPError, HTTPRequest, HTTPResponse
from tornado.testing import gen_test, AsyncTestCase
from fixtures.exploits import Exploit
from structs import Port, Node, Scan, TransportProtocol, Service, PhysicalPort, ScanContext, Vulnerability
from tools.cve_search.exceptions import CVESearchApiException
from tools.cve_search.structs import CVESearchVulnerabilityResults, CVESearchVulnerabilityResult
from tools.cve_search.tasks import CVESearchServiceTask
from utils import Config
future = Future()
future.set_result(True)
@patch('utils.http_client.gen.sleep', MagicMock(return_value=future))
class CVESearchServiceTaskTest(AsyncTestCase):
@patch('tools.cve_search.tasks.cfg', new_callable=Config)
def setUp(self, cfg):
super(CVESearchServiceTaskTest, self).setUp()
cfg._cfg = {
'tools': {
'cve-search': {
'api': 'localhost:200'
}
}
}
self.example_output = ''
with open(path.join(path.dirname(path.abspath(__file__)), 'example_output.json'), 'rb') as f:
self.example_output = f.read()
self.node = Node(ip='127.0.0.1', node_id=None)
self.port = Port(node=self.node, transport_protocol=TransportProtocol.TCP, number=22)
self.port.service_name = 'ssh'
self.port.scan = Scan()
self.port.service = Service()
self.app = Service()
self.app_2 = Service()
self.app.cpe = 'cpe:/a:microsoft:iexplorer:8.0.6001:beta'
self.app_2.cpe = 'cpe:/a:microsoft:aexplorer:8.0.6001:beta'
self.cpe_txt = 'cpe:/a:microsoft:internet_explorer:8.0.6001:beta'
self.os_cpe_txt = 'cpe:/o:a:b:4'
self.cpe_without_version = 'cpe:/o:cisco:ios'
self.node.os.cpe = self.os_cpe_txt
self.port.service.cpe = self.cpe_txt
self.exploit = Exploit(exploit_id=1337, name='cve-search', app='cve-search')
self.aucote = MagicMock()
self.context = ScanContext(aucote=self.aucote, scanner=MagicMock(scan=Scan()))
self.task = CVESearchServiceTask(context=self.context, port=self.port, exploits=[self.exploit])
self.vuln_1 = Vulnerability(port=self.port, exploit=self.exploit, cve='CVE-2016-8612', cvss=3.3,
output='test summary 1',
context=self.context, subid=0)
self.vuln_2 = Vulnerability(port=self.port, exploit=self.exploit, cve='CVE-2017-9798', cvss=5.0,
output='test summary 2',
context=self.context, subid=1)
self.vuln_3 = Vulnerability(port=self.port, exploit=self.exploit, cve='CVE-2017-9788', cvss=6.4,
output='test summary 3',
context=self.context, subid=2)
def test_init(self):
self.assertEqual(self.task.api, 'localhost:200')
@patch('tools.cve_search.tasks.HTTPClient')
@gen_test
async def test_api_cvefor(self, mock_http,):
json_data = '{"test_key": "test_value"}'
response = HTTPResponse(code=200, buffer='', request=HTTPRequest('test_url'))
mock_get = mock_http.instance.return_value.get
mock_get.return_value = Future()
mock_get.return_value.set_result(response)
response._body = json_data.encode()
service = Service()
service.cpe = self.cpe_txt
expected = {'test_key': 'test_value'}
result = await self.task.api_cvefor(service.cpe)
self.assertEqual(result, expected)
mock_get.assert_called_once_with('localhost:200/cvefor/cpe%3A2.3%3Aa%3Amicrosoft%3Ainternet_explorer%3A8.0.6001%3Abeta%3A%2A%3A%2A%3A%2A%3A%2A%3A%2A%3A%2A',)
@patch('tools.cve_search.tasks.HTTPClient')
@gen_test
async def test_api_cvefor_http_error(self, mock_http):
mock_http.instance().get.side_effect = HTTPError(code=404)
service = Service()
service.cpe = self.cpe_txt
with self.assertRaises(CVESearchApiException):
await self.task.api_cvefor(service.cpe)
@patch('tools.cve_search.tasks.HTTPClient')
@gen_test
async def test_api_unavailable(self, mock_http):
mock_http.instance().get.side_effect = ConnectionError()
service = Service()
service.cpe = self.cpe_txt
with self.assertRaises(CVESearchApiException):
await self.task.api_cvefor(service.cpe)
def test_get_vulnerabilities(self):
results = CVESearchVulnerabilityResults()
vulnerability_1 = MagicMock()
vulnerability_1.summary = 'test_vuln'
results.vulnerabilities = (vulnerability_1, )
result = self.task.get_vulnerabilities(results=results)
self.assertEqual(result[0].exploit, self.exploit)
self.assertEqual(result[0].port, self.port)
self.assertEqual(result[0].output, 'test_vuln')
def test_get_vulnerabilities_duplicated(self):
results = CVESearchVulnerabilityResults()
result_1 = CVESearchVulnerabilityResult(cwe='CVE-2016-435')
result_1.summary = 'test_vuln'
results.vulnerabilities = (result_1, )
vuln_1 = Vulnerability(cve='CVE-2016-435', output='test_vuln')
self.aucote.storage.get_vulnerabilities.return_value = (vuln_1, )
result = self.task.get_vulnerabilities(results=results)
self.assertEqual(result, [])
@gen_test
async def test_call_with_port_without_cpe(self):
self.port.service = Service()
self.task.api_cvefor = MagicMock(return_value=Future())
self.task.api_cvefor.return_value.set_result(True)
await self.task()
self.assertFalse(self.task.api_cvefor.called)
@gen_test
async def test_call_with_cpe_without_version(self):
self.port.service = Service()
self.port.service.cpe = self.cpe_without_version
self.task.api_cvefor = MagicMock()
await self.task()
self.assertFalse(self.task.api_cvefor.called)
@gen_test
async def test_call_without_results(self):
self.task.api_cvefor = MagicMock(return_value=Future())
self.task.api_cvefor.return_value.set_result([])
self.task.store_vulnerability = MagicMock()
await self.task()
self.assertFalse(self.task.store_vulnerability.called)
@patch('structs.time.time', MagicMock(return_value=13))
@patch('tools.cve_search.tasks.HTTPClient')
@gen_test
async def test_call(self, http_client):
response = MagicMock()
response.body = self.example_output
http_client.instance().get.return_value = Future()
http_client.instance().get.return_value.set_result(response)
self.task.store_vulnerability = MagicMock()
await self.task()
self.task.store_vulnerability.assert_has_calls((
call(self.vuln_1),
call(self.vuln_2),
call(self.vuln_3),
), any_order=True)
def test_get_node_cpe(self):
self.task._port = PhysicalPort(node=self.node)
cpe = self.task.get_cpes()
self.assertEqual(cpe, [self.node.os.cpe])
def test_get_apache_httpd_cpe(self):
self.task._port.service.cpe = 'cpe:2.3:a:apache:httpd:2.4.18:*:*:*:*:*:*:*'
expected = [CPE('cpe:2.3:a:apache:httpd:2.4.18:*:*:*:*:*:*:*'),
CPE('cpe:2.3:a:apache:http_server:2.4.18:*:*:*:*:*:*:*')]
result = self.task.get_cpes()
self.assertEqual(result, expected)
def test_get_apache_http_server_cpe(self):
self.task._port.service.cpe = 'cpe:2.3:a:apache:http_server:2.4.18:*:*:*:*:*:*:*'
expected = [CPE('cpe:2.3:a:apache:http_server:2.4.18:*:*:*:*:*:*:*'),
CPE('cpe:2.3:a:apache:httpd:2.4.18:*:*:*:*:*:*:*')]
result = self.task.get_cpes()
self.assertEqual(result, expected)
@patch('tools.cve_search.tasks.HTTPClient')
@gen_test
async def test_get_cisco_with_brackets(self, mock_http):
self.task.api = ''
json_data = '{"test_key": "test_value"}'
response = HTTPResponse(code=200, buffer='', request=HTTPRequest('test_url'))
mock_get = mock_http.instance.return_value.get
mock_get.return_value = Future()
mock_get.return_value.set_result(response)
response._body = json_data.encode()
cpe = CPE('cpe:2.3:o:cisco:ios:12.2\(52\)se:*:*:*:*:*:*:*')
expected = '/cvefor/cpe%3A2.3%3Ao%3Acisco%3Aios%3A12.2%25252852%252529se%3A%2A%3A%2A%3A%2A%3A%2A%3A%2A%3A%2A%3A%2A'
await self.task.api_cvefor(cpe)
mock_get.assert_called_once_with(expected)
def test_unique_cpe(self):
cpe_1 = CPE('cpe:2.3:o:cisco:ios:12.2\(52\)se:*:*:*:*:*:*:*')
cpe_2 = CPE('cpe:2.3:o:cisco:ios:12.2\(52\)se:*:*:*:*:*:*:*')
expected = [cpe_1]
result = self.task._unique_cpes([cpe_1, cpe_2])
self.assertCountEqual(result, expected)
@patch('structs.time.time', MagicMock(return_value=13))
@patch('tools.cve_search.tasks.CVESearchParser')
@gen_test
async def test_call_with_api_exception(self, parser):
response = MagicMock()
future = Future()
future.set_result([response])
self.port.apps = [self.app, self.app_2]
self.task.api_cvefor = MagicMock(side_effect=(CVESearchApiException('just test'), future))
self.task.store_vulnerability = MagicMock()
await self.task()
parser.dict_to_results.assert_called_once_with([response])
| StarcoderdataPython |
9736137 | #coding=utf-8
import sys
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
import re
reload(sys)
sys.setdefaultencoding('utf-8')
browser = webdriver.Chrome()
browser.get('https://192.168.0.1')
browser.maximize_window()
browser.implicitly_wait(10)
browser.find_element_by_id('iptUserName').send_keys('Admin')
browser.implicitly_wait(1)
browser.find_element_by_id('iptPassword').send_keys('<PASSWORD>')
browser.find_element_by_id("btnLogin").click()
browser.switch_to_frame('mainFrame')
Select(browser.find_element_by_id("txtBMCIp")).select_by_index(1)
# select_by_index() returns None, so read the chosen value back from the element itself
# (assumes the selected option's value attribute holds the BMC IP).
BMCIP = browser.find_element_by_id("txtBMCIp").get_attribute("value")
#SN = browser.find_element_by_id('txtSequence').text
#pattren = re.compile('<p.*?"txtBMCIp".*?>(\d+.\d+.\d+.\d+)<')
#BMCIP = re.search(pattren.html)
def write_to_file():
    with open('result.txt', 'a') as f:
        f.write(BMCIP + '\n')
write_to_file()
#time.sleep(10)
#browser.quit()
| StarcoderdataPython |
6612834 | <gh_stars>1-10
#! /usr/bin/env python
#
#
# similar to test_FM, but in the official ADMIT environment
# these are meant to be able to run without CASA, ie. in a
# vanilla python environment
#
# you might need to run
# rm ../at/__init__.py ../at/__init__.pyc ; touch ../at/__init__.py
# before, and reset this file using
# dtdGenerator
# if CASA sits in the way
# performance (on nemo2): time test_Flow_5.py > /dev/null
# touch=False touch=True
# 100 ...
# 1000 0.582u 0.096s 0:00.68 98.5% 0.794u 2.267s 0:03.19 95.6%
# 10000 4.004u 0.522s 0:04.57 98.9% 5.401u 22.515s 0:28.56 97.7%
#
#
# (10000,True) is the default bench, thus nemo2 goes in 1:21
# inferno goes in 1:53 (yipes, and /dev/shm didn't help)
# subaru 1:52
#
import sys, os
from admit.AT import AT
import admit.Admit as admit
from admit.at.File_AT import File_AT
from admit.at.Flow1N_AT import Flow1N_AT
if __name__ == '__main__':
n = 3
touch = True
subdir = False
subdir = True
# pick where admit will do its work, any cmdline argument will be the dirname
if len(sys.argv) > 1:
a = admit.Admit(sys.argv[1])
else: # or else the current directory
a = admit.Admit()
print 'Flow11: new admit?',a.new
a1 = File_AT()
i1 = a.addtask(a1)
a1.setkey('file','Flow1N.dat')
a1.setkey('touch',touch)
a2 = Flow1N_AT()
i2 = a.addtask(a2, [(i1,0)])
a2.setkey('n',n)
a2.setkey('touch',touch)
a2.setkey('subdir',subdir)
#
if True:
# continue with a Flow11 for each BDP created by Flow1N
from admit.at.Flow11_AT import Flow11_AT
a.run() # need to run the flow, otherwise #BDP's unknown
n1 = len(a2) # of course n1 = n, but we don't know this
a3 = range(n1) # a list of AT's
i3 = range(n1) # a list of ATID's
for i in range(n1):
a3[i] = Flow11_AT()
i3[i] = a.addtask(a3[i], [(i2,i)])
a3[i].setkey('touch',touch)
#
a.run()
#
a.write()
| StarcoderdataPython |
5141453 | from stable_baselines.acer.acer_simple import ACER
| StarcoderdataPython |
1810880 | import os
import subprocess
from typing import Dict
import boto3
from absl import flags, logging
from xain.helpers import project
from xain.ops.ec2 import user_data
FLAGS = flags.FLAGS
root_dir = project.root()
# Note:
# We actually would like to use the m5.large up to m5.24xlarge
# but AWS is not easily willing to give us the increase without
# asking again and again and again for limit increases.
# Therefore we switched to using c4 instances, which have higher default limits
cores: Dict[int, str] = {
2: "c4.large",
4: "c4.xlarge",
8: "c4.2xlarge",
16: "c4.4xlarge",
32: "c4.8xlarge",
}
def docker(image: str, timeout: int = 300, instance_cores=2, **kwargs):
"""Run train in docker while accepting an arbitrary
number of absl flags to be passed to the docker container
Args:
image (str): docker image name
timeout (int): timeout in minutes
        instance_cores (int): number of cpu cores to be used; if the number is too high,
                              os.cpu_count() will be used instead
        **kwargs: Will be turned into "--{arg}={kwargs[arg]}" format and
                  passed to the docker container
"""
instance_cores = (
instance_cores if instance_cores <= os.cpu_count() else os.cpu_count()
)
command = [
"docker",
"run",
"-d",
f"--stop-timeout={timeout}",
f"--cpus={instance_cores}",
"-e",
"AWS_ACCESS_KEY_ID",
"-e",
"AWS_SECRET_ACCESS_KEY",
"-e",
f"S3_RESULTS_BUCKET={FLAGS.S3_results_bucket}",
image,
"python",
"-m",
"xain.benchmark.exec",
]
for arg in kwargs:
if kwargs[arg] is None:
# Don't pass flags where arg has value None
continue
command.append(f"--{arg}={kwargs[arg]}")
subprocess.run(command, cwd=root_dir)
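# Illustrative call (hypothetical image name and flag values):
#   docker("xain-benchmark:latest", timeout=60, instance_cores=4,
#          group_name="example-group", task_name="example-task")
# roughly runs:
#   docker run -d --stop-timeout=60 --cpus=4 -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
#       -e S3_RESULTS_BUCKET=... xain-benchmark:latest python -m xain.benchmark.exec \
#       --group_name=example-group --task_name=example-task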
def ec2(image: str, timeout: int = 300, instance_cores=2, **kwargs):
"""Runs job on EC2 instead of a local machine
    Possible values for instance_cores (CPU-only c4 instances, see the ``cores`` mapping above) are:
    - 2: c4.large
    - 4: c4.xlarge
    - 8: c4.2xlarge
    - 16: c4.4xlarge
    - 32: c4.8xlarge
Args:
image (str): docker image name
timeout (int): timeout in minutes
instance_cores (int): number of EC2 instance cpu cores
        **kwargs: Will be turned into "--{arg}={kwargs[arg]}" format and passed to the docker container
"""
assert (
instance_cores in cores
), f"instance_cores {instance_cores} not in {cores.keys()}"
instance_type = cores[instance_cores]
absl_flags = "" # Will be passed to docker run in EC2 instance
for arg in kwargs:
if kwargs[arg] is None:
# Don't pass flags where arg has value None
continue
absl_flags += f"--{arg}={kwargs[arg]} "
absl_flags = absl_flags.strip()
instance_name = (
f"{kwargs['group_name']}_{kwargs['task_name']}"
    )  # Will be used to make the instance easier to identify
udata = user_data(
image=image,
timeout=timeout,
S3_results_bucket=FLAGS.S3_results_bucket,
flags=absl_flags,
)
client = boto3.client("ec2")
run_response = client.run_instances(
ImageId="ami-08806c999be9493f1",
MinCount=1,
MaxCount=1,
InstanceType=instance_type,
KeyName="xain-ec2-remote-training",
SubnetId="subnet-1bc3c466",
IamInstanceProfile={"Name": "XainEC2RemoteTraining"},
SecurityGroupIds=["sg-01ff10b690dffbaf5", "sg-01207b671ffadadf5"],
InstanceInitiatedShutdownBehavior="terminate",
UserData=udata,
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [{"Key": "Name", "Value": instance_name}],
}
],
AdditionalInfo=absl_flags, # Helpful to identify instance in EC2 UI
)
instance_id = run_response["Instances"][0]["InstanceId"]
logging.info({"InstanceId": instance_id, "Name": instance_name})
| StarcoderdataPython |
4855048 | from __future__ import print_function
from setuptools import setup
import raspgif
setup(
name='raspgif',
version=raspgif.__version__,
url='https://github.com/tomislater/raspgif',
license='MIT License',
author='<NAME>',
author_email='<EMAIL>',
description='Regional Atmospheric Soaring Prediction as a gif.',
packages=['raspgif'],
include_package_data=True,
platforms='any',
)
| StarcoderdataPython |
11262517 | <filename>src/solution_29c11459.py
# This file has the solution to the task 29c11459.json
from ioOps import read_file, get_file_path, print_grid
import json
import sys
"""
Method to find solution for the task 29c11459.json
Args:
data(input): The data grid to be processed
Returns:
Output grid with a line of the colour reaching out from left and right cells and the middle cell as grey
"""
def solve(input):
# The number associated with grey colour
grey = 5
for row in input:
if row[0] != 0:
length = len(row)
# Finding mid cell of the row to color it grey
midPoint = length // 2
# getting the start color number
startCode = row[0]
# Getting the end color number
endCode = row[length - 1]
row[midPoint] = grey
# Filling start number from beginning of row to mid of row
row[1: midPoint] = startCode
# Filling end color number from mid to end of row
row[midPoint + 1: ] = endCode
return input
if __name__ == "__main__":
inputFilePath = get_file_path(sys.argv)
data = read_file(inputFilePath)
grid = []
for input in data:
grid.append(solve(input))
print_grid(grid) | StarcoderdataPython |
1844215 | <filename>tests/test_api.py
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for session management and API invocation classes.
"""
import mock
from oslo.vmware import api
from oslo.vmware import exceptions
from oslo.vmware import vim_util
from tests import base
class RetryDecoratorTest(base.TestCase):
"""Tests for retry decorator class."""
def test_retry(self):
result = "RESULT"
@api.RetryDecorator()
def func(*args, **kwargs):
return result
self.assertEqual(result, func())
def func2(*args, **kwargs):
return result
retry = api.RetryDecorator()
self.assertEqual(result, retry(func2)())
self.assertTrue(retry._retry_count == 0)
def test_retry_with_expected_exceptions(self):
result = "RESULT"
responses = [exceptions.VimSessionOverLoadException(None),
exceptions.VimSessionOverLoadException(None),
result]
def func(*args, **kwargs):
response = responses.pop(0)
if isinstance(response, Exception):
raise response
return response
sleep_time_incr = 1
retry_count = 2
retry = api.RetryDecorator(10, sleep_time_incr, 10,
(exceptions.VimSessionOverLoadException,))
self.assertEqual(result, retry(func)())
self.assertTrue(retry._retry_count == retry_count)
self.assertEqual(retry_count * sleep_time_incr, retry._sleep_time)
def test_retry_with_max_retries(self):
responses = [exceptions.VimSessionOverLoadException(None),
exceptions.VimSessionOverLoadException(None),
exceptions.VimSessionOverLoadException(None)]
def func(*args, **kwargs):
response = responses.pop(0)
if isinstance(response, Exception):
raise response
return response
retry = api.RetryDecorator(2, 0, 0,
(exceptions.VimSessionOverLoadException,))
self.assertRaises(exceptions.VimSessionOverLoadException, retry(func))
self.assertTrue(retry._retry_count == 2)
def test_retry_with_unexpected_exception(self):
def func(*args, **kwargs):
raise exceptions.VimException(None)
retry = api.RetryDecorator()
self.assertRaises(exceptions.VimException, retry(func))
self.assertTrue(retry._retry_count == 0)
class VMwareAPISessionTest(base.TestCase):
"""Tests for VMwareAPISession."""
SERVER_IP = '10.1.2.3'
USERNAME = 'admin'
PASSWORD = 'password'
def setUp(self):
super(VMwareAPISessionTest, self).setUp()
patcher = mock.patch('oslo.vmware.vim.Vim')
self.addCleanup(patcher.stop)
self.VimMock = patcher.start()
self.VimMock.side_effect = lambda *args, **kw: mock.Mock()
def _create_api_session(self, _create_session, retry_count=10,
task_poll_interval=1):
return api.VMwareAPISession(VMwareAPISessionTest.SERVER_IP,
VMwareAPISessionTest.USERNAME,
VMwareAPISessionTest.PASSWORD,
retry_count,
task_poll_interval,
'https',
_create_session)
def test_vim(self):
api_session = self._create_api_session(False)
api_session.vim
self.VimMock.assert_called_with(protocol=api_session._scheme,
host=VMwareAPISessionTest.SERVER_IP,
wsdl_loc=api_session._wsdl_loc)
def test_create_session(self):
session = mock.Mock()
session.key = "12345"
api_session = self._create_api_session(False)
vim_obj = api_session.vim
vim_obj.Login.return_value = session
api_session._create_session()
session_manager = vim_obj.service_content.sessionManager
vim_obj.Login.assert_called_once_with(
session_manager, userName=VMwareAPISessionTest.USERNAME,
password=<PASSWORD>)
self.assertFalse(vim_obj.TerminateSession.called)
self.assertEqual(session.key, api_session._session_id)
def test_create_session_with_existing_session(self):
old_session_key = '12345'
new_session_key = '67890'
session = mock.Mock()
session.key = new_session_key
api_session = self._create_api_session(False)
api_session._session_id = old_session_key
vim_obj = api_session.vim
vim_obj.Login.return_value = session
api_session._create_session()
session_manager = vim_obj.service_content.sessionManager
vim_obj.Login.assert_called_once_with(
session_manager, userName=VMwareAPISessionTest.USERNAME,
password=<PASSWORD>)
vim_obj.TerminateSession.assert_called_once_with(
session_manager, sessionId=[old_session_key])
self.assertEqual(new_session_key, api_session._session_id)
def test_invoke_api(self):
api_session = self._create_api_session(True)
response = mock.Mock()
def api(*args, **kwargs):
return response
module = mock.Mock()
module.api = api
ret = api_session.invoke_api(module, 'api')
self.assertEqual(response, ret)
def test_invoke_api_with_expected_exception(self):
api_session = self._create_api_session(True)
ret = mock.Mock()
responses = [exceptions.VimConnectionException(None), ret]
def api(*args, **kwargs):
response = responses.pop(0)
if isinstance(response, Exception):
raise response
return response
module = mock.Mock()
module.api = api
self.assertEqual(ret, api_session.invoke_api(module, 'api'))
def test_invoke_api_with_vim_fault_exception(self):
api_session = self._create_api_session(True)
def api(*args, **kwargs):
raise exceptions.VimFaultException([], None)
module = mock.Mock()
module.api = api
self.assertRaises(exceptions.VimFaultException,
lambda: api_session.invoke_api(module, 'api'))
def test_invoke_api_with_empty_response(self):
api_session = self._create_api_session(True)
vim_obj = api_session.vim
vim_obj.SessionIsActive.return_value = True
def api(*args, **kwargs):
raise exceptions.VimFaultException(
[exceptions.NOT_AUTHENTICATED], None)
module = mock.Mock()
module.api = api
ret = api_session.invoke_api(module, 'api')
self.assertEqual([], ret)
vim_obj.SessionIsActive.assert_called_once_with(
vim_obj.service_content.sessionManager,
sessionID=api_session._session_id,
userName=api_session._session_username)
def test_wait_for_task(self):
api_session = self._create_api_session(True)
task_info_list = [('queued', 0), ('running', 40), ('success', 100)]
task_info_list_size = len(task_info_list)
def invoke_api_side_effect(module, method, *args, **kwargs):
(state, progress) = task_info_list.pop(0)
task_info = mock.Mock()
task_info.progress = progress
task_info.state = state
return task_info
api_session.invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
task = mock.Mock()
ret = api_session.wait_for_task(task)
self.assertEqual('success', ret.state)
self.assertEqual(100, ret.progress)
api_session.invoke_api.assert_called_with(vim_util,
'get_object_property',
api_session.vim, task,
'info')
self.assertEqual(task_info_list_size,
api_session.invoke_api.call_count)
def test_wait_for_task_with_error_state(self):
api_session = self._create_api_session(True)
task_info_list = [('queued', 0), ('running', 40), ('error', -1)]
task_info_list_size = len(task_info_list)
def invoke_api_side_effect(module, method, *args, **kwargs):
(state, progress) = task_info_list.pop(0)
task_info = mock.Mock()
task_info.progress = progress
task_info.state = state
return task_info
api_session.invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
task = mock.Mock()
self.assertRaises(exceptions.VMwareDriverException,
lambda: api_session.wait_for_task(task))
api_session.invoke_api.assert_called_with(vim_util,
'get_object_property',
api_session.vim, task,
'info')
self.assertEqual(task_info_list_size,
api_session.invoke_api.call_count)
def test_wait_for_task_with_invoke_api_exception(self):
api_session = self._create_api_session(True)
api_session.invoke_api = mock.Mock(
side_effect=exceptions.VimException(None))
task = mock.Mock()
self.assertRaises(exceptions.VimException,
lambda: api_session.wait_for_task(task))
api_session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
api_session.vim, task,
'info')
def test_wait_for_lease_ready(self):
api_session = self._create_api_session(True)
lease_states = ['initializing', 'ready']
num_states = len(lease_states)
def invoke_api_side_effect(module, method, *args, **kwargs):
return lease_states.pop(0)
api_session.invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
lease = mock.Mock()
api_session.wait_for_lease_ready(lease)
api_session.invoke_api.assert_called_with(vim_util,
'get_object_property',
api_session.vim, lease,
'state')
self.assertEqual(num_states, api_session.invoke_api.call_count)
def test_wait_for_lease_ready_with_error_state(self):
api_session = self._create_api_session(True)
responses = ['initializing', 'error', 'error_msg']
def invoke_api_side_effect(module, method, *args, **kwargs):
return responses.pop(0)
api_session.invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
lease = mock.Mock()
self.assertRaises(exceptions.VimException,
lambda: api_session.wait_for_lease_ready(lease))
exp_calls = [mock.call(vim_util, 'get_object_property',
api_session.vim, lease, 'state')] * 2
exp_calls.append(mock.call(vim_util, 'get_object_property',
api_session.vim, lease, 'error'))
self.assertEqual(exp_calls, api_session.invoke_api.call_args_list)
def test_wait_for_lease_ready_with_unknown_state(self):
api_session = self._create_api_session(True)
def invoke_api_side_effect(module, method, *args, **kwargs):
return 'unknown'
api_session.invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
lease = mock.Mock()
self.assertRaises(exceptions.VimException,
lambda: api_session.wait_for_lease_ready(lease))
api_session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
api_session.vim,
lease, 'state')
def test_wait_for_lease_ready_with_invoke_api_exception(self):
api_session = self._create_api_session(True)
api_session.invoke_api = mock.Mock(
side_effect=exceptions.VimException(None))
lease = mock.Mock()
self.assertRaises(exceptions.VimException,
lambda: api_session.wait_for_lease_ready(lease))
api_session.invoke_api.assert_called_once_with(
vim_util, 'get_object_property', api_session.vim, lease,
'state')
def _poll_task_well_known_exceptions(self, fault,
expected_exception):
api_session = self._create_api_session(False)
def fake_invoke_api(self, module, method, *args, **kwargs):
task_info = mock.Mock()
task_info.progress = -1
task_info.state = 'error'
error = mock.Mock()
error.localizedMessage = "Error message"
error_fault = mock.Mock()
error_fault.__class__.__name__ = fault
error.fault = error_fault
task_info.error = error
return task_info
with (
mock.patch.object(api_session, 'invoke_api', fake_invoke_api)
):
self.assertRaises(expected_exception,
api_session._poll_task, 'fake-task')
def test_poll_task_well_known_exceptions(self):
for k, v in exceptions._fault_classes_registry.iteritems():
self._poll_task_well_known_exceptions(k, v)
def test_poll_task_unknown_exception(self):
_unknown_exceptions = {
'NoDiskSpace': exceptions.VMwareDriverException,
'RuntimeFault': exceptions.VMwareDriverException
}
for k, v in _unknown_exceptions.iteritems():
self._poll_task_well_known_exceptions(k, v)
| StarcoderdataPython |
1690319 | """
Test request.render() and the predicates related to rendering
"""
def test_get_templatestring_view(
fake_templatestring_view,
requestservice,
resourceservice,
fake_article1
):
resourceservice.resources[fake_article1.id] = fake_article1
request = requestservice.make_request('more/article1')
assert 'Fake Template String View' == request.view.name
# Render
assert '<p>View Name: Fake Template String View</p>' == request.render()
| StarcoderdataPython |
5137009 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2008 <NAME> - CrysaLEAD - www.crysalead.fr
from . import models
from odoo import api, SUPERUSER_ID
def _l10n_fr_post_init_hook(cr, registry):
_preserve_tag_on_taxes(cr, registry)
_setup_inalterability(cr, registry)
def _preserve_tag_on_taxes(cr, registry):
from odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes
preserve_existing_tags_on_taxes(cr, registry, 'l10n_fr')
def _setup_inalterability(cr, registry):
env = api.Environment(cr, SUPERUSER_ID, {})
# enable ping for this module
env['publisher_warranty.contract'].update_notification(cron_mode=True)
fr_companies = env['res.company'].search([('partner_id.country_id.code', 'in', env['res.company']._get_unalterable_country())])
if fr_companies:
fr_companies._create_secure_sequence(['l10n_fr_closing_sequence_id'])
for fr_company in fr_companies:
fr_journals = env['account.journal'].search([('company_id', '=', fr_company.id)])
fr_journals.filtered(lambda x: not x.secure_sequence_id)._create_secure_sequence(['secure_sequence_id'])
| StarcoderdataPython |
11390546 | import pandas as pd
from functions import random_iid
def noisify(data, signal_col, noise_type, intensity, noisy_signal_col='degraded', normalize=False):
"""Adds noise to signal
:param noisy_signal_col: name of column that should be created to save noisy data in
:param data: Pandas Dataframe
:param signal_col: name of column in which noise should be added
:param noise_type: "normal"
:param intensity: Standard deviation or equivalent
:param normalize: Data column will be normalized before adding noise
:return: Dataframe with noisy signal
"""
    if not isinstance(data, pd.DataFrame):
        raise Exception('Data is not in pandas DataFrame format')
    if signal_col not in data.columns:
        raise Exception('Data does not have column ' + signal_col)
if noisy_signal_col == signal_col:
raise Exception('Columns have identical names')
df_copy = data.copy()
if normalize:
df_copy[signal_col] = df_copy[signal_col] / df_copy[signal_col].max()
if noise_type == 'normal':
noise = random_iid.normal(length=df_copy.shape[0], std_dev=intensity)
else:
raise Exception('noise type not recognized')
# Add noise to signal, scaling to signal size
df_copy[noisy_signal_col] = df_copy[signal_col] + df_copy[signal_col] * noise
# Measure correlation
correlation = df_copy.corr(method='spearman')
return df_copy, correlation[signal_col][noisy_signal_col]
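if __name__ == '__main__':
    # Minimal usage sketch (assumes functions.random_iid, imported above, is available):
    # degrade a toy sine signal and report how much rank correlation survives.
    import numpy as np
    demo = pd.DataFrame({'signal': np.sin(np.linspace(0, 10, 500)) + 2.0})
    noisy_demo, corr = noisify(demo, 'signal', 'normal', intensity=0.1)
    print(noisy_demo.head())
    print('Spearman correlation between clean and degraded signal:', corr)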
| StarcoderdataPython |
8070428 | <reponame>Rijul24/StressMeOut-1<gh_stars>0
"""MIT License
Copyright (c) 2021 armaanbadhan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from utils.google_sheet_funcs import insert_row_sheet
from utils.misc import change_timeformat, is_user_authorized
from discord_slash.utils.manage_commands import create_option
class AddStuffSlash(commands.Cog):
def __init__(self, bot):
self.bot = bot
@cog_ext.cog_slash(
name="add",
description="to add 'stuff' in StressMeOut",
options=[
create_option(
name="name",
description="name of the assignment/project",
option_type=3,
required=True
),
create_option(
name="date",
description="date of deadline duh",
option_type=4,
required=True
),
create_option(
name="month",
description="month of deadline duh",
option_type=4,
required=True
),
create_option(
name="hours",
description="hour of deadline duh (24 hr format)",
option_type=4,
required=True
),
create_option(
name="minutes",
description="minutes of deadline duh",
option_type=4,
required=True
)
],
)
async def add_stuff_in_bot(
self,
ctx,
name: SlashContext,
date: SlashContext,
month: SlashContext,
hours: SlashContext,
minutes: SlashContext
):
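        # Builds a "<day>.<month>.2021 <hours>:<minutes>" deadline string (the year is
        # hard-coded) and, if the caller is authorized, stores it via the Google Sheet helper.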
deadline = f"{date}.{month}.2021 {hours}:{minutes}"
_name = f"{name}"
if not is_user_authorized(ctx.author_id):
await ctx.send("no prems 4 u")
return
if change_timeformat(deadline):
await ctx.defer()
insert_row_sheet(deadline, _name)
await ctx.send("successful")
else:
await ctx.send("couldnt add, time format invalid")
def setup(bot):
bot.add_cog(AddStuffSlash(bot))
| StarcoderdataPython |
5144328 | <reponame>forgi86/RNN-adaptation
import os
import numpy as np
import time
import torch
import torch.nn as nn
from models import LSTMWrapper
from diffutil.products import jvp_diff, unflatten_like
import torch.optim as optim
from open_lstm import OpenLSTM
from torchid import metrics
if __name__ == '__main__':
time_start = time.time()
# In[Set seed for reproducibility]
np.random.seed(0)
torch.manual_seed(0)
# In[Settings]
sigma = 0.03
n_skip = 64 # skip initial n_skip samples for transfer (ignore transient)
model_name = "lstm"
n_iter = 250 # 750 # 100
lr = 1e-2
batch_size = 1
context = 25
# In[Load dataset]
u = np.load(os.path.join("data", "cstr", "u_transf.npy")).astype(np.float32)[:batch_size, :, :] # seq_len, input_size
y = np.load(os.path.join("data", "cstr", "y_transf.npy")).astype(np.float32)[:batch_size, :, :] # seq_len, output_size
# In[Check dimensions]
_, seq_len, input_size = u.shape
_, seq_len_, output_size = y.shape
N = y.size
assert(seq_len == seq_len_)
# In[Load LSTM model]
# Setup neural model structure and load fitted model parameters
model_op = OpenLSTM(context, input_size)
model_filename = f"{model_name}.pt"
model_op.load_state_dict(torch.load(os.path.join("models", model_filename)))
# In[Model wrapping]
model_wrapped = LSTMWrapper(model_op, seq_len, input_size, batch_s=batch_size)
u_torch = torch.tensor(u[:, 1:, :].reshape(-1, input_size), dtype=torch.float, requires_grad=False)
y_torch = torch.tensor(y.reshape(-1, output_size), dtype=torch.float)
u_torch_f = torch.clone(u_torch.view((input_size * (seq_len - 1), 1))) # [bsize*seq_len*n_in, ]
y_torch_f = torch.clone(y_torch[1:, :].view(output_size * (seq_len - 1), 1)) # [bsize*seq_len, ]
y_f = y_torch_f.detach().numpy()
u_torch_op = torch.cat((u_torch, y_torch[:-1, :]), dim=1).unsqueeze(0)
n_param = sum(map(torch.numel, model_op.parameters()))
#theta_lin = torch.tensor(np.load(os.path.join("models", "theta_lin.npy")).ravel())
#theta_lin = 1/np.sqrt(n_param)*torch.randn(n_param)
theta_lin = torch.zeros(n_param)
theta_lin.requires_grad_(True)
optimizer = optim.Adam([theta_lin], lr=lr)
LOSS = []
LOSS_REG = []
LOSS_FIT = []
y_sim_f = []
y_lin_f = []
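    # Gradient-descent adaptation: theta_lin parameterizes a correction of the nominal LSTM
    # output in its tangent space (via a Jacobian-vector product) and is fitted below by
    # L2-regularized least squares on the transfer data.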
for itr in range(n_iter):
optimizer.zero_grad()
theta_lin_f = unflatten_like(theta_lin, tensor_lst=list(model_wrapped.parameters()))
# Compute nominal and linear model output
y_sim_f = model_wrapped(u_torch_op)
y_lin_f = jvp_diff(y_sim_f, model_wrapped.parameters(), theta_lin_f)[0]
# Compute loss
err_fit = y_torch_f[n_skip * output_size:] - y_lin_f[n_skip * output_size:]
loss_fit = torch.sum(err_fit**2)
loss_reg = sigma**2 * theta_lin.dot(theta_lin)
loss = loss_fit + loss_reg
loss = loss/1000
# Statistics
print(f'Iter {itr} | Tradeoff Loss {loss:.3f} | Fit Loss {loss_fit:.6f} | Reg Loss {loss_reg:.6f}')
LOSS.append(loss.item())
LOSS_FIT.append(loss_fit.item())
LOSS_REG.append(loss_reg.item())
# Optimization
loss.backward()
optimizer.step()
adapt_time = time.time() - time_start
print(f"\nAdapt time: {adapt_time:.2f}")
np.save(os.path.join("models", "theta_lin_gd.npy"), theta_lin.detach().numpy())
# In[Plot]
y_sim = y_sim_f.detach().numpy().reshape(seq_len-1, output_size)
y_lin = y_lin_f.detach().numpy().reshape(seq_len-1, output_size)
np.save(os.path.join("data", "cstr", "cstr_transf_gd_eval_sim.npy"), y_sim)
np.save(os.path.join("data", "cstr", "cstr_transf_gd_eval_lin.npy"), y_lin)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 1)
ax[0].plot(y[0, 1:, 0], 'k', label="Ground truth")
ax[0].plot(y_sim[:, 0], 'b', label="LSTM")
ax[0].plot(y_lin[:, 0], 'r--', label="JVP-LSTM") # Jacobian-Vector Product
ax[0].axvline(context-1, color='k', linestyle='--', alpha=0.2)
ax[0].set_ylabel('Y')
ax[0].set_xlabel('X')
ax[0].legend()
ax[0].grid(True)
ax[1].plot(y[0, 1:, 1], 'k')
ax[1].plot(y_sim[:, 1], 'b')
ax[1].plot(y_lin[:, 1], 'r--')
ax[1].axvline(context-1, color='k', linestyle='--', alpha=0.2)
ax[1].set_ylabel('Y')
ax[1].set_xlabel('X')
# ax[1].legend()
ax[1].grid(True)
plt.show() | StarcoderdataPython |
12832581 | from numpy import array, matrix, diag, exp, inner, nan_to_num
from numpy.core.umath_tests import inner1d
from numpy import argmin, array
class GKS:
"""Gaussian kernel smoother to transform any clustering method into regression. setN is the list containing numpy arrays which are the weights of clustering centors.
populations is a list of integers of cluster populations. standard_variances is the list of real
numbers meaning the standard variances of the dataset along each dimension. smooth is None or real number.
While set to None, an SSL procedure will be employed. For details, see the responses() method."""
sv_kernel = None
setN = None #:Weights of the clustering centers, after instance initialization, it will be a list data structure.
Y = 1 #:Number of response variables.
percentages = None #:Distribution of the cluster populations.
xdim = None #:Dimension of the explanatory variables.
ydim = None #:Dimension of the response variables.
__global = True
smooth = None #:Smooth parameter.
__S = 0.0
K = 5 #: Number of clustering centers for smooth parameter calculation.
def __init__(self, setN, populations, standard_variances, Y_number, smooth = None, K = 5):
if len(setN[0])!=len(standard_variances):
            print('invalid GKS initialization: setN and standard_variances have mismatched dimensions')
else:
self.sv_kernel = matrix(diag(array(standard_variances)[:-1*Y_number]**-1.0))
self.setN = []
self.Y = []
for each in setN:
self.setN.append(each[:-1*Y_number])
self.Y.append(each[-1*Y_number:])
self.Y = matrix(self.Y).T
self.percentages = array(populations) / float(sum(populations))
self.setN = array(self.setN)
self.xdim = float(len(setN[0]) - Y_number)
self.ydim = float(Y_number)
self.smooth = smooth
self.K = K
def response_1s(self, point):
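        # Nadaraya-Watson style estimate: Gaussian weights computed from the variance-scaled
        # squared distances to each cluster center, mixed with the cluster population shares.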
dif_vectors = self.setN - point
dif_and_varianced = array(matrix(dif_vectors)*self.sv_kernel)
dif_traces = inner1d(dif_and_varianced , dif_vectors)
weights = exp(-0.5*self.__S*dif_traces)
results = (self.Y*(matrix(self.percentages * weights).T))/(inner(self.percentages, weights))
return array(results.T)[0]
def responses(self, points, prototypes = None):
"""points is a list or array of numpy arrays, and this method returns the regression results
of the dataset points. If the smooth parameter is initialized as None, the prototypes parameter
        will be required as a list or array of clustering centers in the form of numpy arrays, which is generated
        by the user-chosen clustering method on the same dataset as the one specified by the points variable."""
if self.smooth == None:
            self.K = min(self.K, len(prototypes))
accumulated_traces = 0.0
for point in prototypes:
dif_vectors = self.setN - point
dif_and_varianced = array(matrix(dif_vectors)*self.sv_kernel)
dif_traces = inner1d(dif_and_varianced , dif_vectors)
nn_index = argmin(dif_traces)
accumulated_traces += float(dif_traces[nn_index])
for i in range(self.K - 1):
dif_traces[nn_index] = float('inf')
nn_index = argmin(dif_traces)
accumulated_traces += float(dif_traces[nn_index])
self.__S = len(self.setN)*self.xdim/accumulated_traces
if self.__S < 0.0:
self.__S = 0.0
else:
self.__S = len(self.setN)**(-2.0*self.smooth)
results = []
if self.ydim == 1:
for each in points:
results.append(self.response_1s(each)[0])
else:
for each in points:
results.append(self.response_1s(each))
return results
if __name__ == '__main__':
testgks = GKS([array([1, 2, 2,3]), array([2, 3, 1,5])], array([1, 2]), array([1, 2, 3,1]), 2, smooth = -0.4)
print(testgks.response_1s(array([1,2])))
print(testgks.responses([array([1,2]),array([2,0])]))
| StarcoderdataPython |
5145721 | #!/usr/bin/env python
import os, sys
import json
import fnmatch, re
from collections import namedtuple
from ..helper import slugify
from ..exceptions import *
Key = namedtuple("Key", ["name","version"])
class Repo:
"""
Class to track each repo
"""
def __init__(self, username, reponame):
self.username = username
self.reponame = reponame
self.package = None
self.manager = None
self.rootdir = None
self.options = {}
self.key = None
self.remoteurl = None
def find_matching_files(self, includes):
"""
For various actions we need files that match patterns
"""
if len(includes) == 0:
return []
files = [f['relativepath'] for f in self.package['resources']]
includes = r'|'.join([fnmatch.translate(x) for x in includes])
        # Match both the file name as well as the path..
files = [f for f in files if re.match(includes, os.path.basename(f))] + \
[f for f in files if re.match(includes, f)]
files = list(set(files))
return files
# Cache for partially computed information
def cache_path(self, prefix, objname, ext=""):
path = os.path.join('.dgit',
prefix,
slugify(objname))
if ext != "":
ext = slugify(ext) # clean this up as well
path += ".{}".format(ext)
return {
'relative': path,
'full': os.path.join(self.rootdir, path)
}
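    # Illustrative result (the exact slug depends on helper.slugify):
    #   cache_path("preview", "sales data", "csv")
    #   -> {'relative': '.dgit/preview/sales-data.csv', 'full': '<rootdir>/.dgit/preview/sales-data.csv'}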
def cache_check(self, cachepath):
return os.path.exists(cachepath['full'])
def cache_read(self, cachepath):
return open(cachepath['full']).read()
def cache_write(self, cachepath, content):
path = cachepath['full']
try:
os.makedirs(os.path.dirname(path))
except:
pass
flag = "wb" if isinstance(content, bytes) else "w"
with open(path, flag) as fd:
fd.write(content)
print("Updated", os.path.relpath(path, self.rootdir))
def __str__(self):
return "[{}] {}/{}".format(self.manager.name,
self.username,
self.reponame)
def run(self, cmd, *args):
"""
Run a specific command using the manager
"""
if self.manager is None:
raise Exception("Fatal internal error: Missing repository manager")
if cmd not in dir(self.manager):
raise Exception("Fatal internal error: Invalid command {} being run".format(cmd))
func = getattr(self.manager, cmd)
repo = self
return func(repo, *args)
def get_resource(self, p):
"""
Get metadata for a given file
"""
for r in self.package['resources']:
if r['relativepath'] == p:
r['localfullpath'] = os.path.join(self.rootdir, p)
return r
raise Exception("Invalid path")
class RepoManagerBase(object):
"""Repository manager handles the specifics of the version control
system. Currently only git manager is supported.
"""
def __init__(self, name, version, description, supported=[]):
self.enable = 'y'
self.name = name
self.version = version
self.description = description
self.support = supported + [name]
self.enabled = 'y'
self.initialize()
self.repos = {}
def initialize(self):
pass
def enabled(self):
return self.enabled.lower() != 'n'
def get_repo_list(self):
return list(self.repos.keys())
def get_repo_details(self, key):
return self.repos[key]
def search(self, username, reponame):
matches = []
for k in list(self.repos.keys()):
if username is not None and k[0] != username:
continue
if reponame is not None and k[1] != reponame:
continue
matches.append(k)
return matches
def is_my_repo(self, username, reponame):
rootdir = os.path.join(self.workspace, 'datasets')
metadatadir = os.path.join(rootdir, username,
reponame,
self.metadatadir)
if os.path.exists(metadatadir):
return True
else:
return False
def init(self, username, reponame, force):
"""
Initialize a repo (may be fs/git/.. backed)
"""
pass
def key(self, username, reponame):
return (username, reponame)
def lookup(self, username=None, reponame=None, key=None):
"""
Lookup all available repos
"""
if key is None:
key = self.key(username, reponame)
if key not in self.repos:
raise UnknownRepository()
return self.repos[key]
def users(self):
"""
Find users
"""
return os.listdir(os.path.join(self.workspace, 'datasets'))
def repos(self, username):
return os.listdir(os.path.join(self.workspace, 'datasets', username))
def server_rootdir_from_repo(self, repo, create=True):
return self.server_rootdir(repo.username,
repo.reponame,
create)
def server_rootdir(self, username, reponame, create=True):
"""
Working directory for the repo
"""
path = os.path.join(self.workspace,
'git',
username,
reponame + ".git")
if create:
try:
os.makedirs(path)
except:
pass
return path
def rootdir(self, username, reponame, create=True):
"""
Working directory for the repo
"""
path = os.path.join(self.workspace,
'datasets',
username,
reponame)
if create:
try:
os.makedirs(path)
except:
pass
return path
def add(self, repo):
"""
Add repo to the internal lookup table...
"""
key = self.key(repo.username, repo.reponame)
repo.key = key
self.repos[key] = repo
return key
def drop(self, repo):
"""
Drop repository
"""
key = repo.key
del self.repos[key]
def push(self, repo, args):
pass
def status(self, repo, args):
pass
def show(self, repo, args):
pass
def stash(self, repo, args):
pass
def commit(self, repo, message):
pass
def notes(self, repo, args):
pass
def add_raw(self, repo, files):
pass
def add_files(self, repo, files):
"""
Files is a list with simple dict structure with relativepath and fullpath
"""
pass
def clone(self, repo, newusername, newreponame):
"""
Clone repo
"""
pass
def config(self, what='get', params=None):
return
| StarcoderdataPython |
9737090 | <reponame>YiWeiShen/Project-Euler-Hints
from multiprocessing.pool import Pool
import math
def cal_dividor(num):
list_a = []
i = 0
while num > 1:
if num % prime_list_clear[i] == 0:
print(prime_list_clear[i])
list_a.append(prime_list_clear[i])
num /= prime_list_clear[i]
continue
i += 1
return list_a
def cal_prime(num):
for i in range(2, int(num**0.5+1)):
if num % i == 0:
return None
return num
p=Pool(processes=16)
num_range = range(2,1000000)
prime_list = p.map(cal_prime, num_range)
p.close()
p.join()
prime_list_clear = [x for x in prime_list if x is not None]
print(prime_list_clear)
count=0
result =0
i = 0
print(pow(10, 10**9, 9 * prime_list_clear[0]))
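# A prime p divides the repunit R(k) = (10**k - 1) / 9 exactly when pow(10, k, 9 * p) == 1,
# so the loop below collects the first 40 primes dividing R(10**9) and sums them.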
while (count<40):
a = prime_list_clear[i] * 9
if pow(10, 10**9, a) == 1:
result += a /9
count += 1
i += 1
print(result)
| StarcoderdataPython |
4908470 | from devices.cisco import CiscoIOS
class CiscoIOSSSHTelnet(CiscoIOS):
"""
Class to represent Cisco IOS device to connect via ssh or telnet if unsure what connection is required
"""
def __init__(self, **kwargs):
super(CiscoIOS, self).__init__(**kwargs)
@property
def device_type(self):
"""
Returns device type - IOS SSH first then IOS telnet
:param self:
:return tuple:
"""
return 'cisco_ios', 'cisco_ios_telnet'
| StarcoderdataPython |
254373 | <filename>tests/python/pants_test/subsystem/subsystem_util.py<gh_stars>0
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.subsystem.util import global_subsystem_instance as global_subsystem_instance # noqa
from pants.testutil.subsystem.util import init_subsystem as init_subsystem # noqa
from pants.testutil.subsystem.util import init_subsystems as init_subsystems # noqa
from pants_test.deprecated_testinfra import deprecated_testinfra_module
deprecated_testinfra_module('pants.testutil.subsystem.util')
| StarcoderdataPython |
9679414 | import seaborn as sns
from Perceptron import Perceptron, accuracy
from sklearn.model_selection import KFold
from sklearn import linear_model
# Load dataframe
sns.set(font_scale=1.5)
df = sns.load_dataset('penguins')
# We are going to try and find the difference between
# Adelie and Gentoo penguins using all fields
relevant_classes = ('Adelie', 'Gentoo')
# Remove all inputs that are NaN
df.dropna(inplace=True)
# Pull out only relevant classes. The | is bitwise or which compares each list and makes
# a new list where a[x] | b[x] = c[x]
df = df[(df.species == relevant_classes[0]) | (df.species == relevant_classes[1])]
# Pull out labels
labels = df['species']
# Drop unused data (island, body mass, and sex) and species (redundant)
df = df.drop(['island', 'body_mass_g', 'sex', 'species'], axis=1)
# Z-score normalization. Tells how far each data point is off that column's mean,
# in units of standard deviation.
for col in df.columns:
    mean = df[col].mean()
    std_dev = df[col].std()
    df[col] = (df[col] - mean) / std_dev
# Turn our labels into class labels (0 or 1)
labels = labels.apply(lambda x: 0 if x == relevant_classes[0] else 1)
# Let's take a look at our ready data
print(labels, df)
# Turn it into lists for our model to consume
x = df.values
y = labels.values
# Let's train our model with k-fold cross validation to ensure that it is reliable
# Fold across all our data in 80-20 split train to test changing whcih data is
# training and testing each time to ensure our model works well.
kf = KFold(n_splits=5, shuffle=True)
for train_index, test_index in kf.split(x):
# Create new perceptron
penguin_perceptron = Perceptron(relevant_classes, {'accuracy': accuracy})
# Train it
train_results = penguin_perceptron.train(x[train_index], y[train_index], 500, {'accuracy': 0.95})
print('Training results:')
print(train_results)
# Test it and get results
test_accuracy = accuracy(penguin_perceptron.predict(x[test_index]), y[test_index])
print('Testing accuracy: ')
print(test_accuracy)
print('Sklearn model for comparison')
sklearn_perceptron = linear_model.Perceptron()
sklearn_perceptron.fit(x[train_index], y[train_index])
print(sklearn_perceptron.score(x[test_index], y[test_index])) | StarcoderdataPython |
12811128 | <gh_stars>1-10
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
census=np.concatenate((data,new_record),axis=0)
print("Census{}\n Data{}".format(census.shape,data.shape))
age=np.array(census[:,0])
print("age",age)
max_age=np.max(age)
print("max age",max_age)
age_mean=np.mean(age)
print('age mean',age_mean)
age_std = np.std(age)
print('age std',age_std)
race_0=census[census[:,2]==0]
print(race_0)
race_1=census[census[:,2]==1]
print(race_1)
race_2=census[census[:,2]==2]
print(race_2)
race_3=census[census[:,2]==3]
print(race_3)
race_4=census[census[:,2]==4]
print(race_4)
len_0 = len(race_0)
print(len_0)
len_1 = len(race_1)
print(len_1)
len_2 = len(race_2)
print(len_2)
len_3 = len(race_3)
print(len_3)
len_4 = len(race_4)
print(len_4)
l = [len_0,len_1,len_2,len_3,len_4]
minority_race = l.index(min(l))
print(minority_race)
senior_citizens=census[census[:,0]>60]
print(senior_citizens)
working_hours_sum = np.sum(senior_citizens[:,6])
print(working_hours_sum)
senior_citizens_len = len(senior_citizens)
print(senior_citizens_len)
avg_working_hours = working_hours_sum/senior_citizens_len
print(avg_working_hours)
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = np.mean(high[:,7])
avg_pay_low = np.mean(low[:,7])
print(avg_pay_high)
print(avg_pay_low)
| StarcoderdataPython |
11274620 | from django.core.management import BaseCommand
from django.conf import settings
class Command(BaseCommand):
def handle(self, *args, **options):
for key, value in settings.ENV.__dict__.items():
print(f'{key:25} {value}') | StarcoderdataPython |
343082 | <filename>cap02_variaveis_tipos_estrututurasDeDados/exercicios/ex01.py
# Exercise 1 - Print the numbers 1 through 10 on the screen. Use a list to store the numbers.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
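# The same list could also be built with list(range(1, 11)).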
print(numbers) | StarcoderdataPython |
3386335 | import time
from django.core.wsgi import get_wsgi_application
import os
import subprocess
import logging
from loghandler.loghandler import setup_logging
setup_logging()
logger = logging.getLogger(__name__)
# Django specific settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# Ensure settings are read
application = get_wsgi_application()
from database.models import Settings
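# NOTE (assumption): the fixed delay below presumably gives the database service time to start before Settings is queried.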
logger.info("sleeping ...")
time.sleep(25)
logger.info("waking ...")
startCommand = Settings.objects.get(name="startCommando")
startCommand = startCommand.value.split(" ")
logger.info(startCommand)
p = subprocess.Popen(startCommand) | StarcoderdataPython |
6404852 | <reponame>hooooolyshit/VocabularyNoteBook<filename>bing.py
import requests
import bs4
import time
import random
# Class that is not used yet
class ExampleSentance:
meanings = ''
sentences = []
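    # NOTE: 'meanings' and 'sentences' are class attributes, so the list is shared by every instance of this class.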
def set_meaning(self, meanings):
self.meanings = meanings
def add_sentence(self, s):
if type(s) == str:
self.sentences.append(s)
elif type(s) == list:
for each in s:
self.add_sentence(each)
else:
self.sentences.append('ERROR')
class Meaning:
type_word = ''
paraphrase = ''
def __init__(self, thetype, trans) -> None:
self.type_word = thetype
self.paraphrase = trans
class Bing:
def __init__(self, word):
self.example_sentances = []
self.tense = ''
self.meanings = []
self.word = word
self.url = "https://cn.bing.com/dict/search?q="+word
def get_html(self):
headers = {
'authority': 'cn.bing.com',
'sec-ch-ua': '^\\^',
'sec-ch-ua-mobile': '?0',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
'sec-ch-ua-arch': '^\\^x86^\\^',
'x-autosuggest-contentwidth': '648',
'sec-ch-ua-full-version': '^\\^90.0.4430.212^\\^',
'sec-ch-ua-platform-version': '^\\^10.0^\\^',
'sec-ch-ua-model': '',
'sec-ch-ua-platform': '^\\^Windows^\\^',
'accept': '*/*',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'accept-language': 'zh-CN,zh;q=0.9',
'Referer': 'https://cn.bing.com/',
}
html = requests.get(self.url, headers=headers)
return html
def get_meanings(self, soup):
meanings = soup.find_all('span', class_='def b_regtxt')
print("正在寻找", self.word, "的释义")
for each in meanings:
a = Meaning(each.text, each.previous)
self.meanings.append(a)
def get_tense(self, soup):
print("正在寻找", self.word, "的时态:")
tense = soup.find(class_="hd_div1")
if(tense):
self.tense = tense.text
def get_sentences(self, soup):
sentences = soup.find_all('div', class_='sen_en b_regtxt')
print("爬取", self.word, "的例句中༼ つ ◕_◕ ༽つ")
for each in sentences:
self.example_sentances.append(each.text)
def save(self):
file_name = 'VocabularyNotebook'
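        # Create the notebook file on first use if it does not exist yet.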
try:
f = open(file_name + '.txt', 'r', encoding='utf-8')
except FileNotFoundError:
f = open(file_name + '.txt', 'w+', encoding='utf-8')
f.close()
with open(file_name + '.txt', 'a', encoding='utf-8') as file:
file.write(self.word + '\n')
if self.tense != '':
file.write(self.tense+'\n')
file.write('释义:\n')
count = 0
for each in self.meanings:
file.write('\t'+each.paraphrase+" "+each.type_word+"\n")
count += 1
if count >= 3:
count = 0
break
file.write('例句:\n')
for each in self.example_sentances:
file.write('\t'+each+"\n")
count += 1
if count >= 4:
count = 0
break
file.write('\n\n')
print(self.word+"已经写入文件并保存")
def run(self):
html = self.get_html()
soup = bs4.BeautifulSoup(html.text, 'lxml')
        # Tense forms: third-person singular, past participle, etc.
self.get_tense(soup)
        # Definitions for each part of speech
self.get_meanings(soup)
        # Example sentences
self.get_sentences(soup)
self.save()
if __name__ == "__main__":
    # Preliminary setup
word_list = []
f = open('VocabularyList.txt', 'r', encoding='utf-8')
str = f.read()
f.close()
word_list = str.split(" ")
random.shuffle(word_list)
max_num_of_words = len(word_list)
print("想写多少个单词?\n最多", max_num_of_words, '个: ', end="")
times = int(input()) % len(word_list)
timeGap = float(input("爬取间隔(推荐5s以上): "))
    # Now work through the vocabulary list
for each in word_list:
if(times < 0):
break
myBing = Bing(each)
print("正在尝试爬取: "+each)
myBing.run()
time.sleep(timeGap)
times -= 1
input("程序运行结束, 打开\"VocabularyNotebook\"看看吧")
| StarcoderdataPython |
3590322 | <reponame>leandroaquinopereira/real-estate
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import render
# Create your views here.
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, DeleteView, CreateView, UpdateView
from records.forms import ClientForm, PropertyForm, RentForm
from records.models import Client, Property, Rent
# Property
class PropertyList(ListView):
queryset = Property.objects.all().order_by('type')
context_object_name = 'property'
template_name = 'records/list_properties.html'
def get_queryset(self):
txt_type = self.request.GET.get('type')
if txt_type:
property = Property.objects.filter(type__icontains=txt_type)
else:
property = Property.objects.all()
return property
class PropertyDetail(DetailView):
queryset = Property.objects.all()
context_object_name = 'property'
template_name = 'records/detail_properties.html'
class PropertyDelete(DeleteView):
model = Property
context_object_name = 'property'
template_name = 'records/delete_properties.html'
success_url = reverse_lazy('property-list')
class PropertyCreate(CreateView):
model = Property
form_class = PropertyForm
template_name = 'records/create_properties.html'
success_url = reverse_lazy('property-list')
class PropertyUpdate(UpdateView):
model = Property
form_class = PropertyForm
template_name = 'records/update_properties.html'
success_url = reverse_lazy('property-list')
# Client
class ClientList(ListView):
queryset = Client.objects.all().order_by('name')
context_object_name = 'client'
template_name = 'records/list_clients.html'
class ClientDetail(DetailView):
queryset = Client.objects.all()
context_object_name = 'client'
template_name = 'records/detail_clients.html'
class ClientDelete(DeleteView):
model = Client
context_object_name = 'client'
template_name = 'records/delete_clients.html'
success_url = reverse_lazy('client-list')
class ClientCreate(CreateView):
model = Client
form_class = ClientForm
template_name = 'records/create_clients.html'
success_url = reverse_lazy('client-list')
class ClientUpdate(UpdateView):
model = Client
form_class = ClientForm
template_name = 'records/update_clients.html'
success_url = reverse_lazy('client-list')
# Rent
class RentList(ListView):
queryset = Rent.objects.all().order_by('id')
context_object_name = 'rent'
template_name = 'records/list_rents.html'
class RentDetail(DetailView):
queryset = Rent.objects.all()
context_object_name = 'rent'
template_name = 'records/detail_rents.html'
class RentDelete(DeleteView):
model = Rent
context_object_name = 'rent'
template_name = 'records/delete_rents.html'
success_url = reverse_lazy('rent-list')
class RentCreate(CreateView):
model = Rent
form_class = RentForm
template_name = 'records/create_rents.html'
success_url = reverse_lazy('rent-list')
class RentUpdate(UpdateView):
model = Rent
form_class = RentForm
template_name = 'records/update_rents.html'
success_url = reverse_lazy('rent-list')
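
# A minimal URLconf sketch (an assumption, not part of the original file): it maps
# these views to the route names that the reverse_lazy() calls above expect. In a
# real project this would live in records/urls.py, and the URL paths chosen here
# are hypothetical.
from django.urls import path

urlpatterns = [
    path('properties/', PropertyList.as_view(), name='property-list'),
    path('clients/', ClientList.as_view(), name='client-list'),
    path('rents/', RentList.as_view(), name='rent-list'),
]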
| StarcoderdataPython |
4922542 | <reponame>scrasmussen/icar
import numpy as np
import units
from bunch import Bunch
R=8.3144621 # J/mol/K
cp=29.19 # J/mol/K =1.012 J/g/K
g=9.81 # m/s^2
def convert_atm(data,sfc):
output_data=Bunch()
# [time,z,ns,ew]
output_data.u = data.u # m/s
output_data.v = data.v # m/s
output_data.p = data.p # Pa
output_data.qv = data.qv # kg/kg
pii = (100000.0 / output_data.p)**(R / cp)
output_data.t = data.t * pii # K (converted to potential temperature)
if "z" in data.keys():
output_data.z=data.z # m
else:
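        # No height field in the input: derive sea-level pressure first, then reconstruct layer heights from pressure, temperature, and moisture.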
print(data.ps.shape,
sfc.hgt.shape,
data.t.shape)
print(data.p[0,0,...].mean(), data.p[0,-1,...].mean())
data.slp=units.calc_slp(data.ps,sfc.hgt[np.newaxis,...],ts=data.t[:,0,...],mr=output_data.qv[:,0,...],method=2)
output_data.z=np.zeros(data.t.shape)
for z_time in range(data.t.shape[0]):
output_data.z[z_time,...]=units.calc_z(data.slp[z_time],
output_data.p[z_time],
t=output_data.t[z_time],
mr=output_data.qv[z_time])
# now calculate layer thicknesses
output_data.dz=np.zeros(output_data.z.shape)
output_data.dz[:,0,:,:]= 2 * (output_data.z[:,0,:,:]-sfc.hgt[np.newaxis,:,:])
for i in range(1,output_data.z.shape[1]):
output_data.dz[:,i,:,:]= 2 * np.mean(output_data.z[:,i,:,:]-output_data.z[:,i-1,:,:])-output_data.dz[:,i-1]
output_data.dz[0]=output_data.dz[1]
output_data.cloud= np.zeros(data.qv.shape)
output_data.ice = output_data.cloud
return output_data
def cmip2icar(data):
output_data=Bunch()
atm=convert_atm(data.atm,data.sfc)
for k in atm.keys():
output_data[k]=atm[k]
for k in data.sfc.keys():
output_data[k]=data.sfc[k]
return output_data | StarcoderdataPython |
1690984 | from tensorflow_sparsemax.sparsemax_regression import SparsemaxRegression
| StarcoderdataPython |
3480561 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.ads.google_ads.v2.proto.common import ad_asset_pb2
from google.ads.google_ads.v2.proto.common import ad_type_infos_pb2
from google.ads.google_ads.v2.proto.common import asset_types_pb2
from google.ads.google_ads.v2.proto.common import bidding_pb2
from google.ads.google_ads.v2.proto.common import click_location_pb2
from google.ads.google_ads.v2.proto.common import criteria_pb2
from google.ads.google_ads.v2.proto.common import criterion_category_availability_pb2
from google.ads.google_ads.v2.proto.common import custom_parameter_pb2
from google.ads.google_ads.v2.proto.common import dates_pb2
from google.ads.google_ads.v2.proto.common import explorer_auto_optimizer_setting_pb2
from google.ads.google_ads.v2.proto.common import extensions_pb2
from google.ads.google_ads.v2.proto.common import feed_common_pb2
from google.ads.google_ads.v2.proto.common import final_app_url_pb2
from google.ads.google_ads.v2.proto.common import frequency_cap_pb2
from google.ads.google_ads.v2.proto.common import keyword_plan_common_pb2
from google.ads.google_ads.v2.proto.common import matching_function_pb2
from google.ads.google_ads.v2.proto.common import metrics_pb2
from google.ads.google_ads.v2.proto.common import policy_pb2
from google.ads.google_ads.v2.proto.common import real_time_bidding_setting_pb2
from google.ads.google_ads.v2.proto.common import segments_pb2
from google.ads.google_ads.v2.proto.common import simulation_pb2
from google.ads.google_ads.v2.proto.common import tag_snippet_pb2
from google.ads.google_ads.v2.proto.common import targeting_setting_pb2
from google.ads.google_ads.v2.proto.common import text_label_pb2
from google.ads.google_ads.v2.proto.common import url_collection_pb2
from google.ads.google_ads.v2.proto.common import user_lists_pb2
from google.ads.google_ads.v2.proto.common import value_pb2
from google.ads.google_ads.v2.proto.enums import access_reason_pb2
from google.ads.google_ads.v2.proto.enums import access_role_pb2
from google.ads.google_ads.v2.proto.enums import account_budget_proposal_status_pb2
from google.ads.google_ads.v2.proto.enums import account_budget_proposal_type_pb2
from google.ads.google_ads.v2.proto.enums import account_budget_status_pb2
from google.ads.google_ads.v2.proto.enums import ad_customizer_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import ad_group_ad_rotation_mode_pb2
from google.ads.google_ads.v2.proto.enums import ad_group_ad_status_pb2
from google.ads.google_ads.v2.proto.enums import ad_group_criterion_approval_status_pb2
from google.ads.google_ads.v2.proto.enums import ad_group_criterion_status_pb2
from google.ads.google_ads.v2.proto.enums import ad_group_status_pb2
from google.ads.google_ads.v2.proto.enums import ad_group_type_pb2
from google.ads.google_ads.v2.proto.enums import ad_network_type_pb2
from google.ads.google_ads.v2.proto.enums import ad_serving_optimization_status_pb2
from google.ads.google_ads.v2.proto.enums import ad_strength_pb2
from google.ads.google_ads.v2.proto.enums import ad_type_pb2
from google.ads.google_ads.v2.proto.enums import advertising_channel_sub_type_pb2
from google.ads.google_ads.v2.proto.enums import advertising_channel_type_pb2
from google.ads.google_ads.v2.proto.enums import affiliate_location_feed_relationship_type_pb2
from google.ads.google_ads.v2.proto.enums import affiliate_location_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import age_range_type_pb2
from google.ads.google_ads.v2.proto.enums import app_campaign_app_store_pb2
from google.ads.google_ads.v2.proto.enums import app_campaign_bidding_strategy_goal_type_pb2
from google.ads.google_ads.v2.proto.enums import app_payment_model_type_pb2
from google.ads.google_ads.v2.proto.enums import app_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import app_store_pb2
from google.ads.google_ads.v2.proto.enums import app_url_operating_system_type_pb2
from google.ads.google_ads.v2.proto.enums import asset_field_type_pb2
from google.ads.google_ads.v2.proto.enums import asset_performance_label_pb2
from google.ads.google_ads.v2.proto.enums import asset_type_pb2
from google.ads.google_ads.v2.proto.enums import attribution_model_pb2
from google.ads.google_ads.v2.proto.enums import bid_modifier_source_pb2
from google.ads.google_ads.v2.proto.enums import bidding_source_pb2
from google.ads.google_ads.v2.proto.enums import bidding_strategy_status_pb2
from google.ads.google_ads.v2.proto.enums import bidding_strategy_type_pb2
from google.ads.google_ads.v2.proto.enums import billing_setup_status_pb2
from google.ads.google_ads.v2.proto.enums import brand_safety_suitability_pb2
from google.ads.google_ads.v2.proto.enums import budget_delivery_method_pb2
from google.ads.google_ads.v2.proto.enums import budget_period_pb2
from google.ads.google_ads.v2.proto.enums import budget_status_pb2
from google.ads.google_ads.v2.proto.enums import budget_type_pb2
from google.ads.google_ads.v2.proto.enums import call_conversion_reporting_state_pb2
from google.ads.google_ads.v2.proto.enums import call_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import callout_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import campaign_criterion_status_pb2
from google.ads.google_ads.v2.proto.enums import campaign_draft_status_pb2
from google.ads.google_ads.v2.proto.enums import campaign_experiment_status_pb2
from google.ads.google_ads.v2.proto.enums import campaign_experiment_traffic_split_type_pb2
from google.ads.google_ads.v2.proto.enums import campaign_experiment_type_pb2
from google.ads.google_ads.v2.proto.enums import campaign_serving_status_pb2
from google.ads.google_ads.v2.proto.enums import campaign_shared_set_status_pb2
from google.ads.google_ads.v2.proto.enums import campaign_status_pb2
from google.ads.google_ads.v2.proto.enums import change_status_operation_pb2
from google.ads.google_ads.v2.proto.enums import change_status_resource_type_pb2
from google.ads.google_ads.v2.proto.enums import click_type_pb2
from google.ads.google_ads.v2.proto.enums import content_label_type_pb2
from google.ads.google_ads.v2.proto.enums import conversion_action_category_pb2
from google.ads.google_ads.v2.proto.enums import conversion_action_counting_type_pb2
from google.ads.google_ads.v2.proto.enums import conversion_action_status_pb2
from google.ads.google_ads.v2.proto.enums import conversion_action_type_pb2
from google.ads.google_ads.v2.proto.enums import conversion_adjustment_type_pb2
from google.ads.google_ads.v2.proto.enums import conversion_attribution_event_type_pb2
from google.ads.google_ads.v2.proto.enums import conversion_lag_bucket_pb2
from google.ads.google_ads.v2.proto.enums import conversion_or_adjustment_lag_bucket_pb2
from google.ads.google_ads.v2.proto.enums import criterion_category_channel_availability_mode_pb2
from google.ads.google_ads.v2.proto.enums import criterion_category_locale_availability_mode_pb2
from google.ads.google_ads.v2.proto.enums import criterion_system_serving_status_pb2
from google.ads.google_ads.v2.proto.enums import criterion_type_pb2
from google.ads.google_ads.v2.proto.enums import custom_interest_member_type_pb2
from google.ads.google_ads.v2.proto.enums import custom_interest_status_pb2
from google.ads.google_ads.v2.proto.enums import custom_interest_type_pb2
from google.ads.google_ads.v2.proto.enums import custom_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import customer_match_upload_key_type_pb2
from google.ads.google_ads.v2.proto.enums import customer_pay_per_conversion_eligibility_failure_reason_pb2
from google.ads.google_ads.v2.proto.enums import data_driven_model_status_pb2
from google.ads.google_ads.v2.proto.enums import day_of_week_pb2
from google.ads.google_ads.v2.proto.enums import device_pb2
from google.ads.google_ads.v2.proto.enums import display_ad_format_setting_pb2
from google.ads.google_ads.v2.proto.enums import display_upload_product_type_pb2
from google.ads.google_ads.v2.proto.enums import distance_bucket_pb2
from google.ads.google_ads.v2.proto.enums import dsa_page_feed_criterion_field_pb2
from google.ads.google_ads.v2.proto.enums import education_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import extension_setting_device_pb2
from google.ads.google_ads.v2.proto.enums import extension_type_pb2
from google.ads.google_ads.v2.proto.enums import external_conversion_source_pb2
from google.ads.google_ads.v2.proto.enums import feed_attribute_type_pb2
from google.ads.google_ads.v2.proto.enums import feed_item_quality_approval_status_pb2
from google.ads.google_ads.v2.proto.enums import feed_item_quality_disapproval_reason_pb2
from google.ads.google_ads.v2.proto.enums import feed_item_status_pb2
from google.ads.google_ads.v2.proto.enums import feed_item_target_device_pb2
from google.ads.google_ads.v2.proto.enums import feed_item_target_type_pb2
from google.ads.google_ads.v2.proto.enums import feed_item_validation_status_pb2
from google.ads.google_ads.v2.proto.enums import feed_link_status_pb2
from google.ads.google_ads.v2.proto.enums import feed_mapping_criterion_type_pb2
from google.ads.google_ads.v2.proto.enums import feed_mapping_status_pb2
from google.ads.google_ads.v2.proto.enums import feed_origin_pb2
from google.ads.google_ads.v2.proto.enums import feed_status_pb2
from google.ads.google_ads.v2.proto.enums import flight_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import frequency_cap_event_type_pb2
from google.ads.google_ads.v2.proto.enums import frequency_cap_level_pb2
from google.ads.google_ads.v2.proto.enums import frequency_cap_time_unit_pb2
from google.ads.google_ads.v2.proto.enums import gender_type_pb2
from google.ads.google_ads.v2.proto.enums import geo_target_constant_status_pb2
from google.ads.google_ads.v2.proto.enums import geo_targeting_restriction_pb2
from google.ads.google_ads.v2.proto.enums import geo_targeting_type_pb2
from google.ads.google_ads.v2.proto.enums import google_ads_field_category_pb2
from google.ads.google_ads.v2.proto.enums import google_ads_field_data_type_pb2
from google.ads.google_ads.v2.proto.enums import hotel_date_selection_type_pb2
from google.ads.google_ads.v2.proto.enums import hotel_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import hotel_rate_type_pb2
from google.ads.google_ads.v2.proto.enums import income_range_type_pb2
from google.ads.google_ads.v2.proto.enums import interaction_event_type_pb2
from google.ads.google_ads.v2.proto.enums import interaction_type_pb2
from google.ads.google_ads.v2.proto.enums import job_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import keyword_match_type_pb2
from google.ads.google_ads.v2.proto.enums import keyword_plan_competition_level_pb2
from google.ads.google_ads.v2.proto.enums import keyword_plan_forecast_interval_pb2
from google.ads.google_ads.v2.proto.enums import keyword_plan_network_pb2
from google.ads.google_ads.v2.proto.enums import label_status_pb2
from google.ads.google_ads.v2.proto.enums import legacy_app_install_ad_app_store_pb2
from google.ads.google_ads.v2.proto.enums import listing_custom_attribute_index_pb2
from google.ads.google_ads.v2.proto.enums import listing_group_type_pb2
from google.ads.google_ads.v2.proto.enums import local_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import location_extension_targeting_criterion_field_pb2
from google.ads.google_ads.v2.proto.enums import location_group_radius_units_pb2
from google.ads.google_ads.v2.proto.enums import location_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import manager_link_status_pb2
from google.ads.google_ads.v2.proto.enums import matching_function_context_type_pb2
from google.ads.google_ads.v2.proto.enums import matching_function_operator_pb2
from google.ads.google_ads.v2.proto.enums import media_type_pb2
from google.ads.google_ads.v2.proto.enums import merchant_center_link_status_pb2
from google.ads.google_ads.v2.proto.enums import message_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import mime_type_pb2
from google.ads.google_ads.v2.proto.enums import minute_of_hour_pb2
from google.ads.google_ads.v2.proto.enums import mobile_device_type_pb2
from google.ads.google_ads.v2.proto.enums import month_of_year_pb2
from google.ads.google_ads.v2.proto.enums import mutate_job_status_pb2
from google.ads.google_ads.v2.proto.enums import negative_geo_target_type_pb2
from google.ads.google_ads.v2.proto.enums import operating_system_version_operator_type_pb2
from google.ads.google_ads.v2.proto.enums import page_one_promoted_strategy_goal_pb2
from google.ads.google_ads.v2.proto.enums import parental_status_type_pb2
from google.ads.google_ads.v2.proto.enums import payment_mode_pb2
from google.ads.google_ads.v2.proto.enums import placeholder_type_pb2
from google.ads.google_ads.v2.proto.enums import placement_type_pb2
from google.ads.google_ads.v2.proto.enums import policy_approval_status_pb2
from google.ads.google_ads.v2.proto.enums import policy_review_status_pb2
from google.ads.google_ads.v2.proto.enums import policy_topic_entry_type_pb2
from google.ads.google_ads.v2.proto.enums import policy_topic_evidence_destination_mismatch_url_type_pb2
from google.ads.google_ads.v2.proto.enums import policy_topic_evidence_destination_not_working_device_pb2
from google.ads.google_ads.v2.proto.enums import policy_topic_evidence_destination_not_working_dns_error_type_pb2
from google.ads.google_ads.v2.proto.enums import positive_geo_target_type_pb2
from google.ads.google_ads.v2.proto.enums import preferred_content_type_pb2
from google.ads.google_ads.v2.proto.enums import price_extension_price_qualifier_pb2
from google.ads.google_ads.v2.proto.enums import price_extension_price_unit_pb2
from google.ads.google_ads.v2.proto.enums import price_extension_type_pb2
from google.ads.google_ads.v2.proto.enums import price_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import product_bidding_category_level_pb2
from google.ads.google_ads.v2.proto.enums import product_bidding_category_status_pb2
from google.ads.google_ads.v2.proto.enums import product_channel_exclusivity_pb2
from google.ads.google_ads.v2.proto.enums import product_channel_pb2
from google.ads.google_ads.v2.proto.enums import product_condition_pb2
from google.ads.google_ads.v2.proto.enums import product_type_level_pb2
from google.ads.google_ads.v2.proto.enums import promotion_extension_discount_modifier_pb2
from google.ads.google_ads.v2.proto.enums import promotion_extension_occasion_pb2
from google.ads.google_ads.v2.proto.enums import promotion_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import proximity_radius_units_pb2
from google.ads.google_ads.v2.proto.enums import quality_score_bucket_pb2
from google.ads.google_ads.v2.proto.enums import real_estate_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import recommendation_type_pb2
from google.ads.google_ads.v2.proto.enums import search_engine_results_page_type_pb2
from google.ads.google_ads.v2.proto.enums import search_term_match_type_pb2
from google.ads.google_ads.v2.proto.enums import search_term_targeting_status_pb2
from google.ads.google_ads.v2.proto.enums import served_asset_field_type_pb2
from google.ads.google_ads.v2.proto.enums import shared_set_status_pb2
from google.ads.google_ads.v2.proto.enums import shared_set_type_pb2
from google.ads.google_ads.v2.proto.enums import simulation_modification_method_pb2
from google.ads.google_ads.v2.proto.enums import simulation_type_pb2
from google.ads.google_ads.v2.proto.enums import sitelink_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import slot_pb2
from google.ads.google_ads.v2.proto.enums import spending_limit_type_pb2
from google.ads.google_ads.v2.proto.enums import structured_snippet_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import system_managed_entity_source_pb2
from google.ads.google_ads.v2.proto.enums import target_cpa_opt_in_recommendation_goal_pb2
from google.ads.google_ads.v2.proto.enums import target_impression_share_location_pb2
from google.ads.google_ads.v2.proto.enums import targeting_dimension_pb2
from google.ads.google_ads.v2.proto.enums import time_type_pb2
from google.ads.google_ads.v2.proto.enums import tracking_code_page_format_pb2
from google.ads.google_ads.v2.proto.enums import tracking_code_type_pb2
from google.ads.google_ads.v2.proto.enums import travel_placeholder_field_pb2
from google.ads.google_ads.v2.proto.enums import user_interest_taxonomy_type_pb2
from google.ads.google_ads.v2.proto.enums import user_list_access_status_pb2
from google.ads.google_ads.v2.proto.enums import user_list_closing_reason_pb2
from google.ads.google_ads.v2.proto.enums import user_list_combined_rule_operator_pb2
from google.ads.google_ads.v2.proto.enums import user_list_crm_data_source_type_pb2
from google.ads.google_ads.v2.proto.enums import user_list_date_rule_item_operator_pb2
from google.ads.google_ads.v2.proto.enums import user_list_logical_rule_operator_pb2
from google.ads.google_ads.v2.proto.enums import user_list_membership_status_pb2
from google.ads.google_ads.v2.proto.enums import user_list_number_rule_item_operator_pb2
from google.ads.google_ads.v2.proto.enums import user_list_prepopulation_status_pb2
from google.ads.google_ads.v2.proto.enums import user_list_rule_type_pb2
from google.ads.google_ads.v2.proto.enums import user_list_size_range_pb2
from google.ads.google_ads.v2.proto.enums import user_list_string_rule_item_operator_pb2
from google.ads.google_ads.v2.proto.enums import user_list_type_pb2
from google.ads.google_ads.v2.proto.enums import vanity_pharma_display_url_mode_pb2
from google.ads.google_ads.v2.proto.enums import vanity_pharma_text_pb2
from google.ads.google_ads.v2.proto.enums import webpage_condition_operand_pb2
from google.ads.google_ads.v2.proto.enums import webpage_condition_operator_pb2
from google.ads.google_ads.v2.proto.errors import access_invitation_error_pb2
from google.ads.google_ads.v2.proto.errors import account_budget_proposal_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_customizer_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_group_ad_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_group_bid_modifier_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_group_criterion_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_group_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_group_feed_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_parameter_error_pb2
from google.ads.google_ads.v2.proto.errors import ad_sharing_error_pb2
from google.ads.google_ads.v2.proto.errors import adx_error_pb2
from google.ads.google_ads.v2.proto.errors import asset_error_pb2
from google.ads.google_ads.v2.proto.errors import authentication_error_pb2
from google.ads.google_ads.v2.proto.errors import authorization_error_pb2
from google.ads.google_ads.v2.proto.errors import bidding_error_pb2
from google.ads.google_ads.v2.proto.errors import bidding_strategy_error_pb2
from google.ads.google_ads.v2.proto.errors import billing_setup_error_pb2
from google.ads.google_ads.v2.proto.errors import campaign_budget_error_pb2
from google.ads.google_ads.v2.proto.errors import campaign_criterion_error_pb2
from google.ads.google_ads.v2.proto.errors import campaign_draft_error_pb2
from google.ads.google_ads.v2.proto.errors import campaign_error_pb2
from google.ads.google_ads.v2.proto.errors import campaign_experiment_error_pb2
from google.ads.google_ads.v2.proto.errors import campaign_feed_error_pb2
from google.ads.google_ads.v2.proto.errors import campaign_shared_set_error_pb2
from google.ads.google_ads.v2.proto.errors import change_status_error_pb2
from google.ads.google_ads.v2.proto.errors import collection_size_error_pb2
from google.ads.google_ads.v2.proto.errors import context_error_pb2
from google.ads.google_ads.v2.proto.errors import conversion_action_error_pb2
from google.ads.google_ads.v2.proto.errors import conversion_adjustment_upload_error_pb2
from google.ads.google_ads.v2.proto.errors import conversion_upload_error_pb2
from google.ads.google_ads.v2.proto.errors import country_code_error_pb2
from google.ads.google_ads.v2.proto.errors import criterion_error_pb2
from google.ads.google_ads.v2.proto.errors import currency_code_error_pb2
from google.ads.google_ads.v2.proto.errors import custom_interest_error_pb2
from google.ads.google_ads.v2.proto.errors import customer_client_link_error_pb2
from google.ads.google_ads.v2.proto.errors import customer_error_pb2
from google.ads.google_ads.v2.proto.errors import customer_feed_error_pb2
from google.ads.google_ads.v2.proto.errors import customer_manager_link_error_pb2
from google.ads.google_ads.v2.proto.errors import database_error_pb2
from google.ads.google_ads.v2.proto.errors import date_error_pb2
from google.ads.google_ads.v2.proto.errors import date_range_error_pb2
from google.ads.google_ads.v2.proto.errors import distinct_error_pb2
from google.ads.google_ads.v2.proto.errors import enum_error_pb2
from google.ads.google_ads.v2.proto.errors import errors_pb2
from google.ads.google_ads.v2.proto.errors import extension_feed_item_error_pb2
from google.ads.google_ads.v2.proto.errors import extension_setting_error_pb2
from google.ads.google_ads.v2.proto.errors import feed_attribute_reference_error_pb2
from google.ads.google_ads.v2.proto.errors import feed_error_pb2
from google.ads.google_ads.v2.proto.errors import feed_item_error_pb2
from google.ads.google_ads.v2.proto.errors import feed_item_target_error_pb2
from google.ads.google_ads.v2.proto.errors import feed_item_validation_error_pb2
from google.ads.google_ads.v2.proto.errors import feed_mapping_error_pb2
from google.ads.google_ads.v2.proto.errors import field_error_pb2
from google.ads.google_ads.v2.proto.errors import field_mask_error_pb2
from google.ads.google_ads.v2.proto.errors import function_error_pb2
from google.ads.google_ads.v2.proto.errors import function_parsing_error_pb2
from google.ads.google_ads.v2.proto.errors import geo_target_constant_suggestion_error_pb2
from google.ads.google_ads.v2.proto.errors import header_error_pb2
from google.ads.google_ads.v2.proto.errors import id_error_pb2
from google.ads.google_ads.v2.proto.errors import image_error_pb2
from google.ads.google_ads.v2.proto.errors import internal_error_pb2
from google.ads.google_ads.v2.proto.errors import keyword_plan_ad_group_error_pb2
from google.ads.google_ads.v2.proto.errors import keyword_plan_campaign_error_pb2
from google.ads.google_ads.v2.proto.errors import keyword_plan_error_pb2
from google.ads.google_ads.v2.proto.errors import keyword_plan_idea_error_pb2
from google.ads.google_ads.v2.proto.errors import keyword_plan_keyword_error_pb2
from google.ads.google_ads.v2.proto.errors import keyword_plan_negative_keyword_error_pb2
from google.ads.google_ads.v2.proto.errors import label_error_pb2
from google.ads.google_ads.v2.proto.errors import language_code_error_pb2
from google.ads.google_ads.v2.proto.errors import list_operation_error_pb2
from google.ads.google_ads.v2.proto.errors import manager_link_error_pb2
from google.ads.google_ads.v2.proto.errors import media_bundle_error_pb2
from google.ads.google_ads.v2.proto.errors import media_file_error_pb2
from google.ads.google_ads.v2.proto.errors import media_upload_error_pb2
from google.ads.google_ads.v2.proto.errors import multiplier_error_pb2
from google.ads.google_ads.v2.proto.errors import mutate_error_pb2
from google.ads.google_ads.v2.proto.errors import mutate_job_error_pb2
from google.ads.google_ads.v2.proto.errors import new_resource_creation_error_pb2
from google.ads.google_ads.v2.proto.errors import not_empty_error_pb2
from google.ads.google_ads.v2.proto.errors import not_whitelisted_error_pb2
from google.ads.google_ads.v2.proto.errors import null_error_pb2
from google.ads.google_ads.v2.proto.errors import operation_access_denied_error_pb2
from google.ads.google_ads.v2.proto.errors import operator_error_pb2
from google.ads.google_ads.v2.proto.errors import partial_failure_error_pb2
from google.ads.google_ads.v2.proto.errors import policy_finding_error_pb2
from google.ads.google_ads.v2.proto.errors import policy_validation_parameter_error_pb2
from google.ads.google_ads.v2.proto.errors import policy_violation_error_pb2
from google.ads.google_ads.v2.proto.errors import query_error_pb2
from google.ads.google_ads.v2.proto.errors import quota_error_pb2
from google.ads.google_ads.v2.proto.errors import range_error_pb2
from google.ads.google_ads.v2.proto.errors import recommendation_error_pb2
from google.ads.google_ads.v2.proto.errors import region_code_error_pb2
from google.ads.google_ads.v2.proto.errors import request_error_pb2
from google.ads.google_ads.v2.proto.errors import resource_access_denied_error_pb2
from google.ads.google_ads.v2.proto.errors import resource_count_limit_exceeded_error_pb2
from google.ads.google_ads.v2.proto.errors import setting_error_pb2
from google.ads.google_ads.v2.proto.errors import shared_criterion_error_pb2
from google.ads.google_ads.v2.proto.errors import shared_set_error_pb2
from google.ads.google_ads.v2.proto.errors import size_limit_error_pb2
from google.ads.google_ads.v2.proto.errors import string_format_error_pb2
from google.ads.google_ads.v2.proto.errors import string_length_error_pb2
from google.ads.google_ads.v2.proto.errors import url_field_error_pb2
from google.ads.google_ads.v2.proto.errors import user_list_error_pb2
from google.ads.google_ads.v2.proto.errors import youtube_video_registration_error_pb2
from google.ads.google_ads.v2.proto.resources import account_budget_pb2
from google.ads.google_ads.v2.proto.resources import account_budget_proposal_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_asset_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_feed_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_parameter_pb2
from google.ads.google_ads.v2.proto.resources import ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_schedule_view_pb2
from google.ads.google_ads.v2.proto.resources import age_range_view_pb2
from google.ads.google_ads.v2.proto.resources import asset_pb2
from google.ads.google_ads.v2.proto.resources import bidding_strategy_pb2
from google.ads.google_ads.v2.proto.resources import billing_setup_pb2
from google.ads.google_ads.v2.proto.resources import campaign_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import campaign_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import campaign_budget_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import campaign_draft_pb2
from google.ads.google_ads.v2.proto.resources import campaign_experiment_pb2
from google.ads.google_ads.v2.proto.resources import campaign_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import campaign_feed_pb2
from google.ads.google_ads.v2.proto.resources import campaign_label_pb2
from google.ads.google_ads.v2.proto.resources import campaign_pb2
from google.ads.google_ads.v2.proto.resources import campaign_shared_set_pb2
from google.ads.google_ads.v2.proto.resources import carrier_constant_pb2
from google.ads.google_ads.v2.proto.resources import change_status_pb2
from google.ads.google_ads.v2.proto.resources import click_view_pb2
from google.ads.google_ads.v2.proto.resources import conversion_action_pb2
from google.ads.google_ads.v2.proto.resources import custom_interest_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_pb2
from google.ads.google_ads.v2.proto.resources import customer_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import customer_feed_pb2
from google.ads.google_ads.v2.proto.resources import customer_label_pb2
from google.ads.google_ads.v2.proto.resources import customer_manager_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_negative_criterion_pb2
from google.ads.google_ads.v2.proto.resources import customer_pb2
from google.ads.google_ads.v2.proto.resources import detail_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import display_keyword_view_pb2
from google.ads.google_ads.v2.proto.resources import distance_view_pb2
from google.ads.google_ads.v2.proto.resources import domain_category_pb2
from google.ads.google_ads.v2.proto.resources import dynamic_search_ads_search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import expanded_landing_page_view_pb2
from google.ads.google_ads.v2.proto.resources import extension_feed_item_pb2
from google.ads.google_ads.v2.proto.resources import feed_item_pb2
from google.ads.google_ads.v2.proto.resources import feed_item_target_pb2
from google.ads.google_ads.v2.proto.resources import feed_mapping_pb2
from google.ads.google_ads.v2.proto.resources import feed_pb2
from google.ads.google_ads.v2.proto.resources import feed_placeholder_view_pb2
from google.ads.google_ads.v2.proto.resources import gender_view_pb2
from google.ads.google_ads.v2.proto.resources import geo_target_constant_pb2
from google.ads.google_ads.v2.proto.resources import geographic_view_pb2
from google.ads.google_ads.v2.proto.resources import google_ads_field_pb2
from google.ads.google_ads.v2.proto.resources import group_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import hotel_group_view_pb2
from google.ads.google_ads.v2.proto.resources import hotel_performance_view_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_ad_group_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_campaign_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_keyword_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_negative_keyword_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_pb2
from google.ads.google_ads.v2.proto.resources import keyword_view_pb2
from google.ads.google_ads.v2.proto.resources import label_pb2
from google.ads.google_ads.v2.proto.resources import landing_page_view_pb2
from google.ads.google_ads.v2.proto.resources import language_constant_pb2
from google.ads.google_ads.v2.proto.resources import location_view_pb2
from google.ads.google_ads.v2.proto.resources import managed_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import media_file_pb2
from google.ads.google_ads.v2.proto.resources import merchant_center_link_pb2
from google.ads.google_ads.v2.proto.resources import mobile_app_category_constant_pb2
from google.ads.google_ads.v2.proto.resources import mobile_device_constant_pb2
from google.ads.google_ads.v2.proto.resources import mutate_job_pb2
from google.ads.google_ads.v2.proto.resources import operating_system_version_constant_pb2
from google.ads.google_ads.v2.proto.resources import paid_organic_search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import parental_status_view_pb2
from google.ads.google_ads.v2.proto.resources import payments_account_pb2
from google.ads.google_ads.v2.proto.resources import product_bidding_category_constant_pb2
from google.ads.google_ads.v2.proto.resources import product_group_view_pb2
from google.ads.google_ads.v2.proto.resources import recommendation_pb2
from google.ads.google_ads.v2.proto.resources import remarketing_action_pb2
from google.ads.google_ads.v2.proto.resources import search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import shared_criterion_pb2
from google.ads.google_ads.v2.proto.resources import shared_set_pb2
from google.ads.google_ads.v2.proto.resources import shopping_performance_view_pb2
from google.ads.google_ads.v2.proto.resources import topic_constant_pb2
from google.ads.google_ads.v2.proto.resources import topic_view_pb2
from google.ads.google_ads.v2.proto.resources import user_interest_pb2
from google.ads.google_ads.v2.proto.resources import user_list_pb2
from google.ads.google_ads.v2.proto.resources import user_location_view_pb2
from google.ads.google_ads.v2.proto.resources import video_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_service_pb2
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2
from google.ads.google_ads.v2.proto.services import asset_service_pb2
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_draft_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_experiment_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_feed_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_label_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_shared_set_service_pb2
from google.ads.google_ads.v2.proto.services import carrier_constant_service_pb2
from google.ads.google_ads.v2.proto.services import change_status_service_pb2
from google.ads.google_ads.v2.proto.services import click_view_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_action_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_adjustment_upload_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_upload_service_pb2
from google.ads.google_ads.v2.proto.services import custom_interest_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_service_pb2
from google.ads.google_ads.v2.proto.services import customer_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import customer_feed_service_pb2
from google.ads.google_ads.v2.proto.services import customer_label_service_pb2
from google.ads.google_ads.v2.proto.services import customer_manager_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_negative_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import customer_service_pb2
from google.ads.google_ads.v2.proto.services import detail_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import display_keyword_view_service_pb2
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2
from google.ads.google_ads.v2.proto.services import domain_category_service_pb2
from google.ads.google_ads.v2.proto.services import dynamic_search_ads_search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import expanded_landing_page_view_service_pb2
from google.ads.google_ads.v2.proto.services import extension_feed_item_service_pb2
from google.ads.google_ads.v2.proto.services import feed_item_service_pb2
from google.ads.google_ads.v2.proto.services import feed_item_target_service_pb2
from google.ads.google_ads.v2.proto.services import feed_mapping_service_pb2
from google.ads.google_ads.v2.proto.services import feed_placeholder_view_service_pb2
from google.ads.google_ads.v2.proto.services import feed_service_pb2
from google.ads.google_ads.v2.proto.services import gender_view_service_pb2
from google.ads.google_ads.v2.proto.services import geo_target_constant_service_pb2
from google.ads.google_ads.v2.proto.services import geographic_view_service_pb2
from google.ads.google_ads.v2.proto.services import google_ads_field_service_pb2
from google.ads.google_ads.v2.proto.services import google_ads_service_pb2
from google.ads.google_ads.v2.proto.services import group_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import hotel_group_view_service_pb2
from google.ads.google_ads.v2.proto.services import hotel_performance_view_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_campaign_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_idea_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_keyword_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_negative_keyword_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_view_service_pb2
from google.ads.google_ads.v2.proto.services import label_service_pb2
from google.ads.google_ads.v2.proto.services import landing_page_view_service_pb2
from google.ads.google_ads.v2.proto.services import language_constant_service_pb2
from google.ads.google_ads.v2.proto.services import location_view_service_pb2
from google.ads.google_ads.v2.proto.services import managed_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import media_file_service_pb2
from google.ads.google_ads.v2.proto.services import merchant_center_link_service_pb2
from google.ads.google_ads.v2.proto.services import mobile_app_category_constant_service_pb2
from google.ads.google_ads.v2.proto.services import mobile_device_constant_service_pb2
from google.ads.google_ads.v2.proto.services import mutate_job_service_pb2
from google.ads.google_ads.v2.proto.services import operating_system_version_constant_service_pb2
from google.ads.google_ads.v2.proto.services import paid_organic_search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import parental_status_view_service_pb2
from google.ads.google_ads.v2.proto.services import payments_account_service_pb2
from google.ads.google_ads.v2.proto.services import product_bidding_category_constant_service_pb2
from google.ads.google_ads.v2.proto.services import product_group_view_service_pb2
from google.ads.google_ads.v2.proto.services import recommendation_service_pb2
from google.ads.google_ads.v2.proto.services import remarketing_action_service_pb2
from google.ads.google_ads.v2.proto.services import search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import shared_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import shared_set_service_pb2
from google.ads.google_ads.v2.proto.services import shopping_performance_view_service_pb2
from google.ads.google_ads.v2.proto.services import topic_constant_service_pb2
from google.ads.google_ads.v2.proto.services import topic_view_service_pb2
from google.ads.google_ads.v2.proto.services import user_interest_service_pb2
from google.ads.google_ads.v2.proto.services import user_list_service_pb2
from google.ads.google_ads.v2.proto.services import user_location_view_service_pb2
from google.ads.google_ads.v2.proto.services import video_service_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import wrappers_pb2
from google.rpc import status_pb2
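# Proto modules shared across the v2 services; their message classes are presumably collected with get_messages() and re-exported from this module (hedged description of the generated boilerplate).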
_shared_modules = [
ad_asset_pb2,
ad_type_infos_pb2,
asset_types_pb2,
bidding_pb2,
click_location_pb2,
criteria_pb2,
criterion_category_availability_pb2,
custom_parameter_pb2,
dates_pb2,
explorer_auto_optimizer_setting_pb2,
extensions_pb2,
feed_common_pb2,
final_app_url_pb2,
frequency_cap_pb2,
keyword_plan_common_pb2,
matching_function_pb2,
metrics_pb2,
policy_pb2,
real_time_bidding_setting_pb2,
segments_pb2,
simulation_pb2,
tag_snippet_pb2,
targeting_setting_pb2,
text_label_pb2,
url_collection_pb2,
user_lists_pb2,
value_pb2,
access_reason_pb2,
access_role_pb2,
account_budget_proposal_status_pb2,
account_budget_proposal_type_pb2,
account_budget_status_pb2,
ad_customizer_placeholder_field_pb2,
ad_group_ad_rotation_mode_pb2,
ad_group_ad_status_pb2,
ad_group_criterion_approval_status_pb2,
ad_group_criterion_status_pb2,
ad_group_status_pb2,
ad_group_type_pb2,
ad_network_type_pb2,
ad_serving_optimization_status_pb2,
ad_strength_pb2,
ad_type_pb2,
advertising_channel_sub_type_pb2,
advertising_channel_type_pb2,
affiliate_location_feed_relationship_type_pb2,
affiliate_location_placeholder_field_pb2,
age_range_type_pb2,
app_campaign_app_store_pb2,
app_campaign_bidding_strategy_goal_type_pb2,
app_payment_model_type_pb2,
app_placeholder_field_pb2,
app_store_pb2,
app_url_operating_system_type_pb2,
asset_field_type_pb2,
asset_performance_label_pb2,
asset_type_pb2,
attribution_model_pb2,
bid_modifier_source_pb2,
bidding_source_pb2,
bidding_strategy_status_pb2,
bidding_strategy_type_pb2,
billing_setup_status_pb2,
brand_safety_suitability_pb2,
budget_delivery_method_pb2,
budget_period_pb2,
budget_status_pb2,
budget_type_pb2,
call_conversion_reporting_state_pb2,
call_placeholder_field_pb2,
callout_placeholder_field_pb2,
campaign_criterion_status_pb2,
campaign_draft_status_pb2,
campaign_experiment_status_pb2,
campaign_experiment_traffic_split_type_pb2,
campaign_experiment_type_pb2,
campaign_serving_status_pb2,
campaign_shared_set_status_pb2,
campaign_status_pb2,
change_status_operation_pb2,
change_status_resource_type_pb2,
click_type_pb2,
content_label_type_pb2,
conversion_action_category_pb2,
conversion_action_counting_type_pb2,
conversion_action_status_pb2,
conversion_action_type_pb2,
conversion_adjustment_type_pb2,
conversion_attribution_event_type_pb2,
conversion_lag_bucket_pb2,
conversion_or_adjustment_lag_bucket_pb2,
criterion_category_channel_availability_mode_pb2,
criterion_category_locale_availability_mode_pb2,
criterion_system_serving_status_pb2,
criterion_type_pb2,
custom_interest_member_type_pb2,
custom_interest_status_pb2,
custom_interest_type_pb2,
custom_placeholder_field_pb2,
customer_match_upload_key_type_pb2,
customer_pay_per_conversion_eligibility_failure_reason_pb2,
data_driven_model_status_pb2,
day_of_week_pb2,
device_pb2,
display_ad_format_setting_pb2,
display_upload_product_type_pb2,
distance_bucket_pb2,
dsa_page_feed_criterion_field_pb2,
education_placeholder_field_pb2,
extension_setting_device_pb2,
extension_type_pb2,
external_conversion_source_pb2,
feed_attribute_type_pb2,
feed_item_quality_approval_status_pb2,
feed_item_quality_disapproval_reason_pb2,
feed_item_status_pb2,
feed_item_target_device_pb2,
feed_item_target_type_pb2,
feed_item_validation_status_pb2,
feed_link_status_pb2,
feed_mapping_criterion_type_pb2,
feed_mapping_status_pb2,
feed_origin_pb2,
feed_status_pb2,
flight_placeholder_field_pb2,
frequency_cap_event_type_pb2,
frequency_cap_level_pb2,
frequency_cap_time_unit_pb2,
gender_type_pb2,
geo_target_constant_status_pb2,
geo_targeting_restriction_pb2,
geo_targeting_type_pb2,
google_ads_field_category_pb2,
google_ads_field_data_type_pb2,
hotel_date_selection_type_pb2,
hotel_placeholder_field_pb2,
hotel_rate_type_pb2,
income_range_type_pb2,
interaction_event_type_pb2,
interaction_type_pb2,
job_placeholder_field_pb2,
keyword_match_type_pb2,
keyword_plan_competition_level_pb2,
keyword_plan_forecast_interval_pb2,
keyword_plan_network_pb2,
label_status_pb2,
legacy_app_install_ad_app_store_pb2,
listing_custom_attribute_index_pb2,
listing_group_type_pb2,
local_placeholder_field_pb2,
location_extension_targeting_criterion_field_pb2,
location_group_radius_units_pb2,
location_placeholder_field_pb2,
manager_link_status_pb2,
matching_function_context_type_pb2,
matching_function_operator_pb2,
media_type_pb2,
merchant_center_link_status_pb2,
message_placeholder_field_pb2,
mime_type_pb2,
minute_of_hour_pb2,
mobile_device_type_pb2,
month_of_year_pb2,
mutate_job_status_pb2,
negative_geo_target_type_pb2,
operating_system_version_operator_type_pb2,
page_one_promoted_strategy_goal_pb2,
parental_status_type_pb2,
payment_mode_pb2,
placeholder_type_pb2,
placement_type_pb2,
policy_approval_status_pb2,
policy_review_status_pb2,
policy_topic_entry_type_pb2,
policy_topic_evidence_destination_mismatch_url_type_pb2,
policy_topic_evidence_destination_not_working_device_pb2,
policy_topic_evidence_destination_not_working_dns_error_type_pb2,
positive_geo_target_type_pb2,
preferred_content_type_pb2,
price_extension_price_qualifier_pb2,
price_extension_price_unit_pb2,
price_extension_type_pb2,
price_placeholder_field_pb2,
product_bidding_category_level_pb2,
product_bidding_category_status_pb2,
product_channel_exclusivity_pb2,
product_channel_pb2,
product_condition_pb2,
product_type_level_pb2,
promotion_extension_discount_modifier_pb2,
promotion_extension_occasion_pb2,
promotion_placeholder_field_pb2,
proximity_radius_units_pb2,
quality_score_bucket_pb2,
real_estate_placeholder_field_pb2,
recommendation_type_pb2,
search_engine_results_page_type_pb2,
search_term_match_type_pb2,
search_term_targeting_status_pb2,
served_asset_field_type_pb2,
shared_set_status_pb2,
shared_set_type_pb2,
simulation_modification_method_pb2,
simulation_type_pb2,
sitelink_placeholder_field_pb2,
slot_pb2,
spending_limit_type_pb2,
structured_snippet_placeholder_field_pb2,
system_managed_entity_source_pb2,
target_cpa_opt_in_recommendation_goal_pb2,
target_impression_share_location_pb2,
targeting_dimension_pb2,
time_type_pb2,
tracking_code_page_format_pb2,
tracking_code_type_pb2,
travel_placeholder_field_pb2,
user_interest_taxonomy_type_pb2,
user_list_access_status_pb2,
user_list_closing_reason_pb2,
user_list_combined_rule_operator_pb2,
user_list_crm_data_source_type_pb2,
user_list_date_rule_item_operator_pb2,
user_list_logical_rule_operator_pb2,
user_list_membership_status_pb2,
user_list_number_rule_item_operator_pb2,
user_list_prepopulation_status_pb2,
user_list_rule_type_pb2,
user_list_size_range_pb2,
user_list_string_rule_item_operator_pb2,
user_list_type_pb2,
vanity_pharma_display_url_mode_pb2,
vanity_pharma_text_pb2,
webpage_condition_operand_pb2,
webpage_condition_operator_pb2,
access_invitation_error_pb2,
account_budget_proposal_error_pb2,
ad_customizer_error_pb2,
ad_error_pb2,
ad_group_ad_error_pb2,
ad_group_bid_modifier_error_pb2,
ad_group_criterion_error_pb2,
ad_group_error_pb2,
ad_group_feed_error_pb2,
ad_parameter_error_pb2,
ad_sharing_error_pb2,
adx_error_pb2,
asset_error_pb2,
authentication_error_pb2,
authorization_error_pb2,
bidding_error_pb2,
bidding_strategy_error_pb2,
billing_setup_error_pb2,
campaign_budget_error_pb2,
campaign_criterion_error_pb2,
campaign_draft_error_pb2,
campaign_error_pb2,
campaign_experiment_error_pb2,
campaign_feed_error_pb2,
campaign_shared_set_error_pb2,
change_status_error_pb2,
collection_size_error_pb2,
context_error_pb2,
conversion_action_error_pb2,
conversion_adjustment_upload_error_pb2,
conversion_upload_error_pb2,
country_code_error_pb2,
criterion_error_pb2,
currency_code_error_pb2,
custom_interest_error_pb2,
customer_client_link_error_pb2,
customer_error_pb2,
customer_feed_error_pb2,
customer_manager_link_error_pb2,
database_error_pb2,
date_error_pb2,
date_range_error_pb2,
distinct_error_pb2,
enum_error_pb2,
errors_pb2,
extension_feed_item_error_pb2,
extension_setting_error_pb2,
feed_attribute_reference_error_pb2,
feed_error_pb2,
feed_item_error_pb2,
feed_item_target_error_pb2,
feed_item_validation_error_pb2,
feed_mapping_error_pb2,
field_error_pb2,
field_mask_error_pb2,
function_error_pb2,
function_parsing_error_pb2,
geo_target_constant_suggestion_error_pb2,
header_error_pb2,
id_error_pb2,
image_error_pb2,
internal_error_pb2,
keyword_plan_ad_group_error_pb2,
keyword_plan_campaign_error_pb2,
keyword_plan_error_pb2,
keyword_plan_idea_error_pb2,
keyword_plan_keyword_error_pb2,
keyword_plan_negative_keyword_error_pb2,
label_error_pb2,
language_code_error_pb2,
list_operation_error_pb2,
manager_link_error_pb2,
media_bundle_error_pb2,
media_file_error_pb2,
media_upload_error_pb2,
multiplier_error_pb2,
mutate_error_pb2,
mutate_job_error_pb2,
new_resource_creation_error_pb2,
not_empty_error_pb2,
not_whitelisted_error_pb2,
null_error_pb2,
operation_access_denied_error_pb2,
operator_error_pb2,
partial_failure_error_pb2,
policy_finding_error_pb2,
policy_validation_parameter_error_pb2,
policy_violation_error_pb2,
query_error_pb2,
quota_error_pb2,
range_error_pb2,
recommendation_error_pb2,
region_code_error_pb2,
request_error_pb2,
resource_access_denied_error_pb2,
resource_count_limit_exceeded_error_pb2,
setting_error_pb2,
shared_criterion_error_pb2,
shared_set_error_pb2,
size_limit_error_pb2,
string_format_error_pb2,
string_length_error_pb2,
url_field_error_pb2,
user_list_error_pb2,
youtube_video_registration_error_pb2,
account_budget_pb2,
account_budget_proposal_pb2,
ad_group_ad_asset_view_pb2,
ad_group_ad_label_pb2,
ad_group_ad_pb2,
ad_group_audience_view_pb2,
ad_group_bid_modifier_pb2,
ad_group_criterion_label_pb2,
ad_group_criterion_pb2,
ad_group_criterion_simulation_pb2,
ad_group_extension_setting_pb2,
ad_group_feed_pb2,
ad_group_label_pb2,
ad_group_pb2,
ad_group_simulation_pb2,
ad_parameter_pb2,
ad_pb2,
ad_schedule_view_pb2,
age_range_view_pb2,
asset_pb2,
bidding_strategy_pb2,
billing_setup_pb2,
campaign_audience_view_pb2,
campaign_bid_modifier_pb2,
campaign_budget_pb2,
campaign_criterion_pb2,
campaign_criterion_simulation_pb2,
campaign_draft_pb2,
campaign_experiment_pb2,
campaign_extension_setting_pb2,
campaign_feed_pb2,
campaign_label_pb2,
campaign_pb2,
campaign_shared_set_pb2,
carrier_constant_pb2,
change_status_pb2,
click_view_pb2,
conversion_action_pb2,
custom_interest_pb2,
customer_client_link_pb2,
customer_client_pb2,
customer_extension_setting_pb2,
customer_feed_pb2,
customer_label_pb2,
customer_manager_link_pb2,
customer_negative_criterion_pb2,
customer_pb2,
detail_placement_view_pb2,
display_keyword_view_pb2,
distance_view_pb2,
domain_category_pb2,
dynamic_search_ads_search_term_view_pb2,
expanded_landing_page_view_pb2,
extension_feed_item_pb2,
feed_item_pb2,
feed_item_target_pb2,
feed_mapping_pb2,
feed_pb2,
feed_placeholder_view_pb2,
gender_view_pb2,
geo_target_constant_pb2,
geographic_view_pb2,
google_ads_field_pb2,
group_placement_view_pb2,
hotel_group_view_pb2,
hotel_performance_view_pb2,
keyword_plan_ad_group_pb2,
keyword_plan_campaign_pb2,
keyword_plan_keyword_pb2,
keyword_plan_negative_keyword_pb2,
keyword_plan_pb2,
keyword_view_pb2,
label_pb2,
landing_page_view_pb2,
language_constant_pb2,
location_view_pb2,
managed_placement_view_pb2,
media_file_pb2,
merchant_center_link_pb2,
mobile_app_category_constant_pb2,
mobile_device_constant_pb2,
mutate_job_pb2,
operating_system_version_constant_pb2,
paid_organic_search_term_view_pb2,
parental_status_view_pb2,
payments_account_pb2,
product_bidding_category_constant_pb2,
product_group_view_pb2,
recommendation_pb2,
remarketing_action_pb2,
search_term_view_pb2,
shared_criterion_pb2,
shared_set_pb2,
shopping_performance_view_pb2,
topic_constant_pb2,
topic_view_pb2,
user_interest_pb2,
user_list_pb2,
user_location_view_pb2,
video_pb2,
operations_pb2,
any_pb2,
empty_pb2,
field_mask_pb2,
wrappers_pb2,
status_pb2,
]
_local_modules = [
account_budget_proposal_service_pb2,
account_budget_service_pb2,
ad_group_ad_asset_view_service_pb2,
ad_group_ad_label_service_pb2,
ad_group_ad_service_pb2,
ad_group_audience_view_service_pb2,
ad_group_bid_modifier_service_pb2,
ad_group_criterion_label_service_pb2,
ad_group_criterion_service_pb2,
ad_group_criterion_simulation_service_pb2,
ad_group_extension_setting_service_pb2,
ad_group_feed_service_pb2,
ad_group_label_service_pb2,
ad_group_service_pb2,
ad_group_simulation_service_pb2,
ad_parameter_service_pb2,
ad_schedule_view_service_pb2,
ad_service_pb2,
age_range_view_service_pb2,
asset_service_pb2,
bidding_strategy_service_pb2,
billing_setup_service_pb2,
campaign_audience_view_service_pb2,
campaign_bid_modifier_service_pb2,
campaign_budget_service_pb2,
campaign_criterion_service_pb2,
campaign_criterion_simulation_service_pb2,
campaign_draft_service_pb2,
campaign_experiment_service_pb2,
campaign_extension_setting_service_pb2,
campaign_feed_service_pb2,
campaign_label_service_pb2,
campaign_service_pb2,
campaign_shared_set_service_pb2,
carrier_constant_service_pb2,
change_status_service_pb2,
click_view_service_pb2,
conversion_action_service_pb2,
conversion_adjustment_upload_service_pb2,
conversion_upload_service_pb2,
custom_interest_service_pb2,
customer_client_link_service_pb2,
customer_client_service_pb2,
customer_extension_setting_service_pb2,
customer_feed_service_pb2,
customer_label_service_pb2,
customer_manager_link_service_pb2,
customer_negative_criterion_service_pb2,
customer_service_pb2,
detail_placement_view_service_pb2,
display_keyword_view_service_pb2,
distance_view_service_pb2,
domain_category_service_pb2,
dynamic_search_ads_search_term_view_service_pb2,
expanded_landing_page_view_service_pb2,
extension_feed_item_service_pb2,
feed_item_service_pb2,
feed_item_target_service_pb2,
feed_mapping_service_pb2,
feed_placeholder_view_service_pb2,
feed_service_pb2,
gender_view_service_pb2,
geo_target_constant_service_pb2,
geographic_view_service_pb2,
google_ads_field_service_pb2,
google_ads_service_pb2,
group_placement_view_service_pb2,
hotel_group_view_service_pb2,
hotel_performance_view_service_pb2,
keyword_plan_ad_group_service_pb2,
keyword_plan_campaign_service_pb2,
keyword_plan_idea_service_pb2,
keyword_plan_keyword_service_pb2,
keyword_plan_negative_keyword_service_pb2,
keyword_plan_service_pb2,
keyword_view_service_pb2,
label_service_pb2,
landing_page_view_service_pb2,
language_constant_service_pb2,
location_view_service_pb2,
managed_placement_view_service_pb2,
media_file_service_pb2,
merchant_center_link_service_pb2,
mobile_app_category_constant_service_pb2,
mobile_device_constant_service_pb2,
mutate_job_service_pb2,
operating_system_version_constant_service_pb2,
paid_organic_search_term_view_service_pb2,
parental_status_view_service_pb2,
payments_account_service_pb2,
product_bidding_category_constant_service_pb2,
product_group_view_service_pb2,
recommendation_service_pb2,
remarketing_action_service_pb2,
search_term_view_service_pb2,
shared_criterion_service_pb2,
shared_set_service_pb2,
shopping_performance_view_service_pb2,
topic_constant_service_pb2,
topic_view_service_pb2,
user_interest_service_pb2,
user_list_service_pb2,
user_location_view_service_pb2,
video_service_pb2,
]
names = []
for module in _shared_modules: # pragma: NO COVER
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.ads.googleads_v2.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| StarcoderdataPython |
1864238 | <reponame>saydulk/admin4<gh_stars>0
# The Admin4 Project
# (c) 2013-2014 <NAME>
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
moduleinfo={ 'name': "PostgreSQL Server",
'modulename': "PostgreSQL",
'description': "PostgreSQL database server",
'version': "9.4",
'revision': "0.5.7",
'requiredAdmVersion': "2.1.8",
'testedAdmVersion': "2.1.8",
'supports': "PostgreSQL 8.1 ... 9.4 (pre-9.1 with restrictions)",
'copyright': "(c) 2013-2014 PSE Consulting <NAME>",
'credits': "psycopg2 from http://initd.org/psycopg using libpq (http://www.postgresql.org)",
}
import sys
if not hasattr(sys, 'skipSetupInit'):
import adm
import wx
from wh import xlt, floatToTime
from LoggingDialog import LogPanel
class SqlPage:
name="SQL"
order=800
def __init__(self, notebook):
from _sqledit import SqlEditor
self.control=SqlEditor(notebook)
self.control.SetMarginWidth(1, 2)
self.notebook=notebook
self.lastNode=None
def GetControl(self):
return self.control
def Display(self, node, _detached):
if hasattr(node, "GetSql"):
sql=node.GetSql().strip().replace("\n\r", "\n").replace("\r\n", "\n")
else:
sql=xlt("No SQL query available.")
self.control.SetReadOnly(False)
self.control.SetValue(sql)
self.control.SetReadOnly(True)
self.control.SetSelection(0,0)
moduleinfo['pages'] = [SqlPage]
class Preferences(adm.PreferencePanel):
name="PostgreSQL"
configDefaults={ 'AdminNamespace': "Admin4",
'SettingCategorySort': "Reporting Query" }
import Server
| StarcoderdataPython |
11397121 | <reponame>mzy2240/GridCal<gh_stars>100-1000
import pandas as pd
import numpy as np
from scipy.sparse import lil_matrix, csc_matrix
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# file_name = 'D:\\GitHub\\GridCal\\Grids_and_profiles\\grids\\Reduction Model 2.xlsx'
file_name = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/Reduction Model 2.xlsx'
from GridCal.Engine import MultiCircuit, BranchType, FileOpen
circuit = FileOpen(file_name).open()
# form C
threshold = 1e-5
m = circuit.get_branch_number()
n = len(circuit.buses)
C = lil_matrix((m, n), dtype=int)
buses_dict = {bus: i for i, bus in enumerate(circuit.buses)}
branches_to_keep_idx = list()
branches_to_remove_idx = list()
states = np.zeros(m, dtype=int)
br_idx = [0] * m
for i in range(m):
# get the from and to bus indices
f = buses_dict[circuit.branches[i].bus_from]
t = buses_dict[circuit.branches[i].bus_to]
C[i, f] = 1
C[i, t] = -1
br_idx[i] = i
rx = circuit.branches[i].R + circuit.branches[i].X
if circuit.branches[i].branch_type == BranchType.Branch:
branches_to_remove_idx.append(i)
states[i] = 0
else:
branches_to_keep_idx.append(i)
states[i] = 1
C = csc_matrix(C)
df_C = pd.DataFrame(C.todense(),
columns=circuit.circuits[0].power_flow_input.bus_names,
index=circuit.circuits[0].power_flow_input.branch_names)
print('C:\n', df_C)
df_Cb = pd.DataFrame((C.transpose() * C).todense(),
columns=circuit.circuits[0].power_flow_input.bus_names,
index=circuit.circuits[0].power_flow_input.bus_names)
print('C:\n', df_Cb)
B = C[branches_to_keep_idx, :]
df_B = pd.DataFrame(B.todense(),
columns=circuit.circuits[0].power_flow_input.bus_names,
index=circuit.circuits[0].power_flow_input.branch_names[branches_to_keep_idx])
print('B:\n', df_B)
# B is a CSC matrix
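# (refresher, illustrative only: for a scipy csc_matrix M, the nonzeros of column j are
#  M.data[M.indptr[j]:M.indptr[j+1]] and their row indices are M.indices[M.indptr[j]:M.indptr[j+1]])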
buses_to_keep = list()
for j in range(B.shape[1]): # column index
bus_occurrences = 0 # counter
for k in range(B.indptr[j], B.indptr[j + 1]):
# i = B.indices[k] # row index
# val = B.data[k] # value
bus_occurrences += 1
    if bus_occurrences > 0:  # if the bus appears in at least one kept branch, propose keeping it
buses_to_keep.append(j)
D = B[:, buses_to_keep]
df_D = pd.DataFrame(D.todense(),
columns=circuit.circuits[0].power_flow_input.bus_names[buses_to_keep],
index=circuit.circuits[0].power_flow_input.branch_names[branches_to_keep_idx])
print('D:\n', df_D)
new_branches_to_keep = list()
buses_to_keep_s = set(buses_to_keep)
buses_availability = np.zeros(n, dtype=int)
for i in range(len(circuit.branches)):
# get the from and to bus indices
f = buses_dict[circuit.branches[i].bus_from]
t = buses_dict[circuit.branches[i].bus_to]
if f in buses_to_keep_s and t in buses_to_keep_s:
new_branches_to_keep.append(i)
buses_availability[i] = 1
E = C[new_branches_to_keep, :][:, buses_to_keep]
df_E = pd.DataFrame(E.todense(),
columns=circuit.circuits[0].power_flow_input.bus_names[buses_to_keep],
index=circuit.circuits[0].power_flow_input.branch_names[new_branches_to_keep])
print('E:\n', df_E)
# determine which buses to merge
for j in range(C.shape[1]): # column index
for k in range(C.indptr[j], C.indptr[j + 1]):
        i = C.indices[k]  # row index
| StarcoderdataPython |
5112196 | <reponame>lvxiaojie111/2019NCCCU-
import os
#read images with PIL
from PIL import Image
#numpy for array handling
import numpy as np
import tensorflow as tf
#data file
data_dir="data"
#train or test
train=True
#MODEL PATH
model_path="model/image_model"
#read pic and label from file
#label form:1_400.jpg
def read_data(data_dir):
datas=[]
labels=[]
fpaths=[]
for fname in os.listdir(data_dir):
fpath=os.path.join(data_dir,fname)
fpaths.append(fpath)
image=Image.open(fpath)
data=np.array(image)/255.0
        label=int(fname.split("_")[0])
datas.append(data)
labels.append(label)
datas=np.array(datas)
labels=np.array(labels)
print("shape of datas:{}\tshappe of labels:{}".format(datas.shape,labels.shape))
return fpaths,datas,labels
fpaths,datas,labels=read_data(data_dir)
#compute how many picture classes there are
num_classes=len(set(labels))
#define placeholders for the inputs and labels
datas_placeholder=tf.placeholder(tf.float32,[None,32,32,3])
labels_placeholder=tf.placeholder(tf.int32,[None])
#placeholder holding the dropout rate: 0.25 while training, 0 while testing
dropout_placeholdr=tf.placeholder(tf.float32)
#define the first conv layer: 20 kernels, kernel size 5, relu activation
conv0=tf.layers.conv2d(datas_placeholder,20,5,activation=tf.nn.relu)
#define max-pooling, pool size 2x2, stride 2x2
pool0=tf.layers.max_pooling2d(conv0,[2,2],[2,2])
#define the second conv layer: 40 kernels, kernel size 4, relu activation
conv1=tf.layers.conv2d(pool0,40,4,activation=tf.nn.relu)
#define max-pooling, pool size 2x2, stride 2x2
pool1=tf.layers.max_pooling2d(conv1,[2,2],[2,2])
#flatten the 3-d conv features into a 1-d feature vector
flatten=tf.layers.flatten(pool1)
#fully connected layer producing a 400-long feature vector
fc=tf.layers.dense(flatten,400,activation=tf.nn.relu)
#add dropout to reduce overfitting
dropout_fc=tf.layers.dropout(fc,dropout_placeholdr)
#output layer without activation
logits=tf.layers.dense(dropout_fc,num_classes)
predicted_labels=tf.arg_max(logits,1)
#define the cross-entropy loss
losses=tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(labels_placeholder,num_classes),
logits=logits
)
#define the average loss
mean_loss=tf.reduce_mean(losses)
#define the Adam optimizer that minimizes the loss
optimizer=tf.train.AdamOptimizer(learning_rate=1e-2).minimize(losses)
#used to save and restore the model
saver=tf.train.Saver()
with tf.Session() as sess:
if train:
print("train model")
        #when training, initialize the variables
        sess.run(tf.global_variables_initializer())
        #feed inputs and labels into the placeholders; dropout is 0.25 during training
train_feed_dict={
datas_placeholder:datas,
labels_placeholder:labels,
dropout_placeholdr:0.25
}
for step in range(150):
_,mean_loss_val=sess.run([optimizer,mean_loss],feed_dict=train_feed_dict)
if step %10==0:
print("step={}\tmean_loss={}".format(step,mean_loss_val))
saver.save(sess,model_path)
print("train is over,save model to{}".format(model_path))
else:
print("test mode")
        #when testing, restore the saved parameters
saver.restore(sess,model_path)
print("from{}model import".format(model_path))
        #mapping between label ids and class names
label_name_dict={
0:"flying",
1:"car",
2:"bird"
}
        # feed inputs and labels into the placeholders; dropout is 0 during testing
test_feed_dict = {
datas_placeholder: datas,
labels_placeholder: labels,
dropout_placeholdr: 0
}
predicted_labels_val=sess.run(predicted_labels,feed_dict=test_feed_dict)
        #compare the real labels with the model's predicted labels
for fpath,real_label,predicted_label in zip(fpaths,labels,predicted_labels_val):
            #convert label ids to names
real_label_name=label_name_dict[real_label]
predicted_label_name=label_name_dict[predicted_label]
print("{}\t{}=>{}".format(fpath,real_label_name,predicted_label_name))
| StarcoderdataPython |
9660223 | <filename>run_client.py
import sys
from Client import *
if __name__ == '__main__':
log = logger.getChild('Run_Client')
try:
mainWindow.show()
sys.exit(app.exec_())
except Exception as err:
log.error(err)
| StarcoderdataPython |
9748032 | <filename>tests/api/users/test_user.py
# -*- coding: utf-8 -*-
"""
Onyx Project
https://onyxlabs.fr
Software under licence Creative Commons 3.0 France
http://creativecommons.org/licenses/by-nc-sa/3.0/fr/
You may not use this software for commercial purposes.
@author :: <NAME>
"""
import json
import pytest
from flask import session
@pytest.mark.usefixtures('db', 'connected_app', 'connected_app_refresh', 'connected_admin_app', 'user_test')
class Test_UserApi:
def test_get_users(self, connected_app):
response = connected_app.get('/api/users')
assert response.status_code == 200
assert response.content_type == 'application/json'
def test_get_user(self, connected_app):
response = connected_app.get('/api/users/get')
assert response.status_code == 200
assert response.content_type == 'application/json'
def test_get_user_by_id(self, connected_app):
response = connected_app.post('/api/users/get', {"id": 1})
assert response.status_code == 200
assert response.content_type == 'application/json'
def test_add_user(self, connected_app):
response = connected_app.post('/api/users/register', {"email": "<EMAIL>", "username": "Test", "password": "<PASSWORD>", "firstname": "John", "lastname": "Doe", "language": "en-US"})
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json == {"status": "success"}
def test_login_user(self, connected_app, user_test):
response = connected_app.post('/api/users/login', {"email": "<EMAIL>", "password": "<PASSWORD>"})
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json["status"] == "success"
def test_manage_user(self, connected_app):
response = connected_app.post('/api/users/manage', {"email": "<EMAIL>", "username": "Test", "password": "<PASSWORD>", "verifPassword": "<PASSWORD>", "firstname": "John", "lastname": "Doe", "language": "en-US"})
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json["status"] == "success"
def test_manage_user_mismatch_password(self, connected_app):
response = connected_app.post('/api/users/manage', {"email": "<EMAIL>", "username": "Test", "password": "<PASSWORD>", "verifPassword": "<PASSWORD>", "firstname": "John", "lastname": "Doe", "language": "en-US"})
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json["status"] == "error"
def test_logout_access(self, connected_app, user_test):
response = connected_app.get('/api/users/logout_access')
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json["status"] == "success"
def test_logout_refresh(self, connected_app_refresh, user_test):
response = connected_app_refresh.get('/api/users/logout_refresh')
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json["status"] == "success"
def test_refresh(self, connected_app_refresh, user_test):
response = connected_app_refresh.get('/api/users/refresh_token')
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json["status"] == "success"
def test_token_valid(self, connected_app, user_test):
response = connected_app.get('/api/users/token_valid')
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.json["status"] == "success"
| StarcoderdataPython |
12817521 | <filename>tools/mo/openvino/tools/mo/front/tf/lrn_ext.py<gh_stars>1-10
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.lrn import AttributedLRN
class LRNExtractor(FrontExtractorOp):
"""
    TF and IE (Caffe) LRN parameters differ in several places:
    region (IE) : TF has no such parameter; normalization always runs over the last dimension
        (the feature dimension in the NHWC case)
    local-size (IE) : the size of the 1D window in Caffe. TF uses 'depth_radius' instead, where
        local-size = (2 * depth_radius) + 1
    alpha (IE) : in Caffe/IE 'alpha' is divided by local-size, so TF's alpha is multiplied by local-size here
Caffe ref : http://caffe.berkeleyvision.org/tutorial/layers/lrn.html
TF ref : https://www.tensorflow.org/api_docs/python/tf/nn/local_response_normalization
"""
op = 'LRN'
enabled = True
@classmethod
def extract(cls, node):
pb = node.pb
AttributedLRN.update_node_stat(node, {
'alpha': pb.attr['alpha'].f * (2. * pb.attr['depth_radius'].i + 1.),
'beta': pb.attr['beta'].f,
'bias': pb.attr['bias'].f,
'local_size': (2 * pb.attr['depth_radius'].i + 1),
})
return cls.enabled
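# Hedged worked example of the conversion above (values are illustrative, not taken from any real model):
# for a TF node with depth_radius=2, alpha=1e-4, beta=0.75, bias=1.0 the extractor produces
#   local_size = 2 * 2 + 1 = 5
#   alpha      = 1e-4 * 5  = 5e-4
# while beta and bias are copied unchanged.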
| StarcoderdataPython |
3288935 | class Borg(object):
_shared_state = {}
def __new__(cls, *a, **k):
obj = object.__new__(cls, *a, **k)
obj.__dict__ = cls._shared_state
return obj
def __hash__(self): return 9 # any arbitrary constant integer
def __eq__(self, other):
try: return self.__dict__ is other.__dict__
except AttributeError: return False
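# Minimal usage sketch (illustrative only): every instance shares the same __dict__, so state set
# through one Borg object is visible through every other Borg object.
if __name__ == "__main__":
    a = Borg()
    b = Borg()
    a.state = "shared"
    assert b.state == "shared"  # both objects see the same shared state
    assert a == b               # __eq__ compares the shared __dict__ identity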
| StarcoderdataPython |
1611384 | #To input username&pass and check if it's correct. Q22
#Made by INS, Using dictionary for faster credential check.
d={}
while True:
x=raw_input("Enter a user name : ")
y=raw_input("Enter a password : ")
d[x]=y
cont=raw_input("Do you want to add more usernames? [y/n] ")
if cont=='y':
continue
else:
break
#beginning of the login system
a=raw_input("Enter username to log in : ")
if d.has_key(a)==True:
print "User found. Enter password to continue. "
b=raw_input("Enter password to log in : ")
if d.get(a)==b:
print "Welcome back ",a
else:
print "Wrong password. Aborting program."
#Made by <NAME>, using LISTS
""" #Remove this quotation marks to use GM's program.
u=[]
p=[]
count=input("Enter number of usernames you want to save : ")
for i in range(0, count):
username=raw_input("Enter username to save : ")
password=raw_input("Enter password to save : ")
position_user=len(u)+1
position_pass=len(p)+1
u.append(username)
p.append(password)
#Search function begins
def passcheck():
user_=raw_input("Enter username to log in : ")
pass_=raw_input("Ente password to log in : ")
if user_ in u:
if pass_ in p:
if position_user==position_pass:
print "Verified user. "
else:
print "Password doesn't match. "
else:
print "Username doesn't exist"
passcheck()"""
| StarcoderdataPython |
11210713 | import unittest
import numpy as np
import os
import pycqed as pq
import time
import openql
import warnings
import pycqed.analysis.analysis_toolbox as a_tools
import pycqed.instrument_drivers.virtual_instruments.virtual_AWG8 as v8
import pycqed.instrument_drivers.virtual_instruments.virtual_SignalHound as sh
import pycqed.instrument_drivers.virtual_instruments.virtual_MW_source as vmw
from pycqed.instrument_drivers.meta_instrument.LutMans import mw_lutman as mwl
import pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon as ct
from pycqed.measurement import measurement_control
from qcodes import station
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.dummy_UHFQC import dummy_UHFQC
from pycqed.instrument_drivers.physical_instruments.QuTech_Duplexer import Dummy_Duplexer
from pycqed.instrument_drivers.meta_instrument.qubit_objects.QuDev_transmon import QuDev_transmon
from pycqed.instrument_drivers.meta_instrument.qubit_objects.Tektronix_driven_transmon import Tektronix_driven_transmon
from pycqed.instrument_drivers.meta_instrument.qubit_objects.CC_transmon import CBox_v3_driven_transmon, QWG_driven_transmon
from pycqed.instrument_drivers.physical_instruments.QuTech_CCL import dummy_CCL
from pycqed.instrument_drivers.physical_instruments.QuTech_VSM_Module import Dummy_QuTechVSMModule
from pycqed.instrument_drivers.meta_instrument.LutMans.ro_lutman import UHFQC_RO_LutMan
Dummy_VSM_not_fixed = False
class Test_QO(unittest.TestCase):
@classmethod
def setUpClass(self):
self.station = station.Station()
self.CCL_qubit = ct.CCLight_Transmon('CCL_qubit')
self.MW1 = vmw.VirtualMWsource('MW1')
self.MW2 = vmw.VirtualMWsource('MW2')
self.MW3 = vmw.VirtualMWsource('MW3')
self.SH = sh.virtual_SignalHound_USB_SA124B('SH')
self.UHFQC = dummy_UHFQC('UHFQC')
self.CCL = dummy_CCL('CCL')
# self.VSM = Dummy_Duplexer('VSM')
self.VSM = Dummy_QuTechVSMModule('VSM')
self.MC = measurement_control.MeasurementControl(
'MC', live_plot_enabled=False, verbose=False)
self.MC.station = self.station
self.station.add_component(self.MC)
# Required to set it to the testing datadir
test_datadir = os.path.join(pq.__path__[0], 'tests', 'test_output')
self.MC.datadir(test_datadir)
a_tools.datadir = self.MC.datadir()
self.AWG = v8.VirtualAWG8('DummyAWG8')
self.AWG8_VSM_MW_LutMan = mwl.AWG8_VSM_MW_LutMan('MW_LutMan_VSM')
self.AWG8_VSM_MW_LutMan.AWG(self.AWG.name)
self.AWG8_VSM_MW_LutMan.channel_GI(1)
self.AWG8_VSM_MW_LutMan.channel_GQ(2)
self.AWG8_VSM_MW_LutMan.channel_DI(3)
self.AWG8_VSM_MW_LutMan.channel_DQ(4)
self.AWG8_VSM_MW_LutMan.mw_modulation(100e6)
self.AWG8_VSM_MW_LutMan.sampling_rate(2.4e9)
self.ro_lutman = UHFQC_RO_LutMan(
'RO_lutman', num_res=5, feedline_number=0)
self.ro_lutman.AWG(self.UHFQC.name)
# Assign instruments
self.CCL_qubit.instr_LutMan_MW(self.AWG8_VSM_MW_LutMan.name)
self.CCL_qubit.instr_LO_ro(self.MW1.name)
self.CCL_qubit.instr_LO_mw(self.MW2.name)
self.CCL_qubit.instr_spec_source(self.MW3.name)
self.CCL_qubit.instr_acquisition(self.UHFQC.name)
self.CCL_qubit.instr_VSM(self.VSM.name)
self.CCL_qubit.instr_CC(self.CCL.name)
self.CCL_qubit.instr_LutMan_RO(self.ro_lutman.name)
self.CCL_qubit.instr_MC(self.MC.name)
self.CCL_qubit.instr_SH(self.SH.name)
config_fn = os.path.join(
pq.__path__[0], 'tests', 'openql', 'test_cfg_CCL.json')
self.CCL_qubit.cfg_openql_platform_fn(config_fn)
# Setting some "random" initial parameters
self.CCL_qubit.ro_freq(5.43e9)
self.CCL_qubit.ro_freq_mod(200e6)
self.CCL_qubit.freq_qubit(4.56e9)
self.CCL_qubit.freq_max(4.62e9)
self.CCL_qubit.mw_freq_mod(-100e6)
self.CCL_qubit.mw_awg_ch(1)
self.CCL_qubit.cfg_qubit_nr(0)
self.CCL_qubit.mw_vsm_delay(15)
self.CCL_qubit.mw_mixer_offs_GI(.1)
self.CCL_qubit.mw_mixer_offs_GQ(.2)
self.CCL_qubit.mw_mixer_offs_DI(.3)
self.CCL_qubit.mw_mixer_offs_DQ(.4)
def test_instantiate_QuDevTransmon(self):
QDT = QuDev_transmon('QuDev_transmon',
MC=None, heterodyne_instr=None, cw_source=None)
QDT.close()
def test_instantiate_TekTransmon(self):
TT = Tektronix_driven_transmon('TT')
TT.close()
def test_instantiate_CBoxv3_transmon(self):
CT = CBox_v3_driven_transmon('CT')
CT.close()
def test_instantiate_QWG_transmon(self):
QT = QWG_driven_transmon('QT')
QT.close()
##############################################
# calculate methods
##############################################
def test_calc_freq(self):
self.CCL_qubit.cfg_qubit_freq_calc_method('latest')
self.CCL_qubit.calculate_frequency()
self.CCL_qubit.cfg_qubit_freq_calc_method('flux')
self.CCL_qubit.calculate_frequency()
##############################################
# basic prepare methods
##############################################
def test_prep_for_continuous_wave(self):
self.CCL_qubit.ro_acq_weight_type('optimal')
with warnings.catch_warnings(record=True) as w:
self.CCL_qubit.prepare_for_continuous_wave()
self.assertEqual(str(w[0].message), 'Changing ro_acq_weight_type to SSB.')
self.CCL_qubit.ro_acq_weight_type('SSB')
self.CCL_qubit.prepare_for_continuous_wave()
@unittest.skipIf(True, 'Test for use with an old duplexer.')
def test_prep_cw_config_vsm(self):
self.CCL_qubit.spec_vsm_ch_in(2)
self.CCL_qubit.spec_vsm_ch_out(1)
self.CCL_qubit.spec_vsm_amp(0.5)
self.CCL_qubit.prepare_for_continuous_wave()
self.assertEqual(self.VSM.in1_out1_switch(), 'OFF')
self.assertEqual(self.VSM.in1_out2_switch(), 'OFF')
self.assertEqual(self.VSM.in2_out1_switch(), 'EXT')
self.assertEqual(self.VSM.in2_out2_switch(), 'OFF')
self.assertEqual(self.VSM.in2_out1_amp(), 0.5)
def test_prep_for_fluxing(self):
self.CCL_qubit.prepare_for_fluxing()
@unittest.skip('Not Implemented')
def test_prep_flux_bias(self):
raise NotImplementedError()
##############################################
# Testing prepare for readout
##############################################
def test_prep_readout(self):
self.CCL_qubit.prepare_readout()
def test_prep_ro_instantiate_detectors(self):
self.MC.soft_avg(1)
self.CCL_qubit.ro_soft_avg(4)
detector_attributes = [
'int_avg_det', 'int_log_det', 'int_avg_det_single',
'input_average_detector']
for det_attr in detector_attributes:
if hasattr(self.CCL_qubit, det_attr):
delattr(self.CCL_qubit, det_attr)
# Test there are no detectors to start with
for det_attr in detector_attributes:
self.assertFalse(hasattr(self.CCL_qubit, det_attr))
self.CCL_qubit.prepare_readout()
# Test that the detectors have been instantiated
for det_attr in detector_attributes:
self.assertTrue(hasattr(self.CCL_qubit, det_attr))
self.assertEqual(self.MC.soft_avg(), 4)
def test_prep_ro_MW_sources(self):
LO = self.CCL_qubit.instr_LO_ro.get_instr()
LO.off()
LO.frequency(4e9)
LO.power(10)
self.assertEqual(LO.status(), 'off')
self.assertEqual(LO.frequency(), 4e9)
self.CCL_qubit.mw_pow_td_source(20)
self.CCL_qubit.ro_freq(5.43e9)
self.CCL_qubit.ro_freq_mod(200e6)
self.CCL_qubit.prepare_readout()
self.assertEqual(LO.status(), 'on')
self.assertEqual(LO.frequency(), 5.43e9-200e6)
self.assertEqual(LO.power(), 20)
def test_prep_ro_pulses(self):
self.CCL_qubit.ro_pulse_mixer_alpha(1.1)
self.CCL_qubit.ro_pulse_mixer_phi(4)
self.CCL_qubit.ro_pulse_length(312e-9)
self.CCL_qubit.ro_pulse_down_amp0(.1)
self.CCL_qubit.ro_pulse_down_length0(23e-9)
self.CCL_qubit.ro_pulse_mixer_offs_I(.01)
self.CCL_qubit.ro_pulse_mixer_offs_Q(.02)
self.CCL_qubit.prepare_readout()
self.assertEqual(self.ro_lutman.mixer_phi(), 4)
self.assertEqual(self.ro_lutman.mixer_alpha(), 1.1)
self.assertEqual(self.ro_lutman.M_length_R0(), 312e-9)
self.assertEqual(self.ro_lutman.M_down_length0_R0(), 23e-9)
self.assertEqual(self.ro_lutman.M_down_amp0_R0(), .1)
self.assertEqual(self.UHFQC.sigouts_0_offset(), .01)
self.assertEqual(self.UHFQC.sigouts_1_offset(), .02)
def test_prep_ro_integration_weigths(self):
IF = 50e6
self.CCL_qubit.ro_freq_mod(IF)
self.CCL_qubit.ro_acq_weight_chI(3)
self.CCL_qubit.ro_acq_weight_chQ(4)
# Testing SSB
trace_length = 4096
self.CCL_qubit.ro_acq_weight_type('SSB')
self.CCL_qubit.prepare_readout()
tbase = np.arange(0, trace_length/1.8e9, 1/1.8e9)
cosI = np.array(np.cos(2*np.pi*IF*tbase))
self.assertEqual(self.UHFQC.quex_rot_3_real(), 1)
self.assertEqual(self.UHFQC.quex_rot_3_imag(), 1)
self.assertEqual(self.UHFQC.quex_rot_4_real(), 1)
self.assertEqual(self.UHFQC.quex_rot_4_imag(), -1)
uploaded_wf = self.UHFQC.quex_wint_weights_3_real()
np.testing.assert_array_almost_equal(cosI, uploaded_wf)
# Testing DSB case
self.CCL_qubit.ro_acq_weight_type('DSB')
self.CCL_qubit.prepare_readout()
self.assertEqual(self.UHFQC.quex_rot_3_real(), 2)
self.assertEqual(self.UHFQC.quex_rot_3_imag(), 0)
self.assertEqual(self.UHFQC.quex_rot_4_real(), 2)
self.assertEqual(self.UHFQC.quex_rot_4_imag(), 0)
# Testing Optimal weight uploading
test_I = np.ones(10)
test_Q = 0.5*test_I
self.CCL_qubit.ro_acq_weight_func_I(test_I)
self.CCL_qubit.ro_acq_weight_func_Q(test_Q)
self.CCL_qubit.ro_acq_weight_type('optimal')
self.CCL_qubit.prepare_readout()
self.UHFQC.quex_rot_4_real(.21)
self.UHFQC.quex_rot_4_imag(.108)
upl_I = self.UHFQC.quex_wint_weights_3_real()
upl_Q = self.UHFQC.quex_wint_weights_3_imag()
np.testing.assert_array_almost_equal(test_I, upl_I)
np.testing.assert_array_almost_equal(test_Q, upl_Q)
self.assertEqual(self.UHFQC.quex_rot_3_real(), 1)
self.assertEqual(self.UHFQC.quex_rot_3_imag(), -1)
# These should not have been touched by optimal weights
self.assertEqual(self.UHFQC.quex_rot_4_real(), .21)
self.assertEqual(self.UHFQC.quex_rot_4_imag(), .108)
self.CCL_qubit.ro_acq_weight_type('SSB')
########################################################
# Test prepare for timedomain #
########################################################
def test_prep_for_timedomain(self):
self.CCL_qubit.prepare_for_timedomain()
def test_prep_td_sources(self):
self.MW1.off()
self.MW2.off()
self.CCL_qubit.freq_qubit(4.56e9)
self.CCL_qubit.mw_freq_mod(-100e6)
self.CCL_qubit.mw_pow_td_source(13)
self.CCL_qubit.prepare_for_timedomain()
self.assertEqual(self.MW1.status(), 'on')
self.assertEqual(self.MW2.status(), 'on')
self.assertEqual(self.MW2.frequency(), 4.56e9 + 100e6)
self.assertEqual(self.MW2.power(), 13)
def test_prep_td_pulses(self):
self.CCL_qubit.mw_awg_ch(5)
self.CCL_qubit.mw_G_mixer_alpha(1.02)
self.CCL_qubit.mw_D_mixer_phi(8)
self.CCL_qubit.mw_mixer_offs_GI(.1)
self.CCL_qubit.mw_mixer_offs_GQ(.2)
self.CCL_qubit.mw_mixer_offs_DI(.3)
self.CCL_qubit.mw_mixer_offs_DQ(.4)
self.CCL_qubit.mw_ef_amp(.34)
self.CCL_qubit.mw_freq_mod(-100e6)
self.CCL_qubit.anharmonicity(-235e6)
self.CCL_qubit.prepare_for_timedomain()
self.assertEqual(self.AWG8_VSM_MW_LutMan.channel_GI(), 5)
self.assertEqual(self.AWG8_VSM_MW_LutMan.channel_GQ(), 6)
self.assertEqual(self.AWG8_VSM_MW_LutMan.channel_DI(), 7)
self.assertEqual(self.AWG8_VSM_MW_LutMan.channel_DQ(), 8)
self.assertEqual(self.AWG8_VSM_MW_LutMan.G_mixer_alpha(), 1.02)
self.assertEqual(self.AWG8_VSM_MW_LutMan.D_mixer_phi(), 8)
self.assertEqual(self.CCL.vsm_channel_delay0(),
self.CCL_qubit.mw_vsm_delay())
self.assertEqual(self.AWG.sigouts_4_offset(), .1)
self.assertEqual(self.AWG.sigouts_5_offset(), .2)
self.assertEqual(self.AWG.sigouts_6_offset(), .3)
self.assertEqual(self.AWG.sigouts_7_offset(), .4)
self.assertEqual(self.AWG8_VSM_MW_LutMan.mw_ef_amp180(), .34)
self.assertEqual(self.AWG8_VSM_MW_LutMan.mw_ef_modulation(), -335e6)
def test_prep_td_config_vsm(self):
self.CCL_qubit.mw_vsm_G_amp(0.8)
self.CCL_qubit.mw_vsm_D_phase(0)
self.CCL_qubit.mw_vsm_ch_in(2)
self.CCL_qubit.mw_vsm_mod_out(5)
self.CCL_qubit.prepare_for_timedomain()
self.assertEqual(self.VSM.mod5_ch2_gaussian_amp(), 0.8)
self.assertEqual(self.VSM.mod5_ch2_derivative_phase(), 0)
###################################################
# Test basic experiments #
###################################################
def test_cal_mixer_offsets_drive(self):
self.CCL_qubit.calibrate_mixer_offsets_drive()
def test_resonator_spec(self):
self.CCL_qubit.ro_acq_weight_type('SSB')
        # set to None to bypass the validator
self.CCL_qubit.freq_res._save_val(None)
try:
self.CCL_qubit.find_resonator_frequency()
except ValueError:
pass # Fit can fail because testing against random data
self.CCL_qubit.freq_res(5.4e9)
try:
self.CCL_qubit.find_resonator_frequency()
except ValueError:
pass # Fit can fail because testing against random data
freqs = np.linspace(6e9, 6.5e9, 31)
self.CCL_qubit.measure_heterodyne_spectroscopy(freqs=freqs,
analyze=False)
def test_resonator_power(self):
self.CCL_qubit.ro_acq_weight_type('SSB')
freqs = np.linspace(6e9, 6.5e9, 31)
powers = np.arange(-30, -10, 5)
        # set to None to bypass the validator
self.CCL_qubit.freq_res._save_val(None)
self.CCL_qubit.measure_resonator_power(freqs=freqs, powers=powers)
def test_measure_transients(self):
self.CCL_qubit.ro_acq_input_average_length(2e-6)
self.CCL_qubit.measure_transients()
def test_qubit_spec(self):
freqs = np.linspace(6e9, 6.5e9, 31)
# Data cannot be analyzed as dummy data is just random numbers
self.CCL_qubit.measure_spectroscopy(freqs=freqs, analyze=False)
def test_find_qubit_freq(self):
self.CCL_qubit.cfg_qubit_freq_calc_method('latest')
try:
self.CCL_qubit.find_frequency()
except TypeError:
# Because the test runs against dummy data, the analysis
# can fail on a failing fit which raises a type error when
# creating the custom text string. This test now only tests
# if the find_frequency method runs until the expected part.
# This should be fixed by making the analysis robust.
pass
self.CCL_qubit.cfg_qubit_freq_calc_method('flux')
try:
self.CCL_qubit.find_frequency()
except TypeError:
pass
def test_AllXY(self):
self.CCL_qubit.measure_allxy()
def test_T1(self):
self.CCL_qubit.measure_T1(
times=np.arange(0, 1e-6, 20e-9), update=False, analyze=False)
self.CCL_qubit.T1(20e-6)
self.CCL_qubit.measure_T1(update=False, analyze=False)
def test_Ramsey(self):
self.CCL_qubit.mw_freq_mod(100e6)
# Cannot analyze dummy data as analysis will fail on fit
self.CCL_qubit.measure_ramsey(times=np.arange(0, 1e-6, 20e-9),
update=False, analyze=False)
self.CCL_qubit.T2_star(20e-6)
self.CCL_qubit.measure_ramsey(update=False, analyze=False)
def test_echo(self):
self.CCL_qubit.mw_freq_mod(100e6)
# self.CCL_qubit.measure_echo(times=np.arange(0,2e-6,40e-9))
time.sleep(1)
self.CCL_qubit.T2_echo(40e-6)
self.CCL_qubit.measure_echo(analyze=False, update=False)
time.sleep(1)
with self.assertRaises(ValueError):
invalid_times = [0.1e-9, 0.2e-9, 0.3e-9, 0.4e-9]
self.CCL_qubit.measure_echo(times=invalid_times)
with self.assertRaises(ValueError):
self.CCL_qubit.mw_freq_mod(.1e6)
invalid_times = np.arange(0, 2e-6, 60e-9)
self.CCL_qubit.measure_echo(times=invalid_times)
self.CCL_qubit.mw_freq_mod(100e6)
@classmethod
def tearDownClass(self):
for inststr in list(self.CCL_qubit._all_instruments):
try:
inst = self.CCL_qubit.find_instrument(inststr)
inst.close()
except KeyError:
pass
| StarcoderdataPython |
11217518 | <filename>Python/1235.py<gh_stars>0
def execucoes():
return int(input())
def entrada():
return input()
def imprimir(v):
print(v)
def dividir(s):
    # index of the last character of the first half of s
    return (int(len(s)/2) - 1)
def processar(e, s):
    # reverse the first half (s[0..e]) and the second half (s[e+1..]) separately, then join them
    return (s[e::-1] + s[len(s)-1:e:-1])
def decifrar(n, e):
n -= 1
imprimir(processar(dividir(e), e))
if (n > 0): decifrar(n, entrada())
decifrar(execucoes(), entrada()) | StarcoderdataPython |
225414 | <filename>silver/migrations/0034_auto_20170203_1644.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("silver", "0033_auto_20170203_1540"),
]
operations = [
migrations.AddField(
model_name="Document",
name="transaction_currency",
field=models.CharField(max_length=4),
),
]
| StarcoderdataPython |
4890562 | """Evaluation of the model."""
import numpy as np
import tensorflow as tf
import os
import logging
import sys
import imp
import include.tensorvision.utils as utils
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_data', 'test',
"""Either 'test' or 'train_eval'.""")
# TODO: Iterate over all possible Values
# Write Values to Tensorboard
def evaluate(train_dir):
"""
Load the model and run evaluation.
    The current version runs the evaluation defined in network.evaluation
    and prints the output to stdout.
Parameters
----------
train_dir : str
Path to a directory which includes a folder model_files. This folder
has to include a params.py, input.py and a network.py
"""
target_dir = os.path.join(train_dir, "model_files")
params = imp.load_source("params", os.path.join(target_dir, "params.py"))
data_input = imp.load_source("input", os.path.join(target_dir, "input.py"))
network = imp.load_source("network",
os.path.join(target_dir, "network.py"))
with tf.Graph().as_default():
# Retrieve images and labels
eval_data = FLAGS.eval_data == 'test'
images, labels = data_input.inputs(eval_data=eval_data,
data_dir=utils.cfg.data_dir,
batch_size=params.batch_size)
# Generate placeholders for the images and labels.
keep_prob = utils.placeholder_inputs(params.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = network.inference(images, keep_prob)
# Add to the Graph the Ops for loss calculation.
loss = network.loss(logits, labels)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = network.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
init = tf.initialize_all_variables()
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
ckpt = tf.train.get_checkpoint_state(train_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("No checkpoints found! ")
exit(1)
print("Doing Evaluation with lots of data")
utils.do_eval(sess=sess,
eval_correct=eval_correct,
keep_prob=keep_prob,
num_examples=params.num_examples_per_epoch_for_eval,
params=params,
name="eval")
def main(_):
"""Orchestrate the evaluation of a model in the default training dir."""
train_dir = utils.get_train_dir()
evaluate(train_dir)
if __name__ == '__main__':
tf.app.run()
| StarcoderdataPython |
8069829 | #
# DVI.py
#
# (c) 2020 by <NAME>
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# ResourceType: mgmtObj:DeviceInfo
#
from .MgmtObj import *
from Constants import Constants as C
from Validator import constructPolicy
import Utils
# Attribute policies for this resource are constructed during startup of the CSE
attributePolicies = constructPolicy([
'ty', 'ri', 'rn', 'pi', 'acpi', 'ct', 'lt', 'et', 'lbl', 'at', 'aa', 'daci',
'mgd', 'obis', 'obps', 'dc', 'mgs', 'cmlk',
'dlb', 'man', 'mfdl', 'mfd', 'mod', 'smod', 'dty', 'dvnm', 'fwv', 'swv',
'hwv', 'osv', 'cnty', 'loc', 'syst', 'spur', 'purl', 'ptl'
])
defaultDeviceType = 'unknown'
defaultModel = "unknown"
defaultManufacturer = "unknown"
defaultDeviceLabel = "unknown serial id"
class DVI(MgmtObj):
def __init__(self, jsn: dict = None, pi: str = None, create: bool = False) -> None:
super().__init__(jsn, pi, C.tsDVI, C.mgdDVI, create=create, attributePolicies=attributePolicies)
if self.json is not None:
self.setAttribute('dty', defaultDeviceType, overwrite=False)
self.setAttribute('mod', defaultModel, overwrite=False)
self.setAttribute('man', defaultManufacturer, overwrite=False)
self.setAttribute('dlb', defaultDeviceLabel, overwrite=False)
| StarcoderdataPython |
8135650 | <reponame>Joe310/GenomeBuilder
"""
Created on June 20, 2014
@author: <NAME>
"""
import time
import mmap
import random
import sys
import re
import argparse
import pickle
import shutil
import unittest
import os
from heapq import merge
class chromosome_builder():
def __init__(self, args=None):
if not args:
args = self.parse_system_args()
self._genome_id = args.id
self._chromosome_id = args.chr_id
self._chromosome_size = args.chr_size
if args.scale == 'k':
self._chromosome_size *= 1000
elif args.scale == 'm':
self._chromosome_size *= 1000000
elif args.scale == 'b':
self._chromosome_size *= 1000000000
if args.alu == 'y':
self._use_alu = True
self._base_alu = args.base_alu
else:
self._use_alu = False
if args.assembly == 'y':
self._use_assembly = True
else:
self._use_assembly = False
self._allele_base_list = ["C", "T", "G", "A"]
self._working_dir = "TMP_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id)
self._ref_genome_file = "ref_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._priv_genome_file = "private_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._reads_file = "reads_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._answer_file = "ans_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._base_alu_file = "alu_" + str(self._genome_id) + ".txt"
self._overlap_buffer = 5
self._long_variant_rate = .1
self._snp_rate = 0.003
self._ref_str_rate = 0.000075
self._denovo_str_rate = 0.000025
self._str_min_copies = 5
self._str_max_copies = 50
self._str_min_length = 2
self._str_max_length = 5
self._str_mutation_amount = 2
self._ref_cnv_rate = 0.0001
self._denovo_cnv_rate = 0.00001
self._cnv_min_length = 20
self._cnv_max_length = 500
self._cnv_min_copies = 2
self._cnv_max_copies = 10
self._cnv_mutation_amount = 2
self._inv_rate = 0.00001
self._inv_short_min_length = 20
self._inv_short_max_length = 50
self._inv_long_min_length = 50
self._inv_long_max_length = 500
self._ins_rate = 0.0005
self._ins_short_min_length = 1
self._ins_short_max_length = 5
self._ins_long_min_length = 5
self._ins_long_max_length = 200
self._del_rate = 0.0005
self._del_short_min_length = 1
self._del_short_max_length = 5
self._del_long_min_length = 5
self._del_long_max_length = 200
self._alu_mutation_rate = 0.3
self._alu_min_length = 300
self._alu_max_length = 300
if self._use_alu:
self._ref_alu_rate = 0.075
self._denovo_alu_rate = 0.025
else:
self._ref_alu_rate = 0
self._denovo_alu_rate = 0
#reduce the max length of mutations for smaller chromosome sizes
if self._chromosome_size < 500000:
self._nbr_long_inv = 0
self._nbr_long_ins = 0
self._nbr_long_del = 0
self._str_max_copies = 20 #from 50
self._cnv_max_length = 50 #from 500
self._cnv_max_copies = 4 #from 10
else:
self._nbr_long_inv = max(4, int(self._inv_rate * self._chromosome_size * self._long_variant_rate))
self._nbr_long_ins = max(10, int(self._ins_rate * self._chromosome_size * self._long_variant_rate))
self._nbr_long_del = max(10, int(self._ins_rate * self._chromosome_size * self._long_variant_rate))
self._nbr_snp = int(self._snp_rate * self._chromosome_size)
self._nbr_denovo_str = int(self._denovo_str_rate * self._chromosome_size)
self._nbr_denovo_cnv = int(self._denovo_cnv_rate * self._chromosome_size)
self._nbr_ref_alu = int(self._ref_alu_rate * self._chromosome_size / self._alu_max_length)
self._nbr_denovo_alu = int(self._denovo_alu_rate * self._chromosome_size / self._alu_max_length)
self._nbr_ref_str = max(4, int(self._ref_str_rate * self._chromosome_size))
self._nbr_ref_cnv = max(4, int(self._ref_cnv_rate * self._chromosome_size))
self._nbr_short_inv = max(4, int(self._inv_rate * self._chromosome_size * (1 - self._long_variant_rate)))
self._nbr_short_ins = max(10, int(self._ins_rate * self._chromosome_size * (1 - self._long_variant_rate)))
self._nbr_short_del = max(10, int(self._ins_rate * self._chromosome_size * (1 - self._long_variant_rate)))
#mutation_list is used when generating the various mutations for the genomes
self._mutation_list = []
self._str_list = [] #used when mutating STRs in donor genome
self._cnv_list = [] #used when mutating CNVs in donor genome
self._cnv_dict = {}
self._sequencer_coverage = 30
self._sequencer_error_rate = 0.01
self._sequencer_garbage_rate = 0.1
self._sequencer_read_length = 50
self._sequencer_gap_min = 90
self._sequencer_gap_max = 110
def insert_newlines(self, sequence, line_size=80):
return '\n'.join(sequence[i:i+line_size] for i in range(0, len(sequence), line_size)) + '\n'
def write_genome_lines_to_file(self, genome, file_object):
genome = self.insert_newlines(genome, 80)
file_object.write(genome)
def parse_fasta(self, file_name, buffer_size=100000):
"""Gives buffered access to large fasta files so that the entire file doesn't need to be loaded into
memory all at once. Works as a generator, yielding a block of up to buffer_size with each call. For
general use, use:
for sequence in parse_fasta(file_name, buffer_size)
        This yields sequences until the end of file or a '>' character is found, at which point it yields None
Since None is yielded for '>', this can be used with multiple chromosomes separated by '>chr#' in a single
file. To do so, the generator should be initialized before iterating through chromosomes, then as each
chromosome is processed you can anticipate None will be yielded one time to mark the end of the current
        chromosome
:param file_name: the file to read in
:param buffer_size: the number of characters to return for each iteration
:returns: Sequences of up to size buffer_size, or None if EOF or '>' is encountered
"""
with open(file_name) as fasta_file:
start_of_file = True
buffer = ""
while True:
for line in fasta_file:
#skip initial documentation lines
if start_of_file and '>' in line:
pass
#each chromosome is marked by a > line, so need to catch this switch
elif not start_of_file and '>' in line:
if len(buffer) == 0:
yield None
else:
#first yield the buffer, then yeild None to flag the end of the chromosome
yield buffer
buffer = ''
yield None
else:
if start_of_file:
start_of_file = False
buffer += line.strip()
if len(buffer) >= buffer_size:
yield buffer[:buffer_size]
buffer = buffer[buffer_size:]
#clear out any remaining buffer when the file is done
if len(buffer) > 0:
yield buffer
buffer = ''
else:
yield None
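    # Hedged usage sketch for parse_fasta (the file name and process() call are placeholders):
    #
    #   fasta = builder.parse_fasta("ref_genome.fasta", buffer_size=100000)
    #   for sequence in fasta:
    #       if sequence is None:
    #           break  # end of the current chromosome reached
    #       process(sequence)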
#this version may give a slight performance boost, but need to work out the bugs before it can be used
def parse_fasta_mmap(self, file_name, buffer_size=100000):
with open(file_name, encoding='utf-8') as fasta:
fasta_map = mmap.mmap(fasta.fileno(), 0, access=mmap.ACCESS_READ)
start_of_file = True
buffer = ""
for line in fasta_map:
line = line.decode('utf-8')
#skip initial documentation lines
if start_of_file and '>' in line:
pass
#each chromosome is marked by a > line, so need to catch this switch
elif not start_of_file and '>' in line:
if len(buffer) == 0:
yield None
else:
                        #first yield the buffer, then yield None to flag the end of the chromosome
yield buffer
buffer = ''
yield None
else:
if start_of_file:
start_of_file = False
buffer += line.strip()
if len(buffer) >= buffer_size:
yield buffer[:buffer_size]
buffer = buffer[buffer_size:]
#clear out any remaining buffer when the file is done
yield buffer
def compare_intervals(self, stt1, end1, stt2, end2, buffer_space=0):
"""
Compares two intervals represented by their start and end posns to check which precedes the other,
or if they overlap. Adds a buffer space around each interval that increases the region in which they
are considered to overlap.
Returns: -1 If interval 1 (stt1 and end1) precedes interval 2, 0 if they overlap, or 1 if interval 2
precedes interval 1
"""
stt1 -= buffer_space
end1 += buffer_space
stt2 -= buffer_space
end2 += buffer_space
if end1 < stt2:
return -1
elif end2 < stt1:
return 1
else:
return 0
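    # Illustrative examples (assumed values):
    #   compare_intervals(0, 10, 20, 30)                  -> -1  (interval 1 precedes interval 2)
    #   compare_intervals(0, 10, 12, 30, buffer_space=5)  ->  0  (the buffers make them overlap)
    #   compare_intervals(40, 50, 20, 30)                 ->  1  (interval 2 precedes interval 1)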
def find_empty_ranges(self, range_size, nbr_posns, buffer_size):
"""
Searches for ranges of unused posns in the genome for use when introducing new structural variants.
Finds the number of posns given as a parameter and returns them as a list.
"""
posn_list = []
max_posn = self._chromosome_size - range_size - 1
#Will repeat until enough positions have been found
while len(posn_list) < nbr_posns:
raw_posn_list = []
for posn in posn_list:
raw_posn_list.append(posn)
posn_list = []
#1. Generate 150% needed number of random positions
for i in range(int(nbr_posns)):
raw_posn_list.append(random.randint(0, max_posn))
#2. Sort those positions and then check each to find whether they will overlap a preexisting
#structural variant.
raw_posn_list.sort()
overlap_idx = 0
#first check that there is no overlap among the generated positions
last_end = raw_posn_list[0] + range_size + (2 * buffer_size)
tmp_posn_list = []
tmp_posn_list.append(raw_posn_list[0])
for i in range(1, len(raw_posn_list)):
raw_posn = raw_posn_list[i]
if raw_posn > last_end:
tmp_posn_list.append(raw_posn)
last_end = raw_posn + range_size + (2 * buffer_size)
else:
new_posn = last_end + 1
new_end = new_posn + range_size + (2 * buffer_size)
if new_posn < max_posn:
if i == len(raw_posn_list) - 1 or new_end < raw_posn_list[i+1]:
tmp_posn_list.append(new_posn)
last_end = new_end
raw_posn_list = tmp_posn_list
tmp_posn_list = None
if len(self._mutation_list) == 0:
posn_list = raw_posn_list
else:
#then check that the remaining positions do not overlap existing structural variants
for i in range(len(raw_posn_list)):
raw_posn = raw_posn_list[i]
while overlap_idx < len(self._mutation_list):
ovlp_stt = self._mutation_list[overlap_idx][0]
ovlp_end = self._mutation_list[overlap_idx][1]
compare_result = self.compare_intervals(raw_posn, raw_posn+range_size,
ovlp_stt, ovlp_end,
buffer_size)
#no overlap
if compare_result == -1:
posn_list.append(raw_posn)
break
#attempt to shift this interval down, if that doesn't work, then
#ignore this position as it overlaps a preexisting position
elif compare_result == 0:
if overlap_idx > 0:
prev_end1 = self._mutation_list[overlap_idx-1][1]
prev_end2 = raw_posn_list[i-1]+range_size
prev_end = max(prev_end1, prev_end2)
new_posn = prev_end + (2*buffer_size)
if new_posn + range_size + (2 * buffer_size) < ovlp_stt:
posn_list.append(new_posn)
break
#no overlap was found, move to the next position in the mutation list to check for overlap
elif compare_result == 1:
if overlap_idx < len(self._mutation_list) - 1:
overlap_idx += 1
else:
posn_list.append(raw_posn)
break
#3. If there are too many positions, then randomly removes some to reduce list to proper size
while len(posn_list) > nbr_posns:
del posn_list[random.randint(0, len(posn_list)-1)]
return posn_list
def random_sequence(self, seq_len):
return "".join(random.choice(self._allele_base_list) for i in range(seq_len))
def delete_block(self, sequence, index, size):
"""deletes a block of items from a given sequence
:param sequence: sequence from which to delete items
:param index: the first position to delete
:param size: the total number of positions to delete, may extend beyond the end of the sequence
:returns: modified sequence with block deleted
"""
if index < 0 and index + size > -1:
return sequence[:index]
else:
return sequence[:index] + sequence[index + size:]
def insert_block(self, sequence, index, new_block):
"""inserts a block of items into a given sequence
:param sequence: sequence into which to insert items
:param index: the position before which to begin the insertion, to append to end use index = len(sequence)
:param new_block: the items to be inserted
:returns: modified sequence with block inserted
"""
return sequence[:index] + new_block + sequence[index:]
def overwrite_block(self, sequence, index, new_block):
"""overwrites a block of items in a given sequence
:param sequence: sequence in which to overwrite items
:param index: the position at which to begin overwriting, to append to end use index = len(sequence)
:param new_block: the items which will be written, may extend beyond end of original sequence
:returns: modified sequence with block overwritten
"""
if (index < 0 and index + len(new_block) > -1) or (index + len(new_block) > len(sequence) - 1):
return sequence[:index] + new_block
else:
return sequence[:index] + new_block + sequence[index + len(new_block):]
def invert_block(self, sequence, index, size):
"""inverts a block of items in a given sequence
:param sequence: sequence in which to invert items
:param index: the position at which to begin inversion
:param size: the number of items which will be inverted
        :returns: modified sequence with the block inverted, the original block, and the inverted block
"""
if index < 0:
stt_idx = len(sequence) + index
else:
stt_idx = index
end_idx = min(stt_idx + size, len(sequence))
original_block = sequence[stt_idx:end_idx]
inverted_block = original_block[::-1]
sequence_with_inversion = self.overwrite_block(sequence, stt_idx, inverted_block)
return sequence_with_inversion, original_block, inverted_block
def generate_snp_allele(self, orig_allele):
allele_list = ["A", "C", "G", "T"]
allele_list.remove(orig_allele)
return random.choice(allele_list)
def generate_str_base(self, seq_len):
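        """generates the base repeat unit for a short tandem repeat (STR)
        candidate units made of a single repeated allele, or whose first half
        matches their second half, are rejected and regenerated
        :param seq_len: length of the repeat unit to generate
        :returns: the generated repeat unit
        """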
str_seq = ''
while len(str_seq) == 0:
str_seq = self.random_sequence(seq_len)
invalid = True
#ensure the sequence is not all the same allele
for idx in range(1, len(str_seq)):
if str_seq[idx - 1] != str_seq[idx]:
invalid = False
#if the first half of the sequence matches the second half, then consider it invalid
if not invalid and str_seq[:int(len(str_seq)/2)] == str_seq[int(len(str_seq)/2):]:
invalid = True
#if the sequence was invalid, then clear it out and try again
if invalid:
str_seq = ''
return str_seq
def generate_alu_sequence(self, length):
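        """generates a new Alu element derived from the base Alu sequence
        the base Alu is grown or shrunk by random single-base insertions or deletions
        to reach the requested length, then point mutations are applied according to
        the Alu mutation rate
        :param length: desired length of the returned Alu sequence
        :returns: the mutated Alu sequence
        """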
if not self._base_alu or len(self._base_alu) == 0:
raise Exception("No base Alu defined")
new_alu = self._base_alu
len_diff = abs(length - len(new_alu))
for i in range(length - len(new_alu)):
new_alu = self.insert_block(new_alu,
random.randint(0, len(new_alu)-1),
random.choice(self._allele_base_list))
for i in range(len(new_alu) - length):
new_alu = self.delete_block(new_alu,
random.randint(0, len(new_alu)-1),
1)
for k in range(int(len(new_alu)*self._alu_mutation_rate)-len_diff):
alu_posn = random.randint(0, len(new_alu)-1)
snp = self.generate_snp_allele(new_alu[alu_posn])
new_alu = self.overwrite_block(new_alu, alu_posn, snp)
return new_alu
def ranged_length_list(self, min_len, max_len, nbr_items):
"""generates a list of lengths that vary in size between min_len and max_len
:param min_len: smallest value to return
        :param max_len: largest value to return, must be >= min_len; when nbr_items > 1 at least 2 items will have this value
:param nbr_items: the number of items which will be returned, must be > 0
:returns: a list of lengths with nbr_items items that vary from min_len to max_len
"""
if nbr_items < 1:
raise Exception("Minimum length for the list is 1")
if max_len < min_len:
raise Exception("max_len must be greater than or equal to min_len")
length_list = []
if nbr_items > 1:
max_items = max(2, int(nbr_items/10))
else:
max_items = 1
for i in range(max_items):
length_list.append(max_len)
if nbr_items > 2:
below_max = nbr_items - max_items
length_range = max_len - min_len
for i in range(below_max):
adj_value = random.randint(0, i) / below_max
length_list.append(int(min_len + (length_range * adj_value)))
return length_list
def mutate_str(self):
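        """mutates the copy number of each reference STR for the donor genome
        each STR gains or loses up to _str_mutation_amount copies of its repeat unit
        and is recorded as a MUT_STR entry in the mutation list
        """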
temp_mut_list = []
for j in range(len(self._str_list)):
mutation_amount = random.randint(-self._str_mutation_amount, self._str_mutation_amount)
orig_str = self._str_list[j][1] * self._str_list[j][2]
new_str = self._str_list[j][1] * (self._str_list[j][2] + mutation_amount)
str_stt = self._str_list[j][0]
str_end = self._str_list[j][0] + len(orig_str)
#self._mutation_list.append([str_stt, str_end, 'MUT_STR', new_str])
#self._mutation_list.sort()
temp_mut_list.append([str_stt, str_end, 'MUT_STR', new_str])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def mutate_cnv(self):
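        """mutates the copy number of each reference CNV for the donor genome
        each CNV gains or loses up to _cnv_mutation_amount copies: new copies are
        placed in empty ranges, removed copies are recorded as DEL_CNV entries, and
        all remaining copies are recorded as DONOR_CNV entries in the mutation list
        """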
new_posn_list = self.find_empty_ranges(self._cnv_max_length,
self._nbr_ref_cnv * self._cnv_mutation_amount,
self._overlap_buffer)
temp_mut_list = []
for i in range(self._nbr_ref_cnv):
cnv_id = self._cnv_list[i][0]
cnv_len = self._cnv_list[i][1]
cnv_posn_list = self._cnv_list[i][2]
mutation_amount = random.randint(-self._cnv_mutation_amount, self._cnv_mutation_amount)
if mutation_amount > 0:
for i in range(mutation_amount):
cnv_posn = new_posn_list.pop(random.randint(0, len(new_posn_list)-1))
cnv_posn_list.append(cnv_posn)
cnv_posn_list.sort()
if mutation_amount < 0:
                for j in range(abs(mutation_amount)):  #abs() so the removal loop runs when copies are lost
next_posn = cnv_posn_list.pop(random.randint(0, len(cnv_posn_list)-1))
cnv_stt = next_posn
cnv_end = next_posn + cnv_len
self._mutation_list.append([cnv_stt, cnv_end, 'DEL_CNV', cnv_id])
for cnv_posn in cnv_posn_list:
#self._mutation_list.append([cnv_posn, cnv_posn + cnv_len, "DONOR_CNV", cnv_id])
#self._mutation_list.sort()
temp_mut_list.append([cnv_posn, cnv_posn + cnv_len, "DONOR_CNV", cnv_id])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_cnv(self, nbr_cnv, variant_tag):
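        """allocates copy number variants (CNVs) in non-overlapping empty ranges
        each CNV receives a length, a random number of copies, and a set of positions;
        every copy is recorded in the mutation list and the CNV itself in _cnv_list
        :param nbr_cnv: number of distinct CNVs to allocate
        :param variant_tag: tag recorded in the mutation list (e.g. REF_CNV, DONOR_CNV)
        """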
if nbr_cnv < 1:
return
cnv_length_list = self.ranged_length_list(self._cnv_min_length, self._cnv_max_length, nbr_cnv)
cnv_posn_list = self.find_empty_ranges(self._cnv_max_length,
nbr_cnv * self._cnv_max_copies,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_cnv):
posn_list = []
seq_len = cnv_length_list[i]
nbr_copies = random.randint(self._cnv_min_copies, self._cnv_max_copies)
if 'REF' in variant_tag:
cnv_id = i
else:
cnv_id = i + self._nbr_ref_cnv
for j in range(nbr_copies):
cnv_posn = cnv_posn_list.pop(random.randint(0, len(cnv_posn_list)-1))
posn_list.append(cnv_posn)
#self._mutation_list.append([cnv_posn, cnv_posn+seq_len, variant_tag, cnv_id])
temp_mut_list.append([cnv_posn, cnv_posn+seq_len, variant_tag, cnv_id])
self._cnv_list.append([cnv_id, seq_len, posn_list])
#self._mutation_list.sort()
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_alu(self, nbr_alu, variant_tag):
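        """allocates Alu elements in non-overlapping empty ranges
        donor Alus are insertions, so their end position relative to the reference
        genome equals their start position
        :param nbr_alu: number of Alus to allocate
        :param variant_tag: tag recorded in the mutation list (e.g. REF_ALU, DONOR_ALU)
        """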
if nbr_alu < 1:
return
alu_length_list = self.ranged_length_list(self._alu_min_length, self._alu_max_length, nbr_alu)
alu_posn_list = self.find_empty_ranges(self._alu_max_length,
nbr_alu,
self._overlap_buffer)
temp_mut_list = []
for j in range(nbr_alu):
alu_stt = alu_posn_list[j]
alu_len = alu_length_list.pop(random.randint(0, len(alu_length_list)-1))
#donor alus are inserted into the genome, so their end_posn in reference to ref genome is their start
if variant_tag == 'DONOR_ALU':
alu_end = alu_stt
else:
alu_end = alu_posn_list[j] + alu_len
#self._mutation_list.append([alu_stt, alu_end, variant_tag, alu_len])
#self._mutation_list.sort()
temp_mut_list.append([alu_stt, alu_end, variant_tag, alu_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_str(self, nbr_str, variant_tag):
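        """allocates short tandem repeats (STRs) in non-overlapping empty ranges
        each STR receives a random repeat unit and copy number; the repeat is recorded
        in the mutation list and in _str_list
        :param nbr_str: number of STRs to allocate
        :param variant_tag: tag recorded in the mutation list (e.g. REF_STR, DONOR_STR)
        """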
if nbr_str < 1:
return
str_posn_list = self.find_empty_ranges(self._str_max_copies * self._str_max_length,
nbr_str,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_str):
seq_len = random.randint(self._str_min_length, self._str_max_length)
nbr_copies = random.randint(self._str_min_copies, self._str_max_copies)
str_seq = self.generate_str_base(seq_len)
str_posn = str_posn_list.pop(random.randint(0, len(str_posn_list)-1))
#self._mutation_list.append([str_posn, str_posn + (seq_len*nbr_copies), variant_tag, str_seq, nbr_copies])
self._str_list.append([str_posn, str_seq, nbr_copies])
#self._mutation_list.sort()
temp_mut_list.append([str_posn, str_posn + (seq_len*nbr_copies), variant_tag, str_seq, nbr_copies])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_inversions(self, nbr_inv, min_len, max_len):
if nbr_inv < 1:
return
inv_length_list = self.ranged_length_list(min_len, max_len, nbr_inv)
inv_posn_list = self.find_empty_ranges(max_len,
nbr_inv,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_inv):
inv_stt = inv_posn_list[i]
inv_len = inv_length_list.pop(random.randint(0, len(inv_length_list)-1))
inv_end = inv_stt + inv_len
#self._mutation_list.append([inv_stt, inv_end, 'INV', inv_len])
#self._mutation_list.sort()
temp_mut_list.append([inv_stt, inv_end, 'INV', inv_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_insertions(self, nbr_ins, min_len, max_len):
if nbr_ins < 1:
return
ins_length_list = self.ranged_length_list(min_len, max_len, nbr_ins)
ins_posn_list = self.find_empty_ranges(max_len,
nbr_ins,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_ins):
ins_stt = ins_posn_list[i]
ins_len = ins_length_list.pop(random.randint(0, len(ins_length_list)-1))
ins_end = ins_stt
#self._mutation_list.append([ins_stt, ins_end, 'INS', ins_len])
#self._mutation_list.sort()
temp_mut_list.append([ins_stt, ins_end, 'INS', ins_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_deletions(self, nbr_del, min_len, max_len):
if nbr_del < 1:
return
del_length_list = self.ranged_length_list(min_len, max_len, nbr_del)
del_posn_list = self.find_empty_ranges(max_len,
nbr_del,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_del):
del_stt = del_posn_list[i]
del_len = del_length_list.pop(random.randint(0, len(del_length_list)-1))
del_end = del_stt + del_len
#self._mutation_list.append([del_stt, del_end, 'DEL', del_len])
#self._mutation_list.sort()
temp_mut_list.append([del_stt, del_end, 'DEL', del_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_snps(self):
if self._nbr_snp < 1:
return
snp_posn_list = self.find_empty_ranges(1, self._nbr_snp, 0)
temp_mut_list = []
for i in range(self._nbr_snp):
snp_stt = snp_posn_list[i]
snp_end = snp_stt + 1
temp_mut_list.append([snp_stt, snp_end, 'SNP', 1])
#self._mutation_list.append([snp_stt, snp_end, 'SNP', 1])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
#self._mutation_list.sort()
def generate_ref_genome(self):
"""
Generates a random reference genome with the specified number of chromosomes,
each of length length_chromosome
"""
if self._use_alu:
if not self._base_alu or len(self._base_alu) == 0:
raise Exception("No base Alu defined")
if not os.path.exists(self._base_alu_file):
with open(self._base_alu_file, "w") as alu_file:
alu_file.write(">" + str(self._genome_id) + "\n")
self.write_genome_lines_to_file(self._base_alu, alu_file)
with open(self._ref_genome_file, "w") as ref_file:
if not os.path.exists(self._working_dir):
os.makedirs(self._working_dir)
ref_file.write(">" + str(self._genome_id))
ref_file.write("\n>chr" + str(self._chromosome_id) + "\n")
self._mutation_list = []
if self._use_alu:
print('REF GENOME: Allocating ' + str(self._nbr_ref_alu) + ' Alus')
self.allocate_alu(self._nbr_ref_alu, "REF_ALU")
print('REF GENOME: Allocating ' + str(self._nbr_ref_cnv) + ' CNVs')
self.allocate_cnv(self._nbr_ref_cnv, "REF_CNV")
print('REF GENOME: Allocating ' + str(self._nbr_ref_str) + ' STRs')
self.allocate_str(self._nbr_ref_str, "REF_STR")
buffer_adj = 0
buffer = ''
mut_idx = 0
mut_max_idx = len(self._mutation_list) - 1
buffer_size = 80
count = 0
if len(self._mutation_list) == 0:
mut_idx = -1
while count < self._chromosome_size:
if len(buffer) > buffer_size or mut_idx == -1:
if mut_idx == -1:
skip_distance = self._chromosome_size - count
buffer += self.random_sequence(skip_distance)
count += skip_distance
buffer_size = len(buffer)
self.write_genome_lines_to_file(buffer, ref_file)
else:
self.write_genome_lines_to_file(buffer[:buffer_size], ref_file)
buffer = buffer[buffer_size:]
buffer_adj += buffer_size
elif len(self._mutation_list) > 0 and count < self._mutation_list[mut_idx][0]:
skip_distance = self._mutation_list[mut_idx][0] - count
buffer += self.random_sequence(skip_distance)
count += skip_distance
elif mut_idx != -1:
mut_type = self._mutation_list[mut_idx][2]
if mut_type == 'REF_STR':
str_seq = self._mutation_list[mut_idx][3]
nbr_copies = self._mutation_list[mut_idx][4]
str_seq = str_seq * nbr_copies
#pads either side of str with non matching allele to remove ambiguity
if buffer[-1] == str_seq[-1]:
buffer = buffer[:-1] + self.generate_snp_allele(buffer[-1])
right_padding = self.generate_snp_allele(str_seq[0])
buffer += str_seq + right_padding
count += len(str_seq) + 1
elif mut_type == 'REF_CNV':
cnv_stt = self._mutation_list[mut_idx][0]
cnv_end = self._mutation_list[mut_idx][1]
cnv_len = cnv_end - cnv_stt
cnv_id = self._mutation_list[mut_idx][3]
if cnv_id in self._cnv_dict:
cnv_seq = self._cnv_dict[cnv_id]
else:
cnv_seq = self.random_sequence(cnv_len)
self._cnv_dict[cnv_id] = cnv_seq
buffer += cnv_seq
count += cnv_len
elif mut_type == 'REF_ALU':
alu_len = self._mutation_list[mut_idx][3]
alu_seq = self.generate_alu_sequence(alu_len)
buffer += alu_seq
count += alu_len
if mut_idx < mut_max_idx:
mut_idx += 1
else:
mut_idx = -1 #flags when all mutations have been seen
def generate_donor_genome(self):
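        """
        Generates the donor genome by applying mutated STRs/CNVs, de novo variants,
        inversions, insertions, deletions and SNPs to the reference genome, writing
        the mutated sequence to the private genome file and every variant to the
        answer key files
        """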
with open(self._priv_genome_file, "w") as donor_genome_file:
donor_genome_file.write(">" + str(self._genome_id) + "\n")
donor_genome_file.write(">chr" + str(self._chromosome_id) + "\n")
buffer_size = 100000
fasta_parser = self.parse_fasta(self._ref_genome_file, buffer_size=buffer_size)
#plan out all mutation ranges in reference to the ref genome, storing them in the mutation_list
print('DONOR GENOME: Mutating existing STRs')
self.mutate_str()
print('DONOR GENOME: Mutating existing CNVs')
self.mutate_cnv()
if self._use_alu:
print('DONOR GENOME: Allocating ' + str(self._nbr_denovo_alu) + ' Alus')
self.allocate_alu(self._nbr_denovo_alu, "DONOR_ALU")
print('DONOR GENOME: Allocating ' + str(self._nbr_denovo_cnv) + ' CNVs')
self.allocate_cnv(self._nbr_denovo_cnv, "DONOR_CNV")
print('DONOR GENOME: Allocating ' + str(self._nbr_denovo_str) + ' STRs')
self.allocate_str(self._nbr_denovo_str, "DONOR_STR")
print('DONOR GENOME: Allocating ' + str(self._nbr_long_inv) + ' long inversions')
self.allocate_inversions(self._nbr_long_inv, self._inv_long_min_length, self._inv_long_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_long_ins) + ' long insertions')
self.allocate_insertions(self._nbr_long_ins, self._ins_long_min_length, self._ins_long_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_long_del) + ' long deletions')
self.allocate_deletions(self._nbr_long_del, self._del_long_min_length, self._del_long_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_short_inv) + ' short inversions')
self.allocate_inversions(self._nbr_short_inv, self._inv_short_min_length, self._inv_short_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_short_ins) + ' short insertions')
self.allocate_insertions(self._nbr_short_ins, self._ins_short_min_length, self._ins_short_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_short_del) + ' short deletions')
self.allocate_deletions(self._nbr_short_del, self._del_short_min_length, self._del_short_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_snp) + ' SNPs')
self.allocate_snps()
variant_types = ['STR','CNV','ALU','INV','INS','DEL','SNP']
answer_files = {}
for variant in variant_types:
answer_files[variant] = open(os.path.join(self._working_dir, variant + '_ANS_FILE'), 'w')
        #read in the reference genome, writing the donor genome out to file using
#the mutations from the mutation list
with open(self._priv_genome_file, "a") as donor_genome_file:
ref_genome_idx = 0
buffer_adjust = 0
donor_genome = ''
ref_genome = ''
if len(self._mutation_list) > 0:
mut_idx = 0
else:
mut_idx = -1
mut_max_idx = len(self._mutation_list) - 1
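            #ref_genome holds a sliding window of the reference sequence; buffer_adjust
            #tracks how much has been trimmed so absolute mutation positions can be
            #mapped into the window as position - buffer_adjust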
while ref_genome_idx + buffer_adjust < self._chromosome_size:
ref_genome = ref_genome[ref_genome_idx:]
buffer_adjust += ref_genome_idx
ref_genome_idx = 0
next_segment = next(fasta_parser)
if next_segment:
ref_genome += next_segment
if mut_idx == -1:
donor_genome += ref_genome[ref_genome_idx:]
ref_genome_idx += len(ref_genome) - ref_genome_idx
elif ref_genome_idx + buffer_adjust != self._mutation_list[mut_idx][0]:
donor_genome += ref_genome[ref_genome_idx:self._mutation_list[mut_idx][0] - buffer_adjust]
ref_genome_idx = self._mutation_list[mut_idx][0] - buffer_adjust
else:
if len(donor_genome) > buffer_size:
self.write_genome_lines_to_file(donor_genome[:buffer_size], donor_genome_file)
donor_genome = donor_genome[buffer_size:]
mut_type = self._mutation_list[mut_idx][2]
ref_genome_stt = self._mutation_list[mut_idx][0] - buffer_adjust
ref_genome_end = self._mutation_list[mut_idx][1] - buffer_adjust
ref_genome_idx = ref_genome_end
if mut_type == 'SNP':
orig_allele = ref_genome[ref_genome_stt]
snp_allele = self.generate_snp_allele(orig_allele)
donor_genome += snp_allele
answer_files['SNP'].write('\n' + str(self._chromosome_id) + ',' + orig_allele +
',' + snp_allele + ',' + str(self._mutation_list[mut_idx][0]))
#the mutation list contains both the original str and the mutated str, so when
#one is encountered the other needs to be pulled and dealt with at the same
#time
elif mut_type == 'MUT_STR':
new_str = self._mutation_list[mut_idx][3]
mut_idx += 1
donor_genome += new_str
answer_files['STR'].write('\n' + str(self._chromosome_id) + ',' + new_str +
',' + str(self._mutation_list[mut_idx][0]))
elif mut_type == 'DONOR_STR':
str_seq = self._mutation_list[mut_idx][3]
nbr_copies = self._mutation_list[mut_idx][4]
str_seq = str_seq * nbr_copies
#pads either side of str with non matching allele to remove ambiguity
left_padding = self.generate_snp_allele(str_seq[-1])
right_padding = self.generate_snp_allele(str_seq[0])
padded_str_seq = left_padding + str_seq + right_padding
donor_genome += padded_str_seq
answer_files['STR'].write('\n' + str(self._chromosome_id) + ',' + str_seq +
',' + str(self._mutation_list[mut_idx][0] + 1))
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + padded_str_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'REF_CNV':
cnv_id = self._mutation_list[mut_idx][3]
cnv_seq = ref_genome[ref_genome_stt:ref_genome_end]
donor_genome += cnv_seq
answer_files['CNV'].write('\n' + str(self._chromosome_id) + ',' + str(cnv_id) +
',' + str(self._mutation_list[mut_idx][0]) + ',' + cnv_seq)
#assumes DEL_CNV is always followed by an entry for the REF_CNV, so the mut_idx is incremented
elif mut_type == 'DEL_CNV':
mut_idx += 1
del_len = self._mutation_list[mut_idx][1] - self._mutation_list[mut_idx][0]
del_seq = ref_genome[ref_genome_stt:ref_genome_end]
answer_files['DEL'].write('\n' + str(self._chromosome_id) + ',' + del_seq + ',' +
str(self._mutation_list[mut_idx][0]))
#every non deleted CNV will have a DONOR_CNV entry (some will only have DONOR_CNV, no REF_CNV)
elif mut_type == 'DONOR_CNV':
cnv_stt = self._mutation_list[mut_idx][0]
cnv_end = self._mutation_list[mut_idx][1]
cnv_len = cnv_end - cnv_stt
cnv_id = self._mutation_list[mut_idx][3]
if cnv_id in self._cnv_dict:
cnv_seq = self._cnv_dict[cnv_id]
else:
cnv_seq = ref_genome[ref_genome_stt:ref_genome_end]
self._cnv_dict[cnv_id] = cnv_seq
donor_genome += cnv_seq
if mut_idx < mut_max_idx and self._mutation_list[mut_idx+1][2] == 'REF_CNV' and \
self._mutation_list[mut_idx][0] == self._mutation_list[mut_idx+1][0]:
mut_idx += 1
else:
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + cnv_seq + ',' +
str(self._mutation_list[mut_idx][0]))
answer_files['CNV'].write('\n' + str(self._chromosome_id) + ',' + str(cnv_id) +
',' + str(self._mutation_list[mut_idx][0]) + ',' + cnv_seq)
elif mut_type == 'REF_ALU':
alu_stt = self._mutation_list[mut_idx][0]
alu_end = self._mutation_list[mut_idx][1]
alu_len = alu_end - alu_stt
alu_seq = ref_genome[ref_genome_stt:ref_genome_end]
donor_genome += alu_seq
answer_files['ALU'].write('\n' + str(self._chromosome_id) + ',' + alu_seq + ',' +
str(self._mutation_list[mut_idx][0]))
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + alu_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'DONOR_ALU':
alu_len = self._mutation_list[mut_idx][3]
alu_seq = self.generate_alu_sequence(alu_len)
donor_genome += alu_seq
answer_files['ALU'].write('\n' + str(self._chromosome_id) + ',' + alu_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'INV':
orig_block = ref_genome[ref_genome_stt:ref_genome_end]
inv_block = orig_block[::-1]
donor_genome += inv_block
answer_files['INV'].write('\n' + str(self._chromosome_id) + ',' + orig_block + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'INS':
ins_len = self._mutation_list[mut_idx][3]
ins_seq = self.random_sequence(ins_len)
donor_genome += ins_seq
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + ins_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'DEL':
del_seq = ref_genome[ref_genome_stt:ref_genome_end]
answer_files['DEL'].write('\n' + str(self._chromosome_id) + ',' + del_seq + ',' +
str(self._mutation_list[mut_idx][0]))
if mut_idx < mut_max_idx:
mut_idx += 1
else:
mut_idx = -1 #flags when all mutations have been seen
writeable = int(len(donor_genome) / 80)
if writeable >= 1:
self.write_genome_lines_to_file(donor_genome[:writeable*80], donor_genome_file)
donor_genome = donor_genome[writeable*80:]
self.write_genome_lines_to_file(donor_genome, donor_genome_file)
for key in answer_files:
answer_files[key].close()
with open(self._answer_file, 'w') as main_ans:
main_ans.write(">" + str(self._genome_id) + "\n")
main_ans.write(">chr" + str(self._chromosome_id))
for variant in variant_types:
with open(os.path.join(self._working_dir, variant + '_ANS_FILE'), 'r') as temp_file:
if variant == 'CNV':
main_ans.write("\n>CNV")
cnv_dict = {}
for line in temp_file:
line = line.strip()
if line:
line_array = line.split(',')
cnv_id = line_array[1]
cnv_posn = line_array[2]
cnv_seq = line_array[3]
if cnv_id in cnv_dict:
cnv_seq, cnv_posn_list = cnv_dict[cnv_id]
else:
cnv_posn_list = []
cnv_posn_list.append(cnv_posn)
cnv_dict[cnv_id] = (cnv_seq, cnv_posn_list)
cnv_list = []
for key in cnv_dict:
cnv_seq, cnv_posn_list = cnv_dict[key]
cnv_posn_list.sort()
cnv_list.append([cnv_seq, cnv_posn_list])
cnv_list.sort(key = lambda l: l[:][1][0])
for cnv_seq, cnv_posn_list in cnv_list:
main_ans.write('\n' + str(self._chromosome_id) + ',' + cnv_seq)
for posn in cnv_posn_list:
main_ans.write(',' + str(posn))
else:
main_ans.write("\n>" + variant)
for line in temp_file:
line = line.strip()
if line:
main_ans.write('\n' + line)
os.remove(os.path.join(self._working_dir, variant + '_ANS_FILE'))
shutil.rmtree(self._working_dir)
def add_sequencer_errors(self, read_sequence):
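        """introduces random substitution errors into a read at the sequencer error rate
        :param read_sequence: the read to mutate
        :returns: the read with any sequencing errors applied
        """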
error_list = []
for i in range(len(read_sequence)):
if random.random() < self._sequencer_error_rate:
error_list.append(i)
for i in error_list:
orig_allele = read_sequence[i]
error_allele = self.generate_snp_allele(orig_allele)
read_sequence = self.overwrite_block(read_sequence, i, error_allele)
return read_sequence
def create_read_pair(self, donor_genome, left_stt, right_stt):
left_read = donor_genome[left_stt:left_stt+self._sequencer_read_length]
right_read = donor_genome[right_stt:right_stt+self._sequencer_read_length]
        #only one read is reversed so the two reads of a pair always point in
        #opposing directions
if random.random() > .5:
right_read = right_read[::-1]
else:
left_read = left_read[::-1]
return left_read, right_read
def generate_reads(self):
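        """generates paired-end reads from the donor genome and writes them to the reads file
        enough pairs are sampled to reach the configured coverage, a fraction are replaced
        with random (garbage) reads, and sequencer errors are added to every read
        """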
with open(self._reads_file, "w") as reads_file:
reads_file.write(">" + str(self._genome_id))
with open(self._priv_genome_file, "r") as donor_genome_file:
# skip the first two '>' labels in the donor genome file
donor_genome_file.readline()
donor_genome_file.readline()
temp_file_name_list = []
donor_genome = ""
for line in donor_genome_file:
if ">" in line:
break
donor_genome += str(line).strip()
nbr_reads = int(self._chromosome_size * self._sequencer_coverage / self._sequencer_read_length)
nbr_pairs = int(nbr_reads / 2)
write_list = []
for i in range(nbr_pairs):
if random.random() < self._sequencer_garbage_rate:
left_read = self.random_sequence(self._sequencer_read_length)
right_read = self.random_sequence(self._sequencer_read_length)
else:
gap_len = random.randint(self._sequencer_gap_min, self._sequencer_gap_max)
total_len = 2 * self._sequencer_read_length + gap_len
left_stt = random.randint(0, self._chromosome_size - total_len - 1)
right_stt = left_stt + self._sequencer_read_length + gap_len
left_read, right_read = self.create_read_pair(donor_genome, left_stt, right_stt)
left_read = self.add_sequencer_errors(left_read)
right_read = self.add_sequencer_errors(right_read)
reads_file.write('\n' + left_read + ',' + right_read)
def parse_system_args(self):
parser = argparse.ArgumentParser(
description="This script generates a reference and donor genome as a set "
"of files. The files can be used for various computational "
"genetics purposes. The following files are created: 1) "
"reference genome \'ref_*.txt\' 2) mutated donor genome "
"\'private_*.txt\' 3) paired-end reads \'reads_*.txt\'"
"from donor genome 4) mutation answer key \'ans_*.txt\'"
)
parser.add_argument(
"--id",
type=str,
default='test',
help="The name or ID of this genome for identification purposes. The "
"genome id will be reflected in the generated file names."
)
parser.add_argument(
"--chr_id",
type=int,
default='1',
help="The id number for this chromosome, defaults to 1."
)
parser.add_argument(
"--chr_size",
type=int,
default='10',
help="The size of each chromosome, multiplied by -s (scaling factor). Change "
"scale with -s option"
)
parser.add_argument(
"-s", "--scale",
type=str,
choices=["k", "m", "b"],
default="k",
help="the amount to scale chromosome-size by. k: thousands, m: millions,"
" b: billions. By default, scale is k (thousands)."
)
parser.add_argument(
"--alu",
type=str,
choices=["y", "n"],
default="n",
help="whether to include Alus in the genome."
)
parser.add_argument(
"--assembly",
type=str,
choices=["y", "n"],
default="n",
help="whether to generate output for assembly (no reference genome)."
)
return parser.parse_args()
class TestClass(unittest.TestCase):
def setUp(self):
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = 10
args.scale = 'k'
args.alu = 'y'
args.assembly = 'n'
args.base_alu = random_sequence(300)
self.gen = chromosome_builder(args)
def test_compare_intervals(self):
self.assertEqual(0, self.gen.compare_intervals(0, 1, 1, 2, buffer_space=0))
self.assertEqual(-1, self.gen.compare_intervals(0, 0, 1, 10, buffer_space=0))
self.assertEqual(1, self.gen.compare_intervals(11, 12, 1, 10, buffer_space=0))
self.assertEqual(0, self.gen.compare_intervals(0, 0, 1, 10, buffer_space=1))
self.assertEqual(0, self.gen.compare_intervals(1, 10, 0, 0, buffer_space=1))
self.assertEqual(0, self.gen.compare_intervals(11, 12, 1, 10, buffer_space=1))
self.assertEqual(0, self.gen.compare_intervals(0, 4, 1, 2, buffer_space=0))
self.assertEqual(0, self.gen.compare_intervals(0, 4, 1, 2, buffer_space=5))
self.assertEqual(0, self.gen.compare_intervals(1, 4, 0, 2, buffer_space=0))
self.assertEqual(0, self.gen.compare_intervals(1, 4, 1, 4, buffer_space=0))
def test_find_empty_ranges(self):
self.gen._mutation_list.append([1000, 10000, 'MUT_STR', 9000])
posn_list = self.gen.find_empty_ranges(10, 25, 5)
self.assertEqual(len(posn_list), 25)
for posn in posn_list:
self.assertTrue(posn < 995 and posn >= 0)
self.gen._mutation_list = []
self.gen._mutation_list.append([0, 9000, 'MUT_STR', 9000])
posn_list = self.gen.find_empty_ranges(10, 25, 5)
self.assertEqual(len(posn_list), 25)
for posn in posn_list:
self.assertTrue(posn <= 10000 and posn > 9005)
self.gen._mutation_list = []
self.gen._mutation_list.append([9000, 10000, 'MUT_STR', 1000])
self.gen._mutation_list.append([0, 1000, 'MUT_STR', 1000])
self.gen._mutation_list.sort()
posn_list = self.gen.find_empty_ranges(10, 100, 5)
self.assertEqual(len(posn_list), 100)
for posn in posn_list:
self.assertTrue(posn < 8995 and posn > 1005)
self.gen._mutation_list = []
self.gen._mutation_list.append([0, 1000, 'MUT_STR', 1000])
self.gen._mutation_list.append([1100, 2000, 'MUT_STR', 900])
self.gen._mutation_list.append([4000, 5000, 'MUT_STR', 1000])
self.gen._mutation_list.append([7000, 8000, 'MUT_STR', 1000])
self.gen._mutation_list.append([9000, 10000, 'MUT_STR', 1000])
self.gen._mutation_list.sort()
posn_list = self.gen.find_empty_ranges(10, 25, 5)
self.assertEqual(len(posn_list), 25)
for posn in posn_list:
self.assertTrue( (posn > 1005 and posn < 1095) or
(posn > 2005 and posn < 3995) or
(posn > 5005 and posn < 6995) or
(posn > 8005 and posn < 8995) )
self.gen._mutation_list = []
posn_list = self.gen.find_empty_ranges(50, 4, 5)
for posn in posn_list:
self.assertTrue(posn > 0 and posn < 10000)
def test_random_sequence(self):
rand_seq = self.gen.random_sequence(10)
self.assertEqual(len(rand_seq), 10)
for allele in rand_seq:
self.assertTrue(allele in ['A','C','G','T'])
def test_delete_block(self):
sequence = 'THIS IS A TEST SEQUENCE'
sequence = self.gen.delete_block(sequence, 5, 3)
self.assertEqual(sequence, 'THIS A TEST SEQUENCE')
sequence = self.gen.delete_block(sequence, 0, 1)
self.assertEqual(sequence, 'HIS A TEST SEQUENCE')
sequence = self.gen.delete_block(sequence, -1, 1)
self.assertEqual(sequence, 'HIS A TEST SEQUENC')
sequence = self.gen.delete_block(sequence, -2, 1)
self.assertEqual(sequence, 'HIS A TEST SEQUEC')
sequence = self.gen.delete_block(sequence, -2, 3)
self.assertEqual(sequence, 'HIS A TEST SEQU')
def test_insert_block(self):
sequence = 'HIS A TEST SEQUE'
sequence = self.gen.insert_block(sequence, -1, 'EC')
self.assertEqual(sequence, 'HIS A TEST SEQUECE')
sequence = self.gen.insert_block(sequence, -2, 'N')
self.assertEqual(sequence, 'HIS A TEST SEQUENCE')
sequence = self.gen.insert_block(sequence, 0, 'T')
self.assertEqual(sequence, 'THIS A TEST SEQUENCE')
sequence = self.gen.insert_block(sequence, 5, 'IS ')
self.assertEqual(sequence, 'THIS IS A TEST SEQUENCE')
sequence = self.gen.insert_block(sequence, len(sequence), '!')
self.assertEqual(sequence, 'THIS IS A TEST SEQUENCE!')
def test_overwrite_block(self):
sequence = 'THIS IS A TEST SEQUENCE!'
sequence = self.gen.overwrite_block(sequence, 2, 'AT')
self.assertEqual(sequence, 'THAT IS A TEST SEQUENCE!')
sequence = self.gen.overwrite_block(sequence, 0, 'W')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE!')
sequence = self.gen.overwrite_block(sequence, -1, '?')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE?')
sequence = self.gen.overwrite_block(sequence, -1, '?!?')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE?!?')
sequence = self.gen.overwrite_block(sequence, len(sequence), '!')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE?!?!')
def test_invert_block(self):
sequence = 'THIS IS A TEST SEQUENCE'
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, 0, 1)
self.assertEqual(orig_block, 'T')
self.assertEqual(inverted_block, 'T')
self.assertEqual(sequence, 'THIS IS A TEST SEQUENCE')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, 0, 2)
self.assertEqual(orig_block, 'TH')
self.assertEqual(inverted_block, 'HT')
self.assertEqual(sequence, 'HTIS IS A TEST SEQUENCE')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, -1, 1)
self.assertEqual(orig_block, 'E')
self.assertEqual(inverted_block, 'E')
self.assertEqual(sequence, 'HTIS IS A TEST SEQUENCE')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, -2, 2)
self.assertEqual(orig_block, 'CE')
self.assertEqual(inverted_block, 'EC')
self.assertEqual(sequence, 'HTIS IS A TEST SEQUENEC')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, 5, 4)
self.assertEqual(orig_block, 'IS A')
self.assertEqual(inverted_block, 'A SI')
self.assertEqual(sequence, 'HTIS A SI TEST SEQUENEC')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, len(sequence) - 2, 2)
self.assertEqual(orig_block, 'EC')
self.assertEqual(inverted_block, 'CE')
self.assertEqual(sequence, 'HTIS A SI TEST SEQUENCE')
def test_generate_snp_allele(self):
snp_allele = self.gen.generate_snp_allele('A')
self.assertTrue(snp_allele in ['T','G','C'])
snp_allele = self.gen.generate_snp_allele('T')
self.assertTrue(snp_allele in ['A','G','C'])
snp_allele = self.gen.generate_snp_allele('G')
self.assertTrue(snp_allele in ['T','A','C'])
snp_allele = self.gen.generate_snp_allele('C')
self.assertTrue(snp_allele in ['T','G','A'])
def test_generate_str_base(self):
str_base = self.gen.generate_str_base(2)
self.assertTrue(str_base[0] != str_base[1])
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
str_base = self.gen.generate_str_base(3)
self.assertTrue((str_base[0] != str_base[1]) or
(str_base[1] != str_base[2]) or
(str_base[0] != str_base[2]) )
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
str_base = self.gen.generate_str_base(4)
self.assertEqual(len(str_base), 4)
self.assertTrue(str_base[:2] != str_base[2:])
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
str_base = self.gen.generate_str_base(5)
self.assertEqual(len(str_base), 5)
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
def test_generate_alu_sequence(self):
alu_seq = self.gen.generate_alu_sequence(300)
self.assertTrue(len(alu_seq) == 300)
self.assertTrue(alu_seq != self.gen._base_alu)
for allele in alu_seq:
self.assertTrue(allele in ['T','A','C','G'])
alu_seq = self.gen.generate_alu_sequence(295)
self.assertTrue(len(alu_seq) == 295)
self.assertTrue(alu_seq != self.gen._base_alu[:295])
self.assertTrue(alu_seq != self.gen._base_alu[4:])
for allele in alu_seq:
self.assertTrue(allele in ['T','A','C','G'])
alu_seq = self.gen.generate_alu_sequence(305)
self.assertTrue(len(alu_seq) == 305)
self.assertTrue(alu_seq != self.gen._base_alu)
for allele in alu_seq:
self.assertTrue(allele in ['T','A','C','G'])
def test_ranged_length_list(self):
self.assertRaises(Exception, self.gen.ranged_length_list, 10, 10, 0)
self.assertRaises(Exception, self.gen.ranged_length_list, 1, 0, 1)
length_list = self.gen.ranged_length_list(10, 10, 10)
self.assertEqual(10, len(length_list))
for item in length_list:
self.assertEqual(item, 10)
length_list = self.gen.ranged_length_list(10, 20, 10)
self.assertEqual(10, len(length_list))
max_count = 0
for item in length_list:
if item == 20:
max_count += 1
self.assertTrue(item <= 20 and item >= 10)
self.assertEqual(max_count, 2)
length_list = self.gen.ranged_length_list(10, 20, 1)
self.assertEqual(1, len(length_list))
self.assertTrue(length_list[0] == 20)
length_list = self.gen.ranged_length_list(10, 20, 2)
self.assertEqual(2, len(length_list))
self.assertTrue(length_list[0] == 20 and length_list[1] == 20)
length_list = self.gen.ranged_length_list(10, 20, 3)
self.assertEqual(3, len(length_list))
self.assertTrue(length_list[0] == 20 and length_list[1] == 20 and length_list[2] == 10)
def test_write_genome_lines_to_file(self):
length_list = [0,1,79,80,81]
for i in length_list:
with open('test_file', 'w') as test_file:
self.gen.write_genome_lines_to_file(self.gen._base_alu, test_file)
with open('test_file', 'r') as test_file:
base_alu = ''
for line in test_file:
base_alu += str(line).strip()
self.assertEqual(base_alu, self.gen._base_alu)
os.remove('test_file')
def test_parse_fasta(self):
nbr_chr_list = [1,2,3]
length_list = [1,79,80,81]
for nbr_chr in nbr_chr_list:
for next_len in length_list:
file_name = 'test_file_' + str(nbr_chr) + '_' + str(next_len)
with open(file_name, 'w') as test_file:
test_file.write('>test')
for chr in range(1, nbr_chr + 1):
test_file.write('\n>chr' + str(chr) + '\n')
self.gen.write_genome_lines_to_file(self.gen._base_alu, test_file)
for sequence in self.gen.parse_fasta(file_name):
if sequence:
base_alu = sequence
self.assertEqual(base_alu, self.gen._base_alu)
else:
break
os.remove(file_name)
def test_create_read_pair(self):
donor_genome = self.gen.random_sequence(200)
left_read, right_read = self.gen.create_read_pair(donor_genome, 0, 0)
self.assertEqual(left_read, right_read[::-1])
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
left_stt = random.randint(0, 200 - self.gen._sequencer_read_length - 1)
right_stt = random.randint(0, 200 - self.gen._sequencer_read_length - 1)
left_read, right_read = self.gen.create_read_pair(donor_genome, left_stt, right_stt)
self.assertEqual(len(left_read), len(right_read))
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
left_stt = 0
right_stt = 200 - self.gen._sequencer_read_length - 1
left_read, right_read = self.gen.create_read_pair(donor_genome, left_stt, right_stt)
self.assertEqual(len(left_read), len(right_read))
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
def test_add_sequencer_errors(self):
error_found = False
donor_genome = self.gen.random_sequence(200)
left_read, right_read = self.gen.create_read_pair(donor_genome, 0, 0)
self.assertEqual(left_read, right_read[::-1])
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
for i in range(25):
left_read_w_error = self.gen.add_sequencer_errors(left_read)
right_read_w_error = self.gen.add_sequencer_errors(right_read)
if left_read != left_read_w_error or right_read != right_read_w_error:
error_found = True
self.assertTrue(error_found)
def test_generate_ref_genome(self):
for alu in ['y', 'n']:
for test_args in [[10, 'k'], [100, 'k']]:
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = test_args[0]
args.scale = test_args[1]
args.alu = alu
args.assembly = 'n'
args.base_alu = random_sequence(300)
self.gen = chromosome_builder(args)
self.gen._alu_min_length = 300
self.gen._alu_max_length = 300
self.gen._alu_mutation_rate = 0.3
self.gen.generate_ref_genome()
ref_genome = next(self.gen.parse_fasta(self.gen._ref_genome_file))
cnv_dict = {}
cnv_count = 0
str_count = 0
for mutation in self.gen._mutation_list:
if mutation[2] == 'REF_STR':
str_count += 1
self.assertTrue(ref_genome[mutation[0]:mutation[1]] == mutation[3]*mutation[4])
elif mutation[2] == 'REF_CNV':
if mutation[3] in cnv_dict:
self.assertTrue(cnv_dict[mutation[3]] == ref_genome[mutation[0]:mutation[1]])
else:
cnv_count += 1
cnv_dict[mutation[3]] = ref_genome[mutation[0]:mutation[1]]
elif mutation[2] == 'REF_ALU':
base_alu = self.gen._base_alu
match_count = 0
for i in range(len(base_alu)):
if self.gen._base_alu[i] == ref_genome[mutation[0]+i]:
match_count += 1
self.assertTrue((match_count / len(base_alu)) > (.99 - self.gen._alu_mutation_rate))
self.assertEqual(cnv_count, self.gen._nbr_ref_cnv)
self.assertEqual(str_count, self.gen._nbr_ref_str)
def test_generate_donor_genome(self):
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = 10
args.scale = 'k'
args.alu = 'n'
args.assembly = 'n'
args.base_alu = random_sequence(300)
self.gen = chromosome_builder(args)
self.gen._nbr_snp = 0
self.gen._nbr_denovo_str = 0
self.gen._nbr_denovo_cnv = 0
self.gen._nbr_long_inv = 0
self.gen._nbr_long_ins = 0
self.gen._nbr_long_del = 0
self.gen._nbr_ref_alu = 0
self.gen._nbr_denovo_alu = 0
self.gen._nbr_ref_str = 0
self.gen._nbr_ref_cnv = 0
self.gen._nbr_short_inv = 0
self.gen._nbr_short_ins = 0
self.gen._nbr_short_del = 0
self.gen._cnv_mutation_amount = 0
self.gen._str_mutation_amount = 0
self.gen.generate_ref_genome()
self.gen.generate_donor_genome()
ref_genome = ''
for sequence in self.gen.parse_fasta(self.gen._ref_genome_file):
if sequence:
ref_genome += sequence
else:
break
donor_genome = ''
for sequence in self.gen.parse_fasta(self.gen._priv_genome_file):
if sequence:
donor_genome += sequence
else:
break
self.assertEqual(ref_genome, donor_genome)
self.assertEqual(len(ref_genome), 10000)
for alu in ['y', 'n']:
for test_args in [[10, 'k'], [100, 'k'], [150, 'k']]:
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = test_args[0]
args.scale = test_args[1]
args.alu = alu
args.assembly = 'n'
args.base_alu = random_sequence(300)
if args.scale == 'k':
expected_size = test_args[0] * 1000
elif args.scale == 'm':
expected_size = test_args[0] * 1000000
self.gen = chromosome_builder(args)
self.gen._alu_min_length = 300
self.gen._alu_max_length = 300
self.gen._alu_mutation_rate = 0.3
self.gen.generate_ref_genome()
self.gen.generate_donor_genome()
ref_genome = ''
for sequence in self.gen.parse_fasta(self.gen._ref_genome_file):
if sequence:
ref_genome += sequence
else:
break
donor_genome = ''
for sequence in self.gen.parse_fasta(self.gen._priv_genome_file):
if sequence:
donor_genome += sequence
else:
break
last_end = 0
self.assertEqual(expected_size, len(ref_genome))
for i in range(len(self.gen._mutation_list)):
mutation = self.gen._mutation_list[i]
self.assertTrue(ref_genome[last_end:mutation[0]] in donor_genome)
last_end = mutation[1]
mut_type = mutation[2]
range_stt = max(0, mutation[0]-20)
range_end = min(len(ref_genome)-1, mutation[0]+20)
gapped_range_end = min(len(ref_genome)-1, mutation[1]+20)
if mut_type == 'SNP':
self.assertTrue(ref_genome[range_stt:range_end] not in donor_genome, msg='SNP ' + str(mutation[0]))
elif mut_type == 'MUT_STR':
new_str = mutation[3]
self.assertTrue(new_str in donor_genome, msg='MUT_STR ' + str(mutation[0]))
elif mut_type == 'DONOR_STR':
str_seq = mutation[3]
nbr_copies = mutation[4]
new_str = str_seq * nbr_copies
self.assertTrue(new_str in donor_genome, msg='DONOR_STR ' + str(mutation[0]))
elif mut_type == 'REF_ALU':
self.assertTrue(ref_genome[mutation[0]:mutation[1]] in donor_genome, msg='REF_ALU ' + str(mutation[0]))
elif mut_type == 'REF_CNV':
self.assertTrue(ref_genome[mutation[0]:mutation[1]] in donor_genome, msg='REF_CNV ' + str(mutation[0]))
elif mut_type == 'DONOR_ALU':
self.assertTrue(ref_genome[range_stt:gapped_range_end] not in donor_genome, msg='DONOR_ALU ' + str(mutation[0]))
elif mut_type == 'INV':
inv_seq = ref_genome[mutation[0]:mutation[1]]
inv_seq = inv_seq[::-1]
self.assertTrue(inv_seq in donor_genome, msg='INV ' + str(mutation[0]))
elif mut_type == 'INS':
self.assertTrue(ref_genome[range_stt:range_end] not in donor_genome, msg='INS ' + str(mutation[0]))
elif mut_type == 'DEL':
self.assertTrue(ref_genome[range_stt:gapped_range_end] not in donor_genome, msg='DEL ' + str(mutation[0]))
def test_generate_reads(self):
pass
def test_mutate_str(self):
pass
def test_mutate_cnv(self):
pass
def test_allocate_cnv(self):
#, nbr_cnv, variant_tag):
pass
def test_allocate_alu(self):
#, nbr_alu, variant_tag):
pass
def test_allocate_str(self):
#, nbr_str, variant_tag):
pass
def test_allocate_inversions(self):
#, nbr_inv, min_len, max_len):
pass
def test_allocate_insertions(self):
#, nbr_ins, min_len, max_len):
pass
def test_allocate_deletions(self):
#, nbr_del, min_len, max_len):
pass
def test_allocate_snps(self):
pass
def random_sequence(seq_len):
return "".join(random.choice(['A','C','G','T']) for i in range(seq_len))
class TestSetting():
def __init__(self):
self.id = None
self.num_chr = None
self.chr_size = None
self.scale = None
self.alu = None
self.assembly = None
self.base_alu = None
if __name__ == '__main__':
    unittest.main(exit=False)  #exit=False so the timing runs below still execute
test_results = []
for alu in ['y']:#, 'n']:
for test in [[100, 'k']]:#, [1, 'm']]:
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = test[0]
args.scale = test[1]
args.alu = alu
args.assembly = 'n'
args.base_alu = random_sequence(300)
            start = time.perf_counter()  #time.clock() was removed in Python 3.8
gen = chromosome_builder(args)
print('generating ref genome length: ' + str(test[0]) + test[1])
gen.generate_ref_genome()
print('generating donor genome')
gen.generate_donor_genome()
print('generating reads')
gen.generate_reads()
            test_results.append('Test: ' + str(test[0]) + test[1] + ' time: ' + str(time.perf_counter() - start))
for res in test_results:
print(res) | StarcoderdataPython |
8069786 | import pathlib
class FileMissingError(Exception):
def __init__ (self, path_to_file):
self.path_to_file = path_to_file
def __str__ (self):
return "File " + self.path_to_file + " doesn't exist!"
class Target:
def __init__ (self, output_path):
self.output_path = output_path
def __str__ (self):
return str(self.output_path)
def make (self):
if not self.output_path.exists(): # Just make sure the file exists
raise FileMissingError(self.output_path)
def check_mustbemade (self):
if not self.output_path.exists(): # Just make sure the file exists
raise FileMissingError(self.output_path)
return False
def get_mtime (self):
return self.output_path.stat().st_mtime
class CompiledObject(Target):
def __init__ (self, source_targets, output_path, compiler):
Target.__init__(self, output_path)
self.source_targets = source_targets
self.compiler = compiler
self.output_path = pathlib.Path(self.output_path)
def check_mustbemade (self):
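        # Rebuild if the output is missing, any source must be remade,
        # or any source is newer than the output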
try:
output_mtime = self.get_mtime()
except FileNotFoundError:
return True
for target in self.source_targets:
if target.check_mustbemade():
return True
if target.get_mtime() > output_mtime:
return True
return False
def make (self):
sources = " ".join( str(elem) for elem in self.source_targets )
self.compiler(sources, str(self.output_path))
from overlay import *
| StarcoderdataPython |
9619523 | <reponame>OCHA-DAP/hdx-ckan
import datetime
import ckan.lib.helpers as h
import ckan.plugins.toolkit as tk
import ckanext.hdx_search.controller_logic.search_logic as sl
_ = tk._
from ckanext.hdx_package.helpers.freshness_calculator import UPDATE_STATUS_URL_FILTER,\
UPDATE_STATUS_UNKNOWN, UPDATE_STATUS_FRESH, UPDATE_STATUS_NEEDS_UPDATE
class DashboardSearchLogic(sl.SearchLogic):
def __init__(self):
super(DashboardSearchLogic, self).__init__()
self.flask_route_name = 'hdx_user_dashboard.datasets'
def _search_url(self, params, package_type=None):
'''
Returns the url of the current search type
:param params: the parameters that will be added to the search url
:type params: list of key-value tuples
:param package_type: for now this is always 'dataset'
:type package_type: string
:rtype: string
'''
# url = h.url_for(self._generate_action_name(self.type), id=self.org_id)
url = self._current_url()
return sl.url_with_params(url, params)
def _current_url(self):
url = h.url_for(self.flask_route_name)
return url
def _add_additional_faceting_queries(self, search_data_dict):
super(DashboardSearchLogic, self)._add_additional_faceting_queries(search_data_dict)
now_string = datetime.datetime.utcnow().isoformat() + 'Z'
freshness_facet_extra = 'ex={},{}'.format(UPDATE_STATUS_URL_FILTER, 'batch')
search_data_dict.update({
'facet.range': '{{!{extra}}}due_date'.format(extra=freshness_facet_extra),
'f.due_date.facet.range.start': now_string + '-100YEARS',
'f.due_date.facet.range.end': now_string + '+100YEARS',
'f.due_date.facet.range.gap': '+100YEARS',
'f.due_date.facet.mincount': '0',
'facet.query': '{{!key=unknown {extra}}}-due_date:[* TO *]'.format(extra=freshness_facet_extra),
})
def _process_complex_facet_data(self, existing_facets, title_translations, result_facets, search_extras):
super(DashboardSearchLogic, self)._process_complex_facet_data(existing_facets, title_translations, result_facets,
search_extras)
freshness_facet_name = 'due_date'
if existing_facets and freshness_facet_name in existing_facets:
item_list = existing_facets.get(freshness_facet_name).get('items')
if item_list and len(item_list) == 2:
item_list[0]['display_name'] = _('Needing update')
item_list[0]['name'] = UPDATE_STATUS_NEEDS_UPDATE
item_list[1]['display_name'] = _('Up to date')
item_list[1]['name'] = UPDATE_STATUS_FRESH
unknown_item = next((i for i in existing_facets.get('queries', []) if i.get('name') == 'unknown'), None)
unknown_item['display_name'] = _('Unknown')
unknown_item['name'] = UPDATE_STATUS_UNKNOWN
item_list.append(unknown_item)
title_translations[UPDATE_STATUS_URL_FILTER] = _('Update status')
existing_facets[UPDATE_STATUS_URL_FILTER] = existing_facets[freshness_facet_name]
del existing_facets[freshness_facet_name]
| StarcoderdataPython |
3400830 | /home/runner/.cache/pip/pool/a4/b9/c2/6cef50a2615b8634e197d0968a205ba0b576e792319aeb4ac358edd850 | StarcoderdataPython |
8019695 | from django import forms
from sistema.mail import send_mail_template
from .models import Cliente
from usuario.models import Usuario
from carteira.models import Carteira
from eth_account import Account
from web3 import Web3
import random
class ClienteNovoForm(forms.ModelForm):
name = forms.CharField(label='Nome',widget=forms.TextInput(attrs={'placeholder':'Digite seu Nome'}))
name.widget.attrs.update({'size':'25'})
cpf = forms.CharField(label='CPF',widget=forms.TextInput(attrs={'placeholder':'Digite seu CPF'}))
tel = forms.CharField(label='Telefone',widget=forms.TextInput(attrs={'placeholder':'Digite seu Telefone','class':'tel'}))
id_usuario = forms.ModelChoiceField(
queryset=Usuario.objects.all(),
widget=forms.HiddenInput(),
label='',
)
class Meta:
model = Cliente
fields = ['name','cpf','tel','id_usuario']
class EditarCliente(forms.ModelForm):
name = forms.CharField(label='Nome')
name.widget.attrs.update({'size':'25'})
cpf = forms.CharField(label='CPF',widget=forms.TextInput(attrs={'class':'cpf'}))
tel = forms.CharField(label='Telefone',widget=forms.TextInput(attrs={'class':'tel'}))
class Meta:
model = Cliente
fields = ['name','cpf','tel']
class MostrarCliente(forms.ModelForm):
name = forms.CharField(widget=forms.TextInput(attrs={'readonly':'True'}))
name.widget.attrs.update({'size':'25'})
cpf = forms.CharField(label='CPF',widget=forms.TextInput(attrs={'class':'cpf','readonly':'True'}))
tel = forms.CharField(widget=forms.TextInput(attrs={'readonly':'True'}))
class Meta:
model = Cliente
fields = ['name','cpf','tel']
| StarcoderdataPython |
9755484 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described here in and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
@organization: INTEL MCG PSI
@summary: This module implements Sphinx Auto-generator of Documentation
@since: 4/3/14
@author: sfusilie
"""
from acs.UtilitiesFWK.AttributeDict import AttributeDict
class DictConfigLoader(object):
"""
Create one config loader that will take param from current
global config dict. We'll have to create a new config loader
based on xml files when ready
"""
@staticmethod
def load(config):
"""
Load global device conf to the device conf objects
:type config: dict
:param config: device global conf dictionary
"""
device_conf = AttributeDict()
for key, value in config.items():
device_conf[key] = value
return device_conf
| StarcoderdataPython |
4814494 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...dyn.rdf.blank_node import BlankNode as BlankNode
from ...dyn.rdf.file_format import FileFormat as FileFormat
from ...dyn.rdf.file_format import FileFormatEnum as FileFormatEnum
from ...dyn.rdf.literal import Literal as Literal
from ...dyn.rdf.parse_exception import ParseException as ParseException
from ...dyn.rdf.query_exception import QueryException as QueryException
from ...dyn.rdf.repository import Repository as Repository
from ...dyn.rdf.repository_exception import RepositoryException as RepositoryException
from ...dyn.rdf.statement import Statement as Statement
from ...dyn.rdf.uri import URI as URI
from ...dyn.rdf.ur_is import URIs as URIs
from ...dyn.rdf.ur_is import URIsEnum as URIsEnum
from ...dyn.rdf.x_blank_node import XBlankNode as XBlankNode
from ...dyn.rdf.x_document_metadata_access import XDocumentMetadataAccess as XDocumentMetadataAccess
from ...dyn.rdf.x_document_repository import XDocumentRepository as XDocumentRepository
from ...dyn.rdf.x_literal import XLiteral as XLiteral
from ...dyn.rdf.x_metadatable import XMetadatable as XMetadatable
from ...dyn.rdf.x_named_graph import XNamedGraph as XNamedGraph
from ...dyn.rdf.x_node import XNode as XNode
from ...dyn.rdf.x_query_select_result import XQuerySelectResult as XQuerySelectResult
from ...dyn.rdf.x_reified_statement import XReifiedStatement as XReifiedStatement
from ...dyn.rdf.x_repository import XRepository as XRepository
from ...dyn.rdf.x_repository_supplier import XRepositorySupplier as XRepositorySupplier
from ...dyn.rdf.x_resource import XResource as XResource
from ...dyn.rdf.xuri import XURI as XURI
| StarcoderdataPython |
6460753 | <reponame>alex-mil/gcleaner<filename>python2/gcleaner.py<gh_stars>0
import sys
if sys.version_info[0] > 2:
sys.stderr.write("Python version must be 2.x.x\n")
sys.exit(-1)
from os import path, curdir, walk
from subprocess import Popen, PIPE
from threading import Timer
class GCleaner(object):
def __init__(self):
self.cur_dir = path.abspath(curdir)
self.git_fetch_cmd = 'git fetch --prune origin'.split()
self.git_branch_cmd = 'git branch --remote'.split()
self.git_push_delete_cmd = 'git push origin --delete'.split()
self.seconds = 5
self.kill = lambda process: process.terminate()
def clean(self, start_path=None):
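        # walk start_path looking for .git directories; for each repository found,
        # fetch with prune and delete every remote branch whose name does not
        # contain 'production' or 'master'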
start_path = self.cur_dir if not start_path else start_path
for root, dirs, _ in walk(start_path):
for name in dirs:
if name == '.git':
self._fetch_with_prune(root)
branches = self._get_remote_branches(root)
if branches:
self._push_with_delete(
root, list(filter(None, branches)))
else:
msg = """Failed:
Folder: {}
Cannot get a list of remote branches
""".format(root)
print(msg)
break
def _fetch_with_prune(self, path):
fetch = Popen(self.git_fetch_cmd, cwd=path, stdout=PIPE, stderr=PIPE)
_timer = Timer(self.seconds, self.kill, [fetch])
try:
_timer.start()
fetch.wait()
(_, _) = fetch.communicate()
if fetch.returncode:
print("Failed! Return code = {}".format(fetch.returncode))
finally:
_timer.cancel()
def _get_remote_branches(self, path):
git_branch = Popen(self.git_branch_cmd, cwd=path,
stdout=PIPE, stderr=PIPE)
_timer = Timer(self.seconds, self.kill, [git_branch])
try:
_timer.start()
(stdout, _) = git_branch.communicate()
if git_branch.returncode:
print("Failed! Return code = {}".format(git_branch.returncode))
else:
return stdout.decode('utf-8').split('\n')
finally:
_timer.cancel()
def _push_with_delete(self, path, branches):
for branch in branches:
branch = branch.strip(' *').split('/')[-1]
if not 'production' in branch and not 'master' in branch:
cmd = self.git_push_delete_cmd + [branch]
git_push = Popen(cmd, cwd=path, stdout=PIPE, stderr=PIPE)
_timer = Timer(self.seconds, self.kill, [git_push])
try:
_timer.start()
git_push.wait()
(_, _) = git_push.communicate()
if git_push.returncode:
print("Failed! Return code = {}".format(
git_push.returncode))
else:
print "Executing: {}".format(' '.join(map(str, cmd)))
finally:
_timer.cancel()
def main():
f_path = raw_input(
'Where to start scaning? (current directory by default) --> ')
gc = GCleaner()
gc.clean(None if not f_path else f_path)
print('The script has finished.')
if __name__ == '__main__':
main()
| StarcoderdataPython |
4885729 | <gh_stars>1-10
import sqlite3, datetime
class DateTimeParseError (RuntimeError):
pass
#stupid sqlite
datetime_fields = {
"last_login", "expiration", "expiration", "time"
}
def sql_close_connections():
return;
def parse_dt(str):
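    """parses a datetime string of the form YYYY-MM-DD[ HH:MM:SS[.ffffff]]
    raises DateTimeParseError if the string does not match"""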
i = [0]
def expect(s):
if not str[i[0]:].startswith(s):
raise DateTimeParseError("bad1 " + str[i[0]:i[0]+4])
i[0] += len(s)
return s
def get(n):
if i[0]+n > len(str):
raise DateTimeParseError("bad2 " + str[i[0]:i[0]+n])
ret = str[i[0]:i[0]+n]
i[0] += n
try:
ret = int(ret)
except:
raise DateTimeParseError("bad3 " + str[i[0]:i[0]+n])
return ret
year = get(4)
expect("-")
month = get(2)
expect("-")
day = get(2)
if str[i[0]] == " " or str[i[0]] == "\t":
i[0] += 1
hour = get(2)
expect(":")
minute = get(2)
expect(":")
second = str[i[0]:]
try:
second = float(second)
except:
raise DateTimeParseError("bad4 " + str[i[0]:i[0]+n])
else:
hour = 0
minute = 0
second = 0
second = int(second+0.5)
return datetime.datetime(year, month, day, hour, minute, second)
def parse_datetime(s):
try:
return parse_dt(s)
except DateTimeParseError:
print("Parse error!", s)
return None
sqlite3.register_converter("datetime", parse_datetime)
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
if col[0] in datetime_fields and type(d[col[0]]) == str:
d[col[0]] = parse_datetime(d[col[0]])
return d
DBError = sqlite3.OperationalError
gcon = None
gcur = None
class CurProxy:
def __init__(self, cur):
self.cur = cur
def execute(self, str):
return self.cur.execute(str)
def fetchone(self):
ret = self.cur.fetchone()
        if ret is None: return None
return ret
def sql_connect():
global gcon, gcur
if gcon == None:
gcon = sqlite3.connect("database.db")
gcon.row_factory = dict_factory
gcur = gcon.cursor()
return gcur, gcon
def sql_reconnect():
return sql_connect()
def init_sql():
pass
def default_db():
cur, con = sql_connect()
f = open("fairmotion.sql", "r")
buf = f.read()
f.close()
statements = [""]
s = ""
for l in buf.split("\n"):
if l.strip().startswith("--") or l.strip().startswith("/*") \
or l.strip().startswith("//"):
continue;
if "ENGINE" in l:
l = ");"
if l.strip() == "": continue
if l.startswith("SET"): continue
s += l + "\n"
for l in s.split("\n"):
if l.strip() == "": continue
if len(l) > 2 and l[:3] == l[:3].upper() and l[0] not in ["\t", " ", "\n", "\r", "("]:
statements.append("")
if l.strip().startswith("PRIMARY KEY"): continue
if l.strip().startswith("KEY"): continue
#l = l.replace("AUTO_INCREMENT", "")
statements[-1] += l + "\n"
for s in statements:
# buf = s.replace("IF NOT EXISTS ", "")
print("===executing====")
print(s)
con.execute(s)
con.commit()
pass
def get_last_rowid(cur):
return cur.lastrowid
| StarcoderdataPython |
3396230 | <gh_stars>100-1000
import argparse
import time
import os
import shutil
import logging
import torch
import torch.backends.cudnn as cudnn
import loaddata
from tqdm import tqdm
from models import modules, net, resnet
from util import query_yes_no
from test import test
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='')
# training/optimization related
parser.add_argument('--epochs', default=10, type=int,
help='number of total epochs to run')
parser.add_argument('--start_epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
help='initial learning rate')
parser.add_argument('--weight_decay', '--wd', default=1e-4, type=float,
help='weight decay (default: 1e-4)')
parser.add_argument('--batch_size', default=32, type=int, help='batch size number') # 1 GPU - 8
parser.add_argument('--store_root', type=str, default='checkpoint')
parser.add_argument('--store_name', type=str, default='nyud2')
parser.add_argument('--data_dir', type=str, default='./data', help='data directory')
parser.add_argument('--resume', action='store_true', default=False, help='whether to resume training')
# imbalanced related
# LDS
parser.add_argument('--lds', action='store_true', default=False, help='whether to enable LDS')
parser.add_argument('--lds_kernel', type=str, default='gaussian',
choices=['gaussian', 'triang', 'laplace'], help='LDS kernel type')
parser.add_argument('--lds_ks', type=int, default=5, help='LDS kernel size: should be odd number')
parser.add_argument('--lds_sigma', type=float, default=2, help='LDS gaussian/laplace kernel sigma')
# FDS
parser.add_argument('--fds', action='store_true', default=False, help='whether to enable FDS')
parser.add_argument('--fds_kernel', type=str, default='gaussian',
choices=['gaussian', 'triang', 'laplace'], help='FDS kernel type')
parser.add_argument('--fds_ks', type=int, default=5, help='FDS kernel size: should be odd number')
parser.add_argument('--fds_sigma', type=float, default=2, help='FDS gaussian/laplace kernel sigma')
parser.add_argument('--start_update', type=int, default=0, help='which epoch to start FDS updating')
parser.add_argument('--start_smooth', type=int, default=1, help='which epoch to start using FDS to smooth features')
parser.add_argument('--bucket_num', type=int, default=100, help='maximum bucket considered for FDS')
parser.add_argument('--bucket_start', type=int, default=7, help='minimum(starting) bucket for FDS, 7 for NYUDv2')
parser.add_argument('--fds_mmt', type=float, default=0.9, help='FDS momentum')
# re-weighting: SQRT_INV / INV
parser.add_argument('--reweight', type=str, default='none', choices=['none', 'inverse', 'sqrt_inv'],
help='cost-sensitive reweighting scheme')
# two-stage training: RRT
parser.add_argument('--retrain_fc', action='store_true', default=False,
help='whether to retrain last regression layer (regressor)')
parser.add_argument('--pretrained', type=str, default='', help='pretrained checkpoint file path to load backbone weights for RRT')
def define_model(args):
original_model = resnet.resnet50(pretrained=True)
Encoder = modules.E_resnet(original_model)
model = net.model(args, Encoder, num_features=2048, block_channel = [256, 512, 1024, 2048])
return model
def main():
error_best = 1e5
metric_dict_best = {}
epoch_best = -1
global args
args = parser.parse_args()
if not args.lds and args.reweight != 'none':
args.store_name += f'_{args.reweight}'
if args.lds:
args.store_name += f'_lds_{args.lds_kernel[:3]}_{args.lds_ks}'
if args.lds_kernel in ['gaussian', 'laplace']:
args.store_name += f'_{args.lds_sigma}'
if args.fds:
args.store_name += f'_fds_{args.fds_kernel[:3]}_{args.fds_ks}'
if args.fds_kernel in ['gaussian', 'laplace']:
args.store_name += f'_{args.fds_sigma}'
args.store_name += f'_{args.start_update}_{args.start_smooth}_{args.fds_mmt}'
if args.retrain_fc:
args.store_name += f'_retrain_fc'
args.store_name += f'_lr_{args.lr}_bs_{args.batch_size}'
args.store_dir = os.path.join(args.store_root, args.store_name)
if not args.resume:
if os.path.exists(args.store_dir):
if query_yes_no('overwrite previous folder: {} ?'.format(args.store_dir)):
shutil.rmtree(args.store_dir)
print(args.store_dir + ' removed.')
else:
raise RuntimeError('Output folder {} already exists'.format(args.store_dir))
print(f"===> Creating folder: {args.store_dir}")
os.makedirs(args.store_dir)
logging.root.handlers = []
log_file = os.path.join(args.store_dir, 'training_log.log')
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s | %(message)s",
handlers=[
logging.FileHandler(log_file),
logging.StreamHandler()
])
logging.info(args)
writer = SummaryWriter(args.store_dir)
model = define_model(args)
model = torch.nn.DataParallel(model).cuda()
if args.resume:
model_state = torch.load(os.path.join(args.store_dir, 'checkpoint.pth.tar'))
logging.info(f"Loading checkpoint from {os.path.join(args.store_dir, 'checkpoint.pth.tar')}"
f" (Epoch [{model_state['epoch']}], RMSE: {model_state['error']:.3f})")
model.load_state_dict(model_state['state_dict'])
args.start_epoch = model_state['epoch'] + 1
epoch_best = model_state['epoch']
error_best = model_state['error']
metric_dict_best = model_state['metric']
if args.retrain_fc:
assert os.path.isfile(args.pretrained), f"No checkpoint found at '{args.pretrained}'"
model_state = torch.load(args.pretrained, map_location="cpu")
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in model_state['state_dict'].items():
if 'R' not in k:
new_state_dict[k] = v
model.load_state_dict(new_state_dict, strict=False)
logging.info(f'===> Pretrained weights found in total: [{len(list(new_state_dict.keys()))}]')
logging.info(f'===> Pre-trained model loaded: {args.pretrained}')
for name, param in model.named_parameters():
if 'R' not in name:
param.requires_grad = False
logging.info(f'Only optimize parameters: {[n for n, p in model.named_parameters() if p.requires_grad]}')
cudnn.benchmark = True
if not args.retrain_fc:
optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
else:
parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
optimizer = torch.optim.Adam(parameters, args.lr, weight_decay=args.weight_decay)
train_loader = loaddata.getTrainingData(args, args.batch_size)
train_fds_loader = loaddata.getTrainingFDSData(args, args.batch_size)
test_loader = loaddata.getTestingData(args, 1)
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
train(train_loader, train_fds_loader, model, optimizer, epoch, writer)
error, metric_dict = test(test_loader, model)
if error < error_best:
error_best = error
metric_dict_best = metric_dict
epoch_best = epoch
save_checkpoint(model.state_dict(), epoch, error, metric_dict, 'checkpoint_best.pth.tar')
save_checkpoint(model.state_dict(), epoch, error, metric_dict, 'checkpoint.pth.tar')
save_checkpoint(model.state_dict(), epoch, error, metric_dict, 'checkpoint_final.pth.tar')
logging.info(f'Best epoch: {epoch_best}; RMSE: {error_best:.3f}')
logging.info('***** TEST RESULTS *****')
for shot in ['Overall', 'Many', 'Medium', 'Few']:
logging.info(f" * {shot}: RMSE {metric_dict_best[shot.lower()]['RMSE']:.3f}\t"
f"ABS_REL {metric_dict_best[shot.lower()]['ABS_REL']:.3f}\t"
f"LG10 {metric_dict_best[shot.lower()]['LG10']:.3f}\t"
f"MAE {metric_dict_best[shot.lower()]['MAE']:.3f}\t"
f"DELTA1 {metric_dict_best[shot.lower()]['DELTA1']:.3f}\t"
f"DELTA2 {metric_dict_best[shot.lower()]['DELTA2']:.3f}\t"
f"DELTA3 {metric_dict_best[shot.lower()]['DELTA3']:.3f}\t"
f"NUM {metric_dict_best[shot.lower()]['NUM']}")
writer.close()
def train(train_loader, train_fds_loader, model, optimizer, epoch, writer):
batch_time = AverageMeter()
losses = AverageMeter()
model.train()
end = time.time()
for i, sample_batched in enumerate(train_loader):
image, depth, weight = sample_batched['image'], sample_batched['depth'], sample_batched['weight']
depth = depth.cuda(non_blocking=True)
weight = weight.cuda(non_blocking=True)
image = image.cuda()
optimizer.zero_grad()
if args.fds:
output, feature = model(image, depth, epoch)
else:
output = model(image, depth, epoch)
loss = torch.mean(((output - depth) ** 2) * weight)
losses.update(loss.item(), image.size(0))
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
writer.add_scalar('data/loss', loss.item(), i + epoch * len(train_loader))
logging.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format(epoch, i, len(train_loader), batch_time=batch_time, loss=losses))
if args.fds and epoch >= args.start_update:
logging.info(f"Starting Creating Epoch [{epoch}] features of subsampled training data...")
encodings, depths = [], []
with torch.no_grad():
for i, sample_batched in enumerate(tqdm(train_fds_loader)):
image, depth = sample_batched['image'].cuda(), sample_batched['depth'].cuda()
_, feature = model(image, depth, epoch)
encodings.append(feature.data.cpu())
depths.append(depth.data.cpu())
encodings, depths = torch.cat(encodings, 0), torch.cat(depths, 0)
logging.info(f"Created Epoch [{epoch}] features of subsampled training data (size: {encodings.size(0)})!")
model.module.R.FDS.update_last_epoch_stats(epoch)
model.module.R.FDS.update_running_stats(encodings, depths, epoch)
def adjust_learning_rate(optimizer, epoch):
lr = args.lr * (0.1 ** (epoch // 5))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state_dict, epoch, error, metric_dict, filename='checkpoint.pth.tar'):
logging.info(f'Saving checkpoint to {os.path.join(args.store_dir, filename)}...')
torch.save({
'state_dict': state_dict,
'epoch': epoch,
'error': error,
'metric': metric_dict
}, os.path.join(args.store_dir, filename))
if __name__ == '__main__':
main()
| StarcoderdataPython |
1924077 | import sys
cache = {}
def num_pebbles(pos):
return len([1 for x in pos if x == 'o'])
def move(pos, n, m):
p = pos[:]
p[n] = '-'
p[(n+m)//2] = '-'
p[m] = 'o'
return p
def min_number_pebbles(pos):
assert len(pos) == 23
try:
return cache[''.join(pos)]
except:
pass
moves = []
# Will be a list of pairs (m, n), each pair meaning that the pebble on
# place m moves to place n (and the pebble between m and n is removed).
# Here 0 <= m, n < len(pos)
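    # Example (illustrative): if pos[3:6] == ['o', 'o', '-'], the pair (3, 5) is
    # generated, i.e. the pebble at 3 jumps over the one at 4 and lands on 5.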
for i in range(21): # 21 == len(pos)-2
if pos[i+1] == 'o':
if pos[i] == 'o' and pos[i+2] == '-':
moves.append((i, i+2))
elif pos[i] == '-' and pos[i+2] == 'o':
moves.append((i+2, i))
if not moves:
np = num_pebbles(pos)
else:
np = min(min_number_pebbles(move(pos, *p)) for p in moves)
cache[''.join(pos)] = np
return np
for ctr, line in enumerate(sys.stdin):
if ctr == 0:
number_test_cases = int(line)
continue
if ctr > number_test_cases:
# trailing lines?
break
print(min_number_pebbles(list(line.strip())))
| StarcoderdataPython |
158810 | import os
from ..exceptions import ArgumentError
from ..thirdparty.download import download_binaries
from ..thirdparty.kaldi import collect_kaldi_binaries, validate_kaldi_binaries
def validate_args(args):
available_commands = ['download', 'validate', 'kaldi']
if args.command not in available_commands:
raise ArgumentError(
            '{} is not a valid thirdparty command ({})'.format(args.command, ', '.join(available_commands)))
if args.command not in ['download', 'validate']:
if not args.local_directory:
raise ArgumentError('Specify a directory to extract {} binaries from.'.format(args.command))
if not os.path.exists(args.local_directory):
raise ArgumentError('The directory {} does not exist.'.format(args.local_directory))
def run_thirdparty(args):
validate_args(args)
if args.command == 'download':
download_binaries()
elif args.command == 'validate':
validate_kaldi_binaries()
elif args.command == 'kaldi':
collect_kaldi_binaries(args.local_directory)
if __name__ == '__main__':
from montreal_forced_aligner.command_line.mfa import thirdparty_parser
thirdparty_args = thirdparty_parser.parse_args()
run_thirdparty(thirdparty_args)
| StarcoderdataPython |
3510397 | """Simple calculation to find the UTM zone EPSG code of a given (lat/lng) coordinate."""
import math
def lat_lng_to_epsg(lat_coord, lng_coord):
"""Calculate the EPSG code of the given WGS84 (lat/lng) coordinate.
Parameters:
lat_coord (float): number between -90 and 90 indicating latitute
coordinate.
lng_coord (float): number between -180 to 180 indicating longitude
coordinate.
Returns:
EPSG code of the UTM zone that contains the given point.
"""
utm_code = (math.floor((lng_coord + 180)/6) % 60) + 1
lat_code = 6 if lat_coord > 0 else 7
epsg_code = int('32%d%02d' % (lat_code, utm_code))
return epsg_code
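# Minimal illustrative check (added example, not part of the original module):
# longitude -0.1 falls in UTM zone 30 and latitude 51.5 is in the northern
# hemisphere, so central London maps to EPSG:32630.
if __name__ == '__main__':
    print(lat_lng_to_epsg(51.5, -0.1))  # expected output: 32630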
| StarcoderdataPython |
3340331 | <reponame>TooTouch/tootorch<filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'tootorch',
version = '0.2',
long_description = long_description,
long_description_content_type = 'text/markdown',
description = 'Implemetation XAI in Computer Vision (Pytorch)',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/TooTouch/tootorch',
download_url = 'https://github.com/TooTouch/tootorch/archive/v0.1.tar.gz',
install_requires = ["torch","torchvision","h5py","tqdm","pillow","opencv-python"],
packages = find_packages(exclude = []),
keywords = ['tootorch','XAI'],
python_requires = '>=3.6',
package_data = {},
zip_safe = False,
classifiers = [
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
],
) | StarcoderdataPython |
8068740 | from __future__ import absolute_import
from mock import Mock
from celery.concurrency.threads import NullDict, TaskPool, apply_target
from celery.tests.case import AppCase, Case, mask_modules, mock_module
class test_NullDict(Case):
def test_setitem(self):
x = NullDict()
x['foo'] = 1
with self.assertRaises(KeyError):
x['foo']
class test_TaskPool(AppCase):
def test_without_threadpool(self):
with mask_modules('threadpool'):
with self.assertRaises(ImportError):
TaskPool()
def test_with_threadpool(self):
with mock_module('threadpool'):
x = TaskPool()
self.assertTrue(x.ThreadPool)
self.assertTrue(x.WorkRequest)
def test_on_start(self):
with mock_module('threadpool'):
x = TaskPool()
x.on_start()
self.assertTrue(x._pool)
self.assertIsInstance(x._pool.workRequests, NullDict)
def test_on_stop(self):
with mock_module('threadpool'):
x = TaskPool()
x.on_start()
x.on_stop()
x._pool.dismissWorkers.assert_called_with(x.limit, do_join=True)
def test_on_apply(self):
with mock_module('threadpool'):
x = TaskPool()
x.on_start()
callback = Mock()
accept_callback = Mock()
target = Mock()
req = x.on_apply(target, args=(1, 2), kwargs={'a': 10},
callback=callback,
accept_callback=accept_callback)
x.WorkRequest.assert_called_with(
apply_target,
(target, (1, 2), {'a': 10}, callback, accept_callback),
)
x._pool.putRequest.assert_called_with(req)
x._pool._results_queue.queue.clear.assert_called_with()
| StarcoderdataPython |
1626015 | <gh_stars>0
"""
Backward compatibility support for Python 3.5
"""
import sys
import test.support
import subprocess
# copied from Python 3.9 test.support module
def _missing_compiler_executable(cmd_names=[]):
"""Check if the compiler components used to build the interpreter exist.
Check for the existence of the compiler executables whose names are listed
in 'cmd_names' or all the compiler executables when 'cmd_names' is empty
and return the first missing executable or None when none is found
missing.
"""
from distutils import ccompiler, sysconfig, spawn
compiler = ccompiler.new_compiler()
sysconfig.customize_compiler(compiler)
for name in compiler.executables:
if cmd_names and name not in cmd_names:
continue
cmd = getattr(compiler, name)
if cmd_names:
assert cmd is not None, \
"the '%s' executable is not configured" % name
elif not cmd:
continue
if spawn.find_executable(cmd[0]) is None:
return cmd[0]
missing_compiler_executable = vars(test.support).setdefault(
'missing_compiler_executable',
_missing_compiler_executable,
)
try:
from test.support import unix_shell
except ImportError:
# Adapted from Python 3.9 test.support module
is_android = hasattr(sys, 'getandroidapilevel')
unix_shell = (
None if sys.platform == 'win32' else
'/system/bin/sh' if is_android else
'/bin/sh'
)
# copied from Python 3.9 subprocess module
def _optim_args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
optimization settings in sys.flags."""
args = []
value = sys.flags.optimize
if value > 0:
args.append('-' + 'O' * value)
return args
vars(subprocess).setdefault(
'_optim_args_from_interpreter_flags',
_optim_args_from_interpreter_flags,
)
def adapt_glob(regex):
"""
Supply legacy expectation on Python 3.5
"""
    # Python 3.6+ fnmatch.translate emits the '(?s:...)\Z' form directly; only
    # rewrite it into the legacy '...\Z(?ms)' form when running on Python 3.5.
    if sys.version_info >= (3, 6):
        return regex
    return regex.replace('(?s:', '').replace(r')\Z', r'\Z(?ms)')
| StarcoderdataPython |
4972408 | def extractNanjamora(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Endless Dantian' in item['tags']:
return buildReleaseMessageWithType(item, 'Endless Dantian', vol, chp, frag=frag, postfix=postfix)
if 'Infinite Temptation' in item['tags']:
return buildReleaseMessageWithType(item, 'Infinite Temptation', vol, chp, frag=frag, postfix=postfix)
if 'wushang jinshia' in item['tags']:
return buildReleaseMessageWithType(item, 'Wu Shang Jin Shia', vol, chp, frag=frag, postfix=postfix)
return False
| StarcoderdataPython |
6557453 | <reponame>inidun/unesco_data_collection
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List
from courier.config import get_config
from courier.extract.interface import ITextExtractor
from courier.extract.tesseract_extractor import TesseractExtractor
from courier.extract.utils import get_filenames
CONFIG = get_config()
def test_extract_extracts_right_amount_of_files():
with TemporaryDirectory() as output_dir:
files: List[Path] = get_filenames(CONFIG.test_files_dir / 'test.pdf')
extractor: ITextExtractor = TesseractExtractor(dpi=1, fmt='png')
extractor.batch_extract(files, output_dir)
assert len(sorted(Path(output_dir).glob('*.txt'))) == 8
assert (Path(output_dir) / 'extract.log').exists()
| StarcoderdataPython |
4917257 | from pyteal import *
from blob import Blob
def test():
b = Blob()
test = Seq(
Pop(b.write(Int(0), Int(0), Bytes("deadbeef" * 16))),
Log(b.read(Int(0), Int(8), Int(32))),
Int(1),
)
return Cond(
[Txn.application_id() == Int(0), Int(1)],
[Txn.on_completion() == OnComplete.OptIn, Int(1)],
[Txn.on_completion() == OnComplete.UpdateApplication, Int(1)],
[Int(1), test],
)
print(compileTeal(test(), mode=Mode.Application, version=5))
| StarcoderdataPython |
192725 | from fabric.api import task, local
@task
def start():
local("fab server.start:server=mooc,mooc")
@task
def stop():
local("fab server.stop")
| StarcoderdataPython |
6535204 | <reponame>josecostamartins/pythonreges
# -*- coding: utf-8 -*-
'''
1. Bola (Ball) class: create a class that models a ball:
    a. Attributes: color, circumference, material
    b. Methods: trocaCor (change color) and mostraCor (show color)
'''
class Bola(object):
def __init__(self, cor, circunferencia, material):
self.cor = cor
self.circunferencia = circunferencia
self.material = material
def trocaCor(self, cor):
self.cor = cor
def mostraCor(self):
        print(self.cor)
return self.cor
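# Small illustrative usage of the exercise class (added example):
if __name__ == '__main__':
    bola = Bola('red', 70, 'leather')
    bola.mostraCor()  # prints: red
    bola.trocaCor('blue')
    bola.mostraCor()  # prints: blue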
| StarcoderdataPython |
4829202 | from decimal import Decimal
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting', '0001_squashed_0052_ensure_report_builder_plans'),
('sms', '0010_update_sqlmobilebackend_couch_id'),
]
operations = [
migrations.CreateModel(
name='SmsBillable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('gateway_fee_conversion_rate', models.DecimalField(default=Decimal('1.0'), null=True, max_digits=20, decimal_places=9)),
('log_id', models.CharField(max_length=50, db_index=True)),
('phone_number', models.CharField(max_length=50)),
('api_response', models.TextField(null=True, blank=True)),
('is_valid', models.BooleanField(default=True, db_index=True)),
('domain', models.CharField(max_length=25, db_index=True)),
('direction', models.CharField(db_index=True, max_length=10, choices=[(b'I', b'Incoming'), (b'O', b'Outgoing')])),
('date_sent', models.DateField()),
('date_created', models.DateField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SmsGatewayFee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=4)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SmsGatewayFeeCriteria',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('backend_api_id', models.CharField(max_length=100, db_index=True)),
('backend_instance', models.CharField(max_length=255, null=True, db_index=True)),
('direction', models.CharField(db_index=True, max_length=10, choices=[(b'I', b'Incoming'), (b'O', b'Outgoing')])),
('country_code', models.IntegerField(db_index=True, max_length=5, null=True, blank=True)),
('prefix', models.CharField(default=b'', max_length=10, db_index=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SmsUsageFee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=4)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SmsUsageFeeCriteria',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('direction', models.CharField(db_index=True, max_length=10, choices=[(b'I', b'Incoming'), (b'O', b'Outgoing')])),
('domain', models.CharField(max_length=25, null=True, db_index=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='smsusagefee',
name='criteria',
field=models.ForeignKey(to='smsbillables.SmsUsageFeeCriteria', on_delete=django.db.models.deletion.PROTECT),
preserve_default=True,
),
migrations.AddField(
model_name='smsgatewayfee',
name='criteria',
field=models.ForeignKey(to='smsbillables.SmsGatewayFeeCriteria', on_delete=django.db.models.deletion.PROTECT),
preserve_default=True,
),
migrations.AddField(
model_name='smsgatewayfee',
name='currency',
field=models.ForeignKey(to='accounting.Currency', on_delete=django.db.models.deletion.PROTECT),
preserve_default=True,
),
migrations.AddField(
model_name='smsbillable',
name='gateway_fee',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='smsbillables.SmsGatewayFee', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='smsbillable',
name='usage_fee',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='smsbillables.SmsUsageFee', null=True),
preserve_default=True,
),
]
| StarcoderdataPython |
5155768 | """
Given a string s, partition s such that every substring of the partition is a palindrome.
Return the minimum cuts needed for a palindrome partitioning of s.
Example :
Given
s = "aab",
Return 1 since the palindrome partitioning ["aa","b"] could be produced using 1 cut.
"""
class Solution:
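    # Worked example for s = "aab" (illustrative): the is_palindrome table marks
    # "a", "a", "b" and "aa" as palindromes; memo_table becomes [0, 0, 1], i.e.
    # "aa" needs no cut and one cut separates it from "b", so the answer is 1.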
    def palindrome_partitioning_ii(self, s):
n = len(s)
# Create a memo_table to store minimum number of cuts
memo_table = [0] * n
# if s[i....j] is palindrome then is_palindrome[i][j] = 1 else 0
is_palindrome = [[0 for _ in range(n)] for _ in range(n)] # initializing with 0 value
for i in range(n):
is_palindrome[i][i] = 1 # for a single letter in string set is_palindrome to true
for i in range(2, n + 1): # Now loop through the substring of length of 2 ro n
for j in range(n - i + 1):
k = i + j - 1
if i == 2: # if len of substring is 2 then check only for both element is equal or not
if s[j] == s[k]:
is_palindrome[j][k] = 1
else: # check for corner latter and check middle part form is_palindrome table
if s[j] == s[k] and is_palindrome[j + 1][k - 1]:
is_palindrome[j][k] = 1
for i in range(n):
if is_palindrome[0][i]:
memo_table[i] = 0
else:
import sys
memo_table[i] = sys.maxsize
for j in range(i):
if is_palindrome[j + 1][i] and (1 + memo_table[j] < memo_table[i]):
memo_table[i] = memo_table[j] + 1
return memo_table[n - 1]
if __name__ == '__main__':
c = Solution()
s = "aab"
print(c.plindrome_partitioning_ii(s))
| StarcoderdataPython |
1843750 | import tensorflow as tf
import numpy as np
#100 phony y, x data points are created in NumPy, y = 0.3 + x * 0.1
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
# calculate y_data = b + W * x_data, by finding the values for b and W
# Knowing that b should be 0.3 and W 0.1, but it will be figured out by the tensorflow
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# Before starting, initialize the variables. We will 'run' this first.
init = tf.initialize_all_variables()
# Launch the graph.
sess = tf.Session()
sess.run(init)
# Fit the line.
for step in range(201):
sess.run(train)
if step % 20 == 0:
print(step, sess.run(W), sess.run(b))
# Learns best fit is W: [0.1], b: [0.3] | StarcoderdataPython |
1663092 | """Implements a buildable, serializable, deserializable lexicon."""
from typing import Any, Dict, Iterator, Tuple, Union, Optional
from textprobability.core.types import (
Unit,
Probability,
NGram,
ContextLexicon,
Splitter,
Text,
Lexicon,
)
SerializableLexicon = Tuple[Dict[Unit, int], int]
# Recursive type annotations are needed here, but they are not supported.
SerializableContextLexicon = Dict[str, Union[SerializableLexicon, Any]]
class LexiconImpl(Lexicon):
"""A Lexicon associates linguistic units with probabilities."""
# A Lexicon is backed by a _Probabilities instance.
def __init__(self, counts: Dict[Unit, int], n_obs: int):
"""This constructor should never be called from outside this module. In Java it
would be private.
"""
self._counts: Dict[Unit, int] = counts
self._probabilities: Dict[Unit, float] = {
k: v / n_obs for k, v in counts.items()
}
super().__init__(n_obs)
def __getitem__(self, key: Unit) -> Probability:
"""Returns the probability associated with `key`."""
return self._probabilities[key]
def get(
self, key: Unit, default: Optional[Probability] = None
) -> Optional[Probability]:
return self._probabilities[key] if key in self._probabilities else default
def summarize(self, min_n):
"""Summarizes this, reducing the amount of space required to store
this.
:param min_n: The minimum number of observations of a linguistic unit for
it to be recorded as observed.
:return: A summarized version of this.
"""
filtered = {
unit: count for unit, count in self._counts.items() if count >= min_n
}
# sum(filtered.values()) should be very close to self.n_obs for reasonably
# chosen min_n, but it is recomputed here anyway.
return LexiconImpl(filtered, sum(filtered.values()))
def to_serializable(self) -> SerializableLexicon:
"""Gets a representation of `self` that can be serialized using JSON."""
return (self._counts, self.n_obs)
@classmethod
def from_serializable(cls, serializable: SerializableLexicon) -> Any:
"""Retrieves a Lexicon from its serializable representation."""
return cls(*serializable)
def context_lexicon2serializable(cl: ContextLexicon) -> SerializableContextLexicon:
"""Converts a ContextLexicon to a Trie."""
ret: SerializableContextLexicon = {}
for key in cl:
current = ret
for element in key[:-1]:
if element not in current:
current[element] = {}
# The following is okay because it will only be a Lexicon at the very end.
current = current[element] # type: ignore
# This is the very end where current[element] is finally a Lexicon rather than
# a nested SerializableContextLexicon.
current[key[-1]] = cl[key].to_serializable()
return ret
def serializable2context_lexicon(trie: SerializableContextLexicon) -> ContextLexicon:
"""Converts a Trie like the output of context_lexicon2trie to a ContextLexicon."""
def walk(
trie: Union[SerializableContextLexicon, SerializableLexicon], parents: NGram
) -> Iterator[Tuple[NGram, SerializableLexicon]]:
if not isinstance(trie, dict): # This is brittle.
yield parents, trie
else:
for key in trie:
yield from walk(trie[key], (*parents, key))
ret: ContextLexicon = {}
for ngram, serializable_lexicon in walk(trie, ()):
ret[ngram] = LexiconImpl.from_serializable(serializable_lexicon)
return ret
Counts = Dict[Unit, int]
class LexiconBuilder:
"""Accumulates data about a Lexicon."""
def __init__(self, splitter: Splitter):
"""Initializes the builder to count linguistic units of the type output by
`splitter`.
:param splitter: the Splitter instance that determines the type of linguistic
unit counted by `self`
"""
self.splitter: Splitter = splitter
self._counts: Counts = {}
self.total = 0
def add(self, text: Text):
"""Acquires information from `text`."""
for unit in self.splitter(text):
self._counts[unit] = self._counts.get(unit, 0) + 1
self.total += 1
def get_lexicon(self) -> Lexicon:
"""Returns the Lexicon accumulated by `self`."""
return LexiconImpl.from_serializable((self._counts, self.total))
class LexiconContextLexiconBuilder(LexiconBuilder):
"""Accumulates data about a Lexicon and ContextLexicon."""
def __init__(self, splitter: Splitter, n):
"""Initializes the builder to count linguistic units of the type output by
`splitter`.
:param splitter: the Splitter instance that determines the type of linguistic
unit counted by `self`
:param n: the number of preceding linguistic units used as context
"""
self.n = n
self._builders: Dict[NGram, LexiconBuilder] = {}
super().__init__(splitter)
def add(self, text: Text):
sequence = self.splitter(text)
for n_plus_one_gram in zip(*[sequence[start:] for start in range(self.n + 1)]):
ngram = n_plus_one_gram[: self.n]
self._builders[ngram] = self._builders.get(
ngram, LexiconBuilder(self.splitter)
)
self._builders[ngram].add(n_plus_one_gram[-1])
super().add(text)
def get_context_lexicon(self) -> ContextLexicon:
"""Returns the ContextLexicon accumulated by `self`."""
return {key: self._builders[key].get_lexicon() for key in self._builders}
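# Illustrative usage sketch (added example). It assumes a Splitter is simply a
# callable turning a Text into a list of Unit tokens, e.g. whitespace splitting.
if __name__ == "__main__":
    def whitespace_splitter(text):
        return text.split()
    builder = LexiconBuilder(whitespace_splitter)
    builder.add("the cat sat on the mat")
    lexicon = builder.get_lexicon()
    print(lexicon["the"])  # 2 of 6 observed units -> ~0.333
    context_builder = LexiconContextLexiconBuilder(whitespace_splitter, n=1)
    context_builder.add("the cat sat on the mat")
    context_lexicon = context_builder.get_context_lexicon()
    print(context_lexicon[("the",)]["cat"])  # P(cat | the) = 0.5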
| StarcoderdataPython |
6599429 | arr = [25, 11, 7, 87, 56]
max = arr[0]
for i in range(1, len(arr)):
    if arr[i] > max:
        max = arr[i]
print("Largest element present in given array: " + str(max))
| StarcoderdataPython |
1803431 | # Importing the libraries the code needs in order to work
import pandas as pd
import requests
from bs4 import BeautifulSoup
# Function that returns the information
def RetornarResposta(req):
    print("0 = Overview")
    print("1 = Active cases worldwide")
    print("3 = Active mild cases worldwide")
    print("5 = Active severe cases worldwide")
    modos = ["Showing overview", "Showing active cases worldwide", "",
             "Showing active mild cases worldwide", "", "Showing active severe cases worldwide"]
    modo = int(input("Enter a number: "))
content = req.content
soup = BeautifulSoup(content, 'html.parser')
table = soup.find(name='table')
table_str = str(table)
df = pd.read_html(table_str)[0]
print(modos[modo])
if not modo == 0:
print(df[0][modo])
else:
print(df[0])
# Function that starts the program
def Iniciar():
    print("Connecting to the database...")
    req = requests.get("https://www.worldometers.info/coronavirus/coronavirus-cases/")
    if req.status_code == 200:
        print("Connected!")
        print("Covid 19 data worldwide - Python | by @iDavi")
        RetornarResposta(req)
    else:
        print("Error while trying to connect to the database")
Iniciar()  # Just call Iniciar to start the program
| StarcoderdataPython |
4946783 | <reponame>Sannso/GameCG2
import pygame
import configparser
#850×480
ANCHO=850 #x4 = 3400
ALTO=480 #x3 = 1440
if __name__ == '__main__':
pygame.init()
pantalla=pygame.display.set_mode([ANCHO,ALTO])
archivo=configparser.ConfigParser()
archivo.read('info_mapa.txt')
nom_imagen=archivo.get('info','imagen')
terreno=pygame.image.load(nom_imagen)
info=terreno.get_rect()
an_t=info[2]
al_t=info[3]
ob_an= int (archivo.get('info','can_ancho'))
ob_al= int (archivo.get('info','can_alto'))
an_sp = int(an_t / ob_an)
al_sp = int(al_t / ob_al)
    print('sprite width: ', an_t / ob_an)
    print('sprite height: ', al_t / ob_al)
ls_cuadro=[]
ls_terreno=[]
for fila in range(ob_al):
for col in range(ob_an):
cuadro=terreno.subsurface(col*an_sp,fila*al_sp,an_sp,al_sp)
ls_cuadro.append(cuadro)
ls_terreno.append(ls_cuadro.copy())
ls_cuadro.clear()
mapa=archivo.get('info','mapa')
ls_filas=mapa.split('\n')
print(ls_filas)
con=0
con_fil=0
for e in ls_filas:
for j in e:
col=int(archivo.get(j,'col'))
fila=int(archivo.get(j,'fil'))
pantalla.blit(ls_terreno[fila][col],[con*an_sp,con_fil*al_sp])
con+=1
con_fil+=1
con = 0
#pantalla.blit(ls_t[0],[0,0])
pygame.display.flip()
fin=False
    while not fin:
        # event handling
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin=True
| StarcoderdataPython |
6401810 | import os
import pathlib
from typing import List
import boto3
import botocore
def s3_bucket_exists(name: str) -> bool:
s3 = boto3.client("s3")
try:
s3.head_bucket(Bucket=name)
except botocore.exceptions.ClientError as e:
print(e)
return False
return True
def file_exists(bucket_name: str, s3_object_path: str) -> bool:
s3 = boto3.resource("s3")
try:
s3.Object(bucket_name, s3_object_path).load() # pylint: disable=no-member
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "404":
return False
else:
raise
else:
return True
def upload_files(
    bucket_name: str, files_to_send: List[str], s3_destination_object_dir: str
) -> None:
s3 = boto3.client("s3")
for file_index, file_to_send in enumerate(files_to_send):
s3_destination_object_path = os.path.join(
s3_destination_object_dir, os.path.basename(file_to_send)
)
try:
if file_exists(bucket_name, s3_destination_object_path):
print(
"S3 object already exists %s:%s, %i/%i"
% (
bucket_name,
s3_destination_object_dir,
file_index + 1,
len(files_to_send),
)
)
continue
s3.upload_file(file_to_send, bucket_name, s3_destination_object_path)
except botocore.exceptions.ClientError as e:
print(e)
continue
print(
"Uploading file to %s:%s, %i/%i"
% (
bucket_name,
s3_destination_object_path,
file_index + 1,
len(files_to_send),
)
)
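# Illustrative usage (added example; the bucket name and paths are hypothetical):
if __name__ == "__main__":
    example_bucket = "my-example-bucket"
    if s3_bucket_exists(example_bucket):
        upload_files(example_bucket, ["./data/report.csv"], "uploads/reports")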
| StarcoderdataPython |
5194328 | # -*- coding: utf-8 -*-
import ujson
from flask import request
from flask_restplus import Namespace, Resource, fields
from models.user import User as orm_user
from models.project import Project as orm_project
from models.file import File as orm_file
from orator.exceptions.orm import ModelNotFound
from orator.exceptions.query import QueryException
from .utils import PAGINATOR
API = Namespace('users', description='Platform access administration')
USER = API.model('User', {
'first_name': fields.String(
required=True,
description='First name',
example='John',
pattern=r'^[a-zA-Z]'),
'last_name': fields.String(
required=True,
description='Last name',
example='Smith',
pattern="^[a-zA-Z]"),
'email': fields.String(
required=True,
description='Contact email',
example='<EMAIL>',
pattern=r"[^@]+@[^@]+\.[^@]+")
})
USER_DATA = USER.inherit('User', USER, {
'id': fields.Integer(required=False, description='Unique identifier', example='1')
})
# pylint: disable=no-self-use
# pylint: disable=maybe-no-member
@API.route('')
class UsersList(Resource):
"""Endpoint for list-based user results."""
@API.marshal_list_with(USER_DATA)
@API.response(200, 'Users list')
@API.expect(PAGINATOR)
def get(self):
"""
List all users
Returns a collection of users paginated and consolidated
in bundles of 10 per page
"""
# Retrieval of pagination parameters: page, per_page
page_args = PAGINATOR.parse_args()
return orm_user.for_page(page_args['page'], page_args['per_page']).get().serialize(), 200
@API.expect(USER, validate=True)
def post(self):
"""
Creates a new user
It uses the email as a primarily identifier of the user
and as credentials to authenticate in the platform.
"""
try:
new_user = orm_user(API.marshal(ujson.loads(request.data), USER))
flag = new_user.save()
except QueryException:
API.abort(code=400, message='Integrity violation')
if flag:
return new_user.serialize(), 201
@API.route('/<int:user_id>')
class User(Resource):
"""Endpoint for users operations."""
@API.response(200, 'User found', USER_DATA)
@API.marshal_with(USER_DATA)
def get(self, user_id):
"""
Fetch a user by its identifier
        Users are identified by a unique, immutable integer.
"""
try:
return orm_user.find_or_fail(user_id).serialize()
except ModelNotFound:
API.abort(code=404, message='User not found')
@API.response(204, 'User successfully deleted')
def delete(self, user_id):
"""
Deletes an existing user
Removes in cascade all information associated
to an individual user in Qualibrate
"""
try:
old_user = orm_user.find_or_fail(user_id)
flag = old_user.delete()
except ModelNotFound:
API.abort(code=404, message='User not found')
except QueryException:
API.abort(code=400, message='Integrity violation')
if flag:
return user_id, 204
@API.response(202, 'User successfully updated')
@API.expect(USER)
def put(self, user_id):
"""
Updates an existing user
        The body of the request contains a JSON representation
of the User model with the new set of attributes
"""
# Empty user creation
current_user = orm_user.find_or_fail(user_id)
current_user.set_raw_attributes(ujson.loads(request.data))
if current_user.save():
return current_user.serialize(), 202
@API.route('/<int:user_id>/projects')
class UserProjects(Resource):
"""
Fetch all the projects for an individual user
Projects are the test asset containers in Qualibrate
and contains all information about a test project
"""
def get(self, user_id):
"""
A list of projects that belong to a particular user
"""
try:
return orm_user.find_or_fail(user_id).projects.serialize()
except ModelNotFound:
API.abort(404)
@API.route('/<int:user_id>/projects/<int:project_id>')
class UserAddSProjects(Resource):
"""
Assigns the project ownership to a singular user
Users can own many projects, this association provides
a specified user the ownership of a project
"""
def put(self, user_id, project_id):
"""
User takes ownership of a project
"""
operation = orm_project.find(project_id).user().associate(orm_user.find(user_id))
if operation.save():
return orm_user.with_('projects').get().filter(
lambda x: x.id == user_id
).serialize(), 201
@API.route('/<int:user_id>/files')
class UserFiles(Resource):
"""
Fetch all the files for an individual user
Retrieve all reference files that belong
to this user, it includes attachments, images,
references and all the files used in Qualibrate
"""
def get(self, user_id):
"""
A list of files that belong to a particular user
"""
try:
return orm_file.select('uuid','name','mime','created_at','updated_at').where('user_id','=',user_id).get().serialize(), 200
except ModelNotFound:
API.abort(404)
| StarcoderdataPython |
12838081 | '''
Tkinter implementation of Meta Noughts and Crosses.
Requires Python >3.6, tkinter and mnac.
1.0: release
1.1: keyboard indicators / keyboard controls are like numpad
1.2: new status menu, controls, help menu
1.3: better mouse handling
1.4: UI tweaks and touchups
'''
import random
import os
import tkinter as tk
import numpy as np
import mnac
import render
__version__ = '1.4'
TITLE = f'TkMNAC v{__version__} / yunru.se'
class CanvasRender(render.Render):
'''Tkinter Canvas-based renderer.'''
font = 'Segoe UI'
def __init__(self, app, theme='light'):
self.app = app
self.canvas = app.canvas
self.coordinates = {}
self.theme = render.THEMES[theme]
self.error = False
def draw(self):
self.game = self.app.game
self.error = self.app.error
# determine colours and status
players = [
('gray', 'Unknown error', 'Unknown error'),
('nought', 'Noughts', 'Noughts wins!'),
('cross', 'Crosses', 'Crosses wins!'),
('gray', 'Neutral', "It's a draw...")
]
code, name, _ = players[self.game.player]
titlefill = self.theme[code]['light']
if self.error:
text = self.error
elif self.game.winner:
text = players[self.game.winner][2]
else:
statuses = {
'begin': 'grid to start in',
'inner': 'cell to play in',
'outer': 'grid to send to',
}
text = '{}, pick a {}'.format(name, statuses[self.game.state])
# get canvas details
w, h, self.size, self.topleft, header_height = self.app.coordinate()
if w > h:
self.topleft += ((w - h) / 2, 0)
else:
self.topleft += (0, (h - w) / 2)
self.canvas.config(bg=self.background())
self.canvas.delete('status', 'backing', 'mark', 'play')
self.canvas.tag_unbind('backing', '<Button-1>')
font_size = int(self.size / 32)
glyph_size = int(font_size * 1.5)
leftText = 'tab: help'
if self.app.showHelp:
text = ''
leftText = 'tab: back to game'
header = (
lambda x, y=header_height / 2, fill=titlefill, **kw:
self.canvas.create_text(
x, y, fill=fill,
tags='status', font=(self.font, font_size), **kw))
header(self.topleft[0] + 5, anchor='w', text=leftText)
header(self.topleft[0] + self.size/2, anchor='center', text=text)
def draw_glyph(fromRight, glyph, fill): return self.canvas.create_polygon(
*(glyph * glyph_size + (
self.topleft[0] + self.size + fromRight * glyph_size,
(header_height - glyph_size) / 2 + 2)).flatten(),
width=0, fill=fill, tags='status')
render.Render.draw(self)
# draw beginning help in middle cell
if self.app.showHelp:
self.canvas.create_rectangle(
*self.topleft, *(self.topleft + self.size),
width=0, fill=titlefill, tags='status', stipple="gray50")
for i, text in enumerate((
'The board is 9 grids each with 9 cells. Play to win',
'a grid, and win the larger grids to win the game.',
'',
'Place a tile in the tile and you will put your opponent',
'into the equivalent grid. For example, if you are in the',
'top left grid and play the bottom cell, your opponent',
'will have to play in the bottom grid, and so on.',
'',
'One exception is that you may never send your',
'opponent to your own grid, or one that is captured -',
'tiles that would do so are marked as green, and are',
"'teleporters' allowing you to choose where to send",
'your opponent. As grids become taken, there is less',
'choice, so be careful to tactically set up traps!',
'',
'CONTROLS:',
'Control-R: Restart the game',
'Keys 1-9 and mouse/touch: Play in cell / grid'
), start=1):
header(w/2, self.topleft[1] + i * 1.5 *
font_size, fill='black', text=text)
def cell(self, grid, cell, tl, size, fill):
tl += self.topleft
coords = (*tl, *(tl+size))
backing = self.canvas.create_rectangle(
*coords, width=0, fill=fill, tags='backing')
self.coordinates[grid+1, cell+1] = coords
def ellipse(self, coords, outline, width):
coords += (*self.topleft, *self.topleft)
self.canvas.create_oval(
*coords, width=width, outline=outline, tags='mark')
def polygon(self, coords, fill):
coords += self.topleft
self.canvas.create_polygon(
*coords.flatten(), fill=fill, width=0, tags='mark')
def text(self, coords, isLarge, text, size, fill):
coords += self.topleft
# this is arbitrary and needs a lot more playtesting :(
if os.name == 'posix':
fiddle = (2/9, -3/9) if isLarge else (-2/9, -4/9)
else:
fiddle = (1/9, -7/6) if isLarge else (-2/9, -2/3)
coords += np.array(fiddle) * self.size / (9 + 2 * self.SEPARATION)
self.canvas.create_text(
*coords, text=text, fill=fill, font=(self.font, size), anchor='nw', tags='play')
class UIMNAC(tk.Tk):
def __init__(self, **kwargs):
'''Initialise frame. Set players to None or a number.'''
tk.Tk.__init__(self)
self.title(TITLE)
self.minsize(400, 424)
self.columnconfigure(1, weight=1)
self.rowconfigure(1, weight=1)
self.canvas = tk.Canvas(
self, height=0, width=0,
bd=0, highlightthickness=0, relief='ridge')
self.canvas.grid(row=1, column=1, columnspan=3, sticky='news')
self.render = CanvasRender(self)
self.bind_all('<Configure>', self.redraw)
self.bind_all('<Control-r>', self.restart)
self.bind_all('<Tab>', self.toggleHelp)
self.bind_all('<Escape>', self.clearError)
self.canvas.bind('<Button-1>', self.onClick)
def callbacker(i): return lambda *event: self.play(mnac.numpad(i))
for i in range(1, 10):
self.bind_all(str(i), callbacker(i))
self.restart()
def restart(self, *event):
self.showHelp = False
self.error = ''
self.game = mnac.MNAC(middleStart=False)
self.redraw()
def clearError(self, *event):
self.error = ''
self.redraw()
def toggleHelp(self, *event):
self.showHelp = not self.showHelp
self.redraw()
def coordinate(self):
w, h = self.canvas.winfo_width(), self.canvas.winfo_height()
header_height = h / 18
h -= header_height
s = min(w, h)
tl = np.array((0, header_height), dtype=float)
return w, h, s, tl, header_height
def redraw(self, *event):
self.render.draw()
def onClick(self, event):
if self.game.winner:
return
w, h, s, tl, header_height = self.coordinate()
x = (event.x - tl[0]) * 9 / s
if (0 < event.y < header_height) and (0 < x < 9):
# status bar click
if x < 2 or self.showHelp:
self.toggleHelp()
else:
self.clearError()
# Iterate through all coordinates the renderer claims
# each cell was at
for coord, bounds in self.render.coordinates.items():
x1, y1, x2, y2 = bounds
if x1 <= event.x <= x2 and y1 <= event.y <= y2:
grid, cell = coord
break
else:
return
if self.game.state in ('outer', 'begin'):
self.play(grid)
elif self.game.state == 'inner':
if grid == (self.game.grid + 1):
self.play(cell)
else:
self.play(grid)
def play(self, index):
if self.game.winner:
return
self.error = ''
try:
self.game.play(index)
except mnac.MoveError as e:
self.error = mnac.ERRORS[e.args[0]]
self.redraw()
def test_turn(self, *event):
'''debug: play random moves'''
choices = list(range(9))
random.shuffle(choices)
for i in choices:
try:
self.game.play(i + 1)
break
except mnac.MoveError:
continue
self.render.draw()
if not self.game.winner:
self.after(500, self.test_turn)
if __name__ == '__main__':
self = UIMNAC()
self.mainloop()
| StarcoderdataPython |
3294069 | #!/usr/bin/python
import subprocess
p = subprocess.Popen(["ps", "-aux"], stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
if ('netdisco-dhcp-listener.py' in out):
print('\nDHCP Sniffer is running')
else:
print('\nDHCP Sniffer NOT running!')
if ('netdisco-pinger.py' in out):
print('Pinger is running')
else:
print('Pinger is NOT running!')
if ('netdisco-srx-syslog-receiver.py' in out):
print('SRX SYSLOG receiver is running')
else:
print('SRX SYSLOG receiver is NOT running!')
if ('netdisco-srx-update.py' in out):
print('SRX WebAPI update agent is running')
else:
print('SRX WebAPI update agent is NOT running!')
if ('netdisco-ex-poller.py' in out):
print('EX-Series Polling agent is running')
else:
print('EX-Series Polling agent is NOT running!')
if ('netdisco-radius.py' in out):
print('RADIUS Auth/Accounting agent is running')
else:
print('RADIUS Auth/Accounting agent is NOT running!')
if ('netdisco-useragent.py' in out):
print('HTTP USERAGENT collector is running')
else:
print('HTTP USERAGENT collector is NOT running!')
if ('netdisco-admin.py' in out):
print('Web Interface is running')
else:
print('Web Interface is NOT running!')
print('\n')
| StarcoderdataPython |
1943769 | <reponame>efabless/volare
#!/usr/bin/env python3
from setuptools import setup, find_packages
from volare import __version__
requirements = open("requirements.txt").read().strip().split("\n")
setup(
name="volare",
packages=find_packages(),
version=__version__,
description="A sky130 PDK builder/version manager",
long_description=open("Readme.md").read(),
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
],
entry_points={"console_scripts": ["volare = volare.__main__:cli"]},
python_requires=">3.6",
)
| StarcoderdataPython |
5036684 | from summariser.vector.vector_generator import Vectoriser
from summariser.utils.corpus_reader import CorpusReader
from resources import *
from summariser.utils.writer import append_to_file
import sys
def writeSample(actions,reward,path):
if 'heuristic' in path:
str = '\nactions:'
for act in actions:
str += repr(act)+','
str = str[:-1]
str += '\nutility:'+repr(reward)
append_to_file(str, path)
else:
assert 'rouge' in path
str = '\n'
for j,model_name in enumerate(reward):
str += '\nmodel {}:{}'.format(j,model_name)
str += '\nactions:'
for act in actions:
str += repr(act)+','
str = str[:-1]
str += '\nR1:{};R2:{};R3:{};R4:{};RL:{};RSU:{}'.format(
reward[model_name][0],reward[model_name][1],reward[model_name][2],
reward[model_name][3],reward[model_name][4],reward[model_name][5]
)
append_to_file(str, path)
if __name__ == '__main__':
if len(sys.argv) == 4:
dataset = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
else:
dataset = 'DUC2001' #DUC2001, DUC2002, DUC2004
start = 0
end = 9999
language = 'english'
summary_len = 100
summary_num = 10001
base_dir = os.path.join(SUMMARY_DB_DIR,dataset)
reader = CorpusReader(PROCESSED_PATH)
data = reader.get_data(dataset,summary_len)
topic_cnt = 0
for topic, docs, models in data:
topic_cnt += 1
if not(topic_cnt > start and topic_cnt <= end):
continue
dir_path = os.path.join(base_dir,topic)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
vec = Vectoriser(docs,summary_len)
print('-----Generate samples for topic {}: {}-----'.format(topic_cnt,topic))
act_list, h_rewards, r_rewards = vec.sampleRandomReviews(summary_num,True,True,models)
assert len(act_list) == len(h_rewards) == len(r_rewards)
for ii in range(len(act_list)):
writeSample(act_list[ii],h_rewards[ii],os.path.join(dir_path,'heuristic'))
writeSample(act_list[ii],r_rewards[ii],os.path.join(dir_path,'rouge'))
| StarcoderdataPython |
105421 | <filename>src/navi/components/other/low_pass_filter.py
from navi.components.component import Component
__author__ = 'paoolo'
class LowPassFilter(Component):
"""
Used to low pass.
"""
def __init__(self):
super(LowPassFilter, self).__init__(enable=True)
self._old_left = 0.0
self._old_right = 0.0
self._alpha = 0.3
def modify(self, left, right):
left = self._low_pass_filter(left, self._old_left)
right = self._low_pass_filter(right, self._old_right)
self._old_left, self._old_right = left, right
return left, right
def _low_pass_filter(self, new_value, old_value):
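        # Exponential smoothing: y <- y_old + alpha * (x - y_old). A smaller
        # alpha smooths more aggressively (slower response to new values).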
if old_value is None:
return new_value
return old_value + self._alpha * (new_value - old_value)
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, val):
self._alpha = float(val) | StarcoderdataPython |
6686830 | from __future__ import annotations
import os
from pathlib import Path
from typing import Union, TypeVar, Type, Generic
from loguru import logger
from transformers import PreTrainedModel
from codenets.recordable import RecordableTorchModule
from codenets.utils import full_classname, instance_full_classname, runtime_import
PretrainedRec_T = TypeVar("PretrainedRec_T", bound="PreTrainedModelRecordable")
Pretrained_T = TypeVar("Pretrained_T", bound="PreTrainedModel")
class PreTrainedModelRecordable(Generic[Pretrained_T], RecordableTorchModule):
"""
Wrap any generic HuggingFace PreTrainedModel as a Recordable Torch module
equipped with load/save
"""
def __init__(self, model: Pretrained_T):
super().__init__()
self.model = model
def save(self, output_dir: Union[Path, str]) -> bool:
full_dir = Path(output_dir) / instance_full_classname(self) / instance_full_classname(self.model)
logger.info(f"Saving HuggingFace model to {full_dir}")
os.makedirs(full_dir, exist_ok=True)
self.model.save_pretrained(full_dir)
return True
@classmethod
def load(cls: Type[PretrainedRec_T], restore_dir: Union[Path, str]) -> PretrainedRec_T:
import json
full_dir = Path(restore_dir) / full_classname(cls)
logger.info(f"Loading HuggingFace Pretrained model from {full_dir}")
_, dirs, _ = list(os.walk(full_dir))[0]
model_cls_name = dirs[0]
logger.info(f"Loading HuggingFace {model_cls_name} model from {full_dir}/{model_cls_name}")
klass = runtime_import(model_cls_name)
assert issubclass(klass, PreTrainedModel)
model = klass.from_pretrained(str(full_dir / model_cls_name))
return cls(model)
def forward(self, *args, **kwargs):
return self.model.forward(*args, **kwargs)
# BertModelRecordable = PreTrainedModelRecordable[BertModel]
# class BertModelRecordable(RecordableTorchModule):
# """
# Wrapper to make BertModel recordable
# Haven't found a way to make that generic in a typesafe mode,
# mypy and generics are too limited but I'll search again because
# all Transformers classes have the same save/load_pretrained functions
# so in theory there is no reason no to have one single recordable for
# them all
# """
# def __init__(self, model: BertModel):
# super().__init__()
# self.model = model
# def save(self, output_dir: Union[Path, str]) -> bool:
# full_dir = Path(output_dir) / instance_full_classname(self)
# logger.debug(f"Saving BertModel to {full_dir}")
# os.makedirs(full_dir, exist_ok=True)
# self.model.save_pretrained(full_dir)
# return True
# @classmethod
# def load(cls, restore_dir: Union[Path, str]) -> BertModelRecordable:
# full_dir = Path(restore_dir) / full_classname(cls)
# logger.debug(f"Loading BertModel from {full_dir}")
# model = BertModel.from_pretrained(str(full_dir))
# return BertModelRecordable(model)
# def forward(self, tokens, tokens_mask):
# return self.model.forward(tokens, tokens_mask)
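# Illustrative usage sketch (added; not part of the original module). It wraps
# a tiny, randomly initialised BertModel so that save()/load() can round-trip it
# through the recordable directory layout; the config sizes are arbitrary.
if __name__ == "__main__":
    from transformers import BertConfig, BertModel
    tiny = BertModel(BertConfig(hidden_size=32, num_hidden_layers=1,
                                num_attention_heads=2, intermediate_size=64))
    recordable = PreTrainedModelRecordable(tiny)
    recordable.save("./checkpoints")
    restored = PreTrainedModelRecordable.load("./checkpoints")
    logger.info(f"Restored model type: {type(restored.model).__name__}")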
| StarcoderdataPython |
3254290 | from .base import * # noqa pylint: disable=unused-import,unused-wildcard-import,wildcard-import
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
import logging
import sentry_sdk
DEBUG = True
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = os.environ.get("SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405 pylint: disable-all
DATABASES["default"]["CONN_MAX_AGE"] = os.environ.get(
"CONN_MAX_AGE", default=60
) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# CACHES = {
# "default": {
# "BACKEND": "django_redis.cache.RedisCache",
# "LOCATION": os.environ.get("REDIS_URL", "redis://127.0.0.1:6379/1"),
# "OPTIONS": {
# "CLIENT_CLASS": "django_redis.client.DefaultClient",
# # Mimicing memcache behavior.
# # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
# "IGNORE_EXCEPTIONS": True,
# },
# }
# }
# Email Service
# https://sendgrid.com/docs/for-developers/sending-email/django/
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"backend.Profile.authentication.JWTAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [],
"DEFAULT_RENDERER_CLASSES": ["rest_framework.renderers.JSONRenderer"],
"DEFAULT_PARSER_CLASSES": [
"rest_framework.parsers.JSONParser",
"rest_framework.parsers.FormParser",
"rest_framework.parsers.MultiPartParser",
"rest_framework.parsers.FileUploadParser",
],
"DEFAULT_FILTER_BACKENDS": (
"drf_spectacular.contrib.django_filters.DjangoFilterBackend",
),
}
# SECURITY
# ------------------------------------------------------------------------------
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = os.environ.get("DJANGO_SECURE_SSL_REDIRECT", default=True)
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = os.environ.get(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
SECURE_HSTS_PRELOAD = os.environ.get("DJANGO_SECURE_HSTS_PRELOAD", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = os.environ.get(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
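# Note (illustrative addition, not part of the original settings):
# os.environ.get() returns strings, so an environment value such as "False"
# for the DJANGO_SECURE_* flags above is still truthy. A small helper along
# these lines is one way to coerce such flags explicitly; the settings above
# keep the original behaviour.
def env_bool(name, default=False):
    """Interpret an environment variable as a boolean flag."""
    value = os.environ.get(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes", "on")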
# Sentry
# ------------------------------------------------------------------------------
# https://docs.sentry.io/platforms/python/guides/django/#Configure
SENTRY_DSN = os.environ.get("SENTRY_DSN")
SENTRY_LOG_LEVEL = os.environ.get("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
CORS_ALLOW_ALL_ORIGINS = True
| StarcoderdataPython |
3297147 | import numpy as np
import os
from glob import glob
import scipy.io as sio
from skimage.io import imread, imsave
from time import time
from api import PRN
from utils.write import write_obj_with_colors
# ---- init PRN
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
prn = PRN(is_dlib = False)
# ------------- load data
image_folder = 'TestImages/AFLW2000/'
save_folder = 'TestImages/AFLW2000_results'
if not os.path.exists(save_folder):
os.mkdir(save_folder)
types = ('*.jpg', '*.png')
image_path_list= []
for files in types:
image_path_list.extend(glob(os.path.join(image_folder, files)))
total_num = len(image_path_list)
for i, image_path in enumerate(image_path_list):
# read image
image = imread(image_path)
# the core: regress position map
if 'AFLW2000' in image_path:
mat_path = image_path.replace('jpg', 'mat')
info = sio.loadmat(mat_path)
kpt = info['pt3d_68']
pos = prn.process(image, kpt) # kpt information is only used for detecting face and cropping image
else:
pos = prn.process(image) # use dlib to detect face
# -- Basic Applications
# get landmarks
kpt = prn.get_landmarks(pos)
# 3D vertices
vertices = prn.get_vertices(pos)
# corresponding colors
colors = prn.get_colors(image, vertices)
# -- save
#name = image_path.strip().split('/')[-1][:-4]
name = os.path.splitext(os.path.basename(image_path))[0]
np.savetxt(os.path.join(save_folder, name + '.txt'), kpt)
write_obj_with_colors(os.path.join(save_folder, name + '.obj'), vertices, prn.triangles, colors) #save 3d face(can open with meshlab)
sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})
| StarcoderdataPython |
1919890 | #!/home/andrew/.envs/venv38/bin/python3
import sys
import numpy as np
def read_input():
vent_lines = []
for line in sys.stdin:
halves = (x.strip() for x in line.strip().split("->"))
points = [[int(x) for x in y.split(",")] for y in halves]
vent_lines.append(points)
return vent_lines
def is_rook_move(vent_line):
v = vent_line
return (v[0][0] == v[1][0]) or (v[0][1] == v[1][1])
def integer_points(vent_line):
v = vent_line
if not is_rook_move(v):
return []
points = []
x_begin = min(v[0][0], v[1][0])
x_end = max(v[0][0], v[1][0]) + 1
y_begin = min(v[0][1], v[1][1])
y_end = max(v[0][1], v[1][1]) + 1
for x in range(x_begin, x_end):
for y in range(y_begin, y_end):
points.append([x,y])
return points
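# Illustrative examples (not part of the original script):
#     integer_points([[0, 9], [5, 9]]) -> [[0, 9], [1, 9], [2, 9], [3, 9], [4, 9], [5, 9]]
#     integer_points([[8, 0], [0, 8]]) -> []   (diagonals are skipped by is_rook_move)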
def infer_grid_shape(vent_lines):
x_max = -1
y_max = -1
for v in vent_lines:
for p in v:
x_max = max(x_max, p[0])
y_max = max(y_max, p[1])
return [y_max + 1, x_max + 1]
# Read in vent lines and infer grid shape
vent_lines = read_input()
grid_shape = infer_grid_shape(vent_lines)
#print(vent_lines)
print("Grid shape:", grid_shape)
# Create grid and weight each point by number of vents
grid = np.zeros(grid_shape, dtype=int)
for v in vent_lines:
for x,y in integer_points(v):
grid[y, x] += 1
#print(grid)
# Compute the number of points with weight at least 2
dangerous_points = np.sum(grid >= 2)
print("Number of dangerous points:", dangerous_points)
| StarcoderdataPython |
1910845 | from .utils import get_cv_data_ann_kfold as get_cv_data
from .optimizer import Optimizer as Opt
from .stats import calculate_p_values
from collections import namedtuple
from .full_model import FullModel
from .model import Model
import numpy as np
TestResult = namedtuple('TestResult', ['ml_trn', 'ml_cv_mean', 'ml_cv'])
class StructureAnalyzer(object):
def __init__(self, observed_variables: list, model_desc: str, data,
use_cv=True):
"""
Keyword arguments:
observed_variables -- A list of observed variables whose
mutual relationships are unclear.
model_desc -- A model description in valid syntax; usually
assumed to be the measurement part, but extra
information can be provided (with caution).
data -- The dataset to analyze.
use_cv -- Use cross-validation.
"""
self.observed_vars = observed_variables
self.model_desc = model_desc
self.full_data = data.copy()
self.training_set, self.testing_sets = get_cv_data(data)
if not use_cv:
self.training_set = self.full_data
self.testing_sets = [self.full_data]
def get_model(self, params_to_ignore):
model = FullModel(self.observed_vars, self.model_desc,
params_to_ignore)
model.load_dataset(self.full_data)
return model
def test_model_cv(self, mod):
mls = list()
model = Model(self.get_model_description(mod),
force_load=self.observed_vars)
opt = Opt(model)
opt.load_dataset(self.training_set)
lf = opt.optimize()
# pvals = np.zeros((model.beta_range[1] - model.beta_range[0],))
for data in self.testing_sets:
data = data[model.vars['IndsObs']]
cov = np.cov(data, rowvar=False, bias=True)
opt.mx_cov = cov
mls.append(opt.ml_wishart(opt.params))
# pvals /= len(self.testing_sets)
# pvals_nums = np.count_nonzero((pvals > 1e-1) | np.isnan(pvals))
return TestResult(lf, np.mean(mls), mls)
def get_least_significant_param(self, pvalues, params_to_pen, model, opt):
pvalues = np.array(pvalues)
t = pvalues[params_to_pen]
if not len(t):
return
t = np.max(t)
if np.isnan(t):
t = np.array([True if i in params_to_pen else False
for i in range(len(pvalues))])
i = np.where(np.isnan(pvalues) & t)[0][0]
else:
i = np.where(pvalues == t)[0][0]
m, n = model.parameters['Beta'][i - model.beta_range[0]]
lval, rval = model.beta_names[0][m], model.beta_names[1][n]
return lval, rval
def get_num_pvals(self, opt, pvals):
pvals = np.array(pvals)
return np.count_nonzero(pvals[list(range(*opt.model.beta_range))] > 5e-2)
def get_model_description(self, model):
d = model.description
op = model.operations.REGRESSION
s = str()
for lv in d:
if d[lv][op]:
s += '{} ~ {}\n'.format(lv, ' + '.join(list(d[lv][op].keys())))
op = model.operations.MEASUREMENT
for lv in d:
if d[lv][op]:
s += '{} =~ {}\n'.format(lv, ' + '.join(list(d[lv][op].keys())))
return s
def run(self):
params_to_ignore = set()
while True:
model = self.get_model(params_to_ignore)
params_to_pen = list(range(*model.beta_range))
opt = Opt(model)
lf = opt.optimize()
conn = self.get_num_components_connected(opt)
if conn < 6:
break
pvalues = calculate_p_values(opt)
num_pvals = self.get_num_pvals(opt, pvalues)
ind = self.get_least_significant_param(pvalues, params_to_pen,
model, opt)
params_to_ignore.add(ind)
if len(params_to_pen) > 2 * conn:
lf = np.inf
desc = self.get_model_description(model)
yield lf, self.test_model_cv(model), num_pvals, conn, desc
if len(params_to_pen) < 2:
break
def get_num_components_connected(self, opt):
"""Get number of variables present in structural part.
Keyword arguments:
opt -- Optimizer with optimized parameters.
Returns:
Number of variables
"""
n = opt.mx_beta.shape[0]
for i in range(n):
r_nonzeros = np.abs(opt.mx_beta[i]) > 1e-16
c_nonzeros = np.abs(opt.mx_beta[:, i]) > 1e-16
num_nonzeros = r_nonzeros | c_nonzeros
if not np.count_nonzero(num_nonzeros):
n -= 1
return n
def analyze(self, print_status=False):
"""Wraps run method and returns helper structures.
Keyword arguments:
print_status -- Whether to print intermediate information on each step.
Returns:
Arrays of model numbers, FullModel MLs, training-set MLs, mean CV MLs,
CV MLs, numbers of p-values exceeding the set bound, numbers of
variables present in the structural part, and model descriptions.
"""
n, lfs, descs = list(), list(), list()
ml_trns, ml_means, ml_cvs = list(), list(), list()
pvals_nums, conns = list(), list()
for i, (lf, test, num_pvals, conn, desc) in enumerate(self.run()):
if lf is None or lf is np.nan:
continue
n.append(i)
lfs.append(lf)
conns.append(conn)
ml_trns.append(test.ml_trn)
ml_means.append(test.ml_cv_mean)
ml_cvs.append(test.ml_cv)
pvals_nums.append(num_pvals)
descs.append(desc)
if ml_means[-1] > 16:
break
if print_status:
print("Step {}, {:.4f}, {:.4f}, pnum: {}".format(i,
test.ml_cv_mean,
lf, num_pvals))
n, lfs, ml_cvs = np.array(n), np.array(lfs), np.array(ml_cvs).T
ml_means, conns = np.array(ml_means), np.array(conns)
pvals_nums = np.array(pvals_nums)
return n, lfs, ml_trns, ml_means, ml_cvs, pvals_nums, conns, descs
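# Hypothetical usage sketch (illustrative only, not part of the original
# module): the variable names, measurement description and data file below
# are made up.
#
#     import pandas as pd
#
#     data = pd.read_csv("data.csv")
#     desc = """eta1 =~ x1 + x2 + x3
#               eta2 =~ x4 + x5 + x6"""
#     analyzer = StructureAnalyzer(["x1", "x2", "x3", "x4", "x5", "x6"], desc, data)
#     results = analyzer.analyze(print_status=True)
#     n, lfs, ml_trns, ml_means, ml_cvs, pvals_nums, conns, descs = results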
| StarcoderdataPython |
1906246 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# pylint: disable=C0103
"""
This module contains project settings.
"""
from __future__ import print_function
from __future__ import unicode_literals
import os
# Host and port for web server to listen
HOST = "0.0.0.0"
PORT = 9999
# Number of seconds to sleep before sending HTTP response to client
DELAY = 0.25
# Administrator's email
ADMIN_EMAIL = "<EMAIL>"
# Project's root directory
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
# Project's server dumps root directory
DUMPS_ROOT = os.path.join(PROJECT_ROOT, "dumps")
# Project's templates root directory
TEMPLATES_ROOT = os.path.join(PROJECT_ROOT, "templates")
# Project's static files root directory
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
# Project's log root directory
LOG_ROOT = os.path.join(PROJECT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_ROOT, "robobattleship.log")
# Logger config
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'logsna': {
'()': 'logsna.Formatter',
}
},
'handlers': {
'robobattleship': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 20,
'filename': LOG_FILE,
'formatter': 'logsna',
},
},
'loggers': {
'robobattleship': {
'handlers': ['robobattleship'],
'level': 'DEBUG',
'propagate': True,
},
'bottle': {
'handlers': ['robobattleship'],
'level': 'ERROR',
'propagate': True,
},
'requests': {
'handlers': ['robobattleship'],
'level': 'ERROR',
'propagate': True,
},
}
}
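# Illustrative usage (not part of the original settings module): application
# code would typically apply this dictConfig once at startup and then fetch
# the named logger, e.g.
#
#     import logging.config
#     import settings  # hypothetical import path
#
#     logging.config.dictConfig(settings.LOGGING)
#     log = logging.getLogger("robobattleship")
#     log.info("Listening on %s:%s", settings.HOST, settings.PORT)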
| StarcoderdataPython |