code | apis | extract_api |
---|---|---|
from flask import Flask, flash
def display_error(error,preface="",postface=""):
flash(f"{preface} {error} {postface}") | [
"flask.flash"
] | [((85, 123), 'flask.flash', 'flash', (['f"""{preface} {error} {postface}"""'], {}), "(f'{preface} {error} {postface}')\n", (90, 123), False, 'from flask import Flask, flash\n')] |
import sys
import typing
from collections import deque
from typing import Callable, Optional
import mypy.errorcodes
import mypy.errors
import mypy.nodes
import mypy.options
import mypy.plugin
import mypy.types
if sys.version_info >= (3, 10): # pragma: no cover
from typing import TypeGuard
else: # pragma: no cover
from typing_extensions import TypeGuard
SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext]
class ProtocolIntersectionPlugin(mypy.plugin.Plugin):
# pylint: disable=unused-argument
def get_type_analyze_hook(
self, fullname: str
) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]:
if fullname == "typing_protocol_intersection.types.ProtocolIntersection":
return type_analyze_hook(fullname)
return None
def get_method_signature_hook(
self, fullname: str
) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]:
return intersection_function_signature_hook
def get_function_signature_hook(
self, fullname: str
) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]:
return intersection_function_signature_hook
class TypeInfoWrapper(typing.NamedTuple):
type_info: mypy.nodes.TypeInfo
base_classes: typing.List[mypy.nodes.TypeInfo]
class IncomparableTypeName(str):
"""A string that never returns True when compared (equality) with another instance of this type."""
def __eq__(self, x: object) -> bool:
if isinstance(x, IncomparableTypeName):
return False
return super().__eq__(x)
def __hash__(self) -> int: # pylint: disable=useless-super-delegation
return super().__hash__()
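# Illustrative behaviour of IncomparableTypeName (assumed examples, not from the original tests):
#   IncomparableTypeName("P") == IncomparableTypeName("P")  -> False (never equal to its own kind)
#   IncomparableTypeName("P") == "P"                        -> True  (falls back to str.__eq__)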
def mk_protocol_intersection_typeinfo(
name: str,
*,
# For ProtocolIntersections to not be treated as the same type, but just as protocols,
# their fullnames need to differ - that's why it's an IncomparableTypeName.
fullname: IncomparableTypeName,
symbol_table: Optional[mypy.nodes.SymbolTable] = None,
) -> mypy.nodes.TypeInfo:
defn = mypy.nodes.ClassDef(
name=name,
defs=mypy.nodes.Block([]),
base_type_exprs=[
mypy.nodes.NameExpr("typing.Protocol"),
# mypy expects object to be here at the last index ('we skip "object" since everyone implements it')
mypy.nodes.NameExpr("builtins.object"),
],
type_vars=[],
)
defn.fullname = IncomparableTypeName(fullname)
defn.info.is_protocol = True
type_info = mypy.nodes.TypeInfo(
names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(),
defn=defn,
module_name="typing_protocol_intersection",
)
type_info.mro = [type_info]
type_info.is_protocol = True
return type_info
class ProtocolIntersectionResolver:
def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type:
folded_type = self.fold_intersection(type_)
if isinstance(folded_type, mypy.types.Instance):
folded_type.args = tuple(self.fold_intersection(t) for t in folded_type.args)
return folded_type
def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type:
if not self._is_intersection(type_):
return type_
type_info = mk_protocol_intersection_typeinfo(
"ProtocolIntersection",
fullname=IncomparableTypeName("typing_protocol_intersection.types.ProtocolIntersection"),
)
type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, []))
args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes]
return mypy.types.Instance(type_info_wrapper.type_info, args=args)
def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper:
intersections_to_process = deque([type_])
while intersections_to_process:
intersection = intersections_to_process.popleft()
for arg in intersection.args:
if self._is_intersection(arg):
intersections_to_process.append(arg)
continue
if isinstance(arg, mypy.types.Instance):
self._add_type_to_intersection(intersection_type_info_wrapper, arg)
return intersection_type_info_wrapper
@staticmethod
def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None:
name_expr = mypy.nodes.NameExpr(typ.type.name)
name_expr.node = typ.type
intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr)
intersection_type_info_wrapper.type_info.mro.insert(0, typ.type)
intersection_type_info_wrapper.base_classes.insert(0, typ.type)
@staticmethod
def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]:
return isinstance(typ, mypy.types.Instance) and typ.type.fullname == (
"typing_protocol_intersection.types.ProtocolIntersection"
)
def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike:
resolver = ProtocolIntersectionResolver()
signature = context.default_signature
signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type)
signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types]
return signature
def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]:
def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type:
args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args)
symbol_table = mypy.nodes.SymbolTable()
for arg in args:
if isinstance(arg, mypy.types.Instance):
if not arg.type.is_protocol:
context.api.fail(
"Only Protocols can be used in ProtocolIntersection.", arg, code=mypy.errorcodes.VALID_TYPE
)
symbol_table.update(arg.type.names)
type_info = mk_protocol_intersection_typeinfo(
context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table
)
return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column)
return _type_analyze_hook
def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]:
# ignore version argument if the plugin works with all mypy versions.
return ProtocolIntersectionPlugin
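# Hypothetical user code that this plugin makes type-checkable (protocol names are made up):
#   from typing import Protocol
#   from typing_protocol_intersection.types import ProtocolIntersection
#   class HasName(Protocol):
#       name: str
#   class HasAge(Protocol):
#       age: int
#   def greet(obj: ProtocolIntersection[HasName, HasAge]) -> str:
#       return f"{obj.name}, {obj.age}"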
| [
"collections.deque"
] | [((3972, 3986), 'collections.deque', 'deque', (['[type_]'], {}), '([type_])\n', (3977, 3986), False, 'from collections import deque\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-18 04:43
from __future__ import unicode_literals
from django.db import migrations, models
def update_names(apps, schema_editor):
for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator():
x.name = x.data.get('nazwa', '')
x.save()
for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator():
x.name = x.data.get('name', '')
x.save()
class Migration(migrations.Migration):
dependencies = [
('institutions', '0013_auto_20170718_0256'),
]
operations = [
migrations.AddField(
model_name='regon',
name='name',
field=models.CharField(default='', max_length=200, verbose_name='Name'),
preserve_default=False,
),
migrations.AddField(
model_name='resp',
name='name',
field=models.CharField(default='', max_length=200, verbose_name='Name'),
preserve_default=False,
),
migrations.RunPython(update_names)
]
| [
"django.db.migrations.RunPython",
"django.db.models.CharField"
] | [((1079, 1113), 'django.db.migrations.RunPython', 'migrations.RunPython', (['update_names'], {}), '(update_names)\n', (1099, 1113), False, 'from django.db import migrations, models\n'), ((740, 805), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(200)', 'verbose_name': '"""Name"""'}), "(default='', max_length=200, verbose_name='Name')\n", (756, 805), False, 'from django.db import migrations, models\n'), ((957, 1022), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(200)', 'verbose_name': '"""Name"""'}), "(default='', max_length=200, verbose_name='Name')\n", (973, 1022), False, 'from django.db import migrations, models\n')] |
# coding: utf-8
# pylint: disable=missing-docstring, invalid-name
import flask
import auth
import config
from main import app
import model.user as user #import User#, UserVdr
github_config = dict(
access_token_method='POST',
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize',
base_url='https://api.github.com/',
# consumer_key=config.CONFIG_DB.auth_github_id,
# consumer_secret=config.CONFIG_DB.auth_github_secret,
request_token_params={'scope': 'user:email'},
)
github = auth.create_oauth_app(github_config, 'github')
@app.route('/_s/callback/github/oauth-authorized/')
def github_authorized():
response = github.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(flask.url_for('index'))
flask.session['oauth_token'] = (response['access_token'], '')
me = github.get('user')
usr = retrieve_user_from_github(me.data)
return auth.signin_via_social(usr)
@github.tokengetter
def get_github_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/github/')
def signin_github():
return auth.signin_oauth(github)
def retrieve_user_from_github(response):
auth_id = 'github_%s' % str(response['id'])
usr = user.User.get_by('authIDs_', auth_id)
bio = response['bio'][:user.bio_span[1]] if response['bio'] else ''
location = response['location'][:user.location_span[1]] if response['location'] else ''
return usr or auth.create_or_get_user_db(
auth_id,
response.get('name', ''),
response.get('login'),
response.get('email', ''),
location=location,
bio=bio,
github=response.get('login')
)
# Todo replace opaque and repeated code such as
# bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else ''
# with
# bio = getField(response, 'bio')
def getField(response, name):
field = response[name]
if field:
span = name + '_span' # depends on validators following this naming convention
max = getattr(user, span)[1]
return field[:max]
return ''
| [
"flask.flash",
"auth.signin_oauth",
"flask.session.get",
"flask.url_for",
"main.app.route",
"auth.signin_via_social",
"auth.create_oauth_app"
] | [((574, 620), 'auth.create_oauth_app', 'auth.create_oauth_app', (['github_config', '"""github"""'], {}), "(github_config, 'github')\n", (595, 620), False, 'import auth\n'), ((624, 674), 'main.app.route', 'app.route', (['"""/_s/callback/github/oauth-authorized/"""'], {}), "('/_s/callback/github/oauth-authorized/')\n", (633, 674), False, 'from main import app\n'), ((1158, 1186), 'main.app.route', 'app.route', (['"""/signin/github/"""'], {}), "('/signin/github/')\n", (1167, 1186), False, 'from main import app\n'), ((1031, 1058), 'auth.signin_via_social', 'auth.signin_via_social', (['usr'], {}), '(usr)\n', (1053, 1058), False, 'import auth\n'), ((1122, 1154), 'flask.session.get', 'flask.session.get', (['"""oauth_token"""'], {}), "('oauth_token')\n", (1139, 1154), False, 'import flask\n'), ((1219, 1244), 'auth.signin_oauth', 'auth.signin_oauth', (['github'], {}), '(github)\n', (1236, 1244), False, 'import auth\n'), ((777, 826), 'flask.flash', 'flask.flash', (['"""You denied the request to sign in."""'], {}), "('You denied the request to sign in.')\n", (788, 826), False, 'import flask\n'), ((857, 879), 'flask.url_for', 'flask.url_for', (['"""index"""'], {}), "('index')\n", (870, 879), False, 'import flask\n')] |
from pages.service_page.models import ServicePage
from pages.topic_page.factories import JanisBasePageWithTopicsFactory
from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify
class ServicePageFactory(JanisBasePageWithTopicsFactory):
@classmethod
def create(cls, *args, **kwargs):
if 'dynamic_content' in kwargs:
kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content'])
step_keywords = ['steps', 'steps_es']
for step_keyword in step_keywords:
if step_keyword in kwargs:
kwargs[step_keyword] = streamfieldify(kwargs[step_keyword])
return super(ServicePageFactory, cls).create(*args, **kwargs)
class Meta:
model = ServicePage
| [
"pages.base_page.fixtures.helpers.streamfieldify.streamfieldify"
] | [((390, 431), 'pages.base_page.fixtures.helpers.streamfieldify.streamfieldify', 'streamfieldify', (["kwargs['dynamic_content']"], {}), "(kwargs['dynamic_content'])\n", (404, 431), False, 'from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify\n'), ((600, 636), 'pages.base_page.fixtures.helpers.streamfieldify.streamfieldify', 'streamfieldify', (['kwargs[step_keyword]'], {}), '(kwargs[step_keyword])\n', (614, 636), False, 'from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify\n')] |
# (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two classes are define here :py:class:`dlbs.IOUtils` and :py:class:`dlbs.DictUtils`.
"""
import os
import copy
import json
import gzip
import re
import logging
import subprocess
import importlib
from multiprocessing import Process
from multiprocessing import Queue
from glob import glob
from dlbs.exceptions import ConfigurationError
class OpenFile(object):
"""Class that can work with gzipped and regular textual files."""
def __init__(self, fname, mode='r'):
self.__fname = fname
self.__flags = ['rb', 'r'] if mode == 'r' else ['wb', 'w']
def __enter__(self):
if self.__fname.endswith('.gz'):
self.__fobj = gzip.open(self.__fname, self.__flags[0])
else:
self.__fobj = open(self.__fname, self.__flags[1])
return self.__fobj
def __exit__(self, type, value, traceback):
self.__fobj.close()
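# Usage sketch for OpenFile (file name is an example); works the same for '.json' and '.json.gz':
#   with OpenFile('results.json.gz') as fobj:
#       data = json.load(fobj)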
class IOUtils(object):
"""Container for input/output helpers"""
@staticmethod
def mkdirf(file_name):
"""Makes sure that parent folder of this file exists.
The file itself may not exist. A typical usage is to ensure that we can
write to this file. If path to parent folder does not exist, it will be
created.
See documentation for :py:func:`os.makedirs` for more details.
:param str file_name: A name of the file for which we want to make sure\
its parent directory exists.
"""
dir_name = os.path.dirname(file_name)
if dir_name != '' and not os.path.isdir(dir_name):
os.makedirs(dir_name)
@staticmethod
def find_files(directory, file_name_pattern, recursively=False):
"""Find files in a directory, possibly, recursively.
Find files which names satisfy *file_name_pattern* pattern in folder
*directory*. If *recursively* is True, scans subfolders as well.
:param str directory: A directory to search files in.
:param str file_name_pattern: A file name pattern to search. For instance,
is can be '*.log'
:param bool recursively: If True, search in subdirectories.
:return: List of file names satisfying *file_name_pattern* pattern.
"""
if not recursively:
files = [f for f in glob(os.path.join(directory, file_name_pattern))]
else:
files = [f for p in os.walk(directory) for f in glob(os.path.join(p[0], file_name_pattern))]
return files
@staticmethod
def gather_files(path_specs, file_name_pattern, recursively=False):
"""Find/get files specified by an `inputs` parameter.
:param list path_specs: A list of file names / directories.
:param str file_name_pattern: A file name pattern to search. Only
used for entries in path_specs that
are directories.
:param bool recursively: If True, search in subdirectories. Only used
for entries in path_specs that are directories.
:return: List of file names satisfying *file_name_pattern* pattern.
"""
files = []
for path_spec in path_specs:
if os.path.isdir(path_spec):
files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively))
elif os.path.isfile(path_spec):
files.append(path_spec)
return files
@staticmethod
def get_non_existing_file(file_name, max_attempts = 1000):
"""Return file name that does not exist.
:param str file_name: Input file name.
:rtype: str
:return: The 'file_name' if this file does not exist, else the first
derived candidate name (file_name.N) that does not exist.
"""
if not os.path.exists(file_name):
return file_name
attempt = 0
while True:
candidate_file_name = "%s.%d" % (file_name, attempt)
if not os.path.exists(candidate_file_name):
return candidate_file_name
attempt += 1
if attempt >= max_attempts:
msg = "Cannot find non existing file from pattern %s"
raise ValueError(msg % file_name)
@staticmethod
def check_file_extensions(fname, extensions):
"""Checks that fname has one of the provided extensions.
:param str fname: The file name to check.
:param tuple extensions: A tuple of extensions to use.
Raises an exception if fname does not end with one of the extensions.
"""
if fname is None:
return
assert isinstance(extensions, tuple), "The 'extensions' must be a tuple."
if not fname.endswith(extensions):
raise ValueError("Invalid file extension (%s). Must be one of %s" % extensions)
@staticmethod
def read_json(fname, check_extension=False):
"""Reads JSON object from file 'fname'.
:param str fname: File name.
:param boolean check_extension: If True, raises exception if fname does not end
with '.json' or '.json.gz'.
:rtype: None or JSON object
:return: None if fname is None, else the JSON object loaded from the file.
"""
if fname is None:
return None
if check_extension:
IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))
with OpenFile(fname, 'r') as fobj:
return json.load(fobj)
@staticmethod
def write_json(fname, data, check_extension=False):
""" Dumps *dictionary* as a json object to a file with *file_name* name.
:param dict dictionary: Dictionary to serialize.
:param any data: A data to dump into a JSON file.
:param str file_name: Name of a file to serialie dictionary in.
"""
if fname is None:
raise ValueError("File name is None")
if check_extension:
IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))
IOUtils.mkdirf(fname)
with OpenFile(fname, 'w') as fobj:
json.dump(data, fobj, indent=4)
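# Round-trip sketch for the two helpers above (path and payload are illustrative):
#   IOUtils.write_json('out/metrics.json.gz', {'top1': 0.76})
#   assert IOUtils.read_json('out/metrics.json.gz') == {'top1': 0.76}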
class DictUtils(object):
"""Container for dictionary helpers."""
@staticmethod
def subdict(dictionary, keys):
"""Return subdictionary containing only keys from 'keys'.
:param dict dictionary: Input dictionary.
:param list_or_val keys: Keys to extract
:rtype: dict
:return: Dictionary that contains key/value pairs for key in keys.
"""
if keys is None:
return dictionary
return dict((k, dictionary[k]) for k in keys if k in dictionary)
@staticmethod
def contains(dictionary, keys):
"""Checkes if dictionary contains all keys in 'keys'
:param dict dictionary: Input dictionary.
:param list_or_val keys: Keys to find in dictionary
:rtype: boolean
:return: True if all keys are in dictionary or keys is None
"""
if keys is None:
return True
keys = keys if isinstance(keys, list) else [keys]
for key in keys:
if key not in dictionary:
return False
return True
@staticmethod
def ensure_exists(dictionary, key, default_value=None):
""" Ensures that the dictionary *dictionary* contains key *key*
If key does not exist, it adds a new item with value *default_value*.
The dictionary is modified in-place.
:param dict dictionary: Dictionary to check.
:param str key: A key that must exist.
:param obj default_value: Default value for key if it does not exist.
"""
if key not in dictionary:
dictionary[key] = copy.deepcopy(default_value)
@staticmethod
def lists_to_strings(dictionary, separator=' '):
""" Converts every value in dictionary that is list to strings.
For every item in *dictionary*, if type of a value is 'list', converts
this list into a string using separator *separator*.
The dictionary is modified in-place.
:param dict dictionary: Dictionary to modify.
:param str separator: An item separator.
"""
for key in dictionary:
if isinstance(dictionary[key], list):
dictionary[key] = separator.join(str(elem) for elem in dictionary[key])
@staticmethod
def filter_by_key_prefix(dictionary, prefix, remove_prefix=True):
"""Creates new dictionary with items which keys start with *prefix*.
Creates new dictionary with items from *dictionary* which keys
names starts with *prefix*. If *remove_prefix* is True, keys in new
dictionary will not contain this prefix.
The dictionary *dictionary* is not modified.
:param dict dictionary: Dictionary to search keys in.
:param str prefix: Prefix of keys to be extracted.
:param bool remove_prefix: If True, remove prefix in returned dictionary.
:return: New dictionary with items whose key names start with *prefix*.
"""
return_dictionary = {}
for key in dictionary:
if key.startswith(prefix):
return_key = key[len(prefix):] if remove_prefix else key
return_dictionary[return_key] = copy.deepcopy(dictionary[key])
return return_dictionary
@staticmethod
def dump_json_to_file(dictionary, file_name):
""" Dumps *dictionary* as a json object to a file with *file_name* name.
:param dict dictionary: Dictionary to serialize.
:param str file_name: Name of a file to serialize dictionary in.
"""
if file_name is not None:
IOUtils.mkdirf(file_name)
with open(file_name, 'w') as file_obj:
json.dump(dictionary, file_obj, indent=4)
@staticmethod
def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):
""" Updates *dictionary* with items from *iterable* object.
This method modifies/updates *dictionary* with items from *iterable*
object. This object must support ``for something in iterable`` (list,
opened file etc). Only those items in *iterable* are considered, that match
*pattern* (it's a regexp expression). If a particular item does not match,
and *must_match* is True, *ConfigurationError* exception is thrown.
Regexp pattern must return two groups (1 and 2). First group is considered
as a key, and second group is considered to be value. Values must be a
json-parseable strings.
If *add_only_keys* is not None, only those items are added to *dictionary*,
that are in this list.
Existing items in *dictionary* are overwritten with new ones if key already
exists.
One use case for this method is to populate a dictionary with key-value pairs
parsed from log files (see the commented example after this method).
:param dict dictionary: Dictionary to update in-place.
:param obj iterable: Iterable object (list, opened file name etc).
:param str pattern: A regexp pattern for matching items in ``iterable``.
:param bool must_match: Specifies if every element in *iterable* must match\
*pattern*. If True and not match, raises exception.
:param list add_only_keys: If not None, specifies keys that are added into\
*dictionary*. Others are ignored.
:param bool ignore_errors: If True, ignore errors.
:raises ConfigurationError: If *must_match* is True and not match or if value\
is not a json-parseable string.
"""
matcher = re.compile(pattern)
for line in iterable:
match = matcher.match(line)
if not match:
if must_match:
raise ConfigurationError("Cannot match key-value from '%s' with pattern '%s'. Must match is set to true" % (line, pattern))
else:
continue
key = match.group(1).strip()
try:
value = match.group(2).strip()
value = json.loads(value) if len(value) > 0 else None
if add_only_keys is None or key in add_only_keys:
dictionary[key] = value
logging.debug("Key-value item (%s=%s) has been parsed and added to dictionary", key, str(value))
except ValueError as err:
if not ignore_errors:
raise ConfigurationError("Cannot parse JSON string '%s' with key '%s' (key-value definition: '%s'). Error is %s" % (value, key, line, str(err)))
@staticmethod
def match(dictionary, query, policy='relaxed', matches=None):
""" Match *query* against *dictionary*.
The *query* and *dictionary* are actually dictionaries. If policy is 'strict',
every key in query must exist in dictionary with the same value to match.
If policy is 'relaxed', dictionary may not contain all keys from query
to be matched. In this case, the intersection of keys in dictionary and query
is used for matching.
It's assumed we match primitive types such as numbers and strings, not
lists or dictionaries. If values in query are lists, then condition OR applies.
For instance:
match(dictionary, query = { "framework": "tensorflow" }, policy='strict')
Match dictionary only if it contains key 'framework' with value "tensorflow".
match(dictionary, query = { "framework": "tensorflow" }, policy='relaxed')
Match dictionary if it does not contain key 'framework' OR contains\
key 'framework' with value "tensorflow".
match(dictionary, query = { "framework": ["tensorflow", "caffe2"] }, policy='strict')
Match dictionary only if it contains key 'framework' with value "tensorflow" OR\
"caffe2".
match(dictionary, query = { "framework": ["tensorflow", "caffe2"], "batch": [16, 32] }, policy='strict')
Match dictionary only if it (a) contains key 'framework' with value "tensorflow" OR "caffe2"\
and (b) it contains key 'batch' with value 16 OR 32.
:param dict dictionary: Dictionary to match.
:param dict query: Query to use.
:param ['relaxed', 'strict'] policy: Policy to match.
:param dict matches: Dictionary where matches will be stored if match has been identified.
:return: True if match or query is None
:rtype: bool
"""
if query is None:
return True
assert policy in ['relaxed', 'strict'], ""
for field, value in query.iteritems():
if field not in dictionary:
if policy == 'relaxed':
continue
else:
return False
if isinstance(value, list) or not isinstance(value, basestring):
values = value if isinstance(value, list) else [value]
if dictionary[field] not in values:
return False
if matches is not None:
matches['%s_0' % (field)] = dictionary[field]
else:
if value == '':
# Take special care if value is an empty string
if value != dictionary[field]:
return False
elif matches is not None:
matches['%s_0' % (field)] = dictionary[field]
continue
else:
match = re.compile(value).match(dictionary[field])
if not match:
return False
else:
if matches is not None:
matches['%s_0' % (field)] = dictionary[field]
for index, group in enumerate(match.groups()):
matches['%s_%d' % (field, index+1)] = group
continue
return True
class ConfigurationLoader(object):
"""Loads experimenter configuration from multiple files."""
@staticmethod
def load(path, files=None):
"""Loads configurations (normally in `conigs`) folder.
:param str path: Path to load configurations from
:param list files: List of file names to load. If None, all files with
JSON extension in **path** are loaded.
:return: A tuple consisting of a list of config files, configuration
object (dictionary) and dictionary of parameters info
This method loads configuration files located in 'path'. If `files` is
empty, all json files are loaded from that folder.
This method fails if one parameter is defined in multiple files. This
is intended behaviour for now (this also applies for update_param_info method).
"""
if path is None:
raise ValueError("Configuration load error. The 'path' parameter cannot be None.")
if not os.path.isdir(path):
raise ValueError("Configuration load error. The 'path' parameter (%s) must point to an existing directory." % path)
if files is not None:
config_files = [os.path.join(path, f) for f in files]
else:
config_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')]
config = {} # Configuration with params/vars/extensions
param_info = {} # Information on params such as type and help messages
for config_file in config_files:
if not os.path.isfile(config_file):
raise ValueError("Configuration load error. Configuration data cannot be loaded for not a file (%s)" % config_file)
with open(config_file) as file_obj:
try:
# A part of global configuration from this particular file
config_section = json.load(file_obj)
# Update parameters info.
ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False)
# Joing configuration from this single file.
ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section))
except ValueError:
logging.error("Configuration load error. Invalid JSON configuration in file %s", config_file)
raise
return (config_files, config, param_info)
@staticmethod
def update_param_info(param_info, config, is_user_config=False):
"""Update parameter info dictionary based on configurationi in **config**
:param dict param_info: A parameter info dictionary that maps parameter
name to its description dictionary that contains
such fields as value, help message, type, constraints
etc.
:param dict config: A dictionary with configuration section that may contain
parameters, variables and extensions. The **config** is
a result of parsing a JSON configuration file.
:param bool is_user_config: If True, the config object represents user-provided
configuration. If False, this is a system configuration.
Based on this flag, we deal with parameters in config
that redefine parameters in existing param_info
differently. See comments below.
We are interested here only in parameters section where parameter information
is defined. There are two scenarios this method is used:
1. Load standard configuration. In this case, parameter redefinition is
prohibited. If `parameters` section in `config` redefines existing
parameters in param_info (already loaded params), program terminates.
2. Load user-provided configuration. In this case, we still update parameter
info structure, but deal with it in slightly different way. If parameter in
`config` exists in param_info, it means user has provided their specific
value for this parameter.
Types of user defined parameters are defined either by user in a standard way as
we define types for standard parameters or induced automatically based on JSON
parse result.
"""
if 'parameters' not in config:
return
params = config['parameters']
for name in params:
val = params[name]
if not is_user_config:
# If this is not a user-provided configuration, we disallow parameter redefinition.
if name in param_info:
raise ConfigurationError(
"Parameter info update error."
" Parameter redefinition is not allowed for non-user configuration."
" This is a system configuration error that must not happen."
" Parameter %s=%s, new parameter definition (value) is %s" % (name, str(param_info[name]), val)
)
if isinstance(val, dict):
# This is a complete parameter definition with name, value and description.
if 'val' not in val:
raise ConfigurationError(
"Parameter info update error."
" Parameter that is defined by a dictionary must contain 'val' field that"
" defines its default value. Found this definition: %s=%s" % (name, val)
)
if name not in param_info:
param_info[name] = copy.deepcopy(val) # New parameter, set it info object.
# TODO what about parameter type and description?
else:
logging.warn(
" Parameter (%s) entirely redefines existing parameter (%s)."
" Normally, only value needs to be provided."
" We will proceed but you may want to fix this.",
json.dumps(val),
json.dumps(param_info[name])
)
param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value
else:
# Just parameter value
val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__
if name not in param_info:
param_info[name] = {
'val': val,
'type': val_type,
'desc': "No description for this parameter provided (it was automatically converted from its value)."
}
else:
param_info[name]['val'] = val
# Do final validations
if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):
raise ConfigurationError(
"Parameter info update error."
" Parameter has invalid type = '%s'."
" Parameter definition is %s = %s" % (param_info[name]['type'], name, param_info[name])
)
if 'type' not in param_info[name] or 'desc' not in param_info[name]:
logging.warn(
"Parameter definition does not contain type ('type') and/or description ('desc')."
" You should fix this. Parameter definition is"
" %s = %s", name, param_info[name]
)
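# The two accepted forms in a config's "parameters" section (names and values are illustrative):
#   "exp.framework": "tensorflow"                                    <- plain value, type inferred
#   "exp.num_warmup_batches": {"val": 1, "type": "int", "desc": "Number of warmup batches"}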
@staticmethod
def remove_info(config):
"""In parameter section of a **config** the function removes parameter info
leaving only their values
:param dict config: A dictionary with configuration section that may contain
parameters, variables and extensions. The **config** is
a result of parsing a JSON configuration file.
:return: A copy of **config** with info removed
"""
clean_config = copy.deepcopy(config)
if 'parameters' in clean_config:
params = clean_config['parameters']
for name in params:
val = params[name]
if isinstance(val, dict):
# This should not generally happen since we deal with it in update_param_info, but just in case
if 'val' not in val:
raise ConfigurationError(
"Parameter info remove error."
" Parameter that is defined by a dictionary must contain 'val' field that"
" defines its default value. Found this definition: %s=%s" % (name, val)
)
params[name] = val['val']
return clean_config
@staticmethod
def update(dest, source, is_root=True):
"""Merge **source** dictionary into **dest** dictionary assuming source
and dest are JSON configuration configs or their members.
:param dict dest: Merge data to this dictionary.
:param dict source: Merge data from this dictionary.
:param bool is_root: True if **dest** and *source** are root configuration
objects. False if these objects are members.
"""
def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types):
raise ConfigurationError(
"Configuration update error - expecting value types to be same and one of %s but"
" Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__)
)
# Types and expected key names. Types must always match, else exception is thrown.
if is_root:
schema = {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']}
else:
schema = {'types':(list, basestring, int, float, long)}
for key in source:
# Firstly, check that type of value is expected.
val_type = type(source[key]).__name__
if not isinstance(source[key], schema['types']):
raise ConfigurationError(
"Configuration update error - unexpected type of key value: "
" is_root=%s, key=%s, value type=%s, expected type is one of %s" % \
(str(is_root), key, val_type, str(schema['types']))
)
# So, the type is expected. Warn if key value is suspicious - we can do it only for root.
if is_root and key not in schema[val_type]:
logging.warn("The name of a root key is '%s' but expected is one of '%s'", key, schema[val_type])
if key not in dest:
# The key in source dictionary is not in destination dictionary.
dest[key] = copy.deepcopy(source[key])
else:
# The key from source is in dest.
both_dicts = isinstance(dest[key], dict) and isinstance(source[key], dict)
both_lists = isinstance(dest[key], list) and isinstance(source[key], list)
both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float, long))
if is_root:
if not both_dicts and not both_lists:
_raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]')
if both_dicts:
ConfigurationLoader.update(dest[key], source[key], is_root=False)
else:
dest[key].extend(source[key])
else:
if not both_lists and not both_primitive:
_raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]')
dest[key] = copy.deepcopy(source[key]) if both_lists else source[key]
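# Merge semantics of ConfigurationLoader.update, sketched on assumed inputs:
#   dest   = {'parameters': {'a': 1}, 'extensions': [ext1]}
#   source = {'parameters': {'a': 2, 'b': 3}, 'extensions': [ext2]}
#   After update(dest, source): dest == {'parameters': {'a': 2, 'b': 3}, 'extensions': [ext1, ext2]}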
class ResourceMonitor(object):
"""The class is responsible for launching/shutting down/communicating with
external resource manager that monitors system resource consumption.
proc_pid date virt res shrd cpu mem power gpus_power
"""
def __init__(self, launcher, pid_folder, frequency, fields_specs):
"""Initializes resource monitor but does not create queue and process.
:param str launcher: A full path to resource monitor script.
:param str pid_folder: A full path to folder where pid file is created. The
file name is fixed and its value is `proc.pid`.
:param float frequency: A sampling frequency in seconds. Can be something like
0.1 seconds
"""
self.launcher = launcher
self.pid_file = os.path.join(pid_folder, 'proc.pid')
self.frequency = frequency
self.queue = None
self.monitor_process = None
# Parse fields specs
# time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8:
self.fields = {}
raw_fields = fields_specs.split(',')
for raw_field in raw_fields:
fields_split = raw_field.split(':')
assert len(fields_split) in (3, 4),\
"Invalid format of field specification (%s). Must be name:type:index, name:type:index: or name:type:index:count" % raw_field
field_name = fields_split[0]
assert field_name not in self.fields,\
"Found duplicate timeseries field (%s)" % field_name
field_type = fields_split[1]
assert field_type in ('str', 'int', 'float', 'bool'),\
"Invalid field type (%s). Must be one of ('str', 'int', 'float', 'bool')" % field_type
index = int(fields_split[2])
if len(fields_split) == 3:
count = -1
elif fields_split[3] == '':
count = 0
else:
count = int(fields_split[3])
self.fields[field_name] = {
'type': field_type,
'index': index,
'count': count
}
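# For example (derived from the format comment above), the spec "gpus:float:8:" produces
# self.fields['gpus'] = {'type': 'float', 'index': 8, 'count': 0}, i.e. a variable-length
# list of per-GPU values starting at column 8, while "time:str:1" gives a single value (count=-1).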
@staticmethod
def monitor_function(launcher, pid_file, frequency, queue):
"""A main monitor worker function.
:param str launcher: A full path to resource monitor script.
:param str pid_file: A full path to the pid file (normally `<pid_folder>/proc.pid`).
:param float frequency: A sampling frequency in seconds. Can be something like
0.1 seconds
:param multiprocessing.Queue queue: A queue to communicate measurements.
A resource monitor is launched as a subprocess. The thread is reading its
output and will put the data into a queue. A main thread will then dequeue all
data at once when the experiment is completed.
"""
cmd = [
launcher,
pid_file,
'',
str(frequency)
]
process = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
# The 'output' is a string printed out by a resource monitor
# script. It's a whitespace separated string of numbers.
queue.put(output.strip())
@staticmethod
def str_to_type(str_val, val_type):
if val_type == 'str':
return str_val
elif val_type == 'int':
return int(str_val)
elif val_type == 'float':
return float(str_val)
elif val_type == 'bool':
v = str_val.lower()
assert v in ('true', 'false', '1', '0', 'on', 'off'),\
"Invalid boolean value in string (%s)" % str_val
return v in ('true', '1', 'on')
else:
assert False, "Invalid value type %s" % val_type
def get_measurements(self):
"""Dequeue all data, put it into lists and return them.
time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8-
:return: Dictionary that maps metric field to a time series of its value.
"""
metrics = {}
for key in self.fields.keys():
metrics[key] = []
# What's in output:
# proc_pid date virt res shrd cpu mem power gpus_power
while not self.queue.empty():
data = self.queue.get().strip().split()
for field in self.fields:
tp = self.fields[field]['type']
idx = self.fields[field]['index']
count = self.fields[field]['count']
if count == -1:
metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))
elif count == 0:
metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])
else:
metrics[field].append([
ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)
])
return metrics
def remove_pid_file(self):
"""Deletes pif file from disk."""
try:
os.remove(self.pid_file)
except OSError:
pass
def empty_pid_file(self):
"""Empty pid file."""
try:
with open(self.pid_file, 'w'):
pass
except IOError:
pass
def write_pid_file(self, pid):
"""Write the pid into pid file.
:param int pid: A pid to write.
This is a debugging function and most likely should not be used.
"""
with open(self.pid_file, 'w') as fhandle:
fhandle.write('%d' % pid)
def run(self):
"""Create queue and start resource monitor in background thread.
Due to possible execution of benchmarks in containers, we must not delete
file here, but create or empty it in host OS.
"""
self.empty_pid_file()
self.queue = Queue()
self.monitor_process = Process(
target=ResourceMonitor.monitor_function,
args=(self.launcher, self.pid_file, self.frequency, self.queue)
)
self.monitor_process.start()
def stop(self):
"""Closes queue and waits for resource monitor to finish."""
with open(self.pid_file, 'w') as fhandle:
fhandle.write('exit')
self.queue.close()
self.queue.join_thread()
self.monitor_process.join()
self.remove_pid_file()
class _ModuleImporter(object):
"""A private class that imports a particular models and return boolean
variable indicating if import has been succesfull or not. Used by a Modules
class to identify if optional python modules are available.
"""
@staticmethod
def try_import(module_name):
"""Tries to import module.
:param str module_name: A name of a module to try to import, something like
'numpy', 'pandas', 'matplotlib' etc.
:return: True if module has been imported, False otherwise.
"""
have_module = True
try:
importlib.import_module(module_name)
except ImportError:
logging.warn("Module '%s' cannot be imported, certain system information will not be available", module_name)
have_module = False
return have_module
class Modules(object):
"""A class that enumerates non-standard python modules this project depends on.
They are optional, so we can disable certain functionality if something is missing.
"""
HAVE_NUMPY = _ModuleImporter.try_import('numpy')
HAVE_PANDAS = _ModuleImporter.try_import('pandas')
HAVE_MATPLOTLIB = _ModuleImporter.try_import('matplotlib')
| [
"os.remove",
"os.walk",
"json.dumps",
"os.path.isfile",
"multiprocessing.Queue",
"os.path.join",
"logging.error",
"json.loads",
"os.path.dirname",
"os.path.exists",
"json.dump",
"copy.deepcopy",
"subprocess.Popen",
"importlib.import_module",
"os.listdir",
"re.compile",
"json.load",
"gzip.open",
"os.makedirs",
"dlbs.exceptions.ConfigurationError",
"os.path.isdir",
"logging.warn",
"multiprocessing.Process"
] | [((2094, 2120), 'os.path.dirname', 'os.path.dirname', (['file_name'], {}), '(file_name)\n', (2109, 2120), False, 'import os\n'), ((12416, 12435), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (12426, 12435), False, 'import re\n'), ((25162, 25183), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (25175, 25183), False, 'import copy\n'), ((30018, 30054), 'os.path.join', 'os.path.join', (['pid_folder', '"""proc.pid"""'], {}), "(pid_folder, 'proc.pid')\n", (30030, 30054), False, 'import os\n'), ((32354, 32454), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'universal_newlines': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, universal_newlines=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n', (32370, 32454), False, 'import subprocess\n'), ((35572, 35579), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (35577, 35579), False, 'from multiprocessing import Queue\n'), ((35611, 35729), 'multiprocessing.Process', 'Process', ([], {'target': 'ResourceMonitor.monitor_function', 'args': '(self.launcher, self.pid_file, self.frequency, self.queue)'}), '(target=ResourceMonitor.monitor_function, args=(self.launcher, self.\n pid_file, self.frequency, self.queue))\n', (35618, 35729), False, 'from multiprocessing import Process\n'), ((1274, 1314), 'gzip.open', 'gzip.open', (['self.__fname', 'self.__flags[0]'], {}), '(self.__fname, self.__flags[0])\n', (1283, 1314), False, 'import gzip\n'), ((2192, 2213), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (2203, 2213), False, 'import os\n'), ((3865, 3889), 'os.path.isdir', 'os.path.isdir', (['path_spec'], {}), '(path_spec)\n', (3878, 3889), False, 'import os\n'), ((4444, 4469), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (4458, 4469), False, 'import os\n'), ((6128, 6143), 'json.load', 'json.load', (['fobj'], {}), '(fobj)\n', (6137, 6143), False, 'import json\n'), ((6762, 6793), 'json.dump', 'json.dump', (['data', 'fobj'], {'indent': '(4)'}), '(data, fobj, indent=4)\n', (6771, 6793), False, 'import json\n'), ((8401, 8429), 'copy.deepcopy', 'copy.deepcopy', (['default_value'], {}), '(default_value)\n', (8414, 8429), False, 'import copy\n'), ((17838, 17857), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (17851, 17857), False, 'import os\n'), ((26564, 26805), 'dlbs.exceptions.ConfigurationError', 'ConfigurationError', (["('Configuration update error - expecting value types to be same and one of %s but Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)'\n % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__))"], {}), "(\n 'Configuration update error - expecting value types to be same and one of %s but Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)'\n % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__))\n", (26582, 26805), False, 'from dlbs.exceptions import ConfigurationError\n'), ((34743, 34767), 'os.remove', 'os.remove', (['self.pid_file'], {}), '(self.pid_file)\n', (34752, 34767), False, 'import os\n'), ((36729, 36765), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (36752, 36765), False, 'import importlib\n'), ((2155, 2178), 'os.path.isdir', 'os.path.isdir', (['dir_name'], {}), '(dir_name)\n', (2168, 2178), False, 'import os\n'), ((4000, 4025), 'os.path.isfile', 'os.path.isfile', (['path_spec'], {}), '(path_spec)\n', (4014, 4025), False, 'import os\n'), ((4624, 4659), 'os.path.exists', 'os.path.exists', 
(['candidate_file_name'], {}), '(candidate_file_name)\n', (4638, 4659), False, 'import os\n'), ((9983, 10013), 'copy.deepcopy', 'copy.deepcopy', (['dictionary[key]'], {}), '(dictionary[key])\n', (9996, 10013), False, 'import copy\n'), ((10478, 10519), 'json.dump', 'json.dump', (['dictionary', 'file_obj'], {'indent': '(4)'}), '(dictionary, file_obj, indent=4)\n', (10487, 10519), False, 'import json\n'), ((18046, 18067), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (18058, 18067), False, 'import os\n'), ((18126, 18147), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (18138, 18147), False, 'import os\n'), ((18413, 18440), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (18427, 18440), False, 'import os\n'), ((24050, 24230), 'dlbs.exceptions.ConfigurationError', 'ConfigurationError', (['("Parameter info update error. Parameter has invalid type = \'%s\'. Parameter definition is %s = %s"\n % (param_info[name][\'type\'], name, param_info[name]))'], {}), '(\n "Parameter info update error. Parameter has invalid type = \'%s\'. Parameter definition is %s = %s"\n % (param_info[name][\'type\'], name, param_info[name]))\n', (24068, 24230), False, 'from dlbs.exceptions import ConfigurationError\n'), ((24402, 24585), 'logging.warn', 'logging.warn', (['"""Parameter definition does not contain type (\'type\') and/or description (\'desc\'). You should fix this. Parameter definition is %s = %s"""', 'name', 'param_info[name]'], {}), '(\n "Parameter definition does not contain type (\'type\') and/or description (\'desc\'). You should fix this. Parameter definition is %s = %s"\n , name, param_info[name])\n', (24414, 24585), False, 'import logging\n'), ((27817, 27918), 'logging.warn', 'logging.warn', (['"""The name of a root key is \'%s\' but expected is one of \'%s\'"""', 'key', 'schema[val_type]'], {}), '("The name of a root key is \'%s\' but expected is one of \'%s\'",\n key, schema[val_type])\n', (27829, 27918), False, 'import logging\n'), ((28057, 28083), 'copy.deepcopy', 'copy.deepcopy', (['source[key]'], {}), '(source[key])\n', (28070, 28083), False, 'import copy\n'), ((36806, 36925), 'logging.warn', 'logging.warn', (['"""Module \'%s\' cannot be imported, certain system information will not be available"""', 'module_name'], {}), '(\n "Module \'%s\' cannot be imported, certain system information will not be available"\n , module_name)\n', (36818, 36925), False, 'import logging\n'), ((3028, 3046), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (3035, 3046), False, 'import os\n'), ((12589, 12716), 'dlbs.exceptions.ConfigurationError', 'ConfigurationError', (['("Cannot match key-value from \'%s\' with pattern \'%s\'. Must match is set to true"\n % (line, pattern))'], {}), '(\n "Cannot match key-value from \'%s\' with pattern \'%s\'. Must match is set to true"\n % (line, pattern))\n', (12607, 12716), False, 'from dlbs.exceptions import ConfigurationError\n'), ((12887, 12904), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (12897, 12904), False, 'import json\n'), ((18157, 18173), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (18167, 18173), False, 'import os\n'), ((18759, 18778), 'json.load', 'json.load', (['file_obj'], {}), '(file_obj)\n', (18768, 18778), False, 'import json\n'), ((22304, 22506), 'dlbs.exceptions.ConfigurationError', 'ConfigurationError', (['("Parameter info update error. Parameter that is defined by a dictionary must contain \'val\' field that defines its default value. 
Found this definition: %s=%s"\n % (name, val))'], {}), '(\n "Parameter info update error. Parameter that is defined by a dictionary must contain \'val\' field that defines its default value. Found this definition: %s=%s"\n % (name, val))\n', (22322, 22506), False, 'from dlbs.exceptions import ConfigurationError\n'), ((22679, 22697), 'copy.deepcopy', 'copy.deepcopy', (['val'], {}), '(val)\n', (22692, 22697), False, 'import copy\n'), ((2937, 2979), 'os.path.join', 'os.path.join', (['directory', 'file_name_pattern'], {}), '(directory, file_name_pattern)\n', (2949, 2979), False, 'import os\n'), ((3061, 3098), 'os.path.join', 'os.path.join', (['p[0]', 'file_name_pattern'], {}), '(p[0], file_name_pattern)\n', (3073, 3098), False, 'import os\n'), ((19157, 19255), 'logging.error', 'logging.error', (['"""Configuration load error. Invalid JSON configuration in file %s"""', 'config_file'], {}), "('Configuration load error. Invalid JSON configuration in file %s'\n , config_file)\n", (19170, 19255), False, 'import logging\n'), ((23116, 23131), 'json.dumps', 'json.dumps', (['val'], {}), '(val)\n', (23126, 23131), False, 'import json\n'), ((23157, 23185), 'json.dumps', 'json.dumps', (['param_info[name]'], {}), '(param_info[name])\n', (23167, 23185), False, 'import json\n'), ((25570, 25772), 'dlbs.exceptions.ConfigurationError', 'ConfigurationError', (['("Parameter info remove error. Parameter that is defined by a dictionary must contain \'val\' field that defines its default value. Found this definition: %s=%s"\n % (name, val))'], {}), '(\n "Parameter info remove error. Parameter that is defined by a dictionary must contain \'val\' field that defines its default value. Found this definition: %s=%s"\n % (name, val))\n', (25588, 25772), False, 'from dlbs.exceptions import ConfigurationError\n'), ((29126, 29152), 'copy.deepcopy', 'copy.deepcopy', (['source[key]'], {}), '(source[key])\n', (29139, 29152), False, 'import copy\n'), ((16354, 16371), 're.compile', 're.compile', (['value'], {}), '(value)\n', (16364, 16371), False, 'import re\n')] |
import json
import os
import requests
from client import Submission
from settings import API_BASE_URL, ACCESS_TOKEN
def get_task_url(task_id: int):
return API_BASE_URL + f"/tasks/{task_id}/download_grader/"
def get_agent_url(submission_id: int):
return API_BASE_URL + f"/submissions/{submission_id}/download/"
def start_job(job_id, task_id) -> Submission:
worker_name = "unknown_worker"
if os.getenv("WORKER_NAME") is not None:
worker_name = os.getenv("WORKER_NAME")
resp = requests.get(API_BASE_URL + f"/jobs/{job_id}/start_job/",
headers={"Authorization": f"Token {ACCESS_TOKEN}"},
data={
"worker_name": worker_name,
"task_id": task_id
})
if resp.status_code != 200:
raise Exception(resp.content)
obj = json.loads(resp.content)
return Submission(sid=obj["submission"], task_url=get_task_url(obj["task"]),
agent_url=get_agent_url(obj["submission"]))
def submit_job(job_id, task_id, result):
resp = requests.get(API_BASE_URL + f"/jobs/{job_id}/submit_job/",
headers={"Authorization": f"Token {ACCESS_TOKEN}"},
data={
"result": result,
"task_id": task_id
})
return resp
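# Sketch of a worker loop built on these helpers (ids and the grading step are made up):
#   submission = start_job(job_id=42, task_id=7)
#   result = run_grader(submission)  # hypothetical step using submission.task_url / submission.agent_url
#   submit_job(job_id=42, task_id=7, result=result)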
| [
"os.getenv",
"json.loads",
"requests.get"
] | [((510, 683), 'requests.get', 'requests.get', (["(API_BASE_URL + f'/jobs/{job_id}/start_job/')"], {'headers': "{'Authorization': f'Token {ACCESS_TOKEN}'}", 'data': "{'worker_name': worker_name, 'task_id': task_id}"}), "(API_BASE_URL + f'/jobs/{job_id}/start_job/', headers={\n 'Authorization': f'Token {ACCESS_TOKEN}'}, data={'worker_name':\n worker_name, 'task_id': task_id})\n", (522, 683), False, 'import requests\n'), ((885, 909), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (895, 909), False, 'import json\n'), ((1111, 1275), 'requests.get', 'requests.get', (["(API_BASE_URL + f'/jobs/{job_id}/submit_job/')"], {'headers': "{'Authorization': f'Token {ACCESS_TOKEN}'}", 'data': "{'result': result, 'task_id': task_id}"}), "(API_BASE_URL + f'/jobs/{job_id}/submit_job/', headers={\n 'Authorization': f'Token {ACCESS_TOKEN}'}, data={'result': result,\n 'task_id': task_id})\n", (1123, 1275), False, 'import requests\n'), ((414, 438), 'os.getenv', 'os.getenv', (['"""WORKER_NAME"""'], {}), "('WORKER_NAME')\n", (423, 438), False, 'import os\n'), ((474, 498), 'os.getenv', 'os.getenv', (['"""WORKER_NAME"""'], {}), "('WORKER_NAME')\n", (483, 498), False, 'import os\n')] |
import time
import logging
from extended_networkx_tools import Analytics, AnalyticsGraph
from timeit import default_timer as timer
from utils import Solvers
from utils.GraphUtils import GraphUtils
from utils.ServerUtil import ServerUtil
from datetime import datetime
class GraphThread:
@staticmethod
def start_thread(base_url, client_name, thread_id, color=None, recalc=False):
current_sleep = 10
gt = GraphThread(base_url, client_name, thread_id, color)
while True:
try:
gt.run(recalc)
current_sleep = 10
except Exception as e:
logging.exception("Failed when running thread")
gt.print('Crashed, restarting in %d seconds' % current_sleep, Styles.FAIL)
time.sleep(current_sleep)
current_sleep += 10
client_name: str
server: ServerUtil
thread_id: int
color: None
def __init__(self, base_url, client_name, thread_id, color):
self.client_name = client_name
self.thread_id = thread_id
self.server = ServerUtil(base_url)
self.color = color
def run(self, recalc=False):
# Get a new task from the server
task = self.get_task(recalc)
self.print("(%d) Received graph (%d nodes), type %s" % (task['Id'], task['NodeCount'], task['SolveType']))
# Solve it and get a graph
start = timer()
analytics_graph, custom_data = self.solve_task(task=task)
end = timer()
# Calculate deltatime
delta_time = end - start
time_minutes = round((delta_time / 60)-0.49)
time_seconds = round(delta_time % 60)
self.print("(%d) Solved graph (%d nodes) in %sm %ss" %
(task['Id'], task['NodeCount'], time_minutes, time_seconds))
# Get the results
results = GraphUtils.get_results(analytics_graph=analytics_graph, task=task, custom_data=custom_data)
# Upload the results to the server
self.upload_results(results=results, analytics_graph=analytics_graph)
self.print("(%d) Uploaded results (%d nodes)" % (task['Id'], task['NodeCount']))
def get_task(self, recalc=False):
if recalc:
task = self.server.get_recalc_task()
else:
task = self.server.get_task(self.client_name)
return task
@staticmethod
def solve_task(task) -> (AnalyticsGraph, object):
solve_type = task['SolveType']
if solve_type == 'diff':
return Solvers.Diff.solve(task)
elif solve_type == 'spec':
return Solvers.Spec.solve(task)
elif solve_type == 'random':
return Solvers.Random.solve(task)
elif solve_type == 'field' or solve_type == 'dfield' or solve_type == 'sfield' or solve_type == 'sfield_fr':
return Solvers.Field.solve(task)
else:
return Solvers.Random.solve(task)
def upload_results(self, results, analytics_graph: AnalyticsGraph):
worker_id = results['Id']
self.server.upload_results(worker_id, results)
self.server.upload_results(worker_id, {'Nodes': Analytics.get_node_dict(analytics_graph.graph())})
self.server.upload_results(worker_id, {'Edges': Analytics.get_edge_dict(analytics_graph.graph())})
def print(self, msg, type=None):
start_color = None
if type is None:
start_color = self.color
ts = datetime.now().strftime('%H:%M:%S')
print("%s%s%s %s P%d: %s%s" %
(Styles.BOLD, ts, Styles.ENDC, start_color, self.thread_id, msg, Styles.ENDC))
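# Typical entry point (base_url and worker name are placeholders):
#   GraphThread.start_thread('http://localhost:8000', 'worker-a', thread_id=0, color=Styles.OKGREEN)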
class Styles:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m' | [
"utils.Solvers.Diff.solve",
"utils.GraphUtils.GraphUtils.get_results",
"logging.exception",
"utils.Solvers.Random.solve",
"timeit.default_timer",
"utils.Solvers.Field.solve",
"time.sleep",
"utils.ServerUtil.ServerUtil",
"utils.Solvers.Spec.solve",
"datetime.datetime.now"
] | [((1097, 1117), 'utils.ServerUtil.ServerUtil', 'ServerUtil', (['base_url'], {}), '(base_url)\n', (1107, 1117), False, 'from utils.ServerUtil import ServerUtil\n'), ((1424, 1431), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1429, 1431), True, 'from timeit import default_timer as timer\n'), ((1512, 1519), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1517, 1519), True, 'from timeit import default_timer as timer\n'), ((1871, 1966), 'utils.GraphUtils.GraphUtils.get_results', 'GraphUtils.get_results', ([], {'analytics_graph': 'analytics_graph', 'task': 'task', 'custom_data': 'custom_data'}), '(analytics_graph=analytics_graph, task=task,\n custom_data=custom_data)\n', (1893, 1966), False, 'from utils.GraphUtils import GraphUtils\n'), ((2537, 2561), 'utils.Solvers.Diff.solve', 'Solvers.Diff.solve', (['task'], {}), '(task)\n', (2555, 2561), False, 'from utils import Solvers\n'), ((2616, 2640), 'utils.Solvers.Spec.solve', 'Solvers.Spec.solve', (['task'], {}), '(task)\n', (2634, 2640), False, 'from utils import Solvers\n'), ((3465, 3479), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3477, 3479), False, 'from datetime import datetime\n'), ((638, 685), 'logging.exception', 'logging.exception', (['"""Failed when running thread"""'], {}), "('Failed when running thread')\n", (655, 685), False, 'import logging\n'), ((793, 818), 'time.sleep', 'time.sleep', (['current_sleep'], {}), '(current_sleep)\n', (803, 818), False, 'import time\n'), ((2697, 2723), 'utils.Solvers.Random.solve', 'Solvers.Random.solve', (['task'], {}), '(task)\n', (2717, 2723), False, 'from utils import Solvers\n'), ((2860, 2885), 'utils.Solvers.Field.solve', 'Solvers.Field.solve', (['task'], {}), '(task)\n', (2879, 2885), False, 'from utils import Solvers\n'), ((2919, 2945), 'utils.Solvers.Random.solve', 'Solvers.Random.solve', (['task'], {}), '(task)\n', (2939, 2945), False, 'from utils import Solvers\n')] |
from django import forms
SACCO_DRIVER_STATUS_OPTIONS = [
    ('Approved', 'Approved to operate'),
    ('Suspended', 'Suspended for the time being'),
    ('Blacklisted', 'Blacklisted from operating')
]
class VehicleForm(forms.Form):
# sacco = forms.CharField(label="Sacco", max_length=100)
regno = forms.CharField(label="Registration Number", max_length=7)
# def get_sacco(self):
# """Return the name of the sacco."""
# return self.sacco
    def get_regno(self):
        """Return the cleaned registration number of the vehicle."""
        # Field values live in cleaned_data after is_valid(), not on the instance.
        return self.cleaned_data.get("regno")
class DriverForm(forms.Form):
"""Viewset for add driver."""
national_id = forms.CharField(max_length=8)
# first_name = forms.CharField(max_length=10)
# last_name = forms.CharField(max_length=10)
# # sacco = forms.CharField(max_length=10)
# email = forms.CharField(max_length=15)
# phone_number = forms.CharField(max_length=12)
class UpdateSaccoDriverStatusForm(forms.Form):
"""."""
status = forms.CharField(
widget=forms.Select(choices=SACCO_DRIVER_STATUS_OPTIONS)
)
description = forms.CharField(widget=forms.Textarea)
class SearchDriverIdForm(forms.Form):
"""Search for a driver."""
national_id = forms.CharField(max_length=10, help_text="Enter driver id")
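# Usage sketch (hypothetical view code, not part of this module): the forms above
# are consumed like any Django form, e.g.
#
#     form = UpdateSaccoDriverStatusForm(request.POST)
#     if form.is_valid():
#         status = form.cleaned_data["status"]
#         description = form.cleaned_data["description"]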
| [
"django.forms.CharField",
"django.forms.Select"
] | [((314, 372), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Registration Number"""', 'max_length': '(7)'}), "(label='Registration Number', max_length=7)\n", (329, 372), False, 'from django import forms\n'), ((658, 687), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(8)'}), '(max_length=8)\n', (673, 687), False, 'from django import forms\n'), ((1111, 1149), 'django.forms.CharField', 'forms.CharField', ([], {'widget': 'forms.Textarea'}), '(widget=forms.Textarea)\n', (1126, 1149), False, 'from django import forms\n'), ((1239, 1298), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(10)', 'help_text': '"""Enter driver id"""'}), "(max_length=10, help_text='Enter driver id')\n", (1254, 1298), False, 'from django import forms\n'), ((1037, 1086), 'django.forms.Select', 'forms.Select', ([], {'choices': 'SACCO_DRIVER_STATUS_OPTIONS'}), '(choices=SACCO_DRIVER_STATUS_OPTIONS)\n', (1049, 1086), False, 'from django import forms\n')] |
from typing import Optional
from talon import Context
from user.emacs.utils.voicemacs import rpc_call
from user.utils.formatting import SurroundingText
context = Context()
context.matches = r"""
tag: user.emacs
"""
@context.action_class("self")
class UserActions:
def surrounding_text() -> Optional[SurroundingText]:
# TODO: If the voicemacs server is inactive, return nothing.
raw_info = rpc_call(
"voicemacs-surrounding-text",
[":chars-before", 30000, ":chars-after", 30000],
# Use a very long timeout
timeout=10,
)
return SurroundingText(
text_before=raw_info["text-before"], text_after=raw_info["text-after"]
)
| [
"talon.Context",
"user.emacs.utils.voicemacs.rpc_call",
"user.utils.formatting.SurroundingText"
] | [((166, 175), 'talon.Context', 'Context', ([], {}), '()\n', (173, 175), False, 'from talon import Context\n'), ((416, 519), 'user.emacs.utils.voicemacs.rpc_call', 'rpc_call', (['"""voicemacs-surrounding-text"""', "[':chars-before', 30000, ':chars-after', 30000]"], {'timeout': '(10)'}), "('voicemacs-surrounding-text', [':chars-before', 30000,\n ':chars-after', 30000], timeout=10)\n", (424, 519), False, 'from user.emacs.utils.voicemacs import rpc_call\n'), ((616, 708), 'user.utils.formatting.SurroundingText', 'SurroundingText', ([], {'text_before': "raw_info['text-before']", 'text_after': "raw_info['text-after']"}), "(text_before=raw_info['text-before'], text_after=raw_info[\n 'text-after'])\n", (631, 708), False, 'from user.utils.formatting import SurroundingText\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import zipfile
def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None):
if pitch_file is not None:
files = [nvm_file, target_file, pitch_file]
arc_names = ['target.nvm', 'target.pb', 'pitch.pb']
else:
files = [nvm_file, target_file]
arc_names = ['target.nvm', 'target.pb']
with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
        for path, arc_name in zip(files, arc_names):
            new_zip.write(path, arcname=arc_name)
if __name__ == '__main__':
output_file = 'outputs/target.nvz'
if len(sys.argv) > 1:
output_file = sys.argv[1]
nvm_file = 'outputs/target.nvm'
target_file = 'outputs/target.pb'
pitch_file = 'outputs/pitch.pb'
if len(sys.argv) == 5:
nvm_file = sys.argv[2]
target_file = sys.argv[3]
pitch_file = sys.argv[4]
make_nvz_main(output_file, nvm_file, target_file, pitch_file)
| [
"zipfile.ZipFile"
] | [((417, 484), 'zipfile.ZipFile', 'zipfile.ZipFile', (['output_file', '"""w"""'], {'compression': 'zipfile.ZIP_DEFLATED'}), "(output_file, 'w', compression=zipfile.ZIP_DEFLATED)\n", (432, 484), False, 'import zipfile\n')] |
import click
from mutacc.mutaccDB.remove_case import remove_case_from_db
@click.command('remove')
@click.argument('case_id')
@click.pass_context
def remove_command(context, case_id):
"""
Deletes case from mutacc DB
"""
adapter = context.obj['adapter']
remove_case_from_db(adapter, case_id)
| [
"mutacc.mutaccDB.remove_case.remove_case_from_db",
"click.argument",
"click.command"
] | [((76, 99), 'click.command', 'click.command', (['"""remove"""'], {}), "('remove')\n", (89, 99), False, 'import click\n'), ((101, 126), 'click.argument', 'click.argument', (['"""case_id"""'], {}), "('case_id')\n", (115, 126), False, 'import click\n'), ((280, 317), 'mutacc.mutaccDB.remove_case.remove_case_from_db', 'remove_case_from_db', (['adapter', 'case_id'], {}), '(adapter, case_id)\n', (299, 317), False, 'from mutacc.mutaccDB.remove_case import remove_case_from_db\n')] |
# encoding: utf-8
# http://www.hexblog.com/?p=120
# Default IDA Pro Paths:
# MAC /Applications/IDA\ Pro\ X/idaq.app/Contents/MacOS/plugins/
# Windows C:\Program Files (x86)\IDA X\plugins
# to make it autoexec on openfile
# add this to plugins.cfg
# ; Other plugins
#FullColor FullColor.py 0 0 SILENT
# thanks @JR0driguezB for help :)
from __future__ import print_function
from idautils import Heads
from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM
import idaapi
#idaapi.auto_wait()
PLUGIN_TEST = 1
class FullColor_t(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Set colors :)"
help = "No help needed"
wanted_name = "FullColor"
wanted_hotkey = ""
def init(self):
#idaapi.msg("init() called!\n")
#self.run(0)
return idaapi.PLUGIN_OK
def run(self, arg=0):
print("hell2")
idaapi.msg("run() called with %d!\n" % arg)
heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea()))
funcCalls = []
xor = []
antiVM = []
for i in heads:
# Color the Calls off-white
if print_insn_mnem(i) == "call":
funcCalls.append(i)
# Color Anti-VM instructions Red and print their location
elif print_insn_mnem(i) in ("sidt", "sgdt", "sldt", "smsw", "str", "in", "cpuid"):
antiVM.append(i)
# Color non-zeroing out xor instructions Orange
elif print_insn_mnem(i) == "xor" and (print_operand(i,0) != print_operand(i,1)):
xor.append(i)
print("Number of calls: %d" % (len(funcCalls)))
for i in funcCalls:
set_color(i, CIC_ITEM, 0xc7fdff)
print("Number of potential Anti-VM instructions: %d" % (len(antiVM)))
for i in antiVM:
print("Anti-VM potential at %x" % i)
set_color(i, CIC_ITEM, 0x0000ff)
print("Number of xor: %d" % (len(xor)))
for i in xor:
set_color(i, CIC_ITEM, 0x00a5ff)
def term(self):
idaapi.msg("term() called!\n")
def PLUGIN_ENTRY():
return FullColor_t()
if PLUGIN_TEST:
# Create form
f = PLUGIN_ENTRY()
f.init()
f.run()
f.term()
| [
"idaapi.msg",
"idc.set_color",
"idc.get_screen_ea",
"idc.print_operand",
"idc.print_insn_mnem"
] | [((952, 995), 'idaapi.msg', 'idaapi.msg', (["('run() called with %d!\\n' % arg)"], {}), "('run() called with %d!\\n' % arg)\n", (962, 995), False, 'import idaapi\n'), ((2142, 2172), 'idaapi.msg', 'idaapi.msg', (['"""term() called!\n"""'], {}), "('term() called!\\n')\n", (2152, 2172), False, 'import idaapi\n'), ((1766, 1798), 'idc.set_color', 'set_color', (['i', 'CIC_ITEM', '(13106687)'], {}), '(i, CIC_ITEM, 13106687)\n', (1775, 1798), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1964, 1991), 'idc.set_color', 'set_color', (['i', 'CIC_ITEM', '(255)'], {}), '(i, CIC_ITEM, 255)\n', (1973, 1991), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((2080, 2109), 'idc.set_color', 'set_color', (['i', 'CIC_ITEM', '(42495)'], {}), '(i, CIC_ITEM, 42495)\n', (2089, 2109), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1033, 1048), 'idc.get_screen_ea', 'get_screen_ea', ([], {}), '()\n', (1046, 1048), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1064, 1079), 'idc.get_screen_ea', 'get_screen_ea', ([], {}), '()\n', (1077, 1079), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1221, 1239), 'idc.print_insn_mnem', 'print_insn_mnem', (['i'], {}), '(i)\n', (1236, 1239), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1374, 1392), 'idc.print_insn_mnem', 'print_insn_mnem', (['i'], {}), '(i)\n', (1389, 1392), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1563, 1581), 'idc.print_insn_mnem', 'print_insn_mnem', (['i'], {}), '(i)\n', (1578, 1581), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1596, 1615), 'idc.print_operand', 'print_operand', (['i', '(0)'], {}), '(i, 0)\n', (1609, 1615), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n'), ((1618, 1637), 'idc.print_operand', 'print_operand', (['i', '(1)'], {}), '(i, 1)\n', (1631, 1637), False, 'from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM\n')] |
import os
import random
import subprocess
MYPATH = './out-of-dropbox-2020-08to12-'
FILES = os.listdir(MYPATH)
INP = ''
while INP != 'q':
    INP = input('q to quit, enter anything else to continue')
    if INP == 'q':
        # Stop before opening another file once the user has asked to quit.
        break
    file_choice = random.choice(FILES)
pathname_choice = MYPATH + '/' + file_choice
subprocess.run(["open", pathname_choice])
| [
"subprocess.run",
"random.choice",
"os.listdir"
] | [((92, 110), 'os.listdir', 'os.listdir', (['MYPATH'], {}), '(MYPATH)\n', (102, 110), False, 'import os\n'), ((218, 238), 'random.choice', 'random.choice', (['FILES'], {}), '(FILES)\n', (231, 238), False, 'import random\n'), ((292, 333), 'subprocess.run', 'subprocess.run', (["['open', pathname_choice]"], {}), "(['open', pathname_choice])\n", (306, 333), False, 'import subprocess\n')] |
import argparse
import json
from multiprocessing import Pool
from puyotable.canonization import canonize_deals
def all_deals(num_deals, num_colors):
if not num_deals:
return [[]]
result = []
for c0 in range(num_colors):
for c1 in range(num_colors):
for deals in all_deals(num_deals - 1, num_colors):
result.append(deals + [(c0, c1)])
return result
def for_all_deals(num_deals, num_colors, callback, prefix=[]):
if not num_deals:
callback(prefix)
return
for c0 in range(num_colors):
for c1 in range(num_colors):
for_all_deals(
num_deals - 1,
num_colors,
callback,
prefix + [(c0, c1)]
)
def unique_deals(num_deals, num_colors, prefix_=[]):
canonized = set()
def callback(deals):
canonized.add(canonize_deals(deals, num_colors))
# Known symmetry reduction
prefix = [(0, 0)] + prefix_
for_all_deals(num_deals - 1, num_colors, callback, prefix)
prefix = [(0, 1)] + prefix_
for_all_deals(num_deals - 1, num_colors, callback, prefix)
return canonized
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Tabulate all opening sequences in Puyo Puyo.'
)
parser.add_argument(
'num_colors', metavar='colors', type=int,
help='Number of available colors'
)
parser.add_argument(
'depth', metavar='depth', type=int,
help='How many pieces deep to tabulate'
)
parser.add_argument(
'--outfile', metavar='f', type=str,
help='Filename for JSON output'
)
args = parser.parse_args()
canonized = set()
if args.depth > 1:
prefixes = [[(c0, c1)] for c0 in range(args.num_colors) for c1 in range(args.num_colors)] # noqa
process_args = [
(args.depth - 1, args.num_colors, prefix) for prefix in prefixes
]
pool = Pool()
subsets = pool.starmap(unique_deals, process_args)
for subset in subsets:
canonized.update(subset)
else:
canonized = unique_deals(args.depth, args.num_colors)
print("Found", len(canonized), "unique sequences.")
if args.outfile:
with open(args.outfile, 'w') as f:
json.dump(sorted(canonized), f)
print("Saved result to", args.outfile)
| [
"puyotable.canonization.canonize_deals",
"argparse.ArgumentParser",
"multiprocessing.Pool"
] | [((1216, 1304), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tabulate all opening sequences in Puyo Puyo."""'}), "(description=\n 'Tabulate all opening sequences in Puyo Puyo.')\n", (1239, 1304), False, 'import argparse\n'), ((1988, 1994), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (1992, 1994), False, 'from multiprocessing import Pool\n'), ((895, 928), 'puyotable.canonization.canonize_deals', 'canonize_deals', (['deals', 'num_colors'], {}), '(deals, num_colors)\n', (909, 928), False, 'from puyotable.canonization import canonize_deals\n')] |
import matplotlib.pyplot as plt
import numpy as np
import numpy.polynomial.polynomial as nppol
class Metawalk:
def __init__(self,
time_intervals=None,
nodes=None,
):
"""
A basic constructor for a ``Metwalks`` object
:param times : A list of couples of floats which represents times corresponding to the time intervals
:param links : A list of nodes. (first node = source ; last node = destination)
"""
self.time_intervals = time_intervals
self.nodes = nodes
def add_link(self, l, t):
self.time_intervals.append(t)
self.nodes.append(l)
def length(self):
return len(self.time_intervals)
def duration(self):
return self.time_intervals[-1][1] - self.time_intervals[0][0]
def clone(self):
return Metawalk(self.time_intervals[:],self.nodes[:])
def __hash__(self):
m = tuple(self.nodes)
n = tuple(self.time_intervals)
return hash((m,n))
def __str__(self):
s = ""
for i in range(0,self.length()):
s += " "
s += str(self.nodes[i])
s += " "
s += str(self.time_intervals[i])
s += " "
s += str(self.nodes[i+1])
s += " | volume = "
s += str(self.volume())
return s
def __repr__(self):
return self.__str__()
def __eq__(self, m):
if m == None:
return False
if m.length() != self.length():
return False
if (m.nodes == self.nodes) and (m.time_intervals == self.time_intervals):
return True
return False
def is_instantenous(self):
        # We check from the last interval because the algorithm that uses this
        # method appends new links to the end of the metawalk.
if len(self.time_intervals) == 1:
return True
x = self.time_intervals[-1]
for i in range(-2,-len(self.time_intervals)-1,-1):
if self.time_intervals[i] != x:
return False
return True
def update_following_last(self,b):
#sometimes when adding a metaedge the metawalk has to be cut at some points because some paths are no longer valid.
if b == 0:
#last edge added ends at same time but starts before
self.time_intervals[-1][0] = self.time_intervals[-2][0]
else:
end = self.time_intervals[-1][1]
# last edge starts at same time but ends before
for i in range(-2,-len(self.time_intervals)-1,-1):
if self.time_intervals[i][1] > end:
self.time_intervals[i][1] = end
def volume(self):
"""Normally the link are either exactly the same or disjoint, need to check for inclusion, exclusion of intervals """
time_intervals = self.time_intervals[:]
time_intervals.append([-1,-1])
res = [0 for i in range(len(time_intervals)+ 1)]
last_x,last_y = time_intervals[0]
b = True
if len(time_intervals)==1:
last_x,last_y = time_intervals[0]
if last_x != last_y:
b = False
res[1] = np.around((last_y - last_x), decimals=2)
else:
if last_x == last_y:
degree = 0
else:
degree = 1
for i in range(1,len(time_intervals)):
if last_x != last_y:
b = False
x,y = time_intervals[i]
            # It should be enough to check one bound (links do not overlap in
            # fragmented link streams), but it may be worth generalising this
            # later. Update: that assumption is false, e.g. [1,2] vs [1,1].
if x == last_x and y == last_y and degree > 0:
degree += 1
else:
res[degree] += np.around((last_y - last_x)/np.math.factorial(degree), decimals=2)
if x != y:
degree = 1
last_x = x
last_y = y
if b == True:
res[0] = 1
res = [np.around(e,decimals=2) for e in res]
return nppol.Polynomial(res)
def passes_through(self,t,v):
if v in self.nodes:
indice = self.nodes.index(v)
else:
return False
if indice == 0:
if t < self.time_intervals[0][0]:
return True
else:
return False
elif indice == len(self.nodes) -1:
if t >= self.time_intervals[-1][1]:
return True
else:
return False
else:
if t >= self.time_intervals[indice-1][1] and t < self.time_intervals[indice][0]:
return True
else:
return False
def passes_through_whole_interval(self,v,t1,t2):
return False
def passes_through_somewhere_interval(self,v,t1,t2):
#t1 included, but t2 not
return False
def add_interval_betweenness(self,t_max,interval_size):
res = []
for i in range(0,len(self.time_intervals)-1):
left_bound = self.time_intervals[i][1]
right_bound = self.time_intervals[i+1][0]
nb_interval_contributes_to = (left_bound - right_bound) // interval_size
fst_interval_left_bound = left_bound // interval_size
for j in range(1,nb_interval_contributes_to+1):
res.append((self.nodes[i+1], fst_interval_left_bound, fst_interval_left_bound + j * interval_size ))
fst_interval_left_bound = fst_interval_left_bound + j * interval_size
return res
def fastest_meta_walk(self):
if self.time_intervals[0] == self.time_intervals[-1]:
return self.clone()
else:
nodes = self.nodes[:]
time_intervals = self.time_intervals[:]
time_intervals[0] = (time_intervals[0][1],time_intervals[0][1])
time_intervals[-1] = (time_intervals[-1][0],time_intervals[-1][0])
for i in range(1,len(time_intervals)):
if time_intervals[i][0] < time_intervals[0][0]:
time_intervals[i] = (time_intervals[0][0],time_intervals[i][1])
if time_intervals[i][1] > time_intervals[-1][1]:
time_intervals[i] = (time_intervals[i][0],time_intervals[-1][1])
return Metawalk(time_intervals,nodes)
def first_time(self):
return self.time_intervals[0][0]
def last_departure(self):
return self.time_intervals[0][1]
def first_arrival(self):
return self.time_intervals[-1][0]
def first_node(self):
return self.nodes[0]
def last_node(self):
return self.nodes[-1]
def plot(self, S, color="#18036f",
markersize=10, dag=False, fig=None):
"""
Draw a path on the ``StreamGraph`` object *S*
:param S:
:param color:
:param markersize:
:param dag:
:param fig:
:return:
"""
if fig is None:
fig, ax = plt.subplots()
else:
ax = plt.gca()
if dag:
dag = S.condensation_dag()
dag.plot(node_to_label=S.node_to_label, ax=ax)
else:
S.plot(ax=ax)
# Plot Source
id_source = S.nodes.index(self.nodes[0])
plt.plot([self.time_intervals[0]], [id_source], color=color,
marker='o', alpha=0.8, markersize=markersize)
# Plot Destination
id_destination = S.nodes.index(self.nodes[-1])
plt.plot([self.time_intervals[-1]], [id_destination], color=color,
marker='o', alpha=0.8, markersize=markersize)
# Plot Path
for i in range(self.length()):
l = self.nodes[i]
l2 = self.nodes[i+1]
t = self.time_intervals[i][0]
t2 = self.time_intervals[i][1]
id1 = S.nodes.index(l)
id2 = S.nodes.index(l2)
idmax = max(id1, id2)
idmin = min(id1, id2)
# verts = [
# (idmin, t), # left, bottom
# (idmax, t), # left, top
# (idmax, t2), # right, top
# (idmin, t2), # right, bottom
# ]
plt.vlines(t, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)
plt.vlines(t2, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)
if i != self.length() - 1:
plt.hlines(id2, xmin=t, xmax=t2,
linewidth=4, alpha=0.8, color=color)
plt.hlines(id1, xmin=t, xmax=t2,
linewidth=4, alpha=0.8, color=color)
# Plot marker
# if t != self.times[i + 1]:
# plt.plot([t], [id2], color=color,
# marker='>', alpha=0.8, markersize=markersize)
# if i != 0 and (t, id1) != (self.times[0], id_source) != (self.times[-1], id_destination):
# # Plot marker
# if id1 == idmin:
# plt.plot([t], [id1], color=color,
# marker='^', alpha=0.8, markersize=markersize)
# else:
# plt.plot([t], [id1], color=color,
# marker='v', alpha=0.8, markersize=markersize)
plt.tight_layout()
return fig
def check_coherence(self, S):
for i in range(self.length()):
l = (self.nodes[i],self.nodes[i+1])
inter = self.time_intervals[i]
l_ = (self.nodes[i+1],self.nodes[i]) # Inverse the order of the interval
if l not in S.links and l_ not in S.links:
raise ValueError("Link : " + str(l) + " does not exists in the Stream Graph !")
else:
t = inter[0]
t2 = inter[1]
if l in S.links:
id_link = S.links.index(l)
else:
id_link = S.links.index(l_)
is_present = False
for lt0, lt1 in zip(S.link_presence[id_link][::2], S.link_presence[id_link][1::2]):
if (lt0 <= t <= lt1) and (lt0 <= t2 <= lt1) and (t <= t2):
is_present = True
if not is_present:
raise ValueError("Link : " + str(l) + " does not exists at time " + str(t) + " !")
print("Check Path Coherence ok !")
return
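if __name__ == "__main__":
    # Minimal sketch with illustrative data: a two-hop metawalk a -> b -> c where
    # the first hop spans [1, 2] and the second is instantaneous at t = 4.
    mw = Metawalk(time_intervals=[[1, 2], [4, 4]], nodes=["a", "b", "c"])
    print(mw)             # nodes, time intervals and the volume polynomial
    print(mw.length())    # 2
    print(mw.duration())  # 4 - 1 = 3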
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.plot",
"numpy.polynomial.polynomial.Polynomial",
"matplotlib.pyplot.vlines",
"numpy.around",
"numpy.math.factorial",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.subplots"
] | [((4201, 4222), 'numpy.polynomial.polynomial.Polynomial', 'nppol.Polynomial', (['res'], {}), '(res)\n', (4217, 4222), True, 'import numpy.polynomial.polynomial as nppol\n'), ((7457, 7567), 'matplotlib.pyplot.plot', 'plt.plot', (['[self.time_intervals[0]]', '[id_source]'], {'color': 'color', 'marker': '"""o"""', 'alpha': '(0.8)', 'markersize': 'markersize'}), "([self.time_intervals[0]], [id_source], color=color, marker='o',\n alpha=0.8, markersize=markersize)\n", (7465, 7567), True, 'import matplotlib.pyplot as plt\n'), ((7671, 7788), 'matplotlib.pyplot.plot', 'plt.plot', (['[self.time_intervals[-1]]', '[id_destination]'], {'color': 'color', 'marker': '"""o"""', 'alpha': '(0.8)', 'markersize': 'markersize'}), "([self.time_intervals[-1]], [id_destination], color=color, marker=\n 'o', alpha=0.8, markersize=markersize)\n", (7679, 7788), True, 'import matplotlib.pyplot as plt\n'), ((9491, 9509), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9507, 9509), True, 'import matplotlib.pyplot as plt\n'), ((4148, 4172), 'numpy.around', 'np.around', (['e'], {'decimals': '(2)'}), '(e, decimals=2)\n', (4157, 4172), True, 'import numpy as np\n'), ((7166, 7180), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7178, 7180), True, 'import matplotlib.pyplot as plt\n'), ((7212, 7221), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7219, 7221), True, 'import matplotlib.pyplot as plt\n'), ((8386, 8460), 'matplotlib.pyplot.vlines', 'plt.vlines', (['t'], {'ymin': 'idmin', 'ymax': 'idmax', 'linewidth': '(6)', 'alpha': '(0.8)', 'color': 'color'}), '(t, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)\n', (8396, 8460), True, 'import matplotlib.pyplot as plt\n'), ((8473, 8548), 'matplotlib.pyplot.vlines', 'plt.vlines', (['t2'], {'ymin': 'idmin', 'ymax': 'idmax', 'linewidth': '(6)', 'alpha': '(0.8)', 'color': 'color'}), '(t2, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)\n', (8483, 8548), True, 'import matplotlib.pyplot as plt\n'), ((3219, 3257), 'numpy.around', 'np.around', (['(last_y - last_x)'], {'decimals': '(2)'}), '(last_y - last_x, decimals=2)\n', (3228, 3257), True, 'import numpy as np\n'), ((8604, 8673), 'matplotlib.pyplot.hlines', 'plt.hlines', (['id2'], {'xmin': 't', 'xmax': 't2', 'linewidth': '(4)', 'alpha': '(0.8)', 'color': 'color'}), '(id2, xmin=t, xmax=t2, linewidth=4, alpha=0.8, color=color)\n', (8614, 8673), True, 'import matplotlib.pyplot as plt\n'), ((8717, 8786), 'matplotlib.pyplot.hlines', 'plt.hlines', (['id1'], {'xmin': 't', 'xmax': 't2', 'linewidth': '(4)', 'alpha': '(0.8)', 'color': 'color'}), '(id1, xmin=t, xmax=t2, linewidth=4, alpha=0.8, color=color)\n', (8727, 8786), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3945), 'numpy.math.factorial', 'np.math.factorial', (['degree'], {}), '(degree)\n', (3937, 3945), True, 'import numpy as np\n')] |
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db.models.signals import post_delete
from django.dispatch import receiver
from myauth import models as myauth_models
from products.models import Book
class CommentProducts(models.Model):
profile = models.ForeignKey(
myauth_models.Profile,
on_delete=models.PROTECT,
related_name='comments'
)
book = models.ForeignKey(
Book,
on_delete=models.PROTECT,
related_name='comments'
)
comment = models.TextField(
verbose_name='Комментарий',
default='',
blank=True,
null=True
)
date_add = models.DateTimeField(
auto_now=False,
auto_now_add=True,
verbose_name='Дата внесения в каталог'
)
date_last_change = models.DateTimeField(
auto_now=True,
auto_now_add=False,
verbose_name='Дата последнего изменения карточки'
)
stars = models.IntegerField(
validators=[
MinValueValidator(0),
MaxValueValidator(5)]
)
def __str__(self):
return f'Комментарий №{self.pk}, пользователя: {self.profile}, к книге: {self.book}'
class Meta:
verbose_name = 'Комментарий'
verbose_name_plural = 'Комментарии'
@receiver(post_delete, sender=CommentProducts)
def post_delete_review(sender, instance, **kwargs):
avr = instance.book.get_rating()
instance.book.avr_rating = avr
instance.book.save()
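# Illustrative flow (hypothetical shell session, not part of this module): deleting
# a comment fires the post_delete receiver above, which refreshes the cached rating.
#
#     comment = CommentProducts.objects.get(pk=1)
#     comment.delete()  # post_delete_review recomputes book.avr_rating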
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.core.validators.MinValueValidator",
"django.dispatch.receiver",
"django.db.models.DateTimeField",
"django.core.validators.MaxValueValidator"
] | [((1334, 1379), 'django.dispatch.receiver', 'receiver', (['post_delete'], {'sender': 'CommentProducts'}), '(post_delete, sender=CommentProducts)\n', (1342, 1379), False, 'from django.dispatch import receiver\n'), ((317, 412), 'django.db.models.ForeignKey', 'models.ForeignKey', (['myauth_models.Profile'], {'on_delete': 'models.PROTECT', 'related_name': '"""comments"""'}), "(myauth_models.Profile, on_delete=models.PROTECT,\n related_name='comments')\n", (334, 412), False, 'from django.db import models\n'), ((450, 524), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Book'], {'on_delete': 'models.PROTECT', 'related_name': '"""comments"""'}), "(Book, on_delete=models.PROTECT, related_name='comments')\n", (467, 524), False, 'from django.db import models\n'), ((569, 648), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Комментарий"""', 'default': '""""""', 'blank': '(True)', 'null': '(True)'}), "(verbose_name='Комментарий', default='', blank=True, null=True)\n", (585, 648), False, 'from django.db import models\n'), ((702, 802), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)', 'verbose_name': '"""Дата внесения в каталог"""'}), "(auto_now=False, auto_now_add=True, verbose_name=\n 'Дата внесения в каталог')\n", (722, 802), False, 'from django.db import models\n'), ((851, 962), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)', 'verbose_name': '"""Дата последнего изменения карточки"""'}), "(auto_now=True, auto_now_add=False, verbose_name=\n 'Дата последнего изменения карточки')\n", (871, 962), False, 'from django.db import models\n'), ((1054, 1074), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (1071, 1074), False, 'from django.core.validators import MaxValueValidator, MinValueValidator\n'), ((1088, 1108), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(5)'], {}), '(5)\n', (1105, 1108), False, 'from django.core.validators import MaxValueValidator, MinValueValidator\n')] |
from urllib.parse import urlparse, urlunparse
from functools import wraps
from flask import abort, request, current_app
from lib.twilio import TwilioClient
def validate_twilio_request(f):
"""Validates that incoming requests genuinely originated from Twilio"""
# Adapted from https://www.twilio.com/docs/usage/tutorials/how-to-secure-your-flask-app-by-validating-incoming-twilio-requests?code-sample=code-custom-decorator-for-flask-apps-to-validate-twilio-requests-3&code-language=Python&code-sdk-version=6.x
@wraps(f)
def decorated_function(*args, **kwargs):
twilio_client = TwilioClient(
current_app.config['SECRETS'].TWILIO_ACCOUNT_SID,
current_app.config['SECRETS'].TWILIO_AUTH_TOKEN
)
# save variables from original request as we will be making transformations on it below
original_url = request.url
original_host_header = request.headers.get('X-Original-Host')
# the url parts to be transformed
twilio_url_parts = urlparse(original_url)
"""
Solve issues with NGROK
Twilio sees: http://somedomain.ngrok.io
App sees: http://localhost:5000
So we replace the domain our app sees with the X-Original-Host header
"""
if original_host_header:
twilio_url_parts = twilio_url_parts._replace(netloc=original_host_header)
"""
Solve issues with API Gateway custom domains
Twilio sees: https://custom-domain.com/bot/validate-next-alert
App sees: https://custom-domain.com/{stage}/bot/validate-next-alert
So we strip API_GATEWAY_BASE_PATH from the beginning of the path
"""
api_gateway_base_path = current_app.config['API_GATEWAY_BASE_PATH']
if api_gateway_base_path:
# Strip N chars from beginning of path.
chars_to_strip = len(f"/{api_gateway_base_path}")
new_path = twilio_url_parts.path[chars_to_strip:]
twilio_url_parts = twilio_url_parts._replace(path=new_path)
# Validate the request using its URL, POST data, and X-TWILIO-SIGNATURE header
request_valid = twilio_client.validate_request(
urlunparse(twilio_url_parts), request.form, request.headers.get('X-TWILIO-SIGNATURE', '')
)
# Continue processing the request if it's valid, return a 403 error if it's not
if request_valid:
return f(*args, **kwargs)
else:
return abort(403)
return decorated_function
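# Usage sketch (hypothetical route; the path and response body are placeholders,
# not part of this module): apply the decorator below the routing decorator so the
# signature check runs before the view.
#
#     @app.route("/bot/validate-next-alert", methods=["POST"])
#     @validate_twilio_request
#     def validate_next_alert():
#         return "<Response></Response>"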
| [
"flask.request.headers.get",
"urllib.parse.urlunparse",
"flask.abort",
"functools.wraps",
"lib.twilio.TwilioClient",
"urllib.parse.urlparse"
] | [((524, 532), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (529, 532), False, 'from functools import wraps\n'), ((602, 718), 'lib.twilio.TwilioClient', 'TwilioClient', (["current_app.config['SECRETS'].TWILIO_ACCOUNT_SID", "current_app.config['SECRETS'].TWILIO_AUTH_TOKEN"], {}), "(current_app.config['SECRETS'].TWILIO_ACCOUNT_SID, current_app.\n config['SECRETS'].TWILIO_AUTH_TOKEN)\n", (614, 718), False, 'from lib.twilio import TwilioClient\n'), ((911, 949), 'flask.request.headers.get', 'request.headers.get', (['"""X-Original-Host"""'], {}), "('X-Original-Host')\n", (930, 949), False, 'from flask import abort, request, current_app\n'), ((1020, 1042), 'urllib.parse.urlparse', 'urlparse', (['original_url'], {}), '(original_url)\n', (1028, 1042), False, 'from urllib.parse import urlparse, urlunparse\n'), ((2239, 2267), 'urllib.parse.urlunparse', 'urlunparse', (['twilio_url_parts'], {}), '(twilio_url_parts)\n', (2249, 2267), False, 'from urllib.parse import urlparse, urlunparse\n'), ((2283, 2328), 'flask.request.headers.get', 'request.headers.get', (['"""X-TWILIO-SIGNATURE"""', '""""""'], {}), "('X-TWILIO-SIGNATURE', '')\n", (2302, 2328), False, 'from flask import abort, request, current_app\n'), ((2525, 2535), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (2530, 2535), False, 'from flask import abort, request, current_app\n')] |
#!/usr/bin/env python3
import os
from typing import Union
from pathlib import Path
import requests
from rpicam.utils.logging_utils import get_logger
class TelegramPoster:
"""
Bare-bones class to post videos to a Telegram chat.
Uses per default credentials stored in environment.
"""
API_URL = 'https://api.telegram.org'
API_TOKEN_ENV_VAR = 'RPICAM_TG_API_TOKEN'
CHAT_ID_ENV_VAR = 'RPICAM_TG_CHAT_ID'
def __init__(self, api_token: str = None, chat_id: str = None):
if api_token is not None and chat_id is not None:
self.api_token = api_token
self.chat_id = chat_id
else:
self.api_token = os.getenv(self.API_TOKEN_ENV_VAR, None)
self.chat_id = os.getenv(self.CHAT_ID_ENV_VAR, None)
self._logger = get_logger(self.__class__.__name__, verb=True)
if self.api_token is None or self.chat_id is None:
raise RuntimeError('Could not find Telegram credentials in environment.')
def send_video(self, p: Union[Path, str]):
"""Post the given video to Telegram using stored credentials."""
p = Path(str(p)).resolve()
if not p.is_file():
raise RuntimeError(f'file not found: {p}')
url = f'{self.API_URL}/bot{self.api_token}/sendVideo'
        # Open the video inside a context manager so the handle is closed after upload.
        with open(p, 'rb') as video_fh:
            files = {
                'chat_id': (None, self.chat_id),
                'video': (str(p), video_fh)
            }
            r = requests.post(url, files=files)
if r.status_code != 200:
self._logger.error(f'Could not upload file. Exit code was {r.status_code}')
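if __name__ == '__main__':
    # Minimal usage sketch: requires RPICAM_TG_API_TOKEN and RPICAM_TG_CHAT_ID in
    # the environment; the video path below is a placeholder.
    poster = TelegramPoster()
    poster.send_video('/tmp/example.mp4')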
| [
"requests.post",
"rpicam.utils.logging_utils.get_logger",
"os.getenv"
] | [((806, 852), 'rpicam.utils.logging_utils.get_logger', 'get_logger', (['self.__class__.__name__'], {'verb': '(True)'}), '(self.__class__.__name__, verb=True)\n', (816, 852), False, 'from rpicam.utils.logging_utils import get_logger\n'), ((1429, 1460), 'requests.post', 'requests.post', (['url'], {'files': 'files'}), '(url, files=files)\n', (1442, 1460), False, 'import requests\n'), ((678, 717), 'os.getenv', 'os.getenv', (['self.API_TOKEN_ENV_VAR', 'None'], {}), '(self.API_TOKEN_ENV_VAR, None)\n', (687, 717), False, 'import os\n'), ((745, 782), 'os.getenv', 'os.getenv', (['self.CHAT_ID_ENV_VAR', 'None'], {}), '(self.CHAT_ID_ENV_VAR, None)\n', (754, 782), False, 'import os\n')] |
"""
This module provides a class for interfacing with the Sense HAT add-on board for Raspberry Pi.
"""
import os
from multiprocessing.managers import RemoteError
from myDevices.utils.logger import error, exception, info
from sensehat.manager import connect_client
class SenseHAT():
"""Class for interacting with a Sense HAT device"""
def __init__(self, use_emulator=False):
"""Initializes Sense HAT device.
Arguments:
use_emulator: True if the Sense HAT Emulator should be used. This requires the Emulator to be installed and running on the desktop.
"""
self.use_emulator = use_emulator
self.sense_hat = None
self.digital_value = 0
self.analog_value = 0.0
self.image_file = os.path.join('/etc/myDevices/plugins/cayenne-plugin-sensehat/data/image.png')
self.call_sense_hat_function('clear')
def init_sense_hat(self):
"""Initializes connection to Sense HAT service and gets a SenseHat shared object."""
if not self.sense_hat:
try:
self.manager = connect_client()
self.manager.use_emulator(self.use_emulator)
self.sense_hat = self.manager.SenseHat()
except ConnectionRefusedError as e:
info('Sense HAT service connection refused')
error(e)
except RemoteError as e:
error('Failed to connect to Sense HAT device')
def call_sense_hat_function(self, function_name, *args):
"""Calls a function of the SenseHat shared object.
Arguments:
function_name: Name of the function to call.
args: Arguments to pass to the function.
"""
self.init_sense_hat()
try:
if self.sense_hat is not None:
func = getattr(self.sense_hat, function_name)
value = func(*args)
return value
        except EOFError as e:
            error(e)
            # Reset the attribute (not a local) so the connection is re-established next call.
            self.sense_hat = None
        except AttributeError as e:
            error(e)
            self.sense_hat = None
def get_temperature(self):
"""Gets the temperature as a tuple with type and unit."""
return (self.call_sense_hat_function('get_temperature'), 'temp', 'c')
def get_humidity(self):
"""Gets the humidity as a tuple with type and unit."""
return (self.call_sense_hat_function('get_humidity'), 'rel_hum', 'p')
def get_pressure(self):
"""Gets the pressure as a tuple with type and unit."""
value = self.call_sense_hat_function('get_pressure')
if value is not None:
return (value * 100, 'bp', 'pa')
def get_acclerometer(self):
"""Gets the g-force as a tuple with type and unit."""
values = self.call_sense_hat_function('get_accelerometer_raw')
if values is not None:
g_force = []
g_force.append(values['x'])
g_force.append(values['y'])
g_force.append(values['z'])
return (g_force, 'accel', 'g')
def get_gyroscope(self):
"""Gets radians per second from the gyroscope."""
#Not currently supported in Cayenne
values = self.call_sense_hat_function('get_gyroscope_raw')
if values is not None:
rps = []
rps.append(values['x'])
rps.append(values['y'])
rps.append(values['z'])
return rps
def get_magnetometer(self):
"""Gets microteslas from the magnetometer."""
#Not currently supported in Cayenne
values = self.call_sense_hat_function('get_compass_raw')
if values is not None:
gyro = []
gyro.append(values['x'])
gyro.append(values['y'])
gyro.append(values['z'])
return gyro
def get_digital(self):
"""Gets the digital value as a tuple specifying this is a digital actuator."""
return (self.digital_value, 'digital_actuator')
def set_digital(self, value):
"""Displays an image on the Sense HAT LED matrix if the digital value is equal to True."""
self.digital_value = value
if self.digital_value:
self.call_sense_hat_function('load_image', self.image_file)
else:
self.call_sense_hat_function('clear')
def get_analog(self):
"""Gets the digital value as a tuple specifying this is an analog actuator."""
return (self.analog_value, 'analog_actuator')
def set_analog(self, value):
"""Displays the analog value on the Sense HAT LED matrix."""
self.analog_value = value
self.call_sense_hat_function('show_message', str(self.analog_value))
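if __name__ == '__main__':
    # Minimal sketch (assumes the Sense HAT service or desktop emulator is running).
    hat = SenseHAT(use_emulator=True)
    print(hat.get_temperature())  # e.g. (21.3, 'temp', 'c')
    hat.set_analog(42)            # scrolls "42" across the LED matrix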
| [
"myDevices.utils.logger.info",
"myDevices.utils.logger.error",
"os.path.join",
"sensehat.manager.connect_client"
] | [((759, 836), 'os.path.join', 'os.path.join', (['"""/etc/myDevices/plugins/cayenne-plugin-sensehat/data/image.png"""'], {}), "('/etc/myDevices/plugins/cayenne-plugin-sensehat/data/image.png')\n", (771, 836), False, 'import os\n'), ((1086, 1102), 'sensehat.manager.connect_client', 'connect_client', ([], {}), '()\n', (1100, 1102), False, 'from sensehat.manager import connect_client\n'), ((1973, 1981), 'myDevices.utils.logger.error', 'error', (['e'], {}), '(e)\n', (1978, 1981), False, 'from myDevices.utils.logger import error, exception, info\n'), ((2059, 2067), 'myDevices.utils.logger.error', 'error', (['e'], {}), '(e)\n', (2064, 2067), False, 'from myDevices.utils.logger import error, exception, info\n'), ((1285, 1329), 'myDevices.utils.logger.info', 'info', (['"""Sense HAT service connection refused"""'], {}), "('Sense HAT service connection refused')\n", (1289, 1329), False, 'from myDevices.utils.logger import error, exception, info\n'), ((1346, 1354), 'myDevices.utils.logger.error', 'error', (['e'], {}), '(e)\n', (1351, 1354), False, 'from myDevices.utils.logger import error, exception, info\n'), ((1408, 1454), 'myDevices.utils.logger.error', 'error', (['"""Failed to connect to Sense HAT device"""'], {}), "('Failed to connect to Sense HAT device')\n", (1413, 1454), False, 'from myDevices.utils.logger import error, exception, info\n')] |
#!/usr/bin/env python
from isrutils.looper import simpleloop
# %% users param
P = {
"path": "~/data/2015-10-07/isr",
"beamid": 64157,
"acf": True,
"vlimacf": (18, 45),
"zlim_pl": [None, None],
"vlim_pl": [72, 90],
"flim_pl": [3.5, 5.5],
"odir": "out/2015-10-07",
"vlim": [25, 55],
"zlim": (90, None),
"verbose": True,
}
# %%
flist = ()
simpleloop(flist, P)
| [
"isrutils.looper.simpleloop"
] | [((383, 403), 'isrutils.looper.simpleloop', 'simpleloop', (['flist', 'P'], {}), '(flist, P)\n', (393, 403), False, 'from isrutils.looper import simpleloop\n')] |
"""turbotutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.views.generic import TemplateView
from django.urls import path
from chat import views
urlpatterns = [
path("admin/", admin.site.urls),
# path("", views.RoomList.as_view(), name="room_list"),
# path("<slug:pk>/", views.RoomDetail.as_view(), name="room_detail"),
# path("create_message/", views.TurboTest.as_view(), name="message_create"),
path("quickstart/", TemplateView.as_view(template_name="broadcast_example.html")),
]
| [
"django.views.generic.TemplateView.as_view",
"django.urls.path"
] | [((789, 820), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (793, 820), False, 'from django.urls import path\n'), ((1061, 1121), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""broadcast_example.html"""'}), "(template_name='broadcast_example.html')\n", (1081, 1121), False, 'from django.views.generic import TemplateView\n')] |
import os
# ETL
ETL_MODE = os.environ.get('ETL_MODE')
ETL_CHUNK_SIZE = int(os.environ.get('ETL_CHUNK_SIZE'))
ETL_SYNC_DELAY = int(os.environ.get('ETL_SYNC_DELAY'))
ETL_FILE_STATE = os.environ.get('ETL_FILE_STATE')
ETL_DEFAULT_DATE = os.environ.get('ETL_DEFAULT_DATE')
# Postgres
POSTGRES_NAME = os.environ.get('POSTGRES_NAME')
POSTGRES_USER = os.environ.get('POSTGRES_USER')
POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD')
POSTGRES_HOST = os.environ.get('POSTGRES_HOST')
POSTGRES_PORT = os.environ.get('POSTGRES_PORT')
# Elasticsearch
ELASTICSEARCH_HOST = os.environ.get('ELASTICSEARCH_HOST')
ELASTICSEARCH_PORT = os.environ.get('ELASTICSEARCH_PORT') | [
"os.environ.get"
] | [((28, 54), 'os.environ.get', 'os.environ.get', (['"""ETL_MODE"""'], {}), "('ETL_MODE')\n", (42, 54), False, 'import os\n'), ((182, 214), 'os.environ.get', 'os.environ.get', (['"""ETL_FILE_STATE"""'], {}), "('ETL_FILE_STATE')\n", (196, 214), False, 'import os\n'), ((234, 268), 'os.environ.get', 'os.environ.get', (['"""ETL_DEFAULT_DATE"""'], {}), "('ETL_DEFAULT_DATE')\n", (248, 268), False, 'import os\n'), ((297, 328), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_NAME"""'], {}), "('POSTGRES_NAME')\n", (311, 328), False, 'import os\n'), ((345, 376), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_USER"""'], {}), "('POSTGRES_USER')\n", (359, 376), False, 'import os\n'), ((397, 432), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_PASSWORD"""'], {}), "('POSTGRES_PASSWORD')\n", (411, 432), False, 'import os\n'), ((449, 480), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_HOST"""'], {}), "('POSTGRES_HOST')\n", (463, 480), False, 'import os\n'), ((497, 528), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_PORT"""'], {}), "('POSTGRES_PORT')\n", (511, 528), False, 'import os\n'), ((567, 603), 'os.environ.get', 'os.environ.get', (['"""ELASTICSEARCH_HOST"""'], {}), "('ELASTICSEARCH_HOST')\n", (581, 603), False, 'import os\n'), ((625, 661), 'os.environ.get', 'os.environ.get', (['"""ELASTICSEARCH_PORT"""'], {}), "('ELASTICSEARCH_PORT')\n", (639, 661), False, 'import os\n'), ((76, 108), 'os.environ.get', 'os.environ.get', (['"""ETL_CHUNK_SIZE"""'], {}), "('ETL_CHUNK_SIZE')\n", (90, 108), False, 'import os\n'), ((131, 163), 'os.environ.get', 'os.environ.get', (['"""ETL_SYNC_DELAY"""'], {}), "('ETL_SYNC_DELAY')\n", (145, 163), False, 'import os\n')] |
#!/usr/bin/env python
from __future__ import absolute_import
from goonpug import app
def main():
app.run(debug=True)
if __name__ == '__main__':
main()
| [
"goonpug.app.run"
] | [((105, 124), 'goonpug.app.run', 'app.run', ([], {'debug': '(True)'}), '(debug=True)\n', (112, 124), False, 'from goonpug import app\n')] |
from setuptools import setup, find_packages
from Cython.Distutils.extension import Extension
from Cython.Build import cythonize, build_ext
import numpy
import os
from glob import glob
"""
ext_modules = [Extension("traj_dist.cydist.basic_geographical", ["traj_dist/cydist/basic_geographical.pyx"]),
Extension("traj_dist.cydist.basic_euclidean", ["traj_dist/cydist/basic_euclidean.pyx"]),
Extension("traj_dist.cydist.sspd", ["traj_dist/cydist/sspd.pyx"]),
Extension("traj_dist.cydist.dtw", ["traj_dist/cydist/dtw.pyx"]),
Extension("traj_dist.cydist.lcss", ["traj_dist/cydist/lcss.pyx"]),
Extension("traj_dist.cydist.hausdorff", ["traj_dist/cydist/hausdorff.pyx"]),
Extension("traj_dist.cydist.discret_frechet", ["traj_dist/cydist/discret_frechet.pyx"]),
Extension("traj_dist.cydist.frechet", ["traj_dist/cydist/frechet.pyx"]),
Extension("traj_dist.cydist.segment_distance", ["traj_dist/cydist/segment_distance.pyx"]),
Extension("traj_dist.cydist.sowd", ["traj_dist/cydist/sowd.pyx"]),
Extension("traj_dist.cydist.erp", ["traj_dist/cydist/erp.pyx"]),
Extension("traj_dist.cydist.edr", ["traj_dist/cydist/edr.pyx"])]
"""
sources = glob('traj_dist/cydist/*.pyx')
extensions = [
Extension(filename.split('.')[0].replace(os.path.sep, '.'),
sources=[filename],
)
for filename in sources]
setup(
name="trajectory_distance_py3",
version="1.0.1",
author="<NAME>",
author_email="<EMAIL>",
cmdclass={'build_ext': build_ext},
# ext_modules=ext_modules,
ext_modules=extensions,
include_dirs=[numpy.get_include()],
install_requires=["numpy>=1.14.0", "cython>=0.27.3", "shapely>=1.6.3", "geohash2>=1.1", 'pandas>=0.20.3',
'scipy>=0.19.1'],
description="Distance to compare 2D-trajectories in Cython",
packages=find_packages()
)
| [
"numpy.get_include",
"setuptools.find_packages",
"glob.glob"
] | [((1294, 1324), 'glob.glob', 'glob', (['"""traj_dist/cydist/*.pyx"""'], {}), "('traj_dist/cydist/*.pyx')\n", (1298, 1324), False, 'from glob import glob\n'), ((1950, 1965), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1963, 1965), False, 'from setuptools import setup, find_packages\n'), ((1700, 1719), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1717, 1719), False, 'import numpy\n')] |
import pyeccodes.accessors as _
def load(h):
h.add(_.Unsigned('numberOfCategories', 1))
with h.list('categories'):
for i in range(0, h.get_l('numberOfCategories')):
h.add(_.Codetable('categoryType', 1, "4.91.table", _.Get('masterDir'), _.Get('localDir')))
h.add(_.Unsigned('codeFigure', 1))
h.add(_.Unsigned('scaleFactorOfLowerLimit', 1))
h.add(_.Unsigned('scaledValueOfLowerLimit', 4))
h.add(_.Unsigned('scaleFactorOfUpperLimit', 1))
h.add(_.Unsigned('scaledValueOfUpperLimit', 4))
| [
"pyeccodes.accessors.Unsigned",
"pyeccodes.accessors.Get"
] | [((58, 93), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""numberOfCategories"""', '(1)'], {}), "('numberOfCategories', 1)\n", (68, 93), True, 'import pyeccodes.accessors as _\n'), ((306, 333), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""codeFigure"""', '(1)'], {}), "('codeFigure', 1)\n", (316, 333), True, 'import pyeccodes.accessors as _\n'), ((353, 393), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""scaleFactorOfLowerLimit"""', '(1)'], {}), "('scaleFactorOfLowerLimit', 1)\n", (363, 393), True, 'import pyeccodes.accessors as _\n'), ((413, 453), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""scaledValueOfLowerLimit"""', '(4)'], {}), "('scaledValueOfLowerLimit', 4)\n", (423, 453), True, 'import pyeccodes.accessors as _\n'), ((473, 513), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""scaleFactorOfUpperLimit"""', '(1)'], {}), "('scaleFactorOfUpperLimit', 1)\n", (483, 513), True, 'import pyeccodes.accessors as _\n'), ((533, 573), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""scaledValueOfUpperLimit"""', '(4)'], {}), "('scaledValueOfUpperLimit', 4)\n", (543, 573), True, 'import pyeccodes.accessors as _\n'), ((248, 266), 'pyeccodes.accessors.Get', '_.Get', (['"""masterDir"""'], {}), "('masterDir')\n", (253, 266), True, 'import pyeccodes.accessors as _\n'), ((268, 285), 'pyeccodes.accessors.Get', '_.Get', (['"""localDir"""'], {}), "('localDir')\n", (273, 285), True, 'import pyeccodes.accessors as _\n')] |
import numpy as np
from numba import guvectorize
from pygama.dsp.errors import DSPFatal
@guvectorize(["void(float32[:], float32, float32, float32, float32[:])",
"void(float64[:], float64, float64, float64, float64[:])"],
"(n),(),(),()->()", nopython=True, cache=True)
def time_point_thresh(w_in, a_threshold, t_start, walk_forward, t_out):
"""
Find the index where the waveform value crosses the threshold,
walking either forward or backward from the starting index.
Parameters
----------
w_in : array-like
The input waveform
a_threshold : float
The threshold value
t_start : int
The starting index
walk_forward: int
The backward (0) or forward (1) search direction
t_out : float
The index where the waveform value crosses the threshold
Processing Chain Example
------------------------
"tp_0": {
"function": "time_point_thresh",
"module": "pygama.dsp.processors",
"args": ["wf_atrap", "bl_std", "tp_start", 0, "tp_0"],
"unit": "ns",
"prereqs": ["wf_atrap", "bl_std", "tp_start"]
}
"""
t_out[0] = np.nan
if np.isnan(w_in).any() or np.isnan(a_threshold) or np.isnan(t_start) or np.isnan(walk_forward):
return
if np.floor(t_start) != t_start:
raise DSPFatal('The starting index must be an integer')
if np.floor(walk_forward) != walk_forward:
raise DSPFatal('The search direction must be an integer')
if int(t_start) < 0 or int(t_start) >= len(w_in):
raise DSPFatal('The starting index is out of range')
if int(walk_forward) == 1:
for i in range(int(t_start), len(w_in) - 1, 1):
if w_in[i] <= a_threshold < w_in[i+1]:
t_out[0] = i
return
else:
for i in range(int(t_start), 1, -1):
if w_in[i-1] < a_threshold <= w_in[i]:
t_out[0] = i
return
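if __name__ == '__main__':
    # Quick illustrative check (not from the pygama docs): walk forward from
    # index 0 along a rising ramp and find where it crosses 3.5.
    w = np.arange(10, dtype='float64')
    print(time_point_thresh(w, 3.5, 0, 1))  # expected: 3.0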
| [
"numpy.floor",
"pygama.dsp.errors.DSPFatal",
"numba.guvectorize",
"numpy.isnan"
] | [((90, 276), 'numba.guvectorize', 'guvectorize', (["['void(float32[:], float32, float32, float32, float32[:])',\n 'void(float64[:], float64, float64, float64, float64[:])']", '"""(n),(),(),()->()"""'], {'nopython': '(True)', 'cache': '(True)'}), "(['void(float32[:], float32, float32, float32, float32[:])',\n 'void(float64[:], float64, float64, float64, float64[:])'],\n '(n),(),(),()->()', nopython=True, cache=True)\n", (101, 276), False, 'from numba import guvectorize\n'), ((1277, 1298), 'numpy.isnan', 'np.isnan', (['a_threshold'], {}), '(a_threshold)\n', (1285, 1298), True, 'import numpy as np\n'), ((1302, 1319), 'numpy.isnan', 'np.isnan', (['t_start'], {}), '(t_start)\n', (1310, 1319), True, 'import numpy as np\n'), ((1323, 1345), 'numpy.isnan', 'np.isnan', (['walk_forward'], {}), '(walk_forward)\n', (1331, 1345), True, 'import numpy as np\n'), ((1374, 1391), 'numpy.floor', 'np.floor', (['t_start'], {}), '(t_start)\n', (1382, 1391), True, 'import numpy as np\n'), ((1418, 1467), 'pygama.dsp.errors.DSPFatal', 'DSPFatal', (['"""The starting index must be an integer"""'], {}), "('The starting index must be an integer')\n", (1426, 1467), False, 'from pygama.dsp.errors import DSPFatal\n'), ((1476, 1498), 'numpy.floor', 'np.floor', (['walk_forward'], {}), '(walk_forward)\n', (1484, 1498), True, 'import numpy as np\n'), ((1530, 1581), 'pygama.dsp.errors.DSPFatal', 'DSPFatal', (['"""The search direction must be an integer"""'], {}), "('The search direction must be an integer')\n", (1538, 1581), False, 'from pygama.dsp.errors import DSPFatal\n'), ((1651, 1697), 'pygama.dsp.errors.DSPFatal', 'DSPFatal', (['"""The starting index is out of range"""'], {}), "('The starting index is out of range')\n", (1659, 1697), False, 'from pygama.dsp.errors import DSPFatal\n'), ((1253, 1267), 'numpy.isnan', 'np.isnan', (['w_in'], {}), '(w_in)\n', (1261, 1267), True, 'import numpy as np\n')] |
import tornado
import json
import uuid
import pandas as pd
from handler.mlsklearn.util import regqeust_arg_to_sklearn_arg
from sklearn.model_selection import train_test_split
from data.persistence import *
from data.data_source import DataSource
from data.data_storage import DataStorage
class TrainTestSplitHandler(tornado.web.RequestHandler):
def post(self):
try:
json_data = json.loads(self.request.body)
data_id = json_data.get('dataID', None)
if data_id == None:
raise Exception("please input data_id")
data_column_names = json_data.get('dataColumnNames', None)
if data_column_names == None:
raise Exception("please input dataColumnNames")
target_column_name = json_data.get('targetColumnName', None)
if target_column_name == None:
raise Exception("please input targetColumnName")
sklearn_arg = json_data.get('sklearn', None)
data_obj = DataStorage.get_data_obj_by_data_id(data_id)
if data_obj:
data_column_names = data_column_names.split(',')
data = data_obj.pandas_data[data_column_names]
target = data_obj.pandas_data[target_column_name]
data = data.values
target = target.values
sklearn_arg = regqeust_arg_to_sklearn_arg(sklearn_arg, ['test_size', 'random_state'])
arrays = [data, target]
X_train, X_test, y_train, y_test = train_test_split(*arrays, **sklearn_arg)
X_train = pd.DataFrame(X_train, columns=data_column_names)
data_obj_X_train = DataStorage.create_data_obj_by_pandas_data(X_train)
X_test = pd.DataFrame(X_test, columns=data_column_names)
data_obj_X_test = DataStorage.create_data_obj_by_pandas_data(X_test)
y_train = pd.DataFrame(y_train, columns=[target_column_name])
data_obj_y_train = DataStorage.create_data_obj_by_pandas_data(y_train)
y_test = pd.DataFrame(y_test, columns=[target_column_name])
data_obj_y_test = DataStorage.create_data_obj_by_pandas_data(y_test)
result = {}
result_X_train = {}
result_X_train['dataID'] = data_obj_X_train.data_id
result_X_train['columnNames'] = data_obj_X_train.column_names
result_X_test = {}
result_X_test['dataID'] = data_obj_X_test.data_id
result_X_test['columnNames'] = data_obj_X_test.column_names
result_y_train = {}
result_y_train['dataID'] = data_obj_y_train.data_id
result_y_train['columnNames'] = data_obj_y_train.column_names
result_y_test = {}
result_y_test['dataID'] = data_obj_y_test.data_id
result_y_test['columnNames'] = data_obj_y_test.column_names
result['X_train'] = result_X_train
result['X_test'] = result_X_test
result['y_train'] = result_y_train
result['y_test'] = result_y_test
self.write(json.dumps(result))
else:
raise Exception("invalid source_id")
except Exception as e:
self.write(str(e)) | [
"pandas.DataFrame",
"json.loads",
"sklearn.model_selection.train_test_split",
"json.dumps",
"data.data_storage.DataStorage.create_data_obj_by_pandas_data",
"handler.mlsklearn.util.regqeust_arg_to_sklearn_arg",
"data.data_storage.DataStorage.get_data_obj_by_data_id"
] | [((387, 416), 'json.loads', 'json.loads', (['self.request.body'], {}), '(self.request.body)\n', (397, 416), False, 'import json\n'), ((890, 934), 'data.data_storage.DataStorage.get_data_obj_by_data_id', 'DataStorage.get_data_obj_by_data_id', (['data_id'], {}), '(data_id)\n', (925, 934), False, 'from data.data_storage import DataStorage\n'), ((1178, 1249), 'handler.mlsklearn.util.regqeust_arg_to_sklearn_arg', 'regqeust_arg_to_sklearn_arg', (['sklearn_arg', "['test_size', 'random_state']"], {}), "(sklearn_arg, ['test_size', 'random_state'])\n", (1205, 1249), False, 'from handler.mlsklearn.util import regqeust_arg_to_sklearn_arg\n'), ((1317, 1357), 'sklearn.model_selection.train_test_split', 'train_test_split', (['*arrays'], {}), '(*arrays, **sklearn_arg)\n', (1333, 1357), False, 'from sklearn.model_selection import train_test_split\n'), ((1372, 1420), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {'columns': 'data_column_names'}), '(X_train, columns=data_column_names)\n', (1384, 1420), True, 'import pandas as pd\n'), ((1444, 1495), 'data.data_storage.DataStorage.create_data_obj_by_pandas_data', 'DataStorage.create_data_obj_by_pandas_data', (['X_train'], {}), '(X_train)\n', (1486, 1495), False, 'from data.data_storage import DataStorage\n'), ((1509, 1556), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {'columns': 'data_column_names'}), '(X_test, columns=data_column_names)\n', (1521, 1556), True, 'import pandas as pd\n'), ((1579, 1629), 'data.data_storage.DataStorage.create_data_obj_by_pandas_data', 'DataStorage.create_data_obj_by_pandas_data', (['X_test'], {}), '(X_test)\n', (1621, 1629), False, 'from data.data_storage import DataStorage\n'), ((1644, 1695), 'pandas.DataFrame', 'pd.DataFrame', (['y_train'], {'columns': '[target_column_name]'}), '(y_train, columns=[target_column_name])\n', (1656, 1695), True, 'import pandas as pd\n'), ((1724, 1775), 'data.data_storage.DataStorage.create_data_obj_by_pandas_data', 'DataStorage.create_data_obj_by_pandas_data', (['y_train'], {}), '(y_train)\n', (1766, 1775), False, 'from data.data_storage import DataStorage\n'), ((1789, 1839), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {'columns': '[target_column_name]'}), '(y_test, columns=[target_column_name])\n', (1801, 1839), True, 'import pandas as pd\n'), ((1862, 1912), 'data.data_storage.DataStorage.create_data_obj_by_pandas_data', 'DataStorage.create_data_obj_by_pandas_data', (['y_test'], {}), '(y_test)\n', (1904, 1912), False, 'from data.data_storage import DataStorage\n'), ((2703, 2721), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (2713, 2721), False, 'import json\n')] |
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import datetime
from yahoo_fin import stock_info as si
plt.rcParams['figure.figsize'] = (15, 10)
tickers = si.tickers_dow()
individual_stock = input(f"Which of the following stocks would you like to backtest \n{tickers}\n:")
num_of_years = 1
start = datetime.date.today() - datetime.timedelta(days = int(365.25*num_of_years))
yf_prices = yf.download(tickers, start=start)
# Individual Stock Strategy
prices = yf_prices['Adj Close'][individual_stock]
rs = prices.apply(np.log).diff(1).fillna(0)
w1 = 5
w2 = 22
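# Crossover signal: positive while the 5-day mean price sits above the
# 22-day mean; np.sign below turns that into +1, 0, or -1 position signals.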
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
pos = ma_x.apply(np.sign)
fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title=f'{individual_stock} Moving Average Crossovers and Positions')
pos.plot(ax=ax[1])
plt.show()
my_rs = pos.shift(1)*rs
plt.subplots()
my_rs.cumsum().apply(np.exp).plot(title=f'{individual_stock} MA Strategy Performance')
rs.cumsum().apply(np.exp).plot()
plt.legend([f'{individual_stock} MA Performance', f'{individual_stock} Buy and Hold Performance'])
plt.show()
print (f'Performance Statistics for {individual_stock} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Full Portfolio Strategy
prices = yf_prices['Adj Close']
rs = prices.apply(np.log).diff(1).fillna(0)
w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
pos = ma_x.apply(np.sign)
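# Scale the raw +/-1 signals so the absolute weights sum to 1 across all
# tickers on each day.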
pos /= pos.abs().sum(1).values.reshape(-1,1)
fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title='Individual Moving Average Crossovers and Positions')
ax[0].legend(bbox_to_anchor=(1.1, 1.05))
pos.plot(ax=ax[1])
ax[1].legend(bbox_to_anchor=(1.1, 1.05))
plt.show()
my_rs = (pos.shift(1)*rs)
my_rs.cumsum().apply(np.exp).plot(title='Individual Stocks Strategy Performance')
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {num_of_years} years:')
for i in range(len(tickers)):
print (f'Moving Average Return for {tickers[i]}: ' + str(100 * round(my_rs.cumsum().apply(np.exp)[tickers[i]].tolist()[-1], 4)) + '%')
i = i + 1
plt.subplots()
my_rs = (pos.shift(1)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot(title='Full Portfolio Strategy Performance')
rs.mean(1).cumsum().apply(np.exp).plot()
plt.legend(['Portfolio MA Performance', 'Buy and Hold Performance'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.mean(1).cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Portfolio Tests
# Look-Ahead Bias
my_rs1 = (pos*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1)
plt.subplots()
my_rs1.cumsum().apply(np.exp).plot(title='Full Portfolio Performance')
my_rs2.cumsum().apply(np.exp).plot()
plt.legend(['With Look-Ahead Bias', 'Without Look-Ahead Bias'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('With Look-Ahead Bias: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Without Look-Ahead Bias: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Signal Lags
lags = range(1, 11)
lagged_rs = pd.Series(dtype=float, index=lags)
print ('-' * 60)
print (f'Lag Performance Statistics for {tickers} ({num_of_years} years):')
for lag in lags:
my_rs = (pos.shift(lag)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot()
lagged_rs[lag] = my_rs.sum()
print (f'Lag {lag} Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
plt.title('Full Portfolio Strategy Performance with Lags')
plt.legend(lags, bbox_to_anchor=(1.1, 0.95))
plt.show()
# Transaction Costs
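# Cost model: charge tc_pct (1%) of each day's total absolute change in
# position weights (turnover).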
tc_pct = 0.01
delta_pos = pos.diff(1).abs().sum(1)
my_tcs = tc_pct*delta_pos
my_rs1 = (pos.shift(1)*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1) - my_tcs
plt.subplots()
my_rs1.cumsum().apply(np.exp).plot()
my_rs2.cumsum().apply(np.exp).plot()
plt.title('Full Portfolio Performance')
plt.legend(['Without Transaction Costs', 'With Transaction Costs'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Without Transaction Costs: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('With Transaction Costs: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%') | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"yfinance.download",
"matplotlib.pyplot.legend",
"datetime.date.today",
"yahoo_fin.stock_info.tickers_dow",
"pandas.Series",
"matplotlib.pyplot.subplots"
] | [((202, 218), 'yahoo_fin.stock_info.tickers_dow', 'si.tickers_dow', ([], {}), '()\n', (216, 218), True, 'from yahoo_fin import stock_info as si\n'), ((434, 467), 'yfinance.download', 'yf.download', (['tickers'], {'start': 'start'}), '(tickers, start=start)\n', (445, 467), True, 'import yfinance as yf\n'), ((705, 723), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (717, 723), True, 'import matplotlib.pyplot as plt\n'), ((831, 841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (839, 841), True, 'import matplotlib.pyplot as plt\n'), ((867, 881), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (879, 881), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1103), 'matplotlib.pyplot.legend', 'plt.legend', (["[f'{individual_stock} MA Performace',\n f'{individual_stock} Buy and Hold Performnace']"], {}), "([f'{individual_stock} MA Performace',\n f'{individual_stock} Buy and Hold Performnace'])\n", (1012, 1103), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1110), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1108, 1110), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1677), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1671, 1677), True, 'import matplotlib.pyplot as plt\n'), ((1858, 1868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1866, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1978, 1988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1986, 1988), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2265), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2263, 2265), True, 'import matplotlib.pyplot as plt\n'), ((2419, 2486), 'matplotlib.pyplot.legend', 'plt.legend', (["['Portfolio MA Performace', 'Buy and Hold Performnace']"], {}), "(['Portfolio MA Performace', 'Buy and Hold Performnace'])\n", (2429, 2486), True, 'import matplotlib.pyplot as plt\n'), ((2487, 2497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2495, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2897, 2911), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2909, 2911), True, 'import matplotlib.pyplot as plt\n'), ((3020, 3083), 'matplotlib.pyplot.legend', 'plt.legend', (["['With Look-Ahead Bias', 'Without Look-Ahead Bias']"], {}), "(['With Look-Ahead Bias', 'Without Look-Ahead Bias'])\n", (3030, 3083), True, 'import matplotlib.pyplot as plt\n'), ((3084, 3094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3092, 3094), True, 'import matplotlib.pyplot as plt\n'), ((3444, 3478), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float', 'index': 'lags'}), '(dtype=float, index=lags)\n', (3453, 3478), True, 'import pandas as pd\n'), ((3808, 3866), 'matplotlib.pyplot.title', 'plt.title', (['"""Full Portfolio Strategy Performance with Lags"""'], {}), "('Full Portfolio Strategy Performance with Lags')\n", (3817, 3866), True, 'import matplotlib.pyplot as plt\n'), ((3871, 3915), 'matplotlib.pyplot.legend', 'plt.legend', (['lags'], {'bbox_to_anchor': '(1.1, 0.95)'}), '(lags, bbox_to_anchor=(1.1, 0.95))\n', (3881, 3915), True, 'import matplotlib.pyplot as plt\n'), ((3916, 3926), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3924, 3926), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4119), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4117, 4119), True, 'import matplotlib.pyplot as plt\n'), ((4194, 4233), 'matplotlib.pyplot.title', 'plt.title', (['"""Full Portfolio Performance"""'], {}), "('Full 
Portfolio Performance')\n", (4203, 4233), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4301), 'matplotlib.pyplot.legend', 'plt.legend', (["['Without Transaction Costs', 'With Transaction Costs']"], {}), "(['Without Transaction Costs', 'With Transaction Costs'])\n", (4244, 4301), True, 'import matplotlib.pyplot as plt\n'), ((4302, 4312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4310, 4312), True, 'import matplotlib.pyplot as plt\n'), ((346, 367), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (365, 367), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue May 22 14:07:42 2018
@author: HORSE
"""
import logging
import logging.handlers
import os
def ARLogger(log_filename = 'log.txt'):
# if not os.path.exists('logs'):
# os.makedirs('logs')
fmt = '%(asctime)s %(levelname)s %(message)s'
datefmt = '%Y-%m-%d %H:%M:%S'
ar_logger = logging.getLogger('ARLogger')
ar_logger.setLevel(logging.INFO)
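    # Rotating file handler: roll the log over at 40000 bytes (10000 * 4)
    # and keep at most 5 backup files.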
handler = logging.handlers.RotatingFileHandler(
log_filename, maxBytes=10000*4, backupCount=5)
formatter = logging.Formatter(fmt, datefmt)
handler.setFormatter(formatter)
ar_logger.addHandler(handler)
return ar_logger | [
"logging.Formatter",
"logging.handlers.RotatingFileHandler",
"logging.getLogger"
] | [((372, 401), 'logging.getLogger', 'logging.getLogger', (['"""ARLogger"""'], {}), "('ARLogger')\n", (389, 401), False, 'import logging\n'), ((454, 543), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['log_filename'], {'maxBytes': '(10000 * 4)', 'backupCount': '(5)'}), '(log_filename, maxBytes=10000 * 4,\n backupCount=5)\n', (490, 543), False, 'import logging\n'), ((570, 601), 'logging.Formatter', 'logging.Formatter', (['fmt', 'datefmt'], {}), '(fmt, datefmt)\n', (587, 601), False, 'import logging\n')] |
import unittest
from desky.rect import Rect
from desky.panel import Panel
from enum import Enum
from functools import reduce, partial
from toolz.dicttoolz import valfilter
# | Type of sizing | Maximum extra width allocation
# --------------------------------------------------------------
# | Fixed (200 px) | 0px
# | Child (use child size) | 0px
# | Percentage (30% of width) | 1px
# | Custom (custom function) | configurable
# | Even (equally divide) | 1px
# | Fill (use remaining space) | any
#
# The types of sizing in the table above are ordered in evalulation priority.
# Fixed, Child, and Percentage sizings are evaluated first. Custom is then
# evaluated and is given the remaining area size as its argument. Even is
# evaluated next. Even panels will split remaining space evenly between
# themselves. Fill evaluates last and will take the remaining space.
#
# If the resulting layout exceeds the bounds of the parent, it is up to the
# parent to decide if it should resize.
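#
# A minimal illustration of how these priorities combine, assuming a parent
# whose inner area is 400px wide, with one row, three columns and the default
# spacing of 0:
#
#   grid = GridLayout(column_count=3, row_count=1)
#   grid.set_fixed_column_sizing(0, 100)         # Fixed: evaluated first -> 100px
#   grid.set_percentage_column_sizing(1, 0.25)   # Percentage: 25% of 400px -> 100px
#   grid.set_fill_column_sizing(2)               # Fill: takes what is left -> 200px
#   grid.layout(panel)                           # grid.column_widths == [100, 100, 200]
#
# `panel` is a placeholder for any panel whose inner width is 400px; see
# grid_example() at the bottom of this file for a runnable example.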
def zero_func():
return 0
class GridLayout:
FIXED = 0
CHILD = 1
PERCENTAGE = 2
CUSTOM = 3
EVEN = 4
FILL = 5
def __init__(self, *, column_count = 1, row_count = 1, spacing = 0):
self.panels = dict()
self.column_sizings = dict()
self.row_sizings = dict()
self.column_count = column_count
self.row_count = row_count
self.spacing = spacing
def add(self, panel, column, row, column_count=1, row_count=1):
self.add_rect(panel, Rect(column, row, column_count, row_count))
def add_rect(self, panel, rect):
assert(rect.x >= 0)
assert(rect.y >= 0)
assert(rect.right <= self.column_count)
assert(rect.bottom <= self.row_count)
assert(self.area_empty(rect))
self.panels[rect.frozen_copy()] = panel
def remove(self, panel):
self.panels = valfilter(lambda p: p != panel, self.panels)
def clear(self, *, remove_panels):
if remove_panels:
for panel in self.panels.values():
panel.remove()
self.panels = dict()
def area_empty(self, rect):
for rect_other in self.panels.keys():
if rect.intersects(rect_other):
return False
return True
def set_fixed_column_sizing(self, column, size):
self.column_sizings[column] = (self.FIXED, size)
def set_fixed_row_sizing(self, row, size):
self.row_sizings[row] = (self.FIXED, size)
def set_child_column_sizing(self, column):
self.column_sizings[column] = (self.CHILD,)
def set_child_row_sizing(self, row):
self.row_sizings[row] = (self.CHILD,)
def set_percentage_column_sizing(self, column, percentage):
self.column_sizings[column] = (self.PERCENTAGE, percentage)
def set_percentage_row_sizing(self, row, percentage):
self.row_sizings[row] = (self.PERCENTAGE, percentage)
def set_custom_column_sizing(self, column, sizing_func, extra_func=zero_func):
self.column_sizings[column] = (self.CUSTOM, sizing_func, extra_func)
def set_custom_row_sizing(self, row, sizing_func, extra_func=zero_func):
self.row_sizings[row] = (self.CUSTOM, sizing_func, extra_func)
def set_even_column_sizing(self, column):
self.column_sizings[column] = (self.EVEN,)
def set_even_row_sizing(self, row):
self.row_sizings[row] = (self.EVEN,)
def set_fill_column_sizing(self, column):
self.column_sizings[column] = (self.FILL,)
def set_fill_row_sizing(self, row):
self.row_sizings[row] = (self.FILL,)
def widest_child_in_column(self, column):
column_rect = Rect(column, 0, 1, self.row_count)
rect_panel_tuples_that_intersect_column = list(
filter(
lambda rect_panel_tuple: rect_panel_tuple[0].intersects(column_rect),
self.panels.items()))
def calculate_width(rect_panel_tuple):
rect, panel = rect_panel_tuple
            # In case a panel spans multiple columns, determine the width as a
# proportional amount.
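            # For example, a child whose outer width is 66px spanning three
            # columns with spacing 3 contributes int((66 - 2 * 3) / 3) == 20px
            # to each column it covers (see the spanning tests below).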
return int((panel.rect_outer.w - (rect.w - 1) * self.spacing) / rect.w)
return reduce(max, map(calculate_width, rect_panel_tuples_that_intersect_column), 0)
def tallest_child_in_row(self, row):
row_rect = Rect(0, row, self.column_count, 1)
rect_panel_tuples_that_intersect_row = list(
filter(
lambda rect_panel_tuple: rect_panel_tuple[0].intersects(row_rect),
self.panels.items()))
def calculate_height(rect_panel_tuple):
rect, panel = rect_panel_tuple
# In case a panel spans multiple rows, determine the height as a
# proportional amount.
return int((panel.rect_outer.h - (rect.h - 1) * self.spacing) / rect.h)
return reduce(max, map(calculate_height, rect_panel_tuples_that_intersect_row), 0)
def layout(self, panel):
area = (panel.rect_inner
.move(-panel.x, -panel.y)
.shrink(
0,
0,
(self.column_count - 1) * self.spacing,
(self.row_count - 1) * self.spacing)
)
column_sizings_by_type = dict()
row_sizings_by_type = dict()
# Group columns and rows by their sizing types while preserving the order.
for column in range(self.column_count):
sizing = self.column_sizings.get(column, (self.EVEN,))
group = column_sizings_by_type.get(sizing[0], list())
group.append((column, sizing))
column_sizings_by_type[sizing[0]] = group
for row in range(self.row_count):
sizing = self.row_sizings.get(row, (self.EVEN,))
group = row_sizings_by_type.get(sizing[0], list())
group.append((row, sizing))
row_sizings_by_type[sizing[0]] = group
# Determine column widths and row heights.
column_widths = [0 for _ in range(self.column_count)]
row_heights = [0 for _ in range(self.row_count)]
def calculate_fixed_sizes(sizings_by_type, sizes):
for sizing_tuple in sizings_by_type.get(self.FIXED, []):
column_or_row, sizing = sizing_tuple
sizes[column_or_row] = sizing[1]
calculate_fixed_sizes(column_sizings_by_type, column_widths)
calculate_fixed_sizes(row_sizings_by_type, row_heights)
def calculate_child_sizes(sizings_by_type, sizes, largest_func):
for sizing_tuple in sizings_by_type.get(self.CHILD, []):
column_or_row, _ = sizing_tuple
sizes[column_or_row] = largest_func(column_or_row)
calculate_child_sizes(column_sizings_by_type, column_widths, self.widest_child_in_column)
calculate_child_sizes(row_sizings_by_type, row_heights, self.tallest_child_in_row)
def calculate_percentage_sizes(sizings_by_type, sizes, area_size):
for sizing_tuple in sizings_by_type.get(self.PERCENTAGE, []):
column_or_row, sizing = sizing_tuple
sizes[column_or_row] = int(area_size * sizing[1])
calculate_percentage_sizes(column_sizings_by_type, column_widths, area.w)
calculate_percentage_sizes(row_sizings_by_type, row_heights, area.h)
def calculate_custom_sizes(sizings_by_type, sizes, area_size, remaining_size):
for sizing_tuple in sizings_by_type.get(self.CUSTOM, []):
column_or_row, sizing = sizing_tuple
sizes[column_or_row] = int(sizing[1](area_size, remaining_size))
calculate_custom_sizes(column_sizings_by_type, column_widths, area.w, area.w - sum(column_widths))
calculate_custom_sizes(row_sizings_by_type, row_heights, area.h, area.h - sum(row_heights))
def calculate_even_sizes(sizings_by_type, sizes, remaining_size):
size = int(remaining_size / len(sizings_by_type.get(self.EVEN, [1])))
for sizing_tuple in sizings_by_type.get(self.EVEN, []):
column_or_row, _ = sizing_tuple
sizes[column_or_row] = size
calculate_even_sizes(
column_sizings_by_type,
column_widths,
area.w - sum(column_widths))
calculate_even_sizes(
row_sizings_by_type,
row_heights,
area.h - sum(row_heights))
fill_columns = column_sizings_by_type.get(self.FILL, [])
if fill_columns:
column_widths[fill_columns[0][0]] = area.w - sum(column_widths)
fill_rows = row_sizings_by_type.get(self.FILL, [])
if fill_rows:
row_heights[fill_rows[0][0]] = area.h - sum(row_heights)
# Allocate extra width and height to columns and rows.
extra_width = max(area.w - sum(column_widths), 0)
extra_height = max(area.h - sum(row_heights), 0)
def allocate_extra_percentage(sizings_by_type, sizes, extra):
for sizing_tuple in sizings_by_type.get(self.PERCENTAGE, []):
column_or_row, _ = sizing_tuple
amount = min(extra, 1)
sizes[column_or_row] += amount
extra -= amount
return extra
extra_width = allocate_extra_percentage(column_sizings_by_type, column_widths, extra_width)
extra_height = allocate_extra_percentage(row_sizings_by_type, row_heights, extra_height)
def allocate_extra_custom(sizings_by_type, sizes, extra):
for sizing_tuple in sizings_by_type.get(self.CUSTOM, []):
column_or_row, sizing = sizing_tuple
amount = int(sizing[2](extra))
sizes[column_or_row] += amount
extra -= amount
return extra
extra_width = allocate_extra_custom(column_sizings_by_type, column_widths, extra_width)
extra_height = allocate_extra_custom(row_sizings_by_type, row_heights, extra_height)
def allocate_extra_even(sizings_by_type, sizes, extra):
for sizing_tuple in sizings_by_type.get(self.EVEN, []):
column_or_row, _ = sizing_tuple
amount = min(extra, 1)
sizes[column_or_row] += amount
extra -= amount
return extra
extra_width = allocate_extra_even(column_sizings_by_type, column_widths, extra_width)
extra_height = allocate_extra_even(row_sizings_by_type, row_heights, extra_height)
# Save column widths and row heights for users to access.
self.column_widths = column_widths
self.row_heights = row_heights
# Position child panels.
for rect, panel in self.panels.items():
x = area.x + sum(column_widths[:rect.x]) + rect.x * self.spacing
y = area.y + sum(row_heights[:rect.y]) + rect.y * self.spacing
width = sum(column_widths[rect.x:rect.right]) + (rect.w - 1) * self.spacing
height = sum(row_heights[rect.y:rect.bottom]) + (rect.h - 1) * self.spacing
panel.rect_outer = Panel.Rect(x, y, width, height)
class GridLayoutTest(unittest.TestCase):
def setUp(self):
from desky.gui import Gui
self.gui = Gui()
self.parent = self.gui.create(Panel)
self.parent.size = (200, 300)
self.parent.padding = (2, 3, 4, 5)
self.parent.margins = (20, 30, 40, 50)
def test_tallest_child_in_column_or_row_1(self):
grid = GridLayout(column_count=5, row_count=5, spacing=3)
for column in range(grid.column_count):
grid.set_child_column_sizing(column)
for row in range(grid.row_count):
grid.set_child_row_sizing(row)
self.assertEqual(0, grid.widest_child_in_column(2))
self.assertEqual(0, grid.tallest_child_in_row(2))
def test_tallest_child_in_column_or_row_2(self):
def create_grid():
grid = GridLayout(column_count=5, row_count=5, spacing=3)
for column in range(grid.column_count):
grid.set_child_column_sizing(column)
for row in range(grid.row_count):
grid.set_child_row_sizing(row)
return grid
with self.subTest("column"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (60, 38)
grid.add(child, 2, 1)
self.assertEqual(60, grid.widest_child_in_column(2))
with self.subTest("column"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (38, 60)
grid.add(child, 1, 2)
self.assertEqual(60, grid.tallest_child_in_row(2))
def test_tallest_child_in_column_or_row_3(self):
def create_grid():
grid = GridLayout(column_count=5, row_count=5, spacing=3)
for column in range(grid.column_count):
grid.set_child_column_sizing(column)
for row in range(grid.row_count):
grid.set_child_row_sizing(row)
return grid
with self.subTest("column"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (66, 38)
grid.add_rect(child, Rect(2, 1, 3, 2))
self.assertEqual(20, grid.widest_child_in_column(2))
self.assertEqual(20, grid.widest_child_in_column(3))
self.assertEqual(20, grid.widest_child_in_column(4))
with self.subTest("row"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (38, 66)
grid.add_rect(child, Rect(1, 2, 2, 3))
self.assertEqual(20, grid.tallest_child_in_row(2))
self.assertEqual(20, grid.tallest_child_in_row(3))
self.assertEqual(20, grid.tallest_child_in_row(4))
def test_area_empty(self):
scenarios = [
(Rect(2, 0, 4, 2), Rect(1, 1, 9, 9), False),
(Rect(10, 2, 4, 4), Rect(1, 1, 9, 9), True),
]
for rect_left, rect_right, empty in scenarios:
with self.subTest(rect_left=rect_left, rect_right=rect_right, empty=empty):
grid = GridLayout(column_count=20, row_count=20, spacing=5)
grid.add_rect(self.gui.create(Panel), rect_left)
self.assertEqual(empty, grid.area_empty(rect_right))
def test_single_fixed(self):
grid = GridLayout(spacing=5)
grid.set_fixed_column_sizing(0, 90)
grid.set_fixed_row_sizing(0, 120)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 71, 102), child.rect)
def test_multiple_fixed_1(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_fixed_column_sizing(0, 101)
grid.set_fixed_column_sizing(1, 58)
grid.set_fixed_row_sizing(0, 33)
grid.set_fixed_row_sizing(1, 93)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 101, 33), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(108, 3, 58, 33), child_1_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 41, 101, 93), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(108, 41, 58, 93), child_1_1.rect_outer)
def test_multiple_fixed_2(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_fixed_column_sizing(0, 101)
grid.set_fixed_column_sizing(1, 0)
grid.set_fixed_row_sizing(0, 0)
grid.set_fixed_row_sizing(1, 93)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 101, 18), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(108, 3, 19, 18), child_1_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 8, 101, 93), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(108, 8, 19, 93), child_1_1.rect_outer)
def test_single_child(self):
grid = GridLayout(spacing=5)
grid.set_child_column_sizing(0)
grid.set_child_row_sizing(0)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 53, 81), child.rect)
def test_multiple_child_1(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_child_column_sizing(0)
grid.set_child_column_sizing(1)
grid.set_child_row_sizing(0)
grid.set_child_row_sizing(1)
def create_child(rect, size):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = size
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1), (58, 39))
child_1_0 = create_child(Rect(1, 0, 1, 1), (25, 71))
child_0_1 = create_child(Rect(0, 1, 1, 1), (61, 62))
child_1_1 = create_child(Rect(1, 1, 1, 1), (54, 20))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 80, 89), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(87, 3, 73, 89), child_1_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 97, 80, 80), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(87, 97, 73, 80), child_1_1.rect_outer)
def test_multiple_child_2(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_child_column_sizing(0)
grid.set_child_column_sizing(1)
grid.set_child_row_sizing(0)
grid.set_child_row_sizing(1)
def create_child(rect, size):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = size
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1), (58, 31))
child_0_1 = create_child(Rect(0, 1, 1, 1), (61, 31))
child_1_0 = create_child(Rect(1, 0, 1, 2), (25, 87))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 80, 50), child_0_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 58, 80, 50), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(87, 3, 44, 105), child_1_0.rect_outer)
def test_single_percentage(self):
grid = GridLayout(spacing=5)
grid.set_percentage_column_sizing(0, 0.333)
grid.set_percentage_row_sizing(0, 0.8)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
width = int(self.parent.rect_inner.w * 0.333) - 19 + 1
height = int(self.parent.rect_inner.h * 0.8) - 18 + 1
self.assertEqual(Panel.Rect(13, 19, width, height), child.rect)
def test_multiple_percentage(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_percentage_column_sizing(0, 0.333)
grid.set_percentage_column_sizing(1, 0.333)
grid.set_percentage_row_sizing(0, 0.8139)
grid.set_percentage_row_sizing(1, 1 - 0.8139)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(189 * 0.333) + 1
width_1 = width_0
height_0 = int(287 * 0.8139) + 1
height_1 = int(287 * (1 - 0.8139))
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def test_single_custom(self):
def custom_sizing(area_size, remaining_size):
return area_size ** 0.5
def custom_extra(extra):
return extra / 2
grid = GridLayout(spacing=5)
grid.set_custom_column_sizing(0, custom_sizing, custom_extra)
grid.set_custom_row_sizing(0, custom_sizing, custom_extra)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
root_width = int(194 ** 0.5)
root_height = int(292 ** 0.5)
final_width = root_width + int((194 - root_width) / 2) - 19
final_height = root_height + int((292 - root_height) / 2) - 18
self.assertEqual(Panel.Rect(13, 19, final_width, final_height), child.rect)
def test_multiple_custom(self):
def custom_sizing_1(area_size, remaining_size):
return area_size ** 0.8
def custom_sizing_2(area_size, remaining_size):
return area_size - area_size ** 0.8
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_custom_column_sizing(0, custom_sizing_1, partial(min, 1))
grid.set_custom_column_sizing(1, custom_sizing_2, partial(min, 1))
grid.set_custom_row_sizing(0, custom_sizing_2, partial(min, 1))
grid.set_custom_row_sizing(1, custom_sizing_1, partial(min, 1))
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(custom_sizing_1(189, None)) + 1
width_1 = int(custom_sizing_2(189, None))
height_0 = int(custom_sizing_2(287, None)) + 1
height_1 = int(custom_sizing_1(287, None))
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def test_single_even(self):
# Since even sizing is the default we should make sure it works even
        # when we don't explicitly set the columns and rows to even sizing.
for default in (True, False):
with self.subTest(default=default):
grid = GridLayout(spacing=5)
if not default:
grid.set_even_column_sizing(0)
grid.set_even_row_sizing(0)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 175, 274), child.rect)
def test_multiple_even(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_even_column_sizing(0)
grid.set_even_column_sizing(1)
grid.set_even_row_sizing(0)
grid.set_even_row_sizing(1)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(189 * 0.5) + 1
width_1 = int(189 * 0.5)
height_0 = int(287 * 0.5) + 1
height_1 = int(287 * 0.5)
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def test_single_fill(self):
grid = GridLayout(spacing=5)
grid.set_fill_column_sizing(0)
grid.set_fill_row_sizing(0)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 175, 274), child.rect)
def test_multiple_fill(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_percentage_column_sizing(0, 0.3333)
grid.set_fill_column_sizing(1)
grid.set_fill_row_sizing(0)
grid.set_fixed_row_sizing(1, 100)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(189 * 0.3333)
width_1 = 189 - int(189 * 0.3333)
height_0 = 287 - 100
height_1 = 100
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def grid_example(gui):
panel = gui.create(Panel)
panel.rect = (50, 50, 500, 500)
panel.padding = (8, 16, 24, 32)
grid = GridLayout(column_count = 3, row_count = 4, spacing = 4)
for row in range(0, grid.row_count):
for column in range(0, grid.column_count):
child = gui.create(Panel)
child.parent = panel
grid.add(child, column, row)
grid.layout(panel)
def main():
from desky.gui import example
#example(grid_example)
unittest.main()
if __name__ == "__main__":
main()
| [
"unittest.main",
"functools.partial",
"desky.panel.Panel.Rect",
"toolz.dicttoolz.valfilter",
"desky.rect.Rect",
"desky.gui.Gui"
] | [((29067, 29082), 'unittest.main', 'unittest.main', ([], {}), '()\n', (29080, 29082), False, 'import unittest\n'), ((1911, 1955), 'toolz.dicttoolz.valfilter', 'valfilter', (['(lambda p: p != panel)', 'self.panels'], {}), '(lambda p: p != panel, self.panels)\n', (1920, 1955), False, 'from toolz.dicttoolz import valfilter\n'), ((3700, 3734), 'desky.rect.Rect', 'Rect', (['column', '(0)', '(1)', 'self.row_count'], {}), '(column, 0, 1, self.row_count)\n', (3704, 3734), False, 'from desky.rect import Rect\n'), ((4390, 4424), 'desky.rect.Rect', 'Rect', (['(0)', 'row', 'self.column_count', '(1)'], {}), '(0, row, self.column_count, 1)\n', (4394, 4424), False, 'from desky.rect import Rect\n'), ((11342, 11347), 'desky.gui.Gui', 'Gui', ([], {}), '()\n', (11345, 11347), False, 'from desky.gui import Gui\n'), ((1541, 1583), 'desky.rect.Rect', 'Rect', (['column', 'row', 'column_count', 'row_count'], {}), '(column, row, column_count, row_count)\n', (1545, 1583), False, 'from desky.rect import Rect\n'), ((11193, 11224), 'desky.panel.Panel.Rect', 'Panel.Rect', (['x', 'y', 'width', 'height'], {}), '(x, y, width, height)\n', (11203, 11224), False, 'from desky.panel import Panel\n'), ((14940, 14967), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(13)', '(19)', '(71)', '(102)'], {}), '(13, 19, 71, 102)\n', (14950, 14967), False, 'from desky.panel import Panel\n'), ((15592, 15608), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (15596, 15608), False, 'from desky.rect import Rect\n'), ((15643, 15659), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(1)'], {}), '(1, 0, 1, 1)\n', (15647, 15659), False, 'from desky.rect import Rect\n'), ((15694, 15710), 'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (15698, 15710), False, 'from desky.rect import Rect\n'), ((15745, 15761), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (15749, 15761), False, 'from desky.rect import Rect\n'), ((15823, 15848), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', '(101)', '(33)'], {}), '(2, 3, 101, 33)\n', (15833, 15848), False, 'from desky.panel import Panel\n'), ((15900, 15926), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(108)', '(3)', '(58)', '(33)'], {}), '(108, 3, 58, 33)\n', (15910, 15926), False, 'from desky.panel import Panel\n'), ((15977, 16003), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(41)', '(101)', '(93)'], {}), '(2, 41, 101, 93)\n', (15987, 16003), False, 'from desky.panel import Panel\n'), ((16054, 16081), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(108)', '(41)', '(58)', '(93)'], {}), '(108, 41, 58, 93)\n', (16064, 16081), False, 'from desky.panel import Panel\n'), ((16715, 16731), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (16719, 16731), False, 'from desky.rect import Rect\n'), ((16766, 16782), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(1)'], {}), '(1, 0, 1, 1)\n', (16770, 16782), False, 'from desky.rect import Rect\n'), ((16817, 16833), 'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (16821, 16833), False, 'from desky.rect import Rect\n'), ((16868, 16884), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (16872, 16884), False, 'from desky.rect import Rect\n'), ((16946, 16971), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', '(101)', '(18)'], {}), '(2, 3, 101, 18)\n', (16956, 16971), False, 'from desky.panel import Panel\n'), ((17024, 17050), 'desky.panel.Panel.Rect', 'Panel.Rect', 
(['(108)', '(3)', '(19)', '(18)'], {}), '(108, 3, 19, 18)\n', (17034, 17050), False, 'from desky.panel import Panel\n'), ((17102, 17127), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(8)', '(101)', '(93)'], {}), '(2, 8, 101, 93)\n', (17112, 17127), False, 'from desky.panel import Panel\n'), ((17179, 17205), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(108)', '(8)', '(19)', '(93)'], {}), '(108, 8, 19, 93)\n', (17189, 17205), False, 'from desky.panel import Panel\n'), ((17651, 17677), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(13)', '(19)', '(53)', '(81)'], {}), '(13, 19, 53, 81)\n', (17661, 17677), False, 'from desky.panel import Panel\n'), ((18283, 18299), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (18287, 18299), False, 'from desky.rect import Rect\n'), ((18344, 18360), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(1)'], {}), '(1, 0, 1, 1)\n', (18348, 18360), False, 'from desky.rect import Rect\n'), ((18405, 18421), 'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (18409, 18421), False, 'from desky.rect import Rect\n'), ((18466, 18482), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (18470, 18482), False, 'from desky.rect import Rect\n'), ((18554, 18578), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', '(80)', '(89)'], {}), '(2, 3, 80, 89)\n', (18564, 18578), False, 'from desky.panel import Panel\n'), ((18629, 18654), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(87)', '(3)', '(73)', '(89)'], {}), '(87, 3, 73, 89)\n', (18639, 18654), False, 'from desky.panel import Panel\n'), ((18704, 18729), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(97)', '(80)', '(80)'], {}), '(2, 97, 80, 80)\n', (18714, 18729), False, 'from desky.panel import Panel\n'), ((18779, 18805), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(87)', '(97)', '(73)', '(80)'], {}), '(87, 97, 73, 80)\n', (18789, 18805), False, 'from desky.panel import Panel\n'), ((19421, 19437), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (19425, 19437), False, 'from desky.rect import Rect\n'), ((19482, 19498), 'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (19486, 19498), False, 'from desky.rect import Rect\n'), ((19543, 19559), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(2)'], {}), '(1, 0, 1, 2)\n', (19547, 19559), False, 'from desky.rect import Rect\n'), ((19631, 19655), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', '(80)', '(50)'], {}), '(2, 3, 80, 50)\n', (19641, 19655), False, 'from desky.panel import Panel\n'), ((19707, 19732), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(58)', '(80)', '(50)'], {}), '(2, 58, 80, 50)\n', (19717, 19732), False, 'from desky.panel import Panel\n'), ((19783, 19809), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(87)', '(3)', '(44)', '(105)'], {}), '(87, 3, 44, 105)\n', (19793, 19809), False, 'from desky.panel import Panel\n'), ((20406, 20439), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(13)', '(19)', 'width', 'height'], {}), '(13, 19, width, height)\n', (20416, 20439), False, 'from desky.panel import Panel\n'), ((21104, 21120), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (21108, 21120), False, 'from desky.rect import Rect\n'), ((21155, 21171), 'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (21159, 21171), False, 'from desky.rect import Rect\n'), ((21206, 21222), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(1)'], {}), '(1, 
0, 1, 1)\n', (21210, 21222), False, 'from desky.rect import Rect\n'), ((21257, 21273), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (21261, 21273), False, 'from desky.rect import Rect\n'), ((21484, 21519), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', 'width_0', 'height_0'], {}), '(2, 3, width_0, height_0)\n', (21494, 21519), False, 'from desky.panel import Panel\n'), ((21589, 21635), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(8 + height_0)', 'width_0', 'height_1'], {}), '(2, 8 + height_0, width_0, height_1)\n', (21599, 21635), False, 'from desky.panel import Panel\n'), ((21694, 21739), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(3)', 'width_1', 'height_0'], {}), '(7 + width_0, 3, width_1, height_0)\n', (21704, 21739), False, 'from desky.panel import Panel\n'), ((21799, 21855), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(8 + height_0)', 'width_1', 'height_1'], {}), '(7 + width_0, 8 + height_0, width_1, height_1)\n', (21809, 21855), False, 'from desky.panel import Panel\n'), ((22730, 22775), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(13)', '(19)', 'final_width', 'final_height'], {}), '(13, 19, final_width, final_height)\n', (22740, 22775), False, 'from desky.panel import Panel\n'), ((23149, 23164), 'functools.partial', 'partial', (['min', '(1)'], {}), '(min, 1)\n', (23156, 23164), False, 'from functools import reduce, partial\n'), ((23224, 23239), 'functools.partial', 'partial', (['min', '(1)'], {}), '(min, 1)\n', (23231, 23239), False, 'from functools import reduce, partial\n'), ((23296, 23311), 'functools.partial', 'partial', (['min', '(1)'], {}), '(min, 1)\n', (23303, 23311), False, 'from functools import reduce, partial\n'), ((23368, 23383), 'functools.partial', 'partial', (['min', '(1)'], {}), '(min, 1)\n', (23375, 23383), False, 'from functools import reduce, partial\n'), ((23721, 23737), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (23725, 23737), False, 'from desky.rect import Rect\n'), ((23772, 23788), 'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (23776, 23788), False, 'from desky.rect import Rect\n'), ((23823, 23839), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(1)'], {}), '(1, 0, 1, 1)\n', (23827, 23839), False, 'from desky.rect import Rect\n'), ((23874, 23890), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (23878, 23890), False, 'from desky.rect import Rect\n'), ((24163, 24198), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', 'width_0', 'height_0'], {}), '(2, 3, width_0, height_0)\n', (24173, 24198), False, 'from desky.panel import Panel\n'), ((24268, 24314), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(8 + height_0)', 'width_0', 'height_1'], {}), '(2, 8 + height_0, width_0, height_1)\n', (24278, 24314), False, 'from desky.panel import Panel\n'), ((24373, 24418), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(3)', 'width_1', 'height_0'], {}), '(7 + width_0, 3, width_1, height_0)\n', (24383, 24418), False, 'from desky.panel import Panel\n'), ((24478, 24534), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(8 + height_0)', 'width_1', 'height_1'], {}), '(7 + width_0, 8 + height_0, width_1, height_1)\n', (24488, 24534), False, 'from desky.panel import Panel\n'), ((25972, 25988), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (25976, 25988), False, 'from desky.rect import Rect\n'), ((26023, 26039), 
'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (26027, 26039), False, 'from desky.rect import Rect\n'), ((26074, 26090), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(1)'], {}), '(1, 0, 1, 1)\n', (26078, 26090), False, 'from desky.rect import Rect\n'), ((26125, 26141), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (26129, 26141), False, 'from desky.rect import Rect\n'), ((26346, 26381), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', 'width_0', 'height_0'], {}), '(2, 3, width_0, height_0)\n', (26356, 26381), False, 'from desky.panel import Panel\n'), ((26451, 26497), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(8 + height_0)', 'width_0', 'height_1'], {}), '(2, 8 + height_0, width_0, height_1)\n', (26461, 26497), False, 'from desky.panel import Panel\n'), ((26556, 26601), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(3)', 'width_1', 'height_0'], {}), '(7 + width_0, 3, width_1, height_0)\n', (26566, 26601), False, 'from desky.panel import Panel\n'), ((26661, 26717), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(8 + height_0)', 'width_1', 'height_1'], {}), '(7 + width_0, 8 + height_0, width_1, height_1)\n', (26671, 26717), False, 'from desky.panel import Panel\n'), ((27158, 27186), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(13)', '(19)', '(175)', '(274)'], {}), '(13, 19, 175, 274)\n', (27168, 27186), False, 'from desky.panel import Panel\n'), ((27807, 27823), 'desky.rect.Rect', 'Rect', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (27811, 27823), False, 'from desky.rect import Rect\n'), ((27858, 27874), 'desky.rect.Rect', 'Rect', (['(0)', '(1)', '(1)', '(1)'], {}), '(0, 1, 1, 1)\n', (27862, 27874), False, 'from desky.rect import Rect\n'), ((27909, 27925), 'desky.rect.Rect', 'Rect', (['(1)', '(0)', '(1)', '(1)'], {}), '(1, 0, 1, 1)\n', (27913, 27925), False, 'from desky.rect import Rect\n'), ((27960, 27976), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (27964, 27976), False, 'from desky.rect import Rect\n'), ((28169, 28204), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(3)', 'width_0', 'height_0'], {}), '(2, 3, width_0, height_0)\n', (28179, 28204), False, 'from desky.panel import Panel\n'), ((28274, 28320), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(2)', '(8 + height_0)', 'width_0', 'height_1'], {}), '(2, 8 + height_0, width_0, height_1)\n', (28284, 28320), False, 'from desky.panel import Panel\n'), ((28379, 28424), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(3)', 'width_1', 'height_0'], {}), '(7 + width_0, 3, width_1, height_0)\n', (28389, 28424), False, 'from desky.panel import Panel\n'), ((28484, 28540), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(7 + width_0)', '(8 + height_0)', 'width_1', 'height_1'], {}), '(7 + width_0, 8 + height_0, width_1, height_1)\n', (28494, 28540), False, 'from desky.panel import Panel\n'), ((13363, 13379), 'desky.rect.Rect', 'Rect', (['(2)', '(1)', '(3)', '(2)'], {}), '(2, 1, 3, 2)\n', (13367, 13379), False, 'from desky.rect import Rect\n'), ((13755, 13771), 'desky.rect.Rect', 'Rect', (['(1)', '(2)', '(2)', '(3)'], {}), '(1, 2, 2, 3)\n', (13759, 13771), False, 'from desky.rect import Rect\n'), ((14034, 14050), 'desky.rect.Rect', 'Rect', (['(2)', '(0)', '(4)', '(2)'], {}), '(2, 0, 4, 2)\n', (14038, 14050), False, 'from desky.rect import Rect\n'), ((14052, 14068), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(9)', '(9)'], {}), '(1, 1, 9, 9)\n', (14056, 14068), False, 'from 
desky.rect import Rect\n'), ((14095, 14112), 'desky.rect.Rect', 'Rect', (['(10)', '(2)', '(4)', '(4)'], {}), '(10, 2, 4, 4)\n', (14099, 14112), False, 'from desky.rect import Rect\n'), ((14114, 14130), 'desky.rect.Rect', 'Rect', (['(1)', '(1)', '(9)', '(9)'], {}), '(1, 1, 9, 9)\n', (14118, 14130), False, 'from desky.rect import Rect\n'), ((25343, 25371), 'desky.panel.Panel.Rect', 'Panel.Rect', (['(13)', '(19)', '(175)', '(274)'], {}), '(13, 19, 175, 274)\n', (25353, 25371), False, 'from desky.panel import Panel\n')] |
from distutils.core import setup, Extension
def main():
setup(name="seqrepc",
version="beta1.0",
description="SeqrepC is a module for fundamental operations related to numerical representations of genomic sequences.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/ednilsonlomazi/SeqrepC",
license="BSD 3-Clause License",
ext_modules=[Extension("seqrepc", ["./src/seqrepc.c"])])
if __name__ == "__main__":
main()
| [
"distutils.core.Extension"
] | [((429, 470), 'distutils.core.Extension', 'Extension', (['"""seqrepc"""', "['./src/seqrepc.c']"], {}), "('seqrepc', ['./src/seqrepc.c'])\n", (438, 470), False, 'from distutils.core import setup, Extension\n')] |
# pylint: disable=no-member,invalid-name,line-too-long,trailing-whitespace
"""Add IsEnabled column to EventHandlerBASE
Revision ID: <KEY>
Revises: 6b5369ab5224
Create Date: 2021-02-17 20:15:42.776190
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '6b5369ab5224'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('EventHandlerBASE',
sa.Column('IsEnabled', sa.Boolean, nullable=False, default=False))
def downgrade():
op.drop_column('EventHandlerBASE', 'IsEnabled')
| [
"alembic.op.drop_column",
"sqlalchemy.Column"
] | [((548, 595), 'alembic.op.drop_column', 'op.drop_column', (['"""EventHandlerBASE"""', '"""IsEnabled"""'], {}), "('EventHandlerBASE', 'IsEnabled')\n", (562, 595), False, 'from alembic import op\n'), ((458, 523), 'sqlalchemy.Column', 'sa.Column', (['"""IsEnabled"""', 'sa.Boolean'], {'nullable': '(False)', 'default': '(False)'}), "('IsEnabled', sa.Boolean, nullable=False, default=False)\n", (467, 523), True, 'import sqlalchemy as sa\n')] |
from resource_management.libraries.script.script import Script
from resource_management.core.resources.packaging import Package
class Client(Script):
def install(self, env):
packages = ['percona-server-client']
Package(packages)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
def start(self, env):
import params
env.set_params(params)
def stop(self, env):
import params
env.set_params(params)
def status(self, env):
import params
env.set_params(params)
if __name__ == "__main__":
Client().execute()
| [
"resource_management.core.resources.packaging.Package"
] | [((233, 250), 'resource_management.core.resources.packaging.Package', 'Package', (['packages'], {}), '(packages)\n', (240, 250), False, 'from resource_management.core.resources.packaging import Package\n')] |
# Generated by Django 2.2.1 on 2019-07-19 12:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0035_auto_20190625_0900'),
]
operations = [
migrations.RenameField(
model_name='verenigingen',
old_name='ontgroening',
new_name='introductietijd',
),
]
| [
"django.db.migrations.RenameField"
] | [((227, 332), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""verenigingen"""', 'old_name': '"""ontgroening"""', 'new_name': '"""introductietijd"""'}), "(model_name='verenigingen', old_name='ontgroening',\n new_name='introductietijd')\n", (249, 332), False, 'from django.db import migrations\n')] |
#!/usr/bin/python
# Flask is used to create a somewhat lightweight listening server
from flask import Flask
from requests import get
def spawn_proxy():
myproxy = Flask('__name__')
# Quick health check override
@myproxy.route('/healthcheck', methods=['GET'])
def health():
return "OK"
# Let's not spam google if we don't get a query, and return a Bad Request.
@myproxy.route('/', methods=['GET'])
def empty():
return "Empty search string", 400
# This is a very dumb proxy, we're only doing GET.
@myproxy.route('/<path:req>', methods=['GET'])
def proxy(req):
# We're only going to google here, so let's just keep it in the proxy settings for now.
        target = 'https://www.google.com'
return get(f'{target}/search?q={req}').content
return myproxy
| [
"flask.Flask",
"requests.get"
] | [((170, 187), 'flask.Flask', 'Flask', (['"""__name__"""'], {}), "('__name__')\n", (175, 187), False, 'from flask import Flask\n'), ((774, 805), 'requests.get', 'get', (['f"""{target}/search?q={req}"""'], {}), "(f'{target}/search?q={req}')\n", (777, 805), False, 'from requests import get\n')] |
#! /usr/bin/env python
import pefile
import datetime
import os
import re
from pecli.plugins.base import Plugin
from pecli.lib.utils import cli_out
ASCII_BYTE = b" !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~\t"
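# Printable ASCII characters (regex-escaped) used to build the narrow and
# wide string patterns below.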
class PluginStrings(Plugin):
name = "strings"
description = "Extract strings from the PE file"
def add_arguments(self, parser):
parser.add_argument('--ascii', '-a', action="store_true", help="ASCII strings only")
parser.add_argument('--wide', '-w', action="store_true", help="Wide strings only")
parser.add_argument('-n', '--min-len', type=int, default=4, help='Print sequences of ' +
'characters that are at least min-len characters long, instead of ' +
'the default 4.')
self.parser = parser
def get_results(self, data, min_len=4, wide_only=False, ascii_only=False, cli_mode=False):
# regular expressions from flare-floss:
# https://github.com/fireeye/flare-floss/blob/master/floss/strings.py#L7-L9
re_narrow = re.compile(b'([%s]{%d,})' % (ASCII_BYTE, min_len))
re_wide = re.compile(b'((?:[%s]\x00){%d,})' % (ASCII_BYTE, min_len))
strings = []
# print ascii strings unless we only want wide strings
if not wide_only:
for match in re_narrow.finditer(data):
s = match.group().decode('ascii')
strings.append(s)
cli_out(s, cli_mode)
# print wide strings unless we only want ascii strings
if not ascii_only:
for match in re_wide.finditer(data):
try:
s = match.group().decode('utf-16')
cli_out(s, cli_mode)
strings.append(s)
except UnicodeDecodeError:
pass
return {"strings": strings}
def run_cli(self, args, pe, data):
if args.ascii and args.wide:
print("to print both ascii and wide strings, omit both")
else:
self.get_results(data, args.min_len, args.wide, args.ascii, cli_mode=True)
| [
"pecli.lib.utils.cli_out",
"re.compile"
] | [((1130, 1180), 're.compile', 're.compile', (["(b'([%s]{%d,})' % (ASCII_BYTE, min_len))"], {}), "(b'([%s]{%d,})' % (ASCII_BYTE, min_len))\n", (1140, 1180), False, 'import re\n'), ((1199, 1257), 're.compile', 're.compile', (["(b'((?:[%s]\\x00){%d,})' % (ASCII_BYTE, min_len))"], {}), "(b'((?:[%s]\\x00){%d,})' % (ASCII_BYTE, min_len))\n", (1209, 1257), False, 'import re\n'), ((1521, 1541), 'pecli.lib.utils.cli_out', 'cli_out', (['s', 'cli_mode'], {}), '(s, cli_mode)\n', (1528, 1541), False, 'from pecli.lib.utils import cli_out\n'), ((1778, 1798), 'pecli.lib.utils.cli_out', 'cli_out', (['s', 'cli_mode'], {}), '(s, cli_mode)\n', (1785, 1798), False, 'from pecli.lib.utils import cli_out\n')] |
from django.core.management.base import BaseCommand
from api.models import Country
from django.db import transaction
from django.db.models import Q
from api.logger import logger
class Command(BaseCommand):
help = 'Update Countries initially to set/revoke their in_search field (probably one-time run only)'
@transaction.atomic
def handle(self, *args, **options):
try:
# Update countries which should appear in search
inc_c = Country.objects.filter(independent=True, is_deprecated=False, record_type=1).update(in_search=True)
# Update countries which should NOT appear in search
# independent can be null too thus why negated check
exc_c = Country.objects.filter(~Q(independent=True) | Q(is_deprecated=True) | ~Q(record_type=1)).update(in_search=False)
logger.info('Successfully set in_search for Countries')
except Exception as ex:
logger.error(f'Failed to set in_search for Countries. Error: {str(ex)}')
| [
"api.logger.logger.info",
"django.db.models.Q",
"api.models.Country.objects.filter"
] | [((847, 902), 'api.logger.logger.info', 'logger.info', (['"""Successfully set in_search for Countries"""'], {}), "('Successfully set in_search for Countries')\n", (858, 902), False, 'from api.logger import logger\n'), ((472, 548), 'api.models.Country.objects.filter', 'Country.objects.filter', ([], {'independent': '(True)', 'is_deprecated': '(False)', 'record_type': '(1)'}), '(independent=True, is_deprecated=False, record_type=1)\n', (494, 548), False, 'from api.models import Country\n'), ((768, 789), 'django.db.models.Q', 'Q', ([], {'is_deprecated': '(True)'}), '(is_deprecated=True)\n', (769, 789), False, 'from django.db.models import Q\n'), ((793, 809), 'django.db.models.Q', 'Q', ([], {'record_type': '(1)'}), '(record_type=1)\n', (794, 809), False, 'from django.db.models import Q\n'), ((746, 765), 'django.db.models.Q', 'Q', ([], {'independent': '(True)'}), '(independent=True)\n', (747, 765), False, 'from django.db.models import Q\n')] |
from django import template
from tos.models import CGUItem
register = template.Library()
@register.simple_tag
def get_cgu_items():
return CGUItem.objects.filter(deleted_at__isnull=True)
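# Example template usage (illustrative; load this tag library under whatever name
# the module is registered as):
#   {% get_cgu_items as cgu_items %}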
| [
"django.template.Library",
"tos.models.CGUItem.objects.filter"
] | [((72, 90), 'django.template.Library', 'template.Library', ([], {}), '()\n', (88, 90), False, 'from django import template\n'), ((145, 192), 'tos.models.CGUItem.objects.filter', 'CGUItem.objects.filter', ([], {'deleted_at__isnull': '(True)'}), '(deleted_at__isnull=True)\n', (167, 192), False, 'from tos.models import CGUItem\n')] |
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a factory test to check the brightness of LCD backlight or LEDs."""
from cros.factory.device import device_utils
from cros.factory.test.i18n import arg_utils as i18n_arg_utils
from cros.factory.test import test_case
from cros.factory.test import test_ui
from cros.factory.utils.arg_utils import Arg
class BrightnessTest(test_case.TestCase):
ARGS = [
i18n_arg_utils.I18nArg('msg', 'Message HTML'),
Arg('timeout_secs', int, 'Timeout value for the test in seconds.',
default=10),
Arg('levels', list, 'A sequence of brightness levels.'),
Arg('interval_secs', (int, float),
'Time for each brightness level in seconds.')
]
def setUp(self):
self.dut = device_utils.CreateDUTInterface()
self.ui.ToggleTemplateClass('font-large', True)
self.ui.BindStandardKeys()
self.ui.SetState([self.args.msg, test_ui.PASS_FAIL_KEY_LABEL])
def runTest(self):
"""Starts an infinite loop to change brightness."""
self.ui.StartFailingCountdownTimer(self.args.timeout_secs)
while True:
for level in self.args.levels:
self._SetBrightnessLevel(level)
self.Sleep(self.args.interval_secs)
def _SetBrightnessLevel(self, level):
raise NotImplementedError
| [
"cros.factory.test.i18n.arg_utils.I18nArg",
"cros.factory.utils.arg_utils.Arg",
"cros.factory.device.device_utils.CreateDUTInterface"
] | [((538, 583), 'cros.factory.test.i18n.arg_utils.I18nArg', 'i18n_arg_utils.I18nArg', (['"""msg"""', '"""Message HTML"""'], {}), "('msg', 'Message HTML')\n", (560, 583), True, 'from cros.factory.test.i18n import arg_utils as i18n_arg_utils\n'), ((591, 669), 'cros.factory.utils.arg_utils.Arg', 'Arg', (['"""timeout_secs"""', 'int', '"""Timeout value for the test in seconds."""'], {'default': '(10)'}), "('timeout_secs', int, 'Timeout value for the test in seconds.', default=10)\n", (594, 669), False, 'from cros.factory.utils.arg_utils import Arg\n'), ((687, 742), 'cros.factory.utils.arg_utils.Arg', 'Arg', (['"""levels"""', 'list', '"""A sequence of brightness levels."""'], {}), "('levels', list, 'A sequence of brightness levels.')\n", (690, 742), False, 'from cros.factory.utils.arg_utils import Arg\n'), ((750, 835), 'cros.factory.utils.arg_utils.Arg', 'Arg', (['"""interval_secs"""', '(int, float)', '"""Time for each brightness level in seconds."""'], {}), "('interval_secs', (int, float), 'Time for each brightness level in seconds.'\n )\n", (753, 835), False, 'from cros.factory.utils.arg_utils import Arg\n'), ((880, 913), 'cros.factory.device.device_utils.CreateDUTInterface', 'device_utils.CreateDUTInterface', ([], {}), '()\n', (911, 913), False, 'from cros.factory.device import device_utils\n')] |
import pandas as pd
import os
import dotenv
from dotenv import load_dotenv
import datetime
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from app.other_data_pull import spy_pull, fred_pull
from app.port_data_pull import port_data_pull
from app.portfolio_import import portfolio_import
from app import APP_ENV
# -------------------------------------------------------------------------------------
# FUNCTIONS ---------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
def to_pct(dec):
'''
Converts a numeric value to formatted string for printing and display purposes.
Param: dec (int or float) like 0.403321
Example: to_pct(0.403321)
Returns: 40.33%
'''
return f'{dec:.2%}'
def two_dec(dec):
'''
Converts a numeric value to formatted string for printing and display purposes.
Param: dec (int or float) like 4000.444444
Example: two_dec(4000.444444)
Returns: 4,000.44
'''
return f'{dec:,.2f}'
def pd_describe(mon_len):
'''
Converts a specified number of months to a text description of years and months.
Param: mon_len (int) like 17
    Example: pd_describe(17)
Returns: 1 Year and 5 Months
'''
full_years = int(mon_len / 12)
resid_months = mon_len % 12
if (full_years > 0 and resid_months > 0):
join_str = ' and '
else:
join_str = ''
if full_years == 0:
yr_str = ''
elif full_years == 1:
yr_str = f'{full_years} Year'
else:
yr_str = f'{full_years} Years'
if resid_months == 0:
mon_str = ''
elif resid_months == 1:
mon_str = f'{resid_months} Month'
else:
mon_str=f'{resid_months} Months'
pd_detail=f'{yr_str}{join_str}{mon_str}'
return pd_detail
def returns(dataset, period_length, min_start, max_end):
'''
Calculates various portfolio performance measures and prepares data for data visualization.
'''
# Calculate percent returns of individual portfolio positions
working_data = dataset
working_data['mret'] = working_data.groupby('ticker')['adj close'].pct_change()
working_data['mretp1'] = working_data['mret'] + 1
# Calculate share values over time (used to pull analysis period starting portfolio values)
working_data['sh val'] = working_data['qty'] * working_data['close']
# Define analysis period length. For now, analysis period start date is
# based on the data availability of the individual positions. The most recent
# first monthly data point for a given stock in the portfolio becomes the analysis
# start date. This is a limitation of the data/API.
pd_len = period_length
pd_end = max_end
pd_start = max(max_end - (pd_len * 12), min_start)
# Create dataset of asset values by position at the analysis start date
pd_start_val = working_data.loc[working_data['month'] == pd_start]
pd_start_val = pd_start_val.set_index('ticker')
pd_start_val = pd_start_val['sh val'].rename('start val')
# Caclulate cumulative returns and corresponding monthly values of individual
# portfolio positions over time
cum_ret_set = working_data.loc[(working_data['month'] > pd_start) & (working_data['month'] <= pd_end)]
cum_ret_set = cum_ret_set.set_index('ticker')
cum_ret_set['cumret'] = cum_ret_set.groupby('ticker')['mretp1'].cumprod()
cum_ret_set = cum_ret_set.join(pd_start_val, on='ticker')
cum_ret_set['mon val'] = cum_ret_set['start val'] * cum_ret_set['cumret']
# Calculate monthly returns on the total portfolio over time
port_ret = cum_ret_set.groupby('month')[['start val', 'mon val']].sum()
port_ret['cum ret'] = port_ret['mon val'] / port_ret['start val']
port_ret['mon ret'] = port_ret['mon val'].pct_change()
# Replace analysis period start month portfolio return (was na due to
# pct_change() function)
port_ret.loc[pd_start + 1,
'mon ret'] = port_ret.loc[pd_start + 1, 'cum ret'] - 1
# Merge in S&P 500 data from other_data_pull module
port_ret = port_ret.join(spy_join)
# Merge in 1Y constant maturity treasury data from other_data_pull module
port_ret = port_ret.join(fred_join)
# Calculate S&P 500 returns and cumulative return over analysis period
port_ret['spretp1'] = port_ret['spret'] + 1
port_ret['cum spret'] = port_ret['spretp1'].cumprod()
port_ret = port_ret.drop(columns=['spretp1'])
# Calculate portfolio and S&P 500 excess returns over risk free rate
port_ret['exret'] = port_ret['mon ret'] - port_ret['rate']
port_ret['exspret'] = port_ret['spret'] - port_ret['rate']
# Calculate average annual and monthly returns
months = len(port_ret)
years = months / 12
avg_ann_ret = (port_ret.loc[pd_end, 'cum ret'])**(1 / years) - 1
avg_mon_ret = (port_ret.loc[pd_end, 'cum ret'])**(1 / months) - 1
avg_ann_spret = (port_ret.loc[pd_end, 'cum spret'])**(1 / years) - 1
avg_mon_spret = (port_ret.loc[pd_end, 'cum spret'])**(1 / months) - 1
#Calculate return standard deviations
mon_sdev = port_ret['mon ret'].std()
ann_sdev = mon_sdev * (12 ** .5)
mon_sp_sdev = port_ret['spret'].std()
ann_sp_sdev = mon_sp_sdev * (12 ** .5)
# Calculate portfolio beta (covariance of portfolio and S&P 500 divided by
# volatility of S&P 500)
beta = port_ret.cov().loc['mon ret', 'spret'] / port_ret.cov().loc['spret', 'spret']
# Calculate sharpe ratios
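    # (mean monthly excess return over the risk-free rate divided by its standard
    # deviation, annualised with sqrt(12) since the underlying data are monthly)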
sharpe_port = (port_ret['exret'].mean() / port_ret['exret'].std()) * (12 ** .5)
sharpe_sp = (port_ret['exspret'].mean() / port_ret['exspret'].std()) * (12 ** .5)
# Assemble dictionary of calculation results
ret_calc = {'years_tgt': pd_len, 'years_act': years, 'months_act': months, 'st_date': pd_start.strftime('%Y-%m'),
'end_date': pd_end.strftime('%Y-%m'), 'ann_ret': avg_ann_ret, 'mon_ret': avg_mon_ret, 'ann_sdev': ann_sdev, 'mon_sdev': mon_sdev, 'ann_spret': avg_ann_spret, 'mon_spret': avg_mon_spret, 'ann_sp_sdev': ann_sp_sdev, 'mon_sp_sdev': mon_sp_sdev, 'beta': beta, 'sharpe_port': sharpe_port, 'sharpe_sp': sharpe_sp}
# Create total (cumulative) returns dataset for data visualization
tot_ret_data = port_ret[['cum ret', 'cum spret']] - 1
app_df = pd.DataFrame([[tot_ret_data.index.min() - 1, 0, 0]], columns=['month', 'cum ret', 'cum spret']).set_index('month')
tot_ret_data=tot_ret_data.append(app_df).sort_index()
tot_ret_data.index = tot_ret_data.index.to_series().astype(str)
tot_ret_dict = tot_ret_data.reset_index().to_dict(orient='list')
return ret_calc, tot_ret_dict, port_ret
# -------------------------------------------------------------------------------------
# CODE --------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
if __name__=='__main__':
    # Load environment variables
load_dotenv()
port_file_name = os.environ.get('PORTFOLIO_FILE_NAME')
ap_api_key = os.environ.get('ALPHAVANTAGE_API_KEY')
fred_api_key = os.environ.get('FRED_API_KEY')
portfolio = portfolio_import(port_file_name)
if APP_ENV == 'development':
# Requires that each of other_data_pull and port_data_pull modules be
# run separately/individually (i.e., not called from within this program)
sub = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', 'data', "working_port.csv"), parse_dates=['timestamp', 'month'])
sub['month']=sub['month'].dt.to_period('M')
spy_join = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', 'data', "working_spy.csv"), parse_dates=['month'])
spy_join['month'] = spy_join['month'].dt.to_period('M')
spy_join=spy_join.set_index('month')
fred_join = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', 'data', "working_fred.csv"), parse_dates=['month'])
fred_join['month'] = fred_join['month'].dt.to_period('M')
fred_join=fred_join.set_index('month')
maxomin = sub['month'].min()
minomax = sub['month'].max()
else:
# Call on other_data_pull module for S&P 500 and risk free rate data from
# Alpha Vantage and FRED (Federal Reserve Economic Data) APIs
spy_join = spy_pull(ap_api_key)
fred_join = fred_pull(fred_api_key)
# Call on port_data_pull module for monthly data on individual portfolio stocks
# from Alpha Vantage API
sub, minomax, maxomin=port_data_pull(portfolio,ap_api_key)
# Collect and store results, datasets, and chart elements for 1, 2, 3, and 5 year analysis periods
# (but only if sufficient data exists for all portfolio positions). If data are insufficient,
# only store results for complete or near complete periods. For example, if the longest data
# sampling period for one stock in the portfolio is 2 years and 7 months, then the 3-year
# analysis period will record results for a period of 2 years and 7 months, and the loop
# will not bother with the 5-year calculations.
results = []
tot_ret=[]
x = 0
keep = []
figs = []
for i in [1,2,3,5]:
if x==0:
temp_returns, temp_tot, temp_review = returns(sub, i, maxomin, minomax)
results.append(temp_returns)
tot_ret.append(temp_tot)
keep.append(i)
figs.append({'port line': go.Scatter(x=temp_tot['month'], y=temp_tot['cum ret'], name='Portfolio Cumulative Return', line=dict(color='firebrick', width=4)), 'sp line': go.Scatter(x=temp_tot['month'], y=temp_tot['cum spret'], name='S&P 500 Cumulative Return', line=dict(color='royalblue', width=4))})
if temp_returns['years_tgt'] != temp_returns['years_act']:
x = 1
# MAKE CHARTS/TABLES!
axis_font = dict(size=16, family='Times New Roman')
tick_font = dict(size=12, family='Times New Roman')
for i in range(len(figs)):
fig = make_subplots(rows=2, cols=1, vertical_spacing=0.03, row_width=[0.75,0.25], specs=[[{'type':'table'}], [{'type':'scatter'}]])
fig.add_trace(figs[i]['port line'], row=2, col=1)
fig.add_trace(figs[i]['sp line'], row=2, col=1,)
pd_months = results[i]['months_act']
fig.update_layout(title=dict(text=f'Portfolio Performance Report: Monthly Returns over Last {pd_describe(pd_months)}', font=dict(family='Times New Roman', size=20)))
fig.update_layout(xaxis=dict(title=dict(text='Month', font=axis_font), ticks='outside', tickfont=tick_font))
fig.update_layout(yaxis=dict(title=dict(text='Cumulative Monthly Returns (%)', font=axis_font), ticks='outside', tickfont=tick_font, tickformat='.1%'))
fig.update_layout(legend=dict(orientation='h', font=axis_font))
col1 = ['Avg. Annual Return', 'Std. Dev. (Ann.)', 'Sharpe Ratio', 'Beta']
col2 = [to_pct(results[i]['ann_ret']), to_pct(results[i]['ann_sdev']), two_dec(results[i]['sharpe_port']), two_dec(results[i]['beta'])]
col3 = [to_pct(results[i]['ann_spret']), to_pct(results[i]['ann_sp_sdev']), two_dec(results[i]['sharpe_sp']), two_dec(1.00)]
fig.add_trace(go.Table(header=dict(values=['Statistic', 'Portfolio', 'S&P 500']), cells=dict(values=[col1, col2, col3])),row=1,col=1)
fig.show()
| [
"app.port_data_pull.port_data_pull",
"os.path.abspath",
"app.other_data_pull.fred_pull",
"app.other_data_pull.spy_pull",
"dotenv.load_dotenv",
"os.environ.get",
"app.portfolio_import.portfolio_import",
"plotly.subplots.make_subplots"
] | [((7076, 7089), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7087, 7089), False, 'from dotenv import load_dotenv\n'), ((7111, 7148), 'os.environ.get', 'os.environ.get', (['"""PORTFOLIO_FILE_NAME"""'], {}), "('PORTFOLIO_FILE_NAME')\n", (7125, 7148), False, 'import os\n'), ((7166, 7204), 'os.environ.get', 'os.environ.get', (['"""ALPHAVANTAGE_API_KEY"""'], {}), "('ALPHAVANTAGE_API_KEY')\n", (7180, 7204), False, 'import os\n'), ((7224, 7254), 'os.environ.get', 'os.environ.get', (['"""FRED_API_KEY"""'], {}), "('FRED_API_KEY')\n", (7238, 7254), False, 'import os\n'), ((7272, 7304), 'app.portfolio_import.portfolio_import', 'portfolio_import', (['port_file_name'], {}), '(port_file_name)\n', (7288, 7304), False, 'from app.portfolio_import import portfolio_import\n'), ((8520, 8540), 'app.other_data_pull.spy_pull', 'spy_pull', (['ap_api_key'], {}), '(ap_api_key)\n', (8528, 8540), False, 'from app.other_data_pull import spy_pull, fred_pull\n'), ((8561, 8584), 'app.other_data_pull.fred_pull', 'fred_pull', (['fred_api_key'], {}), '(fred_api_key)\n', (8570, 8584), False, 'from app.other_data_pull import spy_pull, fred_pull\n'), ((8737, 8774), 'app.port_data_pull.port_data_pull', 'port_data_pull', (['portfolio', 'ap_api_key'], {}), '(portfolio, ap_api_key)\n', (8751, 8774), False, 'from app.port_data_pull import port_data_pull\n'), ((10210, 10342), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'vertical_spacing': '(0.03)', 'row_width': '[0.75, 0.25]', 'specs': "[[{'type': 'table'}], [{'type': 'scatter'}]]"}), "(rows=2, cols=1, vertical_spacing=0.03, row_width=[0.75, 0.25],\n specs=[[{'type': 'table'}], [{'type': 'scatter'}]])\n", (10223, 10342), False, 'from plotly.subplots import make_subplots\n'), ((7555, 7580), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7570, 7580), False, 'import os\n'), ((7780, 7805), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7795, 7805), False, 'import os\n'), ((8049, 8074), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (8064, 8074), False, 'import os\n')] |
from keras_tweaks import idseqs_to_mask
import tensorflow as tf
class AllTests(tf.test.TestCase):
def test1(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 0, 1],
[0, 1, 1],
[0, 2, 0],
[0, 3, 0],
[0, 4, 2],
[0, 5, 2],
[1, 0, 1],
[1, 2, 2],
[1, 3, 1],
[1, 4, 0],
[1, 5, 0]),
values=[True for _ in range(11)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3], dense=False)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test2(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 0, 1],
[0, 1, 1],
[0, 2, 0],
[0, 3, 0],
[0, 4, 2],
[0, 5, 2],
[1, 0, 1],
[1, 2, 2],
[1, 3, 1],
[1, 4, 0],
[1, 5, 0]),
values=[1 for _ in range(11)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3],
dense=False, dtype=tf.uint8)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
# self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test3(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 0, 1],
[0, 1, 1],
[0, 2, 0],
[0, 3, 0],
[0, 4, 2],
[0, 5, 2],
[1, 0, 1],
[1, 2, 2],
[1, 3, 1],
[1, 4, 0],
[1, 5, 0]),
values=[1.0 for _ in range(11)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3],
dense=False, dtype=tf.float64)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
# self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test4(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 2, 0],
[0, 3, 0],
[0, 4, 1],
[0, 5, 1],
[1, 1, 2],
[1, 2, 1],
[1, 4, 0],
[1, 5, 0]),
values=[True for _ in range(8)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, ignore=[1],
dense=False, dtype=tf.bool)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test5(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 2, 0],
[0, 3, 0],
[0, 4, 1],
[0, 5, 1],
[1, 1, 2],
[1, 2, 1],
[1, 4, 0],
[1, 5, 0]),
values=[True for _ in range(8)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, ignore=[1],
dense=True, dtype=tf.bool)
self.assertAllEqual(masks, tf.sparse.to_dense(target))
self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.test.main",
"keras_tweaks.idseqs_to_mask",
"tensorflow.sparse.to_dense"
] | [((4219, 4233), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (4231, 4233), True, 'import tensorflow as tf\n'), ((646, 719), 'keras_tweaks.idseqs_to_mask', 'idseqs_to_mask', (['idseqs'], {'n_seqlen': '(6)', 'n_vocab_sz': '(3)', 'ignore': '[3]', 'dense': '(False)'}), '(idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3], dense=False)\n', (660, 719), False, 'from keras_tweaks import idseqs_to_mask\n'), ((1483, 1576), 'keras_tweaks.idseqs_to_mask', 'idseqs_to_mask', (['idseqs'], {'n_seqlen': '(6)', 'n_vocab_sz': '(3)', 'ignore': '[3]', 'dense': '(False)', 'dtype': 'tf.uint8'}), '(idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3], dense=False,\n dtype=tf.uint8)\n', (1497, 1576), False, 'from keras_tweaks import idseqs_to_mask\n'), ((2352, 2447), 'keras_tweaks.idseqs_to_mask', 'idseqs_to_mask', (['idseqs'], {'n_seqlen': '(6)', 'n_vocab_sz': '(3)', 'ignore': '[3]', 'dense': '(False)', 'dtype': 'tf.float64'}), '(idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3], dense=False,\n dtype=tf.float64)\n', (2366, 2447), False, 'from keras_tweaks import idseqs_to_mask\n'), ((3142, 3216), 'keras_tweaks.idseqs_to_mask', 'idseqs_to_mask', (['idseqs'], {'n_seqlen': '(6)', 'ignore': '[1]', 'dense': '(False)', 'dtype': 'tf.bool'}), '(idseqs, n_seqlen=6, ignore=[1], dense=False, dtype=tf.bool)\n', (3156, 3216), False, 'from keras_tweaks import idseqs_to_mask\n'), ((3913, 3986), 'keras_tweaks.idseqs_to_mask', 'idseqs_to_mask', (['idseqs'], {'n_seqlen': '(6)', 'ignore': '[1]', 'dense': '(True)', 'dtype': 'tf.bool'}), '(idseqs, n_seqlen=6, ignore=[1], dense=True, dtype=tf.bool)\n', (3927, 3986), False, 'from keras_tweaks import idseqs_to_mask\n'), ((775, 800), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['masks'], {}), '(masks)\n', (793, 800), True, 'import tensorflow as tf\n'), ((802, 828), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['target'], {}), '(target)\n', (820, 828), True, 'import tensorflow as tf\n'), ((1640, 1665), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['masks'], {}), '(masks)\n', (1658, 1665), True, 'import tensorflow as tf\n'), ((1667, 1693), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['target'], {}), '(target)\n', (1685, 1693), True, 'import tensorflow as tf\n'), ((2511, 2536), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['masks'], {}), '(masks)\n', (2529, 2536), True, 'import tensorflow as tf\n'), ((2538, 2564), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['target'], {}), '(target)\n', (2556, 2564), True, 'import tensorflow as tf\n'), ((3284, 3309), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['masks'], {}), '(masks)\n', (3302, 3309), True, 'import tensorflow as tf\n'), ((3311, 3337), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['target'], {}), '(target)\n', (3329, 3337), True, 'import tensorflow as tf\n'), ((4048, 4074), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['target'], {}), '(target)\n', (4066, 4074), True, 'import tensorflow as tf\n')] |
# apis_v1/test_views_voter_email_address_save.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.urls import reverse
from django.test import TestCase
from email_outbound.models import EmailAddress, EmailManager
import json
class WeVoteAPIsV1TestsVoterEmailAddressRetrieve(TestCase):
databases = ["default", "readonly"]
def setUp(self):
self.generate_voter_device_id_url = reverse("apis_v1:deviceIdGenerateView")
self.voter_create_url = reverse("apis_v1:voterCreateView")
self.voter_email_address_save_url = reverse("apis_v1:voterEmailAddressSaveView")
self.voter_email_address_retrieve_url = reverse("apis_v1:voterEmailAddressRetrieveView")
def test_retrieve_with_no_voter_device_id(self):
response = self.client.get(self.voter_email_address_retrieve_url)
json_data = json.loads(response.content.decode())
self.assertEqual('status' in json_data, True, "status expected in the json response, and not found")
self.assertEqual(json_data['status'],
"VALID_VOTER_DEVICE_ID_MISSING",
"status = {status} Expected status VALID_VOTER_DEVICE_ID_MISSING"
"voter_device_id: {voter_device_id}".format(status=json_data['status'],
voter_device_id=json_data['voter_device_id']))
self.assertEqual(len(json_data["email_address_list"]), 0,
"Expected email_address_list to have length 0, "
"actual length = {length}".format(length=len(json_data['email_address_list'])))
def test_retrieve_with_voter_device_id(self):
response = self.client.get(self.generate_voter_device_id_url)
json_data = json.loads(response.content.decode())
voter_device_id = json_data['voter_device_id'] if 'voter_device_id' in json_data else ''
# Create a voter so we can test retrieve
response2 = self.client.get(self.voter_create_url, {'voter_device_id': voter_device_id})
json_data2 = json.loads(response2.content.decode())
self.assertEqual('status' in json_data2, True,
"status expected in the voterEmailAddressRetrieveView json response but not found")
self.assertEqual('voter_device_id' in json_data2, True,
"voter_device_id expected in the voterEmailAddressRetrieveView json response but not found")
| [
"django.urls.reverse"
] | [((417, 456), 'django.urls.reverse', 'reverse', (['"""apis_v1:deviceIdGenerateView"""'], {}), "('apis_v1:deviceIdGenerateView')\n", (424, 456), False, 'from django.urls import reverse\n'), ((489, 523), 'django.urls.reverse', 'reverse', (['"""apis_v1:voterCreateView"""'], {}), "('apis_v1:voterCreateView')\n", (496, 523), False, 'from django.urls import reverse\n'), ((568, 612), 'django.urls.reverse', 'reverse', (['"""apis_v1:voterEmailAddressSaveView"""'], {}), "('apis_v1:voterEmailAddressSaveView')\n", (575, 612), False, 'from django.urls import reverse\n'), ((661, 709), 'django.urls.reverse', 'reverse', (['"""apis_v1:voterEmailAddressRetrieveView"""'], {}), "('apis_v1:voterEmailAddressRetrieveView')\n", (668, 709), False, 'from django.urls import reverse\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Redundant misc. functions to be eventually removed from AC_tools.
"""
import os
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from pandas import DataFrame
# time
import time
import datetime as datetime
# math
from math import radians, sin, cos, asin, sqrt, pi, atan2
def get_arr_edge_indices(arr, res='4x5', extra_points_point_on_edge=None,
verbose=True, debug=False):
"""
    Find the points in a lon, lat (2D) grid where the value changes between
    adjacent grid boxes (i.e. the edge of a region)
"""
if verbose:
print(('get_arr_edge_indices for arr of shape: ', arr.shape))
# initialise variables
lon_c, lat_c, NIU = get_latlonalt4res(res=res, centre=True)
lon_e, lat_e, NIU = get_latlonalt4res(res=res, centre=False)
lon_diff = lon_e[-5]-lon_e[-6]
lat_diff = lat_e[-5]-lat_e[-6]
nn, n, = 0, 0
last_lat_box = arr[nn, n]
coords = []
last_lon_box = arr[nn, n]
need_lon_outer_edge, need_lat_outer_edge = False, False
if debug:
print((lon_e, lat_e))
# ---- Loop X dimension ( lon )
for nn, lon_ in enumerate(lon_c):
# Loop Y dimension ( lat ) and store edges
for n, lat_ in enumerate(lat_c):
if debug:
print((arr[nn, n], last_lat_box, last_lon_box,
arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))
if arr[nn, n] != last_lat_box:
                # If 1st lat, select bottom of box
point_lon = lon_e[nn]+lon_diff/2
if need_lat_outer_edge:
point_lat = lat_e[n+1]
else:
point_lat = lat_e[n]
need_lat_outer_edge = True
need_lat_outer_edge = False
                # Add mid point to coordinates list
if isinstance(extra_points_point_on_edge, type(None)):
mid_point = [point_lon, point_lat]
coords += [mid_point]
# Add given number of points along edge
else:
coords += [[lon_e[nn]+(lon_diff*i), point_lat] for i in
np.linspace(0, 1, extra_points_point_on_edge,
endpoint=True)]
# temporally save the previous box's value
last_lat_box = arr[nn, n]
# ---- Loop Y dimension ( lat )
for n, lat_ in enumerate(lat_c):
if debug:
print((arr[nn, n], last_lat_box, last_lon_box,
arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))
# Loop X dimension ( lon ) and store edges
for nn, lon_ in enumerate(lon_c):
            # If the value changed, add the edge point to the list
if arr[nn, n] != last_lon_box:
point_lat = lat_e[n]+lat_diff/2
# Make sure we select the edge lon
if need_lon_outer_edge:
point_lon = lon_e[nn+1]
else:
point_lon = lon_e[nn]
need_lon_outer_edge = True
need_lon_outer_edge = False
# Add mid point to coordinates list
if isinstance(extra_points_point_on_edge, type(None)):
mid_point = [point_lon, point_lat]
coords += [mid_point]
# Add given number of points along edge
else:
coords += [[point_lon, lat_e[n]+(lat_diff*i)] for i in
np.linspace(0, 1, extra_points_point_on_edge,
endpoint=True)]
# temporally save the previous box's value
last_lon_box = arr[nn, n]
return coords
def split_data_by_days(data=None, dates=None, day_list=None,
verbose=False, debug=False):
"""
    Takes a list of datetimes and data and returns the data split into
    per-day lists, together with the day bins used
"""
if verbose:
print('split_data_by_days called')
# Create DataFrame of Data and dates
df = DataFrame(data, index=dates, columns=['data'])
# Add list of dates ( just year, month, day ) <= this is mappable, update?
df['days'] = [datetime.datetime(*i.timetuple()[:3]) for i in dates]
if debug:
print(df)
# Get list of unique days
if isinstance(day_list, type(None)):
day_list = sorted(set(df['days'].values))
# Loop unique days and select data on these days
data4days = []
for day in day_list:
print((day, df[df['days'] == day]))
data4days += [df['data'][df['days'] == day]]
# Just return the values ( i.e. not pandas array )
data4days = [i.values.astype(float) for i in data4days]
print([type(i) for i in data4days])
# print data4days[0]
# sys.exit()
if debug:
print(('returning data for {} days, with lengths: '.format(
len(day_list)), [len(i) for i in data4days]))
# Return as list of days (datetimes) + list of data for each day
return data4days, day_list
def obs2grid(glon=None, glat=None, galt=None, nest='high res global',
sites=None, debug=False):
"""
    Returns grid indices for (observation) sites at given lat, lon and alt values.
Notes
-------
- Function flagged for removal
"""
if isinstance(glon, type(None)):
glon, glat, galt = get_latlonalt4res(nest=nest, centre=False,
debug=debug)
# Assume use of known CAST sites... unless others given.
if isinstance(sites, type(None)):
loc_dict = get_loc(rtn_dict=True)
sites = list(loc_dict.keys())
# Pull out site location indicies
indices_list = []
for site in sites:
lon, lat, alt = loc_dict[site]
vars = get_xy(lon, lat, glon, glat)
indices_list += [vars]
return indices_list
| [
"pandas.DataFrame",
"numpy.linspace"
] | [((4158, 4204), 'pandas.DataFrame', 'DataFrame', (['data'], {'index': 'dates', 'columns': "['data']"}), "(data, index=dates, columns=['data'])\n", (4167, 4204), False, 'from pandas import DataFrame\n'), ((2254, 2314), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'extra_points_point_on_edge'], {'endpoint': '(True)'}), '(0, 1, extra_points_point_on_edge, endpoint=True)\n', (2265, 2314), True, 'import numpy as np\n'), ((3607, 3667), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'extra_points_point_on_edge'], {'endpoint': '(True)'}), '(0, 1, extra_points_point_on_edge, endpoint=True)\n', (3618, 3667), True, 'import numpy as np\n')] |
from nanome._internal._util._serializers import _ArraySerializer, _StringSerializer, _ColorSerializer
from . import _AtomSerializerID
from . import _BondSerializer
from .. import _Residue
from nanome.util import Logs
from nanome._internal._util._serializers import _TypeSerializer
class _ResidueSerializer(_TypeSerializer):
def __init__(self, shallow = False):
self.shallow = shallow
self.array = _ArraySerializer()
self.atom = _AtomSerializerID()
self.bond = _BondSerializer()
self.color = _ColorSerializer()
self.string = _StringSerializer()
def version(self):
#Version 0 corresponds to Nanome release 1.10
return 1
def name(self):
return "Residue"
def serialize(self, version, value, context):
context.write_long(value._index)
self.array.set_type(self.atom)
if (self.shallow):
context.write_using_serializer(self.array, [])
else:
context.write_using_serializer(self.array, value._atoms)
self.array.set_type(self.bond)
if (self.shallow):
context.write_using_serializer(self.array, [])
else:
context.write_using_serializer(self.array, value._bonds)
context.write_bool(value._ribboned)
context.write_float(value._ribbon_size)
context.write_int(value._ribbon_mode)
context.write_using_serializer(self.color, value._ribbon_color)
if (version > 0):
context.write_bool(value._labeled)
context.write_using_serializer(self.string, value._label_text)
context.write_using_serializer(self.string, value._type)
context.write_int(value._serial)
context.write_using_serializer(self.string, value._name)
context.write_int(value._secondary_structure.value)
def deserialize(self, version, context):
residue = _Residue._create()
residue._index = context.read_long()
self.array.set_type(self.atom)
residue._set_atoms(context.read_using_serializer(self.array))
self.array.set_type(self.bond)
residue._set_bonds(context.read_using_serializer(self.array))
residue._ribboned = context.read_bool()
residue._ribbon_size = context.read_float()
residue._ribbon_mode = _Residue.RibbonMode.safe_cast(context.read_int())
residue._ribbon_color = context.read_using_serializer(self.color)
if (version > 0):
residue._labeled = context.read_bool()
residue._label_text = context.read_using_serializer(self.string)
residue._type = context.read_using_serializer(self.string)
residue._serial = context.read_int()
residue._name = context.read_using_serializer(self.string)
residue._secondary_structure = _Residue.SecondaryStructure.safe_cast(context.read_int())
return residue | [
"nanome._internal._util._serializers._StringSerializer",
"nanome._internal._util._serializers._ArraySerializer",
"nanome._internal._util._serializers._ColorSerializer"
] | [((419, 437), 'nanome._internal._util._serializers._ArraySerializer', '_ArraySerializer', ([], {}), '()\n', (435, 437), False, 'from nanome._internal._util._serializers import _ArraySerializer, _StringSerializer, _ColorSerializer\n'), ((537, 555), 'nanome._internal._util._serializers._ColorSerializer', '_ColorSerializer', ([], {}), '()\n', (553, 555), False, 'from nanome._internal._util._serializers import _ArraySerializer, _StringSerializer, _ColorSerializer\n'), ((578, 597), 'nanome._internal._util._serializers._StringSerializer', '_StringSerializer', ([], {}), '()\n', (595, 597), False, 'from nanome._internal._util._serializers import _ArraySerializer, _StringSerializer, _ColorSerializer\n')] |
#!/usr/bin/env python3
import logging
import subprocess
from typing import Dict
class Agent:
name: str
image: str
environment: Dict[str, str]
def __init__(self, name: str, image: str, environment: Dict[str, str]) -> None:
self.name = name
self.image = image
self.environment = environment
def run(self) -> None:
logging.info("Starting agent '%s' based on image '%s'", self.name, self.image)
subprocess.run(
[
"docker",
"run",
"-d",
"--rm",
"-it",
"--name",
self.name,
self.image,
"/bin/sleep",
"infinity",
]
)
def cleanup(self) -> None:
logging.info("Stopping agent '%s'", self.name)
subprocess.run(["docker", "stop", self.name])
def get_agent_by_label(name: str, label: str) -> Agent:
# TODO: lookup label in config file?
return Agent("ci-agent", "ubuntu:20.04", {})
| [
"logging.info",
"subprocess.run"
] | [((369, 447), 'logging.info', 'logging.info', (['"""Starting agent \'%s\' based on image \'%s\'"""', 'self.name', 'self.image'], {}), '("Starting agent \'%s\' based on image \'%s\'", self.name, self.image)\n', (381, 447), False, 'import logging\n'), ((456, 573), 'subprocess.run', 'subprocess.run', (["['docker', 'run', '-d', '--rm', '-it', '--name', self.name, self.image,\n '/bin/sleep', 'infinity']"], {}), "(['docker', 'run', '-d', '--rm', '-it', '--name', self.name,\n self.image, '/bin/sleep', 'infinity'])\n", (470, 573), False, 'import subprocess\n'), ((807, 853), 'logging.info', 'logging.info', (['"""Stopping agent \'%s\'"""', 'self.name'], {}), '("Stopping agent \'%s\'", self.name)\n', (819, 853), False, 'import logging\n'), ((862, 907), 'subprocess.run', 'subprocess.run', (["['docker', 'stop', self.name]"], {}), "(['docker', 'stop', self.name])\n", (876, 907), False, 'import subprocess\n')] |
# -*- coding: utf-8 -*-
#################################################################################################
import logging
import urllib
import requests
from resources.lib.util import JSONRPC
##################################################################################################
log = logging.getLogger("DINGS."+__name__)
##################################################################################################
class Artwork(object):
xbmc_host = 'localhost'
xbmc_port = None
xbmc_username = None
xbmc_password = None
def __init__(self):
if not self.xbmc_port:
self._set_webserver_details()
def _double_urlencode(self, text):
text = self.single_urlencode(text)
text = self.single_urlencode(text)
return text
@classmethod
def single_urlencode(cls, text):
        # urlencode needs a utf-8 encoded string
text = urllib.urlencode({'blahblahblah': text.encode('utf-8')})
text = text[13:]
return text.decode('utf-8') #return the result again as unicode
def _set_webserver_details(self):
# Get the Kodi webserver details - used to set the texture cache
get_setting_value = JSONRPC('Settings.GetSettingValue')
web_query = {
"setting": "services.webserver"
}
result = get_setting_value.execute(web_query)
try:
xbmc_webserver_enabled = result['result']['value']
except (KeyError, TypeError):
xbmc_webserver_enabled = False
if not xbmc_webserver_enabled:
# Enable the webserver, it is disabled
set_setting_value = JSONRPC('Settings.SetSettingValue')
web_port = {
"setting": "services.webserverport",
"value": 8080
}
set_setting_value.execute(web_port)
self.xbmc_port = 8080
web_user = {
"setting": "services.webserver",
"value": True
}
set_setting_value.execute(web_user)
self.xbmc_username = "kodi"
else:
# Webserver already enabled
web_port = {
"setting": "services.webserverport"
}
result = get_setting_value.execute(web_port)
try:
self.xbmc_port = result['result']['value']
except (TypeError, KeyError):
pass
web_user = {
"setting": "services.webserverusername"
}
result = get_setting_value.execute(web_user)
try:
self.xbmc_username = result['result']['value']
except (TypeError, KeyError):
pass
web_pass = {
"setting": "services.webserverpassword"
}
result = get_setting_value.execute(web_pass)
try:
self.xbmc_password = result['result']['value']
except (TypeError, KeyError):
pass
def cache_texture(self, url):
# Cache a single image url to the texture cache
if url:
log.debug("Processing: %s", url)
url = self._double_urlencode(url)
action_url = "http://%s:%s/image/image://%s" % (self.xbmc_host, self.xbmc_port, url)
try: # Add image to texture cache by simply calling it at the http endpoint
requests.head(url=(action_url),
auth=(self.xbmc_username, self.xbmc_password),
timeout=1)
except Exception as e: # We don't need the result
log.error("Feil ved precaching av fil %s med feilmelding %s", action_url, e.message) | [
"resources.lib.util.JSONRPC",
"requests.head",
"logging.getLogger"
] | [((317, 355), 'logging.getLogger', 'logging.getLogger', (["('DINGS.' + __name__)"], {}), "('DINGS.' + __name__)\n", (334, 355), False, 'import logging\n'), ((1228, 1263), 'resources.lib.util.JSONRPC', 'JSONRPC', (['"""Settings.GetSettingValue"""'], {}), "('Settings.GetSettingValue')\n", (1235, 1263), False, 'from resources.lib.util import JSONRPC\n'), ((1675, 1710), 'resources.lib.util.JSONRPC', 'JSONRPC', (['"""Settings.SetSettingValue"""'], {}), "('Settings.SetSettingValue')\n", (1682, 1710), False, 'from resources.lib.util import JSONRPC\n'), ((3424, 3515), 'requests.head', 'requests.head', ([], {'url': 'action_url', 'auth': '(self.xbmc_username, self.xbmc_password)', 'timeout': '(1)'}), '(url=action_url, auth=(self.xbmc_username, self.xbmc_password),\n timeout=1)\n', (3437, 3515), False, 'import requests\n')] |
#!/usr/bin/env python
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
=========================================
Discrete time particle physics simulation
=========================================
A discrete time simulator of a system of bonded and unbonded particles, of
multiple types.
The actual physics calculations are deferred to the particles themselves. You
can have as many, or few, spatial dimensions as you like.
Example Usage
-------------
Create 3 particles, two of which are bonded and move noticeably closer after 5
cycles of simulation::
>>> laws = SimpleLaws(bondLength=5)
>>> sim = ParticleSystem(laws)
>>> sim.add( Particle(position=(10,10)) )
>>> sim.add( Particle(position=(10,20)) )
>>> sim.add( Particle(position=(30,40)) )
>>> sim.particles[0].makeBond(sim.particles, 1) # bond 1st and 2nd particles
>>> for p in sim.particles: print p.getLoc()
...
(10, 10)
(10, 20)
(30, 40)
>>> sim.run(cycles=5)
>>> for p in sim.particles: print p.getLoc()
...
[10.0, 13.940067328]
[10.0, 16.059932671999999]
[30, 40]
>>>
How does it work?
-----------------
Set up ParticleSystem by instantiating, specifying the laws to act between
particles and an (optional) set of initial particles.
Particles should be derived from the Particle base class (or have equivalent
functionality).
Particles can be added or removed from the system by reference, or removed by
their ID.
ParticleSystem will work for particles in space with any number of dimensions -
so long as all particles use the same!
Bonds between particles are up to the particles to manage for themselves.
The simulation runs in cycles when the run(...) method is called. Each cycle
advances the 'tick' count by 1. The tick count starts at zero, unless otherwise
specified during initialization.
The following attributes store the particles registered in ParticleSystem:
- particles -- simple list
- particleDict -- dictionary, indexed by particle.ID
ParticleSystem uses a SpatialIndexer object to speed up calculations.
SpatialIndexer reduce the search space when determining what particles lie
within a given region (radius of a point).
If your code changes the position of a particle, the simulator must be informed,
so it can update its spatial indexing data, by calling updateLoc(...)
The actual interactions between particles are calculated by the particles
themselves, *not* by ParticleSystem.
ParticleSystem calls the doInteractions(...) methods of all particles so they
can influence each other. It then calls the update(...) methods of all particles
so they can all update their positions and velocities ready for the next cycle.
This is a two stage process so that, in a given cycle, all particles see each
other at the same positions, irrespective of which particle's
doInteractions(...) method is called first. Particles should not apply their
velocities to update their position until their update(...) method is called.
"""
from SpatialIndexer import SpatialIndexer
class ParticleSystem(object):
"""\
ParticleSystem(laws[,initialParticles][,initialTick]) -> new ParticleSystem object
Discrete time simulator for a system of particles.
Keyword arguments:
- initialParticles -- list of particles (default=[])
- initialTick -- start value of the time 'tick' count (default=0)
"""
def __init__(self, laws, initialParticles = [], initialTick = 0):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
self.indexer = SpatialIndexer(laws.maxInteractRadius)
self.laws = laws
self.particles = []
self.tick = initialTick
self.particleDict = {}
self.add(*initialParticles)
def add(self, *newParticles):
"""Add the specified particle(s) into the system"""
self.particles.extend(newParticles)
for p in newParticles:
self.particleDict[p.ID] = p
self.indexer.updateLoc(*newParticles)
def remove(self, *oldParticles):
"""\
Remove the specified particle(s) from the system.
Note that this method does not destroy bonds from other particles to
these ones.
"""
for particle in oldParticles:
self.particles.remove(particle)
del self.particleDict[particle.ID]
self.indexer.remove(*oldParticles)
def removeByID(self, *ids):
"""\
Remove particle(s) as specified by id(s) from the system.
Note that this method does not destroy bonds from other particles to
these ones.
"""
particles = [self.particleDict[id] for id in ids]
self.remove( *particles )
def updateLoc(self, *particles):
"""\
Notify this physics system that the specified particle(s)
have changed position.
Must be called if you change a particle's position,
before calling run().
"""
self.indexer.updateLoc(*particles)
def withinRadius(self, centre, radius, filter=(lambda particle:True)):
"""\
withinRadius(centre,radius[,filter]) -> list of (particle,distSquared)
Returns a list of zero or more (particle,distSquared) tuples. The
particles listed are those within the specified radius of the specified
centre point, and that passed the (optional) filter function:
filter(particle) -> True if the particle is to be included in the list
"""
return self.indexer.withinRadius(centre, radius, filter)
def run(self, cycles = 1):
"""Run the simulation for a given number of cycles (default=1)"""
# optimisation to speed up access to these functions:
_indexer = self.indexer
_laws = self.laws
while cycles > 0:
cycles -= 1
self.tick += 1
_tick = self.tick
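            # Stage 1: every particle inspects the others (all still at this cycle's
            # positions) and works out the interactions acting on it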
for p in self.particles:
p.doInteractions(_indexer, _laws, _tick)
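            # Stage 2: only now do particles apply their accumulated velocity/position
            # changes, so stage 1 saw a consistent snapshot of the system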
for p in self.particles:
p.update(_laws)
_indexer.updateAll()
| [
"SpatialIndexer.SpatialIndexer"
] | [((4558, 4596), 'SpatialIndexer.SpatialIndexer', 'SpatialIndexer', (['laws.maxInteractRadius'], {}), '(laws.maxInteractRadius)\n', (4572, 4596), False, 'from SpatialIndexer import SpatialIndexer\n')] |
from rest_framework import parsers, renderers
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import ClientTokenSerializer
from .models import ClientToken, App
class ObtainClientAuth(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = ClientTokenSerializer
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
client_secret = serializer.data['client_secret']
app_id = serializer.data['app_id']
app = App.objects.get(pk=app_id)
token, created = ClientToken.objects.get_or_create(client_secret=client_secret, app=app)
return Response({'token': token.key})
obtain_client_auth = ObtainClientAuth.as_view()
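# Example exchange (illustrative; field names come from ClientTokenSerializer):
#   POST <url wired to obtain_client_auth> with {"client_secret": "...", "app_id": <App pk>}
#   -> {"token": "<token key>"}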
| [
"rest_framework.response.Response"
] | [((1018, 1048), 'rest_framework.response.Response', 'Response', (["{'token': token.key}"], {}), "({'token': token.key})\n", (1026, 1048), False, 'from rest_framework.response import Response\n')] |
# !/usr/bin/python
from itertools import groupby
def compress(data):
return ((len(list(group)), name) for name, group in groupby(data))
def decompress(data):
return (car * size for size, car in data)
my_data = 'get uuuuuuuuuuuuuuuup'
print(list(my_data))
compressed = compress(my_data)
print(''.join(decompress(compressed)))
| [
"itertools.groupby"
] | [((128, 141), 'itertools.groupby', 'groupby', (['data'], {}), '(data)\n', (135, 141), False, 'from itertools import groupby\n')] |
import numpy as np
import magpie
# check cartesian
def test_get_xedges():
xedges = magpie.grids.get_xedges(1., 2)
xedges = np.round(xedges, decimals=2)
assert len(xedges) == 3, "Length of xedges is incorrect."
assert xedges[-1] - xedges[0] == 1., "xedges range is incorrect."
xedges = magpie.grids.get_xedges(1., 2, xmin=-1.)
xedges = np.round(xedges, decimals=2)
assert xedges[0]==-1. and xedges[1]==-0.5 and xedges[-1]==0., "xedges with xmin are not as expected."
assert xedges[-1] - xedges[0] == 1., "xedges range is incorrect."
def test_xedges2mid():
xedges = magpie.grids.get_xedges(1., 10)
xmid = magpie.grids.xedges2mid(xedges)
xmid = np.round(xmid, decimals=2)
assert len(xedges) == len(xmid) + 1, "Length of xmid is incorrect."
assert xmid[0] == 0.05 and xmid[1] == 0.15 and xmid[5] == 0.55, "xmid is not as expected."
def test_xmid2edges():
xedges = magpie.grids.get_xedges(1., 10)
xmid = magpie.grids.xedges2mid(xedges)
xedges2 = magpie.grids.xmid2edges(xmid)
    assert np.round(np.sum(xedges-xedges2), decimals=2) == 0., "Conversion from xmid to xedges is not consistent with input xedges."
def test_grid1d():
xmid = magpie.grids.grid1d(10., 10)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid1d unexpected results."
xmid = magpie.grids.grid1d(10., 10, xmin=10)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid1d unexpected results."
xmid, xedges = magpie.grids.grid1d(10., 10, return_edges=True)
assert len(xmid)+1 == len(xedges), "Length of xmid and xedges is not as expected."
assert np.round(xedges[0], decimals=4) == 0. and np.round(xedges[7], decimals=4) == 7., "grid1d unexpected results."
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid1d unexpected results."
def test_grid2d():
x2d, y2d = magpie.grids.grid2d(10, 10)
assert np.shape(x2d) == (10, 10), "shape is not as expected."
assert np.shape(y2d) == (10, 10), "shape is not as expected."
x2d, y2d, xmid, ymid = magpie.grids.grid2d(10, 10, return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d, xmid, ymid = magpie.grids.grid2d(10, 10, mins=[10., 20.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d = magpie.grids.grid2d(10, [10, 20])
assert np.shape(x2d) == (10, 20), "shape is not as expected."
assert np.shape(y2d) == (10, 20), "shape is not as expected."
x2d, y2d, xmid, ymid = magpie.grids.grid2d([10, 20], [10, 20], return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d, xmid, ymid = magpie.grids.grid2d([10, 20], [10, 20], mins=[10., 20.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
def test_grid3d():
x3d, y3d, z3d = magpie.grids.grid3d(10, 10)
assert np.shape(x3d) == (10, 10, 10), "shape is not as expected."
assert np.shape(y3d) == (10, 10, 10), "shape is not as expected."
assert np.shape(z3d) == (10, 10, 10), "shape is not as expected."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d(10, 10, return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 0.5 and np.round(zmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d(10, 10, mins=[10., 20., 30.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 30.5 and np.round(zmid[7], decimals=4) == 37.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d = magpie.grids.grid3d(10, [10, 20, 30])
assert np.shape(x3d) == (10, 20, 30), "shape is not as expected."
assert np.shape(y3d) == (10, 20, 30), "shape is not as expected."
assert np.shape(z3d) == (10, 20, 30), "shape is not as expected."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d([10, 20, 30], [10, 20, 30], return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 0.5 and np.round(zmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d([10, 20, 30], [10, 20, 30], mins=[10., 20., 30], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 30.5 and np.round(zmid[7], decimals=4) == 37.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
# check polar
def test_polargrid():
r2d, p2d = magpie.grids.polargrid(10, 20)
assert np.shape(r2d) == (10, 20), "shape is not as expected."
assert np.shape(p2d) == (10, 20), "shape is not as expected."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 20, return1d=True)
assert np.round(rmid[0], decimals=4) == 0.05 and np.round(rmid[7], decimals=4) == 0.75, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round(np.pi/20, decimals=4) and np.round(pmid[7], decimals=4) == np.round(15*np.pi/20, decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 10, rmin=10., rmax=20., phimin=np.pi/2., phimax=np.pi, return1d=True)
assert np.round(rmid[0], decimals=4) == 10.5 and np.round(rmid[7], decimals=4) == 17.5, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round((np.pi/2.)/20 + np.pi/2., decimals=4) \
and np.round(pmid[7], decimals=4) == np.round(15*(np.pi/2.)/20 + np.pi/2., decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
def test_polargrid():
r2d, p2d = magpie.grids.polargrid(10, 20)
assert np.shape(r2d) == (10, 20), "shape is not as expected."
assert np.shape(p2d) == (10, 20), "shape is not as expected."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 20, return1d=True)
assert np.round(rmid[0], decimals=4) == 0.05 and np.round(rmid[7], decimals=4) == 0.75, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round(np.pi/20, decimals=4) and np.round(pmid[7], decimals=4) == np.round(15*np.pi/20, decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 10, rmin=10., rmax=20., phimin=np.pi/2., phimax=np.pi, return1d=True)
assert np.round(rmid[0], decimals=4) == 10.5 and np.round(rmid[7], decimals=4) == 17.5, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round((np.pi/2.)/20 + np.pi/2., decimals=4) \
and np.round(pmid[7], decimals=4) == np.round(15*(np.pi/2.)/20 + np.pi/2., decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
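# check equal-area polar grid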
def test_polarEA():
r, p = magpie.grids.polarEA_grid(10)
npix = magpie.grids.polarEA_npix(10)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
r, p = magpie.grids.polarEA_grid(6, base_nphi=3)
npix = magpie.grids.polarEA_npix(6, base_nphi=3)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
r, p = magpie.grids.polarEA_grid(10, base_nphi=3)
npix = magpie.grids.polarEA_npix(10, base_nphi=3)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
assert r[3*4**2] == 0.45, "r values are incorrect."
assert r[3*7**2] == 0.75, "r values are incorrect."
assert np.round(p[3*4**2], decimals=4) == np.round(np.pi/(3*(2*4+1)), decimals=4), "p values are incorrect."
assert np.round(p[3*7**2 + 7], decimals=4) == np.round(15*np.pi/(3*(2*7+1)), decimals=4), "p values are incorrect."
area = magpie.grids.polarEA_area(10, rmax=10., base_nphi=4)
assert(np.round(area, decimals=4) == np.round(np.pi/4., decimals=4)), "area calculation is incorrect."
| [
"magpie.grids.polargrid",
"numpy.sum",
"magpie.grids.grid1d",
"magpie.grids.grid3d",
"magpie.grids.xmid2edges",
"magpie.grids.get_xedges",
"magpie.grids.polarEA_grid",
"numpy.shape",
"magpie.grids.polarEA_npix",
"magpie.grids.polarEA_area",
"magpie.grids.xedges2mid",
"numpy.round",
"magpie.grids.grid2d"
] | [((90, 121), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(2)'], {}), '(1.0, 2)\n', (113, 121), False, 'import magpie\n'), ((134, 162), 'numpy.round', 'np.round', (['xedges'], {'decimals': '(2)'}), '(xedges, decimals=2)\n', (142, 162), True, 'import numpy as np\n'), ((308, 350), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(2)'], {'xmin': '(-1.0)'}), '(1.0, 2, xmin=-1.0)\n', (331, 350), False, 'import magpie\n'), ((362, 390), 'numpy.round', 'np.round', (['xedges'], {'decimals': '(2)'}), '(xedges, decimals=2)\n', (370, 390), True, 'import numpy as np\n'), ((604, 636), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(10)'], {}), '(1.0, 10)\n', (627, 636), False, 'import magpie\n'), ((647, 678), 'magpie.grids.xedges2mid', 'magpie.grids.xedges2mid', (['xedges'], {}), '(xedges)\n', (670, 678), False, 'import magpie\n'), ((690, 716), 'numpy.round', 'np.round', (['xmid'], {'decimals': '(2)'}), '(xmid, decimals=2)\n', (698, 716), True, 'import numpy as np\n'), ((921, 953), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(10)'], {}), '(1.0, 10)\n', (944, 953), False, 'import magpie\n'), ((964, 995), 'magpie.grids.xedges2mid', 'magpie.grids.xedges2mid', (['xedges'], {}), '(xedges)\n', (987, 995), False, 'import magpie\n'), ((1010, 1039), 'magpie.grids.xmid2edges', 'magpie.grids.xmid2edges', (['xmid'], {}), '(xmid)\n', (1033, 1039), False, 'import magpie\n'), ((1198, 1227), 'magpie.grids.grid1d', 'magpie.grids.grid1d', (['(10.0)', '(10)'], {}), '(10.0, 10)\n', (1217, 1227), False, 'import magpie\n'), ((1357, 1395), 'magpie.grids.grid1d', 'magpie.grids.grid1d', (['(10.0)', '(10)'], {'xmin': '(10)'}), '(10.0, 10, xmin=10)\n', (1376, 1395), False, 'import magpie\n'), ((1535, 1583), 'magpie.grids.grid1d', 'magpie.grids.grid1d', (['(10.0)', '(10)'], {'return_edges': '(True)'}), '(10.0, 10, return_edges=True)\n', (1554, 1583), False, 'import magpie\n'), ((1945, 1972), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '(10)'], {}), '(10, 10)\n', (1964, 1972), False, 'import magpie\n'), ((2132, 2174), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '(10)'], {'return1d': '(True)'}), '(10, 10, return1d=True)\n', (2151, 2174), False, 'import magpie\n'), ((2662, 2723), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '(10)'], {'mins': '[10.0, 20.0]', 'return1d': '(True)'}), '(10, 10, mins=[10.0, 20.0], return1d=True)\n', (2681, 2723), False, 'import magpie\n'), ((3201, 3234), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '[10, 20]'], {}), '(10, [10, 20])\n', (3220, 3234), False, 'import magpie\n'), ((3394, 3448), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['[10, 20]', '[10, 20]'], {'return1d': '(True)'}), '([10, 20], [10, 20], return1d=True)\n', (3413, 3448), False, 'import magpie\n'), ((3936, 4009), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['[10, 20]', '[10, 20]'], {'mins': '[10.0, 20.0]', 'return1d': '(True)'}), '([10, 20], [10, 20], mins=[10.0, 20.0], return1d=True)\n', (3955, 4009), False, 'import magpie\n'), ((4513, 4540), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '(10)'], {}), '(10, 10)\n', (4532, 4540), False, 'import magpie\n'), ((4789, 4831), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '(10)'], {'return1d': '(True)'}), '(10, 10, return1d=True)\n', (4808, 4831), False, 'import magpie\n'), ((5560, 5627), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '(10)'], {'mins': '[10.0, 20.0, 30.0]', 'return1d': '(True)'}), '(10, 10, mins=[10.0, 20.0, 30.0], 
return1d=True)\n', (5579, 5627), False, 'import magpie\n'), ((6341, 6378), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '[10, 20, 30]'], {}), '(10, [10, 20, 30])\n', (6360, 6378), False, 'import magpie\n'), ((6627, 6689), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['[10, 20, 30]', '[10, 20, 30]'], {'return1d': '(True)'}), '([10, 20, 30], [10, 20, 30], return1d=True)\n', (6646, 6689), False, 'import magpie\n'), ((7418, 7507), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['[10, 20, 30]', '[10, 20, 30]'], {'mins': '[10.0, 20.0, 30]', 'return1d': '(True)'}), '([10, 20, 30], [10, 20, 30], mins=[10.0, 20.0, 30],\n return1d=True)\n', (7437, 7507), False, 'import magpie\n'), ((8252, 8282), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {}), '(10, 20)\n', (8274, 8282), False, 'import magpie\n'), ((8442, 8487), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {'return1d': '(True)'}), '(10, 20, return1d=True)\n', (8464, 8487), False, 'import magpie\n'), ((9040, 9145), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(10)'], {'rmin': '(10.0)', 'rmax': '(20.0)', 'phimin': '(np.pi / 2.0)', 'phimax': 'np.pi', 'return1d': '(True)'}), '(10, 10, rmin=10.0, rmax=20.0, phimin=np.pi / 2.0,\n phimax=np.pi, return1d=True)\n', (9062, 9145), False, 'import magpie\n'), ((9743, 9773), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {}), '(10, 20)\n', (9765, 9773), False, 'import magpie\n'), ((9933, 9978), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {'return1d': '(True)'}), '(10, 20, return1d=True)\n', (9955, 9978), False, 'import magpie\n'), ((10531, 10636), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(10)'], {'rmin': '(10.0)', 'rmax': '(20.0)', 'phimin': '(np.pi / 2.0)', 'phimax': 'np.pi', 'return1d': '(True)'}), '(10, 10, rmin=10.0, rmax=20.0, phimin=np.pi / 2.0,\n phimax=np.pi, return1d=True)\n', (10553, 10636), False, 'import magpie\n'), ((11227, 11256), 'magpie.grids.polarEA_grid', 'magpie.grids.polarEA_grid', (['(10)'], {}), '(10)\n', (11252, 11256), False, 'import magpie\n'), ((11268, 11297), 'magpie.grids.polarEA_npix', 'magpie.grids.polarEA_npix', (['(10)'], {}), '(10)\n', (11293, 11297), False, 'import magpie\n'), ((11469, 11510), 'magpie.grids.polarEA_grid', 'magpie.grids.polarEA_grid', (['(6)'], {'base_nphi': '(3)'}), '(6, base_nphi=3)\n', (11494, 11510), False, 'import magpie\n'), ((11522, 11563), 'magpie.grids.polarEA_npix', 'magpie.grids.polarEA_npix', (['(6)'], {'base_nphi': '(3)'}), '(6, base_nphi=3)\n', (11547, 11563), False, 'import magpie\n'), ((11735, 11777), 'magpie.grids.polarEA_grid', 'magpie.grids.polarEA_grid', (['(10)'], {'base_nphi': '(3)'}), '(10, base_nphi=3)\n', (11760, 11777), False, 'import magpie\n'), ((11789, 11831), 'magpie.grids.polarEA_npix', 'magpie.grids.polarEA_npix', (['(10)'], {'base_nphi': '(3)'}), '(10, base_nphi=3)\n', (11814, 11831), False, 'import magpie\n'), ((12348, 12401), 'magpie.grids.polarEA_area', 'magpie.grids.polarEA_area', (['(10)'], {'rmax': '(10.0)', 'base_nphi': '(4)'}), '(10, rmax=10.0, base_nphi=4)\n', (12373, 12401), False, 'import magpie\n'), ((1060, 1084), 'numpy.sum', 'np.sum', (['(xedges - xedges2)'], {}), '(xedges - xedges2)\n', (1066, 1084), True, 'import numpy as np\n'), ((1984, 1997), 'numpy.shape', 'np.shape', (['x2d'], {}), '(x2d)\n', (1992, 1997), True, 'import numpy as np\n'), ((2050, 2063), 'numpy.shape', 'np.shape', (['y2d'], {}), '(y2d)\n', (2058, 2063), True, 'import numpy as np\n'), ((3246, 
3259), 'numpy.shape', 'np.shape', (['x2d'], {}), '(x2d)\n', (3254, 3259), True, 'import numpy as np\n'), ((3312, 3325), 'numpy.shape', 'np.shape', (['y2d'], {}), '(y2d)\n', (3320, 3325), True, 'import numpy as np\n'), ((4552, 4565), 'numpy.shape', 'np.shape', (['x3d'], {}), '(x3d)\n', (4560, 4565), True, 'import numpy as np\n'), ((4622, 4635), 'numpy.shape', 'np.shape', (['y3d'], {}), '(y3d)\n', (4630, 4635), True, 'import numpy as np\n'), ((4692, 4705), 'numpy.shape', 'np.shape', (['z3d'], {}), '(z3d)\n', (4700, 4705), True, 'import numpy as np\n'), ((6390, 6403), 'numpy.shape', 'np.shape', (['x3d'], {}), '(x3d)\n', (6398, 6403), True, 'import numpy as np\n'), ((6460, 6473), 'numpy.shape', 'np.shape', (['y3d'], {}), '(y3d)\n', (6468, 6473), True, 'import numpy as np\n'), ((6530, 6543), 'numpy.shape', 'np.shape', (['z3d'], {}), '(z3d)\n', (6538, 6543), True, 'import numpy as np\n'), ((8294, 8307), 'numpy.shape', 'np.shape', (['r2d'], {}), '(r2d)\n', (8302, 8307), True, 'import numpy as np\n'), ((8360, 8373), 'numpy.shape', 'np.shape', (['p2d'], {}), '(p2d)\n', (8368, 8373), True, 'import numpy as np\n'), ((9785, 9798), 'numpy.shape', 'np.shape', (['r2d'], {}), '(r2d)\n', (9793, 9798), True, 'import numpy as np\n'), ((9851, 9864), 'numpy.shape', 'np.shape', (['p2d'], {}), '(p2d)\n', (9859, 9864), True, 'import numpy as np\n'), ((12115, 12150), 'numpy.round', 'np.round', (['p[3 * 4 ** 2]'], {'decimals': '(4)'}), '(p[3 * 4 ** 2], decimals=4)\n', (12123, 12150), True, 'import numpy as np\n'), ((12150, 12197), 'numpy.round', 'np.round', (['(np.pi / (3 * (2 * 4 + 1)))'], {'decimals': '(4)'}), '(np.pi / (3 * (2 * 4 + 1)), decimals=4)\n', (12158, 12197), True, 'import numpy as np\n'), ((12228, 12267), 'numpy.round', 'np.round', (['p[3 * 7 ** 2 + 7]'], {'decimals': '(4)'}), '(p[3 * 7 ** 2 + 7], decimals=4)\n', (12236, 12267), True, 'import numpy as np\n'), ((12267, 12319), 'numpy.round', 'np.round', (['(15 * np.pi / (3 * (2 * 7 + 1)))'], {'decimals': '(4)'}), '(15 * np.pi / (3 * (2 * 7 + 1)), decimals=4)\n', (12275, 12319), True, 'import numpy as np\n'), ((12412, 12438), 'numpy.round', 'np.round', (['area'], {'decimals': '(4)'}), '(area, decimals=4)\n', (12420, 12438), True, 'import numpy as np\n'), ((12442, 12475), 'numpy.round', 'np.round', (['(np.pi / 4.0)'], {'decimals': '(4)'}), '(np.pi / 4.0, decimals=4)\n', (12450, 12475), True, 'import numpy as np\n'), ((1238, 1267), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (1246, 1267), True, 'import numpy as np\n'), ((1279, 1308), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (1287, 1308), True, 'import numpy as np\n'), ((1406, 1435), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (1414, 1435), True, 'import numpy as np\n'), ((1448, 1477), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (1456, 1477), True, 'import numpy as np\n'), ((1681, 1712), 'numpy.round', 'np.round', (['xedges[0]'], {'decimals': '(4)'}), '(xedges[0], decimals=4)\n', (1689, 1712), True, 'import numpy as np\n'), ((1723, 1754), 'numpy.round', 'np.round', (['xedges[7]'], {'decimals': '(4)'}), '(xedges[7], decimals=4)\n', (1731, 1754), True, 'import numpy as np\n'), ((1802, 1831), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (1810, 1831), True, 'import numpy as np\n'), ((1843, 1872), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], 
decimals=4)\n', (1851, 1872), True, 'import numpy as np\n'), ((2186, 2215), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (2194, 2215), True, 'import numpy as np\n'), ((2227, 2256), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (2235, 2256), True, 'import numpy as np\n'), ((2416, 2445), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (2424, 2445), True, 'import numpy as np\n'), ((2457, 2486), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (2465, 2486), True, 'import numpy as np\n'), ((2733, 2762), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (2741, 2762), True, 'import numpy as np\n'), ((2775, 2804), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (2783, 2804), True, 'import numpy as np\n'), ((2965, 2994), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (2973, 2994), True, 'import numpy as np\n'), ((3007, 3036), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (3015, 3036), True, 'import numpy as np\n'), ((3460, 3489), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (3468, 3489), True, 'import numpy as np\n'), ((3501, 3530), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (3509, 3530), True, 'import numpy as np\n'), ((3690, 3719), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (3698, 3719), True, 'import numpy as np\n'), ((3731, 3760), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (3739, 3760), True, 'import numpy as np\n'), ((4019, 4048), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (4027, 4048), True, 'import numpy as np\n'), ((4061, 4090), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (4069, 4090), True, 'import numpy as np\n'), ((4251, 4280), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (4259, 4280), True, 'import numpy as np\n'), ((4293, 4322), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (4301, 4322), True, 'import numpy as np\n'), ((4843, 4872), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (4851, 4872), True, 'import numpy as np\n'), ((4884, 4913), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (4892, 4913), True, 'import numpy as np\n'), ((5073, 5102), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (5081, 5102), True, 'import numpy as np\n'), ((5114, 5143), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (5122, 5143), True, 'import numpy as np\n'), ((5303, 5332), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (5311, 5332), True, 'import numpy as np\n'), ((5344, 5373), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (5352, 5373), True, 'import numpy as np\n'), ((5636, 5665), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (5644, 5665), True, 'import numpy as np\n'), ((5678, 5707), 'numpy.round', 'np.round', 
(['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (5686, 5707), True, 'import numpy as np\n'), ((5868, 5897), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (5876, 5897), True, 'import numpy as np\n'), ((5910, 5939), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (5918, 5939), True, 'import numpy as np\n'), ((6100, 6129), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (6108, 6129), True, 'import numpy as np\n'), ((6142, 6171), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (6150, 6171), True, 'import numpy as np\n'), ((6701, 6730), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (6709, 6730), True, 'import numpy as np\n'), ((6742, 6771), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (6750, 6771), True, 'import numpy as np\n'), ((6931, 6960), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (6939, 6960), True, 'import numpy as np\n'), ((6972, 7001), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (6980, 7001), True, 'import numpy as np\n'), ((7161, 7190), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (7169, 7190), True, 'import numpy as np\n'), ((7202, 7231), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (7210, 7231), True, 'import numpy as np\n'), ((7513, 7542), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (7521, 7542), True, 'import numpy as np\n'), ((7555, 7584), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (7563, 7584), True, 'import numpy as np\n'), ((7745, 7774), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (7753, 7774), True, 'import numpy as np\n'), ((7787, 7816), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (7795, 7816), True, 'import numpy as np\n'), ((7977, 8006), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (7985, 8006), True, 'import numpy as np\n'), ((8019, 8048), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (8027, 8048), True, 'import numpy as np\n'), ((8499, 8528), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (8507, 8528), True, 'import numpy as np\n'), ((8541, 8570), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (8549, 8570), True, 'import numpy as np\n'), ((8734, 8763), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (8742, 8763), True, 'import numpy as np\n'), ((8767, 8799), 'numpy.round', 'np.round', (['(np.pi / 20)'], {'decimals': '(4)'}), '(np.pi / 20, decimals=4)\n', (8775, 8799), True, 'import numpy as np\n'), ((8802, 8831), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (8810, 8831), True, 'import numpy as np\n'), ((8835, 8872), 'numpy.round', 'np.round', (['(15 * np.pi / 20)'], {'decimals': '(4)'}), '(15 * np.pi / 20, decimals=4)\n', (8843, 8872), True, 'import numpy as np\n'), ((9148, 9177), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (9156, 9177), 
True, 'import numpy as np\n'), ((9190, 9219), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (9198, 9219), True, 'import numpy as np\n'), ((9383, 9412), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (9391, 9412), True, 'import numpy as np\n'), ((9416, 9468), 'numpy.round', 'np.round', (['(np.pi / 2.0 / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(np.pi / 2.0 / 20 + np.pi / 2.0, decimals=4)\n', (9424, 9468), True, 'import numpy as np\n'), ((9477, 9506), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (9485, 9506), True, 'import numpy as np\n'), ((9510, 9569), 'numpy.round', 'np.round', (['(15 * (np.pi / 2.0) / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(15 * (np.pi / 2.0) / 20 + np.pi / 2.0, decimals=4)\n', (9518, 9569), True, 'import numpy as np\n'), ((9990, 10019), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (9998, 10019), True, 'import numpy as np\n'), ((10032, 10061), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (10040, 10061), True, 'import numpy as np\n'), ((10225, 10254), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (10233, 10254), True, 'import numpy as np\n'), ((10258, 10290), 'numpy.round', 'np.round', (['(np.pi / 20)'], {'decimals': '(4)'}), '(np.pi / 20, decimals=4)\n', (10266, 10290), True, 'import numpy as np\n'), ((10293, 10322), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (10301, 10322), True, 'import numpy as np\n'), ((10326, 10363), 'numpy.round', 'np.round', (['(15 * np.pi / 20)'], {'decimals': '(4)'}), '(15 * np.pi / 20, decimals=4)\n', (10334, 10363), True, 'import numpy as np\n'), ((10639, 10668), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (10647, 10668), True, 'import numpy as np\n'), ((10681, 10710), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (10689, 10710), True, 'import numpy as np\n'), ((10874, 10903), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (10882, 10903), True, 'import numpy as np\n'), ((10907, 10959), 'numpy.round', 'np.round', (['(np.pi / 2.0 / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(np.pi / 2.0 / 20 + np.pi / 2.0, decimals=4)\n', (10915, 10959), True, 'import numpy as np\n'), ((10968, 10997), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (10976, 10997), True, 'import numpy as np\n'), ((11001, 11060), 'numpy.round', 'np.round', (['(15 * (np.pi / 2.0) / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(15 * (np.pi / 2.0) / 20 + np.pi / 2.0, decimals=4)\n', (11009, 11060), True, 'import numpy as np\n')] |
from communication import *
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 1123))
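# basic client loop: print every message received from the server and answer it with a ping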
while True:
m = receive_message(s)
if m:
print(m, "\n")
ping(s)
print(s.getsockname())
print(socket.gethostbyname(socket.gethostname()))
print(socket.get) | [
"socket.gethostname",
"socket.socket"
] | [((47, 96), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (60, 96), False, 'import socket\n'), ((108, 128), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (126, 128), False, 'import socket\n'), ((262, 282), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (280, 282), False, 'import socket\n')] |
"""
dmsetup commands - Command ``dmsetup``
======================================
Parsers for parsing and extracting data from output of commands related to
``dmsetup``.
Parsers contained in this module are:
DmsetupInfo - command ``dmsetup info -C``
-----------------------------------------
"""
from insights import parser, CommandParser
from insights.parsers import parse_delimited_table
from insights.specs import Specs
@parser(Specs.dmsetup_info)
class DmsetupInfo(CommandParser):
"""
``dmsetup info -C`` command output
Example input::
Name Maj Min Stat Open Targ Event UUID
VG00-tmp 253 8 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4
VG00-home 253 3 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxCqXOnbGe2zjhX923dFiIdl1oi7mO9tXp
VG00-var 253 6 L--w 1 2 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxicvyvt67113nTb8vMlGfgdEjDx0LKT2O
VG00-swap 253 1 L--w 2 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax3Ll2XhOYZkylx1CjOQi7G4yHgrIOsyqG
VG00-root 253 0 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxKpnAKYhrYMYMNMwjegkW965bUgtJFTRY
VG00-var_log_audit 253 5 L--w 1 1 0 LVM-<KEY>
Example data structure produced::
data = [
{
'Stat': 'L--w',
'Name': 'VG00-tmp',
'Min': '8',
'Targ': '1',
'Maj': '253',
'Open': '1',
'Event': '0',
'UUID': 'LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4'
},...
]
Attributes:
data (list): List of devices found, in order
names (list): Device names, in order found
uuids (list): UUID
by_name (dict): Access to each device by devicename
by_uuid (dict): Access to each device by uuid
Example:
>>> len(info)
6
>>> info.names[0]
'VG00-tmp'
>>> info[1]['Maj']
'253'
>>> info[1]['Stat']
'L--w'
"""
def parse_content(self, content):
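        # each row of the 'dmsetup info -C' table becomes a dict keyed by the column headers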
self.data = parse_delimited_table(content)
self.names = [dm['Name'] for dm in self.data if 'Name' in dm]
self.by_name = dict((dm['Name'], dm) for dm in self.data if 'Name' in dm)
self.uuids = [dm['UUID'] for dm in self.data if 'UUID' in dm]
self.by_uuid = dict((dm['UUID'], dm) for dm in self.data if 'UUID' in dm)
def __len__(self):
"""
The length of the devices list
"""
return len(self.data)
def __iter__(self):
"""
Iterate through the devices list
"""
for dm in self.data:
yield dm
def __getitem__(self, idx):
"""
Fetch a device by index in devices list
"""
return self.data[idx]
| [
"insights.parsers.parse_delimited_table",
"insights.parser"
] | [((430, 456), 'insights.parser', 'parser', (['Specs.dmsetup_info'], {}), '(Specs.dmsetup_info)\n', (436, 456), False, 'from insights import parser, CommandParser\n'), ((2195, 2225), 'insights.parsers.parse_delimited_table', 'parse_delimited_table', (['content'], {}), '(content)\n', (2216, 2225), False, 'from insights.parsers import parse_delimited_table\n')] |
import sys, os, random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import *
# find the current file's directory path.
try:
dirpath = os.path.dirname(__file__)
except Exception as inst:
dirpath = ''
pass
f_name1 = os.path.join(dirpath,"../datasets/breast-cancer.npz")
f_name2 = os.path.join(dirpath,"../datasets/diabetes.npz")
f_name3 = os.path.join(dirpath,"../datasets/digit.npz")
f_name4 = os.path.join(dirpath,"../datasets/iris.npz")
f_name5 = os.path.join(dirpath,"../datasets/wine.npz")
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
class ClassModels:
def __init__(self):
self.name = ''
self.grid = ''
self.param_grid = ''
self.cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
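        # every grid-search candidate is scored on 5 stratified random 80/20 train/validation splits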
self.scoring = 'neg_log_loss' #'accuracy', 'f1', 'precision', 'recall', 'roc_auc'
def trainModel(self, cname):
if (cname == "Logistic Regression"):
self.trainLogisticRegression()
elif (cname == "Linear SVM"):
self.trainLinearSVM()
elif (cname == "RBF SVM"):
self.trainRBFSVM()
elif (cname == "Neural Nets"):
self.trainNeuralNets()
else:
print("Please put existing classifier names")
pass
# run CV according to params for each classifier
def trainLogisticRegression(self):
        # TODO: try different scoring rules such as accuracy (default), F1-measure, AUC
loss_range = ['log']
penalty_range = ['l2','l1','none']
alpha_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params
self.param_grid = dict(loss=loss_range, penalty=penalty_range, alpha=alpha_range, max_iter=[1000], tol=[1e-3])
self.grid = GridSearchCV(SGDClassifier(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainLinearSVM(self):
kernel_range = ['linear']
C_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params :
self.param_grid = dict(kernel=kernel_range, C=C_range)
self.grid = GridSearchCV(SVC(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainRBFSVM(self):
# params C / gamma
kernel_range = ['rbf']
C_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params :
gamma_range = np.array([0.001,0.005,0.01,0.05,0.1,0.5,1,2,3]) # 9 params
self.param_grid = dict(kernel=kernel_range, gamma=gamma_range, C=C_range)
self.grid = GridSearchCV(SVC(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainNeuralNets(self):
# early stopping default False, Momentum default 0.9
hidden_layer_sizes_range = np.array([1,2,3,4,5,6,7,8,9,10,16,32]) # 12 params
activation_range = ['logistic']
solver_range = ['sgd']
learning_rate_init_range = np.array([1.0e-04,1.0e-03,1.0e-02,1.0e-01]) # 4 params
self.param_grid = dict(hidden_layer_sizes=hidden_layer_sizes_range,
activation=activation_range,solver=solver_range,
learning_rate_init=learning_rate_init_range,
max_iter=[1000])
self.grid = GridSearchCV(MLPClassifier(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
class Report:
def __init__(self):
pass
# Loss + Accuracy (training + test)
# auc + confusion matrix
# cpu computation time
def showResult(self, model, predicted_test, target_test, predicted_train, target_train):
print("The best parameters are %s with a score of %0.3f"
% (model.grid.best_params_, model.grid.best_score_))
print("The Train Log Loss %0.3f Zero one loss %f"
% (log_loss(target_train, predicted_train), zero_one_loss(target_train, predicted_train)))
print("The test Log Loss %0.3f Zero one loss %f"
% (log_loss(target_test, predicted_test), zero_one_loss(target_test, predicted_test)))
print("The train Accuracy %0.3f"
% (accuracy_score(target_train, predicted_train)))
print("The test Accuracy %0.3f"
% (accuracy_score(target_test, predicted_test) ))
print("The test AUC of %0.3f"
% (roc_auc_score(target_test, predicted_test) ))
print("The mean training time of %f"
% (np.mean(model.grid.cv_results_['mean_fit_time'], axis=0)) )
print("The mean test time of %f"
% (np.mean(model.grid.cv_results_['mean_score_time'], axis=0)) )
# confusion matrix
print("confusion matrix / precision recall scores")
print ( confusion_matrix(target_test, predicted_test) )
print ( classification_report(target_test, predicted_test) )
pass
def showPlot(self, model, clfname):
if (clfname == "Logistic Regression"):
self.showLogisticRegression(model, clfname)
elif (clfname == "Linear SVM"):
self.showLinearSVM(model, clfname)
elif (clfname == "RBF SVM"):
self.showRBFSVM(model, clfname)
elif (clfname == "Neural Nets"):
self.showNeuralNets(model, clfname)
else:
print("Please put existing classifier names")
pass
def showLogisticRegression(self, model, clfname):
penalty_range = model.param_grid['penalty']
alpha_range = model.param_grid['alpha'] # 13 params
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(alpha_range),len(penalty_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score, vmax=max_score, midpoint=mean_score))
plt.xlabel('penalty')
plt.ylabel('alpha (regularization)')
plt.colorbar()
plt.xticks(np.arange(len(penalty_range)), penalty_range, rotation=45)
plt.yticks(np.arange(len(alpha_range)), alpha_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showLinearSVM(self, model, clfname):
C_range = model.param_grid['C']
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(C_range),1)
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score, vmax=max_score, midpoint=mean_score))
plt.ylabel('C')
plt.colorbar()
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showRBFSVM(self, model, clfname):
C_range = model.param_grid['C']
gamma_range = model.param_grid['gamma']
# scores = model.grid.cv_results_['mean_test_score'].reshape(len(C_range), len(gamma_range))
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(C_range), len(gamma_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
# plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
# norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score,vmax=max_score, midpoint=mean_score))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showNeuralNets(self, model, clfname):
hidden_layer_sizes_range = model.param_grid['hidden_layer_sizes']
learning_rate_init_range = model.param_grid['learning_rate_init']
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(learning_rate_init_range), len(hidden_layer_sizes_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score,vmax=max_score, midpoint=mean_score))
plt.xlabel('hidden_layer_sizes')
plt.ylabel('learning_rate_init')
plt.colorbar()
plt.xticks(np.arange(len(hidden_layer_sizes_range)), hidden_layer_sizes_range, rotation=45)
plt.yticks(np.arange(len(learning_rate_init_range)), learning_rate_init_range)
plt.title('Validation accuracy')
# plt.show()
pass
def plotLROverTime(data_x, loss_y, acc_y, idx):
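    # plot loss and accuracy against the number of training points seen so far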
# Set the style globally
# Alternatives include bmh, fivethirtyeight, ggplot,
# dark_background, seaborn-deep, etc
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
# Set an aspect ratio
width, height = plt.figaspect(1.68)
fig = plt.figure(figsize=(width, height), dpi=400)
plt.plot(data_x, loss_y, linewidth=0.5, linestyle=':', marker='o',
markersize=2, label='loss')
plt.plot(data_x, acc_y, linewidth=0.5, linestyle='--', marker='v',
markersize=2, label='accuracy')
plt.xlabel('Data Points')
plt.ylabel('Score')
# Axes alteration to put zero values inside the figure Axes
# Avoids axis white lines cutting through zero values - fivethirtyeight style
xmin, xmax, ymin, ymax = plt.axis()
plt.axis([xmin - 0.1, xmax + 0.1, ymin, ymax])
plt.title('LR performance over time', fontstyle='italic')
plt.legend(loc='best', numpoints=1, fancybox=True)
# Space plots a bit
plt.subplots_adjust(hspace=0.25, wspace=0.40)
plt.savefig('./LR_overtime_'+str(idx)+'.png', bbox_inches='tight')
pass
def batches(l, n):
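    # yield successive slices of length n from the sequence l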
for i in range(0, len(l), n):
yield l[i:i+n]
def runLROverTime(train_X, train_y, test_X, test_y, idx):
clf = SGDClassifier(loss='log') # shuffle=True is useless here
shuffledRange = range(train_X.shape[0])
n_iter = 10
data_point = 0
f_loss = open('./LR_overtime_loss_'+str(idx)+'.txt', 'w')
f_acc = open('./LR_overtime_acc_'+str(idx)+'.txt', 'w')
data_x = []
loss_y = []
acc_y = []
# temp_loss = zero_one_loss(train_y, clf.predict(train_X))
# temp_acc = accuracy_score(train_y, clf.predict(train_X))
# f_loss.write("data_point= " + str(data_point) + " zero_one_loss= " + str(temp_loss) + " \n")
# f_acc.write("data_point= " + str(data_point) + " accuracy= " + str(temp_acc) + " \n")
# data_x.append(data_point)
# loss_y.append(temp_loss)
# acc_y.append(temp_acc)
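    # each pass reshuffles the training data and feeds it to partial_fit in mini-batches of 10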
for n in range(n_iter):
shuffledRange = list(shuffledRange)
random.shuffle(shuffledRange)
shuffledX = [train_X[i] for i in shuffledRange]
shuffledY = [train_y[i] for i in shuffledRange]
for batch in batches(range(len(shuffledX)), 10):
clf.partial_fit(shuffledX[batch[0]:batch[-1] + 1], shuffledY[batch[0]:batch[-1] + 1],
classes=np.unique(train_y))
data_point += len(batch)
temp_loss = zero_one_loss(train_y, clf.predict(train_X))
temp_acc = accuracy_score(train_y, clf.predict(train_X))
f_loss.write("data_point= " + str(data_point) + " zero_one_loss= " + str(temp_loss) + " \n")
f_acc.write("data_point= " + str(data_point) + " accuracy= " + str(temp_acc) + " \n")
data_x.append(data_point)
loss_y.append(temp_loss)
acc_y.append(temp_acc)
f_loss.write("\n===== End of Training / Test Set Results =====\n")
f_loss.write("data_point= %d , zero_one_loss= %f\n" % (data_point, zero_one_loss(test_y, clf.predict(test_X))))
f_acc.write("\n===== End of Training / Test Set Results =====\n")
f_acc.write("data_point= %d , accuracy= %f\n" % (data_point, accuracy_score(test_y, clf.predict(test_X))))
f_loss.close()
f_acc.close()
plotLROverTime(data_x, loss_y, acc_y, idx)
pass
class RunEval:
def __init__(self):
self.dnames = [f_name1, f_name2, f_name3, f_name4, f_name5]
self.train_X = []
self.train_y = []
self.test_X = []
self.test_y = []
def run(self):
report = Report()
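        # optional argv[1] restricts the run to a single dataset index; argv[2] == 1 runs the online-LR experiment instead of the grid searches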
for idx, dname in enumerate(self.dnames):
# load data
if len(sys.argv) > 1 and int(sys.argv[1]) != idx:
continue
data = np.load(dname)
self.train_y = data['train_Y']
self.test_y = data['test_Y']
# standardize data (mean=0, std=1)
self.train_X = StandardScaler().fit_transform(data['train_X'])
self.test_X = StandardScaler().fit_transform(data['test_X'])
print ("shape of data set ", self.train_X.shape, self.train_y.shape, self.test_X.shape, self.test_y.shape)
if len(sys.argv) > 2 and int(sys.argv[2]) == 1:
runLROverTime(self.train_X, self.train_y, self.test_X, self.test_y, idx)
continue
clfnames = ["Logistic Regression", "Linear SVM", "RBF SVM", "Neural Nets"]
# clfnames = ["RBF SVM"]
# clfnames = ["Linear SVM"]
for idx2, clfname in enumerate(clfnames):
print("===== %s " %(dname))
print("===== %s" %(clfname))
                # (1) train model with CV
model = ClassModels()
model.trainModel(clfname)
model.grid.fit(self.train_X, self.train_y)
# (2) show results
predicted_test = model.grid.predict(self.test_X)
predicted_train = model.grid.predict(self.train_X)
# Loss + Accuracy (training + test)
# auc + confusion matrix
# cpu computation time
report.showResult(model, predicted_test, self.test_y, predicted_train, self.train_y)
report.showPlot(model, clfname)
plt.savefig('./'+clfname+'_'+str(idx)+'.png', bbox_inches = 'tight')
if __name__ == '__main__':
eval = RunEval()
eval.run()
exit() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.figaspect",
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"random.shuffle",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.mean",
"sklearn.neural_network.MLPClassifier",
"sklearn.svm.SVC",
"numpy.interp",
"os.path.join",
"numpy.unique",
"sklearn.linear_model.SGDClassifier",
"numpy.geomspace",
"os.path.dirname",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.Normalize.__init__",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.array",
"matplotlib.pyplot.xlabel"
] | [((1137, 1191), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/breast-cancer.npz"""'], {}), "(dirpath, '../datasets/breast-cancer.npz')\n", (1149, 1191), False, 'import sys, os, random\n'), ((1202, 1251), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/diabetes.npz"""'], {}), "(dirpath, '../datasets/diabetes.npz')\n", (1214, 1251), False, 'import sys, os, random\n'), ((1262, 1308), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/digit.npz"""'], {}), "(dirpath, '../datasets/digit.npz')\n", (1274, 1308), False, 'import sys, os, random\n'), ((1319, 1364), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/iris.npz"""'], {}), "(dirpath, '../datasets/iris.npz')\n", (1331, 1364), False, 'import sys, os, random\n'), ((1375, 1420), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/wine.npz"""'], {}), "(dirpath, '../datasets/wine.npz')\n", (1387, 1420), False, 'import sys, os, random\n'), ((1045, 1070), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1060, 1070), False, 'import sys, os, random\n'), ((11083, 11106), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (11096, 11106), True, 'import matplotlib.pyplot as plt\n'), ((11629, 11648), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(1.68)'], {}), '(1.68)\n', (11642, 11648), True, 'import matplotlib.pyplot as plt\n'), ((11660, 11704), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)', 'dpi': '(400)'}), '(figsize=(width, height), dpi=400)\n', (11670, 11704), True, 'import matplotlib.pyplot as plt\n'), ((11712, 11810), 'matplotlib.pyplot.plot', 'plt.plot', (['data_x', 'loss_y'], {'linewidth': '(0.5)', 'linestyle': '""":"""', 'marker': '"""o"""', 'markersize': '(2)', 'label': '"""loss"""'}), "(data_x, loss_y, linewidth=0.5, linestyle=':', marker='o',\n markersize=2, label='loss')\n", (11720, 11810), True, 'import matplotlib.pyplot as plt\n'), ((11826, 11928), 'matplotlib.pyplot.plot', 'plt.plot', (['data_x', 'acc_y'], {'linewidth': '(0.5)', 'linestyle': '"""--"""', 'marker': '"""v"""', 'markersize': '(2)', 'label': '"""accuracy"""'}), "(data_x, acc_y, linewidth=0.5, linestyle='--', marker='v',\n markersize=2, label='accuracy')\n", (11834, 11928), True, 'import matplotlib.pyplot as plt\n'), ((11944, 11969), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data Points"""'], {}), "('Data Points')\n", (11954, 11969), True, 'import matplotlib.pyplot as plt\n'), ((11975, 11994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (11985, 11994), True, 'import matplotlib.pyplot as plt\n'), ((12175, 12185), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (12183, 12185), True, 'import matplotlib.pyplot as plt\n'), ((12191, 12237), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin - 0.1, xmax + 0.1, ymin, ymax]'], {}), '([xmin - 0.1, xmax + 0.1, ymin, ymax])\n', (12199, 12237), True, 'import matplotlib.pyplot as plt\n'), ((12243, 12300), 'matplotlib.pyplot.title', 'plt.title', (['"""LR performance over time"""'], {'fontstyle': '"""italic"""'}), "('LR performance over time', fontstyle='italic')\n", (12252, 12300), True, 'import matplotlib.pyplot as plt\n'), ((12306, 12356), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'numpoints': '(1)', 'fancybox': '(True)'}), "(loc='best', numpoints=1, fancybox=True)\n", (12316, 12356), True, 'import matplotlib.pyplot as plt\n'), ((12389, 12433), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], 
{'hspace': '(0.25)', 'wspace': '(0.4)'}), '(hspace=0.25, wspace=0.4)\n', (12408, 12433), True, 'import matplotlib.pyplot as plt\n'), ((12672, 12697), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""'}), "(loss='log')\n", (12685, 12697), False, 'from sklearn.linear_model import SGDClassifier\n'), ((1675, 1717), 'matplotlib.colors.Normalize.__init__', 'Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (1693, 1717), False, 'from matplotlib.colors import Normalize\n'), ((2037, 2103), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.2)', 'random_state': '(42)'}), '(n_splits=5, test_size=0.2, random_state=42)\n', (2059, 2103), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((2908, 2945), 'numpy.geomspace', 'np.geomspace', (['(1e-07)', '(100000.0)'], {'num': '(13)'}), '(1e-07, 100000.0, num=13)\n', (2920, 2945), True, 'import numpy as np\n'), ((3294, 3331), 'numpy.geomspace', 'np.geomspace', (['(1e-07)', '(100000.0)'], {'num': '(13)'}), '(1e-07, 100000.0, num=13)\n', (3306, 3331), True, 'import numpy as np\n'), ((3646, 3683), 'numpy.geomspace', 'np.geomspace', (['(1e-07)', '(100000.0)'], {'num': '(13)'}), '(1e-07, 100000.0, num=13)\n', (3658, 3683), True, 'import numpy as np\n'), ((3721, 3776), 'numpy.array', 'np.array', (['[0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 2, 3]'], {}), '([0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 2, 3])\n', (3729, 3776), True, 'import numpy as np\n'), ((4123, 4172), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32])\n', (4131, 4172), True, 'import numpy as np\n'), ((4283, 4319), 'numpy.array', 'np.array', (['[0.0001, 0.001, 0.01, 0.1]'], {}), '([0.0001, 0.001, 0.01, 0.1])\n', (4291, 4319), True, 'import numpy as np\n'), ((6959, 7010), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (6967, 7010), True, 'import numpy as np\n'), ((7101, 7124), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (7108, 7124), True, 'import numpy as np\n'), ((7206, 7232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (7216, 7232), True, 'import matplotlib.pyplot as plt\n'), ((7242, 7306), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (7261, 7306), True, 'import matplotlib.pyplot as plt\n'), ((7482, 7503), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""penalty"""'], {}), "('penalty')\n", (7492, 7503), True, 'import matplotlib.pyplot as plt\n'), ((7513, 7549), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""alpha (regularization)"""'], {}), "('alpha (regularization)')\n", (7523, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7559, 7573), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7571, 7573), True, 'import matplotlib.pyplot as plt\n'), ((7724, 7756), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (7733, 7756), True, 'import matplotlib.pyplot as plt\n'), ((7900, 7951), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (7908, 7951), True, 'import numpy as np\n'), ((8042, 8065), 'numpy.mean', 'np.mean', 
(['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (8049, 8065), True, 'import numpy as np\n'), ((8126, 8152), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (8136, 8152), True, 'import matplotlib.pyplot as plt\n'), ((8162, 8226), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (8181, 8226), True, 'import matplotlib.pyplot as plt\n'), ((8402, 8417), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""C"""'], {}), "('C')\n", (8412, 8417), True, 'import matplotlib.pyplot as plt\n'), ((8427, 8441), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8439, 8441), True, 'import matplotlib.pyplot as plt\n'), ((8505, 8537), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (8514, 8537), True, 'import matplotlib.pyplot as plt\n'), ((8829, 8880), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (8837, 8880), True, 'import numpy as np\n'), ((8971, 8994), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (8978, 8994), True, 'import numpy as np\n'), ((9071, 9097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (9081, 9097), True, 'import matplotlib.pyplot as plt\n'), ((9107, 9171), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (9126, 9171), True, 'import matplotlib.pyplot as plt\n'), ((9489, 9508), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""gamma"""'], {}), "('gamma')\n", (9499, 9508), True, 'import matplotlib.pyplot as plt\n'), ((9518, 9533), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""C"""'], {}), "('C')\n", (9528, 9533), True, 'import matplotlib.pyplot as plt\n'), ((9543, 9557), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9555, 9557), True, 'import matplotlib.pyplot as plt\n'), ((9696, 9728), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (9705, 9728), True, 'import matplotlib.pyplot as plt\n'), ((9984, 10035), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (9992, 10035), True, 'import numpy as np\n'), ((10126, 10149), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (10133, 10149), True, 'import numpy as np\n'), ((10256, 10282), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (10266, 10282), True, 'import matplotlib.pyplot as plt\n'), ((10292, 10356), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (10311, 10356), True, 'import matplotlib.pyplot as plt\n'), ((10531, 10563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""hidden_layer_sizes"""'], {}), "('hidden_layer_sizes')\n", (10541, 10563), True, 'import matplotlib.pyplot as plt\n'), ((10573, 10605), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""learning_rate_init"""'], {}), "('learning_rate_init')\n", (10583, 10605), True, 'import matplotlib.pyplot as plt\n'), ((10615, 10629), 'matplotlib.pyplot.colorbar', 
'plt.colorbar', ([], {}), '()\n', (10627, 10629), True, 'import matplotlib.pyplot as plt\n'), ((10828, 10860), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (10837, 10860), True, 'import matplotlib.pyplot as plt\n'), ((13485, 13514), 'random.shuffle', 'random.shuffle', (['shuffledRange'], {}), '(shuffledRange)\n', (13499, 13514), False, 'import sys, os, random\n'), ((1865, 1887), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (1874, 1887), True, 'import numpy as np\n'), ((3112, 3127), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '()\n', (3125, 3127), False, 'from sklearn.linear_model import SGDClassifier\n'), ((3444, 3449), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (3447, 3449), False, 'from sklearn.svm import SVC\n'), ((3898, 3903), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (3901, 3903), False, 'from sklearn.svm import SVC\n'), ((4656, 4671), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (4669, 4671), False, 'from sklearn.neural_network import MLPClassifier\n'), ((15267, 15281), 'numpy.load', 'np.load', (['dname'], {}), '(dname)\n', (15274, 15281), True, 'import numpy as np\n'), ((5846, 5902), 'numpy.mean', 'np.mean', (["model.grid.cv_results_['mean_fit_time']"], {'axis': '(0)'}), "(model.grid.cv_results_['mean_fit_time'], axis=0)\n", (5853, 5902), True, 'import numpy as np\n'), ((5966, 6024), 'numpy.mean', 'np.mean', (["model.grid.cv_results_['mean_score_time']"], {'axis': '(0)'}), "(model.grid.cv_results_['mean_score_time'], axis=0)\n", (5973, 6024), True, 'import numpy as np\n'), ((13824, 13842), 'numpy.unique', 'np.unique', (['train_y'], {}), '(train_y)\n', (13833, 13842), True, 'import numpy as np\n'), ((15444, 15460), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (15458, 15460), False, 'from sklearn.preprocessing import StandardScaler\n'), ((15519, 15535), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (15533, 15535), False, 'from sklearn.preprocessing import StandardScaler\n')] |
import unittest
import maxixe
from maxixe.tests import decorators
from maxixe.tests import loader
from maxixe.tests import parser
from maxixe.tests import utils
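# collect every maxixe test module into a single suite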
suite = unittest.TestSuite()
suite.addTests(unittest.TestLoader().loadTestsFromModule(decorators))
suite.addTests(unittest.TestLoader().loadTestsFromModule(loader))
suite.addTests(unittest.TestLoader().loadTestsFromModule(parser))
suite.addTests(unittest.TestLoader().loadTestsFromModule(utils))
| [
"unittest.TestLoader",
"unittest.TestSuite"
] | [((171, 191), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (189, 191), False, 'import unittest\n'), ((207, 228), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (226, 228), False, 'import unittest\n'), ((277, 298), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (296, 298), False, 'import unittest\n'), ((343, 364), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (362, 364), False, 'import unittest\n'), ((409, 430), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (428, 430), False, 'import unittest\n')] |
from django.db import models
class Blog(models.Model):
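    # minimal blog post model; the Chinese verbose names read "Title" (标题) and "Article" (文章)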
title = models.CharField("标题", unique=True, max_length=200)
class Meta:
db_table = 'blog'
verbose_name = '文章' | [
"django.db.models.CharField"
] | [((71, 122), 'django.db.models.CharField', 'models.CharField', (['"""标题"""'], {'unique': '(True)', 'max_length': '(200)'}), "('标题', unique=True, max_length=200)\n", (87, 122), False, 'from django.db import models\n')] |
from wormer.tools import manager, downloader
from wormer.data import strategy
import re
class Graber:
synopsis_pattern = '''(?=lemma-summary")(.*?)(?<=config) '''
text_pattern = '>\s*?([^\&\b\n\[\]]*?)<'
href_pattern = '<a target=_blank href="(/item/[\w\d%]*?)">'
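    # regex patterns: the Baidu Baike "lemma-summary" block, the plain text inside it, and internal /item/ links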
def __init__(self):
self.urlManager = manager.UrlsManager()
self.downloader = downloader.DownLoader()
self.textManager = manager.TextManager()
self.logManager = manager.LogManager()
self.threadManager = manager.ThreadManager()
self.url_start = ''
def get_readme(self):
self.downloader.grab_single(self.url_start)
tips = self.downloader.get_readme()
return tips
def grabing_urls(self, limit=100, grab_strategy=strategy.GrabStrategy.BREATH_FIRST):
self.urlManager.add_single_url(self.url_start)
self.urlManager.add_single_url(self.url_start, 'urls_grabbed')
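        # crawl loop: fetch each pending page, collect its summary text and queue newly found links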
while self.urlManager.has_next_url():
page_source = self.downloader.grab_single_url(self.urlManager.get_url()).content.decode('utf-8')
# match need to the beginning of the string, and return is a turple, use [i for i in turple] to change, and findall return list
urls = self.textManager.find_urls_by_regex(page_source, Graber.href_pattern)
synopsis = self.textManager.find_text_by_regex(page_source, Graber.synopsis_pattern, re.VERBOSE|re.MULTILINE|re.DOTALL)
# print(synopsis)
page_content = self.textManager.find_text_by_regex(synopsis, Graber.text_pattern, re.VERBOSE|re.MULTILINE|re.DOTALL)
if urls and page_content is not None:
self.add_urls_head(urls, 'https://baike.baidu.com')
self.urlManager.add_urls(urls)
self.logManager.collect_data(page_content)
self.logManager.save_all_data()
@staticmethod
def add_urls_head(urls, head):
for i, item in enumerate(urls):
item = head + item
urls[i] = item
def get_start(self, url_start):
self.url_start = url_start
if __name__ == '__main__':
# url = input('The website you want:\n')
url_python_baike = 'https://baike.baidu.com/item/Python'
graber = Graber()
graber.get_start(url_python_baike)
graber.grabing_urls()
# text = graber.get_readme(url)
# graber.logManager.log_text(text)
| [
"wormer.tools.manager.ThreadManager",
"wormer.tools.manager.LogManager",
"wormer.tools.manager.TextManager",
"wormer.tools.manager.UrlsManager",
"wormer.tools.downloader.DownLoader"
] | [((329, 350), 'wormer.tools.manager.UrlsManager', 'manager.UrlsManager', ([], {}), '()\n', (348, 350), False, 'from wormer.tools import manager, downloader\n'), ((377, 400), 'wormer.tools.downloader.DownLoader', 'downloader.DownLoader', ([], {}), '()\n', (398, 400), False, 'from wormer.tools import manager, downloader\n'), ((428, 449), 'wormer.tools.manager.TextManager', 'manager.TextManager', ([], {}), '()\n', (447, 449), False, 'from wormer.tools import manager, downloader\n'), ((476, 496), 'wormer.tools.manager.LogManager', 'manager.LogManager', ([], {}), '()\n', (494, 496), False, 'from wormer.tools import manager, downloader\n'), ((526, 549), 'wormer.tools.manager.ThreadManager', 'manager.ThreadManager', ([], {}), '()\n', (547, 549), False, 'from wormer.tools import manager, downloader\n')] |
from telegram.ext import ConversationHandler
from telegram.ext import MessageHandler
from telegram.ext import Filters
from telegram.ext import CallbackQueryHandler
from Model.share import Share
import Controllers.global_states as states
from Utils.logging import get_logger as log
import pandas as pd
import datetime
GETSUMMARY = range(1)
class DividendSummary:
def __init__(self, dispatcher):
self.__dp = dispatcher
self.__handler()
def __handler(self):
ds_handler = ConversationHandler(
entry_points=[CallbackQueryHandler(
self.get_ticker, pattern='^' + str(states.DIVIDENDINFO) + '$')],
states={
GETSUMMARY: [
MessageHandler(Filters.text, self.get_dividend_summary)
],
},
fallbacks=[]
)
self.__dp.add_handler(ds_handler)
@staticmethod
def get_ticker(update, context):
user = update.effective_user
log().info("User %s pressed the dividend summary button.", user.first_name)
query = update.callback_query
query.answer()
query.edit_message_text(
text="Enter ticker symbol (e.g D05)")
return GETSUMMARY
@staticmethod
def get_dividend_summary(update, context):
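        # Build and send an HTML summary of the requested ticker's dividends over the last few years.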
ticker = update.message.text
user = update.effective_user
log().info("User %s entered ticker value %s.", user.first_name, ticker)
years = 5
share = Share(ticker)
if not share.is_valid:
update.message.reply_text("Invalid ticker. Please use /start to go back to the main menu")
log().info("User %s entered an invalid ticker value %s.", user.first_name, ticker)
return ConversationHandler.END
a = share.get_dividend_summary(datetime.datetime.now().year, datetime.datetime.now().year - years)
s = '<b>' + share.name + '</b>\n\n'
for item in a:
s += '<b>' + str(item.year) + ' (' + str(item.total) + ')</b>' + '\n'
i = 1
for pay_date, pay_amount in zip(item.pay_date, item.amount):
if pay_date == '-':
continue
s += pd.to_datetime(pay_date).strftime('%d %B') + ': ' + str(pay_amount).replace('SGD', 'SGD ') +'\n'
i += 1
s += '\n'
update.message.reply_text(s, parse_mode='HTML')
return ConversationHandler.END
| [
"Model.share.Share",
"Utils.logging.get_logger",
"telegram.ext.MessageHandler",
"pandas.to_datetime",
"datetime.datetime.now"
] | [((1496, 1509), 'Model.share.Share', 'Share', (['ticker'], {}), '(ticker)\n', (1501, 1509), False, 'from Model.share import Share\n'), ((994, 999), 'Utils.logging.get_logger', 'log', ([], {}), '()\n', (997, 999), True, 'from Utils.logging import get_logger as log\n'), ((1388, 1393), 'Utils.logging.get_logger', 'log', ([], {}), '()\n', (1391, 1393), True, 'from Utils.logging import get_logger as log\n'), ((1823, 1846), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1844, 1846), False, 'import datetime\n'), ((1657, 1662), 'Utils.logging.get_logger', 'log', ([], {}), '()\n', (1660, 1662), True, 'from Utils.logging import get_logger as log\n'), ((1853, 1876), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1874, 1876), False, 'import datetime\n'), ((726, 781), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'self.get_dividend_summary'], {}), '(Filters.text, self.get_dividend_summary)\n', (740, 781), False, 'from telegram.ext import MessageHandler\n'), ((2218, 2242), 'pandas.to_datetime', 'pd.to_datetime', (['pay_date'], {}), '(pay_date)\n', (2232, 2242), True, 'import pandas as pd\n')] |
import gzip
import sys
import argparse
import re
import logging
import numpy as np
import pandas as p
from itertools import product, tee
from collections import Counter, OrderedDict
from Bio import SeqIO
def generate_feature_mapping(kmer_len):
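    # Assign one shared feature index to each kmer and its reverse complement.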
BASE_COMPLEMENT = {"A":"T","T":"A","G":"C","C":"G"}
kmer_hash = {}
counter = 0
for kmer in product("ATGC",repeat=kmer_len):
if kmer not in kmer_hash:
kmer_hash[kmer] = counter
rev_compl = tuple([BASE_COMPLEMENT[x] for x in reversed(kmer)])
kmer_hash[rev_compl] = counter
counter += 1
return kmer_hash, counter
def window(seq,n):
els = tee(seq,n)
for i,el in enumerate(els):
for _ in range(i):
next(el, None)
return zip(*els)
def _calculate_composition(read_file, kmer_len, length_threshold=25):
#Generate kmer dictionary
feature_mapping, nr_features = generate_feature_mapping(kmer_len)
composition = np.zeros(nr_features,dtype=np.int)
start_composition = np.zeros(nr_features,dtype=np.int)
with gzip.open(read_file, "rt") as handle:
for seq in SeqIO.parse(handle,"fastq"):
seq_len = len(seq)
if seq_len<= length_threshold:
continue
str_seq = str(seq.seq)
# Create a list containing all kmers, translated to integers
kmers = [
feature_mapping[kmer_tuple]
for kmer_tuple
in window(str_seq.upper(), kmer_len)
if kmer_tuple in feature_mapping
]
# numpy.bincount returns an array of size = max + 1
# so we add the max value and remove it afterwards
# numpy.bincount was found to be much more efficient than
                # counting manually or using collections.Counter (see the small demo after this function)
kmers.append(nr_features - 1)
composition_v = np.bincount(np.array(kmers))
composition_v[-1] -= 1
# Adding pseudo counts before storing in dict
composition += composition_v
failStart = 0
if seq_len >= kmer_len:
startKmer = str_seq[0:kmer_len].upper()
startKmerT = tuple(startKmer)
if startKmerT in feature_mapping:
start_composition[feature_mapping[startKmerT]]+=1
else:
failStart+=1
return feature_mapping, composition, start_composition, failStart
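# Small demo (illustration only, not called by this script) of the bincount trick used in
# _calculate_composition: appending the largest possible index pads np.bincount's output
# to a fixed length, and the extra count is removed afterwards. The same padding can also
# be obtained with np.bincount(values, minlength=nr_features).
def _bincount_trick_demo(kmer_indices, nr_features):
    counts = np.bincount(np.array(list(kmer_indices) + [nr_features - 1]))
    counts[-1] -= 1
    return counts  # always has length nr_features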
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("read_file", help="gzipped fastq read file")
parser.add_argument("kmer_length", help="kmer length assumed overlap")
parser.add_argument("outFileStub", help="stub for output files")
args = parser.parse_args()
#import ipdb; ipdb.set_trace()
(feature_mapping, composition, start_composition,failStart) = _calculate_composition(args.read_file, int(args.kmer_length))
print(str(failStart))
for k in sorted(feature_mapping, key=feature_mapping.get):
kidx = feature_mapping[k]
print("".join(k) + "," + str(kidx) + "," + str(composition[kidx]) + "," + str(start_composition[kidx]) )
if __name__ == "__main__":
main(sys.argv[1:])
| [
"gzip.open",
"argparse.ArgumentParser",
"Bio.SeqIO.parse",
"numpy.zeros",
"numpy.array",
"itertools.product",
"itertools.tee"
] | [((355, 387), 'itertools.product', 'product', (['"""ATGC"""'], {'repeat': 'kmer_len'}), "('ATGC', repeat=kmer_len)\n", (362, 387), False, 'from itertools import product, tee\n'), ((664, 675), 'itertools.tee', 'tee', (['seq', 'n'], {}), '(seq, n)\n', (667, 675), False, 'from itertools import product, tee\n'), ((971, 1006), 'numpy.zeros', 'np.zeros', (['nr_features'], {'dtype': 'np.int'}), '(nr_features, dtype=np.int)\n', (979, 1006), True, 'import numpy as np\n'), ((1030, 1065), 'numpy.zeros', 'np.zeros', (['nr_features'], {'dtype': 'np.int'}), '(nr_features, dtype=np.int)\n', (1038, 1065), True, 'import numpy as np\n'), ((2557, 2582), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2580, 2582), False, 'import argparse\n'), ((1079, 1105), 'gzip.open', 'gzip.open', (['read_file', '"""rt"""'], {}), "(read_file, 'rt')\n", (1088, 1105), False, 'import gzip\n'), ((1137, 1165), 'Bio.SeqIO.parse', 'SeqIO.parse', (['handle', '"""fastq"""'], {}), "(handle, 'fastq')\n", (1148, 1165), False, 'from Bio import SeqIO\n'), ((1951, 1966), 'numpy.array', 'np.array', (['kmers'], {}), '(kmers)\n', (1959, 1966), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
<NAME>
1. a. Frequentist confidence intervals do not respect the physical limitations imposed on a system, ie non-negativity of a mass.
b. Typically, that the probability to be found outside the interval on both sides of the distribution is 16% (or (100-CL)/2 %).
Often constructed with a likelihood function, finding where the likelihood reduces by a half.
c. We need a prior PDF to construct the posterior PDF for \mu_t.
d. 1/\mu_t. He justifies that this is invariant over changes of power of \mu_t.
e. Bayesian methods fail to be objective: they must be injected with a prior PDF to construct the posterior from the likelihood function.
Classical intervals fail to consider physical limitations on the measured parameter.
Classical limits also handle systematics in a counterintuitive way, such that a bad calibration leads to a tighter confidence interval.
It seems that generally people use classical statistics except when it produces things that 'seem' wrong, in which case use Bayesian.
f. As Cousins did, perform classical analysis on the mean and statistical error and use a Bayesian analysis of the detector sensitivity.
2. I repeated this entire problem for a quadratic plot. The files ending in "_quad.png" are from the second iteration with a quadratic dataset.
a. The data are shown in blue, the linear fit in red, and the quadratic fit in blue.
b. The symmetry of the histogram reflects unbiased estimators.
c. The functional form is:
1/(2^{df/2}\Gamma(df/2)) x^{df/2-1}e^{-x/2}
The single parameter, df, is the number of degrees of freedom in the fit. Since we have 15 data points, this is either 12 or 13.
For the linear fit, we have two free parameters so df=13; for the quadratic fit with three free parameters, df=12.
We expected the reduced chi square to be around 1, and this is the result for both fits.
* For comparison I give a normalized reduced Chi2 distribution for df=12 and df=13. Overlaying them was not obviously easy, but comparing by-eye they are identical.
      I plotted reduced chi squares throughout because of their usefulness as a goodness-of-fit measure, but the conversion between the two statistics is simple.
d. In the case of the linear data, the fit gets worse. It is difficult to predict what happens here: if we are lucky enough that we can fit
some noise to the new x^2 degree of freedom, the X^2 will lower. However, the ndf has reduced by 1, so if there is overall no noise we can
fit away, then the reduced chi square will rise.
In the case of the quadratic data, the linear fit is abysmal and the quadratic fit is around 1. This is also expected.
3. a. I sampled the distribution using the cdf; for reference I included both the plot of the distribution and the cdf.
   b. Transforming error bars for log data is not entirely trivial because applying the logarithm literally yields asymmetric error bars.
      Instead, I transformed to first order (d/dx log x), using \sigma_{D,log}=\sigma_D/D (a small sketch of this propagation is included right after this docstring).
c. It takes a rather long time to run this with a large number of statistics (maybe I am doing something very inefficient).
From running the experiment 500 times, I can say that poor sampling of the tails of the distribution leads to underestimation: that is,
we can see a bias in the distribution that favors the left side. I verified this by reducing the number of samples taken
from the distribution by a factor of 10 and re-running, giving bins that are much less well-populated. I attached outputs for both cases.
Rather than wrestle with masking or reassigning garbage datasets post-log, I discarded all results for which the fit failed.
'''
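# Added illustration (not part of the original analysis): a minimal sketch of the
# first-order error propagation described in 3b, where for y = log(D) the error is
# sigma_y ~= sigma_D / D. The function name and arguments are illustrative only.
def _sketch_log_error_propagation(counts, count_errors):
    """Return (log(counts), first-order errors); assumes counts are strictly positive."""
    import numpy as _np  # local import keeps the sketch self-contained
    counts = _np.asarray(counts, dtype=float)
    count_errors = _np.asarray(count_errors, dtype=float)
    return _np.log(counts), count_errors / counts
# e.g. _sketch_log_error_propagation([100, 50], [10, 7]) gives errors [0.1, 0.14]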
import random
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stat
#Samples from the PDF and computes the mean.
#Maps random reals in (0,1) to the Poisson distribution using a Poisson lookup table
class MeasurementTaker:
def __init__(self, resolution):
self.theResolution=resolution
def GeneratePointWithResolution(self, val):
point=np.random.normal(loc=val,scale=self.theResolution)
return point
class theLine:
def __init__(self, degree):
self.quadcoeff=1
self.degree=degree
self.m=2
self.b=6
self.res=2
self.X=np.linspace(1,15, 15)
self.Y=[]
self.x=0
self.residuals=0
self.ChiSquare=0
if self.degree == 1:
self.BuildLine()
else:
self.BuildLineQuadratic()
self.FitLine()
def BuildLine(self):
measurer = MeasurementTaker(2)
for i, entry in enumerate(self.X):
self.Y.append(measurer.GeneratePointWithResolution(self.m*entry+self.b))
def BuildLineQuadratic(self):
measurer = MeasurementTaker(2)
for i, entry in enumerate(self.X):
self.Y.append(measurer.GeneratePointWithResolution(self.quadcoeff*entry**2+self.m*entry+self.b))
def FitLine(self):
self.coeffs = np.polyfit(self.X, self.Y, 1)
self.ChiSquare=np.sum((((self.coeffs[0]*self.X+self.coeffs[1])-self.Y)/self.res) ** 2)
self.quadcoeffs=np.polyfit(self.X, self.Y,2)
self.ChiSquareQuad=np.sum((((self.quadcoeffs[0]*self.X**2+self.quadcoeffs[1]*self.X+self.quadcoeffs[2])-self.Y)/self.res)**2)
def PlotLine(self, title):
plt.errorbar(self.X,self.Y,xerr=0,yerr=2)
plt.plot(self.X,self.quadcoeffs[0]*self.X**2+self.quadcoeffs[1]*self.X+self.quadcoeffs[2])
plt.plot(self.X,self.coeffs[0]*self.X+self.coeffs[1])
plt.xlabel("x")
plt.ylabel("y")
plt.title("The Line")
plt.savefig(title)
plt.clf()
class theExponential:
lookup_x=[]
lookup_y=[]
cdf=[]
maxcdf=0
def GenerateSample(self):
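        # Inverse-transform sampling: draw a uniform value over the range of the cumulative
        # sums, locate the cdf bin it falls into, and return the corresponding x value.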
randomNumber = random.uniform(theExponential.cdf[0],theExponential.maxcdf)
index=-1
if randomNumber < theExponential.cdf[0]:
index=0
else:
for i in range(0,len(theExponential.cdf)-1):
if randomNumber > theExponential.cdf[i] and randomNumber < theExponential.cdf[i+1]:
index=i+1
if index != -1:
self.samples.append(theExponential.lookup_x[index])
def GenerateNSamples(self, numSamples):
for i in range(0, numSamples):
self.GenerateSample()
def AnalyzeDistro(self, index):
y,binEdges = np.histogram(self.samples,bins=10)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
menStd = np.sqrt(y)
width = 0.20
plt.clf()
if index == 1:
plt.bar(bincenters, y, width=width, yerr=menStd, ecolor='g')
plt.xlabel("Value")
plt.ylabel("Entries")
plt.title(str(len(self.samples))+" exponential samples")
plt.savefig("3b_exp_samples.png")
plt.clf()
self.logsamples=np.log(y)
logcoeffs = np.polyfit(bincenters, self.logsamples, 1)
if index == 1:
plt.bar(bincenters,self.logsamples,width=width, yerr=menStd/y, ecolor='g')
plt.xlabel("Value")
plt.ylabel("log Entries")
plt.title(str(len(self.samples))+" exponential samples")
theFitX=np.linspace(0,5,1000)
theFitY=theFitX*logcoeffs[0]+logcoeffs[1]
plt.plot(theFitX,theFitY)
plt.savefig("3b_exp_samples_log.png")
plt.clf()
return -1*logcoeffs[0]
def __init__(self, nSamples):
self.samples=[]
self.logbins=[]
self.GenerateNSamples(nSamples)
theExponential.lookup_x=np.linspace(0, 5, 10000)
theExponential.lookup_y=np.exp(-theExponential.lookup_x)
        # reset the shared class-level cdf so repeated instantiation does not corrupt the lookup
        theExponential.cdf=[]
        runningAverage=0
for val in theExponential.lookup_y:
runningAverage=runningAverage+val
theExponential.cdf.append(runningAverage)
theExponential.maxcdf=theExponential.cdf[len(theExponential.cdf)-1]
plt.clf()
print("Running...")
plt.plot(theExponential.lookup_x, theExponential.lookup_y)
plt.xlabel("x")
plt.ylabel("$e^{-x}$")
plt.title("Exponential distribution")
plt.savefig("3_exponential_dist.png")
plt.clf()
plt.plot(theExponential.lookup_x, theExponential.cdf)
plt.xlabel("x")
plt.ylabel("cdf")
plt.title("Exponential cdf")
plt.savefig("3_exponential_cdf.png")
plt.clf()
for i in range(0,2):
fileEnding=0
degree=i+1
if i == 0:
fileEnding=".png"
else:
fileEnding="_quad.png"
Lines=[]
slopes=[]
intercepts=[]
quads=[]
chisqs=[]
chisqquads=[]
for j in range(0,1000):
line = theLine(degree)
Lines.append(line)
if j == 1:
line.PlotLine("2a_line"+fileEnding)
if i == 0:
slopes.append(line.coeffs[0])
intercepts.append(line.coeffs[1])
else:
quads.append(line.quadcoeffs[0])
slopes.append(line.quadcoeffs[1])
intercepts.append(line.quadcoeffs[2])
chisqs.append(line.ChiSquare/13)
chisqquads.append(line.ChiSquareQuad/12)
plt.hist(slopes, bins=100)
plt.xlabel("m")
plt.ylabel("Entries")
plt.title("Slopes histogram")
plt.savefig("2b_slopes"+fileEnding)
plt.clf()
plt.hist(intercepts, bins=100)
plt.xlabel("b")
plt.ylabel("Entries")
plt.title("Intercepts histogram")
plt.savefig("2b_intercepts"+fileEnding)
plt.clf()
if i == 1:
        plt.hist(quads, bins=100)
plt.xlabel("a (quadratic coefficient)")
plt.ylabel("Entries")
plt.title("Quadratic coefficient histogram")
plt.savefig("2b_quads"+fileEnding)
plt.clf()
plt.hist(chisqs, bins=100)
plt.xlabel("X^2 / ndf")
plt.ylabel("Entries")
plt.title("Chi-square of linear fit")
plt.savefig("2c_chisq"+fileEnding)
plt.clf()
plt.hist(chisqquads, bins=100)
plt.xlabel("X^2 / ndf")
plt.ylabel("Entries")
plt.title("Chi-square of quadratic fit")
plt.savefig("2d_chisq2"+fileEnding)
plt.clf()
theNdf=0
if i ==1:
theNdf=12
else:
theNdf=13
chispace=np.linspace(0,theNdf*3,1000)
chidist=stat.chi2(theNdf,1)
plt.plot(chispace/theNdf, chidist.pdf(chispace))
plt.xlabel("X^2")
plt.ylabel("P")
plt.title("Chi-square distribution (ndf ="+str(theNdf)+")")
plt.savefig("2d_chisq2pdf"+fileEnding)
plt.clf()
Taus=[]
for i in range(0,500):
if i % 100 == 0:
print(i)
exp = theExponential(500)
result=exp.AnalyzeDistro(i)
if math.isnan(result) == False:
Taus.append(result)
print(Taus)
plt.hist(Taus, bins=20)
plt.xlabel("Tau")
plt.ylabel("Entries")
plt.title("Estimated Tau")
plt.savefig("3c_tau_hist_500samples.png")
Taus=[]
for i in range(0,500):
if i % 100 == 0:
print(i)
exp = theExponential(50)
result=exp.AnalyzeDistro(i)
if math.isnan(result) == False:
Taus.append(result)
print(Taus)
plt.hist(Taus, bins=20)
plt.xlabel("Tau")
plt.ylabel("Entries")
plt.title("Estimated Tau")
plt.savefig("3c_tau_hist_50samples.png")
| [
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.clf",
"numpy.polyfit",
"matplotlib.pyplot.bar",
"numpy.histogram",
"numpy.exp",
"numpy.random.normal",
"numpy.linspace",
"matplotlib.pyplot.errorbar",
"math.isnan",
"scipy.stats.chi2",
"matplotlib.pyplot.ylabel",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"random.uniform",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((8154, 8178), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(10000)'], {}), '(0, 5, 10000)\n', (8165, 8178), True, 'import numpy as np\n'), ((8204, 8236), 'numpy.exp', 'np.exp', (['(-theExponential.lookup_x)'], {}), '(-theExponential.lookup_x)\n', (8210, 8236), True, 'import numpy as np\n'), ((8448, 8457), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8455, 8457), True, 'import matplotlib.pyplot as plt\n'), ((8482, 8540), 'matplotlib.pyplot.plot', 'plt.plot', (['theExponential.lookup_x', 'theExponential.lookup_y'], {}), '(theExponential.lookup_x, theExponential.lookup_y)\n', (8490, 8540), True, 'import matplotlib.pyplot as plt\n'), ((8542, 8557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8552, 8557), True, 'import matplotlib.pyplot as plt\n'), ((8559, 8581), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$e^{-x}$"""'], {}), "('$e^{-x}$')\n", (8569, 8581), True, 'import matplotlib.pyplot as plt\n'), ((8583, 8620), 'matplotlib.pyplot.title', 'plt.title', (['"""Exponential distribution"""'], {}), "('Exponential distribution')\n", (8592, 8620), True, 'import matplotlib.pyplot as plt\n'), ((8622, 8659), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3_exponential_dist.png"""'], {}), "('3_exponential_dist.png')\n", (8633, 8659), True, 'import matplotlib.pyplot as plt\n'), ((8661, 8670), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8668, 8670), True, 'import matplotlib.pyplot as plt\n'), ((8674, 8727), 'matplotlib.pyplot.plot', 'plt.plot', (['theExponential.lookup_x', 'theExponential.cdf'], {}), '(theExponential.lookup_x, theExponential.cdf)\n', (8682, 8727), True, 'import matplotlib.pyplot as plt\n'), ((8729, 8744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8739, 8744), True, 'import matplotlib.pyplot as plt\n'), ((8746, 8763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cdf"""'], {}), "('cdf')\n", (8756, 8763), True, 'import matplotlib.pyplot as plt\n'), ((8766, 8794), 'matplotlib.pyplot.title', 'plt.title', (['"""Exponential cdf"""'], {}), "('Exponential cdf')\n", (8775, 8794), True, 'import matplotlib.pyplot as plt\n'), ((8796, 8832), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3_exponential_cdf.png"""'], {}), "('3_exponential_cdf.png')\n", (8807, 8832), True, 'import matplotlib.pyplot as plt\n'), ((8834, 8843), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8841, 8843), True, 'import matplotlib.pyplot as plt\n'), ((11260, 11283), 'matplotlib.pyplot.hist', 'plt.hist', (['Taus'], {'bins': '(20)'}), '(Taus, bins=20)\n', (11268, 11283), True, 'import matplotlib.pyplot as plt\n'), ((11285, 11302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tau"""'], {}), "('Tau')\n", (11295, 11302), True, 'import matplotlib.pyplot as plt\n'), ((11304, 11325), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (11314, 11325), True, 'import matplotlib.pyplot as plt\n'), ((11327, 11353), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimated Tau"""'], {}), "('Estimated Tau')\n", (11336, 11353), True, 'import matplotlib.pyplot as plt\n'), ((11355, 11396), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3c_tau_hist_500samples.png"""'], {}), "('3c_tau_hist_500samples.png')\n", (11366, 11396), True, 'import matplotlib.pyplot as plt\n'), ((11615, 11638), 'matplotlib.pyplot.hist', 'plt.hist', (['Taus'], {'bins': '(20)'}), '(Taus, bins=20)\n', (11623, 11638), True, 'import matplotlib.pyplot as plt\n'), ((11640, 11657), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Tau"""'], {}), "('Tau')\n", (11650, 11657), True, 'import matplotlib.pyplot as plt\n'), ((11659, 11680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (11669, 11680), True, 'import matplotlib.pyplot as plt\n'), ((11682, 11708), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimated Tau"""'], {}), "('Estimated Tau')\n", (11691, 11708), True, 'import matplotlib.pyplot as plt\n'), ((11710, 11750), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3c_tau_hist_50samples.png"""'], {}), "('3c_tau_hist_50samples.png')\n", (11721, 11750), True, 'import matplotlib.pyplot as plt\n'), ((9619, 9645), 'matplotlib.pyplot.hist', 'plt.hist', (['slopes'], {'bins': '(100)'}), '(slopes, bins=100)\n', (9627, 9645), True, 'import matplotlib.pyplot as plt\n'), ((9651, 9666), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""m"""'], {}), "('m')\n", (9661, 9666), True, 'import matplotlib.pyplot as plt\n'), ((9672, 9693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (9682, 9693), True, 'import matplotlib.pyplot as plt\n'), ((9699, 9728), 'matplotlib.pyplot.title', 'plt.title', (['"""Slopes histogram"""'], {}), "('Slopes histogram')\n", (9708, 9728), True, 'import matplotlib.pyplot as plt\n'), ((9734, 9771), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2b_slopes' + fileEnding)"], {}), "('2b_slopes' + fileEnding)\n", (9745, 9771), True, 'import matplotlib.pyplot as plt\n'), ((9775, 9784), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9782, 9784), True, 'import matplotlib.pyplot as plt\n'), ((9796, 9826), 'matplotlib.pyplot.hist', 'plt.hist', (['intercepts'], {'bins': '(100)'}), '(intercepts, bins=100)\n', (9804, 9826), True, 'import matplotlib.pyplot as plt\n'), ((9832, 9847), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""b"""'], {}), "('b')\n", (9842, 9847), True, 'import matplotlib.pyplot as plt\n'), ((9853, 9874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (9863, 9874), True, 'import matplotlib.pyplot as plt\n'), ((9880, 9913), 'matplotlib.pyplot.title', 'plt.title', (['"""Intercepts histogram"""'], {}), "('Intercepts histogram')\n", (9889, 9913), True, 'import matplotlib.pyplot as plt\n'), ((9919, 9960), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2b_intercepts' + fileEnding)"], {}), "('2b_intercepts' + fileEnding)\n", (9930, 9960), True, 'import matplotlib.pyplot as plt\n'), ((9964, 9973), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9971, 9973), True, 'import matplotlib.pyplot as plt\n'), ((10268, 10294), 'matplotlib.pyplot.hist', 'plt.hist', (['chisqs'], {'bins': '(100)'}), '(chisqs, bins=100)\n', (10276, 10294), True, 'import matplotlib.pyplot as plt\n'), ((10300, 10323), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X^2 / ndf"""'], {}), "('X^2 / ndf')\n", (10310, 10323), True, 'import matplotlib.pyplot as plt\n'), ((10329, 10350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (10339, 10350), True, 'import matplotlib.pyplot as plt\n'), ((10356, 10393), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi-square of linear fit"""'], {}), "('Chi-square of linear fit')\n", (10365, 10393), True, 'import matplotlib.pyplot as plt\n'), ((10399, 10435), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2c_chisq' + fileEnding)"], {}), "('2c_chisq' + fileEnding)\n", (10410, 10435), True, 'import matplotlib.pyplot as plt\n'), ((10439, 10448), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10446, 10448), True, 'import 
matplotlib.pyplot as plt\n'), ((10466, 10496), 'matplotlib.pyplot.hist', 'plt.hist', (['chisqquads'], {'bins': '(100)'}), '(chisqquads, bins=100)\n', (10474, 10496), True, 'import matplotlib.pyplot as plt\n'), ((10502, 10525), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X^2 / ndf"""'], {}), "('X^2 / ndf')\n", (10512, 10525), True, 'import matplotlib.pyplot as plt\n'), ((10531, 10552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (10541, 10552), True, 'import matplotlib.pyplot as plt\n'), ((10558, 10598), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi-square of quadratic fit"""'], {}), "('Chi-square of quadratic fit')\n", (10567, 10598), True, 'import matplotlib.pyplot as plt\n'), ((10604, 10641), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2d_chisq2' + fileEnding)"], {}), "('2d_chisq2' + fileEnding)\n", (10615, 10641), True, 'import matplotlib.pyplot as plt\n'), ((10645, 10654), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10652, 10654), True, 'import matplotlib.pyplot as plt\n'), ((10757, 10789), 'numpy.linspace', 'np.linspace', (['(0)', '(theNdf * 3)', '(1000)'], {}), '(0, theNdf * 3, 1000)\n', (10768, 10789), True, 'import numpy as np\n'), ((10799, 10819), 'scipy.stats.chi2', 'stat.chi2', (['theNdf', '(1)'], {}), '(theNdf, 1)\n', (10808, 10819), True, 'import scipy.stats as stat\n'), ((10878, 10895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X^2"""'], {}), "('X^2')\n", (10888, 10895), True, 'import matplotlib.pyplot as plt\n'), ((10901, 10916), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P"""'], {}), "('P')\n", (10911, 10916), True, 'import matplotlib.pyplot as plt\n'), ((10987, 11027), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2d_chisq2pdf' + fileEnding)"], {}), "('2d_chisq2pdf' + fileEnding)\n", (10998, 11027), True, 'import matplotlib.pyplot as plt\n'), ((11031, 11040), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11038, 11040), True, 'import matplotlib.pyplot as plt\n'), ((4403, 4454), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'val', 'scale': 'self.theResolution'}), '(loc=val, scale=self.theResolution)\n', (4419, 4454), True, 'import numpy as np\n'), ((4659, 4681), 'numpy.linspace', 'np.linspace', (['(1)', '(15)', '(15)'], {}), '(1, 15, 15)\n', (4670, 4681), True, 'import numpy as np\n'), ((5400, 5429), 'numpy.polyfit', 'np.polyfit', (['self.X', 'self.Y', '(1)'], {}), '(self.X, self.Y, 1)\n', (5410, 5429), True, 'import numpy as np\n'), ((5454, 5531), 'numpy.sum', 'np.sum', (['(((self.coeffs[0] * self.X + self.coeffs[1] - self.Y) / self.res) ** 2)'], {}), '(((self.coeffs[0] * self.X + self.coeffs[1] - self.Y) / self.res) ** 2)\n', (5460, 5531), True, 'import numpy as np\n'), ((5551, 5580), 'numpy.polyfit', 'np.polyfit', (['self.X', 'self.Y', '(2)'], {}), '(self.X, self.Y, 2)\n', (5561, 5580), True, 'import numpy as np\n'), ((5608, 5732), 'numpy.sum', 'np.sum', (['(((self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] * self.X + self.\n quadcoeffs[2] - self.Y) / self.res) ** 2)'], {}), '(((self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] * self.X +\n self.quadcoeffs[2] - self.Y) / self.res) ** 2)\n', (5614, 5732), True, 'import numpy as np\n'), ((5766, 5810), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['self.X', 'self.Y'], {'xerr': '(0)', 'yerr': '(2)'}), '(self.X, self.Y, xerr=0, yerr=2)\n', (5778, 5810), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5922), 'matplotlib.pyplot.plot', 'plt.plot', (['self.X', '(self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] * 
self.X + self.\n quadcoeffs[2])'], {}), '(self.X, self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] *\n self.X + self.quadcoeffs[2])\n', (5825, 5922), True, 'import matplotlib.pyplot as plt\n'), ((5917, 5975), 'matplotlib.pyplot.plot', 'plt.plot', (['self.X', '(self.coeffs[0] * self.X + self.coeffs[1])'], {}), '(self.X, self.coeffs[0] * self.X + self.coeffs[1])\n', (5925, 5975), True, 'import matplotlib.pyplot as plt\n'), ((5980, 5995), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (5990, 5995), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (6015, 6020), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6051), 'matplotlib.pyplot.title', 'plt.title', (['"""The Line"""'], {}), "('The Line')\n", (6039, 6051), True, 'import matplotlib.pyplot as plt\n'), ((6061, 6079), 'matplotlib.pyplot.savefig', 'plt.savefig', (['title'], {}), '(title)\n', (6072, 6079), True, 'import matplotlib.pyplot as plt\n'), ((6089, 6098), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6096, 6098), True, 'import matplotlib.pyplot as plt\n'), ((6241, 6301), 'random.uniform', 'random.uniform', (['theExponential.cdf[0]', 'theExponential.maxcdf'], {}), '(theExponential.cdf[0], theExponential.maxcdf)\n', (6255, 6301), False, 'import random\n'), ((6895, 6930), 'numpy.histogram', 'np.histogram', (['self.samples'], {'bins': '(10)'}), '(self.samples, bins=10)\n', (6907, 6930), True, 'import numpy as np\n'), ((7007, 7017), 'numpy.sqrt', 'np.sqrt', (['y'], {}), '(y)\n', (7014, 7017), True, 'import numpy as np\n'), ((7054, 7063), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7061, 7063), True, 'import matplotlib.pyplot as plt\n'), ((7400, 7409), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (7406, 7409), True, 'import numpy as np\n'), ((7431, 7473), 'numpy.polyfit', 'np.polyfit', (['bincenters', 'self.logsamples', '(1)'], {}), '(bincenters, self.logsamples, 1)\n', (7441, 7473), True, 'import numpy as np\n'), ((10009, 10039), 'matplotlib.pyplot.hist', 'plt.hist', (['intercepts'], {'bins': '(100)'}), '(intercepts, bins=100)\n', (10017, 10039), True, 'import matplotlib.pyplot as plt\n'), ((10053, 10092), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""a (quadratic coefficient)"""'], {}), "('a (quadratic coefficient)')\n", (10063, 10092), True, 'import matplotlib.pyplot as plt\n'), ((10106, 10127), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (10116, 10127), True, 'import matplotlib.pyplot as plt\n'), ((10141, 10185), 'matplotlib.pyplot.title', 'plt.title', (['"""Quadratic coefficient histogram"""'], {}), "('Quadratic coefficient histogram')\n", (10150, 10185), True, 'import matplotlib.pyplot as plt\n'), ((10199, 10235), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2b_quads' + fileEnding)"], {}), "('2b_quads' + fileEnding)\n", (10210, 10235), True, 'import matplotlib.pyplot as plt\n'), ((10247, 10256), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10254, 10256), True, 'import matplotlib.pyplot as plt\n'), ((11188, 11206), 'math.isnan', 'math.isnan', (['result'], {}), '(result)\n', (11198, 11206), False, 'import math\n'), ((11543, 11561), 'math.isnan', 'math.isnan', (['result'], {}), '(result)\n', (11553, 11561), False, 'import math\n'), ((7106, 7166), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters', 'y'], {'width': 'width', 'yerr': 'menStd', 'ecolor': '"""g"""'}), "(bincenters, y, width=width, yerr=menStd, ecolor='g')\n", (7113, 7166), True, 'import 
matplotlib.pyplot as plt\n'), ((7180, 7199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (7190, 7199), True, 'import matplotlib.pyplot as plt\n'), ((7213, 7234), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (7223, 7234), True, 'import matplotlib.pyplot as plt\n'), ((7318, 7351), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3b_exp_samples.png"""'], {}), "('3b_exp_samples.png')\n", (7329, 7351), True, 'import matplotlib.pyplot as plt\n'), ((7365, 7374), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7372, 7374), True, 'import matplotlib.pyplot as plt\n'), ((7511, 7589), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters', 'self.logsamples'], {'width': 'width', 'yerr': '(menStd / y)', 'ecolor': '"""g"""'}), "(bincenters, self.logsamples, width=width, yerr=menStd / y, ecolor='g')\n", (7518, 7589), True, 'import matplotlib.pyplot as plt\n'), ((7599, 7618), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (7609, 7618), True, 'import matplotlib.pyplot as plt\n'), ((7632, 7657), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log Entries"""'], {}), "('log Entries')\n", (7642, 7657), True, 'import matplotlib.pyplot as plt\n'), ((7749, 7772), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(1000)'], {}), '(0, 5, 1000)\n', (7760, 7772), True, 'import numpy as np\n'), ((7839, 7865), 'matplotlib.pyplot.plot', 'plt.plot', (['theFitX', 'theFitY'], {}), '(theFitX, theFitY)\n', (7847, 7865), True, 'import matplotlib.pyplot as plt\n'), ((7878, 7915), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3b_exp_samples_log.png"""'], {}), "('3b_exp_samples_log.png')\n", (7889, 7915), True, 'import matplotlib.pyplot as plt\n'), ((7929, 7938), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7936, 7938), True, 'import matplotlib.pyplot as plt\n')] |
from fontTools.misc.fixedTools import floatToFixed
from fontTools.ttLib import TTFont, newTable, registerCustomTableClass
from fontTools.varLib.models import VariationModel, allEqual
from fontTools.varLib.varStore import OnlineVarStoreBuilder
from rcjktools.varco import VarCoFont
from rcjktools.table_VarC import (
fixedCoord,
getToFixedConverterForNumIntBitsForScale,
transformToIntConverters,
transformDefaults,
VARIDX_KEY,
ComponentRecord,
CoordinateRecord,
TransformRecord,
)
def precompileAllComponents(vcData, allLocations, axisTags):
precompiled = {}
masterModel = VariationModel(allLocations, axisTags)
storeBuilder = OnlineVarStoreBuilder(axisTags)
for gn in vcData.keys():
components, locations = vcData[gn]
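        # Map each index in allLocations to this glyph's own location index (None where the
        # glyph has no master at that location) so a per-glyph sub-model can be built.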
sparseMapping = [None] * len(allLocations)
for locIndex, loc in enumerate(locations):
allIndex = allLocations.index(loc)
sparseMapping[allIndex] = locIndex
subModel, mapping = masterModel.getSubModel(sparseMapping)
storeBuilder.setModel(subModel)
# reorder master values according to allLocations
components = [[c[i] for i in mapping] for c in components]
precompiledGlyph = precompileVarComponents(
gn, components, storeBuilder, axisTags
)
if precompiledGlyph is not None:
# glyph components do not contain data that has to go to the 'VarC' table
precompiled[gn] = precompiledGlyph
return precompiled, storeBuilder.finish()
def precompileVarComponents(glyphName, components, storeBuilder, axisTags):
precompiled = []
haveVarCData = False
for component in components:
coordKeys = sorted({k for coord, transform in component for k in coord})
coordDefaults = {k: 0 for k in coordKeys}
coordConverters = {k: fixedCoord for k in coordKeys}
dicts = [coord for coord, transform in component]
coordDict = compileDicts(
dicts,
coordDefaults,
coordConverters,
storeBuilder,
allowIndividualVarIdx=True,
)
dicts = [transform for coord, transform in component]
transformToIntConvertersLocal = dict(transformToIntConverters)
numIntBitsForScale = calcNumIntBitsForScale(dicts)
scaleConvert = getToFixedConverterForNumIntBitsForScale(numIntBitsForScale)
transformToIntConvertersLocal["ScaleX"] = scaleConvert
transformToIntConvertersLocal["ScaleY"] = scaleConvert
transformDict = compileDicts(
dicts, transformDefaults, transformToIntConvertersLocal, storeBuilder
)
if coordDict or transformDict:
haveVarCData = True
precompiled.append(
ComponentRecord(
CoordinateRecord(coordDict),
TransformRecord(transformDict),
numIntBitsForScale,
),
)
if haveVarCData:
return precompiled
else:
return None
def compileDicts(
dicts, dictDefaults, dictConverters, storeBuilder, allowIndividualVarIdx=False
):
resultDict = {}
convertedMasterValues = {}
hasVariations = False # True if any key has variations
for k, default in dictDefaults.items():
masterValues = [d.get(k, default) for d in dicts]
if not allEqual(masterValues):
hasVariations = True
elif masterValues[0] == default:
# No variations, value is default, skip altogether
continue
resultDict[k] = dict(value=masterValues[0])
convertedMasterValues[k] = [dictConverters[k](value) for value in masterValues]
if hasVariations:
for k, masterValues in convertedMasterValues.items():
if allowIndividualVarIdx and allEqual(
masterValues
): # TODO: Avoid second allEqual() call?
continue
base, varIdx = storeBuilder.storeMasters(masterValues)
assert base == masterValues[0], (k, base, masterValues)
resultDict[k][VARIDX_KEY] = varIdx
return resultDict
def calcNumIntBitsForScale(dicts):
minScale, maxScale = _calcMinMaxScale(dicts)
numIntBits = _calcNumIntBits(minScale, maxScale)
return numIntBits
def _calcNumIntBits(minValue, maxValue, maxIntBits=7):
# TODO: there must be a better way, but at least this is correct
assert minValue <= maxValue
for i in range(maxIntBits):
precisionBits = 16 - i
minIntVal = floatToFixed(minValue, precisionBits)
maxIntVal = floatToFixed(maxValue, precisionBits)
if -32768 <= minIntVal and maxIntVal <= 32767:
return i + 1 # use one more: deltas may be bigger! (this is rather fuzzy)
raise ValueError("value does not fit in maxBits")
def _calcMinMaxScale(transformDicts):
minScale = 0
maxScale = 0
for d in transformDicts:
minScale = min(minScale, d.get("ScaleX", 0))
minScale = min(minScale, d.get("ScaleY", 0))
maxScale = max(maxScale, d.get("ScaleX", 0))
maxScale = max(maxScale, d.get("ScaleY", 0))
return minScale, maxScale
def remapVarIdxs(precompiled, mapping):
for glyphName, components in precompiled.items():
for component in components:
for v in component.coord.values():
if VARIDX_KEY in v:
v[VARIDX_KEY] = mapping[v[VARIDX_KEY]]
for v in component.transform.values():
if VARIDX_KEY in v:
v[VARIDX_KEY] = mapping[v[VARIDX_KEY]]
def buildVarCTable(ttf, vcData, allLocations):
axisTags = [axis.axisTag for axis in ttf["fvar"].axes]
varc_table = ttf["VarC"] = newTable("VarC")
varc_table.Version = 0x00010000
precompiled, store = precompileAllComponents(vcData, allLocations, axisTags)
mapping = store.optimize()
remapVarIdxs(precompiled, mapping)
varc_table.GlyphData = precompiled
varc_table.VarStore = store
def buildVarC(
designspacePath, ttfPath, outTTFPath, doTTX, saveWoff2, neutralOnly=False
):
import pathlib
registerCustomTableClass("VarC", "rcjktools.table_VarC", "table_VarC")
ttfPath = pathlib.Path(ttfPath)
if outTTFPath is None:
outTTFPath = ttfPath.parent / (ttfPath.stem + "-varc" + ttfPath.suffix)
else:
outTTFPath = pathlib.Path(outTTFPath)
ttf = TTFont(ttfPath)
axisTags = [axis.axisTag for axis in ttf["fvar"].axes]
globalAxisNames = {axisTag for axisTag in axisTags if axisTag[0] != "V"}
vcFont = VarCoFont(designspacePath)
vcData, allLocations, neutralGlyphNames = vcFont.extractVarCoData(
globalAxisNames, neutralOnly
)
if neutralGlyphNames:
gvarTable = ttf["gvar"]
for glyphName in neutralGlyphNames:
del gvarTable.variations[glyphName]
buildVarCTable(ttf, vcData, allLocations)
if doTTX:
outTTXPath = outTTFPath.parent / (outTTFPath.stem + "-before.ttx")
ttf.saveXML(outTTXPath, tables=["VarC"])
ttf.save(outTTFPath)
ttf = TTFont(outTTFPath, lazy=True) # Load from scratch
if doTTX:
outTTXPath = outTTFPath.parent / (outTTFPath.stem + "-after.ttx")
ttf.saveXML(outTTXPath, tables=["VarC"])
if saveWoff2:
outWoff2Path = outTTFPath.parent / (outTTFPath.stem + ".woff2")
ttf.flavor = "woff2"
ttf.save(outWoff2Path)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("designspace", help="The VarCo .designspace source")
parser.add_argument("ttf", help="The input Variable Font")
parser.add_argument("--output", help="The output Variable Font")
parser.add_argument(
"--ttx", action="store_true", help="write TTX dumps for the VarC table."
)
parser.add_argument("--no-woff2", action="store_true")
parser.add_argument(
"--neutral-only",
action="store_true",
help="hack: build a pseudo static COLRv1 table, that won't respond to the "
"non-hidden axes",
)
args = parser.parse_args()
buildVarC(
args.designspace,
args.ttf,
args.output,
args.ttx,
not args.no_woff2,
args.neutral_only,
)
if __name__ == "__main__":
main()
| [
"fontTools.varLib.models.VariationModel",
"fontTools.ttLib.TTFont",
"argparse.ArgumentParser",
"fontTools.varLib.models.allEqual",
"fontTools.ttLib.newTable",
"fontTools.varLib.varStore.OnlineVarStoreBuilder",
"pathlib.Path",
"rcjktools.table_VarC.getToFixedConverterForNumIntBitsForScale",
"fontTools.misc.fixedTools.floatToFixed",
"rcjktools.table_VarC.CoordinateRecord",
"rcjktools.table_VarC.TransformRecord",
"fontTools.ttLib.registerCustomTableClass",
"rcjktools.varco.VarCoFont"
] | [((615, 653), 'fontTools.varLib.models.VariationModel', 'VariationModel', (['allLocations', 'axisTags'], {}), '(allLocations, axisTags)\n', (629, 653), False, 'from fontTools.varLib.models import VariationModel, allEqual\n'), ((673, 704), 'fontTools.varLib.varStore.OnlineVarStoreBuilder', 'OnlineVarStoreBuilder', (['axisTags'], {}), '(axisTags)\n', (694, 704), False, 'from fontTools.varLib.varStore import OnlineVarStoreBuilder\n'), ((5729, 5745), 'fontTools.ttLib.newTable', 'newTable', (['"""VarC"""'], {}), "('VarC')\n", (5737, 5745), False, 'from fontTools.ttLib import TTFont, newTable, registerCustomTableClass\n'), ((6126, 6196), 'fontTools.ttLib.registerCustomTableClass', 'registerCustomTableClass', (['"""VarC"""', '"""rcjktools.table_VarC"""', '"""table_VarC"""'], {}), "('VarC', 'rcjktools.table_VarC', 'table_VarC')\n", (6150, 6196), False, 'from fontTools.ttLib import TTFont, newTable, registerCustomTableClass\n'), ((6211, 6232), 'pathlib.Path', 'pathlib.Path', (['ttfPath'], {}), '(ttfPath)\n', (6223, 6232), False, 'import pathlib\n'), ((6406, 6421), 'fontTools.ttLib.TTFont', 'TTFont', (['ttfPath'], {}), '(ttfPath)\n', (6412, 6421), False, 'from fontTools.ttLib import TTFont, newTable, registerCustomTableClass\n'), ((6572, 6598), 'rcjktools.varco.VarCoFont', 'VarCoFont', (['designspacePath'], {}), '(designspacePath)\n', (6581, 6598), False, 'from rcjktools.varco import VarCoFont\n'), ((7086, 7115), 'fontTools.ttLib.TTFont', 'TTFont', (['outTTFPath'], {'lazy': '(True)'}), '(outTTFPath, lazy=True)\n', (7092, 7115), False, 'from fontTools.ttLib import TTFont, newTable, registerCustomTableClass\n'), ((7474, 7499), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7497, 7499), False, 'import argparse\n'), ((2348, 2408), 'rcjktools.table_VarC.getToFixedConverterForNumIntBitsForScale', 'getToFixedConverterForNumIntBitsForScale', (['numIntBitsForScale'], {}), '(numIntBitsForScale)\n', (2388, 2408), False, 'from rcjktools.table_VarC import fixedCoord, getToFixedConverterForNumIntBitsForScale, transformToIntConverters, transformDefaults, VARIDX_KEY, ComponentRecord, CoordinateRecord, TransformRecord\n'), ((4532, 4569), 'fontTools.misc.fixedTools.floatToFixed', 'floatToFixed', (['minValue', 'precisionBits'], {}), '(minValue, precisionBits)\n', (4544, 4569), False, 'from fontTools.misc.fixedTools import floatToFixed\n'), ((4590, 4627), 'fontTools.misc.fixedTools.floatToFixed', 'floatToFixed', (['maxValue', 'precisionBits'], {}), '(maxValue, precisionBits)\n', (4602, 4627), False, 'from fontTools.misc.fixedTools import floatToFixed\n'), ((6371, 6395), 'pathlib.Path', 'pathlib.Path', (['outTTFPath'], {}), '(outTTFPath)\n', (6383, 6395), False, 'import pathlib\n'), ((3360, 3382), 'fontTools.varLib.models.allEqual', 'allEqual', (['masterValues'], {}), '(masterValues)\n', (3368, 3382), False, 'from fontTools.varLib.models import VariationModel, allEqual\n'), ((2809, 2836), 'rcjktools.table_VarC.CoordinateRecord', 'CoordinateRecord', (['coordDict'], {}), '(coordDict)\n', (2825, 2836), False, 'from rcjktools.table_VarC import fixedCoord, getToFixedConverterForNumIntBitsForScale, transformToIntConverters, transformDefaults, VARIDX_KEY, ComponentRecord, CoordinateRecord, TransformRecord\n'), ((2854, 2884), 'rcjktools.table_VarC.TransformRecord', 'TransformRecord', (['transformDict'], {}), '(transformDict)\n', (2869, 2884), False, 'from rcjktools.table_VarC import fixedCoord, getToFixedConverterForNumIntBitsForScale, transformToIntConverters, transformDefaults, VARIDX_KEY, 
ComponentRecord, CoordinateRecord, TransformRecord\n'), ((3808, 3830), 'fontTools.varLib.models.allEqual', 'allEqual', (['masterValues'], {}), '(masterValues)\n', (3816, 3830), False, 'from fontTools.varLib.models import VariationModel, allEqual\n')] |
from unifi.objects.base import UnifiBaseObject
from unifi.helper import find_by_attr, json_print
class UnifiDeviceObject(UnifiBaseObject):
def get_port_profile(self, **filter_kwargs):
port = find_by_attr(self.port_table, **filter_kwargs)
port_override = find_by_attr(self.port_overrides, port_idx=port['port_idx'])
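        # Prefer the profile id from a matching port override; otherwise fall back to the
        # profile configured on the port itself.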
portconf_id = port_override['portconf_id'] if port_override and 'portconf_id' in port_override else port['portconf_id']
portconf = find_by_attr(self.controller.portconf(), _id=portconf_id)
return portconf
def set_port_profile(self, portconf, **filter_kwargs):
port = find_by_attr(self.port_table, **filter_kwargs)
port_override = find_by_attr(self.port_overrides, port_idx=port['port_idx'])
if port_override:
port_override['portconf_id'] = portconf['_id']
else:
port_override = {
'port_idx': port['port_idx'],
'portconf_id': portconf['_id']
}
self.port_overrides.append(port_override)
| [
"unifi.helper.find_by_attr"
] | [((206, 252), 'unifi.helper.find_by_attr', 'find_by_attr', (['self.port_table'], {}), '(self.port_table, **filter_kwargs)\n', (218, 252), False, 'from unifi.helper import find_by_attr, json_print\n'), ((277, 337), 'unifi.helper.find_by_attr', 'find_by_attr', (['self.port_overrides'], {'port_idx': "port['port_idx']"}), "(self.port_overrides, port_idx=port['port_idx'])\n", (289, 337), False, 'from unifi.helper import find_by_attr, json_print\n'), ((642, 688), 'unifi.helper.find_by_attr', 'find_by_attr', (['self.port_table'], {}), '(self.port_table, **filter_kwargs)\n', (654, 688), False, 'from unifi.helper import find_by_attr, json_print\n'), ((713, 773), 'unifi.helper.find_by_attr', 'find_by_attr', (['self.port_overrides'], {'port_idx': "port['port_idx']"}), "(self.port_overrides, port_idx=port['port_idx'])\n", (725, 773), False, 'from unifi.helper import find_by_attr, json_print\n')] |
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dmlab2d.dmlab2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from dm_env import test_utils
import numpy as np
import dmlab2d
from dmlab2d import runfiles_helper
class Dmlab2dDmEnvTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
lab2d = dmlab2d.Lab2d(runfiles_helper.find(),
{'levelName': 'examples/level_api'})
return dmlab2d.Environment(lab2d, lab2d.observation_names(), 0)
class Dmlab2DTest(absltest.TestCase):
def _create_env(self, extra_settings=None):
settings = extra_settings.copy() if extra_settings else {}
settings['levelName'] = 'examples/level_api'
return dmlab2d.Lab2d(runfiles_helper.find(), settings)
def test_lab2d_environment_name(self):
self.assertEqual(self._create_env().name(), 'dmlab2d')
def test_lab2d_observation_names(self):
env = self._create_env()
self.assertEqual(env.observation_names(),
['VIEW' + str(i) for i in range(1, 6)])
def test_lab2d_observation_spec(self):
env = self._create_env()
self.assertEqual(
env.observation_spec('VIEW1'), {
'dtype': np.dtype('uint8'),
'shape': (1,)
})
self.assertEqual(
env.observation_spec('VIEW2'), {
'dtype': np.dtype('double'),
'shape': (2,)
})
self.assertEqual(
env.observation_spec('VIEW3'), {
'dtype': np.dtype('int32'),
'shape': (3,)
})
self.assertEqual(
env.observation_spec('VIEW4'), {
'dtype': np.dtype('int64'),
'shape': (4,)
})
# Text is stored in objects.
self.assertEqual(
env.observation_spec('VIEW5'), {
'dtype': np.dtype('O'),
'shape': ()
})
def test_lab2d_action_spec(self):
env = self._create_env()
self.assertEqual(env.action_discrete_names(), ['REWARD_ACT'])
self.assertEqual(
env.action_discrete_spec('REWARD_ACT'), {
'min': 0,
'max': 4
})
self.assertEqual(env.action_continuous_names(), ['OBSERVATION_ACT'])
self.assertEqual(
env.action_continuous_spec('OBSERVATION_ACT'), {
'min': -5,
'max': 5
})
self.assertEqual(env.action_text_names(), ['LOG_EVENT'])
def test_lab2d_start_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
def test_lab2d_events_start(self):
env = self._create_env()
env.start(episode=0, seed=0)
events = env.events()
self.assertLen(events, 1)
event_name, observations = events[0]
self.assertEqual(event_name, 'start')
self.assertLen(observations, 1)
np.testing.assert_array_equal(observations[0], [1, 2, 3])
def test_lab2d_events_cleared_after_advance_not_read(self):
env = self._create_env()
env.start(episode=0, seed=0)
self.assertLen(env.events(), 1)
self.assertLen(env.events(), 1)
env.advance()
self.assertEmpty(env.events())
def test_lab2d_observe(self):
env = self._create_env()
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW1'), [1])
np.testing.assert_array_equal(env.observation('VIEW2'), [1, 2])
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
np.testing.assert_array_equal(env.observation('VIEW4'), [1, 2, 3, 4])
self.assertEqual(env.observation('VIEW5'), b'')
def test_lab2d_ten_steps_terminate_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
for _ in range(9):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_settings_environment(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
for _ in range(4):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_properties_environment(self):
env = self._create_env({'steps': '5'})
properties = env.list_property('')
self.assertLen(properties, 1)
self.assertEqual(properties[0],
('steps', dmlab2d.PropertyAttribute.READABLE_WRITABLE))
self.assertEqual(env.read_property('steps'), '5')
env.write_property('steps', '3')
self.assertEqual(env.read_property('steps'), '3')
env.start(episode=0, seed=0)
for _ in range(2):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_act_discrete(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
env.act_discrete(np.array([2], np.dtype('int32')))
_, reward = env.advance()
self.assertEqual(reward, 2)
def test_lab2d_act_continuous(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
env.act_continuous([10])
env.advance()
np.testing.assert_array_equal(env.observation('VIEW3'), [11, 12, 13])
def test_lab2d_act_text(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
view = env.observation('VIEW5')
self.assertEqual(view, b'')
env.act_text(['Hello'])
env.advance()
view = env.observation('VIEW5')
self.assertEqual(view, b'Hello')
def test_lab2d_invalid_setting(self):
with self.assertRaises(ValueError):
self._create_env({'missing': '5'})
def test_lab2d_bad_action_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.action_discrete_spec('bad_key')
with self.assertRaises(KeyError):
env.action_continuous_spec('bad_key')
def test_lab2d_bad_observation_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.observation_spec('bad_key')
def test_lab2d_observe_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.observation('VIEW1')
def test_lab2d_act_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.act_discrete([0])
with self.assertRaises(RuntimeError):
env.act_continuous([0])
with self.assertRaises(RuntimeError):
env.act_text([''])
def test_lab2d_act_bad_shape(self):
env = self._create_env()
env.start(0, 0)
with self.assertRaises(ValueError):
env.act_discrete([0, 1])
with self.assertRaises(ValueError):
env.act_continuous([0, 1])
def test_lab2d_advance_after_episode_ends(self):
env = self._create_env({'steps': '2'})
env.start(0, 0)
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
with self.assertRaises(RuntimeError):
env.advance()
def test_lab2d_missing_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(KeyError):
env.list_property('missing')
with self.assertRaises(KeyError):
env.read_property('missing')
with self.assertRaises(KeyError):
env.write_property('missing', '10')
def test_lab2d_invalid_ops_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(ValueError):
env.list_property('steps')
with self.assertRaises(ValueError):
env.write_property('steps', 'mouse')
if __name__ == '__main__':
absltest.main()
| [
"dmlab2d.runfiles_helper.find",
"absl.testing.absltest.main",
"numpy.dtype",
"numpy.testing.assert_array_equal"
] | [((8160, 8175), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (8173, 8175), False, 'from absl.testing import absltest\n'), ((3413, 3470), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['observations[0]', '[1, 2, 3]'], {}), '(observations[0], [1, 2, 3])\n', (3442, 3470), True, 'import numpy as np\n'), ((1006, 1028), 'dmlab2d.runfiles_helper.find', 'runfiles_helper.find', ([], {}), '()\n', (1026, 1028), False, 'from dmlab2d import runfiles_helper\n'), ((1385, 1407), 'dmlab2d.runfiles_helper.find', 'runfiles_helper.find', ([], {}), '()\n', (1405, 1407), False, 'from dmlab2d import runfiles_helper\n'), ((1855, 1872), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1863, 1872), True, 'import numpy as np\n'), ((1996, 2014), 'numpy.dtype', 'np.dtype', (['"""double"""'], {}), "('double')\n", (2004, 2014), True, 'import numpy as np\n'), ((2138, 2155), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (2146, 2155), True, 'import numpy as np\n'), ((2279, 2296), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (2287, 2296), True, 'import numpy as np\n'), ((2453, 2466), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (2461, 2466), True, 'import numpy as np\n'), ((5411, 5428), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (5419, 5428), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from json import dumps, loads
# for python 2
# from httplib import HTTPConnection
# for python 3
from http.client import HTTPConnection
# connect with REST server
connection = HTTPConnection('127.0.0.1', 80)
connection.connect()
data = {"ip": "192.168.2.254",
"list_of_commands_to_send": "show version"
}
# Get the method response
connection.request(
'POST',
'/add_command_raw',
dumps(data),
{'Content-Type': 'application/json'},
)
print("Waiting for Server response:")
response = connection.getresponse()
code = response.getcode()
headers = response.getheaders()
result = loads(response.read())
print(result)
print("code: ", code)
print("headers: ", headers)
print(dir(result))
# close the connection
connection.close() | [
"http.client.HTTPConnection",
"json.dumps"
] | [((245, 276), 'http.client.HTTPConnection', 'HTTPConnection', (['"""127.0.0.1"""', '(80)'], {}), "('127.0.0.1', 80)\n", (259, 276), False, 'from http.client import HTTPConnection\n'), ((489, 500), 'json.dumps', 'dumps', (['data'], {}), '(data)\n', (494, 500), False, 'from json import dumps, loads\n')] |
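
# Illustrative alternative (not part of the script above): the same POST issued
# with the third-party `requests` package, which encodes the JSON body and sets
# the Content-Type header itself. Endpoint and payload mirror the values above.
import requests
payload = {"ip": "192.168.2.254", "list_of_commands_to_send": "show version"}
resp = requests.post("http://127.0.0.1/add_command_raw", json=payload)
print(resp.status_code, resp.json())
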
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
"""
NOTE: This class should always comply to the API definition of NfsDatasetClient present in
services/dts/src/oci_cli_dts/physical_appliance_control_plane/client/nfs_dataset_client.py
"""
from oci_cli import cli_util
from services.dts.src.oci_cli_dts.appliance_config_manager import ApplianceConfigManager
from services.dts.src.oci_cli_dts.appliance_constants import APPLIANCE_CONFIGS_BASE_DIR, APPLIANCE_AUTH_USER, \
APPLIANCE_CERT_FILE_NAME
from services.dts.src.oci_cli_dts.physical_appliance_control_plane.client.nfs_dataset_client import NfsDatasetClient
class NfsDatasetClientProxy:
def __init__(self, ctx, appliance_profile):
config_manager = ApplianceConfigManager(APPLIANCE_CONFIGS_BASE_DIR)
appliance_config = config_manager.get_config(appliance_profile)
self.auth_value = "{}:{}".format(APPLIANCE_AUTH_USER, appliance_config.get_access_token())
self.serial_id = appliance_config.get_appliance_serial_id()
config = cli_util.build_config(ctx.obj)
host_name = appliance_config.get_appliance_url()
self_signed_cert = "{}/{}".format(config_manager.get_config_dir(appliance_profile), APPLIANCE_CERT_FILE_NAME)
self.nfs_dataset_client = NfsDatasetClient(
config=config, service_endpoint=host_name, self_signed_cert=self_signed_cert)
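
    # Each proxy method below simply forwards to the underlying NfsDatasetClient,
    # injecting the appliance access token (auth_value) and the appliance serial
    # id so callers never have to pass them explicitly.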
def activate_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.activate_nfs_dataset(dataset_name, **kwargs)
def create_nfs_dataset(self, details, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.create_nfs_dataset(details, **kwargs)
def deactivate_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.deactivate_nfs_dataset(dataset_name, **kwargs)
def delete_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.delete_nfs_dataset(dataset_name, **kwargs)
def get_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.get_nfs_dataset(dataset_name, **kwargs)
def get_nfs_dataset_seal_manifest(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.get_nfs_dataset_seal_manifest(dataset_name, **kwargs)
def get_nfs_dataset_seal_status(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.get_nfs_dataset_seal_status(dataset_name, **kwargs)
def initiate_seal_on_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.initiate_seal_on_nfs_dataset(dataset_name, **kwargs)
def list_nfs_datasets(self, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.list_nfs_datasets(**kwargs)
def reopen_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.reopen_nfs_dataset(dataset_name, **kwargs)
def update_nfs_dataset(self, dataset_name, body, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.update_nfs_dataset(dataset_name, body, **kwargs)
| [
"oci_cli.cli_util.build_config",
"services.dts.src.oci_cli_dts.appliance_config_manager.ApplianceConfigManager",
"services.dts.src.oci_cli_dts.physical_appliance_control_plane.client.nfs_dataset_client.NfsDatasetClient"
] | [((769, 819), 'services.dts.src.oci_cli_dts.appliance_config_manager.ApplianceConfigManager', 'ApplianceConfigManager', (['APPLIANCE_CONFIGS_BASE_DIR'], {}), '(APPLIANCE_CONFIGS_BASE_DIR)\n', (791, 819), False, 'from services.dts.src.oci_cli_dts.appliance_config_manager import ApplianceConfigManager\n'), ((1077, 1107), 'oci_cli.cli_util.build_config', 'cli_util.build_config', (['ctx.obj'], {}), '(ctx.obj)\n', (1098, 1107), False, 'from oci_cli import cli_util\n'), ((1317, 1415), 'services.dts.src.oci_cli_dts.physical_appliance_control_plane.client.nfs_dataset_client.NfsDatasetClient', 'NfsDatasetClient', ([], {'config': 'config', 'service_endpoint': 'host_name', 'self_signed_cert': 'self_signed_cert'}), '(config=config, service_endpoint=host_name,\n self_signed_cert=self_signed_cert)\n', (1333, 1415), False, 'from services.dts.src.oci_cli_dts.physical_appliance_control_plane.client.nfs_dataset_client import NfsDatasetClient\n')] |
import os

from spreaduler import ParamsSheet
from train_attention import train
from options import get_parser
class YourParamsSheet(ParamsSheet):
"""
Your model Params Sheet class
"""
params_sheet_id = '...'
client_credentials = {
"type": "service_account",
"project_id": "....",
"private_key_id": "....",
"private_key": """-----BEGIN PRIVATE KEY-----
........
-----END PRIVATE KEY-----""",
"client_email": "<EMAIL>",
"client_id": "....",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/"
"yourworker%40yourproject.iam.gserviceaccount.com"
}
def __init__(self, parser, server_name):
writable_metrics_and_types = {
'your model precision': float
}
super(YourParamsSheet, self).__init__(
parser,
writable_column_types=writable_metrics_and_types,
experiment_id_column='exp_hash',
server_name=server_name)
if __name__ == '__main__':
server_name = os.environ.get('SERVERNAME', None)
params = YourParamsSheet(get_parser(), server_name)
params.exec_loop(train)
| [
"options.get_parser"
] | [((1351, 1363), 'options.get_parser', 'get_parser', ([], {}), '()\n', (1361, 1363), False, 'from options import get_parser\n')] |
import os
import sys
import time
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
from adaptive_augmentor import AdaAug
from networks import get_model
from networks.projection import Projection
from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension
from config import get_warmup_config
from warmup_scheduler import GradualWarmupScheduler
parser = argparse.ArgumentParser("ada_aug")
parser.add_argument('--dataroot', type=str, default='./', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='cifar10', help='name of dataset')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--num_workers', type=int, default=0, help="num_workers")
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.0001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--use_cuda', type=bool, default=True, help="use cuda default True")
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--use_parallel', action='store_true', default=False, help="use data parallel default False")
parser.add_argument('--model_name', type=str, default='wresnet40_2', help="model name")
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--epochs', type=int, default=600, help='number of training epochs')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--search_dataset', type=str, default='./', help='search dataset name')
parser.add_argument('--gf_model_name', type=str, default='./', help='gf_model name')
parser.add_argument('--gf_model_path', type=str, default='./', help='gf_model path')
parser.add_argument('--h_model_path', type=str, default='./', help='h_model path')
parser.add_argument('--k_ops', type=int, default=1, help="number of augmentation applied during training")
parser.add_argument('--delta', type=float, default=0.3, help="degree of perturbation in magnitude")
parser.add_argument('--temperature', type=float, default=1.0, help="temperature")
parser.add_argument('--n_proj_layer', type=int, default=0, help="number of additional hidden layer in augmentation policy projection")
parser.add_argument('--n_proj_hidden', type=int, default=128, help="number of hidden units in augmentation policy projection layers")
parser.add_argument('--restore_path', type=str, default='./', help='restore model path')
parser.add_argument('--restore', action='store_true', default=False, help='restore model default False')
args = parser.parse_args()
debug = True if args.save == "debug" else False
args.save = '{}-{}'.format(time.strftime("%Y%m%d-%H%M%S"), args.save)
if debug:
args.save = os.path.join('debug', args.save)
else:
args.save = os.path.join('eval', args.dataset, args.save)
utils.create_exp_dir(args.save)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
torch.cuda.set_device(args.gpu)
utils.reproducibility(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
# dataset settings
n_class = get_num_class(args.dataset)
class2label = get_label_name(args.dataset, args.dataroot)
train_queue, valid_queue, _, test_queue = get_dataloaders(
args.dataset, args.batch_size, args.num_workers,
args.dataroot, args.cutout, args.cutout_length,
split=args.train_portion, split_idx=0, target_lb=-1,
search=True)
logging.info(f'Dataset: {args.dataset}')
logging.info(f' |total: {len(train_queue.dataset)}')
logging.info(f' |train: {len(train_queue)*args.batch_size}')
logging.info(f' |valid: {len(valid_queue)*args.batch_size}')
# task model settings
task_model = get_model(model_name=args.model_name,
num_class=n_class,
use_cuda=True, data_parallel=False)
logging.info("param size = %fMB", utils.count_parameters_in_MB(task_model))
# task optimization settings
optimizer = torch.optim.SGD(
task_model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
m, e = get_warmup_config(args.dataset)
scheduler = GradualWarmupScheduler(
optimizer,
multiplier=m,
total_epoch=e,
after_scheduler=scheduler)
logging.info(f'Optimizer: SGD, scheduler: CosineAnnealing, warmup: {m}/{e}')
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
# restore setting
if args.restore:
trained_epoch = utils.restore_ckpt(task_model, optimizer, scheduler, args.restore_path, location=args.gpu) + 1
n_epoch = args.epochs - trained_epoch
logging.info(f'Restoring model from {args.restore_path}, starting from epoch {trained_epoch}')
else:
trained_epoch = 0
n_epoch = args.epochs
# load trained adaaug sub models
search_n_class = get_num_class(args.search_dataset)
gf_model = get_model(model_name=args.gf_model_name,
num_class=search_n_class,
use_cuda=True, data_parallel=False)
h_model = Projection(in_features=gf_model.fc.in_features,
n_layers=args.n_proj_layer,
n_hidden=args.n_proj_hidden).cuda()
utils.load_model(gf_model, f'{args.gf_model_path}/gf_weights.pt', location=args.gpu)
utils.load_model(h_model, f'{args.h_model_path}/h_weights.pt', location=args.gpu)
for param in gf_model.parameters():
param.requires_grad = False
for param in h_model.parameters():
param.requires_grad = False
after_transforms = train_queue.dataset.after_transforms
adaaug_config = {'sampling': 'prob',
'k_ops': args.k_ops,
'delta': args.delta,
'temp': args.temperature,
'search_d': get_dataset_dimension(args.search_dataset),
'target_d': get_dataset_dimension(args.dataset)}
adaaug = AdaAug(after_transforms=after_transforms,
n_class=search_n_class,
gf_model=gf_model,
h_model=h_model,
save_dir=args.save,
config=adaaug_config)
# start training
for i_epoch in range(n_epoch):
epoch = trained_epoch + i_epoch
lr = scheduler.get_last_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
train_acc, train_obj = train(
train_queue, task_model, criterion, optimizer, epoch, args.grad_clip, adaaug)
logging.info('train_acc %f', train_acc)
valid_acc, valid_obj, _, _ = infer(valid_queue, task_model, criterion)
logging.info('valid_acc %f', valid_acc)
scheduler.step()
if epoch % args.report_freq == 0:
test_acc, test_obj, test_acc5, _ = infer(test_queue, task_model, criterion)
logging.info('test_acc %f %f', test_acc, test_acc5)
utils.save_ckpt(task_model, optimizer, scheduler, epoch,
os.path.join(args.save, 'weights.pt'))
adaaug.save_history(class2label)
figure = adaaug.plot_history()
test_acc, test_obj, test_acc5, _ = infer(test_queue, task_model, criterion)
logging.info('test_acc %f %f', test_acc, test_acc5)
logging.info(f'save to {args.save}')
def train(train_queue, model, criterion, optimizer, epoch, grad_clip, adaaug):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
for step, (input, target) in enumerate(train_queue):
target = target.cuda(non_blocking=True)
# get augmented training data from adaaug
aug_images = adaaug(input, mode='exploit')
model.train()
optimizer.zero_grad()
logits = model(aug_images)
loss = criterion(logits, target)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.detach().item(), n)
top1.update(prec1.detach().item(), n)
top5.update(prec5.detach().item(), n)
global_step = step + epoch * len(train_queue)
if global_step % args.report_freq == 0:
logging.info('train %03d %e %f %f', global_step, objs.avg, top1.avg, top5.avg)
# log the policy
if step == 0:
adaaug.add_history(input, target)
return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
with torch.no_grad():
for input, target in valid_queue:
input = input.cuda()
target = target.cuda(non_blocking=True)
logits = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.detach().item(), n)
top1.update(prec1.detach().item(), n)
top5.update(prec5.detach().item(), n)
return top1.avg, objs.avg, top5.avg, objs.avg
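
# Illustrative sketch (not part of the original script): the kind of top-k
# computation `utils.accuracy(logits, target, topk=(1, 5))` is assumed to
# perform for the meters updated above; the real helper may differ in detail.
def _topk_accuracy_example(logits, target, topk=(1, 5)):
    maxk = max(topk)
    _, pred = logits.topk(maxk, dim=1)                # (batch, maxk) predicted class ids
    correct = pred.eq(target.view(-1, 1))             # broadcast against the true labels
    return [correct[:, :k].any(dim=1).float().mean() * 100.0 for k in topk]
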
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"utils.create_exp_dir",
"dataset.get_dataloaders",
"dataset.get_dataset_dimension",
"time.strftime",
"logging.Formatter",
"dataset.get_num_class",
"utils.AvgrageMeter",
"torch.no_grad",
"os.path.join",
"utils.reproducibility",
"config.get_warmup_config",
"torch.cuda.set_device",
"utils.count_parameters_in_MB",
"utils.load_model",
"utils.accuracy",
"adaptive_augmentor.AdaAug",
"utils.restore_ckpt",
"torch.cuda.is_available",
"networks.get_model",
"sys.exit",
"logging.basicConfig",
"networks.projection.Projection",
"torch.nn.CrossEntropyLoss",
"logging.info",
"warmup_scheduler.GradualWarmupScheduler",
"dataset.get_label_name",
"logging.getLogger"
] | [((433, 467), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""ada_aug"""'], {}), "('ada_aug')\n", (456, 467), False, 'import argparse\n'), ((3752, 3783), 'utils.create_exp_dir', 'utils.create_exp_dir', (['args.save'], {}), '(args.save)\n', (3772, 3783), False, 'import utils\n'), ((3823, 3934), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (3842, 3934), False, 'import logging\n'), ((3582, 3612), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (3595, 3612), False, 'import time\n'), ((3651, 3683), 'os.path.join', 'os.path.join', (['"""debug"""', 'args.save'], {}), "('debug', args.save)\n", (3663, 3683), False, 'import os\n'), ((3706, 3751), 'os.path.join', 'os.path.join', (['"""eval"""', 'args.dataset', 'args.save'], {}), "('eval', args.dataset, args.save)\n", (3718, 3751), False, 'import os\n'), ((3975, 4009), 'os.path.join', 'os.path.join', (['args.save', '"""log.txt"""'], {}), "(args.save, 'log.txt')\n", (3987, 4009), False, 'import os\n'), ((4027, 4056), 'logging.Formatter', 'logging.Formatter', (['log_format'], {}), '(log_format)\n', (4044, 4056), False, 'import logging\n'), ((4218, 4249), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (4239, 4249), False, 'import torch\n'), ((4254, 4286), 'utils.reproducibility', 'utils.reproducibility', (['args.seed'], {}), '(args.seed)\n', (4275, 4286), False, 'import utils\n'), ((4291, 4333), 'logging.info', 'logging.info', (["('gpu device = %d' % args.gpu)"], {}), "('gpu device = %d' % args.gpu)\n", (4303, 4333), False, 'import logging\n'), ((4338, 4369), 'logging.info', 'logging.info', (['"""args = %s"""', 'args'], {}), "('args = %s', args)\n", (4350, 4369), False, 'import logging\n'), ((4409, 4436), 'dataset.get_num_class', 'get_num_class', (['args.dataset'], {}), '(args.dataset)\n', (4422, 4436), False, 'from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension\n'), ((4455, 4498), 'dataset.get_label_name', 'get_label_name', (['args.dataset', 'args.dataroot'], {}), '(args.dataset, args.dataroot)\n', (4469, 4498), False, 'from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension\n'), ((4545, 4732), 'dataset.get_dataloaders', 'get_dataloaders', (['args.dataset', 'args.batch_size', 'args.num_workers', 'args.dataroot', 'args.cutout', 'args.cutout_length'], {'split': 'args.train_portion', 'split_idx': '(0)', 'target_lb': '(-1)', 'search': '(True)'}), '(args.dataset, args.batch_size, args.num_workers, args.\n dataroot, args.cutout, args.cutout_length, split=args.train_portion,\n split_idx=0, target_lb=-1, search=True)\n', (4560, 4732), False, 'from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension\n'), ((4762, 4802), 'logging.info', 'logging.info', (['f"""Dataset: {args.dataset}"""'], {}), "(f'Dataset: {args.dataset}')\n", (4774, 4802), False, 'import logging\n'), ((5038, 5134), 'networks.get_model', 'get_model', ([], {'model_name': 'args.model_name', 'num_class': 'n_class', 'use_cuda': '(True)', 'data_parallel': '(False)'}), '(model_name=args.model_name, num_class=n_class, use_cuda=True,\n data_parallel=False)\n', (5047, 5134), False, 'from networks import get_model\n'), ((5644, 5675), 'config.get_warmup_config', 'get_warmup_config', 
(['args.dataset'], {}), '(args.dataset)\n', (5661, 5675), False, 'from config import get_warmup_config\n'), ((5692, 5785), 'warmup_scheduler.GradualWarmupScheduler', 'GradualWarmupScheduler', (['optimizer'], {'multiplier': 'm', 'total_epoch': 'e', 'after_scheduler': 'scheduler'}), '(optimizer, multiplier=m, total_epoch=e,\n after_scheduler=scheduler)\n', (5714, 5785), False, 'from warmup_scheduler import GradualWarmupScheduler\n'), ((5835, 5911), 'logging.info', 'logging.info', (['f"""Optimizer: SGD, scheduler: CosineAnnealing, warmup: {m}/{e}"""'], {}), "(f'Optimizer: SGD, scheduler: CosineAnnealing, warmup: {m}/{e}')\n", (5847, 5911), False, 'import logging\n'), ((5928, 5949), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5947, 5949), True, 'import torch.nn as nn\n'), ((6422, 6456), 'dataset.get_num_class', 'get_num_class', (['args.search_dataset'], {}), '(args.search_dataset)\n', (6435, 6456), False, 'from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension\n'), ((6472, 6579), 'networks.get_model', 'get_model', ([], {'model_name': 'args.gf_model_name', 'num_class': 'search_n_class', 'use_cuda': '(True)', 'data_parallel': '(False)'}), '(model_name=args.gf_model_name, num_class=search_n_class, use_cuda\n =True, data_parallel=False)\n', (6481, 6579), False, 'from networks import get_model\n'), ((6819, 6908), 'utils.load_model', 'utils.load_model', (['gf_model', 'f"""{args.gf_model_path}/gf_weights.pt"""'], {'location': 'args.gpu'}), "(gf_model, f'{args.gf_model_path}/gf_weights.pt', location=\n args.gpu)\n", (6835, 6908), False, 'import utils\n'), ((6908, 6994), 'utils.load_model', 'utils.load_model', (['h_model', 'f"""{args.h_model_path}/h_weights.pt"""'], {'location': 'args.gpu'}), "(h_model, f'{args.h_model_path}/h_weights.pt', location=\n args.gpu)\n", (6924, 6994), False, 'import utils\n'), ((7532, 7680), 'adaptive_augmentor.AdaAug', 'AdaAug', ([], {'after_transforms': 'after_transforms', 'n_class': 'search_n_class', 'gf_model': 'gf_model', 'h_model': 'h_model', 'save_dir': 'args.save', 'config': 'adaaug_config'}), '(after_transforms=after_transforms, n_class=search_n_class, gf_model=\n gf_model, h_model=h_model, save_dir=args.save, config=adaaug_config)\n', (7538, 7680), False, 'from adaptive_augmentor import AdaAug\n'), ((8765, 8816), 'logging.info', 'logging.info', (['"""test_acc %f %f"""', 'test_acc', 'test_acc5'], {}), "('test_acc %f %f', test_acc, test_acc5)\n", (8777, 8816), False, 'import logging\n'), ((8821, 8857), 'logging.info', 'logging.info', (['f"""save to {args.save}"""'], {}), "(f'save to {args.save}')\n", (8833, 8857), False, 'import logging\n'), ((8950, 8970), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (8968, 8970), False, 'import utils\n'), ((8982, 9002), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (9000, 9002), False, 'import utils\n'), ((9014, 9034), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (9032, 9034), False, 'import utils\n'), ((10090, 10110), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (10108, 10110), False, 'import utils\n'), ((10122, 10142), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (10140, 10142), False, 'import utils\n'), ((10154, 10174), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (10172, 10174), False, 'import utils\n'), ((4058, 4077), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4075, 4077), False, 'import logging\n'), ((4118, 4143), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (4141, 4143), False, 'import torch\n'), ((4153, 4192), 'logging.info', 'logging.info', (['"""no gpu device available"""'], {}), "('no gpu device available')\n", (4165, 4192), False, 'import logging\n'), ((4201, 4212), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4209, 4212), False, 'import sys\n'), ((5225, 5265), 'utils.count_parameters_in_MB', 'utils.count_parameters_in_MB', (['task_model'], {}), '(task_model)\n', (5253, 5265), False, 'import utils\n'), ((6201, 6305), 'logging.info', 'logging.info', (['f"""Restoring model from {args.restore_path}, starting from epoch {trained_epoch}"""'], {}), "(\n f'Restoring model from {args.restore_path}, starting from epoch {trained_epoch}'\n )\n", (6213, 6305), False, 'import logging\n'), ((7405, 7447), 'dataset.get_dataset_dimension', 'get_dataset_dimension', (['args.search_dataset'], {}), '(args.search_dataset)\n', (7426, 7447), False, 'from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension\n'), ((7481, 7516), 'dataset.get_dataset_dimension', 'get_dataset_dimension', (['args.dataset'], {}), '(args.dataset)\n', (7502, 7516), False, 'from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension\n'), ((7922, 7963), 'logging.info', 'logging.info', (['"""epoch %d lr %e"""', 'epoch', 'lr'], {}), "('epoch %d lr %e', epoch, lr)\n", (7934, 7963), False, 'import logging\n'), ((8101, 8140), 'logging.info', 'logging.info', (['"""train_acc %f"""', 'train_acc'], {}), "('train_acc %f', train_acc)\n", (8113, 8140), False, 'import logging\n'), ((8229, 8268), 'logging.info', 'logging.info', (['"""valid_acc %f"""', 'valid_acc'], {}), "('valid_acc %f', valid_acc)\n", (8241, 8268), False, 'import logging\n'), ((9509, 9552), 'utils.accuracy', 'utils.accuracy', (['logits', 'target'], {'topk': '(1, 5)'}), '(logits, target, topk=(1, 5))\n', (9523, 9552), False, 'import utils\n'), ((10201, 10216), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10214, 10216), False, 'import torch\n'), ((6052, 6146), 'utils.restore_ckpt', 'utils.restore_ckpt', (['task_model', 'optimizer', 'scheduler', 'args.restore_path'], {'location': 'args.gpu'}), '(task_model, optimizer, scheduler, args.restore_path,\n location=args.gpu)\n', (6070, 6146), False, 'import utils\n'), ((6646, 6754), 'networks.projection.Projection', 'Projection', ([], {'in_features': 'gf_model.fc.in_features', 'n_layers': 'args.n_proj_layer', 'n_hidden': 'args.n_proj_hidden'}), '(in_features=gf_model.fc.in_features, n_layers=args.n_proj_layer,\n n_hidden=args.n_proj_hidden)\n', (6656, 6754), False, 'from networks.projection import Projection\n'), ((8438, 8489), 'logging.info', 'logging.info', (['"""test_acc %f %f"""', 'test_acc', 'test_acc5'], {}), "('test_acc %f %f', test_acc, test_acc5)\n", (8450, 8489), False, 'import logging\n'), ((8568, 8605), 'os.path.join', 'os.path.join', (['args.save', '"""weights.pt"""'], {}), "(args.save, 'weights.pt')\n", (8580, 8605), False, 'import os\n'), ((9831, 9909), 'logging.info', 'logging.info', (['"""train %03d %e %f %f"""', 'global_step', 'objs.avg', 'top1.avg', 'top5.avg'], {}), "('train %03d %e %f %f', global_step, objs.avg, top1.avg, top5.avg)\n", (9843, 9909), False, 'import logging\n'), ((10453, 10496), 'utils.accuracy', 'utils.accuracy', (['logits', 'target'], {'topk': '(1, 5)'}), '(logits, target, topk=(1, 5))\n', (10467, 10496), False, 'import utils\n')] |
import tensorflow as tf
sess = tf.Session()
# Create a variable named v inside the namespace (variable scope) "foo"
with tf.variable_scope("foo"):
    # Create v initialized to the constant 1
v= tf.get_variable('v1',[1],initializer = tf.constant_initializer(1.0))
# Because a variable v has already been created in the "foo" scope, the code below would raise an error
#with tf.variable_scope("foo"):
# v= tf.get_variable('v',[1])
# When creating the context manager, set the reuse parameter (True or tf.AUTO_REUSE); tf.get_variable will then fetch the already-declared variable instead of creating a new one.
# Note that with tf.variable_scope("foo") must name the scope in which the variable was defined ("foo"); an unnamed scope such as tf.variable_scope("") or any other scope will not work.
with tf.variable_scope("foo",reuse =tf.AUTO_REUSE):
v1= tf.get_variable('v1',[1], initializer = tf.constant_initializer(5.0))
    print(v1==v) # prints True: v1 and v refer to the same variable
init = tf.initialize_all_variables()
sess.run(init)
print(sess.run(v1))
print(sess.run(v))
with tf.variable_scope("foo1",reuse = False):
v1= tf.get_variable('v1',[1], initializer = tf.constant_initializer(5.0))
    print(v1==v) # prints False here: reuse=False in the new scope "foo1" creates a fresh variable, distinct from v
init = tf.initialize_all_variables()
sess.run(init)
print(sess.run(v1))
print(sess.run(v))
print(v1.name)  # the variable name carries its scope, e.g. "foo1/v1:0"
'''
#There are two main ways to obtain a variable, shown below. In practice a variable produced by tf.get_variable must be used together with tf.variable_scope, otherwise running the script raises an error.
#v = tf.get_variable('v222',shape= [1],initializer = tf.constant_initializer(10.0))
#Defining the variable directly with tf.Variable never raises this error and can be called repeatedly.
#vc = tf.Variable(tf.constant(1.0,shape = [1]),name = 'v')
#print(vc)
#The following pairs tf.get_variable with the with tf.variable_scope(...) syntax; when reuse=True the variable must already have been defined.
with tf.variable_scope('zdx',reuse = True):
v = tf.get_variable('v222',shape= [1],initializer = tf.constant_initializer(100.0))
print(v)
v1 = tf.get_variable('v222',shape= [1],initializer = tf.constant_initializer(2.0))
print(v1==v)
init = tf.initialize_all_variables()
sess.run(init)
print(sess.run(v1))
print(sess.run(v))
''' | [
"tensorflow.constant_initializer",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.initialize_all_variables"
] | [((33, 45), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (43, 45), True, 'import tensorflow as tf\n'), ((75, 99), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""foo"""'], {}), "('foo')\n", (92, 99), True, 'import tensorflow as tf\n'), ((437, 482), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""foo"""'], {'reuse': 'tf.AUTO_REUSE'}), "('foo', reuse=tf.AUTO_REUSE)\n", (454, 482), True, 'import tensorflow as tf\n'), ((612, 641), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (639, 641), True, 'import tensorflow as tf\n'), ((715, 753), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""foo1"""'], {'reuse': '(False)'}), "('foo1', reuse=False)\n", (732, 753), True, 'import tensorflow as tf\n'), ((884, 913), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (911, 913), True, 'import tensorflow as tf\n'), ((163, 191), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (186, 191), True, 'import tensorflow as tf\n'), ((532, 560), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(5.0)'], {}), '(5.0)\n', (555, 560), True, 'import tensorflow as tf\n'), ((804, 832), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(5.0)'], {}), '(5.0)\n', (827, 832), True, 'import tensorflow as tf\n')] |
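
# A compact illustration of the reuse semantics discussed in the comments above
# (assumes TensorFlow 1.x, as in the snippet): tf.AUTO_REUSE creates the
# variable on first use and fetches it afterwards, while reuse=True requires it
# to exist already.
import tensorflow as tf
with tf.variable_scope("demo", reuse=tf.AUTO_REUSE):
    a = tf.get_variable("w", [1])   # created here
with tf.variable_scope("demo", reuse=tf.AUTO_REUSE):
    b = tf.get_variable("w", [1])   # fetched, not recreated
print(a is b)  # True: both names resolve to the same variable object
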
from collections import namedtuple
from typing import Dict, List, Callable
Node = namedtuple('Node', 'name parent children data')
def make_tree_from_adj_list(adj_list):
root = 'COM'
nodes: Dict['str', Node] = {root: Node(root, None, [], {})}
for parent, child in adj_list:
node = Node(child, parent, [], {})
nodes[child] = node
    # N.B. I modify nodes under iteration, so I cast to list and slice to get a fixed iterator
for node in list(nodes.values())[:]:
if not (node.parent in nodes.keys()) and node.name != root:
parent_node = Node(node.parent, root, [], {})
nodes[node.parent] = parent_node
for node in nodes.values():
if node.name != root:
nodes[node.parent].children.append(node)
return nodes[root]
def compute_descendants(tree_root, f_descendants='n_descendants'):
topo_sorted_nodes = all_descendants_BFS(tree_root)
reverse_topo_sort = reversed(topo_sorted_nodes)
for n in reverse_topo_sort:
if len(n.children) == 0:
n.data[f_descendants] = 0
else:
n.data[f_descendants] = len(n.children) + sum(nn.data[f_descendants] for nn in n.children)
def all_descendants_BFS(tree_root: Node) -> List[Node]:
"""All descendents of a node, in Breadth First Search order"""
topo_sorted_nodes = [tree_root]
for n in topo_sorted_nodes:
topo_sorted_nodes += n.children
return topo_sorted_nodes
def find_DFS(predicate: Callable[[Node], bool], node: Node) -> List[Node]:
"""Returns the path in the tree from the root node to the first element that fulfils the predicate"""
def find_DFS_(predicate,node) -> List[Node]:
if predicate(node):
return [node]
elif len(node.children) == 0:
return []
else:
for c in node.children:
dfs1 = find_DFS_(predicate,c)
if len(dfs1) > 0:
return [node] + dfs1
return []
path_found = find_DFS_(predicate,node)
if len(path_found) > 0:
return path_found
else:
raise ValueError("There is no element in the tree that fulfils the predicate.")
def calculate_hops(root: Node) -> int:
nodes = all_descendants_BFS(root)
bottom_up = reversed(nodes)
for node in bottom_up:
try:
p1 = find_DFS(lambda n: n.name == 'YOU', node)
p2 = find_DFS(lambda n: n.name == 'SAN', node)
hops_to_santa = len(p1) + len(p2) - 4 #remove both endpoints of both paths
return hops_to_santa
except ValueError:
pass
raise ValueError("There is no common object that one can orbit hop through to get to Santa!") | [
"collections.namedtuple"
] | [((83, 130), 'collections.namedtuple', 'namedtuple', (['"""Node"""', '"""name parent children data"""'], {}), "('Node', 'name parent children data')\n", (93, 130), False, 'from collections import namedtuple\n')] |
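
# Worked example (illustrative data, not from the module above): build a tiny
# orbit map, count direct and indirect descendants, and compute the number of
# orbital transfers between the objects that YOU and SAN orbit.
adj = [('COM', 'B'), ('B', 'C'), ('C', 'YOU'), ('B', 'SAN')]
root = make_tree_from_adj_list(adj)
compute_descendants(root)
print(root.data['n_descendants'])  # 4 -- every node below COM
print(calculate_hops(root))        # 1 -- one transfer: C (YOU's parent) -> B (SAN's parent)
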
from iotbx import mtz
mtz_obj = mtz.object(file_name="3nd4.mtz")
# Only works with mtz.object.
# Does not work if mtz is read in with iotbx.file_reader.
miller_arrays_dict = mtz_obj.as_miller_arrays_dict()
| [
"iotbx.mtz.object"
] | [((32, 64), 'iotbx.mtz.object', 'mtz.object', ([], {'file_name': '"""3nd4.mtz"""'}), "(file_name='3nd4.mtz')\n", (42, 64), False, 'from iotbx import mtz\n')] |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from collections import namedtuple
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torch import distributed
from .initializers import MIN_MAX_INITIALIZERS
from .quantize_functions import symmetric_quantize, asymmetric_quantize
from ..layer_utils import COMPRESSION_MODULES
from ..registry import Registry
from ..utils import get_per_channel_scale_shape
logger = logging.getLogger(__name__)
QUANTIZATION_MODULES = Registry('quantization_modules')
BINARIZATION_MODULES = Registry('binarization_modules')
class QuantizationMode:
SYMMETRIC = "symmetric"
ASYMMETRIC = "asymmetric"
class BinarizationMode:
XNOR = "xnor"
DOREFA = "dorefa"
QuantizationParams = namedtuple(
'QuantizationParams', ['bits', 'mode', 'signed', 'signed_scope', 'per_channel']
)
QuantizationParams.__new__.__defaults__ = (8, QuantizationMode.SYMMETRIC, False, [], False)
class QuantizerConfig:
def __init__(self, params: QuantizationParams, input_shape=None, is_weights=False, per_channel=False,
within_signed_scope=False):
self.params = params
self.is_weights = is_weights
self.within_signed_scope = within_signed_scope
self.per_channel = per_channel
self.input_shape = input_shape
class BaseQuantizer(nn.Module):
def __init__(self, config: QuantizerConfig):
super().__init__()
self.config = config
self.init_stage = False
self.initialized = False
self.state_dict_name = None
class LoadStateListener:
"""
            Check whether a quantization module is going to be updated by new values from state_dict or checkpoint.
"""
def __init__(self, module):
# pylint: disable=protected-access
self.hook = module._register_load_state_dict_pre_hook(partial(self.hook_fn, module=module))
def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,
module):
if module.state_dict_name:
for module_key in module.state_dict().keys():
candidate = module.state_dict_name + '.' + module_key
if candidate in state_dict:
module.initialized = True
def close(self):
self.hook.remove()
self.load_listener = LoadStateListener(self)
def forward(self, x):
if self.init_stage:
return x
return self.quantize(x)
def quantize(self, x):
raise NotImplementedError
@COMPRESSION_MODULES.register()
@QUANTIZATION_MODULES.register(QuantizationMode.SYMMETRIC)
class SymmetricQuantizer(BaseQuantizer):
def __init__(self, config):
super().__init__(config)
self.input_shape = config.input_shape
self.per_channel = config.per_channel
self.is_weights = config.is_weights
self.within_signed_scope = config.within_signed_scope
params = config.params
self.num_bits = params.bits
self.signed_tensor = nn.Parameter(torch.IntTensor([params.signed]), requires_grad=False)
self.collect_scale_statistics = False
scale_shape = 1
if self.per_channel:
scale_shape = get_per_channel_scale_shape(self.input_shape, self.is_weights)
self.scale = nn.Parameter(torch.ones(scale_shape), requires_grad=True)
self.init_stage = False
self.eps = 1e-16
self.level_high = self.level_low = 0
self.levels = 2 ** self.num_bits
if self.is_weights:
self.levels -= 1
def set_level_ranges(self):
if self.signed:
self.level_high = 2 ** (self.num_bits - 1) - 1
self.level_low = -(self.level_high + 1)
if self.is_weights:
self.level_low += 1
else:
self.level_high = 2 ** self.num_bits - 1
self.level_low = 0
@property
def signed(self):
return self.signed_tensor.item() == 1
@signed.setter
def signed(self, signed: bool):
self.signed_tensor.fill_(signed)
def quantize(self, x):
self.set_level_ranges()
return symmetric_quantize(x, self.levels, self.level_low, self.level_high, self.scale, self.eps)
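
# Worked example (illustrative, mirrors set_level_ranges() above): the integer
# grid used by the default 8-bit symmetric quantizer.
def _example_symmetric_grid(num_bits=8, signed=True, is_weights=True):
    level_high = 2 ** (num_bits - 1) - 1 if signed else 2 ** num_bits - 1
    level_low = -(level_high + 1) if signed else 0
    if signed and is_weights:
        level_low += 1
    return level_low, level_high  # (-127, 127) for signed 8-bit weights
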
@MIN_MAX_INITIALIZERS.register('SymmetricQuantizer')
def _initializer(module, name, min_value, max_value, distributed_):
    if min_value.item() == np.inf or max_value.item() == -np.inf:
raise AttributeError('Statistics is not collected for {}'.format(name))
sign = min_value.item() < 0 or module.within_signed_scope
if sign != module.signed:
logger.warning("signed set incorrectly")
module.signed = int(sign)
if abs(max_value) > 0.1:
module.scale.data.fill_(max_value.item())
if distributed_:
distributed.broadcast(module.scale, 0)
distributed.broadcast(module.signed_tensor, 0)
logger.debug("Statistics: min={:.2f} max={:.2f}".format(min_value.item(), max_value.item()))
logger.info(
"Set sign: {} and scale: {:04.2f} for {}".format(module.signed, module.scale.item(), name))
@COMPRESSION_MODULES.register()
@QUANTIZATION_MODULES.register(QuantizationMode.ASYMMETRIC)
class AsymmetricQuantizer(BaseQuantizer):
def __init__(self, config):
super().__init__(config)
self.is_weights = config.is_weights
self.input_shape = config.input_shape
self.per_channel = config.per_channel
params = config.params
self.bits = params.bits
scale_shape = 1
if self.per_channel:
scale_shape = get_per_channel_scale_shape(self.input_shape, self.is_weights)
self.input_low = nn.Parameter(torch.zeros(scale_shape), requires_grad=True)
self.input_range = nn.Parameter(torch.ones(scale_shape), requires_grad=True)
self.eps = 1e-16
@property
def signed(self):
return True
@property
def level_high(self):
return 2 ** self.bits - 1
@property
def level_low(self):
return 0
@property
def levels(self):
return 2 ** self.bits
def quantize(self, x):
return asymmetric_quantize(x, self.levels, self.level_low, self.level_high, self.input_low, self.input_range,
self.eps)
@MIN_MAX_INITIALIZERS.register('AsymmetricQuantizer')
def _initializer(module, name, min_value, max_value, distributed_):
if min_value.item() == np.inf or max_value.item() == -np.inf:
raise AttributeError('Statistics is not collected for {}'.format(name))
module.input_low.data.fill_(min_value.item())
range_ = (max_value - min_value).item()
if range_ > 0.01:
module.input_range.data.fill_(range_)
if distributed_:
distributed.broadcast(module.input_low, 0)
distributed.broadcast(module.input_range, 0)
logger.debug("Statistics: min={:.2f} max={:.2f}".format(min_value.item(), max_value.item()))
logger.info("Set input_low: {:04.2f} and input_range: {:04.2f} for {}"
.format(module.input_low.item(), module.input_range.item(), name))
| [
"torch.ones",
"functools.partial",
"torch.IntTensor",
"collections.namedtuple",
"torch.zeros",
"torch.distributed.broadcast",
"logging.getLogger"
] | [((1000, 1027), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1017, 1027), False, 'import logging\n'), ((1314, 1409), 'collections.namedtuple', 'namedtuple', (['"""QuantizationParams"""', "['bits', 'mode', 'signed', 'signed_scope', 'per_channel']"], {}), "('QuantizationParams', ['bits', 'mode', 'signed', 'signed_scope',\n 'per_channel'])\n", (1324, 1409), False, 'from collections import namedtuple\n'), ((5494, 5532), 'torch.distributed.broadcast', 'distributed.broadcast', (['module.scale', '(0)'], {}), '(module.scale, 0)\n', (5515, 5532), False, 'from torch import distributed\n'), ((5541, 5587), 'torch.distributed.broadcast', 'distributed.broadcast', (['module.signed_tensor', '(0)'], {}), '(module.signed_tensor, 0)\n', (5562, 5587), False, 'from torch import distributed\n'), ((7449, 7491), 'torch.distributed.broadcast', 'distributed.broadcast', (['module.input_low', '(0)'], {}), '(module.input_low, 0)\n', (7470, 7491), False, 'from torch import distributed\n'), ((7500, 7544), 'torch.distributed.broadcast', 'distributed.broadcast', (['module.input_range', '(0)'], {}), '(module.input_range, 0)\n', (7521, 7544), False, 'from torch import distributed\n'), ((3744, 3776), 'torch.IntTensor', 'torch.IntTensor', (['[params.signed]'], {}), '([params.signed])\n', (3759, 3776), False, 'import torch\n'), ((4022, 4045), 'torch.ones', 'torch.ones', (['scale_shape'], {}), '(scale_shape)\n', (4032, 4045), False, 'import torch\n'), ((6385, 6409), 'torch.zeros', 'torch.zeros', (['scale_shape'], {}), '(scale_shape)\n', (6396, 6409), False, 'import torch\n'), ((6471, 6494), 'torch.ones', 'torch.ones', (['scale_shape'], {}), '(scale_shape)\n', (6481, 6494), False, 'import torch\n'), ((2467, 2503), 'functools.partial', 'partial', (['self.hook_fn'], {'module': 'module'}), '(self.hook_fn, module=module)\n', (2474, 2503), False, 'from functools import partial\n')] |
import curses, sys, os
#Servo controller connected to IC2
import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)
from time import sleep
# ESC brushless motor states: direction, on/off
toggleState = 400
throttle = 450
delta = 20
print("toggleState1")
pwm.set_pwm(2,0,toggleState)
sleep(0.2)
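# The loop below sweeps pwm_value through 430, 410, 390, 370 and 350; whenever
# the value drops below toggleState (400), the ESC is first re-armed at
# toggleState before the lower throttle value is applied.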
for i in range(1,6):
pwm_value = throttle -i*delta
if pwm_value < toggleState:
pwm.set_pwm(2,0,toggleState)
sleep(0.2)
pwm.set_pwm(2,0, pwm_value)
sleep(0.4)
print(pwm_value)
pwm.set_pwm(2,0,toggleState)
| [
"Adafruit_PCA9685.PCA9685",
"time.sleep"
] | [((89, 115), 'Adafruit_PCA9685.PCA9685', 'Adafruit_PCA9685.PCA9685', ([], {}), '()\n', (113, 115), False, 'import Adafruit_PCA9685\n'), ((304, 314), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (309, 314), False, 'from time import sleep\n'), ((494, 504), 'time.sleep', 'sleep', (['(0.4)'], {}), '(0.4)\n', (499, 504), False, 'from time import sleep\n'), ((447, 457), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (452, 457), False, 'from time import sleep\n')] |
import pytropos.internals.values as pv
from pytropos.internals.values.builtin_values import *
from pytropos.internals.values.python_values.builtin_mutvalues import *
from pytropos.internals.values.python_values.wrappers import *
from pytropos.internals.values.python_values.python_values import PythonValue, PT
exitcode = 1
r = List([pv.int(21)], size=(1, 1))
store = {
'_': PythonValue(PT.Top),
'f': r.get_attrs()['append'],
'r': PythonValue(r),
}
| [
"pytropos.internals.values.int",
"pytropos.internals.values.python_values.python_values.PythonValue"
] | [((380, 399), 'pytropos.internals.values.python_values.python_values.PythonValue', 'PythonValue', (['PT.Top'], {}), '(PT.Top)\n', (391, 399), False, 'from pytropos.internals.values.python_values.python_values import PythonValue, PT\n'), ((440, 454), 'pytropos.internals.values.python_values.python_values.PythonValue', 'PythonValue', (['r'], {}), '(r)\n', (451, 454), False, 'from pytropos.internals.values.python_values.python_values import PythonValue, PT\n'), ((336, 346), 'pytropos.internals.values.int', 'pv.int', (['(21)'], {}), '(21)\n', (342, 346), True, 'import pytropos.internals.values as pv\n')] |
import re
import setuptools
def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ''
with open(fname, 'r') as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError('Cannot find version information')
return version
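
# Illustrative note (not part of the original setup script): the regex above
# captures the quoted value from a line such as
#     __version__ = '0.1.0'
# so find_version() returns the bare string "0.1.0"; double quotes also match.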
__version__ = find_version('doc_scanner/__init__.py')
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="doc_scanner",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="A document scanner based on openCV3 and scikit-image",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Guoli-Lyu/document-scanner",
packages=setuptools.find_packages(),
classifiers=(
'Development Status :: 4 - Beta',
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
),
test_suite='tests',
project_urls={
'Bug Reports': 'https://github.com/Guoli-Lyu/document-scanner/issues',
},
install_requires=[
'numpy',
'scikit-image',
'opencv-python',
'pandas',
],
)
| [
"setuptools.find_packages",
"re.compile"
] | [((233, 286), 're.compile', 're.compile', (['"""__version__ = [\\\\\'"]([^\\\\\'"]*)[\\\\\'"]"""'], {}), '(\'__version__ = [\\\\\\\'"]([^\\\\\\\'"]*)[\\\\\\\'"]\')\n', (243, 286), False, 'import re\n'), ((990, 1016), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1014, 1016), False, 'import setuptools\n')] |
#!/usr/bin/python3
import os.path
import openpyxl
import requests
import json
import argparse
BASE_URL_XIV_API_CHARACTER: str = "https://xivapi.com/character/"
GERMAN_TO_ENGLISH_CLASS_DICT: dict = {}
SUB_30_MAPPING_DICT: dict = {}
CONFIG_LOCATION = os.getcwd()
DEBUG_ENABLED = False
def main(filepath):
"""main method, used to process data and update the excel workbook"""
workbook: openpyxl.Workbook = openpyxl.load_workbook(filepath)
worksheet = workbook.active
class_range: tuple = generate_class_range(worksheet)
for i in range(worksheet.min_row + 1, worksheet.max_row):
current_row: tuple = worksheet[i]
if not current_row[0].value and not current_row[1].value:
break
current_character_name: str = f"{current_row[0].value} {current_row[1].value}"
current_character_info: dict = process_class_info(get_character_info(get_character_id(current_character_name)))
if not current_character_info:
print(f"Cant process data for character: {current_character_name}")
continue
update_character_info(current_character_info, worksheet, class_range, worksheet[worksheet.min_row], i)
workbook.save(filepath.replace(".xlsx", "_updated.xlsx"))
print("Finished!")
def update_character_info(current_character_info: dict, worksheet: openpyxl.workbook.workbook.Worksheet,
class_range: tuple, header_row: tuple, current_row: int):
"""method to update the character class information in the excel sheet"""
for i in range(class_range[0], class_range[1]):
# reduce i by one because column index is the actual index, while the header_row is a list,
# thus reducing the index by 1
mapped_class_name = GERMAN_TO_ENGLISH_CLASS_DICT.get(header_row[i - 1].value)
new_class_val = current_character_info.get(mapped_class_name, 0)
if DEBUG_ENABLED:
character_name = f"{worksheet.cell(current_row, 1).value} {worksheet.cell(current_row, 2).value}"
current_class = header_row[i - 1].value
print(f"Setting value {new_class_val} for class {current_class} for character {character_name}")
current_cell = worksheet.cell(row=current_row, column=i)
current_cell.value = new_class_val
def process_class_info(class_info: dict):
"""method to process the class info of every player, mapping it into a dictionary for easier usage"""
if class_info is None:
return None
data_to_process = class_info.get("Character", {}).get("ClassJobs", None)
if not data_to_process:
raise IOError
out: dict = {SUB_30_MAPPING_DICT.get(entry["UnlockedState"]["Name"], entry["UnlockedState"]["Name"]): entry["Level"]
for entry in data_to_process}
# special case -> arcanist branching into two main jobs
out["Summoner"] = out["Scholar"]
if DEBUG_ENABLED:
print("MAPPED CLASS VALUES:")
print(out)
return out
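
# Illustrative payload (hypothetical values) showing the XIV API shape that
# process_class_info() expects; assuming "Scholar" has no sub-30 remapping,
# the result would be {"Scholar": 80, "Summoner": 80}.
_EXAMPLE_CLASS_INFO = {
    "Character": {
        "ClassJobs": [
            {"UnlockedState": {"Name": "Scholar"}, "Level": 80},
        ]
    }
}
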
def generate_class_range(worksheet: openpyxl.workbook.workbook.Worksheet):
"""helper method, to create the excel ranges for the player classes"""
header_row: tuple = worksheet[worksheet.min_row]
end = 0
start = 0
start_set = False
for col in header_row:
if col.value is None:
break
if col.value in GERMAN_TO_ENGLISH_CLASS_DICT.keys() and not start_set:
start = end
start_set = True
end += 1
if DEBUG_ENABLED:
print("CLASS ROW RANGES:")
print(start, end)
return start + 1, end + 1
def do_http_get(request_url: str):
"""helper method to do http requests"""
resp: requests.Response = requests.get(request_url)
if resp.ok:
return resp.json()
else:
raise ConnectionError
def get_character_info(character_id: str):
"""helper method to receive character info via XIV API"""
if not character_id:
return None
current_request_url: str = f"{BASE_URL_XIV_API_CHARACTER}{character_id}"
resp_json: dict = do_http_get(current_request_url)
return resp_json
def get_character_id(character_name: str):
"""Help method to get the ID of an character via XIV API"""
current_request_url: str = f"{BASE_URL_XIV_API_CHARACTER}search?name={character_name}&server=Moogle"
resp_json: dict = do_http_get(current_request_url)
print(f"Processing data for: {character_name}")
return resp_json["Results"][0]["ID"] if resp_json["Results"] else None
def load_config(arguments: argparse.Namespace):
global GERMAN_TO_ENGLISH_CLASS_DICT, SUB_30_MAPPING_DICT
global CONFIG_LOCATION, DEBUG_ENABLED
if arguments.config:
CONFIG_LOCATION = arguments.config
if arguments.d:
DEBUG_ENABLED = arguments.d
with open(os.path.join(CONFIG_LOCATION, "eor_config.json")) as file:
config = json.load(file)
GERMAN_TO_ENGLISH_CLASS_DICT = config.get("class_config", None)
SUB_30_MAPPING_DICT = config.get("sub_30_class_config", None)
if not GERMAN_TO_ENGLISH_CLASS_DICT or not SUB_30_MAPPING_DICT:
raise IOError
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process the EoR Membership excel.")
parser.add_argument("--filename", metavar='[path to file]', type=str, help="the location of the file to process")
parser.add_argument("--config", type=str, required=False)
parser.add_argument("--d", required=False, action='store_true')
args = parser.parse_args()
load_config(args)
main(args.filename)
| [
"argparse.ArgumentParser",
"json.load",
"openpyxl.load_workbook",
"requests.get"
] | [((417, 449), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['filepath'], {}), '(filepath)\n', (439, 449), False, 'import openpyxl\n'), ((3683, 3708), 'requests.get', 'requests.get', (['request_url'], {}), '(request_url)\n', (3695, 3708), False, 'import requests\n'), ((5160, 5232), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process the EoR Membership excel."""'}), "(description='Process the EoR Membership excel.')\n", (5183, 5232), False, 'import argparse\n'), ((4862, 4877), 'json.load', 'json.load', (['file'], {}), '(file)\n', (4871, 4877), False, 'import json\n')] |
import datetime
from functools import partial
from typing import List, Optional, Union
from quickbuild.helpers import ContentType, response2py
class Configurations:
def __init__(self, quickbuild):
self.quickbuild = quickbuild
def _get(self, params: dict) -> List[dict]:
return self.quickbuild._request(
'GET',
'configurations',
callback=response2py,
params=params,
)
def get(self) -> List[dict]:
"""
Get all configurations in the system. For performance reason, only
brief information of the configuration will be returned here, including
`id`, `name`, `description`, `schedule`, `runMode`, `errorMessage`,
`parent id`. You may get the full xml representation using id if necessary.
Returns:
List[dict]: list of configurations.
"""
return self._get(dict(recursive=True))
def get_child(self, parent_id: int) -> List[dict]:
"""
Get a list of child configurations.
Args:
parent_id (int): parent configuration identifier.
Returns:
List[dict]: list of child configurations.
"""
return self._get(dict(parent_id=parent_id))
def get_descendent(self, parent_id: int) -> List[dict]:
"""
Get a list of descendent configurations.
Args:
parent_id (int): parent configuration identifier.
Returns:
List[dict]: list of descendent configurations.
"""
return self._get(dict(recursive=True, parent_id=parent_id))
def get_info(self,
configuration_id: int,
*,
content_type: Optional[ContentType] = None
) -> Union[dict, str]:
"""
Get full configuration info.
Args:
configuration_id (int):
Configuration identifier.
content_type (Optional[ContentType]):
Select needed content type if not set, default value of client
instance is used.
Returns:
Union[dict, str]: configuration content.
"""
return self.quickbuild._request(
'GET',
'configurations/{}'.format(configuration_id),
callback=partial(response2py, content_type=content_type),
content_type=content_type,
)
def get_path(self, configuration_id: int) -> str:
"""
Get configuration path.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration path.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/path'.format(configuration_id),
)
def get_id_by_path(self, path: str) -> int:
"""
Get configuration id by path.
Args:
path (str): configuration path.
Returns:
int: configuration identifier.
"""
return self.quickbuild.identifiers.get_configuration_id_by_path(path)
def get_name(self, configuration_id: int) -> str:
"""
Get configuration name.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration name.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/name'.format(configuration_id),
)
def get_description(self, configuration_id: int) -> str:
"""
Get configuration description.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration description.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/description'.format(configuration_id),
)
def get_error_message(self, configuration_id: int) -> str:
"""
Get configuration error message.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration error message.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/error_message'.format(configuration_id),
)
def get_run_mode(self, configuration_id: int) -> str:
"""
Get configuration run mode.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration run mode.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/run_mode'.format(configuration_id),
)
def get_schedule(self, configuration_id: int) -> dict:
"""
Get configuration schedule.
Args:
configuration_id (int): configuration identifier.
Returns:
dict: configuration schedule.
Raises:
QBProcessingError: will be raised if schedule is inherited from
parent configuration.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/schedule'.format(configuration_id),
callback=response2py,
)
def get_average_duration(self,
configuration_id: int,
*,
from_date: Optional[datetime.date],
to_date: Optional[datetime.date]
) -> int:
"""
Get configuration average duration.
Args:
configuration_id (int): configuration identifier.
Returns:
int: milliseconds of average build duration.
"""
params = dict()
if from_date:
params['from_date'] = str(from_date)
if to_date:
params['to_date'] = str(to_date)
return self.quickbuild._request(
'GET',
'configurations/{}/average_duration'.format(configuration_id),
callback=response2py,
params=params,
)
def get_success_rate(self,
configuration_id: int,
*,
from_date: Optional[datetime.date],
to_date: Optional[datetime.date]
) -> int:
"""
Get configuration success rate.
Args:
configuration_id (int): configuration identifier.
Returns:
int: value in the range of 0~100, with 0 stands for 0%, and 100
stands for 100%.
"""
params = dict()
if from_date:
params['from_date'] = str(from_date)
if to_date:
params['to_date'] = str(to_date)
return self.quickbuild._request(
'GET',
'configurations/{}/success_rate'.format(configuration_id),
callback=response2py,
params=params,
)
def get_parent(self, configuration_id: int) -> int:
"""
Get parent configuration id.
Args:
configuration_id (int): configuration identifier.
Returns:
int: id of parent configuration.
Raises:
QBProcessingError: the configuration is root configuration and does
not have parent.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/parent'.format(configuration_id),
callback=response2py,
)
def update(self, configuration: str) -> int:
"""
Update a configuration using XML configuration.
Normally you do not need to create the XML from scratch: you may get
        the XML representation of the configuration using the `get_info()` method with
content_type=ContentType.XML and modify certain parts of the XML.
Args:
configuration (str): XML document.
Returns:
int: configuration id being updated.
"""
return self.quickbuild._request(
'POST',
'configurations',
callback=response2py,
data=configuration
)
def create(self, configuration: str) -> int:
"""
Create a configuration using XML/JSON configuration.
Please note that:
        - The parent element denotes the id of the parent configuration. Normally
          you do not need to create the XML from scratch: you may retrieve the XML
          representation of an existing (template) configuration using the various
          configuration access methods or `get_info()` with content_type=ContentType.XML,
          remove the id element, modify certain parts and pass it to the create() method.
- Secret elements (Elements with attribute "secret=encrypt" in XML
representation of an existing configuration, typically they are
repository passwords, secret variable values, etc.) should not contain
the "secret" attribute; otherwise QuickBuild will think that the password
          has already been encrypted. However, if you are creating a configuration by
          copying an existing one and want to keep the passwords, the "secret"
          attribute should then be preserved.
Args:
configuration (str): XML/JSON document.
Returns:
int: configuration id of newly created configuration.
Raises:
QBError: XML validation error
"""
self.quickbuild._validate_for_id(configuration)
return self.update(configuration)
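    # Illustrative (hypothetical) round trip for the workflow described in the docstrings
    # above, assuming a `server.configurations` client and that ContentType.XML is available:
    #
    #   xml = server.configurations.get_info(123, content_type=ContentType.XML)
    #   xml = xml.replace('<id>123</id>', '')          # drop the id element
    #   new_id = server.configurations.create(xml)     # creates a copy of configuration 123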
def delete(self, configuration_id: int) -> None:
"""
Delete configuration.
Args:
configuration_id (int): configuration id.
Returns:
None
"""
return self.quickbuild._request(
'DELETE',
'configurations/{}'.format(configuration_id),
callback=response2py,
)
def copy(self,
configuration_id: int,
parent_id: int,
name: str,
recursive: bool
) -> int:
"""
Copy configuration (available since version 4.0.72)
Args:
configuration_id (int):
Configuration id to be copied.
parent_id (int):
Configuration id of the parent to place newly copied configuration.
name (str):
Name of the newly copied configuration.
recursive (bool):
Specify parameter recursive=true to copy specified configuration
and all its descendant configurations recursively; otherwise,
only the configuration itself will be copied.
Returns:
int: configuration id of the newly copied configuration.
"""
params = dict(
parent_id=parent_id,
name=name,
recursive=recursive,
)
return self.quickbuild._request(
'GET',
'configurations/{}/copy'.format(configuration_id),
callback=response2py,
params=params,
)
| [
"functools.partial"
] | [((2333, 2380), 'functools.partial', 'partial', (['response2py'], {'content_type': 'content_type'}), '(response2py, content_type=content_type)\n', (2340, 2380), False, 'from functools import partial\n')] |
from os import path
from pathlib import Path
def curr_file_path() -> Path:
"""Get cuurent file path."""
return Path(__file__).absolute()
def out_folder_path() -> Path:
"""Get output folder path."""
return curr_file_path().parents[3].joinpath("out").absolute()
def out_geom_path() -> Path:
"""Get output geometry folder path."""
    return Path(path.abspath(out_folder_path().joinpath("geometry").absolute()))
| [
"pathlib.Path"
] | [((121, 135), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (125, 135), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, json, base64
def post_cli(auth, command):
url_cli = "http://" + auth.ipaddr + "/rest/" + auth.version + "/cli"
command_dict = {"cmd": command}
try:
post_command = requests.post(url_cli, headers=auth.cookie, data=json.dumps(command_dict))
cli_response = post_command.json()['result_base64_encoded']
decoded_response = base64.b64decode(cli_response).decode('utf-8')
return decoded_response
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " post_cli: An Error has occurred"
| [
"base64.b64decode",
"json.dumps"
] | [((297, 321), 'json.dumps', 'json.dumps', (['command_dict'], {}), '(command_dict)\n', (307, 321), False, 'import requests, json, base64\n'), ((418, 448), 'base64.b64decode', 'base64.b64decode', (['cli_response'], {}), '(cli_response)\n', (434, 448), False, 'import requests, json, base64\n')] |
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def create(self, group):
wd = self.app.wd
self.go_to_group_page()
wd.find_element_by_name("new").click()
self.fill_form_group(group)
# Submit group creation
wd.find_element_by_name("submit").click()
self.back_to_group_page()
self.group_cache = None
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_all_groups(self):
wd = self.app.wd
self.go_to_group_page()
for i in range(len(wd.find_elements_by_name("selected[]"))):
wd.find_elements_by_name("selected[]")[i].click()
def test_delete_all_groups(self):
self.select_all_groups()
self.delete_groups()
def delete_group_by_index(self, index):
self.go_to_group_page()
self.select_group_by_index(index)
self.delete_groups()
self.group_cache = None
def delete_groups(self):
wd = self.app.wd
wd.find_element_by_name("delete").click()
self.back_to_group_page()
def delete_first_group(self):
self.delete_group_by_index(0)
def update_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.go_to_group_page()
self.select_group_by_index(index)
wd.find_element_by_name("edit").click()
self.fill_form_group(new_group_data)
# Submit group update
wd.find_element_by_name("update").click()
self.back_to_group_page()
self.group_cache = None
    def update_first_group(self, new_group_data):
        self.update_group_by_index(0, new_group_data)
def select_first_group(self):
self.select_group_by_index(0)
def go_to_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and
len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def back_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def change_group_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_form_group(self, group):
self.change_group_value("group_name", group.name)
self.change_group_value("group_header", group.header)
self.change_group_value("group_footer", group.footer)
def count(self):
wd = self.app.wd
self.go_to_group_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.go_to_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
group_id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=group_id))
return list(self.group_cache)
| [
"model.group.Group"
] | [((3260, 3289), 'model.group.Group', 'Group', ([], {'name': 'text', 'id': 'group_id'}), '(name=text, id=group_id)\n', (3265, 3289), False, 'from model.group import Group\n')] |
import math
def sieve(n):
primes = list(range(2, n+1))
i = 0
while i < len(primes):
no = primes[i]
m = 2
while (no * m) <= max(primes):
if primes.count(no * m) > 0:
primes.remove(no * m)
m+=1
i+=1
return primes
def maxPower(n, limit):
i = 1
while math.pow(n, i + 1) <= limit:
i += 1
return i
limit = int(input('Limit: '))
primes = sieve(limit)
s = 1
for x in primes:
    power = math.pow(x, maxPower(x, limit))
    print(power)
    s *= power
print(s)
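# Note on what the script computes: for every prime p <= limit, maxPower() picks the largest
# exponent k with p**k <= limit, so the final product s equals lcm(1, 2, ..., limit).
# For example, limit = 10 gives the factors 8.0, 9.0, 5.0, 7.0 and s = 2520.0.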
| [
"math.pow"
] | [((348, 366), 'math.pow', 'math.pow', (['n', '(i + 1)'], {}), '(n, i + 1)\n', (356, 366), False, 'import math\n')] |
from os import system
def ler_qtd(n, msg):
n = int(input(msg))
while (n < 1) or (n > 10000):
        n = int(input(f' - Invalid input!{msg}'))
return n
def preencher_set_cartas(cartas, qtd, p):
""" set de cartas, qtd de cartas, p de pessoa """
from time import sleep
print() #Pular linha
for cont in range(qtd):
carta = int(input(f' - Digite a {cont+1} carta de {p}: '))
while (carta < 1) or (carta > 100000):
carta = int(input(f' - \033[1;31mEntrada invalida!\033[m Digite a {cont+1} carta de {p}: '))
cartas.append(carta)
print(' - OK!')
sleep(1) #Espera 1s
def retirar_repetidos(lista):
l = list()
for cont in range(len(lista)):
if lista[cont] not in l:
l.append(lista[cont])
return l
def qtd_trocas(cartasA, cartasB):
inter_a = list()
inter_b = list()
for i in range(len(cartasA)):
for j in range(len(cartasB)):
if cartasA[i] == cartasB[j]:
break
elif (j == len(cartasB)-1):
inter_a.append(cartasA[i])
    inter_a = retirar_repetidos(inter_a)  # Cards that A has and B does not, duplicates removed
for i in range(len(cartasB)):
for j in range(len(cartasA)):
if cartasB[i] == cartasA[j]:
break
elif (j == len(cartasA)-1):
inter_b.append(cartasB[i])
    inter_b = retirar_repetidos(inter_b)  # Cards that B has and A does not, duplicates removed
menor = inter_a if len(inter_a) < len(inter_b) else inter_b
return len(menor)
# Main program
qa = 0
a = list()  # Alice's set of cards
qb = 0
b = list()  # Beatriz's set of cards
system('cls')
print('{:=^50}'.format(' POKEMON CARD TRADE '))
qa = ler_qtd(qa, ' How many cards does Alice have? ')
qb = ler_qtd(qb, ' How many cards does Beatriz have? ')
preencher_set_cartas(a, qa, 'Alice')
preencher_set_cartas(b, qb, 'Beatriz')
print(sorted(a))
print(sorted(b))
maximo_trocas = qtd_trocas(a, b)
print(f' - Maximum number of trades is {maximo_trocas}')
| [
"os.system",
"time.sleep"
] | [((1764, 1777), 'os.system', 'system', (['"""cls"""'], {}), "('cls')\n", (1770, 1777), False, 'from os import system\n'), ((641, 649), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (646, 649), False, 'from time import sleep\n')] |
import numpy as np
import random
import numbers
import cv2
from PIL import Image
import wpcv
from wpcv.utils.ops import pil_ops, polygon_ops
from wpcv.utils.data_aug.base import Compose, Zip
from wpcv.utils.data_aug import img_aug
class ToPILImage(object):
def __init__(self):
self.to = img_aug.ToPILImage()
def __call__(self, img, *args):
if len(args):
return (self.to(img), *args)
else:
return self.to(img)
class BboxesToPoints(object):
def __call__(self, img, bboxes):
points = np.array(bboxes).reshape((-1, 2, 2))
return img, points
class PointsToBboxes(object):
def __call__(self, img, points):
bboxes = np.array(points).reshape((-1, 4))
return img, bboxes
class Reshape(object):
def __init__(self, shape):
self.target_shape = shape
def __call__(self, x):
return np.array(x).reshape(self.target_shape)
class Limitsize(object):
def __init__(self, maxsize):
limit = maxsize
if isinstance(limit, (tuple, list, set,)):
mw, mh = limit
else:
mw = mh = limit
self.size = (mw, mh)
def __call__(self, img, points):
mw, mh = self.size
w, h = img.size
rw = w / mw
rh = h / mh
r = max(rw, rh)
if r > 1:
nw, nh = int(w / r), int(h / r)
img = pil_ops.resize(img, (nw, nh))
points = polygon_ops.scale(points, 1 / r)
return img, points
class Scale(object):
def __init__(self, scales):
if isinstance(scales, (tuple, list)):
scaleX, scaleY = scales
else:
scaleX = scaleY = scales
self.scaleX, self.scaleY = scaleX, scaleY
def __call__(self, img, points):
scaleX, scaleY = self.scaleX, self.scaleY
img = pil_ops.scale(img, (scaleX, scaleY))
points = polygon_ops.scale(points, (scaleX, scaleY))
return img, points
class Resize(object):
def __init__(self, size, keep_ratio=False, fillcolor='black'):
self.size = size
self.keep_ratio = keep_ratio
self.fillcolor = fillcolor
def __call__(self, img, points):
w, h = img.size
tw, th = self.size
if not self.keep_ratio:
scaleX, scaleY = tw / w, th / h
img = pil_ops.resize(img, self.size)
points = polygon_ops.scale(points, (scaleX, scaleY))
else:
            if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
img = pil_ops.resize_keep_ratio(img, self.size, fillcolor=fillcolor)
rx = w / tw
ry = h / th
r = max(rx, ry)
nw = w / r
nh = h / r
dw = (tw - nw) // 2
dh = (th - nh) // 2
points = polygon_ops.scale(points, 1 / r)
points = polygon_ops.translate(points, (dw, dh))
return img, points
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, points):
imw, imh = img.size
if random.random() < self.p:
img = pil_ops.hflip(img)
points = [polygon_ops.hflip(pnts, imw) for pnts in points]
return img, points
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, points):
imw, imh = img.size
if random.random() < self.p:
img = pil_ops.vflip(img)
points = [polygon_ops.vflip(pnts, imh) for pnts in points]
return img, points
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomTranslate(object):
def __init__(self, max_offset=None, fillcolor='black'):
if max_offset is not None and len(max_offset) == 2:
mx, my = max_offset
max_offset = [-mx, -my, mx, my]
self.max_offset = max_offset
self.fillcolor = fillcolor
def __call__(self, img, points):
        if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
rang = polygon_ops.get_translate_range(points, img.size)
if self.max_offset:
def limit_box(box, limits=None):
if limits is None: return box
if len(limits) == 2:
ml, mt = 0, 0
mr, mb = limits
else:
assert len(limits) == 4
ml, mt, mr, mb = limits
l, t, r, b = box
l = max(ml, l)
t = max(mt, t)
r = min(mr, r)
b = min(mb, b)
if l > r:
return None
if t > b: return None
return [l, t, r, b]
rang = limit_box(rang, self.max_offset)
if rang is None:
return img, points
ofx = random.randint(rang[0], rang[2])
ofy = random.randint(rang[1], rang[3])
img = pil_ops.translate(img, offset=(ofx, ofy), fillcolor=fillcolor)
points = [polygon_ops.translate(pnts, (ofx, ofy)) for pnts in points]
return img, points
class RandomRotate(object):
def __init__(self, degree, expand=True, fillcolor='black'):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
self.expand = expand
self.fillcolor = fillcolor
def __call__(self, img, points):
        if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.rotate(img, degree, expand=self.expand, fillcolor=fillcolor)
points = [polygon_ops.rotate(pnts, degree, (w // 2, h // 2), img_size=(w, h), expand=self.expand) for pnts in
points]
return img, points
class RandomShearX(object):
def __init__(self, degree):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
def __call__(self, img, points):
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.shear_x(img, degree)
points = [polygon_ops.shear_x(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
class RandomShearY(object):
def __init__(self, degree):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
def __call__(self, img, points):
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.shear_y(img, degree)
points = [polygon_ops.shear_y(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
class RandomShear(object):
    def __init__(self, xdegree, ydegree=None, fillcolor='black'):
        def get_param(param, default=None):
            if param is None: return default
return param if not isinstance(param, numbers.Number) else [-param, param]
self.xdegree = get_param(xdegree)
self.ydegree = get_param(ydegree)
self.fillcolor = fillcolor
def __call__(self, img, points):
if self.xdegree:
            if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.xdegree[1] - self.xdegree[0]) + self.xdegree[0]
w, h = img.size
img = pil_ops.shear_x(img, degree, fillcolor=fillcolor)
points = [polygon_ops.shear_x(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
if self.ydegree:
            if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.ydegree[1] - self.ydegree[0]) + self.ydegree[0]
w, h = img.size
img = pil_ops.shear_y(img, degree, fillcolor=fillcolor)
points = [polygon_ops.shear_y(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
# class RandomPerspective:
| [
"wpcv.utils.ops.pil_ops.vflip",
"wpcv.utils.ops.polygon_ops.get_translate_range",
"wpcv.utils.ops.pil_ops.resize_keep_ratio",
"wpcv.utils.ops.polygon_ops.scale",
"random.randint",
"wpcv.utils.ops.pil_ops.scale",
"wpcv.utils.ops.polygon_ops.translate",
"wpcv.utils.data_aug.img_aug.ToPILImage",
"wpcv.utils.ops.pil_ops.shear_y",
"wpcv.utils.ops.pil_ops.shear_x",
"wpcv.utils.ops.pil_ops.resize",
"wpcv.utils.ops.polygon_ops.shear_x",
"random.random",
"wpcv.utils.ops.pil_ops.hflip",
"wpcv.utils.ops.pil_ops.translate",
"wpcv.utils.ops.polygon_ops.vflip",
"wpcv.utils.ops.polygon_ops.rotate",
"wpcv.utils.ops.pil_ops.rotate",
"wpcv.utils.ops.polygon_ops.shear_y",
"numpy.array",
"wpcv.utils.ops.polygon_ops.hflip"
] | [((301, 321), 'wpcv.utils.data_aug.img_aug.ToPILImage', 'img_aug.ToPILImage', ([], {}), '()\n', (319, 321), False, 'from wpcv.utils.data_aug import img_aug\n'), ((1855, 1891), 'wpcv.utils.ops.pil_ops.scale', 'pil_ops.scale', (['img', '(scaleX, scaleY)'], {}), '(img, (scaleX, scaleY))\n', (1868, 1891), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((1909, 1952), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(scaleX, scaleY)'], {}), '(points, (scaleX, scaleY))\n', (1926, 1952), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((4390, 4439), 'wpcv.utils.ops.polygon_ops.get_translate_range', 'polygon_ops.get_translate_range', (['points', 'img.size'], {}), '(points, img.size)\n', (4421, 4439), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((5196, 5228), 'random.randint', 'random.randint', (['rang[0]', 'rang[2]'], {}), '(rang[0], rang[2])\n', (5210, 5228), False, 'import random\n'), ((5243, 5275), 'random.randint', 'random.randint', (['rang[1]', 'rang[3]'], {}), '(rang[1], rang[3])\n', (5257, 5275), False, 'import random\n'), ((5290, 5352), 'wpcv.utils.ops.pil_ops.translate', 'pil_ops.translate', (['img'], {'offset': '(ofx, ofy)', 'fillcolor': 'fillcolor'}), '(img, offset=(ofx, ofy), fillcolor=fillcolor)\n', (5307, 5352), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6032, 6100), 'wpcv.utils.ops.pil_ops.rotate', 'pil_ops.rotate', (['img', 'degree'], {'expand': 'self.expand', 'fillcolor': 'fillcolor'}), '(img, degree, expand=self.expand, fillcolor=fillcolor)\n', (6046, 6100), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6590, 6618), 'wpcv.utils.ops.pil_ops.shear_x', 'pil_ops.shear_x', (['img', 'degree'], {}), '(img, degree)\n', (6605, 6618), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((7066, 7094), 'wpcv.utils.ops.pil_ops.shear_y', 'pil_ops.shear_y', (['img', 'degree'], {}), '(img, degree)\n', (7081, 7094), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((1404, 1433), 'wpcv.utils.ops.pil_ops.resize', 'pil_ops.resize', (['img', '(nw, nh)'], {}), '(img, (nw, nh))\n', (1418, 1433), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((1455, 1487), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(1 / r)'], {}), '(points, 1 / r)\n', (1472, 1487), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2351, 2381), 'wpcv.utils.ops.pil_ops.resize', 'pil_ops.resize', (['img', 'self.size'], {}), '(img, self.size)\n', (2365, 2381), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2403, 2446), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(scaleX, scaleY)'], {}), '(points, (scaleX, scaleY))\n', (2420, 2446), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2655, 2717), 'wpcv.utils.ops.pil_ops.resize_keep_ratio', 'pil_ops.resize_keep_ratio', (['img', 'self.size'], {'fillcolor': 'fillcolor'}), '(img, self.size, fillcolor=fillcolor)\n', (2680, 2717), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2925, 2957), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(1 / r)'], {}), '(points, 1 / r)\n', (2942, 2957), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2979, 3018), 'wpcv.utils.ops.polygon_ops.translate', 'polygon_ops.translate', (['points', '(dw, dh)'], {}), '(points, (dw, dh))\n', (3000, 3018), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((3211, 3226), 'random.random', 'random.random', ([], {}), '()\n', (3224, 3226), 
False, 'import random\n'), ((3255, 3273), 'wpcv.utils.ops.pil_ops.hflip', 'pil_ops.hflip', (['img'], {}), '(img)\n', (3268, 3273), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((3625, 3640), 'random.random', 'random.random', ([], {}), '()\n', (3638, 3640), False, 'import random\n'), ((3669, 3687), 'wpcv.utils.ops.pil_ops.vflip', 'pil_ops.vflip', (['img'], {}), '(img)\n', (3682, 3687), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((5371, 5410), 'wpcv.utils.ops.polygon_ops.translate', 'polygon_ops.translate', (['pnts', '(ofx, ofy)'], {}), '(pnts, (ofx, ofy))\n', (5392, 5410), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6119, 6211), 'wpcv.utils.ops.polygon_ops.rotate', 'polygon_ops.rotate', (['pnts', 'degree', '(w // 2, h // 2)'], {'img_size': '(w, h)', 'expand': 'self.expand'}), '(pnts, degree, (w // 2, h // 2), img_size=(w, h), expand=\n self.expand)\n', (6137, 6211), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6637, 6700), 'wpcv.utils.ops.polygon_ops.shear_x', 'polygon_ops.shear_x', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (6656, 6700), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((7113, 7176), 'wpcv.utils.ops.polygon_ops.shear_y', 'polygon_ops.shear_y', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (7132, 7176), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((7994, 8043), 'wpcv.utils.ops.pil_ops.shear_x', 'pil_ops.shear_x', (['img', 'degree'], {'fillcolor': 'fillcolor'}), '(img, degree, fillcolor=fillcolor)\n', (8009, 8043), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((8490, 8539), 'wpcv.utils.ops.pil_ops.shear_y', 'pil_ops.shear_y', (['img', 'degree'], {'fillcolor': 'fillcolor'}), '(img, degree, fillcolor=fillcolor)\n', (8505, 8539), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((554, 570), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (562, 570), True, 'import numpy as np\n'), ((704, 720), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (712, 720), True, 'import numpy as np\n'), ((898, 909), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (906, 909), True, 'import numpy as np\n'), ((3296, 3324), 'wpcv.utils.ops.polygon_ops.hflip', 'polygon_ops.hflip', (['pnts', 'imw'], {}), '(pnts, imw)\n', (3313, 3324), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((3710, 3738), 'wpcv.utils.ops.polygon_ops.vflip', 'polygon_ops.vflip', (['pnts', 'imh'], {}), '(pnts, imh)\n', (3727, 3738), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((5925, 5940), 'random.random', 'random.random', ([], {}), '()\n', (5938, 5940), False, 'import random\n'), ((6483, 6498), 'random.random', 'random.random', ([], {}), '()\n', (6496, 6498), False, 'import random\n'), ((6959, 6974), 'random.random', 'random.random', ([], {}), '()\n', (6972, 6974), False, 'import random\n'), ((8066, 8129), 'wpcv.utils.ops.polygon_ops.shear_x', 'polygon_ops.shear_x', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (8085, 8129), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((8562, 8625), 'wpcv.utils.ops.polygon_ops.shear_y', 'polygon_ops.shear_y', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (8581, 8625), False, 'from wpcv.utils.ops import pil_ops, 
polygon_ops\n'), ((7876, 7891), 'random.random', 'random.random', ([], {}), '()\n', (7889, 7891), False, 'import random\n'), ((8372, 8387), 'random.random', 'random.random', ([], {}), '()\n', (8385, 8387), False, 'import random\n')] |
# coding: utf-8
"""
IbIocProfile.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class IbIocProfile(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
IbIocProfile - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'ioc_guid': 'str', # (required parameter)
'vendor_id': 'str', # (required parameter)
'io_device_id': 'int', # (required parameter)
'device_version': 'int', # (required parameter)
'subsystem_vendor_id': 'str', # (required parameter)
'subsystem_id': 'int', # (required parameter)
'io_class': 'int', # (required parameter)
'io_subclass': 'int', # (required parameter)
'protocol': 'int', # (required parameter)
'protocol_version': 'int', # (required parameter)
'send_message_queue_depth': 'int', # (required parameter)
'rdma_read_queue_depth': 'int', # (required parameter)
'send_message_size': 'int', # (required parameter)
'rdma_transfer_size': 'int', # (required parameter)
'controller_ops_capability_mask': 'int', # (required parameter)
'service_entries': 'int', # (required parameter)
'id_string': 'str'
}
self.attribute_map = {
'ioc_guid': 'iocGuid', # (required parameter)
'vendor_id': 'vendorId', # (required parameter)
'io_device_id': 'ioDeviceId', # (required parameter)
'device_version': 'deviceVersion', # (required parameter)
'subsystem_vendor_id': 'subsystemVendorId', # (required parameter)
'subsystem_id': 'subsystemId', # (required parameter)
'io_class': 'ioClass', # (required parameter)
'io_subclass': 'ioSubclass', # (required parameter)
'protocol': 'protocol', # (required parameter)
'protocol_version': 'protocolVersion', # (required parameter)
'send_message_queue_depth': 'sendMessageQueueDepth', # (required parameter)
'rdma_read_queue_depth': 'rdmaReadQueueDepth', # (required parameter)
'send_message_size': 'sendMessageSize', # (required parameter)
'rdma_transfer_size': 'rdmaTransferSize', # (required parameter)
'controller_ops_capability_mask': 'controllerOpsCapabilityMask', # (required parameter)
'service_entries': 'serviceEntries', # (required parameter)
'id_string': 'idString'
}
self._ioc_guid = None
self._vendor_id = None
self._io_device_id = None
self._device_version = None
self._subsystem_vendor_id = None
self._subsystem_id = None
self._io_class = None
self._io_subclass = None
self._protocol = None
self._protocol_version = None
self._send_message_queue_depth = None
self._rdma_read_queue_depth = None
self._send_message_size = None
self._rdma_transfer_size = None
self._controller_ops_capability_mask = None
self._service_entries = None
self._id_string = None
@property
def ioc_guid(self):
"""
Gets the ioc_guid of this IbIocProfile.
The EUI-64 GUID used to uniquely identify the I/O controller.
:return: The ioc_guid of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._ioc_guid
@ioc_guid.setter
def ioc_guid(self, ioc_guid):
"""
Sets the ioc_guid of this IbIocProfile.
The EUI-64 GUID used to uniquely identify the I/O controller.
:param ioc_guid: The ioc_guid of this IbIocProfile.
:type: str
"""
self._ioc_guid = ioc_guid
@property
def vendor_id(self):
"""
Gets the vendor_id of this IbIocProfile.
The I/O controller vendor ID in IEEE format.
:return: The vendor_id of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._vendor_id
@vendor_id.setter
def vendor_id(self, vendor_id):
"""
Sets the vendor_id of this IbIocProfile.
The I/O controller vendor ID in IEEE format.
:param vendor_id: The vendor_id of this IbIocProfile.
:type: str
"""
self._vendor_id = vendor_id
@property
def io_device_id(self):
"""
Gets the io_device_id of this IbIocProfile.
A number assigned by vendor to identify the type of I/O controller
:return: The io_device_id of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._io_device_id
@io_device_id.setter
def io_device_id(self, io_device_id):
"""
Sets the io_device_id of this IbIocProfile.
A number assigned by vendor to identify the type of I/O controller
:param io_device_id: The io_device_id of this IbIocProfile.
:type: int
"""
self._io_device_id = io_device_id
@property
def device_version(self):
"""
Gets the device_version of this IbIocProfile.
A number assigned by the vendor to identify the device version.
:return: The device_version of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._device_version
@device_version.setter
def device_version(self, device_version):
"""
Sets the device_version of this IbIocProfile.
A number assigned by the vendor to identify the device version.
:param device_version: The device_version of this IbIocProfile.
:type: int
"""
self._device_version = device_version
@property
def subsystem_vendor_id(self):
"""
Gets the subsystem_vendor_id of this IbIocProfile.
The ID of the enclosure vendor in IEEE format, or else all zeros if there is no vendor ID.
:return: The subsystem_vendor_id of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._subsystem_vendor_id
@subsystem_vendor_id.setter
def subsystem_vendor_id(self, subsystem_vendor_id):
"""
Sets the subsystem_vendor_id of this IbIocProfile.
The ID of the enclosure vendor in IEEE format, or else all zeros if there is no vendor ID.
:param subsystem_vendor_id: The subsystem_vendor_id of this IbIocProfile.
:type: str
"""
self._subsystem_vendor_id = subsystem_vendor_id
@property
def subsystem_id(self):
"""
Gets the subsystem_id of this IbIocProfile.
A number identifying the subsystem where the I/O controller resides.
:return: The subsystem_id of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._subsystem_id
@subsystem_id.setter
def subsystem_id(self, subsystem_id):
"""
Sets the subsystem_id of this IbIocProfile.
A number identifying the subsystem where the I/O controller resides.
:param subsystem_id: The subsystem_id of this IbIocProfile.
:type: int
"""
self._subsystem_id = subsystem_id
@property
def io_class(self):
"""
Gets the io_class of this IbIocProfile.
The I/O class of the controller. 0x0000 -0xFFFE is reserved for I/O classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:return: The io_class of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._io_class
@io_class.setter
def io_class(self, io_class):
"""
Sets the io_class of this IbIocProfile.
The I/O class of the controller. 0x0000 -0xFFFE is reserved for I/O classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:param io_class: The io_class of this IbIocProfile.
:type: int
"""
self._io_class = io_class
@property
def io_subclass(self):
"""
Gets the io_subclass of this IbIocProfile.
The I/O sub-class of the controller. 0x0000 -0xFFFE is reserved for I/O sub-classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:return: The io_subclass of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._io_subclass
@io_subclass.setter
def io_subclass(self, io_subclass):
"""
Sets the io_subclass of this IbIocProfile.
The I/O sub-class of the controller. 0x0000 -0xFFFE is reserved for I/O sub-classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:param io_subclass: The io_subclass of this IbIocProfile.
:type: int
"""
self._io_subclass = io_subclass
@property
def protocol(self):
"""
Gets the protocol of this IbIocProfile.
The I/O protocol of the controller. 0x0000 -0xFFFE is reserved for I/O protocols encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:return: The protocol of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""
Sets the protocol of this IbIocProfile.
The I/O protocol of the controller. 0x0000 -0xFFFE is reserved for I/O protocols encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:param protocol: The protocol of this IbIocProfile.
:type: int
"""
self._protocol = protocol
@property
def protocol_version(self):
"""
Gets the protocol_version of this IbIocProfile.
The protocol version (protocol-specific).
:return: The protocol_version of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._protocol_version
@protocol_version.setter
def protocol_version(self, protocol_version):
"""
Sets the protocol_version of this IbIocProfile.
The protocol version (protocol-specific).
:param protocol_version: The protocol_version of this IbIocProfile.
:type: int
"""
self._protocol_version = protocol_version
@property
def send_message_queue_depth(self):
"""
Gets the send_message_queue_depth of this IbIocProfile.
The maximum depth of the Send Message Queue.
:return: The send_message_queue_depth of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._send_message_queue_depth
@send_message_queue_depth.setter
def send_message_queue_depth(self, send_message_queue_depth):
"""
Sets the send_message_queue_depth of this IbIocProfile.
The maximum depth of the Send Message Queue.
:param send_message_queue_depth: The send_message_queue_depth of this IbIocProfile.
:type: int
"""
self._send_message_queue_depth = send_message_queue_depth
@property
def rdma_read_queue_depth(self):
"""
Gets the rdma_read_queue_depth of this IbIocProfile.
The maximum depth of the per-channel RDMA Read Queue
:return: The rdma_read_queue_depth of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._rdma_read_queue_depth
@rdma_read_queue_depth.setter
def rdma_read_queue_depth(self, rdma_read_queue_depth):
"""
Sets the rdma_read_queue_depth of this IbIocProfile.
The maximum depth of the per-channel RDMA Read Queue
:param rdma_read_queue_depth: The rdma_read_queue_depth of this IbIocProfile.
:type: int
"""
self._rdma_read_queue_depth = rdma_read_queue_depth
@property
def send_message_size(self):
"""
Gets the send_message_size of this IbIocProfile.
The maximum size of Send Messages in bytes.
:return: The send_message_size of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._send_message_size
@send_message_size.setter
def send_message_size(self, send_message_size):
"""
Sets the send_message_size of this IbIocProfile.
The maximum size of Send Messages in bytes.
:param send_message_size: The send_message_size of this IbIocProfile.
:type: int
"""
self._send_message_size = send_message_size
@property
def rdma_transfer_size(self):
"""
Gets the rdma_transfer_size of this IbIocProfile.
The maximum size of outbound RDMA transfers initiated by the controller.
:return: The rdma_transfer_size of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._rdma_transfer_size
@rdma_transfer_size.setter
def rdma_transfer_size(self, rdma_transfer_size):
"""
Sets the rdma_transfer_size of this IbIocProfile.
The maximum size of outbound RDMA transfers initiated by the controller.
:param rdma_transfer_size: The rdma_transfer_size of this IbIocProfile.
:type: int
"""
self._rdma_transfer_size = rdma_transfer_size
@property
def controller_ops_capability_mask(self):
"""
Gets the controller_ops_capability_mask of this IbIocProfile.
Supported operation types of this controller.: Bit 0 on = Send Messages to IOCs Bit 1 on = Send Messages from IOCs Bit 2 on = RDMA Read Requests to IOCs Bit 3 on = RDMA Read Requests from IOCs Bit 4 on = RDMA Write Requests to IOCs Bit 5 on = RDMA Write Requests from IOCs Bit 6 on = Atomic operations to IOCs Bit 7 on = Atomic operations from IOCs
:return: The controller_ops_capability_mask of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._controller_ops_capability_mask
@controller_ops_capability_mask.setter
def controller_ops_capability_mask(self, controller_ops_capability_mask):
"""
Sets the controller_ops_capability_mask of this IbIocProfile.
Supported operation types of this controller.: Bit 0 on = Send Messages to IOCs Bit 1 on = Send Messages from IOCs Bit 2 on = RDMA Read Requests to IOCs Bit 3 on = RDMA Read Requests from IOCs Bit 4 on = RDMA Write Requests to IOCs Bit 5 on = RDMA Write Requests from IOCs Bit 6 on = Atomic operations to IOCs Bit 7 on = Atomic operations from IOCs
:param controller_ops_capability_mask: The controller_ops_capability_mask of this IbIocProfile.
:type: int
"""
self._controller_ops_capability_mask = controller_ops_capability_mask
@property
def service_entries(self):
"""
Gets the service_entries of this IbIocProfile.
The number of entries in the service entries table
:return: The service_entries of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._service_entries
@service_entries.setter
def service_entries(self, service_entries):
"""
Sets the service_entries of this IbIocProfile.
The number of entries in the service entries table
:param service_entries: The service_entries of this IbIocProfile.
:type: int
"""
self._service_entries = service_entries
@property
def id_string(self):
"""
Gets the id_string of this IbIocProfile.
A UTF-8 encoded string for identifying the controller to user.
:return: The id_string of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._id_string
@id_string.setter
def id_string(self, id_string):
"""
Sets the id_string of this IbIocProfile.
A UTF-8 encoded string for identifying the controller to user.
:param id_string: The id_string of this IbIocProfile.
:type: str
"""
self._id_string = id_string
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
if self is None:
return None
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if self is None or other is None:
return None
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
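# Illustrative (hypothetical) usage of this generated model class:
#
#   profile = IbIocProfile()
#   profile.ioc_guid = '0002:c903:0000:1234'   # example GUID string, not a real device
#   profile.service_entries = 4
#   print(profile.to_dict())   # dict keyed by the python attribute names
#   print(profile)             # pformat()-ed representation via to_str()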
| [
"six.iteritems"
] | [((18511, 18540), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (18520, 18540), False, 'from six import iteritems\n')] |
from csv import reader
from sklearn import preprocessing
from plotly import graph_objects
def import_data(path):
return [[float(f) for f in r] for r in reader(open(path, "r"))]
def normalize_data(dataset):
scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
normalized = scaler.fit_transform(dataset)
return normalized.tolist()
def cross_validation_split(dataset, cross_validation_k):
size = len(dataset)
splits = [int(size / cross_validation_k) for _ in range(cross_validation_k)]
splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1])
sets = list()
offset = 0
for s in splits:
sets.append([dataset[i] for i in range(offset, offset + s)])
offset += s
return sets
def compute_y(model, row):
m = len(row)
y = 0.0
for i in range(m):
y += row[i] * model[i]
return y + model[m]
def compute_grad_mse(model, row, actual):
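    # For the linear model y_hat = sum_i(model[i] * row[i]) + model[m], the squared error
    # (y_hat - actual)**2 has gradient 2 * (y_hat - actual) * row[i] w.r.t. each weight and
    # 2 * (y_hat - actual) w.r.t. the bias term model[m], which is what is accumulated below.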
m = len(row)
grad = [0.0 for _ in range(m + 1)]
diff = compute_y(model, row) - actual
for i in range(m):
grad[i] += 2.0 * row[i] * diff
grad[m] += 2.0 * diff
return grad
def learn_stochastic_gradient_descent_mse(fold, actuals, learning_rate, iterations_count):
m = len(fold[0])
model = [0.0 for _ in range(m + 1)]
for i in range(iterations_count):
for j in range(len(fold)):
row = fold[j]
actual = actuals[j]
grad = compute_grad_mse(model, row, actual)
model = [model[k] - learning_rate / (1 + i) * grad[k] for k in range(m + 1)]
return model
def compute_rmse(prediction, actual):
mse = 0.0
n = len(prediction)
for i in range(n):
mse += ((prediction[i] - actual[i]) ** 2) / float(n)
return mse ** 0.5
def compute_r2(prediction, actual):
nominator = 0.0
denominator = 0.0
expect = 0.0
for i in range(len(actual)):
nominator += (actual[i] - prediction[i]) ** 2
for i in range(len(actual)):
expect += actual[i] / float(len(actual))
for i in range(len(actual)):
denominator += (actual[i] - expect) ** 2
return 1 - float(nominator) / float(denominator)
def run_learning(sets, learning_rate, iterations_count):
models = []
rmses = []
r2s = []
rmses_test = []
r2s_test = []
for set in sets:
fold = list(sets)
fold.remove(set)
fold = sum(fold, [])
test = set
fold_actual = [r[-1] for r in fold]
fold = [r[0:-1] for r in fold]
test_actual = [r[-1] for r in test]
test = [r[0:-1] for r in test]
        model = learn_stochastic_gradient_descent_mse(fold, fold_actual, learning_rate, iterations_count)
fold_pred = [compute_y(model, row) for row in fold]
test_pred = [compute_y(model, row) for row in test]
rmses.append(compute_rmse(fold_pred, fold_actual))
r2s.append(compute_r2(fold_pred, fold_actual))
rmses_test.append(compute_rmse(test_pred, test_actual))
r2s_test.append(compute_r2(test_pred, test_actual))
models.append(model)
return models, rmses, r2s, rmses_test, r2s_test
def compute_stat(data):
n = len(data)
expectation = 0.0
for d in data:
expectation += d / float(n)
sd = 0.0
for d in data:
sd += ((d - expectation) ** 2) / float(n)
return expectation, sd ** 0.5
filepath = "features_var_1.csv"
cv_k = 5
learn_rate = 0.01
iterations = 50
dataset = normalize_data(import_data(filepath))
sets = cross_validation_split(dataset, cv_k)
models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations)
models_tr = [[models[i][j] for i in range(cv_k)] for j in range(len(dataset[0]))]
stats = [compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr]
values = ["X"] + ["Fold" + str(i) for i in range(cv_k)] + ["E","SD"]
cells = [ ["RMSE (train)", "R2 (train)", "RMSE (test)", "R2 (test)"] + ["f" + str(i) for i in range(len(dataset[0]))] ] + \
[ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k) ] + \
[ [stats[j][i] for j in range(len(stats))] for i in range(2) ]
table = graph_objects.Table(header=dict(values=values), cells=dict(values=cells))
figure = graph_objects.Figure(data=[table])
figure.show()
| [
"plotly.graph_objects.Figure",
"sklearn.preprocessing.MinMaxScaler"
] | [((4307, 4341), 'plotly.graph_objects.Figure', 'graph_objects.Figure', ([], {'data': '[table]'}), '(data=[table])\n', (4327, 4341), False, 'from plotly import graph_objects\n'), ((227, 275), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (253, 275), False, 'from sklearn import preprocessing\n')] |
#!/bin/python3
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import sys,os
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
browser = webdriver.Chrome(options=options)
browser.get("http://172.16.31.10/")
try:
browser.find_element_by_xpath('//*[@id="logout"]')
    print('Network is already connected!')
browser.quit()
sys.exit()
except NoSuchElementException:
pass
username_='2020xxxxxxxxxxx'
password_='<PASSWORD>'
# Enter username and password
username = browser.find_element_by_xpath('//*[@id="username"]')
password = browser.find_element_by_xpath('//*[@id="password"]')
username.clear()
username.send_keys(username_)
password.clear()
password.send_keys(password_)
login_btn = browser.find_element_by_xpath('//*[@id="login-account"]')
login_btn.click()
try:
    # Wait until the page shows the logout element, i.e. the connection succeeded
element = WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="logout"]'))
)
print("网络已连接!")
finally:
browser.quit()
browser.quit()
| [
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"selenium.webdriver.support.ui.WebDriverWait",
"sys.exit"
] | [((299, 324), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (322, 324), False, 'from selenium import webdriver\n'), ((499, 532), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (515, 532), False, 'from selenium import webdriver\n'), ((673, 683), 'sys.exit', 'sys.exit', ([], {}), '()\n', (681, 683), False, 'import sys, os\n'), ((1183, 1246), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['(By.XPATH, \'//*[@id="logout"]\')'], {}), '((By.XPATH, \'//*[@id="logout"]\'))\n', (1213, 1246), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1141, 1167), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', '(10)'], {}), '(browser, 10)\n', (1154, 1167), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')] |
# Generated by Django 2.2.4 on 2019-09-11 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20190907_1334'),
]
operations = [
migrations.AddField(
model_name='pic',
name='classification152',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='pic',
name='classification18',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='pic',
name='transfer',
field=models.ImageField(blank=True, upload_to=''),
),
]
| [
"django.db.models.ImageField",
"django.db.models.TextField"
] | [((340, 368), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (356, 368), False, 'from django.db import migrations, models\n'), ((495, 523), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (511, 523), False, 'from django.db import migrations, models\n'), ((642, 685), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'upload_to': '""""""'}), "(blank=True, upload_to='')\n", (659, 685), False, 'from django.db import migrations, models\n')] |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The trainer of EGTA for mean field game."""
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms.EGTA import meta_strategies
from open_spiel.python.mfg.algorithms.EGTA import inner_loop
from open_spiel.python.mfg.algorithms.EGTA import init_oracle
class MFGMetaTrainer(object):
"""
Empirical game-theoretic analysis (EGTA) for MFGs.
"""
def __init__(self,
mfg_game,
oracle_type,
num_inner_iters=None,
initial_policy=None,
meta_strategy_method="nash",
**kwargs):
"""
Initialize the MFG Trainer.
:param mfg_game: a mean-field game.
:param oracle_type: "BR" exact best response or "DQN" RL approximate best response.
:param num_inner_iters: the number of iterations for the inner loop (finding BR target based on the empirical game) if needed.
        :param initial_policy: initial policy. A uniform policy by default.
:param meta_strategy_method: method for the inner loop.
"""
self._mfg_game = mfg_game
self._oracle_type = oracle_type
self._num_players = mfg_game.num_players()
self._num_inner_iters = num_inner_iters
self._initial_policy = initial_policy
self._meta_strategy_method = meta_strategy_method
self.initialize_policies_and_distributions()
#TODO: check if policy and dist are being updated.
self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game,
policies=self._policies,
distributions=self._distributions,
num_iterations=num_inner_iters)
self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method)
self._output_policy = None
self._current_outer_iter = 0
def initialize_policies_and_distributions(self):
"""
Initialize policies and corresponding distributions.
"""
if self._oracle_type == "BR":
self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game,
initial_policy=self._initial_policy)
elif self._oracle_type == "DQN":
raise NotImplementedError
else:
raise ValueError("Suggested oracle has not been implemented.")
def reset(self):
"""
Reset the trainer.
"""
self._current_outer_iter = 0
self.initialize_policies_and_distributions()
self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game,
policies=self._policies,
distributions=self._distributions,
num_iterations=self._num_inner_iters)
self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method)
self._output_policy = None
def iteration(self):
"""
Main training iteration.
"""
self._current_outer_iter += 1
self._meta_strategy_method.reset()
self._output_policy = self._inner_loop.run_inner_loop()
self.update_policies(self._output_policy)
def final_step(self):
""" Final analysis of all generated policies. """
self._meta_strategy_method.reset()
self._output_policy = self._inner_loop.run_inner_loop()
def update_policies(self, output_merged_policy):
"""
Adding new best-response policies to the empirical game.
:param output_merged_policy: a merged policy induced by inner loop.
:return:
"""
output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy)
greedy_pi = self._oracle(self._mfg_game, output_distribution)
self._policies.append(greedy_pi)
self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi))
def get_original_policies_and_weights(self):
"""
Return original policies in the empirical game and corresponding output mixed strategies.
"""
weights = self._meta_strategy_method.get_weights_on_orig_policies()
return self._policies, weights
def get_merged_policy(self):
"""
Return the output merged policy.
Equivalent to merge policies and weights from get_original_policies_and_weights().
"""
return self._output_policy
def get_policies(self):
return self._policies
def get_distrbutions(self):
return self._distributions
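# Illustrative (hypothetical) driver loop for this trainer, assuming `mfg_game` is an
# OpenSpiel mean-field game and the exact best-response oracle is wanted:
#
#   trainer = MFGMetaTrainer(mfg_game, oracle_type="BR", num_inner_iters=100)
#   for _ in range(10):
#       trainer.iteration()                  # inner loop + one new best response
#   trainer.final_step()                     # final analysis over all generated policies
#   output_policy = trainer.get_merged_policy()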
| [
"open_spiel.python.mfg.algorithms.distribution.DistributionPolicy",
"open_spiel.python.mfg.algorithms.EGTA.init_oracle.init_br_oracle",
"open_spiel.python.mfg.algorithms.EGTA.inner_loop.InnerLoop"
] | [((2652, 2700), 'open_spiel.python.mfg.algorithms.EGTA.inner_loop.InnerLoop', 'inner_loop.InnerLoop', (['self._meta_strategy_method'], {}), '(self._meta_strategy_method)\n', (2672, 2700), False, 'from open_spiel.python.mfg.algorithms.EGTA import inner_loop\n'), ((4084, 4132), 'open_spiel.python.mfg.algorithms.EGTA.inner_loop.InnerLoop', 'inner_loop.InnerLoop', (['self._meta_strategy_method'], {}), '(self._meta_strategy_method)\n', (4104, 4132), False, 'from open_spiel.python.mfg.algorithms.EGTA import inner_loop\n'), ((4907, 4976), 'open_spiel.python.mfg.algorithms.distribution.DistributionPolicy', 'distribution.DistributionPolicy', (['self._mfg_game', 'output_merged_policy'], {}), '(self._mfg_game, output_merged_policy)\n', (4938, 4976), False, 'from open_spiel.python.mfg.algorithms import distribution\n'), ((3016, 3105), 'open_spiel.python.mfg.algorithms.EGTA.init_oracle.init_br_oracle', 'init_oracle.init_br_oracle', ([], {'game': 'self._mfg_game', 'initial_policy': 'self._initial_policy'}), '(game=self._mfg_game, initial_policy=self.\n _initial_policy)\n', (3042, 3105), False, 'from open_spiel.python.mfg.algorithms.EGTA import init_oracle\n'), ((5124, 5182), 'open_spiel.python.mfg.algorithms.distribution.DistributionPolicy', 'distribution.DistributionPolicy', (['self._mfg_game', 'greedy_pi'], {}), '(self._mfg_game, greedy_pi)\n', (5155, 5182), False, 'from open_spiel.python.mfg.algorithms import distribution\n')] |
# encoding:utf-8
import os, sys
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(basepath, 'FaceDetector'))
import requests
import base64
import cv2
import numpy as np
import urllib.request
import base64
def fetchImageFromHttp(image_url, timeout_s=1):
    # Fetch an image from the given URL
if image_url:
resp = urllib.request.urlopen(image_url, timeout=timeout_s)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return image
else:
return []
def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120):
    # Extract the faces detected in the image
message = {}
    # client_id is the API key (AK) and client_secret is the secret key (SK) obtained from the official console
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>'
response = requests.get(host)
if response:
# print(response.json()['access_token'])
access_token = response.json()['access_token']
request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect"
if imgtype == 'Local':
with open(img, "rb") as f: # 转为二进制格式
base64_data = base64.b64encode(f.read()) # 使用base64进行加密
base64_data = base64_data.decode()
params = "\"image\":\"{}\",\"image_type\":\"BASE64\", \"max_face_num\":\"120\"".format(base64_data)
params = '{' + params + '}'
elif imgtype == 'url':
params = "\"image\":\"{}\",\"image_type\":\"URL\", \"max_face_num\":\"120\"".format(img)
params = '{' + params + '}'
# print(params['image'])
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/json'}
response = requests.post(request_url, data=params, headers=headers)
if response:
            print(response.json())
        # Extract information for all detected faces
if response.json()['error_code'] != 0:
message['Error Code'] = response.json()['error_code']
message['Error Message'] = response.json()['error_msg']
message['Data'] = None
return message
# raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg']))
face_number = response.json()['result']['face_num']
face_List = []
for num in range(face_number):
face_loc_left = int(response.json()['result']['face_list'][num]['location']['left'])
face_loc_top = int(response.json()['result']['face_list'][num]['location']['top'])
face_loc_width = int(response.json()['result']['face_list'][num]['location']['width'])
face_loc_height = int(response.json()['result']['face_list'][num]['location']['height'])
face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height])
        # Read the image and draw bounding boxes on it
if imgtype == 'Local':
image = cv2.imread(img)
elif imgtype == 'url':
image = fetchImageFromHttp(img)
        # Starting index for numbering cropped face images
search_all_path = []
num = 0
for pos in face_List:
lefttopx = pos[0]
lefttopy = pos[1]
rightbottomx = lefttopx + pos[2]
rightbottomy = lefttopy + pos[3]
# print(lefttopx, lefttopy, rightbottomx, rightbottomy)
cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2)
if imgpos == 'Example':
savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg')
elif imgpos == 'Search':
pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)])
savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg')
search_all_path.append(savepath)
# cv2.imwrite("C:/WorkSpace/test/detect_face_"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx])
cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx])
num += 1
message['Error Code'] = response.json()['error_code']
        message['Error Message'] = response.json()['error_msg']
if imgpos == 'Example':
full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg')
cv2.imwrite(full_face_path, image)
message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path}
elif imgpos == 'Search':
# full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg')
# cv2.imwrite(full_face_path, image)
message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None}
return message
if __name__ == "__main__":
# imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg'
imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2'
wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg'
fetchImageFromHttp(wycpath)
# result = FaceExtract(imgpath, 'url')
# result = FaceExtract(imgpath, 'Local', 'Search')
# cv2.imshow('image', result)
# cv2.waitKey(0)
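    # Hedged usage sketch (the local file name is hypothetical, and a valid Baidu
    # AK/SK pair must be present in `host` inside FaceExtract for the call to succeed):
    #   result = FaceExtract('group_photo.jpg', 'Local', 'Example')
    #   if result['Error Code'] == 0:
    #       print(result['Data']['FacesNum'], 'faces, full image at', result['Data']['FullFace'])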
| [
"os.path.abspath",
"cv2.imwrite",
"cv2.imdecode",
"cv2.imread",
"requests.get",
"cv2.rectangle",
"requests.post",
"os.path.join"
] | [((119, 157), 'os.path.join', 'os.path.join', (['basepath', '"""FaceDetector"""'], {}), "(basepath, 'FaceDetector')\n", (131, 157), False, 'import os, sys\n'), ((886, 904), 'requests.get', 'requests.get', (['host'], {}), '(host)\n', (898, 904), False, 'import requests\n'), ((1739, 1795), 'requests.post', 'requests.post', (['request_url'], {'data': 'params', 'headers': 'headers'}), '(request_url, data=params, headers=headers)\n', (1752, 1795), False, 'import requests\n'), ((75, 100), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (90, 100), False, 'import os, sys\n'), ((491, 528), 'cv2.imdecode', 'cv2.imdecode', (['image', 'cv2.IMREAD_COLOR'], {}), '(image, cv2.IMREAD_COLOR)\n', (503, 528), False, 'import cv2\n'), ((2852, 2867), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2862, 2867), False, 'import cv2\n'), ((3217, 3309), 'cv2.rectangle', 'cv2.rectangle', (['image', '(lefttopx, lefttopy)', '(rightbottomx, rightbottomy)', '(0, 255, 0)', '(2)'], {}), '(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0,\n 255, 0), 2)\n', (3230, 3309), False, 'import cv2\n'), ((3856, 3930), 'cv2.imwrite', 'cv2.imwrite', (['savepath', 'image[lefttopy:rightbottomy, lefttopx:rightbottomx]'], {}), '(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx])\n', (3867, 3930), False, 'import cv2\n'), ((4146, 4208), 'os.path.join', 'os.path.join', (['basepath', '"""FaceStatic"""', '"""FullFace"""', '"""Result.jpg"""'], {}), "(basepath, 'FaceStatic', 'FullFace', 'Result.jpg')\n", (4158, 4208), False, 'import os, sys\n'), ((4217, 4251), 'cv2.imwrite', 'cv2.imwrite', (['full_face_path', 'image'], {}), '(full_face_path, image)\n', (4228, 4251), False, 'import cv2\n'), ((3608, 3677), 'os.path.join', 'os.path.join', (['basepath', '"""FaceStatic"""', '"""SearchFace"""', "(pos_name + '.jpg')"], {}), "(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg')\n", (3620, 3677), False, 'import os, sys\n')] |
#
# INF 552 Homework 3
# Part 2: Fast Map
# Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac)
# Date: 2/27/2018
# Programming Language: Python 3.6
#
import numpy as np
import matplotlib.pyplot as plt
DIMENSION = 2
DATA_SIZE = 10
# WORDS = ["acting", "activist", "compute", "coward","forward","interaction","activity","odor","order","international"]
WORDS = []
data_file_name = "fastmap-data.txt"
words_file_name = 'fastmap-wordlist.txt'
table = np.zeros(shape=(DATA_SIZE, DATA_SIZE))
cood = np.zeros(shape=(DATA_SIZE, DIMENSION))
pivot = []
def main():
readFile(data_file_name)
print("\nOriginal table:")
readWords(words_file_name)
print(WORDS)
printTable()
for i in range(DIMENSION):
print("\n\nThe {i}st cood: ".format(i=i+1))
pickLongestPair()
calculateCoordinate(i)
print("\nUpdate table: ")
updateTable(i)
printTable()
plotResult()
def readFile(filename):
with open(filename, "r") as file:
print("Original input:")
for line in file:
line_array = line.split()
print(line_array)
table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \
table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2])
def readWords(filename):
global WORDS
with open(filename) as file:
WORDS = file.read().splitlines()
def printTable():
for row in table:
print(row)
def pickLongestPair():
max = np.amax(table)
indices = list(zip(*np.where(table == max)))
print("The longest distance pair is {pair}".format(pair = indices[0]))
print("Pivot is piont {piv}".format(piv = indices[0][0]))
pivot.append(indices[0])
def calculateCoordinate(dimen):
a = pivot[dimen][0]
b = pivot[dimen][1]
print("The coordinate table")
for i in range(len(table)):
cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b])
print ("{i}\t({x}, {y})".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3)))
def updateTable(dimen):
for i in range(0, DATA_SIZE):
for j in range(0, DATA_SIZE):
table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2))
def plotResult():
x = cood[:, 0]
y = cood[:, 1]
fig, ax = plt.subplots()
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.scatter(x, y)
plt.scatter(x, y, color="red", s=30)
plt.title("Fast Map Result")
for i, txt in enumerate(WORDS):
ax.annotate(txt, (x[i], y[i]))
plt.show()
main()
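# Note on the expected input format (inferred from readFile/readWords above, not from
# an external spec): each line of fastmap-data.txt is "<object_i> <object_j> <distance>"
# with 1-based object ids, e.g. "1 2 4.0"; fastmap-wordlist.txt holds one label per line.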
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"numpy.power",
"numpy.zeros",
"numpy.amax",
"numpy.where",
"matplotlib.pyplot.subplots"
] | [((470, 508), 'numpy.zeros', 'np.zeros', ([], {'shape': '(DATA_SIZE, DATA_SIZE)'}), '(shape=(DATA_SIZE, DATA_SIZE))\n', (478, 508), True, 'import numpy as np\n'), ((516, 554), 'numpy.zeros', 'np.zeros', ([], {'shape': '(DATA_SIZE, DIMENSION)'}), '(shape=(DATA_SIZE, DIMENSION))\n', (524, 554), True, 'import numpy as np\n'), ((1503, 1517), 'numpy.amax', 'np.amax', (['table'], {}), '(table)\n', (1510, 1517), True, 'import numpy as np\n'), ((2371, 2385), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2383, 2385), True, 'import matplotlib.pyplot as plt\n'), ((2467, 2503), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""red"""', 's': '(30)'}), "(x, y, color='red', s=30)\n", (2478, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2536), 'matplotlib.pyplot.title', 'plt.title', (['"""Fast Map Result"""'], {}), "('Fast Map Result')\n", (2517, 2536), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2625, 2627), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1564), 'numpy.where', 'np.where', (['(table == max)'], {}), '(table == max)\n', (1550, 1564), True, 'import numpy as np\n'), ((1958, 1982), 'numpy.power', 'np.power', (['table[i][b]', '(2)'], {}), '(table[i][b], 2)\n', (1966, 1982), True, 'import numpy as np\n'), ((1906, 1930), 'numpy.power', 'np.power', (['table[a][i]', '(2)'], {}), '(table[a][i], 2)\n', (1914, 1930), True, 'import numpy as np\n'), ((1932, 1956), 'numpy.power', 'np.power', (['table[a][b]', '(2)'], {}), '(table[a][b], 2)\n', (1940, 1956), True, 'import numpy as np\n'), ((2227, 2251), 'numpy.power', 'np.power', (['table[i][j]', '(2)'], {}), '(table[i][j], 2)\n', (2235, 2251), True, 'import numpy as np\n'), ((2253, 2297), 'numpy.power', 'np.power', (['(cood[i][dimen] - cood[j][dimen])', '(2)'], {}), '(cood[i][dimen] - cood[j][dimen], 2)\n', (2261, 2297), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import glob
from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize
if __name__ == '__main__':
path='../../../data/datasets'
for ii,pp in enumerate(glob.glob('%s/*' % path)):
tbn=os.path.basename(pp)
tsize = getDirectorySizeInBytes(pp)
tsizeHuman = humanReadableSize(tsize)
print ('[%d] %s : %s (%d)' % (ii, tbn, tsizeHuman, tsize)) | [
"app.backend.core.utils.getDirectorySizeInBytes",
"os.path.basename",
"app.backend.core.utils.humanReadableSize",
"glob.glob"
] | [((250, 274), 'glob.glob', 'glob.glob', (["('%s/*' % path)"], {}), "('%s/*' % path)\n", (259, 274), False, 'import glob\n'), ((289, 309), 'os.path.basename', 'os.path.basename', (['pp'], {}), '(pp)\n', (305, 309), False, 'import os\n'), ((326, 353), 'app.backend.core.utils.getDirectorySizeInBytes', 'getDirectorySizeInBytes', (['pp'], {}), '(pp)\n', (349, 353), False, 'from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize\n'), ((375, 399), 'app.backend.core.utils.humanReadableSize', 'humanReadableSize', (['tsize'], {}), '(tsize)\n', (392, 399), False, 'from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize\n')] |
#
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
#
import pytest
import subprocess
from ._subprocess import run_cli_tool
def _unittest_trivial() -> None:
run_cli_tool('show-transport', timeout=2.0)
with pytest.raises(subprocess.CalledProcessError):
run_cli_tool(timeout=2.0)
with pytest.raises(subprocess.CalledProcessError):
run_cli_tool('invalid-command', timeout=2.0)
with pytest.raises(subprocess.CalledProcessError):
run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0)
with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires large timeout
run_cli_tool('pub', 'nonexistent.data.Type.1.0', '{}', '--tr=Loopback(None)', timeout=5.0)
| [
"pytest.raises"
] | [((307, 351), 'pytest.raises', 'pytest.raises', (['subprocess.CalledProcessError'], {}), '(subprocess.CalledProcessError)\n', (320, 351), False, 'import pytest\n'), ((397, 441), 'pytest.raises', 'pytest.raises', (['subprocess.CalledProcessError'], {}), '(subprocess.CalledProcessError)\n', (410, 441), False, 'import pytest\n'), ((506, 550), 'pytest.raises', 'pytest.raises', (['subprocess.CalledProcessError'], {}), '(subprocess.CalledProcessError)\n', (519, 550), False, 'import pytest\n'), ((632, 676), 'pytest.raises', 'pytest.raises', (['subprocess.CalledProcessError'], {}), '(subprocess.CalledProcessError)\n', (645, 676), False, 'import pytest\n')] |
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import queue
from insomnia.utils import empty_torch_queue
from insomnia.explores.gaussian_noise import GaussianActionNoise
from insomnia.numeric_models import d4pg
from insomnia.numeric_models.misc import l2_projection
class LearnerD4PG(object):
"""Policy and value network update routine. """
def __init__(self, policy_net, target_policy_net, learner_w_queue,
alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51):
self.v_min = v_min
self.v_max = v_max
self.num_atoms = n_atoms
self.num_train_steps = 10000
self.batch_size = 256
self.tau = 0.001
self.gamma = 0.998
self.prioritized_replay = 0
self.learner_w_queue = learner_w_queue
self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1)
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Noise process
self.noise = GaussianActionNoise(mu=np.zeros(n_actions))
# Value and policy nets
self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,
self.v_min, self.v_max, self.num_atoms)
self.policy_net = policy_net
self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,
self.v_min, self.v_max, self.num_atoms)
self.target_policy_net = target_policy_net
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(param.data)
self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta)
self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha)
self.value_criterion = nn.BCELoss(reduction='none')
def _update_step(self, batch, replay_priority_queue, update_step):
update_time = time.time()
state, action, reward, next_state, done, gamma, weights, inds = batch
state = np.asarray(state)
action = np.asarray(action)
reward = np.asarray(reward)
next_state = np.asarray(next_state)
done = np.asarray(done)
weights = np.asarray(weights)
inds = np.asarray(inds).flatten()
state = torch.from_numpy(state).float().to(self.device)
next_state = torch.from_numpy(next_state).float().to(self.device)
action = torch.from_numpy(action).float().to(self.device)
reward = torch.from_numpy(reward).float().to(self.device)
done = torch.from_numpy(done).float().to(self.device)
# ------- Update critic -------
# Predict next actions with target policy network
next_action = self.target_policy_net(next_state)
# Predict Z distribution with target value network
target_value = self.target_value_net.get_probs(next_state, next_action.detach())
# Get projected distribution
target_z_projected = l2_projection._l2_project(next_distr_v=target_value,
rewards_v=reward,
dones_mask_t=done,
gamma=self.gamma ** 5,
n_atoms=self.num_atoms,
v_min=self.v_min,
v_max=self.v_max,
delta_z=self.delta_z)
target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device)
critic_value = self.value_net.get_probs(state, action)
critic_value = critic_value.to(self.device)
value_loss = self.value_criterion(critic_value, target_z_projected)
value_loss = value_loss.mean(axis=1)
# Update priorities in buffer
td_error = value_loss.cpu().detach().numpy().flatten()
priority_epsilon = 1e-4
if self.prioritized_replay:
weights_update = np.abs(td_error) + priority_epsilon
replay_priority_queue.put((inds, weights_update))
value_loss = value_loss * torch.tensor(weights).float().to(self.device)
# Update step
value_loss = value_loss.mean()
self.value_optimizer.zero_grad()
value_loss.backward()
self.value_optimizer.step()
# -------- Update actor -----------
policy_loss = self.value_net.get_probs(state, self.policy_net(state))
policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device)
policy_loss = torch.sum(policy_loss, dim=1)
policy_loss = -policy_loss.mean()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.tau) + param.data * self.tau
)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.tau) + param.data * self.tau
)
# Send updated learner to the queue
if update_step.value % 100 == 0:
try:
params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()]
self.learner_w_queue.put(params)
except:
pass
def run(self, training_on, batch_queue, replay_priority_queue, update_step):
while update_step.value < self.num_train_steps:
try:
batch = batch_queue.get_nowait()
except queue.Empty:
continue
self._update_step(batch, replay_priority_queue, update_step)
update_step.value += 1
if update_step.value % 1000 == 0:
print("Training step ", update_step.value)
training_on.value = 0
empty_torch_queue(self.learner_w_queue)
empty_torch_queue(replay_priority_queue)
print("Exit learner.")
| [
"torch.from_numpy",
"numpy.abs",
"torch.nn.BCELoss",
"insomnia.utils.empty_torch_queue",
"insomnia.numeric_models.d4pg.CriticNetwork",
"numpy.asarray",
"numpy.zeros",
"time.time",
"torch.cuda.is_available",
"torch.sum",
"torch.tensor",
"insomnia.numeric_models.misc.l2_projection._l2_project"
] | [((1153, 1270), 'insomnia.numeric_models.d4pg.CriticNetwork', 'd4pg.CriticNetwork', (['beta', 'input_dims', 'fc1_dims', 'fc2_dims', 'n_actions', 'name', 'self.v_min', 'self.v_max', 'self.num_atoms'], {}), '(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,\n self.v_min, self.v_max, self.num_atoms)\n', (1171, 1270), False, 'from insomnia.numeric_models import d4pg\n'), ((1380, 1497), 'insomnia.numeric_models.d4pg.CriticNetwork', 'd4pg.CriticNetwork', (['beta', 'input_dims', 'fc1_dims', 'fc2_dims', 'n_actions', 'name', 'self.v_min', 'self.v_max', 'self.num_atoms'], {}), '(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,\n self.v_min, self.v_max, self.num_atoms)\n', (1398, 1497), False, 'from insomnia.numeric_models import d4pg\n'), ((2102, 2130), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (2112, 2130), True, 'import torch.nn as nn\n'), ((2225, 2236), 'time.time', 'time.time', ([], {}), '()\n', (2234, 2236), False, 'import time\n'), ((2333, 2350), 'numpy.asarray', 'np.asarray', (['state'], {}), '(state)\n', (2343, 2350), True, 'import numpy as np\n'), ((2368, 2386), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (2378, 2386), True, 'import numpy as np\n'), ((2404, 2422), 'numpy.asarray', 'np.asarray', (['reward'], {}), '(reward)\n', (2414, 2422), True, 'import numpy as np\n'), ((2444, 2466), 'numpy.asarray', 'np.asarray', (['next_state'], {}), '(next_state)\n', (2454, 2466), True, 'import numpy as np\n'), ((2482, 2498), 'numpy.asarray', 'np.asarray', (['done'], {}), '(done)\n', (2492, 2498), True, 'import numpy as np\n'), ((2517, 2536), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (2527, 2536), True, 'import numpy as np\n'), ((3285, 3488), 'insomnia.numeric_models.misc.l2_projection._l2_project', 'l2_projection._l2_project', ([], {'next_distr_v': 'target_value', 'rewards_v': 'reward', 'dones_mask_t': 'done', 'gamma': '(self.gamma ** 5)', 'n_atoms': 'self.num_atoms', 'v_min': 'self.v_min', 'v_max': 'self.v_max', 'delta_z': 'self.delta_z'}), '(next_distr_v=target_value, rewards_v=reward,\n dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min\n =self.v_min, v_max=self.v_max, delta_z=self.delta_z)\n', (3310, 3488), False, 'from insomnia.numeric_models.misc import l2_projection\n'), ((4892, 4921), 'torch.sum', 'torch.sum', (['policy_loss'], {'dim': '(1)'}), '(policy_loss, dim=1)\n', (4901, 4921), False, 'import torch\n'), ((6349, 6388), 'insomnia.utils.empty_torch_queue', 'empty_torch_queue', (['self.learner_w_queue'], {}), '(self.learner_w_queue)\n', (6366, 6388), False, 'from insomnia.utils import empty_torch_queue\n'), ((6397, 6437), 'insomnia.utils.empty_torch_queue', 'empty_torch_queue', (['replay_priority_queue'], {}), '(replay_priority_queue)\n', (6414, 6437), False, 'from insomnia.utils import empty_torch_queue\n'), ((968, 993), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (991, 993), False, 'import torch\n'), ((1074, 1093), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (1082, 1093), True, 'import numpy as np\n'), ((2552, 2568), 'numpy.asarray', 'np.asarray', (['inds'], {}), '(inds)\n', (2562, 2568), True, 'import numpy as np\n'), ((4294, 4310), 'numpy.abs', 'np.abs', (['td_error'], {}), '(td_error)\n', (4300, 4310), True, 'import numpy as np\n'), ((2596, 2619), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (2612, 2619), False, 'import torch\n'), ((2665, 2693), 'torch.from_numpy', 'torch.from_numpy', 
(['next_state'], {}), '(next_state)\n', (2681, 2693), False, 'import torch\n'), ((2735, 2759), 'torch.from_numpy', 'torch.from_numpy', (['action'], {}), '(action)\n', (2751, 2759), False, 'import torch\n'), ((2801, 2825), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (2817, 2825), False, 'import torch\n'), ((2865, 2887), 'torch.from_numpy', 'torch.from_numpy', (['done'], {}), '(done)\n', (2881, 2887), False, 'import torch\n'), ((3796, 3832), 'torch.from_numpy', 'torch.from_numpy', (['target_z_projected'], {}), '(target_z_projected)\n', (3812, 3832), False, 'import torch\n'), ((4805, 4845), 'torch.from_numpy', 'torch.from_numpy', (['self.value_net.z_atoms'], {}), '(self.value_net.z_atoms)\n', (4821, 4845), False, 'import torch\n'), ((4430, 4451), 'torch.tensor', 'torch.tensor', (['weights'], {}), '(weights)\n', (4442, 4451), False, 'import torch\n')] |
import logging
from wrapper import *
logger = logging.getLogger(__name__)
# noinspection PyUnusedLocal
def get_user(url='', key='', timeout=60, **kwargs):
return get(url + '/user', headers={'Authorization': "Bearer " + key}, timeout=timeout).json()
# noinspection PyUnusedLocal
def get_user_tokens(url='', key='', timeout=60, **kwargs):
return get(url + '/user/tokens',
headers={'Authorization': "Bearer " + key}, timeout=timeout).json()
# noinspection PyUnusedLocal
def create_user_token(url='', key='', token_name='', timeout=60, **kwargs):
return post(url + '/user/tokens',
headers={'Authorization': "Bearer " + key},
data={'name': token_name}, timeout=timeout).json()
# noinspection PyUnusedLocal
def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs):
return delete(url + '/user/tokens/' + token_name,
headers={'Authorization': "Bearer " + key}, timeout=timeout)
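# Hedged usage sketch (the URL and token below are placeholders, not real credentials):
#   me = get_user(url='https://example.com/api/v0', key='API_TOKEN')
#   tokens = get_user_tokens(url='https://example.com/api/v0', key='API_TOKEN')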
| [
"logging.getLogger"
] | [((47, 74), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (64, 74), False, 'import logging\n')] |
from __future__ import absolute_import # Need to import lwr_client absolutely.
from ..objectstore import ObjectStore
try:
from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager
except ImportError:
from lwr.lwr_client.manager import ObjectStoreClientManager
class LwrObjectStore(ObjectStore):
"""
Object store implementation that delegates to a remote LWR server.
This may be more aspirational than practical for now, it would be good to
Galaxy to a point that a handler thread could be setup that doesn't attempt
to access the disk files returned by a (this) object store - just passing
them along to the LWR unmodified. That modification - along with this
implementation and LWR job destinations would then allow Galaxy to fully
manage jobs on remote servers with completely different mount points.
This implementation should be considered beta and may be dropped from
Galaxy at some future point or significantly modified.
"""
def __init__(self, config, config_xml):
self.lwr_client = self.__build_lwr_client(config_xml)
def exists(self, obj, **kwds):
return self.lwr_client.exists(**self.__build_kwds(obj, **kwds))
def file_ready(self, obj, **kwds):
return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds))
def create(self, obj, **kwds):
return self.lwr_client.create(**self.__build_kwds(obj, **kwds))
def empty(self, obj, **kwds):
return self.lwr_client.empty(**self.__build_kwds(obj, **kwds))
def size(self, obj, **kwds):
return self.lwr_client.size(**self.__build_kwds(obj, **kwds))
def delete(self, obj, **kwds):
return self.lwr_client.delete(**self.__build_kwds(obj, **kwds))
# TODO: Optimize get_data.
def get_data(self, obj, **kwds):
return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds))
def get_filename(self, obj, **kwds):
return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds))
def update_from_file(self, obj, **kwds):
return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds))
def get_store_usage_percent(self):
return self.lwr_client.get_store_usage_percent()
def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
return None
def __build_kwds(self, obj, **kwds):
kwds['object_id'] = obj.id
return kwds
def __build_lwr_client(self, config_xml):
url = config_xml.get("url")
private_token = config_xml.get("private_token", None)
transport = config_xml.get("transport", None)
manager_options = dict(transport=transport)
client_options = dict(url=url, private_token=private_token)
lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options)
return lwr_client
def shutdown(self):
pass
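# Hedged configuration sketch: in this module __build_lwr_client only reads the `url`,
# `private_token` and `transport` attributes of config_xml, so an ElementTree element
# works (element and attribute names beyond those three are illustrative):
#   import xml.etree.ElementTree as ET
#   config_xml = ET.fromstring('<object_store type="lwr" url="https://lwr.example.org" private_token="SECRET"/>')
#   store = LwrObjectStore(config=None, config_xml=config_xml)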
| [
"lwr.lwr_client.manager.ObjectStoreClientManager"
] | [((2814, 2857), 'lwr.lwr_client.manager.ObjectStoreClientManager', 'ObjectStoreClientManager', ([], {}), '(**manager_options)\n', (2838, 2857), False, 'from lwr.lwr_client.manager import ObjectStoreClientManager\n')] |
"""
Demo of json_required decorator for API input validation/error handling
"""
import inspect
import functools
import json
from traceback import format_exception
from flask import jsonify, request
import sys
from flask.exceptions import JSONBadRequest
from flask import Flask
import re
app = Flask(__name__)
def api_error_response(code=404, message="Requested resource was not found", errors=list()):
"""
Convenience function for returning a JSON response that includes
appropriate error messages and code.
"""
response = jsonify(dict(code=code, message=message, errors=errors, success=False))
response.status_code = code
return response
def bad_json_error_response():
"""
Convenience function for returning an error message related to
malformed/missing JSON data.
"""
return api_error_response(code=400,
message="There was a problem parsing the supplied JSON data. Please send valid JSON.")
def json_required(func=None, required_fields={}, validations=[]):
"""
Decorator used to validate JSON input to an API request
"""
if func is None:
return functools.partial(json_required, required_fields=required_fields, validations=validations)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
try:
#If no JSON was supplied (or it didn't parse correctly)
try:
if request.json is None:
return bad_json_error_response()
except JSONBadRequest:
return bad_json_error_response()
#Check for specific fields
errors = []
def check_required_fields(data, fields):
for field, requirements in fields.iteritems():
nested_fields = type(requirements) == dict
if data.get(field) in (None, ''):
if nested_fields:
error_msg = requirements.get('message')
else:
error_msg = requirements
errors.append({'field': field, 'message': error_msg})
elif nested_fields:
check_required_fields(data[field], requirements.get('fields', {}))
check_required_fields(request.json, required_fields)
for validation_field, validation_message, validation_func in validations:
func_args = inspect.getargspec(validation_func).args
func_params = []
for arg in func_args:
func_params.append(request.json.get(arg))
if not validation_func(*func_params):
errors.append({'field': validation_field, 'message': validation_message})
if errors:
return api_error_response(code=422, message="JSON Validation Failed", errors=errors)
except Exception:
#For internal use, nice to have the traceback in the API response for debugging
#Probably don't want to include for public APIs
etype, value, tb = sys.exc_info()
error_info = ''.join(format_exception(etype, value, tb))
return api_error_response(code=500, message="Internal Error validating API input", errors=[{'message':error_info}])
return func(*args, **kwargs)
return decorated_function
EMAIL_REGEX = re.compile(r"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?")
def verify_account_available(email):
"""
Check to see if this email is already registered
"""
#Run a query, use an ORM, use Twilio to call someone and ask them :-)
return True
def valid_date_of_birth(date_of_birth):
"""
Does the supplied date string meet our criteria for a date of birth
"""
#Do whatever you need to do...
return True
@app.route("/do/something", methods=['POST'])
@json_required(
required_fields={
'first_name':"Please provide your first name.",
'last_name':"Please provide your last name.",
'email':'Please specify a valid email address',
'date_of_birth':'Please provide your date of birth'
},
validations=[
('email', 'Please provide a valid email address', lambda email: email is not None and EMAIL_REGEX.match(email)),
('email', "This email is already in use. Please try a different email address.", verify_account_available),
('date_of_birth', 'Please provide a valid date of birth', valid_date_of_birth)
]
)
def do_something_useful():
#Confidently use the data in request.json...
return jsonify(dict(status='OK'))
if __name__ == "__main__":
with app.test_client() as client:
response = client.post(
'/do/something',
data=json.dumps({ "first_name": "Brian",
"last_name": "Corbin",
"email": "<EMAIL>",
"date_of_birth": "01/01/1970" }),
follow_redirects=True,
content_type='application/json')
response_dict = json.loads(response.data)
assert response_dict['status'] == 'OK'
response = client.post(
'/do/something',
data=json.dumps({ "last_name": "Corbin",
"email": "<EMAIL>",
"date_of_birth": "01/01/1970" }),
follow_redirects=True,
content_type='application/json')
response_dict = json.loads(response.data)
assert response.status_code == 422
assert response_dict['code'] == 422
assert response_dict['message'] == "JSON Validation Failed"
assert len(response_dict['errors']) == 1
assert response_dict['errors'][0]['field'] == 'first_name'
assert response_dict['errors'][0]['message'] == 'Please provide your first name.'
| [
"functools.partial",
"traceback.format_exception",
"json.loads",
"flask.Flask",
"json.dumps",
"inspect.getargspec",
"functools.wraps",
"sys.exc_info",
"flask.request.json.get",
"re.compile"
] | [((300, 315), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (305, 315), False, 'from flask import Flask\n'), ((3436, 3619), 're.compile', 're.compile', (['"""[A-Za-z0-9!#$%&\'*+/=?^_`{|}~-]+(?:\\\\.[A-Za-z0-9!#$%&\'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?"""'], {}), '(\n "[A-Za-z0-9!#$%&\'*+/=?^_`{|}~-]+(?:\\\\.[A-Za-z0-9!#$%&\'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?"\n )\n', (3446, 3619), False, 'import re\n'), ((1257, 1278), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1272, 1278), False, 'import functools\n'), ((1161, 1255), 'functools.partial', 'functools.partial', (['json_required'], {'required_fields': 'required_fields', 'validations': 'validations'}), '(json_required, required_fields=required_fields,\n validations=validations)\n', (1178, 1255), False, 'import functools\n'), ((5233, 5258), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (5243, 5258), False, 'import json\n'), ((5640, 5665), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (5650, 5665), False, 'import json\n'), ((3140, 3154), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3152, 3154), False, 'import sys\n'), ((4925, 5038), 'json.dumps', 'json.dumps', (["{'first_name': 'Brian', 'last_name': 'Corbin', 'email': '<EMAIL>',\n 'date_of_birth': '01/01/1970'}"], {}), "({'first_name': 'Brian', 'last_name': 'Corbin', 'email':\n '<EMAIL>', 'date_of_birth': '01/01/1970'})\n", (4935, 5038), False, 'import json\n'), ((5385, 5475), 'json.dumps', 'json.dumps', (["{'last_name': 'Corbin', 'email': '<EMAIL>', 'date_of_birth': '01/01/1970'}"], {}), "({'last_name': 'Corbin', 'email': '<EMAIL>', 'date_of_birth':\n '01/01/1970'})\n", (5395, 5475), False, 'import json\n'), ((2482, 2517), 'inspect.getargspec', 'inspect.getargspec', (['validation_func'], {}), '(validation_func)\n', (2500, 2517), False, 'import inspect\n'), ((3188, 3222), 'traceback.format_exception', 'format_exception', (['etype', 'value', 'tb'], {}), '(etype, value, tb)\n', (3204, 3222), False, 'from traceback import format_exception\n'), ((2633, 2654), 'flask.request.json.get', 'request.json.get', (['arg'], {}), '(arg)\n', (2649, 2654), False, 'from flask import jsonify, request\n')] |
import torch
from ..math.cross import *
from ..math.normvec import *
class CameraExtrinsic(object):
"""
A class representing the camera extrinsic properties
Attributes
----------
position : Tensor
the camera position
target : Tensor
the camera target
up_vector : Tensor
the camera up vector
device : str or torch.device
the device to store the tensors to
Methods
-------
look_at(target)
sets the camera target
look_from(position)
sets the camera position
direction()
returns the camera direction
view_matrix()
returns the current view matrix
to(**kwargs)
changes extrinsic dtype and/or device
"""
def __init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'):
"""
Parameters
----------
position : list or tuple (optional)
the camera position (default is (0, 0, 0))
target : list or tuple (optional)
the camera target (default is (0, 0, 1))
up_vector : list or tuple (optional)
the camera up vector (default is (0, 1, 0))
device : str or torch.device (optional)
the device to store the tensors to (default is 'cuda:0')
"""
self.position = torch.tensor(position, dtype=torch.float, device=device)
self.target = torch.tensor(target, dtype=torch.float, device=device)
self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device)
self._device = device
def look_at(self, target):
"""
Sets the camera target
Parameters
----------
target : Tensor
the (3,) target tensor
Returns
-------
CameraExtrinsic
the extrinsic itself
"""
self.target = target
return self
def look_from(self, position):
"""
Sets the camera position
Parameters
----------
position : Tensor
the (3,) position tensor
Returns
-------
CameraExtrinsic
the extrinsic itself
"""
self.position = position
return self
def direction(self):
"""
Returns the camera direction
Returns
-------
Tensor
the (3,) direction tensor
"""
return self.target - self.position
def view_matrix(self):
"""
Returns the current view matrix
Returns
-------
Tensor
a (4,4,) view matrix
"""
z = normr(self.direction().unsqueeze(0))
x = normr(cross(self.up_vector.unsqueeze(0), z))
y = cross(z, x)
p = self.position.unsqueeze(0)
M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1),
torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)),
dim=0)
return M
def to(self, **kwargs):
"""
Changes the extrinsic dtype and/or device
Parameters
----------
kwargs : ...
Returns
-------
CameraExtrinsic
the extrinsic itself
"""
if 'device' in kwargs:
self._device = kwargs['device']
self.position = self.position.to(**kwargs)
self.target = self.target.to(**kwargs)
self.up_vector = self.up_vector.to(**kwargs)
return self
@property
def device(self):
return self._device
@device.setter
def device(self, value):
self._device = value
self.position = self.position.to(self.device)
self.target = self.target.to(self.device)
self.up_vector = self.up_vector.to(self.device)
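# Hedged usage sketch (device='cpu' chosen so the example runs without CUDA):
#   cam = CameraExtrinsic(position=(0, 0, -3), target=(0, 0, 0), device='cpu')
#   cam.look_at(torch.tensor([0.0, 1.0, 0.0]))
#   V = cam.view_matrix()  # (4, 4) world-to-camera matrix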
| [
"torch.tensor"
] | [((1340, 1396), 'torch.tensor', 'torch.tensor', (['position'], {'dtype': 'torch.float', 'device': 'device'}), '(position, dtype=torch.float, device=device)\n', (1352, 1396), False, 'import torch\n'), ((1423, 1477), 'torch.tensor', 'torch.tensor', (['target'], {'dtype': 'torch.float', 'device': 'device'}), '(target, dtype=torch.float, device=device)\n', (1435, 1477), False, 'import torch\n'), ((1506, 1563), 'torch.tensor', 'torch.tensor', (['up_vector'], {'dtype': 'torch.float', 'device': 'device'}), '(up_vector, dtype=torch.float, device=device)\n', (1518, 1563), False, 'import torch\n'), ((2915, 2982), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 1]]'], {'dtype': 'torch.float', 'device': 'self.device'}), '([[0, 0, 0, 1]], dtype=torch.float, device=self.device)\n', (2927, 2982), False, 'import torch\n')] |
import uuid
from textwrap import dedent
from IPython.core.display import display, HTML
from string import Template
import numpy as np
# function to initialize a scatter plot
def init_chart(data,features):
chart_id = 'mychart-' + str(uuid.uuid4())
feature_types = {} # map each feature to type
num_feature_ranges = {}
for x in features:
if data[x].dtype in ["int64", "float64"]:
feature_domain = [min(data[x].dropna()), max(data[x].dropna())]
if feature_domain[1] == feature_domain[0]:
feature_types[x] = "categorical"
else:
feature_types[x] = data[x].dtype.name
num_feature_ranges[x] = feature_domain
else:
feature_types[x] = "categorical"
display(HTML('<script src="/static/components/requirejs/require.js"></script>'))
display(HTML(Template(dedent('''
<style>
body {
font: 11px sans-serif;
color: #2A3F5E
}
.chart {
background-color: #E5ECF6;
display: relative;
}
.axis path,
.axis line {
fill: none;
stroke: #2A3F5E;
shape-rendering: crispEdges;
}
.label {
color: #2A3F5E;
}
.selection {
margin-bottom: 20px;
}
.dot {
stroke: #fff;
opacity: 0.8;
}
.grid line {
stroke: #fff;
stroke-opacity: 0.7;
stroke-width: 2px;
shape-rendering: crispEdges;
}
.grid path {
stroke-width: 0;
}
.tooltip {
position: absolute;
font-size: 12px;
width: auto;
height: auto;
pointer-events: none;
background-color: white;
padding: 5px;
}
.legend {
background-color: white;
position: absolute;
left: 650px;
top: 20px;
width: auto;
height: 500px;
}
</style>
<script>
require.config({
paths: {
'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min',
}
})
// If we configure mychart via url, we can eliminate this define here
define($chart_id, ['d3'], function(d3) {
return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) {
var initialFeature = d3.select("#" + select_id).property("value")
var margin = {top: 40, right: 10, bottom: 50, left: 50},
width = 650 - margin.left - margin.right,
height = 400 - margin.top - margin.bottom;
// append the svg object to the body of the page
var svg = d3.select('#' + figure_id)
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// X and Y scales and Axis
var x = d3.scaleLinear()
.domain(axes["x"])
.range([0, width]);
var y = d3.scaleLinear()
.domain(axes["y"])
.range([height, 0]);
// Add X-axis and label
svg
.append('g')
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(d3.axisBottom(x))
svg.append("text")
.attr("class", "label")
.attr("x", width / 2)
.attr("y", height + 35)
.style("text-anchor", "end")
.text(xCat);
// Add Y-axis and label
svg
.append('g')
.call(d3.axisLeft(y));
svg.append("text")
.attr("class", "label")
.attr("x", -(height - 15)/ 2 )
.attr("y", -30)
.attr("transform", "rotate(-90)")
.style("text-anchor", "end")
.text(yCat);
// gridlines in x axis function
function make_x_gridlines() {
return d3.axisBottom(x)
.ticks(5)
}
// gridlines in y axis function
function make_y_gridlines() {
return d3.axisLeft(y)
.ticks(5)
}
// add grid lines
// add the X gridlines
svg.append("g")
.attr("class", "grid")
.attr("transform", "translate(0," + height + ")")
.call(make_x_gridlines()
.tickSize(-height)
.tickFormat("")
)
// add the Y gridlines
svg.append("g")
.attr("class", "grid")
.call(make_y_gridlines()
.tickSize(-width)
.tickFormat("")
)
// Add the datapoints
var dots = svg
.selectAll()
.data(data)
.enter()
.append("circle")
// Add the tooltip container to the body container
// it's invisible and its position/contents are defined during mouseover
var tooltip = d3.select("body").append("div")
.attr("class", "tooltip")
.style("opacity", 0);
// Add the legend container to the body container
var legend = d3.select("#" + legend_id).attr("y", 0);
// tooltip mouseover event handler
var tipMouseover = d => {
// x and y numeric labels
let html = xCat + ": " + Number((d[xCat]).toFixed(3)) + "<br>" + yCat + ": " + Number((d[yCat]).toFixed(3)) + "<br><br>"
// color feature label
html += colorFeature + ": " + d[colorFeature]
tooltip.html(html)
.style("left", (d3.event.pageX + 10) + "px")
.style("top", (d3.event.pageY - 15) + "px")
.transition()
.style("opacity", .9)
};
function updateLegendCat(featureColors) { // create the categorical legend
var legend = d3.select("#" + legend_id).html("") // clear current legend content
legend.append("text")
.attr("x", 15)
.attr("y", 10)
.text(colorFeature)
.attr("font-size", "14px")
let i = 0
Object.keys(featureColors).forEach(feature => {
legend.append("circle")
.attr("cx",20)
.attr("cy",30 + 20*i)
.attr("r", 4)
.style("fill", featureColors[feature])
legend.append("text")
.attr("x", 40)
.attr("y", 30 + 20*i )
.text(feature)
.style("font-size", "14px")
.attr("alignment-baseline","middle")
i += 1
})
}
function updateLegendNum(domain) { // create the continuous (numerical) legend
var legend = d3.select("#" + legend_id).html("")
var width = 30,
height = 300;
// add legend title
legend.append("text")
.attr("x", 15)
.attr("y", 10)
.text(colorFeature)
.attr("font-size", "14px")
var textHeight = 1;
var linearGradient = legend.append("defs")
.append("linearGradient")
.attr("id", "linear-gradient")
.attr("gradientTransform", "rotate(90)");
var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100])
for (let i = 0; i <= 100; i += 5)
linearGradient.append("stop")
.attr("offset", i + "%")
.attr("stop-color", color(100-i)); // to get the right orientation of gradient
const legendScale = num => {
var scale = d3.scaleLinear()
.domain([5, 0])
.range(domain)
return Number((scale(num))).toFixed(0)
}
legend.append("rect")
.attr("x", 20)
.attr("y", 30)
.attr("width", width)
.attr("height", height)
.style("fill", "url(#linear-gradient)");
for (let i = 0; i <= 5; i += 1) {
legend.append("text")
.attr("x", 55)
.attr("y", 30 + textHeight/2 + ((height-textHeight*6)/5)*i)
.text(legendScale(i))
.style("font-size", "14px")
.attr("alignment-baseline","middle");
}
}
// tooltip mouseout event handler
var tipMouseout = d => {
tooltip.transition()
.duration(0) // ms
.style("opacity", 0); // don't care about position!
};
var sizeScale = d3.scaleLinear()
.domain(sizeCat["range"])
.range([3,7])
dots.attr("class", "dot")
.attr("cx", d => x(d[xCat]) )
.attr("cy", d => y(d[yCat]) )
.attr("r", d => sizeScale(d[sizeCat["label"]]))
.on("mouseover", tipMouseover)
.on("mouseout", tipMouseout)
update(initialFeature)
// A function that update the chart with the new color coding scheme
function update(feature) {
colorFeature = feature
var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
var color;
let type = $feature_types[feature];
if (type === "categorical") {
color = d3.scaleOrdinal(colors);
let featureColors = {}
dots
.attr("fill", d => {
let dotColor = color(d[feature])
featureColors[d[feature]] = dotColor
return dotColor
})
updateLegendCat(featureColors) // update the legend with the new color map
} else {
let feature_domain = $num_feature_ranges[feature]
color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain)
dots
.attr("fill", d => {
let dotColor = color(d[feature])
return dotColor
})
updateLegendNum(feature_domain)
}
}
d3.select("#" + select_id).on("change", function(d) {
// recover the option that has been chosen
var selectedOption = d3.select(this).property("value")
// run the updateChart function with this selected option
update(selectedOption)
});
}
})
</script>
''')).substitute({ 'chart_id': repr(chart_id),
'feature_types': repr(feature_types),
'num_feature_ranges': repr(num_feature_ranges)})))
return chart_id
def scatter_plot(data,x_cat,y_cat,axes,features):
chart_id = init_chart(data,features)
features_html_options = "".join([ f"<option value ='{x}'>{x}</option>" for x in features ])
dict_data = data.replace(np.nan, "N/A").to_dict("records")
size_cat = {
"label": "n_reads",
"range": [min(data["n_reads"]), max(data["n_reads"])]
}
display(HTML(Template(dedent('''
<div class="selection">
<label for="colorFeature"
style="display: inline-block; width: 240px; text-align: right">
<span> Color by feature: </span>
</label>
<select id=$select_id>
$options
</select>
</div>
<div style="position: relative">
<svg id=$figure_id class='chart'></svg>
<div class="legend"><svg id=$legend_id height=500 width=400></svg></div>
</div>
<script>
require([$chart_id], function(mychart) {
mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes )
})
</script>
''')).substitute({
'chart_id': repr(chart_id),
'figure_id': repr('fig-' + str(uuid.uuid4())),
'legend_id': repr('leg-' + str(uuid.uuid4())),
'select_id': repr('sel-' + str(uuid.uuid4())),
'data': repr(dict_data),
'axes': repr(axes),
'x_cat': repr(x_cat),
'y_cat': repr(y_cat),
'size_cat': repr(size_cat),
'options': repr(features_html_options)
})))
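# Hedged usage sketch for a Jupyter notebook (column names and ranges are illustrative;
# the DataFrame must contain an "n_reads" column because scatter_plot hard-codes it for marker size):
#   import pandas as pd
#   df = pd.DataFrame({"x": [0.1, 0.5], "y": [1.2, 0.7], "n_reads": [100, 2500], "cluster": ["a", "b"]})
#   scatter_plot(df, "x", "y", axes={"x": [0, 1], "y": [0, 2]}, features=["cluster", "n_reads"])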
| [
"IPython.core.display.HTML",
"uuid.uuid4",
"textwrap.dedent"
] | [((782, 853), 'IPython.core.display.HTML', 'HTML', (['"""<script src="/static/components/requirejs/require.js"></script>"""'], {}), '(\'<script src="/static/components/requirejs/require.js"></script>\')\n', (786, 853), False, 'from IPython.core.display import display, HTML\n'), ((238, 250), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (248, 250), False, 'import uuid\n'), ((881, 11018), 'textwrap.dedent', 'dedent', (['"""\n <style>\n body {\n font: 11px sans-serif;\n color: #2A3F5E\n }\n .chart {\n background-color: #E5ECF6;\n display: relative;\n }\n .axis path,\n .axis line {\n fill: none;\n stroke: #2A3F5E;\n shape-rendering: crispEdges;\n }\n .label {\n color: #2A3F5E;\n }\n .selection {\n margin-bottom: 20px;\n }\n .dot {\n stroke: #fff;\n opacity: 0.8;\n }\n .grid line {\n stroke: #fff;\n stroke-opacity: 0.7;\n stroke-width: 2px;\n shape-rendering: crispEdges;\n }\n .grid path {\n stroke-width: 0;\n }\n .tooltip {\n position: absolute;\n font-size: 12px;\n width: auto;\n height: auto;\n pointer-events: none;\n background-color: white;\n padding: 5px;\n }\n .legend {\n background-color: white;\n position: absolute;\n left: 650px;\n top: 20px;\n width: auto;\n height: 500px;\n }\n </style>\n <script>\n require.config({\n paths: {\n \'d3\': \'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min\',\n }\n })\n\n // If we configure mychart via url, we can eliminate this define here\n define($chart_id, [\'d3\'], function(d3) {\n return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) {\n\n var initialFeature = d3.select("#" + select_id).property("value")\n\n var margin = {top: 40, right: 10, bottom: 50, left: 50},\n width = 650 - margin.left - margin.right,\n height = 400 - margin.top - margin.bottom;\n\n // append the svg object to the body of the page\n var svg = d3.select(\'#\' + figure_id)\n .attr("width", width + margin.left + margin.right)\n .attr("height", height + margin.top + margin.bottom)\n .append("g")\n .attr("transform",\n "translate(" + margin.left + "," + margin.top + ")");\n\n // X and Y scales and Axis\n var x = d3.scaleLinear()\n .domain(axes["x"])\n .range([0, width]);\n\n var y = d3.scaleLinear()\n .domain(axes["y"])\n .range([height, 0]);\n\n // Add X-axis and label\n svg\n .append(\'g\')\n .attr("class", "x axis")\n .attr("transform", "translate(0," + height + ")")\n .call(d3.axisBottom(x))\n\n svg.append("text")\n .attr("class", "label")\n .attr("x", width / 2)\n .attr("y", height + 35)\n .style("text-anchor", "end")\n .text(xCat);\n\n // Add Y-axis and label\n\n svg\n .append(\'g\')\n .call(d3.axisLeft(y));\n\n svg.append("text")\n .attr("class", "label")\n .attr("x", -(height - 15)/ 2 )\n .attr("y", -30)\n .attr("transform", "rotate(-90)")\n .style("text-anchor", "end")\n .text(yCat);\n\n\n // gridlines in x axis function\n function make_x_gridlines() {\n return d3.axisBottom(x)\n .ticks(5)\n }\n // gridlines in y axis function\n function make_y_gridlines() {\n return d3.axisLeft(y)\n .ticks(5)\n }\n\n // add grid lines\n\n // add the X gridlines\n svg.append("g")\n .attr("class", "grid")\n .attr("transform", "translate(0," + height + ")")\n .call(make_x_gridlines()\n .tickSize(-height)\n .tickFormat("")\n )\n\n // add the Y gridlines\n svg.append("g")\n .attr("class", "grid")\n .call(make_y_gridlines()\n .tickSize(-width)\n .tickFormat("")\n )\n\n // Add the datapoints\n var dots = svg\n .selectAll()\n .data(data)\n .enter()\n .append("circle")\n\n // Add the tooltip container to the body container\n // it\'s invisible and its 
position/contents are defined during mouseover\n var tooltip = d3.select("body").append("div")\n .attr("class", "tooltip")\n .style("opacity", 0);\n\n // Add the legend container to the body container\n var legend = d3.select("#" + legend_id).attr("y", 0);\n\n // tooltip mouseover event handler\n var tipMouseover = d => {\n // x and y numeric labels\n let html = xCat + ": " + Number((d[xCat]).toFixed(3)) + "<br>" + yCat + ": " + Number((d[yCat]).toFixed(3)) + "<br><br>"\n // color feature label\n html += colorFeature + ": " + d[colorFeature]\n tooltip.html(html)\n .style("left", (d3.event.pageX + 10) + "px")\n .style("top", (d3.event.pageY - 15) + "px")\n .transition()\n .style("opacity", .9)\n\n };\n\n function updateLegendCat(featureColors) { // create the categorical legend\n\n var legend = d3.select("#" + legend_id).html("") // clear current legend content\n\n legend.append("text")\n .attr("x", 15)\n .attr("y", 10)\n .text(colorFeature)\n .attr("font-size", "14px")\n\n let i = 0\n Object.keys(featureColors).forEach(feature => {\n legend.append("circle")\n .attr("cx",20)\n .attr("cy",30 + 20*i)\n .attr("r", 4)\n .style("fill", featureColors[feature])\n legend.append("text")\n .attr("x", 40)\n .attr("y", 30 + 20*i )\n .text(feature)\n .style("font-size", "14px")\n .attr("alignment-baseline","middle")\n i += 1\n })\n\n }\n\n function updateLegendNum(domain) { // create the continuous (numerical) legend\n\n var legend = d3.select("#" + legend_id).html("")\n var width = 30,\n height = 300;\n\n // add legend title\n legend.append("text")\n .attr("x", 15)\n .attr("y", 10)\n .text(colorFeature)\n .attr("font-size", "14px")\n\n var textHeight = 1;\n\n var linearGradient = legend.append("defs")\n .append("linearGradient")\n .attr("id", "linear-gradient")\n .attr("gradientTransform", "rotate(90)");\n\n var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100])\n\n\n for (let i = 0; i <= 100; i += 5)\n linearGradient.append("stop")\n .attr("offset", i + "%")\n .attr("stop-color", color(100-i)); // to get the right orientation of gradient\n\n\n const legendScale = num => {\n var scale = d3.scaleLinear()\n .domain([5, 0])\n .range(domain)\n return Number((scale(num))).toFixed(0)\n\n }\n\n legend.append("rect")\n .attr("x", 20)\n .attr("y", 30)\n .attr("width", width)\n .attr("height", height)\n .style("fill", "url(#linear-gradient)");\n\n for (let i = 0; i <= 5; i += 1) {\n legend.append("text")\n .attr("x", 55)\n .attr("y", 30 + textHeight/2 + ((height-textHeight*6)/5)*i)\n .text(legendScale(i))\n .style("font-size", "14px")\n .attr("alignment-baseline","middle");\n }\n\n }\n\n // tooltip mouseout event handler\n var tipMouseout = d => {\n tooltip.transition()\n .duration(0) // ms\n .style("opacity", 0); // don\'t care about position!\n };\n\n var sizeScale = d3.scaleLinear()\n .domain(sizeCat["range"])\n .range([3,7])\n\n dots.attr("class", "dot")\n .attr("cx", d => x(d[xCat]) )\n .attr("cy", d => y(d[yCat]) )\n .attr("r", d => sizeScale(d[sizeCat["label"]]))\n .on("mouseover", tipMouseover)\n .on("mouseout", tipMouseout)\n\n update(initialFeature)\n\n // A function that update the chart with the new color coding scheme\n function update(feature) {\n colorFeature = feature\n var colors = [\'#636EFA\', \'#EF553B\', \'#00CC96\', \'#AB63FA\', \'#FFA15A\', \'#19D3F3\', \'#FF6692\', \'#B6E880\', \'#FF97FF\', \'#FECB52\']\n\n var color;\n let type = $feature_types[feature];\n if (type === "categorical") {\n color = d3.scaleOrdinal(colors);\n let featureColors = {}\n dots\n .attr("fill", d => 
{\n let dotColor = color(d[feature])\n featureColors[d[feature]] = dotColor\n return dotColor\n })\n updateLegendCat(featureColors) // update the legend with the new color map\n\n } else {\n let feature_domain = $num_feature_ranges[feature]\n color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain)\n\n dots\n .attr("fill", d => {\n let dotColor = color(d[feature])\n return dotColor\n })\n updateLegendNum(feature_domain)\n\n }\n\n }\n\n d3.select("#" + select_id).on("change", function(d) {\n // recover the option that has been chosen\n var selectedOption = d3.select(this).property("value")\n // run the updateChart function with this selected option\n update(selectedOption)\n });\n }\n })\n </script>\n\n """'], {}), '(\n """\n <style>\n body {\n font: 11px sans-serif;\n color: #2A3F5E\n }\n .chart {\n background-color: #E5ECF6;\n display: relative;\n }\n .axis path,\n .axis line {\n fill: none;\n stroke: #2A3F5E;\n shape-rendering: crispEdges;\n }\n .label {\n color: #2A3F5E;\n }\n .selection {\n margin-bottom: 20px;\n }\n .dot {\n stroke: #fff;\n opacity: 0.8;\n }\n .grid line {\n stroke: #fff;\n stroke-opacity: 0.7;\n stroke-width: 2px;\n shape-rendering: crispEdges;\n }\n .grid path {\n stroke-width: 0;\n }\n .tooltip {\n position: absolute;\n font-size: 12px;\n width: auto;\n height: auto;\n pointer-events: none;\n background-color: white;\n padding: 5px;\n }\n .legend {\n background-color: white;\n position: absolute;\n left: 650px;\n top: 20px;\n width: auto;\n height: 500px;\n }\n </style>\n <script>\n require.config({\n paths: {\n \'d3\': \'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min\',\n }\n })\n\n // If we configure mychart via url, we can eliminate this define here\n define($chart_id, [\'d3\'], function(d3) {\n return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) {\n\n var initialFeature = d3.select("#" + select_id).property("value")\n\n var margin = {top: 40, right: 10, bottom: 50, left: 50},\n width = 650 - margin.left - margin.right,\n height = 400 - margin.top - margin.bottom;\n\n // append the svg object to the body of the page\n var svg = d3.select(\'#\' + figure_id)\n .attr("width", width + margin.left + margin.right)\n .attr("height", height + margin.top + margin.bottom)\n .append("g")\n .attr("transform",\n "translate(" + margin.left + "," + margin.top + ")");\n\n // X and Y scales and Axis\n var x = d3.scaleLinear()\n .domain(axes["x"])\n .range([0, width]);\n\n var y = d3.scaleLinear()\n .domain(axes["y"])\n .range([height, 0]);\n\n // Add X-axis and label\n svg\n .append(\'g\')\n .attr("class", "x axis")\n .attr("transform", "translate(0," + height + ")")\n .call(d3.axisBottom(x))\n\n svg.append("text")\n .attr("class", "label")\n .attr("x", width / 2)\n .attr("y", height + 35)\n .style("text-anchor", "end")\n .text(xCat);\n\n // Add Y-axis and label\n\n svg\n .append(\'g\')\n .call(d3.axisLeft(y));\n\n svg.append("text")\n .attr("class", "label")\n .attr("x", -(height - 15)/ 2 )\n .attr("y", -30)\n .attr("transform", "rotate(-90)")\n .style("text-anchor", "end")\n .text(yCat);\n\n\n // gridlines in x axis function\n function make_x_gridlines() {\n return d3.axisBottom(x)\n .ticks(5)\n }\n // gridlines in y axis function\n function make_y_gridlines() {\n return d3.axisLeft(y)\n .ticks(5)\n }\n\n // add grid lines\n\n // add the X gridlines\n svg.append("g")\n .attr("class", "grid")\n .attr("transform", "translate(0," + height + ")")\n .call(make_x_gridlines()\n .tickSize(-height)\n .tickFormat("")\n )\n\n // add 
the Y gridlines\n svg.append("g")\n .attr("class", "grid")\n .call(make_y_gridlines()\n .tickSize(-width)\n .tickFormat("")\n )\n\n // Add the datapoints\n var dots = svg\n .selectAll()\n .data(data)\n .enter()\n .append("circle")\n\n // Add the tooltip container to the body container\n // it\'s invisible and its position/contents are defined during mouseover\n var tooltip = d3.select("body").append("div")\n .attr("class", "tooltip")\n .style("opacity", 0);\n\n // Add the legend container to the body container\n var legend = d3.select("#" + legend_id).attr("y", 0);\n\n // tooltip mouseover event handler\n var tipMouseover = d => {\n // x and y numeric labels\n let html = xCat + ": " + Number((d[xCat]).toFixed(3)) + "<br>" + yCat + ": " + Number((d[yCat]).toFixed(3)) + "<br><br>"\n // color feature label\n html += colorFeature + ": " + d[colorFeature]\n tooltip.html(html)\n .style("left", (d3.event.pageX + 10) + "px")\n .style("top", (d3.event.pageY - 15) + "px")\n .transition()\n .style("opacity", .9)\n\n };\n\n function updateLegendCat(featureColors) { // create the categorical legend\n\n var legend = d3.select("#" + legend_id).html("") // clear current legend content\n\n legend.append("text")\n .attr("x", 15)\n .attr("y", 10)\n .text(colorFeature)\n .attr("font-size", "14px")\n\n let i = 0\n Object.keys(featureColors).forEach(feature => {\n legend.append("circle")\n .attr("cx",20)\n .attr("cy",30 + 20*i)\n .attr("r", 4)\n .style("fill", featureColors[feature])\n legend.append("text")\n .attr("x", 40)\n .attr("y", 30 + 20*i )\n .text(feature)\n .style("font-size", "14px")\n .attr("alignment-baseline","middle")\n i += 1\n })\n\n }\n\n function updateLegendNum(domain) { // create the continuous (numerical) legend\n\n var legend = d3.select("#" + legend_id).html("")\n var width = 30,\n height = 300;\n\n // add legend title\n legend.append("text")\n .attr("x", 15)\n .attr("y", 10)\n .text(colorFeature)\n .attr("font-size", "14px")\n\n var textHeight = 1;\n\n var linearGradient = legend.append("defs")\n .append("linearGradient")\n .attr("id", "linear-gradient")\n .attr("gradientTransform", "rotate(90)");\n\n var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100])\n\n\n for (let i = 0; i <= 100; i += 5)\n linearGradient.append("stop")\n .attr("offset", i + "%")\n .attr("stop-color", color(100-i)); // to get the right orientation of gradient\n\n\n const legendScale = num => {\n var scale = d3.scaleLinear()\n .domain([5, 0])\n .range(domain)\n return Number((scale(num))).toFixed(0)\n\n }\n\n legend.append("rect")\n .attr("x", 20)\n .attr("y", 30)\n .attr("width", width)\n .attr("height", height)\n .style("fill", "url(#linear-gradient)");\n\n for (let i = 0; i <= 5; i += 1) {\n legend.append("text")\n .attr("x", 55)\n .attr("y", 30 + textHeight/2 + ((height-textHeight*6)/5)*i)\n .text(legendScale(i))\n .style("font-size", "14px")\n .attr("alignment-baseline","middle");\n }\n\n }\n\n // tooltip mouseout event handler\n var tipMouseout = d => {\n tooltip.transition()\n .duration(0) // ms\n .style("opacity", 0); // don\'t care about position!\n };\n\n var sizeScale = d3.scaleLinear()\n .domain(sizeCat["range"])\n .range([3,7])\n\n dots.attr("class", "dot")\n .attr("cx", d => x(d[xCat]) )\n .attr("cy", d => y(d[yCat]) )\n .attr("r", d => sizeScale(d[sizeCat["label"]]))\n .on("mouseover", tipMouseover)\n .on("mouseout", tipMouseout)\n\n update(initialFeature)\n\n // A function that update the chart with the new color coding scheme\n function update(feature) {\n colorFeature = feature\n var 
colors = [\'#636EFA\', \'#EF553B\', \'#00CC96\', \'#AB63FA\', \'#FFA15A\', \'#19D3F3\', \'#FF6692\', \'#B6E880\', \'#FF97FF\', \'#FECB52\']\n\n var color;\n let type = $feature_types[feature];\n if (type === "categorical") {\n color = d3.scaleOrdinal(colors);\n let featureColors = {}\n dots\n .attr("fill", d => {\n let dotColor = color(d[feature])\n featureColors[d[feature]] = dotColor\n return dotColor\n })\n updateLegendCat(featureColors) // update the legend with the new color map\n\n } else {\n let feature_domain = $num_feature_ranges[feature]\n color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain)\n\n dots\n .attr("fill", d => {\n let dotColor = color(d[feature])\n return dotColor\n })\n updateLegendNum(feature_domain)\n\n }\n\n }\n\n d3.select("#" + select_id).on("change", function(d) {\n // recover the option that has been chosen\n var selectedOption = d3.select(this).property("value")\n // run the updateChart function with this selected option\n update(selectedOption)\n });\n }\n })\n </script>\n\n """\n )\n', (887, 11018), False, 'from textwrap import dedent\n'), ((11599, 12270), 'textwrap.dedent', 'dedent', (['"""\n <div class="selection">\n <label for="colorFeature"\n style="display: inline-block; width: 240px; text-align: right">\n <span> Color by feature: </span>\n </label>\n <select id=$select_id>\n $options\n </select>\n </div>\n <div style="position: relative">\n <svg id=$figure_id class=\'chart\'></svg>\n <div class="legend"><svg id=$legend_id height=500 width=400></svg></div>\n </div>\n <script>\n require([$chart_id], function(mychart) {\n mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes )\n })\n </script>\n """'], {}), '(\n """\n <div class="selection">\n <label for="colorFeature"\n style="display: inline-block; width: 240px; text-align: right">\n <span> Color by feature: </span>\n </label>\n <select id=$select_id>\n $options\n </select>\n </div>\n <div style="position: relative">\n <svg id=$figure_id class=\'chart\'></svg>\n <div class="legend"><svg id=$legend_id height=500 width=400></svg></div>\n </div>\n <script>\n require([$chart_id], function(mychart) {\n mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes )\n })\n </script>\n """\n )\n', (11605, 12270), False, 'from textwrap import dedent\n'), ((12350, 12362), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12360, 12362), False, 'import uuid\n'), ((12405, 12417), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12415, 12417), False, 'import uuid\n'), ((12460, 12472), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12470, 12472), False, 'import uuid\n')] |
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
from yardstick.benchmark.scenarios import base
from yardstick.common import openstack_utils
from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
class CreateImage(base.Scenario):
"""Create an OpenStack image"""
__scenario_type__ = "CreateImage"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.options = self.scenario_cfg["options"]
self.name = self.options["image_name"]
self.file_name = self.options.get("file_name")
self.container = self.options.get("container", 'images')
self.md5 = self.options.get("md5")
self.sha256 = self.options.get("sha256")
self.disk_format = self.options.get("disk_format")
        self.container_format = self.options.get("container_format")
self.disable_vendor_agent = self.options.get("disable_vendor_agent", True)
self.wait = self.options.get("wait", True)
self.timeout = self.options.get("timeout", 3600)
self.allow_duplicates = self.options.get("allow_duplicates", False)
self.meta = self.options.get("meta")
self.volume = self.options.get("volume")
self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
def setup(self):
"""scenario setup"""
self.setup_done = True
def run(self, result):
"""execute the test"""
if not self.setup_done:
self.setup()
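        # create_image returns the new image id on success, or a falsy value on failure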
image_id = openstack_utils.create_image(
self.shade_client, self.name, filename=self.file_name,
container=self.container, md5=self.md5, sha256=self.sha256,
disk_format=self.disk_format,
container_format=self.container_format,
disable_vendor_agent=self.disable_vendor_agent, wait=self.wait,
timeout=self.timeout, allow_duplicates=self.allow_duplicates,
meta=self.meta, volume=self.volume)
if not image_id:
result.update({"image_create": 0})
LOG.error("Create image failed!")
raise exceptions.ScenarioCreateImageError
result.update({"image_create": 1})
LOG.info("Create image successful!")
keys = self.scenario_cfg.get("output", '').split()
values = [image_id]
return self._push_to_outputs(keys, values)
| [
"yardstick.common.openstack_utils.create_image",
"yardstick.common.openstack_utils.get_shade_client",
"logging.getLogger"
] | [((619, 646), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (636, 646), False, 'import logging\n'), ((1721, 1755), 'yardstick.common.openstack_utils.get_shade_client', 'openstack_utils.get_shade_client', ([], {}), '()\n', (1753, 1755), False, 'from yardstick.common import openstack_utils\n'), ((2009, 2406), 'yardstick.common.openstack_utils.create_image', 'openstack_utils.create_image', (['self.shade_client', 'self.name'], {'filename': 'self.file_name', 'container': 'self.container', 'md5': 'self.md5', 'sha256': 'self.sha256', 'disk_format': 'self.disk_format', 'container_format': 'self.container_format', 'disable_vendor_agent': 'self.disable_vendor_agent', 'wait': 'self.wait', 'timeout': 'self.timeout', 'allow_duplicates': 'self.allow_duplicates', 'meta': 'self.meta', 'volume': 'self.volume'}), '(self.shade_client, self.name, filename=self.\n file_name, container=self.container, md5=self.md5, sha256=self.sha256,\n disk_format=self.disk_format, container_format=self.container_format,\n disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout\n =self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta,\n volume=self.volume)\n', (2037, 2406), False, 'from yardstick.common import openstack_utils\n')] |
#!/usr/bin/env python
#
# Copyright © 2022 Github Lzhiyong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=not-callable, line-too-long, no-else-return
import time
import argparse
import subprocess
from pathlib import Path
def format_time(seconds):
    # divmod on a float returns floats; cast before using the {:d} integer format specs
    minute, sec = divmod(seconds, 60)
    hour, minute = divmod(int(minute), 60)
    if hour > 0:
        return "{}h{:02d}m{:05.2f}s".format(hour, minute, sec)
    elif minute > 0:
        return "{}m{:05.2f}s".format(minute, sec)
else:
return "{:.2f}s".format(sec)
def build(cc, cxx, args):
command = ["cmake", "-GNinja",
"-B {}".format(args.build),
"-DCMAKE_C_COMPILER={}".format(cc),
"-DCMAKE_CXX_COMPILER={}".format(cxx),
"-DTARGET_ABI={}".format(args.arch),
"-DCMAKE_BUILD_TYPE=Release"]
if args.protoc is not None and len(str(args.protoc)) > 0:
command.append("-DPROTOC_PATH={}".format(args.protoc))
result = subprocess.run(command)
start = time.time()
if result.returncode == 0:
if args.target == "all":
result = subprocess.run(["ninja", "-C", args.build, "-j {}".format(args.job)])
else:
result = subprocess.run(["ninja", "-C", args.build, args.target, "-j {}".format(args.job)])
if result.returncode == 0:
end = time.time()
print("\033[1;32mbuild success cost time: {}\033[0m".format(format_time(end - start)))
def configure(args):
ndk = Path(args.ndk)
if not ndk.exists() or not ndk.is_dir():
raise ValueError("cannot find the ndk")
toolchain = ndk / "toolchains/llvm/prebuilt/linux-x86_64"
cc: Path = Path()
cxx: Path = Path()
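    # pick the per-ABI clang/clang++ wrapper scripts from the NDK toolchain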
if args.arch == "aarch64":
cc = toolchain / "bin" / "aarch64-linux-android{}-clang".format(args.api)
cxx = toolchain / "bin" / "aarch64-linux-android{}-clang++".format(args.api)
elif args.arch == "arm":
cc = toolchain / "bin" / "armv7a-linux-androideabi{}-clang".format(args.api)
cxx = toolchain / "bin" / "armv7a-linux-androideabi{}-clang++".format(args.api)
elif args.arch == "x86":
cc = toolchain / "bin" / "i686-linux-android{}-clang".format(args.api)
cxx = toolchain / "bin" / "i686-linux-android{}-clang++".format(args.api)
else:
cc = toolchain / "bin" / "x86_64-linux-android{}-clang".format(args.api)
cxx = toolchain / "bin" / "x86_64-linux-android{}-clang++".format(args.api)
if not cc.exists() or not cxx.exists():
print("cc is {}".format(cc))
print("cxx is {}".format(cxx))
raise ValueError("error: cannot find the clang compiler")
# start building
build(str(cc), str(cxx), args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--ndk", required=True, help="set the ndk toolchain path")
parser.add_argument("--arch", choices=["aarch64", "arm", "x86", "x86_64"],
required=True, help="build for the specified architecture")
parser.add_argument("--api", default=30, help="set android platform level, min api is 30")
parser.add_argument("--build", default="build", help="the build directory")
parser.add_argument("--job", default=16, help="run N jobs in parallel, default is 16")
parser.add_argument("--target", default="all", help="build specified targets such as aapt2 adb fastboot, etc")
parser.add_argument("--protoc", help="set the host protoc path")
args = parser.parse_args()
configure(args)
if __name__ == "__main__":
main()
| [
"subprocess.run",
"pathlib.Path",
"argparse.ArgumentParser",
"time.time"
] | [((1459, 1482), 'subprocess.run', 'subprocess.run', (['command'], {}), '(command)\n', (1473, 1482), False, 'import subprocess\n'), ((1495, 1506), 'time.time', 'time.time', ([], {}), '()\n', (1504, 1506), False, 'import time\n'), ((1965, 1979), 'pathlib.Path', 'Path', (['args.ndk'], {}), '(args.ndk)\n', (1969, 1979), False, 'from pathlib import Path\n'), ((2152, 2158), 'pathlib.Path', 'Path', ([], {}), '()\n', (2156, 2158), False, 'from pathlib import Path\n'), ((2175, 2181), 'pathlib.Path', 'Path', ([], {}), '()\n', (2179, 2181), False, 'from pathlib import Path\n'), ((3222, 3247), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3245, 3247), False, 'import argparse\n'), ((1826, 1837), 'time.time', 'time.time', ([], {}), '()\n', (1835, 1837), False, 'import time\n')] |
import pathlib
from datetime import timedelta
from airflow import DAG
from airflow.operators.python import PythonOperator
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import pickle
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
import json
from airflow.utils.dates import days_ago
def _preprocess_data():
data_df = pd.read_csv("/opt/airflow/data/raw/{{ ds }}/data.csv")
target_df = pd.read_csv("/opt/airflow/data/raw/{{ ds }}/target.csv")
print(f"data before transform: {data_df}")
data_df.drop(columns=["fbs"], inplace=True)
data_df["target"] = target_df
print(f"data after transform: {data_df}")
pathlib.Path("/opt/airflow/data/processed/{{ ds }}").mkdir(parents=True, exist_ok=True)
processed_path = "/opt/airflow/data/processed/{{ ds }}/data.csv"
print(f"saving processed data to {processed_path}")
data_df.to_csv(processed_path, index=False)
def _train_val_split():
data = pd.read_csv("/opt/airflow/data/processed/{{ ds }}/data.csv")
train_data, test_data = train_test_split(data, train_size=0.8)
train_data.to_csv("/opt/airflow/data/processed/{{ ds }}/train.csv", index=False)
test_data.to_csv("/opt/airflow/data/processed/{{ ds }}/test.csv", index=False)
def _train_model():
train_data = pd.read_csv("/opt/airflow/data/processed/{{ ds }}/train.csv")
target = train_data["target"]
train_data.drop(columns=["target"], inplace=True)
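    # scale the numeric columns and one-hot encode the categorical ones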
transformer = ColumnTransformer(
[
(
'num',
Pipeline([('scaler', StandardScaler())]),
["age", "trestbps", "chol", "thalach", "oldpeak"],
),
(
'cat',
Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]),
["sex", "cp", "restecg", "exang", "slope", "ca", "thal"],
),
]
)
    # fit the preprocessing on the training features and train on the transformed matrix
    train_features = transformer.fit_transform(train_data)
    model = LogisticRegression()
    model.fit(train_features, target)
pathlib.Path("/opt/airflow/data/models/{{ ds }}").mkdir(parents=True, exist_ok=True)
with open("/opt/airflow/data/models/{{ ds }}/model.pkl", "wb") as f:
pickle.dump(model, f)
with open("/opt/airflow/data/models/{{ ds }}/transformer.pkl", "wb") as f:
pickle.dump(transformer, f)
def _test_model():
test_data = pd.read_csv("/opt/airflow/data/processed/{{ ds }}/test.csv")
target = test_data["target"]
test_data.drop(columns=["target"], inplace=True)
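    # load the model and transformer produced by the training task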
model = pickle.load(open("/opt/airflow/data/models/{{ ds }}/model.pkl", "rb"))
transformer = pickle.load(open("/opt/airflow/data/models/{{ ds }}/transformer.pkl", "rb"))
    # apply the fitted transformer, then predict on the transformed features
    test_features = transformer.transform(test_data)
    predicts = model.predict(test_features)
metrics = {
"accuracy": accuracy_score(target, predicts),
"f1": f1_score(target, predicts),
"roc_auc": roc_auc_score(target, predicts),
}
pathlib.Path("/opt/airflow/data/metrics/{{ ds }}").mkdir(parents=True, exist_ok=True)
with open("/opt/airflow/data/metrics/{{ ds }}/metrics.json", "w") as metric_file:
json.dump(metrics, metric_file)
with DAG(
dag_id="model_train",
description="This DAG trains model on synthetic data",
start_date=days_ago(0),
schedule_interval=timedelta(days=1),
) as dag:
preprocess_data = PythonOperator(
task_id="data_preprocessing",
python_callable=_preprocess_data,
dag=dag,
)
train_val_split = PythonOperator(
task_id="split_data",
python_callable=_train_val_split,
dag=dag
)
train_model = PythonOperator(
task_id="train_model",
python_callable=_train_model,
dag=dag
)
test_model = PythonOperator(
task_id="test_model",
python_callable=_test_model,
dag=dag
)
preprocess_data >> train_val_split >> train_model >> test_model
| [
"json.dump",
"pickle.dump",
"sklearn.preprocessing.StandardScaler",
"airflow.operators.python.PythonOperator",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.f1_score",
"pathlib.Path",
"datetime.timedelta",
"airflow.utils.dates.days_ago"
] | [((621, 675), 'pandas.read_csv', 'pd.read_csv', (['"""/opt/airflow/data/raw/{{ ds }}/data.csv"""'], {}), "('/opt/airflow/data/raw/{{ ds }}/data.csv')\n", (632, 675), True, 'import pandas as pd\n'), ((693, 749), 'pandas.read_csv', 'pd.read_csv', (['"""/opt/airflow/data/raw/{{ ds }}/target.csv"""'], {}), "('/opt/airflow/data/raw/{{ ds }}/target.csv')\n", (704, 749), True, 'import pandas as pd\n'), ((1245, 1305), 'pandas.read_csv', 'pd.read_csv', (['"""/opt/airflow/data/processed/{{ ds }}/data.csv"""'], {}), "('/opt/airflow/data/processed/{{ ds }}/data.csv')\n", (1256, 1305), True, 'import pandas as pd\n'), ((1335, 1373), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'train_size': '(0.8)'}), '(data, train_size=0.8)\n', (1351, 1373), False, 'from sklearn.model_selection import train_test_split\n'), ((1587, 1648), 'pandas.read_csv', 'pd.read_csv', (['"""/opt/airflow/data/processed/{{ ds }}/train.csv"""'], {}), "('/opt/airflow/data/processed/{{ ds }}/train.csv')\n", (1598, 1648), True, 'import pandas as pd\n'), ((2257, 2277), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2275, 2277), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2670, 2730), 'pandas.read_csv', 'pd.read_csv', (['"""/opt/airflow/data/processed/{{ ds }}/test.csv"""'], {}), "('/opt/airflow/data/processed/{{ ds }}/test.csv')\n", (2681, 2730), True, 'import pandas as pd\n'), ((3701, 3793), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""data_preprocessing"""', 'python_callable': '_preprocess_data', 'dag': 'dag'}), "(task_id='data_preprocessing', python_callable=\n    _preprocess_data, dag=dag)\n", (3715, 3793), False, 'from airflow.operators.python import PythonOperator\n'), ((3847, 3926), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""split_data"""', 'python_callable': '_train_val_split', 'dag': 'dag'}), "(task_id='split_data', python_callable=_train_val_split, dag=dag)\n", (3861, 3926), False, 'from airflow.operators.python import PythonOperator\n'), ((3980, 4056), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""train_model"""', 'python_callable': '_train_model', 'dag': 'dag'}), "(task_id='train_model', python_callable=_train_model, dag=dag)\n", (3994, 4056), False, 'from airflow.operators.python import PythonOperator\n'), ((4109, 4183), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""test_model"""', 'python_callable': '_test_model', 'dag': 'dag'}), "(task_id='test_model', python_callable=_test_model, dag=dag)\n", (4123, 4183), False, 'from airflow.operators.python import PythonOperator\n'), ((2488, 2509), 'pickle.dump', 'pickle.dump', (['model', 'f'], {}), '(model, f)\n', (2499, 2509), False, 'import pickle\n'), ((2601, 2628), 'pickle.dump', 'pickle.dump', (['transformer', 'f'], {}), '(transformer, f)\n', (2612, 2628), False, 'import pickle\n'), ((3120, 3152), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['target', 'predicts'], {}), '(target, predicts)\n', (3134, 3152), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((3169, 3195), 'sklearn.metrics.f1_score', 'f1_score', (['target', 'predicts'], {}), '(target, predicts)\n', (3177, 3195), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((3217, 3248), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['target', 'predicts'], {}), '(target, predicts)\n', (3230, 3248), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((3446, 3477), 'json.dump', 'json.dump', (['metrics', 'metric_file'], {}), '(metrics, metric_file)\n', (3455, 3477), False, 'import json\n'), ((938, 990), 'pathlib.Path', 'pathlib.Path', (['"""/opt/airflow/data/processed/{{ ds }}"""'], {}), "('/opt/airflow/data/processed/{{ ds }}')\n", (950, 990), False, 'import pathlib\n'), ((2320, 2369), 'pathlib.Path', 'pathlib.Path', (['"""/opt/airflow/data/models/{{ ds }}"""'], {}), "('/opt/airflow/data/models/{{ ds }}')\n", (2332, 2369), False, 'import pathlib\n'), ((3264, 3314), 'pathlib.Path', 'pathlib.Path', (['"""/opt/airflow/data/metrics/{{ ds }}"""'], {}), "('/opt/airflow/data/metrics/{{ ds }}')\n", (3276, 3314), False, 'import pathlib\n'), ((3608, 3619), 'airflow.utils.dates.days_ago', 'days_ago', (['(0)'], {}), '(0)\n', (3616, 3619), False, 'from airflow.utils.dates import days_ago\n'), ((3648, 3665), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3657, 3665), False, 'from datetime import timedelta\n'), ((1865, 1881), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1879, 1881), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2047, 2085), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (2060, 2085), False, 'from sklearn.preprocessing import OneHotEncoder\n')]