text            stringlengths   75 to 104k
code_tokens     sequence
avg_line_len    float64         7.91 to 980
score           float64         0 to 0.18
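Each record below pairs a flattened function string with its token sequence and two simple statistics. As a rough, hedged sketch of how such a record could be assembled (the record layout and the reading of avg_line_len as mean characters per source line are assumptions; only the column names come from the schema above, and score is omitted because its definition is not given):

def describe_record(source_code, code_tokens):
    """Build a dict with three of the four columns listed in the schema above."""
    # Fall back to a single empty line so the average is well defined.
    lines = source_code.splitlines() or [""]
    avg_line_len = sum(len(line) for line in lines) / len(lines)
    return {
        "text": source_code,
        "code_tokens": code_tokens,
        "avg_line_len": avg_line_len,
        # "score" is intentionally left out: the schema does not say how it is computed.
    }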
def get_response_content_type(self): """Figure out what content type will be used in the response.""" if self._best_response_match is None: settings = get_settings(self.application, force_instance=True) acceptable = headers.parse_accept( self.request.headers.get( 'Accept', settings.default_content_type if settings.default_content_type else '*/*')) try: selected, _ = algorithms.select_content_type( acceptable, settings.available_content_types) self._best_response_match = '/'.join( [selected.content_type, selected.content_subtype]) if selected.content_suffix is not None: self._best_response_match = '+'.join( [self._best_response_match, selected.content_suffix]) except errors.NoMatch: self._best_response_match = settings.default_content_type return self._best_response_match
[ "def", "get_response_content_type", "(", "self", ")", ":", "if", "self", ".", "_best_response_match", "is", "None", ":", "settings", "=", "get_settings", "(", "self", ".", "application", ",", "force_instance", "=", "True", ")", "acceptable", "=", "headers", ".", "parse_accept", "(", "self", ".", "request", ".", "headers", ".", "get", "(", "'Accept'", ",", "settings", ".", "default_content_type", "if", "settings", ".", "default_content_type", "else", "'*/*'", ")", ")", "try", ":", "selected", ",", "_", "=", "algorithms", ".", "select_content_type", "(", "acceptable", ",", "settings", ".", "available_content_types", ")", "self", ".", "_best_response_match", "=", "'/'", ".", "join", "(", "[", "selected", ".", "content_type", ",", "selected", ".", "content_subtype", "]", ")", "if", "selected", ".", "content_suffix", "is", "not", "None", ":", "self", ".", "_best_response_match", "=", "'+'", ".", "join", "(", "[", "self", ".", "_best_response_match", ",", "selected", ".", "content_suffix", "]", ")", "except", "errors", ".", "NoMatch", ":", "self", ".", "_best_response_match", "=", "settings", ".", "default_content_type", "return", "self", ".", "_best_response_match" ]
50.380952
0.001855
def bounds(self): """Finds min/max for bounds across blocks Returns: tuple(float): length 6 tuple of floats containing min/max along each axis """ bounds = [np.inf,-np.inf, np.inf,-np.inf, np.inf,-np.inf] def update_bounds(ax, nb, bounds): """internal helper to update bounds while keeping track""" if nb[2*ax] < bounds[2*ax]: bounds[2*ax] = nb[2*ax] if nb[2*ax+1] > bounds[2*ax+1]: bounds[2*ax+1] = nb[2*ax+1] return bounds # get bounds for each block and update for i in range(self.n_blocks): try: bnds = self[i].GetBounds() for a in range(3): bounds = update_bounds(a, bnds, bounds) except AttributeError: # Data object doesn't have bounds or is None pass return bounds
[ "def", "bounds", "(", "self", ")", ":", "bounds", "=", "[", "np", ".", "inf", ",", "-", "np", ".", "inf", ",", "np", ".", "inf", ",", "-", "np", ".", "inf", ",", "np", ".", "inf", ",", "-", "np", ".", "inf", "]", "def", "update_bounds", "(", "ax", ",", "nb", ",", "bounds", ")", ":", "\"\"\"internal helper to update bounds while keeping track\"\"\"", "if", "nb", "[", "2", "*", "ax", "]", "<", "bounds", "[", "2", "*", "ax", "]", ":", "bounds", "[", "2", "*", "ax", "]", "=", "nb", "[", "2", "*", "ax", "]", "if", "nb", "[", "2", "*", "ax", "+", "1", "]", ">", "bounds", "[", "2", "*", "ax", "+", "1", "]", ":", "bounds", "[", "2", "*", "ax", "+", "1", "]", "=", "nb", "[", "2", "*", "ax", "+", "1", "]", "return", "bounds", "# get bounds for each block and update", "for", "i", "in", "range", "(", "self", ".", "n_blocks", ")", ":", "try", ":", "bnds", "=", "self", "[", "i", "]", ".", "GetBounds", "(", ")", "for", "a", "in", "range", "(", "3", ")", ":", "bounds", "=", "update_bounds", "(", "a", ",", "bnds", ",", "bounds", ")", "except", "AttributeError", ":", "# Data object doesn't have bounds or is None", "pass", "return", "bounds" ]
33.178571
0.00523
def filter_feed(self, updated=False, following=False, folder=False, filter_folder="", sort="updated", nid=None): """Get filtered feed Only one filter type (updated, following, folder) is possible. :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when created the class :type sort: str :param sort: How to sort feed that will be retrieved; only current known value is "updated" :type updated: bool :param updated: Set to filter through only posts which have been updated since you last read them :type following: bool :param following: Set to filter through only posts which you are following :type folder: bool :param folder: Set to filter through only posts which are in the provided ``filter_folder`` :type filter_folder: str :param filter_folder: Name of folder to show posts from; required only if ``folder`` is set """ assert sum([updated, following, folder]) == 1 if folder: assert filter_folder if updated: filter_type = dict(updated=1) elif following: filter_type = dict(following=1) else: filter_type = dict(folder=1, filter_folder=filter_folder) r = self.request( nid=nid, method="network.filter_feed", data=dict( sort=sort, **filter_type ) ) return self._handle_error(r, "Could not retrieve filtered feed.")
[ "def", "filter_feed", "(", "self", ",", "updated", "=", "False", ",", "following", "=", "False", ",", "folder", "=", "False", ",", "filter_folder", "=", "\"\"", ",", "sort", "=", "\"updated\"", ",", "nid", "=", "None", ")", ":", "assert", "sum", "(", "[", "updated", ",", "following", ",", "folder", "]", ")", "==", "1", "if", "folder", ":", "assert", "filter_folder", "if", "updated", ":", "filter_type", "=", "dict", "(", "updated", "=", "1", ")", "elif", "following", ":", "filter_type", "=", "dict", "(", "following", "=", "1", ")", "else", ":", "filter_type", "=", "dict", "(", "folder", "=", "1", ",", "filter_folder", "=", "filter_folder", ")", "r", "=", "self", ".", "request", "(", "nid", "=", "nid", ",", "method", "=", "\"network.filter_feed\"", ",", "data", "=", "dict", "(", "sort", "=", "sort", ",", "*", "*", "filter_type", ")", ")", "return", "self", ".", "_handle_error", "(", "r", ",", "\"Could not retrieve filtered feed.\"", ")" ]
36.869565
0.002298
def _copy_new_parent(self, parent): """Copy the current param to a new parent, must be a dummy param.""" if self.parent == "undefined": param = copy.copy(self) param.parent = parent.uid return param else: raise ValueError("Cannot copy from non-dummy parent %s." % parent)
[ "def", "_copy_new_parent", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "parent", "==", "\"undefined\"", ":", "param", "=", "copy", ".", "copy", "(", "self", ")", "param", ".", "parent", "=", "parent", ".", "uid", "return", "param", "else", ":", "raise", "ValueError", "(", "\"Cannot copy from non-dummy parent %s.\"", "%", "parent", ")" ]
42
0.005831
def add_user(uid, password, desc=None): """ Adds user to the DCOS Enterprise. If not description is provided the uid will be used for the description. :param uid: user id :type uid: str :param password: password :type password: str :param desc: description of user :type desc: str """ try: desc = uid if desc is None else desc user_object = {"description": desc, "password": password} acl_url = urljoin(_acl_url(), 'users/{}'.format(uid)) r = http.put(acl_url, json=user_object) assert r.status_code == 201 except DCOSHTTPException as e: # already exists if e.response.status_code != 409: raise
[ "def", "add_user", "(", "uid", ",", "password", ",", "desc", "=", "None", ")", ":", "try", ":", "desc", "=", "uid", "if", "desc", "is", "None", "else", "desc", "user_object", "=", "{", "\"description\"", ":", "desc", ",", "\"password\"", ":", "password", "}", "acl_url", "=", "urljoin", "(", "_acl_url", "(", ")", ",", "'users/{}'", ".", "format", "(", "uid", ")", ")", "r", "=", "http", ".", "put", "(", "acl_url", ",", "json", "=", "user_object", ")", "assert", "r", ".", "status_code", "==", "201", "except", "DCOSHTTPException", "as", "e", ":", "# already exists", "if", "e", ".", "response", ".", "status_code", "!=", "409", ":", "raise" ]
34.095238
0.001359
def text(length, choices=string.ascii_letters): """ returns a random (fixed length) string :param length: string length :param choices: string containing all the chars can be used to build the string .. seealso:: :py:func:`rtext` """ return ''.join(choice(choices) for x in range(length))
[ "def", "text", "(", "length", ",", "choices", "=", "string", ".", "ascii_letters", ")", ":", "return", "''", ".", "join", "(", "choice", "(", "choices", ")", "for", "x", "in", "range", "(", "length", ")", ")" ]
31.2
0.006231
def get_context_data(self,**kwargs): ''' Add the event and series listing data ''' context = self.get_listing() context['showDescriptionRule'] = getConstant('registration__showDescriptionRule') or 'all' context.update(kwargs) # Update the site session data so that registration processes know to send return links to # the registration page. set_return_page() is in SiteHistoryMixin. self.set_return_page('registration',_('Registration')) return super(ClassRegistrationView,self).get_context_data(**context)
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "self", ".", "get_listing", "(", ")", "context", "[", "'showDescriptionRule'", "]", "=", "getConstant", "(", "'registration__showDescriptionRule'", ")", "or", "'all'", "context", ".", "update", "(", "kwargs", ")", "# Update the site session data so that registration processes know to send return links to", "# the registration page. set_return_page() is in SiteHistoryMixin.", "self", ".", "set_return_page", "(", "'registration'", ",", "_", "(", "'Registration'", ")", ")", "return", "super", "(", "ClassRegistrationView", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "context", ")" ]
51.272727
0.012195
def _fix_pooling(pool_type, inputs, new_attr): """onnx pooling operator supports asymmetrical padding Adding pad operator before pooling in mxnet to work with onnx""" stride = new_attr.get('stride') kernel = new_attr.get('kernel') padding = new_attr.get('pad') p_value = new_attr.get('p_value') # Adding default stride. if stride is None: stride = (1,) * len(kernel) # Add padding attr if not provided. if padding is None: padding = (0,) * len(kernel) * 2 # Mxnet Pad operator supports only 4D/5D tensors. # For 1D case, these are the steps: # Step 1. Add extra dummy dimension to make it 4D. Adding to axis = 2 # Step 2. Apply padding to this changed tensor # Step 3. Remove the extra dimension added in step 1. if len(kernel) == 1: dummy_axis = 2 # setting 0 padding to the new dim to be added. padding = (0, padding[0], 0, padding[1]) pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=2) # Step 1. curr_sym = symbol.expand_dims(inputs[0], axis=dummy_axis) # Step 2. Common for all tensor sizes new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width) # Step 3: Removing extra dim added. new_pad_op = symbol.split(new_pad_op, axis=dummy_axis, num_outputs=1, squeeze_axis=1) else: # For 2D/3D cases: # Apply padding pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=len(kernel)) curr_sym = inputs[0] if pool_type == 'max': # For max pool : mode = 'edge', we should replicate the # edge values to pad, so that we only include input data values # for calculating 'max' new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width) else: # For avg pool, we should add 'zeros' for padding so mode='constant' new_pad_op = symbol.pad(curr_sym, mode='constant', pad_width=pad_width) # Apply pooling without pads. if pool_type == 'lp': new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel, p_value=p_value) else: new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel) return new_pooling_op
[ "def", "_fix_pooling", "(", "pool_type", ",", "inputs", ",", "new_attr", ")", ":", "stride", "=", "new_attr", ".", "get", "(", "'stride'", ")", "kernel", "=", "new_attr", ".", "get", "(", "'kernel'", ")", "padding", "=", "new_attr", ".", "get", "(", "'pad'", ")", "p_value", "=", "new_attr", ".", "get", "(", "'p_value'", ")", "# Adding default stride.", "if", "stride", "is", "None", ":", "stride", "=", "(", "1", ",", ")", "*", "len", "(", "kernel", ")", "# Add padding attr if not provided.", "if", "padding", "is", "None", ":", "padding", "=", "(", "0", ",", ")", "*", "len", "(", "kernel", ")", "*", "2", "# Mxnet Pad operator supports only 4D/5D tensors.", "# For 1D case, these are the steps:", "# Step 1. Add extra dummy dimension to make it 4D. Adding to axis = 2", "# Step 2. Apply padding to this changed tensor", "# Step 3. Remove the extra dimension added in step 1.", "if", "len", "(", "kernel", ")", "==", "1", ":", "dummy_axis", "=", "2", "# setting 0 padding to the new dim to be added.", "padding", "=", "(", "0", ",", "padding", "[", "0", "]", ",", "0", ",", "padding", "[", "1", "]", ")", "pad_width", "=", "(", "0", ",", "0", ",", "0", ",", "0", ")", "+", "_pad_sequence_fix", "(", "padding", ",", "kernel_dim", "=", "2", ")", "# Step 1.", "curr_sym", "=", "symbol", ".", "expand_dims", "(", "inputs", "[", "0", "]", ",", "axis", "=", "dummy_axis", ")", "# Step 2. Common for all tensor sizes", "new_pad_op", "=", "symbol", ".", "pad", "(", "curr_sym", ",", "mode", "=", "'edge'", ",", "pad_width", "=", "pad_width", ")", "# Step 3: Removing extra dim added.", "new_pad_op", "=", "symbol", ".", "split", "(", "new_pad_op", ",", "axis", "=", "dummy_axis", ",", "num_outputs", "=", "1", ",", "squeeze_axis", "=", "1", ")", "else", ":", "# For 2D/3D cases:", "# Apply padding", "pad_width", "=", "(", "0", ",", "0", ",", "0", ",", "0", ")", "+", "_pad_sequence_fix", "(", "padding", ",", "kernel_dim", "=", "len", "(", "kernel", ")", ")", "curr_sym", "=", "inputs", "[", "0", "]", "if", "pool_type", "==", "'max'", ":", "# For max pool : mode = 'edge', we should replicate the", "# edge values to pad, so that we only include input data values", "# for calculating 'max'", "new_pad_op", "=", "symbol", ".", "pad", "(", "curr_sym", ",", "mode", "=", "'edge'", ",", "pad_width", "=", "pad_width", ")", "else", ":", "# For avg pool, we should add 'zeros' for padding so mode='constant'", "new_pad_op", "=", "symbol", ".", "pad", "(", "curr_sym", ",", "mode", "=", "'constant'", ",", "pad_width", "=", "pad_width", ")", "# Apply pooling without pads.", "if", "pool_type", "==", "'lp'", ":", "new_pooling_op", "=", "symbol", ".", "Pooling", "(", "new_pad_op", ",", "pool_type", "=", "pool_type", ",", "stride", "=", "stride", ",", "kernel", "=", "kernel", ",", "p_value", "=", "p_value", ")", "else", ":", "new_pooling_op", "=", "symbol", ".", "Pooling", "(", "new_pad_op", ",", "pool_type", "=", "pool_type", ",", "stride", "=", "stride", ",", "kernel", "=", "kernel", ")", "return", "new_pooling_op" ]
40.910714
0.002984
def search_read_all(self, domain, order, fields, batch_size=500, context=None, offset=0, limit=None): """ An endless iterator that iterates over records. :param domain: A search domain :param order: The order clause for search read :param fields: The fields argument for search_read :param batch_size: The optimal batch size when sending paginated requests """ if context is None: context = {} if limit is None: # When no limit is specified, all the records # should be fetched. record_count = self.search_count(domain, context=context) end = record_count + offset else: end = limit + offset for page_offset in range(offset, end, batch_size): if page_offset + batch_size > end: batch_size = end - page_offset for record in self.search_read( domain, page_offset, batch_size, order, fields, context=context): yield record
[ "def", "search_read_all", "(", "self", ",", "domain", ",", "order", ",", "fields", ",", "batch_size", "=", "500", ",", "context", "=", "None", ",", "offset", "=", "0", ",", "limit", "=", "None", ")", ":", "if", "context", "is", "None", ":", "context", "=", "{", "}", "if", "limit", "is", "None", ":", "# When no limit is specified, all the records", "# should be fetched.", "record_count", "=", "self", ".", "search_count", "(", "domain", ",", "context", "=", "context", ")", "end", "=", "record_count", "+", "offset", "else", ":", "end", "=", "limit", "+", "offset", "for", "page_offset", "in", "range", "(", "offset", ",", "end", ",", "batch_size", ")", ":", "if", "page_offset", "+", "batch_size", ">", "end", ":", "batch_size", "=", "end", "-", "page_offset", "for", "record", "in", "self", ".", "search_read", "(", "domain", ",", "page_offset", ",", "batch_size", ",", "order", ",", "fields", ",", "context", "=", "context", ")", ":", "yield", "record" ]
37.965517
0.002657
def get_base_arguments(parser): """ Append base arguments icetea run arguments to parser. :param parser: argument parser :return: ArgumentParser """ thisfilepath = os.path.abspath(os.path.dirname(__file__)) group = parser.add_mutually_exclusive_group(required=False) group.add_argument('--list', action='store_true', help='List of available testcases(nothing else)', default=False) group.add_argument('--listsuites', action='store_true', help='List of available suites', default=False) group.add_argument('--tc', help='execute testcase. Give test index, name, list of indices/' 'names, or all to execute all testcases', default=False) group.add_argument('--suite', default=False, help='Run tests from suite json file <suite>. Can be absolute path to ' 'suite file or path relative to --suitedir.') group.add_argument('--version', action='store_true', default=False, help='Show version') # Filters filter_group = parser.add_argument_group("Filter arguments", "Arguments used for filtering " "tc:s") filter_group.add_argument('--status', default=False, help='Run all testcases with status <status>') filter_group.add_argument('--group', default=False, help='Run all testcases that have all items ' 'in <group/subgroup> or <group,group2> in their group path.') filter_group.add_argument('--testtype', default=False, help='Run all testcases with type <testtype>') filter_group.add_argument('--subtype', default=False, help="Run all testcases with subtype <subtype") filter_group.add_argument('--component', default=False, help='Run all testcases with component <component>') filter_group.add_argument('--feature', default=False, help='Run all testcases with feature <feature>') filter_group.add_argument("--platform_filter", default=False, help="Run all testcases that allow platform <platform_filter>") # JobId is BUILD_TAG (from Jenkins job), or generated UUID or command line argument value info_group = parser.add_argument_group("Run information", "Information of run, such as job " "id and git or build information.") info_group.add_argument('--jobId', default=os.environ.get('BUILD_TAG', str(uuid.uuid1())), help='Job Unique ID') info_group.add_argument('--gitUrl', default=os.environ.get('ghprbAuthorRepoGitUrl', None), help='Set application used git url for results') info_group.add_argument('--branch', default=os.environ.get('GIT_BRANCH', 'master'), help='Set used build branch for results') info_group.add_argument('--commitId', default=os.environ.get('ghprbActualCommit', None), help='Set used commit ID for results') info_group.add_argument('--buildDate', default=None, help='Set build date') info_group.add_argument('--toolchain', default=None, help='Set toolchain for results') info_group.add_argument('--buildUrl', default=os.environ.get('BUILD_URL', None), help='Set build url for results') info_group.add_argument('--campaign', default=os.environ.get('JOB_NAME', None), help='Set campaign name for results') # Directories and paths directories = parser.add_argument_group("Paths", "Directory and file paths for various " "Icetea features.") directories.add_argument('--tcdir', help='Search for testcases in directory <path>', default='./testcases') directories.add_argument('--suitedir', help='Search for suites in directory <path>', default='./testcases/suites') directories.add_argument("--cfg_file", type=open, default=None, help="Load cli parameters from file. " "This will overwrite parameters given before --cfg_file, but " "results of this will be overwritten by " "parameters given after this one", action=LoadFromFile) directories.add_argument('--plugin_path', default=os.path.abspath( os.path.join(thisfilepath, "Plugin/plugins/plugins_to_load")), help="location of file called plugins_to_load, " "where custom plugins are imported from.") # Allocator group alloc_group = parser.add_argument_group("Allocator", "Control allocation of resources for " "tests.") alloc_group.add_argument("--allocator", default="LocalAllocator", help="Allocator to be used for allocating resources. " "Default is LocalAllocator") alloc_group.add_argument("--allocator_cfg", help="File that contains configuration for used allocator.", default=None) # Other arguments parser.add_argument('--env_cfg', help='Use user specific environment configuration file', default='') parser.add_argument("--logging_cfg", help="Location of JSON configuration for logging.", default=None) parser.add_argument('--repeat', help='Repeat testcases N times', default=1) parser.add_argument('--stop_on_failure', help='Stop testruns/repeation on first failed TC', default=False, action="store_true") parser.add_argument('--clean', action='store_true', default=False, help='Clean old logs') parser.add_argument('--connector', default=None, help='Connector credentials for selecting and/or generating endpoint ' 'certificates. Format should be domain[:token] where token is ' 'optional. Eg. --connector this_is_some_domain:this_is_my_token') parser.add_argument('--failure_return_value', default=False, action="store_true", help='Sets Icetea to return a failing code to caller if ' 'one or more tests fail during the run. Default is False') parser.add_argument('--color', default=False, action="store_true", help='Indicates if console logs are printed plain' ' or with colours. Default is False for plain' 'logs.') parser.add_argument("--check_version", default=False, action="store_true", help="Enables version checks for test cases.") parser.add_argument('--ignore_invalid_params', default=False, action="store_true", help="Disables checks for invalid parameters.") parser.add_argument('--parallel_flash', default=False, action="store_true", help="Enables parallel flash.") parser.add_argument('--disable_log_truncate', default=False, action="store_true", help="Disable long log line truncating. Over 10000" "characters long lines are truncated by default.") parser.add_argument('--cm', default="opentmi_client", help='name of module that is to be used to send results to a cloud ' 'service.') parser.add_argument("--json", action="store_true", default=False, help="Output results of --list as json instead of a table.") parser.add_argument("--export", default=None, metavar="SUITE_FILE_NAME", help="Export list into suite template file.") parser.add_argument("--sync_start", default=False, action="store_true", help="Use echo-command to try and make sure duts have " "started before proceeding with test.") return parser
[ "def", "get_base_arguments", "(", "parser", ")", ":", "thisfilepath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", "required", "=", "False", ")", "group", ".", "add_argument", "(", "'--list'", ",", "action", "=", "'store_true'", ",", "help", "=", "'List of available testcases(nothing else)'", ",", "default", "=", "False", ")", "group", ".", "add_argument", "(", "'--listsuites'", ",", "action", "=", "'store_true'", ",", "help", "=", "'List of available suites'", ",", "default", "=", "False", ")", "group", ".", "add_argument", "(", "'--tc'", ",", "help", "=", "'execute testcase. Give test index, name, list of indices/'", "'names, or all to execute all testcases'", ",", "default", "=", "False", ")", "group", ".", "add_argument", "(", "'--suite'", ",", "default", "=", "False", ",", "help", "=", "'Run tests from suite json file <suite>. Can be absolute path to '", "'suite file or path relative to --suitedir.'", ")", "group", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Show version'", ")", "# Filters", "filter_group", "=", "parser", ".", "add_argument_group", "(", "\"Filter arguments\"", ",", "\"Arguments used for filtering \"", "\"tc:s\"", ")", "filter_group", ".", "add_argument", "(", "'--status'", ",", "default", "=", "False", ",", "help", "=", "'Run all testcases with status <status>'", ")", "filter_group", ".", "add_argument", "(", "'--group'", ",", "default", "=", "False", ",", "help", "=", "'Run all testcases that have all items '", "'in <group/subgroup> or <group,group2> in their group path.'", ")", "filter_group", ".", "add_argument", "(", "'--testtype'", ",", "default", "=", "False", ",", "help", "=", "'Run all testcases with type <testtype>'", ")", "filter_group", ".", "add_argument", "(", "'--subtype'", ",", "default", "=", "False", ",", "help", "=", "\"Run all testcases with subtype <subtype\"", ")", "filter_group", ".", "add_argument", "(", "'--component'", ",", "default", "=", "False", ",", "help", "=", "'Run all testcases with component <component>'", ")", "filter_group", ".", "add_argument", "(", "'--feature'", ",", "default", "=", "False", ",", "help", "=", "'Run all testcases with feature <feature>'", ")", "filter_group", ".", "add_argument", "(", "\"--platform_filter\"", ",", "default", "=", "False", ",", "help", "=", "\"Run all testcases that allow platform <platform_filter>\"", ")", "# JobId is BUILD_TAG (from Jenkins job), or generated UUID or command line argument value", "info_group", "=", "parser", ".", "add_argument_group", "(", "\"Run information\"", ",", "\"Information of run, such as job \"", "\"id and git or build information.\"", ")", "info_group", ".", "add_argument", "(", "'--jobId'", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'BUILD_TAG'", ",", "str", "(", "uuid", ".", "uuid1", "(", ")", ")", ")", ",", "help", "=", "'Job Unique ID'", ")", "info_group", ".", "add_argument", "(", "'--gitUrl'", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ghprbAuthorRepoGitUrl'", ",", "None", ")", ",", "help", "=", "'Set application used git url for results'", ")", "info_group", ".", "add_argument", "(", "'--branch'", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'GIT_BRANCH'", ",", "'master'", ")", ",", "help", "=", "'Set used build branch for results'", ")", "info_group", ".", "add_argument", "(", "'--commitId'", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ghprbActualCommit'", ",", "None", ")", ",", "help", "=", "'Set used commit ID for results'", ")", "info_group", ".", "add_argument", "(", "'--buildDate'", ",", "default", "=", "None", ",", "help", "=", "'Set build date'", ")", "info_group", ".", "add_argument", "(", "'--toolchain'", ",", "default", "=", "None", ",", "help", "=", "'Set toolchain for results'", ")", "info_group", ".", "add_argument", "(", "'--buildUrl'", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'BUILD_URL'", ",", "None", ")", ",", "help", "=", "'Set build url for results'", ")", "info_group", ".", "add_argument", "(", "'--campaign'", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'JOB_NAME'", ",", "None", ")", ",", "help", "=", "'Set campaign name for results'", ")", "# Directories and paths", "directories", "=", "parser", ".", "add_argument_group", "(", "\"Paths\"", ",", "\"Directory and file paths for various \"", "\"Icetea features.\"", ")", "directories", ".", "add_argument", "(", "'--tcdir'", ",", "help", "=", "'Search for testcases in directory <path>'", ",", "default", "=", "'./testcases'", ")", "directories", ".", "add_argument", "(", "'--suitedir'", ",", "help", "=", "'Search for suites in directory <path>'", ",", "default", "=", "'./testcases/suites'", ")", "directories", ".", "add_argument", "(", "\"--cfg_file\"", ",", "type", "=", "open", ",", "default", "=", "None", ",", "help", "=", "\"Load cli parameters from file. \"", "\"This will overwrite parameters given before --cfg_file, but \"", "\"results of this will be overwritten by \"", "\"parameters given after this one\"", ",", "action", "=", "LoadFromFile", ")", "directories", ".", "add_argument", "(", "'--plugin_path'", ",", "default", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "thisfilepath", ",", "\"Plugin/plugins/plugins_to_load\"", ")", ")", ",", "help", "=", "\"location of file called plugins_to_load, \"", "\"where custom plugins are imported from.\"", ")", "# Allocator group", "alloc_group", "=", "parser", ".", "add_argument_group", "(", "\"Allocator\"", ",", "\"Control allocation of resources for \"", "\"tests.\"", ")", "alloc_group", ".", "add_argument", "(", "\"--allocator\"", ",", "default", "=", "\"LocalAllocator\"", ",", "help", "=", "\"Allocator to be used for allocating resources. \"", "\"Default is LocalAllocator\"", ")", "alloc_group", ".", "add_argument", "(", "\"--allocator_cfg\"", ",", "help", "=", "\"File that contains configuration for used allocator.\"", ",", "default", "=", "None", ")", "# Other arguments", "parser", ".", "add_argument", "(", "'--env_cfg'", ",", "help", "=", "'Use user specific environment configuration file'", ",", "default", "=", "''", ")", "parser", ".", "add_argument", "(", "\"--logging_cfg\"", ",", "help", "=", "\"Location of JSON configuration for logging.\"", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'--repeat'", ",", "help", "=", "'Repeat testcases N times'", ",", "default", "=", "1", ")", "parser", ".", "add_argument", "(", "'--stop_on_failure'", ",", "help", "=", "'Stop testruns/repeation on first failed TC'", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ")", "parser", ".", "add_argument", "(", "'--clean'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Clean old logs'", ")", "parser", ".", "add_argument", "(", "'--connector'", ",", "default", "=", "None", ",", "help", "=", "'Connector credentials for selecting and/or generating endpoint '", "'certificates. Format should be domain[:token] where token is '", "'optional. Eg. --connector this_is_some_domain:this_is_my_token'", ")", "parser", ".", "add_argument", "(", "'--failure_return_value'", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "'Sets Icetea to return a failing code to caller if '", "'one or more tests fail during the run. Default is False'", ")", "parser", ".", "add_argument", "(", "'--color'", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "'Indicates if console logs are printed plain'", "' or with colours. Default is False for plain'", "'logs.'", ")", "parser", ".", "add_argument", "(", "\"--check_version\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Enables version checks for test cases.\"", ")", "parser", ".", "add_argument", "(", "'--ignore_invalid_params'", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Disables checks for invalid parameters.\"", ")", "parser", ".", "add_argument", "(", "'--parallel_flash'", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Enables parallel flash.\"", ")", "parser", ".", "add_argument", "(", "'--disable_log_truncate'", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Disable long log line truncating. Over 10000\"", "\"characters long lines are truncated by default.\"", ")", "parser", ".", "add_argument", "(", "'--cm'", ",", "default", "=", "\"opentmi_client\"", ",", "help", "=", "'name of module that is to be used to send results to a cloud '", "'service.'", ")", "parser", ".", "add_argument", "(", "\"--json\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"Output results of --list as json instead of a table.\"", ")", "parser", ".", "add_argument", "(", "\"--export\"", ",", "default", "=", "None", ",", "metavar", "=", "\"SUITE_FILE_NAME\"", ",", "help", "=", "\"Export list into suite template file.\"", ")", "parser", ".", "add_argument", "(", "\"--sync_start\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Use echo-command to try and make sure duts have \"", "\"started before proceeding with test.\"", ")", "return", "parser" ]
53.313187
0.002631
def _process_loaded_object(self, path): """process the :paramref:`path`. :param str path: the path to load an svg from """ file_name = os.path.basename(path) name = os.path.splitext(file_name)[0] with open(path) as file: string = file.read() self._instruction_type_to_file_content[name] = string
[ "def", "_process_loaded_object", "(", "self", ",", "path", ")", ":", "file_name", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "name", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "[", "0", "]", "with", "open", "(", "path", ")", "as", "file", ":", "string", "=", "file", ".", "read", "(", ")", "self", ".", "_instruction_type_to_file_content", "[", "name", "]", "=", "string" ]
35.9
0.005435
def surface_area(self): r"""Calculate all atomic surface area. :rtype: [float] """ return [self.atomic_sa(i) for i in range(len(self.rads))]
[ "def", "surface_area", "(", "self", ")", ":", "return", "[", "self", ".", "atomic_sa", "(", "i", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "rads", ")", ")", "]" ]
28
0.011561
def rsa_pkcs1v15_sign(private_key, data, hash_algorithm): """ Generates an RSASSA-PKCS-v1.5 signature. When the hash_algorithm is "raw", the operation is identical to RSA private key encryption. That is: the data is not hashed and no ASN.1 structure with an algorithm identifier of the hash algorithm is placed in the encrypted byte string. :param private_key: The PrivateKey to generate the signature with :param data: A byte string of the data the signature is for :param hash_algorithm: A unicode string of "md5", "sha1", "sha224", "sha256", "sha384", "sha512" or "raw" :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the signature """ if private_key.algorithm != 'rsa': raise ValueError(pretty_message( ''' The key specified is not an RSA private key, but %s ''', private_key.algorithm.upper() )) return _sign(private_key, data, hash_algorithm)
[ "def", "rsa_pkcs1v15_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ")", ":", "if", "private_key", ".", "algorithm", "!=", "'rsa'", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n The key specified is not an RSA private key, but %s\n '''", ",", "private_key", ".", "algorithm", ".", "upper", "(", ")", ")", ")", "return", "_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ")" ]
31.837838
0.000824
def _remove(self, rtracker): """ Remove a resource from the pool. :param rtracker: A resource. :type rtracker: :class:`_ResourceTracker` """ with self._lock: i = self._reference_queue.index(rtracker) self._reference_queue[i] = None self._size -= 1
[ "def", "_remove", "(", "self", ",", "rtracker", ")", ":", "with", "self", ".", "_lock", ":", "i", "=", "self", ".", "_reference_queue", ".", "index", "(", "rtracker", ")", "self", ".", "_reference_queue", "[", "i", "]", "=", "None", "self", ".", "_size", "-=", "1" ]
29.272727
0.006024
def subn_filter(s, find, replace, count=0): """A non-optimal implementation of a regex filter""" return re.gsub(find, replace, count, s)
[ "def", "subn_filter", "(", "s", ",", "find", ",", "replace", ",", "count", "=", "0", ")", ":", "return", "re", ".", "gsub", "(", "find", ",", "replace", ",", "count", ",", "s", ")" ]
47.333333
0.006944
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument """ Inserts a batch of completions. REST Endpoint Format: { "username": "username", "course_key": "course-key", "blocks": { "block_key1": 0.0, "block_key2": 1.0, "block_key3": 1.0, } } **Returns** A Response object, with an appropriate status code. If successful, status code is 200. { "detail" : _("ok") } Otherwise, a 400 or 404 may be returned, and the "detail" content will explain the error. """ batch_object = request.data or {} try: user, course_key, blocks = self._validate_and_parse(batch_object) BlockCompletion.objects.submit_batch_completion(user, course_key, blocks) except ValidationError as exc: return Response({ "detail": _(' ').join(text_type(msg) for msg in exc.messages), }, status=status.HTTP_400_BAD_REQUEST) except ValueError as exc: return Response({ "detail": text_type(exc), }, status=status.HTTP_400_BAD_REQUEST) except ObjectDoesNotExist as exc: return Response({ "detail": text_type(exc), }, status=status.HTTP_404_NOT_FOUND) except DatabaseError as exc: return Response({ "detail": text_type(exc), }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "batch_object", "=", "request", ".", "data", "or", "{", "}", "try", ":", "user", ",", "course_key", ",", "blocks", "=", "self", ".", "_validate_and_parse", "(", "batch_object", ")", "BlockCompletion", ".", "objects", ".", "submit_batch_completion", "(", "user", ",", "course_key", ",", "blocks", ")", "except", "ValidationError", "as", "exc", ":", "return", "Response", "(", "{", "\"detail\"", ":", "_", "(", "' '", ")", ".", "join", "(", "text_type", "(", "msg", ")", "for", "msg", "in", "exc", ".", "messages", ")", ",", "}", ",", "status", "=", "status", ".", "HTTP_400_BAD_REQUEST", ")", "except", "ValueError", "as", "exc", ":", "return", "Response", "(", "{", "\"detail\"", ":", "text_type", "(", "exc", ")", ",", "}", ",", "status", "=", "status", ".", "HTTP_400_BAD_REQUEST", ")", "except", "ObjectDoesNotExist", "as", "exc", ":", "return", "Response", "(", "{", "\"detail\"", ":", "text_type", "(", "exc", ")", ",", "}", ",", "status", "=", "status", ".", "HTTP_404_NOT_FOUND", ")", "except", "DatabaseError", "as", "exc", ":", "return", "Response", "(", "{", "\"detail\"", ":", "text_type", "(", "exc", ")", ",", "}", ",", "status", "=", "status", ".", "HTTP_500_INTERNAL_SERVER_ERROR", ")", "return", "Response", "(", "{", "\"detail\"", ":", "_", "(", "\"ok\"", ")", "}", ",", "status", "=", "status", ".", "HTTP_200_OK", ")" ]
32.632653
0.002429
def mouseDown(self, button): """ Send a mouse button down at the last set position button: int: [1-n] """ log.debug('mouseDown %s', button) self.buttons |= 1 << (button - 1) self.pointerEvent(self.x, self.y, buttonmask=self.buttons) return self
[ "def", "mouseDown", "(", "self", ",", "button", ")", ":", "log", ".", "debug", "(", "'mouseDown %s'", ",", "button", ")", "self", ".", "buttons", "|=", "1", "<<", "(", "button", "-", "1", ")", "self", ".", "pointerEvent", "(", "self", ".", "x", ",", "self", ".", "y", ",", "buttonmask", "=", "self", ".", "buttons", ")", "return", "self" ]
27
0.006515
def _generate_normals(polygons): """ Takes a list of polygons and return an array of their normals. Normals point towards the viewer for a face with its vertices in counterclockwise order, following the right hand rule. Uses three points equally spaced around the polygon. This normal of course might not make sense for polygons with more than three points not lying in a plane, but it's a plausible and fast approximation. Args: polygons (list): list of (M_i, 3) array_like, or (..., M, 3) array_like A sequence of polygons to compute normals for, which can have varying numbers of vertices. If the polygons all have the same number of vertices and array is passed, then the operation will be vectorized. Returns: normals: (..., 3) array_like A normal vector estimated for the polygon. """ if isinstance(polygons, np.ndarray): # optimization: polygons all have the same number of points, so can # vectorize n = polygons.shape[-2] i1, i2, i3 = 0, n//3, 2*n//3 v1 = polygons[..., i1, :] - polygons[..., i2, :] v2 = polygons[..., i2, :] - polygons[..., i3, :] else: # The subtraction doesn't vectorize because polygons is jagged. v1 = np.empty((len(polygons), 3)) v2 = np.empty((len(polygons), 3)) for poly_i, ps in enumerate(polygons): n = len(ps) i1, i2, i3 = 0, n//3, 2*n//3 v1[poly_i, :] = ps[i1, :] - ps[i2, :] v2[poly_i, :] = ps[i2, :] - ps[i3, :] return np.cross(v1, v2)
[ "def", "_generate_normals", "(", "polygons", ")", ":", "if", "isinstance", "(", "polygons", ",", "np", ".", "ndarray", ")", ":", "# optimization: polygons all have the same number of points, so can", "# vectorize", "n", "=", "polygons", ".", "shape", "[", "-", "2", "]", "i1", ",", "i2", ",", "i3", "=", "0", ",", "n", "//", "3", ",", "2", "*", "n", "//", "3", "v1", "=", "polygons", "[", "...", ",", "i1", ",", ":", "]", "-", "polygons", "[", "...", ",", "i2", ",", ":", "]", "v2", "=", "polygons", "[", "...", ",", "i2", ",", ":", "]", "-", "polygons", "[", "...", ",", "i3", ",", ":", "]", "else", ":", "# The subtraction doesn't vectorize because polygons is jagged.", "v1", "=", "np", ".", "empty", "(", "(", "len", "(", "polygons", ")", ",", "3", ")", ")", "v2", "=", "np", ".", "empty", "(", "(", "len", "(", "polygons", ")", ",", "3", ")", ")", "for", "poly_i", ",", "ps", "in", "enumerate", "(", "polygons", ")", ":", "n", "=", "len", "(", "ps", ")", "i1", ",", "i2", ",", "i3", "=", "0", ",", "n", "//", "3", ",", "2", "*", "n", "//", "3", "v1", "[", "poly_i", ",", ":", "]", "=", "ps", "[", "i1", ",", ":", "]", "-", "ps", "[", "i2", ",", ":", "]", "v2", "[", "poly_i", ",", ":", "]", "=", "ps", "[", "i2", ",", ":", "]", "-", "ps", "[", "i3", ",", ":", "]", "return", "np", ".", "cross", "(", "v1", ",", "v2", ")" ]
43.243243
0.000611
def getMargin(self, name): """Provides the requested margin. Returns a reference to the margin if found and None otherwise """ for margin in self._margins: if margin.getName() == name: return margin return None
[ "def", "getMargin", "(", "self", ",", "name", ")", ":", "for", "margin", "in", "self", ".", "_margins", ":", "if", "margin", ".", "getName", "(", ")", "==", "name", ":", "return", "margin", "return", "None" ]
34.25
0.007117
def read_ASCII_cols(infile, cols=[1, 2, 3]): # noqa: N802 """ Interpret input ASCII file to return arrays for specified columns. Notes ----- The specification of the columns should be expected to have lists for each 'column', with all columns in each list combined into a single entry. For example:: cols = ['1,2,3','4,5,6',7] where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec values and 7 represents the flux value for a total of 3 requested columns of data to be returned. Returns ------- outarr : list of arrays The return value will be a list of numpy arrays, one for each 'column'. """ # build dictionary representing format of each row # Format of dictionary: {'colname':col_number,...} # This provides the mapping between column name and column number coldict = {} with open(infile, 'r') as f: flines = f.readlines() for l in flines: # interpret each line from catalog file if l[0].lstrip() == '#' or l.lstrip() == '': continue else: # convert first row of data into column definitions using indices coldict = {str(i + 1): i for i, _ in enumerate(l.split())} break numcols = len(cols) outarr = [[] for _ in range(numcols)] convert_radec = False # Now, map specified columns to columns in file and populate output arrays for l in flines: # interpret each line from catalog file l = l.strip() lspl = l.split() # skip blank lines, comment lines, or lines with # fewer columns than requested by user if not l or len(lspl) < numcols or l[0] == '#' or "INDEF" in l: continue # For each 'column' requested by user, pull data from row for c, i in zip(cols, list(range(numcols))): cnames = parse_colname(c) if len(cnames) > 1: # interpret multi-column specification as one value outval = '' for cn in cnames: cnum = coldict[cn] cval = lspl[cnum] outval += cval + ' ' outarr[i].append(outval) convert_radec = True else: # pull single value from row for this column cnum = coldict[cnames[0]] if isfloat(lspl[cnum]): cval = float(lspl[cnum]) else: cval = lspl[cnum] # Check for multi-column values given as "nn:nn:nn.s" if ':' in cval: cval = cval.replace(':', ' ') convert_radec = True outarr[i].append(cval) # convert multi-column RA/Dec specifications if convert_radec: outra = [] outdec = [] for ra, dec in zip(outarr[0], outarr[1]): radd, decdd = radec_hmstodd(ra, dec) outra.append(radd) outdec.append(decdd) outarr[0] = outra outarr[1] = outdec # convert all lists to numpy arrays for c in range(len(outarr)): outarr[c] = np.array(outarr[c]) return outarr
[ "def", "read_ASCII_cols", "(", "infile", ",", "cols", "=", "[", "1", ",", "2", ",", "3", "]", ")", ":", "# noqa: N802", "# build dictionary representing format of each row", "# Format of dictionary: {'colname':col_number,...}", "# This provides the mapping between column name and column number", "coldict", "=", "{", "}", "with", "open", "(", "infile", ",", "'r'", ")", "as", "f", ":", "flines", "=", "f", ".", "readlines", "(", ")", "for", "l", "in", "flines", ":", "# interpret each line from catalog file", "if", "l", "[", "0", "]", ".", "lstrip", "(", ")", "==", "'#'", "or", "l", ".", "lstrip", "(", ")", "==", "''", ":", "continue", "else", ":", "# convert first row of data into column definitions using indices", "coldict", "=", "{", "str", "(", "i", "+", "1", ")", ":", "i", "for", "i", ",", "_", "in", "enumerate", "(", "l", ".", "split", "(", ")", ")", "}", "break", "numcols", "=", "len", "(", "cols", ")", "outarr", "=", "[", "[", "]", "for", "_", "in", "range", "(", "numcols", ")", "]", "convert_radec", "=", "False", "# Now, map specified columns to columns in file and populate output arrays", "for", "l", "in", "flines", ":", "# interpret each line from catalog file", "l", "=", "l", ".", "strip", "(", ")", "lspl", "=", "l", ".", "split", "(", ")", "# skip blank lines, comment lines, or lines with", "# fewer columns than requested by user", "if", "not", "l", "or", "len", "(", "lspl", ")", "<", "numcols", "or", "l", "[", "0", "]", "==", "'#'", "or", "\"INDEF\"", "in", "l", ":", "continue", "# For each 'column' requested by user, pull data from row", "for", "c", ",", "i", "in", "zip", "(", "cols", ",", "list", "(", "range", "(", "numcols", ")", ")", ")", ":", "cnames", "=", "parse_colname", "(", "c", ")", "if", "len", "(", "cnames", ")", ">", "1", ":", "# interpret multi-column specification as one value", "outval", "=", "''", "for", "cn", "in", "cnames", ":", "cnum", "=", "coldict", "[", "cn", "]", "cval", "=", "lspl", "[", "cnum", "]", "outval", "+=", "cval", "+", "' '", "outarr", "[", "i", "]", ".", "append", "(", "outval", ")", "convert_radec", "=", "True", "else", ":", "# pull single value from row for this column", "cnum", "=", "coldict", "[", "cnames", "[", "0", "]", "]", "if", "isfloat", "(", "lspl", "[", "cnum", "]", ")", ":", "cval", "=", "float", "(", "lspl", "[", "cnum", "]", ")", "else", ":", "cval", "=", "lspl", "[", "cnum", "]", "# Check for multi-column values given as \"nn:nn:nn.s\"", "if", "':'", "in", "cval", ":", "cval", "=", "cval", ".", "replace", "(", "':'", ",", "' '", ")", "convert_radec", "=", "True", "outarr", "[", "i", "]", ".", "append", "(", "cval", ")", "# convert multi-column RA/Dec specifications", "if", "convert_radec", ":", "outra", "=", "[", "]", "outdec", "=", "[", "]", "for", "ra", ",", "dec", "in", "zip", "(", "outarr", "[", "0", "]", ",", "outarr", "[", "1", "]", ")", ":", "radd", ",", "decdd", "=", "radec_hmstodd", "(", "ra", ",", "dec", ")", "outra", ".", "append", "(", "radd", ")", "outdec", ".", "append", "(", "decdd", ")", "outarr", "[", "0", "]", "=", "outra", "outarr", "[", "1", "]", "=", "outdec", "# convert all lists to numpy arrays", "for", "c", "in", "range", "(", "len", "(", "outarr", ")", ")", ":", "outarr", "[", "c", "]", "=", "np", ".", "array", "(", "outarr", "[", "c", "]", ")", "return", "outarr" ]
34.913043
0.001211
def sun_declination(day): """Compute the declination angle of the sun for the given date. Uses the Spencer Formula (found at http://www.illustratingshadows.com/www-formulae-collection.pdf) :param day: The datetime.date to compute the declination angle for :returns: The angle, in degrees, of the angle of declination """ day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal() day_angle = 2 * pi * day_of_year / 365 declination_radians = sum([ 0.006918, 0.001480*sin(3*day_angle), 0.070257*sin(day_angle), 0.000907*sin(2*day_angle), -0.399912*cos(day_angle), -0.006758*cos(2*day_angle), -0.002697*cos(3*day_angle), ]) return degrees(declination_radians)
[ "def", "sun_declination", "(", "day", ")", ":", "day_of_year", "=", "day", ".", "toordinal", "(", ")", "-", "date", "(", "day", ".", "year", ",", "1", ",", "1", ")", ".", "toordinal", "(", ")", "day_angle", "=", "2", "*", "pi", "*", "day_of_year", "/", "365", "declination_radians", "=", "sum", "(", "[", "0.006918", ",", "0.001480", "*", "sin", "(", "3", "*", "day_angle", ")", ",", "0.070257", "*", "sin", "(", "day_angle", ")", ",", "0.000907", "*", "sin", "(", "2", "*", "day_angle", ")", ",", "-", "0.399912", "*", "cos", "(", "day_angle", ")", ",", "-", "0.006758", "*", "cos", "(", "2", "*", "day_angle", ")", ",", "-", "0.002697", "*", "cos", "(", "3", "*", "day_angle", ")", ",", "]", ")", "return", "degrees", "(", "declination_radians", ")" ]
33.818182
0.001307
def execute(path, arguments): """ Wrapper around execv(): * fork()s before exec()ing (in order to run the command in a subprocess) * wait for the subprocess to finish before returning (blocks the parent process) This is **hyper** simplistic. This *does not* handle **many** edge cases. *DO NOT DO THIS*: subprocess.check_call() does it better, and handle edge cases. """ pid = os.fork() if pid == 0: try: os.execv(path, arguments) finally: sys.exit(1) # In case path is not executable else: try: # Wait for subprocess to finish os.waitpid(pid, NORMAL_PIDWAIT) except OSError: pass # The subprocess was already finish return
[ "def", "execute", "(", "path", ",", "arguments", ")", ":", "pid", "=", "os", ".", "fork", "(", ")", "if", "pid", "==", "0", ":", "try", ":", "os", ".", "execv", "(", "path", ",", "arguments", ")", "finally", ":", "sys", ".", "exit", "(", "1", ")", "# In case path is not executable", "else", ":", "try", ":", "# Wait for subprocess to finish", "os", ".", "waitpid", "(", "pid", ",", "NORMAL_PIDWAIT", ")", "except", "OSError", ":", "pass", "# The subprocess was already finish", "return" ]
28.961538
0.001285
def get_analysis_data_for(self, ar): """Return the Analysis data for this AR """ # Exclude analyses from children (partitions) analyses = ar.objectValues("Analysis") out = [] for an in analyses: info = self.get_base_info(an) info.update({ "service_uid": an.getServiceUID(), }) out.append(info) return out
[ "def", "get_analysis_data_for", "(", "self", ",", "ar", ")", ":", "# Exclude analyses from children (partitions)", "analyses", "=", "ar", ".", "objectValues", "(", "\"Analysis\"", ")", "out", "=", "[", "]", "for", "an", "in", "analyses", ":", "info", "=", "self", ".", "get_base_info", "(", "an", ")", "info", ".", "update", "(", "{", "\"service_uid\"", ":", "an", ".", "getServiceUID", "(", ")", ",", "}", ")", "out", ".", "append", "(", "info", ")", "return", "out" ]
31.692308
0.004717
def filter_leading_non_json_lines(buf): ''' used to avoid random output from SSH at the top of JSON output, like messages from tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). need to filter anything which starts not with '{', '[', ', '=' or is an empty line. filter only leading lines since multiline JSON is valid. ''' filtered_lines = StringIO.StringIO() stop_filtering = False for line in buf.splitlines(): if stop_filtering or "=" in line or line.startswith('{') or line.startswith('['): stop_filtering = True filtered_lines.write(line + '\n') return filtered_lines.getvalue()
[ "def", "filter_leading_non_json_lines", "(", "buf", ")", ":", "filtered_lines", "=", "StringIO", ".", "StringIO", "(", ")", "stop_filtering", "=", "False", "for", "line", "in", "buf", ".", "splitlines", "(", ")", ":", "if", "stop_filtering", "or", "\"=\"", "in", "line", "or", "line", ".", "startswith", "(", "'{'", ")", "or", "line", ".", "startswith", "(", "'['", ")", ":", "stop_filtering", "=", "True", "filtered_lines", ".", "write", "(", "line", "+", "'\\n'", ")", "return", "filtered_lines", ".", "getvalue", "(", ")" ]
42.0625
0.007267
def get_devices(self, refresh=False, generic_type=None): """Get all devices from Lupusec.""" _LOGGER.info("Updating all devices...") if refresh or self._devices is None: if self._devices is None: self._devices = {} responseObject = self.get_sensors() if (responseObject and not isinstance(responseObject, (tuple, list))): responseObject = responseObject for deviceJson in responseObject: # Attempt to reuse an existing device device = self._devices.get(deviceJson['name']) # No existing device, create a new one if device: device.update(deviceJson) else: device = newDevice(deviceJson, self) if not device: _LOGGER.info('Device is unknown') continue self._devices[device.device_id] = device # We will be treating the Lupusec panel itself as an armable device. panelJson = self.get_panel() _LOGGER.debug("Get the panel in get_devices: %s", panelJson) self._panel.update(panelJson) alarmDevice = self._devices.get('0') if alarmDevice: alarmDevice.update(panelJson) else: alarmDevice = ALARM.create_alarm(panelJson, self) self._devices['0'] = alarmDevice # Now we will handle the power switches switches = self.get_power_switches() _LOGGER.debug( 'Get active the power switches in get_devices: %s', switches) for deviceJson in switches: # Attempt to reuse an existing device device = self._devices.get(deviceJson['name']) # No existing device, create a new one if device: device.update(deviceJson) else: device = newDevice(deviceJson, self) if not device: _LOGGER.info('Device is unknown') continue self._devices[device.device_id] = device if generic_type: devices = [] for device in self._devices.values(): if (device.type is not None and device.type in generic_type[0]): devices.append(device) return devices return list(self._devices.values())
[ "def", "get_devices", "(", "self", ",", "refresh", "=", "False", ",", "generic_type", "=", "None", ")", ":", "_LOGGER", ".", "info", "(", "\"Updating all devices...\"", ")", "if", "refresh", "or", "self", ".", "_devices", "is", "None", ":", "if", "self", ".", "_devices", "is", "None", ":", "self", ".", "_devices", "=", "{", "}", "responseObject", "=", "self", ".", "get_sensors", "(", ")", "if", "(", "responseObject", "and", "not", "isinstance", "(", "responseObject", ",", "(", "tuple", ",", "list", ")", ")", ")", ":", "responseObject", "=", "responseObject", "for", "deviceJson", "in", "responseObject", ":", "# Attempt to reuse an existing device", "device", "=", "self", ".", "_devices", ".", "get", "(", "deviceJson", "[", "'name'", "]", ")", "# No existing device, create a new one", "if", "device", ":", "device", ".", "update", "(", "deviceJson", ")", "else", ":", "device", "=", "newDevice", "(", "deviceJson", ",", "self", ")", "if", "not", "device", ":", "_LOGGER", ".", "info", "(", "'Device is unknown'", ")", "continue", "self", ".", "_devices", "[", "device", ".", "device_id", "]", "=", "device", "# We will be treating the Lupusec panel itself as an armable device.", "panelJson", "=", "self", ".", "get_panel", "(", ")", "_LOGGER", ".", "debug", "(", "\"Get the panel in get_devices: %s\"", ",", "panelJson", ")", "self", ".", "_panel", ".", "update", "(", "panelJson", ")", "alarmDevice", "=", "self", ".", "_devices", ".", "get", "(", "'0'", ")", "if", "alarmDevice", ":", "alarmDevice", ".", "update", "(", "panelJson", ")", "else", ":", "alarmDevice", "=", "ALARM", ".", "create_alarm", "(", "panelJson", ",", "self", ")", "self", ".", "_devices", "[", "'0'", "]", "=", "alarmDevice", "# Now we will handle the power switches", "switches", "=", "self", ".", "get_power_switches", "(", ")", "_LOGGER", ".", "debug", "(", "'Get active the power switches in get_devices: %s'", ",", "switches", ")", "for", "deviceJson", "in", "switches", ":", "# Attempt to reuse an existing device", "device", "=", "self", ".", "_devices", ".", "get", "(", "deviceJson", "[", "'name'", "]", ")", "# No existing device, create a new one", "if", "device", ":", "device", ".", "update", "(", "deviceJson", ")", "else", ":", "device", "=", "newDevice", "(", "deviceJson", ",", "self", ")", "if", "not", "device", ":", "_LOGGER", ".", "info", "(", "'Device is unknown'", ")", "continue", "self", ".", "_devices", "[", "device", ".", "device_id", "]", "=", "device", "if", "generic_type", ":", "devices", "=", "[", "]", "for", "device", "in", "self", ".", "_devices", ".", "values", "(", ")", ":", "if", "(", "device", ".", "type", "is", "not", "None", "and", "device", ".", "type", "in", "generic_type", "[", "0", "]", ")", ":", "devices", ".", "append", "(", "device", ")", "return", "devices", "return", "list", "(", "self", ".", "_devices", ".", "values", "(", ")", ")" ]
36.228571
0.001152
def walk_tree(self): """Generator that yields each :class:`~bloop.stream.shard.Shard` by walking the shard's children in order.""" shards = collections.deque([self]) while shards: shard = shards.popleft() yield shard shards.extend(shard.children)
[ "def", "walk_tree", "(", "self", ")", ":", "shards", "=", "collections", ".", "deque", "(", "[", "self", "]", ")", "while", "shards", ":", "shard", "=", "shards", ".", "popleft", "(", ")", "yield", "shard", "shards", ".", "extend", "(", "shard", ".", "children", ")" ]
42.857143
0.009804
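For a sense of the traversal order, a minimal sketch that treats walk_tree above as a plain function and uses a made-up Node class in place of the real Shard:

import collections

class Node:                                   # made-up stand-in for the real Shard class
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

root = Node('root', [Node('a', [Node('a1')]), Node('b')])
print([s.name for s in walk_tree(root)])      # ['root', 'a', 'b', 'a1'] -- breadth-first order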
def predict_heatmap(pdf_path, page_num, model, img_dim=448, img_dir="tmp/img"):
    """
    Return an image corresponding to the page of the pdf
    document saved at pdf_path. If the image is not found in img_dir this
    function creates it and saves it in img_dir.

    :param pdf_path: path to the pdf document.
    :param page_num: page number to create image from in the pdf file.
    :param model: model used to predict the heatmap for the page image.
    :param img_dim: width and height (in pixels) of the rendered page image.
    :param img_dir: directory where rendered page images are cached.
    :return: tuple of (page image as a uint8 array of shape
        (img_dim, img_dim, 3), predicted heatmap of shape (img_dim, img_dim)).
    """
    if not os.path.isdir(img_dir):
        print("\nCreating image folder at {}".format(img_dir))
        os.makedirs(img_dir)
    pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
    # TODO: add hashing function to make sure name is unique
    # TODO: add parallelization
    img_path = os.path.join(img_dir, pdf_name + "-{}.png".format(page_num))
    if not os.path.isfile(img_path):
        # create image for a page in the pdf document and save it in img_dir
        save_image(pdf_path, img_path, page_num)
    image = load_img(img_path, grayscale=True, target_size=(img_dim, img_dim))
    image = img_to_array(image, data_format=K.image_data_format())
    image = (
        image.reshape((img_dim, img_dim, 1))
        .repeat(3, axis=2)
        .reshape((1, img_dim, img_dim, 3))
    )
    return (
        image.astype(np.uint8).reshape((img_dim, img_dim, 3)),
        model.predict(image).reshape((img_dim, img_dim)),
    )
[ "def", "predict_heatmap", "(", "pdf_path", ",", "page_num", ",", "model", ",", "img_dim", "=", "448", ",", "img_dir", "=", "\"tmp/img\"", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "img_dir", ")", ":", "print", "(", "\"\\nCreating image folder at {}\"", ".", "format", "(", "img_dir", ")", ")", "os", ".", "makedirs", "(", "img_dir", ")", "pdf_name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "pdf_path", ")", ")", "[", "0", "]", "# TODO: add hashing function to make sure name is unique", "# TODO: add parallelization", "img_path", "=", "os", ".", "path", ".", "join", "(", "img_dir", ",", "pdf_name", "+", "\"-{}.png\"", ".", "format", "(", "page_num", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "img_path", ")", ":", "# create image for a page in the pdf document and save it in img_dir", "save_image", "(", "pdf_path", ",", "img_path", ",", "page_num", ")", "image", "=", "load_img", "(", "img_path", ",", "grayscale", "=", "True", ",", "target_size", "=", "(", "img_dim", ",", "img_dim", ")", ")", "image", "=", "img_to_array", "(", "image", ",", "data_format", "=", "K", ".", "image_data_format", "(", ")", ")", "image", "=", "(", "image", ".", "reshape", "(", "(", "img_dim", ",", "img_dim", ",", "1", ")", ")", ".", "repeat", "(", "3", ",", "axis", "=", "2", ")", ".", "reshape", "(", "(", "1", ",", "img_dim", ",", "img_dim", ",", "3", ")", ")", ")", "return", "(", "image", ".", "astype", "(", "np", ".", "uint8", ")", ".", "reshape", "(", "(", "img_dim", ",", "img_dim", ",", "3", ")", ")", ",", "model", ".", "predict", "(", "image", ")", ".", "reshape", "(", "(", "img_dim", ",", "img_dim", ")", ")", ",", ")" ]
42.612903
0.00074
def float_to_fp(signed, n_bits, n_frac): """Return a function to convert a floating point value to a fixed point value. For example, a function to convert a float to a signed fractional representation with 8 bits overall and 4 fractional bits (S3.4) can be constructed and used with:: >>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4) >>> hex(int(s34(0.5))) '0x8' The fixed point conversion is saturating:: >>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4 >>> hex(int(q34(-0.5))) '0x0' >>> hex(int(q34(15.0))) '0xf0' >>> hex(int(q34(16.0))) '0xff' Parameters ---------- signed : bool Whether the values that are to be converted should be signed, or clipped at zero. >>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed '-0x8' >>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned '0x0' n_bits : int Total number of bits in the fixed-point representation (including sign bit and fractional bits). n_frac : int Number of fractional bits in the fixed-point representation. """ # Calculate the maximum and minimum values if signed: max_v = (1 << (n_bits - 1)) - 1 min_v = -max_v - 1 else: min_v = 0 max_v = (1 << n_bits) - 1 # Compute the scale scale = 2.0**n_frac def bitsk(value): """Convert a floating point value to a fixed point value. Parameters ---------- value : float The value to convert. """ int_val = int(scale * value) return max((min(max_v, int_val), min_v)) return bitsk
[ "def", "float_to_fp", "(", "signed", ",", "n_bits", ",", "n_frac", ")", ":", "# Calculate the maximum and minimum values", "if", "signed", ":", "max_v", "=", "(", "1", "<<", "(", "n_bits", "-", "1", ")", ")", "-", "1", "min_v", "=", "-", "max_v", "-", "1", "else", ":", "min_v", "=", "0", "max_v", "=", "(", "1", "<<", "n_bits", ")", "-", "1", "# Compute the scale", "scale", "=", "2.0", "**", "n_frac", "def", "bitsk", "(", "value", ")", ":", "\"\"\"Convert a floating point value to a fixed point value.\n\n Parameters\n ----------\n value : float\n The value to convert.\n \"\"\"", "int_val", "=", "int", "(", "scale", "*", "value", ")", "return", "max", "(", "(", "min", "(", "max_v", ",", "int_val", ")", ",", "min_v", ")", ")", "return", "bitsk" ]
26.34375
0.000572
def pull(ctx, source, destination, progress):
    """ Copy file(s) from device(s) -> local machine.

    @param ctx: The click context parameter, for receiving the object dictionary
              | being manipulated by other previous functions. Needed by any
              | function with the @click.pass_context decorator.
    @type ctx: click.Context
    @param source: the source filepath or dirpath
    @type source: str
    @param destination: the destination filepath or dirpath
    @type destination: str
    @param progress: bool set to True if we should request a progress callback
                   | from the Jaide object. Always set to False when
                   | we're copying to/from multiple devices.
    @type progress: bool

    @returns: None. Functions part of click relating to the command group
            | 'main' do not return anything. Click handles passing context
            | between the functions and maintaining command order and chaining.
    """
    mp_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)
    multi = True if len(ctx.obj['hosts']) > 1 else False
    for ip in ctx.obj['hosts']:
        mp_pool.apply_async(wrap.open_connection, args=(ip,
                            ctx.obj['conn']['username'],
                            ctx.obj['conn']['password'],
                            wrap.pull, [source, destination, progress, multi],
                            ctx.obj['out'],
                            ctx.obj['conn']['connect_timeout'],
                            ctx.obj['conn']['session_timeout'],
                            ctx.obj['conn']['port']),
                            callback=write_out)
    mp_pool.close()
    mp_pool.join()
[ "def", "pull", "(", "ctx", ",", "source", ",", "destination", ",", "progress", ")", ":", "mp_pool", "=", "multiprocessing", ".", "Pool", "(", "multiprocessing", ".", "cpu_count", "(", ")", "*", "2", ")", "multi", "=", "True", "if", "len", "(", "ctx", ".", "obj", "[", "'hosts'", "]", ")", ">", "1", "else", "False", "for", "ip", "in", "ctx", ".", "obj", "[", "'hosts'", "]", ":", "mp_pool", ".", "apply_async", "(", "wrap", ".", "open_connection", ",", "args", "=", "(", "ip", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'username'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'password'", "]", ",", "wrap", ".", "pull", ",", "[", "source", ",", "destination", ",", "progress", ",", "multi", "]", ",", "ctx", ".", "obj", "[", "'out'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'connect_timeout'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'session_timeout'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'port'", "]", ")", ",", "callback", "=", "write_out", ")", "mp_pool", ".", "close", "(", ")", "mp_pool", ".", "join", "(", ")" ]
49.787879
0.000597
def post_object(self, container, obj, headers=None, query=None, cdn=False, body=None): """ POSTs the object and returns the results. This is used to update the object's header values. Note that all headers must be sent with the POST, unlike the account and container POSTs. With account and container POSTs, existing headers are untouched. But with object POSTs, any existing headers are removed. The full list of supported headers depends on the Swift cluster, but usually include Content-Type, Content-Encoding, and any X-Object-Meta-xxx headers. :param container: The name of the container. :param obj: The name of the object. :param headers: Additional headers to send with the request. :param query: Set to a dict of query values to send on the query string of the request. :param cdn: If set True, the CDN management interface will be used. :param body: No known Swift POSTs take a body; but the option is there for the future. :returns: A tuple of (status, reason, headers, contents). :status: is an int for the HTTP status code. :reason: is the str for the HTTP status (ex: "Ok"). :headers: is a dict with all lowercase keys of the HTTP headers; if a header has multiple values, it will be a list. :contents: is the str for the HTTP body. """ path = self._object_path(container, obj) return self.request( 'POST', path, body or '', headers, query=query, cdn=cdn)
[ "def", "post_object", "(", "self", ",", "container", ",", "obj", ",", "headers", "=", "None", ",", "query", "=", "None", ",", "cdn", "=", "False", ",", "body", "=", "None", ")", ":", "path", "=", "self", ".", "_object_path", "(", "container", ",", "obj", ")", "return", "self", ".", "request", "(", "'POST'", ",", "path", ",", "body", "or", "''", ",", "headers", ",", "query", "=", "query", ",", "cdn", "=", "cdn", ")" ]
49.666667
0.001795
def close_related_clients(self, client): """Close all clients related to *client*, except itself""" related_clients = self.get_related_clients(client) for cl in related_clients: self.close_client(client=cl, force=True)
[ "def", "close_related_clients", "(", "self", ",", "client", ")", ":", "related_clients", "=", "self", ".", "get_related_clients", "(", "client", ")", "for", "cl", "in", "related_clients", ":", "self", ".", "close_client", "(", "client", "=", "cl", ",", "force", "=", "True", ")" ]
50.8
0.007752
def record_type(self, column=None, value=None, **kwargs): """ Codes and descriptions indicating whether an award is for a new project or for the continuation of a currently funded one. >>> GICS().record_type('record_type_code', 'A') """ return self._resolve_call('GIC_RECORD_TYPE', column, value, **kwargs)
[ "def", "record_type", "(", "self", ",", "column", "=", "None", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resolve_call", "(", "'GIC_RECORD_TYPE'", ",", "column", ",", "value", ",", "*", "*", "kwargs", ")" ]
43.5
0.005634
def decode_aes256_base64_auto(data, encryption_key):
    """Guesses the AES cipher mode (ECB or CBC) from the base64 encoded data: a leading '!' marks CBC."""
    assert isinstance(data, bytes)
    length = len(data)

    if length == 0:
        return b''
    elif data[0] == b'!'[0]:
        return decode_aes256_cbc_base64(data, encryption_key)
    else:
        return decode_aes256_ecb_base64(data, encryption_key)
[ "def", "decode_aes256_base64_auto", "(", "data", ",", "encryption_key", ")", ":", "assert", "isinstance", "(", "data", ",", "bytes", ")", "length", "=", "len", "(", "data", ")", "if", "length", "==", "0", ":", "return", "b''", "elif", "data", "[", "0", "]", "==", "b'!'", "[", "0", "]", ":", "return", "decode_aes256_cbc_base64", "(", "data", ",", "encryption_key", ")", "else", ":", "return", "decode_aes256_ecb_base64", "(", "data", ",", "encryption_key", ")" ]
35.363636
0.005013
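A small illustration of the dispatch above: the first byte is compared as an integer (Python 3 bytes index to ints), and a leading '!' selects the CBC path; the payload below is made up:

data = b'!AAAA'             # hypothetical base64 payload flagged as CBC
print(data[0], b'!'[0])     # 33 33 -- indexing bytes yields ints in Python 3
print(data[0] == b'!'[0])   # True, so decode_aes256_cbc_base64 would be chosen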
def merge_two(one, other, merge_strategy=MergeStrategy.UNION, silent=False,
              pixel_strategy=PixelStrategy.FIRST):
    # type: (GeoRaster2, GeoRaster2, MergeStrategy, bool, PixelStrategy) -> GeoRaster2
    """Merge two rasters into one.

    Parameters
    ----------
    one : GeoRaster2
        Left raster to merge.
    other : GeoRaster2
        Right raster to merge.
    merge_strategy : MergeStrategy, optional
        Merge strategy, from :py:data:`telluric.georaster.MergeStrategy` (defaults to "union").
    silent : bool, optional
        Whether to raise errors or return some result, defaults to False (raise errors).
    pixel_strategy: PixelStrategy, optional
        Pixel strategy, from :py:data:`telluric.georaster.PixelStrategy` (defaults to "first").

    Returns
    -------
    GeoRaster2

    """
    other_res = _prepare_other_raster(one, other)
    if other_res is None:
        if silent:
            return one
        else:
            raise ValueError("rasters do not intersect")

    else:
        other = other.copy_with(image=other_res.image, band_names=other_res.band_names)  # To make MyPy happy

    # Create a list of single band rasters
    # Cropping won't happen twice, since other was already cropped
    all_band_names, projected_rasters = _prepare_rasters([other], merge_strategy, first=one)

    if not all_band_names and not silent:
        raise ValueError("rasters have no bands in common, use another merge strategy")

    prepared_rasters = _apply_pixel_strategy(projected_rasters, pixel_strategy)

    prepared_rasters = _explode_rasters(prepared_rasters, all_band_names)

    # Merge common bands
    prepared_rasters = _merge_common_bands(_explode_raster(one, all_band_names) + prepared_rasters)

    # Merge all bands
    raster = reduce(_stack_bands, prepared_rasters)

    return one.copy_with(image=raster.image, band_names=raster.band_names)
[ "def", "merge_two", "(", "one", ",", "other", ",", "merge_strategy", "=", "MergeStrategy", ".", "UNION", ",", "silent", "=", "False", ",", "pixel_strategy", "=", "PixelStrategy", ".", "FIRST", ")", ":", "# type: (GeoRaster2, GeoRaster2, MergeStrategy, bool, PixelStrategy) -> GeoRaster2", "other_res", "=", "_prepare_other_raster", "(", "one", ",", "other", ")", "if", "other_res", "is", "None", ":", "if", "silent", ":", "return", "one", "else", ":", "raise", "ValueError", "(", "\"rasters do not intersect\"", ")", "else", ":", "other", "=", "other", ".", "copy_with", "(", "image", "=", "other_res", ".", "image", ",", "band_names", "=", "other_res", ".", "band_names", ")", "# To make MyPy happy", "# Create a list of single band rasters", "# Cropping won't happen twice, since other was already cropped", "all_band_names", ",", "projected_rasters", "=", "_prepare_rasters", "(", "[", "other", "]", ",", "merge_strategy", ",", "first", "=", "one", ")", "if", "not", "all_band_names", "and", "not", "silent", ":", "raise", "ValueError", "(", "\"rasters have no bands in common, use another merge strategy\"", ")", "prepared_rasters", "=", "_apply_pixel_strategy", "(", "projected_rasters", ",", "pixel_strategy", ")", "prepared_rasters", "=", "_explode_rasters", "(", "prepared_rasters", ",", "all_band_names", ")", "# Merge common bands", "prepared_rasters", "=", "_merge_common_bands", "(", "_explode_raster", "(", "one", ",", "all_band_names", ")", "+", "prepared_rasters", ")", "# Merge all bands", "raster", "=", "reduce", "(", "_stack_bands", ",", "prepared_rasters", ")", "return", "one", ".", "copy_with", "(", "image", "=", "raster", ".", "image", ",", "band_names", "=", "raster", ".", "band_names", ")" ]
36.82
0.005291
def authorize_cache_security_group_ingress(name, region=None, key=None, keyid=None, profile=None, **args):
    '''
    Authorize network ingress from an ec2 security group to a cache security group.

    Example:

    .. code-block:: bash

        salt myminion boto3_elasticache.authorize_cache_security_group_ingress \
                mycachesecgrp \
                EC2SecurityGroupName=someEC2sg \
                EC2SecurityGroupOwnerId=SOMEOWNERID
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if 'CacheSecurityGroupName' in args:
        log.info(
            "'name: %s' param being overridden by explicitly provided "
            "'CacheSecurityGroupName: %s'",
            name, args['CacheSecurityGroupName']
        )
        name = args['CacheSecurityGroupName']
    else:
        args['CacheSecurityGroupName'] = name
    args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
    try:
        conn.authorize_cache_security_group_ingress(**args)
        log.info('Authorized %s to cache security group %s.', args['EC2SecurityGroupName'], name)
        return True
    except botocore.exceptions.ClientError as e:
        log.error('Failed to update security group %s: %s', name, e)
        return False
[ "def", "authorize_cache_security_group_ingress", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "args", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "'CacheSecurityGroupName'", "in", "args", ":", "log", ".", "info", "(", "\"'name: %s' param being overridden by explicitly provided \"", "\"'CacheSecurityGroupName: %s'\"", ",", "name", ",", "args", "[", "'CacheSecurityGroupName'", "]", ")", "name", "=", "args", "[", "'CacheSecurityGroupName'", "]", "else", ":", "args", "[", "'CacheSubnetGroupName'", "]", "=", "name", "args", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "args", ".", "items", "(", ")", "if", "not", "k", ".", "startswith", "(", "'_'", ")", "]", ")", "try", ":", "conn", ".", "authorize_cache_security_group_ingress", "(", "*", "*", "args", ")", "log", ".", "info", "(", "'Authorized %s to cache security group %s.'", ",", "args", "[", "'EC2SecurityGroupName'", "]", ",", "name", ")", "return", "True", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "e", ":", "log", ".", "error", "(", "'Failed to update security group %s: %s'", ",", "name", ",", "e", ")", "return", "False" ]
41.5625
0.002939
def projection(radius=5e-6, sphere_index=1.339, medium_index=1.333, wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80), center=(39.5, 39.5)): """Optical path difference projection of a dielectric sphere Parameters ---------- radius: float Radius of the sphere [m] sphere_index: float Refractive index of the sphere medium_index: float Refractive index of the surrounding medium wavelength: float Vacuum wavelength of the imaging light [m] pixel_size: float Pixel size [m] grid_size: tuple of floats Resulting image size in x and y [px] center: tuple of floats Center position in image coordinates [px] Returns ------- qpi: qpimage.QPImage Quantitative phase data set """ # grid x = np.arange(grid_size[0]).reshape(-1, 1) y = np.arange(grid_size[1]).reshape(1, -1) cx, cy = center # sphere location rpx = radius / pixel_size r = rpx**2 - (x - cx)**2 - (y - cy)**2 # distance z = np.zeros_like(r) rvalid = r > 0 z[rvalid] = 2 * np.sqrt(r[rvalid]) * pixel_size # phase = delta_n * 2PI * z / wavelength phase = (sphere_index - medium_index) * 2 * np.pi * z / wavelength meta_data = {"pixel size": pixel_size, "wavelength": wavelength, "medium index": medium_index, "sim center": center, "sim radius": radius, "sim index": sphere_index, "sim model": "projection", } qpi = qpimage.QPImage(data=phase, which_data="phase", meta_data=meta_data) return qpi
[ "def", "projection", "(", "radius", "=", "5e-6", ",", "sphere_index", "=", "1.339", ",", "medium_index", "=", "1.333", ",", "wavelength", "=", "550e-9", ",", "pixel_size", "=", "1e-7", ",", "grid_size", "=", "(", "80", ",", "80", ")", ",", "center", "=", "(", "39.5", ",", "39.5", ")", ")", ":", "# grid", "x", "=", "np", ".", "arange", "(", "grid_size", "[", "0", "]", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "y", "=", "np", ".", "arange", "(", "grid_size", "[", "1", "]", ")", ".", "reshape", "(", "1", ",", "-", "1", ")", "cx", ",", "cy", "=", "center", "# sphere location", "rpx", "=", "radius", "/", "pixel_size", "r", "=", "rpx", "**", "2", "-", "(", "x", "-", "cx", ")", "**", "2", "-", "(", "y", "-", "cy", ")", "**", "2", "# distance", "z", "=", "np", ".", "zeros_like", "(", "r", ")", "rvalid", "=", "r", ">", "0", "z", "[", "rvalid", "]", "=", "2", "*", "np", ".", "sqrt", "(", "r", "[", "rvalid", "]", ")", "*", "pixel_size", "# phase = delta_n * 2PI * z / wavelength", "phase", "=", "(", "sphere_index", "-", "medium_index", ")", "*", "2", "*", "np", ".", "pi", "*", "z", "/", "wavelength", "meta_data", "=", "{", "\"pixel size\"", ":", "pixel_size", ",", "\"wavelength\"", ":", "wavelength", ",", "\"medium index\"", ":", "medium_index", ",", "\"sim center\"", ":", "center", ",", "\"sim radius\"", ":", "radius", ",", "\"sim index\"", ":", "sphere_index", ",", "\"sim model\"", ":", "\"projection\"", ",", "}", "qpi", "=", "qpimage", ".", "QPImage", "(", "data", "=", "phase", ",", "which_data", "=", "\"phase\"", ",", "meta_data", "=", "meta_data", ")", "return", "qpi" ]
32.568627
0.000584
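As a quick check of the phase relation used above (phase = (n_sphere - n_medium) * 2 * pi * z / wavelength), the peak through the sphere centre with the default parameters works out to roughly 0.69 rad:

import numpy as np

radius, sphere_index, medium_index, wavelength = 5e-6, 1.339, 1.333, 550e-9
z_max = 2 * radius                                       # thickness through the sphere centre [m]
peak_phase = (sphere_index - medium_index) * 2 * np.pi * z_max / wavelength
print(round(peak_phase, 3))                              # -> 0.685 (radians), the analytic peak of the phase image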
def lease(self, time_to_live, lease_id=None, timeout=None):
        """
        Creates a lease which expires if the server does not
        receive a keep alive within a given time to live period.
        All keys attached to the lease will be expired and
        deleted if the lease expires.

        Each expired key generates a delete event in the event history.

        :param time_to_live: TTL is the advisory time-to-live in seconds.
        :type time_to_live: int

        :param lease_id: ID is the requested ID for the lease.
            If ID is None, the lessor (etcd) chooses an ID.
        :type lease_id: int or None

        :param timeout: Request timeout in seconds.
        :type timeout: int

        :returns: A lease object representing the created lease. This
            can be used for refreshing or revoking the lease etc.
        :rtype: instance of :class:`txaioetcd.Lease`
        """
        assembler = commons.LeaseRequestAssembler(self._url, time_to_live, lease_id)

        obj = yield self._post(assembler.url, assembler.data, timeout)

        lease = Lease._parse(self, obj)

        returnValue(lease)
[ "def", "lease", "(", "self", ",", "time_to_live", ",", "lease_id", "=", "None", ",", "timeout", "=", "None", ")", ":", "assembler", "=", "commons", ".", "LeaseRequestAssembler", "(", "self", ".", "_url", ",", "time_to_live", ",", "lease_id", ")", "obj", "=", "yield", "self", ".", "_post", "(", "assembler", ".", "url", ",", "assembler", ".", "data", ",", "timeout", ")", "lease", "=", "Lease", ".", "_parse", "(", "self", ",", "obj", ")", "returnValue", "(", "lease", ")" ]
35.903226
0.002625
def variable_absolute(self, a: float) -> mm.ModelMapper: """ Parameters ---------- a The absolute width of gaussian priors Returns ------- A model mapper created by taking results from this phase and creating priors with the defined absolute width. """ return self.previous_variable.mapper_from_gaussian_tuples(self.gaussian_tuples, a=a)
[ "def", "variable_absolute", "(", "self", ",", "a", ":", "float", ")", "->", "mm", ".", "ModelMapper", ":", "return", "self", ".", "previous_variable", ".", "mapper_from_gaussian_tuples", "(", "self", ".", "gaussian_tuples", ",", "a", "=", "a", ")" ]
34.25
0.009479
def ends_of_next_whole_turn(self, root): """Simulate one complete turn to completion and generate each end of turn reached during the simulation. Note on mana drain: Generates but does not continue simulation of mana drains. Arguments: root: a start state with no parent """ # simple confirmation that the root is actually a root. # otherwise it may seem to work but would be totally out of spec if root.parent: raise ValueError('Unexpectedly received a node with a parent for' ' root:\n{}'.format(root)) # build the list of eots (or just the root if first turn) to be run leaves = list(root.leaves()) kw_starts = list() if leaves[0] is root: # build ends of state kwargs as only the root kw_starts.append({'root': root}) else: # build ends of state kwargs as eots in the tree for leaf in leaves: # ignore mana drains if not leaf.is_mana_drain: kw_starts.append({'root_eot': leaf}) # run a single turn for each starting point for kw_start in kw_starts: for eot in self.ends_of_one_state(**kw_start): yield eot
[ "def", "ends_of_next_whole_turn", "(", "self", ",", "root", ")", ":", "# simple confirmation that the root is actually a root.", "# otherwise it may seem to work but would be totally out of spec", "if", "root", ".", "parent", ":", "raise", "ValueError", "(", "'Unexpectedly received a node with a parent for'", "' root:\\n{}'", ".", "format", "(", "root", ")", ")", "# build the list of eots (or just the root if first turn) to be run", "leaves", "=", "list", "(", "root", ".", "leaves", "(", ")", ")", "kw_starts", "=", "list", "(", ")", "if", "leaves", "[", "0", "]", "is", "root", ":", "# build ends of state kwargs as only the root", "kw_starts", ".", "append", "(", "{", "'root'", ":", "root", "}", ")", "else", ":", "# build ends of state kwargs as eots in the tree", "for", "leaf", "in", "leaves", ":", "# ignore mana drains", "if", "not", "leaf", ".", "is_mana_drain", ":", "kw_starts", ".", "append", "(", "{", "'root_eot'", ":", "leaf", "}", ")", "# run a single turn for each starting point", "for", "kw_start", "in", "kw_starts", ":", "for", "eot", "in", "self", ".", "ends_of_one_state", "(", "*", "*", "kw_start", ")", ":", "yield", "eot" ]
41.483871
0.00152
def _cleanup_factory(self):
        """Build a cleanup closure that doesn't increase our ref count"""
        _self = weakref.proxy(self)
        def wrapper():
            try:
                _self.close(timeout=0)
            except (ReferenceError, AttributeError):
                pass
        return wrapper
[ "def", "_cleanup_factory", "(", "self", ")", ":", "_self", "=", "weakref", ".", "proxy", "(", "self", ")", "def", "wrapper", "(", ")", ":", "try", ":", "_self", ".", "close", "(", "timeout", "=", "0", ")", "except", "(", "ReferenceError", ",", "AttributeError", ")", ":", "pass", "return", "wrapper" ]
33.888889
0.009585
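A standalone sketch of why weakref.proxy is used above: the wrapper never holds a strong reference to its owner, and a dead proxy raises the ReferenceError that the wrapper swallows (the Owner class here is made up for illustration):

import weakref

class Owner:
    def close(self, timeout=0):
        print("closed")

o = Owner()
p = weakref.proxy(o)      # like _self above: no strong reference is created
p.close()                 # prints "closed" while the owner is still alive
del o                     # once the owner is collected, the proxy is dead...
try:
    p.close()
except ReferenceError:    # ...which is exactly what the wrapper swallows
    pass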
def main(): """ NAME plot_2cdfs.py DESCRIPTION makes plots of cdfs of data in input file SYNTAX plot_2cdfs.py [-h][command line options] OPTIONS -h prints help message and quits -f FILE1 FILE2 -t TITLE -fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg """ fmt='svg' title="" if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') file=sys.argv[ind+1] X=numpy.loadtxt(file) file=sys.argv[ind+2] X2=numpy.loadtxt(file) # else: # X=numpy.loadtxt(sys.stdin,dtype=numpy.float) else: print('-f option required') print(main.__doc__) sys.exit() if '-fmt' in sys.argv: ind=sys.argv.index('-fmt') fmt=sys.argv[ind+1] if '-t' in sys.argv: ind=sys.argv.index('-t') title=sys.argv[ind+1] CDF={'X':1} pmagplotlib.plot_init(CDF['X'],5,5) pmagplotlib.plot_cdf(CDF['X'],X,'','r','') pmagplotlib.plot_cdf(CDF['X'],X2,title,'b','') D,p=scipy.stats.ks_2samp(X,X2) if p>=.05: print(D,p,' not rejected at 95%') else: print(D,p,' rejected at 95%') pmagplotlib.draw_figs(CDF) ans= input('S[a]ve plot, <Return> to quit ') if ans=='a': files={'X':'CDF_.'+fmt} pmagplotlib.save_plots(CDF,files)
[ "def", "main", "(", ")", ":", "fmt", "=", "'svg'", "title", "=", "\"\"", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-f'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "file", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "X", "=", "numpy", ".", "loadtxt", "(", "file", ")", "file", "=", "sys", ".", "argv", "[", "ind", "+", "2", "]", "X2", "=", "numpy", ".", "loadtxt", "(", "file", ")", "# else:", "# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)", "else", ":", "print", "(", "'-f option required'", ")", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-fmt'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-fmt'", ")", "fmt", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-t'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-t'", ")", "title", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "CDF", "=", "{", "'X'", ":", "1", "}", "pmagplotlib", ".", "plot_init", "(", "CDF", "[", "'X'", "]", ",", "5", ",", "5", ")", "pmagplotlib", ".", "plot_cdf", "(", "CDF", "[", "'X'", "]", ",", "X", ",", "''", ",", "'r'", ",", "''", ")", "pmagplotlib", ".", "plot_cdf", "(", "CDF", "[", "'X'", "]", ",", "X2", ",", "title", ",", "'b'", ",", "''", ")", "D", ",", "p", "=", "scipy", ".", "stats", ".", "ks_2samp", "(", "X", ",", "X2", ")", "if", "p", ">=", ".05", ":", "print", "(", "D", ",", "p", ",", "' not rejected at 95%'", ")", "else", ":", "print", "(", "D", ",", "p", ",", "' rejected at 95%'", ")", "pmagplotlib", ".", "draw_figs", "(", "CDF", ")", "ans", "=", "input", "(", "'S[a]ve plot, <Return> to quit '", ")", "if", "ans", "==", "'a'", ":", "files", "=", "{", "'X'", ":", "'CDF_.'", "+", "fmt", "}", "pmagplotlib", ".", "save_plots", "(", "CDF", ",", "files", ")" ]
25.272727
0.038781
def draw_text(self, ax, text, force_trans=None, text_type=None): """Process a matplotlib text object and call renderer.draw_text""" content = text.get_text() if content: transform = text.get_transform() position = text.get_position() coords, position = self.process_transform(transform, ax, position, force_trans=force_trans) style = utils.get_text_style(text) self.renderer.draw_text(text=content, position=position, coordinates=coords, text_type=text_type, style=style, mplobj=text)
[ "def", "draw_text", "(", "self", ",", "ax", ",", "text", ",", "force_trans", "=", "None", ",", "text_type", "=", "None", ")", ":", "content", "=", "text", ".", "get_text", "(", ")", "if", "content", ":", "transform", "=", "text", ".", "get_transform", "(", ")", "position", "=", "text", ".", "get_position", "(", ")", "coords", ",", "position", "=", "self", ".", "process_transform", "(", "transform", ",", "ax", ",", "position", ",", "force_trans", "=", "force_trans", ")", "style", "=", "utils", ".", "get_text_style", "(", "text", ")", "self", ".", "renderer", ".", "draw_text", "(", "text", "=", "content", ",", "position", "=", "position", ",", "coordinates", "=", "coords", ",", "text_type", "=", "text_type", ",", "style", "=", "style", ",", "mplobj", "=", "text", ")" ]
55.071429
0.002551
def write(self, text, fg='black', bg='white'): '''write to the console''' if isinstance(text, str): sys.stdout.write(text) else: sys.stdout.write(str(text)) sys.stdout.flush() if self.udp.connected(): self.udp.writeln(text) if self.tcp.connected(): self.tcp.writeln(text)
[ "def", "write", "(", "self", ",", "text", ",", "fg", "=", "'black'", ",", "bg", "=", "'white'", ")", ":", "if", "isinstance", "(", "text", ",", "str", ")", ":", "sys", ".", "stdout", ".", "write", "(", "text", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "str", "(", "text", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "self", ".", "udp", ".", "connected", "(", ")", ":", "self", ".", "udp", ".", "writeln", "(", "text", ")", "if", "self", ".", "tcp", ".", "connected", "(", ")", ":", "self", ".", "tcp", ".", "writeln", "(", "text", ")" ]
32.454545
0.00545
def filename(self): """ Name of the file on the client file system, but normalized to ensure file system compatibility. An empty filename is returned as 'empty'. Only ASCII letters, digits, dashes, underscores and dots are allowed in the final filename. Accents are removed, if possible. Whitespace is replaced by a single dash. Leading or tailing dots or dashes are removed. The filename is limited to 255 characters. """ fname = self.raw_filename if not isinstance(fname, unicode): fname = fname.decode('utf8', 'ignore') fname = normalize('NFKD', fname) fname = fname.encode('ASCII', 'ignore').decode('ASCII') fname = os.path.basename(fname.replace('\\', os.path.sep)) fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip() fname = re.sub(r'[-\s]+', '-', fname).strip('.-') return fname[:255] or 'empty'
[ "def", "filename", "(", "self", ")", ":", "fname", "=", "self", ".", "raw_filename", "if", "not", "isinstance", "(", "fname", ",", "unicode", ")", ":", "fname", "=", "fname", ".", "decode", "(", "'utf8'", ",", "'ignore'", ")", "fname", "=", "normalize", "(", "'NFKD'", ",", "fname", ")", "fname", "=", "fname", ".", "encode", "(", "'ASCII'", ",", "'ignore'", ")", ".", "decode", "(", "'ASCII'", ")", "fname", "=", "os", ".", "path", ".", "basename", "(", "fname", ".", "replace", "(", "'\\\\'", ",", "os", ".", "path", ".", "sep", ")", ")", "fname", "=", "re", ".", "sub", "(", "r'[^a-zA-Z0-9-_.\\s]'", ",", "''", ",", "fname", ")", ".", "strip", "(", ")", "fname", "=", "re", ".", "sub", "(", "r'[-\\s]+'", ",", "'-'", ",", "fname", ")", ".", "strip", "(", "'.-'", ")", "return", "fname", "[", ":", "255", "]", "or", "'empty'" ]
52.333333
0.004171
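The same sanitisation steps, traced on a made-up upload name so the intermediate results are visible:

import os
import re
from unicodedata import normalize

raw = 'uploads\\s\u00e9ance  report.PDF'                        # hypothetical raw_filename ('séance')
fname = normalize('NFKD', raw).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))      # -> 'seance  report.PDF'
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
print(fname[:255] or 'empty')                                   # -> 'seance-report.PDF'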
def send_location(self, chat_id, latitude, longitude, live_period=None, reply_to_message_id=None, reply_markup=None,
                      disable_notification=None):
        """
        Use this method to send a point on the map.
        :param chat_id:
        :param latitude:
        :param longitude:
        :param live_period:
        :param reply_to_message_id:
        :param reply_markup:
        :param disable_notification:
        :return: API reply.
        """
        return types.Message.de_json(
            apihelper.send_location(self.token, chat_id, latitude, longitude, live_period, reply_to_message_id,
                                    reply_markup, disable_notification))
[ "def", "send_location", "(", "self", ",", "chat_id", ",", "latitude", ",", "longitude", ",", "live_period", "=", "None", ",", "reply_to_message_id", "=", "None", ",", "reply_markup", "=", "None", ",", "disable_notification", "=", "None", ")", ":", "return", "types", ".", "Message", ".", "de_json", "(", "apihelper", ".", "send_location", "(", "self", ".", "token", ",", "chat_id", ",", "latitude", ",", "longitude", ",", "live_period", ",", "reply_to_message_id", ",", "reply_markup", ",", "disable_notification", ")", ")" ]
42.4375
0.007205
def evaluate_binop_comparison(self, operation, left, right, **kwargs): """ Evaluate given comparison binary operation with given operands. """ if not operation in self.binops_comparison: raise ValueError("Invalid comparison binary operation '{}'".format(operation)) if left is None or right is None: return None if not isinstance(left, (list, ListIP)): left = [left] if not isinstance(right, (list, ListIP)): right = [right] if not left or not right: return None if operation in ['OP_IS']: res = self.binops_comparison[operation](left, right) if res: return True elif operation in ['OP_IN']: for iteml in left: res = self.binops_comparison[operation](iteml, right) if res: return True else: for iteml in left: if iteml is None: continue for itemr in right: if itemr is None: continue res = self.binops_comparison[operation](iteml, itemr) if res: return True return False
[ "def", "evaluate_binop_comparison", "(", "self", ",", "operation", ",", "left", ",", "right", ",", "*", "*", "kwargs", ")", ":", "if", "not", "operation", "in", "self", ".", "binops_comparison", ":", "raise", "ValueError", "(", "\"Invalid comparison binary operation '{}'\"", ".", "format", "(", "operation", ")", ")", "if", "left", "is", "None", "or", "right", "is", "None", ":", "return", "None", "if", "not", "isinstance", "(", "left", ",", "(", "list", ",", "ListIP", ")", ")", ":", "left", "=", "[", "left", "]", "if", "not", "isinstance", "(", "right", ",", "(", "list", ",", "ListIP", ")", ")", ":", "right", "=", "[", "right", "]", "if", "not", "left", "or", "not", "right", ":", "return", "None", "if", "operation", "in", "[", "'OP_IS'", "]", ":", "res", "=", "self", ".", "binops_comparison", "[", "operation", "]", "(", "left", ",", "right", ")", "if", "res", ":", "return", "True", "elif", "operation", "in", "[", "'OP_IN'", "]", ":", "for", "iteml", "in", "left", ":", "res", "=", "self", ".", "binops_comparison", "[", "operation", "]", "(", "iteml", ",", "right", ")", "if", "res", ":", "return", "True", "else", ":", "for", "iteml", "in", "left", ":", "if", "iteml", "is", "None", ":", "continue", "for", "itemr", "in", "right", ":", "if", "itemr", "is", "None", ":", "continue", "res", "=", "self", ".", "binops_comparison", "[", "operation", "]", "(", "iteml", ",", "itemr", ")", "if", "res", ":", "return", "True", "return", "False" ]
37.323529
0.003072
def get_comments(self): """ Returns the comments for the job, querying the server if necessary. """ # Lazily load info if self.info is None: self.get_info() if 'comments' in self.info: return [structure['text'] for structure in self.info['comments']['comments']] else: return []
[ "def", "get_comments", "(", "self", ")", ":", "# Lazily load info", "if", "self", ".", "info", "is", "None", ":", "self", ".", "get_info", "(", ")", "if", "'comments'", "in", "self", ".", "info", ":", "return", "[", "structure", "[", "'text'", "]", "for", "structure", "in", "self", ".", "info", "[", "'comments'", "]", "[", "'comments'", "]", "]", "else", ":", "return", "[", "]" ]
28.230769
0.007916
def write_data(self, data, data_file, compression=0): """Writes the given *preprocessed* data to a file with the given name. """ f = bob.io.base.HDF5File(data_file, 'w') f.set("rate", data[0], compression=compression) f.set("data", data[1], compression=compression) f.set("labels", data[2], compression=compression)
[ "def", "write_data", "(", "self", ",", "data", ",", "data_file", ",", "compression", "=", "0", ")", ":", "f", "=", "bob", ".", "io", ".", "base", ".", "HDF5File", "(", "data_file", ",", "'w'", ")", "f", ".", "set", "(", "\"rate\"", ",", "data", "[", "0", "]", ",", "compression", "=", "compression", ")", "f", ".", "set", "(", "\"data\"", ",", "data", "[", "1", "]", ",", "compression", "=", "compression", ")", "f", ".", "set", "(", "\"labels\"", ",", "data", "[", "2", "]", ",", "compression", "=", "compression", ")" ]
47.571429
0.00295
def secret_hex(self, secret_hex): """ Sets the secret_hex of this PreSharedKey. The secret of the pre-shared key in hexadecimal. It is not case sensitive; 4a is same as 4A, and it is allowed with or without 0x in the beginning. The minimum length of the secret is 128 bits and maximum 256 bits. :param secret_hex: The secret_hex of this PreSharedKey. :type: str """ if secret_hex is None: raise ValueError("Invalid value for `secret_hex`, must not be `None`") if secret_hex is not None and not re.search('^(0[xX])?[0-9a-fA-F]{32,64}$', secret_hex): raise ValueError("Invalid value for `secret_hex`, must be a follow pattern or equal to `/^(0[xX])?[0-9a-fA-F]{32,64}$/`") self._secret_hex = secret_hex
[ "def", "secret_hex", "(", "self", ",", "secret_hex", ")", ":", "if", "secret_hex", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `secret_hex`, must not be `None`\"", ")", "if", "secret_hex", "is", "not", "None", "and", "not", "re", ".", "search", "(", "'^(0[xX])?[0-9a-fA-F]{32,64}$'", ",", "secret_hex", ")", ":", "raise", "ValueError", "(", "\"Invalid value for `secret_hex`, must be a follow pattern or equal to `/^(0[xX])?[0-9a-fA-F]{32,64}$/`\"", ")", "self", ".", "_secret_hex", "=", "secret_hex" ]
56.071429
0.007519
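What the validation pattern above accepts, sketched with a few made-up secrets (32 to 64 hex characters, i.e. 128 to 256 bits, case-insensitive, with an optional 0x prefix):

import re

pattern = '^(0[xX])?[0-9a-fA-F]{32,64}$'
print(bool(re.search(pattern, '0x' + 'Ab' * 16)))   # True  -> 32 hex chars, a 128-bit secret
print(bool(re.search(pattern, 'ff' * 32)))          # True  -> 64 hex chars, a 256-bit secret
print(bool(re.search(pattern, 'ff' * 8)))           # False -> only 64 bits, too short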
def clone(self): """ Create a copy of the timeout object Timeout properties are stored per-pool but each request needs a fresh Timeout object to ensure each one has its own start/stop configured. :return: a copy of the timeout object :rtype: :class:`Timeout` """ # We can't use copy.deepcopy because that will also create a new object # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to # detect the user default. return Timeout(connect=self._connect, read=self._read, total=self.total)
[ "def", "clone", "(", "self", ")", ":", "# We can't use copy.deepcopy because that will also create a new object", "# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to", "# detect the user default.", "return", "Timeout", "(", "connect", "=", "self", ".", "_connect", ",", "read", "=", "self", ".", "_read", ",", "total", "=", "self", ".", "total", ")" ]
42.5
0.003289
def send_calibrate_accelerometer(self, simple=False): """Request accelerometer calibration. :param simple: if True, perform simple accelerometer calibration """ calibration_command = self.message_factory.command_long_encode( self._handler.target_system, 0, # target_system, target_component mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, # command 0, # confirmation 0, # param 1, 1: gyro calibration, 3: gyro temperature calibration 0, # param 2, 1: magnetometer calibration 0, # param 3, 1: ground pressure calibration 0, # param 4, 1: radio RC calibration, 2: RC trim calibration 4 if simple else 1, # param 5, 1: accelerometer calibration, 2: board level calibration, 3: accelerometer temperature calibration, 4: simple accelerometer calibration 0, # param 6, 2: airspeed calibration 0, # param 7, 1: ESC calibration, 3: barometer temperature calibration ) self.send_mavlink(calibration_command)
[ "def", "send_calibrate_accelerometer", "(", "self", ",", "simple", "=", "False", ")", ":", "calibration_command", "=", "self", ".", "message_factory", ".", "command_long_encode", "(", "self", ".", "_handler", ".", "target_system", ",", "0", ",", "# target_system, target_component", "mavutil", ".", "mavlink", ".", "MAV_CMD_PREFLIGHT_CALIBRATION", ",", "# command", "0", ",", "# confirmation", "0", ",", "# param 1, 1: gyro calibration, 3: gyro temperature calibration", "0", ",", "# param 2, 1: magnetometer calibration", "0", ",", "# param 3, 1: ground pressure calibration", "0", ",", "# param 4, 1: radio RC calibration, 2: RC trim calibration", "4", "if", "simple", "else", "1", ",", "# param 5, 1: accelerometer calibration, 2: board level calibration, 3: accelerometer temperature calibration, 4: simple accelerometer calibration", "0", ",", "# param 6, 2: airspeed calibration", "0", ",", "# param 7, 1: ESC calibration, 3: barometer temperature calibration", ")", "self", ".", "send_mavlink", "(", "calibration_command", ")" ]
55.789474
0.003711
def load_config_file(self, filename, path=None): """Load a .py based config file by filename and path.""" loader = PyFileConfigLoader(filename, path=path) try: config = loader.load_config() except ConfigFileNotFound: # problem finding the file, raise raise except Exception: # try to get the full filename, but it will be empty in the # unlikely event that the error raised before filefind finished filename = loader.full_filename or filename # problem while running the file self.log.error("Exception while loading config file %s", filename, exc_info=True) else: self.log.debug("Loaded config file: %s", loader.full_filename) self.update_config(config)
[ "def", "load_config_file", "(", "self", ",", "filename", ",", "path", "=", "None", ")", ":", "loader", "=", "PyFileConfigLoader", "(", "filename", ",", "path", "=", "path", ")", "try", ":", "config", "=", "loader", ".", "load_config", "(", ")", "except", "ConfigFileNotFound", ":", "# problem finding the file, raise", "raise", "except", "Exception", ":", "# try to get the full filename, but it will be empty in the", "# unlikely event that the error raised before filefind finished", "filename", "=", "loader", ".", "full_filename", "or", "filename", "# problem while running the file", "self", ".", "log", ".", "error", "(", "\"Exception while loading config file %s\"", ",", "filename", ",", "exc_info", "=", "True", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\"Loaded config file: %s\"", ",", "loader", ".", "full_filename", ")", "self", ".", "update_config", "(", "config", ")" ]
46.222222
0.003534
def _ci_to_hgvs_coord(s, e): """ Convert continuous interbase (right-open) coordinates (..,-2,-1,0,1,..) to discontinuous HGVS coordinates (..,-2,-1,1,2,..) """ def _ci_to_hgvs(c): return c + 1 if c >= 0 else c return (None if s is None else _ci_to_hgvs(s), None if e is None else _ci_to_hgvs(e) - 1)
[ "def", "_ci_to_hgvs_coord", "(", "s", ",", "e", ")", ":", "def", "_ci_to_hgvs", "(", "c", ")", ":", "return", "c", "+", "1", "if", "c", ">=", "0", "else", "c", "return", "(", "None", "if", "s", "is", "None", "else", "_ci_to_hgvs", "(", "s", ")", ",", "None", "if", "e", "is", "None", "else", "_ci_to_hgvs", "(", "e", ")", "-", "1", ")" ]
35.777778
0.009091
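A few values worked through the conversion above; the right-open interbase interval [0, 2) covers the first two bases, which HGVS writes as 1..2:

>>> _ci_to_hgvs_coord(0, 2)     # interbase [0, 2) -> HGVS 1..2
(1, 2)
>>> _ci_to_hgvs_coord(3, 4)     # a single base at interbase [3, 4) -> HGVS 4
(4, 4)
>>> _ci_to_hgvs_coord(None, 5)  # an open-ended start is passed through
(None, 5)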
def _do_watch_progress(filename, sock, handler): """Function to run in a separate gevent greenlet to read progress events from a unix-domain socket.""" connection, client_address = sock.accept() data = b'' try: while True: more_data = connection.recv(16) if not more_data: break data += more_data lines = data.split(b'\n') for line in lines[:-1]: line = line.decode() parts = line.split('=') key = parts[0] if len(parts) > 0 else None value = parts[1] if len(parts) > 1 else None handler(key, value) data = lines[-1] finally: connection.close()
[ "def", "_do_watch_progress", "(", "filename", ",", "sock", ",", "handler", ")", ":", "connection", ",", "client_address", "=", "sock", ".", "accept", "(", ")", "data", "=", "b''", "try", ":", "while", "True", ":", "more_data", "=", "connection", ".", "recv", "(", "16", ")", "if", "not", "more_data", ":", "break", "data", "+=", "more_data", "lines", "=", "data", ".", "split", "(", "b'\\n'", ")", "for", "line", "in", "lines", "[", ":", "-", "1", "]", ":", "line", "=", "line", ".", "decode", "(", ")", "parts", "=", "line", ".", "split", "(", "'='", ")", "key", "=", "parts", "[", "0", "]", "if", "len", "(", "parts", ")", ">", "0", "else", "None", "value", "=", "parts", "[", "1", "]", "if", "len", "(", "parts", ")", ">", "1", "else", "None", "handler", "(", "key", ",", "value", ")", "data", "=", "lines", "[", "-", "1", "]", "finally", ":", "connection", ".", "close", "(", ")" ]
34.857143
0.00133
def comment_sync(self, comment): """Update comments to host and notify subscribers""" self.host.update(key="comment", value=comment) self.host.emit("commented", comment=comment)
[ "def", "comment_sync", "(", "self", ",", "comment", ")", ":", "self", ".", "host", ".", "update", "(", "key", "=", "\"comment\"", ",", "value", "=", "comment", ")", "self", ".", "host", ".", "emit", "(", "\"commented\"", ",", "comment", "=", "comment", ")" ]
49.5
0.00995
def write(self, settings=None): """ Save the current configuration to its file (as given by :code:`self._config_file`). Optionally, settings may be passed in to override the current settings before writing. Returns :code:`None` if the file could not be written to, either due to permissions, or if the :class:`~giraffez.config.Config` object has the :code:`mode` 'r'. :param dict settings: Defaults to :code:`None`, if not :code:`None` this will replace `self.settings` prior to writing to the file """ if "r" in self.mode: raise ConfigReadOnly("Cannot write Config while in 'r' mode") try: if settings: self.settings = settings with open(self._config_file, "w") as f: f.write(repr(self)) return repr(self) except OSError: return None
[ "def", "write", "(", "self", ",", "settings", "=", "None", ")", ":", "if", "\"r\"", "in", "self", ".", "mode", ":", "raise", "ConfigReadOnly", "(", "\"Cannot write Config while in 'r' mode\"", ")", "try", ":", "if", "settings", ":", "self", ".", "settings", "=", "settings", "with", "open", "(", "self", ".", "_config_file", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "repr", "(", "self", ")", ")", "return", "repr", "(", "self", ")", "except", "OSError", ":", "return", "None" ]
43.380952
0.007519
def FileHeader(self, zip64=None): """Return the per-file header as a string.""" dt = self.date_time dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) if self.flag_bits & 0x08: # Set these to zero because we write them after the file data CRC = compress_size = file_size = 0 else: CRC = self.CRC compress_size = self.compress_size file_size = self.file_size extra = self.extra min_version = 0 if zip64 is None: zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT if zip64: fmt = '<HHQQ' extra = extra + struct.pack(fmt, 1, struct.calcsize(fmt) - 4, file_size, compress_size) if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT: if not zip64: raise LargeZipFile("Filesize would require ZIP64 extensions") # File is larger than what fits into a 4 byte integer, # fall back to the ZIP64 extension file_size = 0xffffffff compress_size = 0xffffffff min_version = ZIP64_VERSION if self.compress_type == ZIP_BZIP2: min_version = max(BZIP2_VERSION, min_version) elif self.compress_type == ZIP_LZMA: min_version = max(LZMA_VERSION, min_version) self.extract_version = max(min_version, self.extract_version) self.create_version = max(min_version, self.create_version) filename, flag_bits = self._encodeFilenameFlags() header = struct.pack(structFileHeader, stringFileHeader, self.extract_version, self.reserved, flag_bits, self.compress_type, dostime, dosdate, CRC, compress_size, file_size, len(filename), len(extra)) return header + filename + extra
[ "def", "FileHeader", "(", "self", ",", "zip64", "=", "None", ")", ":", "dt", "=", "self", ".", "date_time", "dosdate", "=", "(", "dt", "[", "0", "]", "-", "1980", ")", "<<", "9", "|", "dt", "[", "1", "]", "<<", "5", "|", "dt", "[", "2", "]", "dostime", "=", "dt", "[", "3", "]", "<<", "11", "|", "dt", "[", "4", "]", "<<", "5", "|", "(", "dt", "[", "5", "]", "//", "2", ")", "if", "self", ".", "flag_bits", "&", "0x08", ":", "# Set these to zero because we write them after the file data", "CRC", "=", "compress_size", "=", "file_size", "=", "0", "else", ":", "CRC", "=", "self", ".", "CRC", "compress_size", "=", "self", ".", "compress_size", "file_size", "=", "self", ".", "file_size", "extra", "=", "self", ".", "extra", "min_version", "=", "0", "if", "zip64", "is", "None", ":", "zip64", "=", "file_size", ">", "ZIP64_LIMIT", "or", "compress_size", ">", "ZIP64_LIMIT", "if", "zip64", ":", "fmt", "=", "'<HHQQ'", "extra", "=", "extra", "+", "struct", ".", "pack", "(", "fmt", ",", "1", ",", "struct", ".", "calcsize", "(", "fmt", ")", "-", "4", ",", "file_size", ",", "compress_size", ")", "if", "file_size", ">", "ZIP64_LIMIT", "or", "compress_size", ">", "ZIP64_LIMIT", ":", "if", "not", "zip64", ":", "raise", "LargeZipFile", "(", "\"Filesize would require ZIP64 extensions\"", ")", "# File is larger than what fits into a 4 byte integer,", "# fall back to the ZIP64 extension", "file_size", "=", "0xffffffff", "compress_size", "=", "0xffffffff", "min_version", "=", "ZIP64_VERSION", "if", "self", ".", "compress_type", "==", "ZIP_BZIP2", ":", "min_version", "=", "max", "(", "BZIP2_VERSION", ",", "min_version", ")", "elif", "self", ".", "compress_type", "==", "ZIP_LZMA", ":", "min_version", "=", "max", "(", "LZMA_VERSION", ",", "min_version", ")", "self", ".", "extract_version", "=", "max", "(", "min_version", ",", "self", ".", "extract_version", ")", "self", ".", "create_version", "=", "max", "(", "min_version", ",", "self", ".", "create_version", ")", "filename", ",", "flag_bits", "=", "self", ".", "_encodeFilenameFlags", "(", ")", "header", "=", "struct", ".", "pack", "(", "structFileHeader", ",", "stringFileHeader", ",", "self", ".", "extract_version", ",", "self", ".", "reserved", ",", "flag_bits", ",", "self", ".", "compress_type", ",", "dostime", ",", "dosdate", ",", "CRC", ",", "compress_size", ",", "file_size", ",", "len", "(", "filename", ")", ",", "len", "(", "extra", ")", ")", "return", "header", "+", "filename", "+", "extra" ]
43.911111
0.001485
def __equalize_densities(self,nominal_bounds,nominal_density): """ Calculate the true density along x, and adjust the top and bottom bounds so that the density along y will be equal. Returns (adjusted_bounds, true_density) """ left,bottom,right,top = nominal_bounds.lbrt() width = right-left; height = top-bottom center_y = bottom + height/2.0 # True density is not equal to the nominal_density when # nominal_density*(right-left) is not an integer. true_density = int(nominal_density*(width))/float(width) n_cells = round(height*true_density,0) adjusted_half_height = n_cells/true_density/2.0 return (BoundingBox(points=((left, center_y-adjusted_half_height), (right, center_y+adjusted_half_height))), true_density)
[ "def", "__equalize_densities", "(", "self", ",", "nominal_bounds", ",", "nominal_density", ")", ":", "left", ",", "bottom", ",", "right", ",", "top", "=", "nominal_bounds", ".", "lbrt", "(", ")", "width", "=", "right", "-", "left", "height", "=", "top", "-", "bottom", "center_y", "=", "bottom", "+", "height", "/", "2.0", "# True density is not equal to the nominal_density when", "# nominal_density*(right-left) is not an integer.", "true_density", "=", "int", "(", "nominal_density", "*", "(", "width", ")", ")", "/", "float", "(", "width", ")", "n_cells", "=", "round", "(", "height", "*", "true_density", ",", "0", ")", "adjusted_half_height", "=", "n_cells", "/", "true_density", "/", "2.0", "return", "(", "BoundingBox", "(", "points", "=", "(", "(", "left", ",", "center_y", "-", "adjusted_half_height", ")", ",", "(", "right", ",", "center_y", "+", "adjusted_half_height", ")", ")", ")", ",", "true_density", ")" ]
43.2
0.010193
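The density arithmetic above with made-up numbers: a 1.0 x 0.8 sheet requested at nominal density 7.5 settles at a true x density of 7.0, and the y bounds are stretched so the vertical density matches exactly:

width, height, nominal_density = 1.0, 0.8, 7.5
true_density = int(nominal_density * width) / float(width)   # 7.0 (only 7 whole cells fit along x)
n_cells = round(height * true_density, 0)                    # 6.0 rows along y
adjusted_height = 2 * (n_cells / true_density / 2.0)         # ~0.857, so 6 cells / 0.857 = 7.0 per unit
print(true_density, n_cells, round(adjusted_height, 3))      # 7.0 6.0 0.857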
def get_parent(self): """Return the parent block of this block, or None if there isn't one.""" if not self.has_cached_parent: if self.parent is not None: self._parent_block = self.runtime.get_block(self.parent) else: self._parent_block = None self._parent_block_id = self.parent return self._parent_block
[ "def", "get_parent", "(", "self", ")", ":", "if", "not", "self", ".", "has_cached_parent", ":", "if", "self", ".", "parent", "is", "not", "None", ":", "self", ".", "_parent_block", "=", "self", ".", "runtime", ".", "get_block", "(", "self", ".", "parent", ")", "else", ":", "self", ".", "_parent_block", "=", "None", "self", ".", "_parent_block_id", "=", "self", ".", "parent", "return", "self", ".", "_parent_block" ]
43.111111
0.007576
def run(command, parser, cl_args, unknown_args): ''' Submits the topology to the scheduler * Depending on the topology file name extension, we treat the file as a fatjar (if the ext is .jar) or a tar file (if the ext is .tar/.tar.gz). * We upload the topology file to the packer, update zookeeper and launch scheduler jobs representing that topology * You can see your topology in Heron UI :param command: :param parser: :param cl_args: :param unknown_args: :return: ''' Log.debug("Submit Args %s", cl_args) # get the topology file name topology_file = cl_args['topology-file-name'] if urlparse.urlparse(topology_file).scheme: cl_args['topology-file-name'] = download(topology_file, cl_args['cluster']) topology_file = cl_args['topology-file-name'] Log.debug("download uri to local file: %s", topology_file) # check to see if the topology file exists if not os.path.isfile(topology_file): err_context = "Topology file '%s' does not exist" % topology_file return SimpleResult(Status.InvocationError, err_context) # check if it is a valid file type jar_type = topology_file.endswith(".jar") tar_type = topology_file.endswith(".tar") or topology_file.endswith(".tar.gz") pex_type = topology_file.endswith(".pex") cpp_type = topology_file.endswith(".dylib") or topology_file.endswith(".so") if not (jar_type or tar_type or pex_type or cpp_type): _, ext_name = os.path.splitext(topology_file) err_context = "Unknown file type '%s'. Please use .tar "\ "or .tar.gz or .jar or .pex or .dylib or .so file"\ % ext_name return SimpleResult(Status.InvocationError, err_context) # check if extra launch classpath is provided and if it is validate if cl_args['extra_launch_classpath']: valid_classpath = classpath.valid_java_classpath(cl_args['extra_launch_classpath']) if not valid_classpath: err_context = "One of jar or directory in extra launch classpath does not exist: %s" % \ cl_args['extra_launch_classpath'] return SimpleResult(Status.InvocationError, err_context) # create a temporary directory for topology definition file tmp_dir = tempfile.mkdtemp() opts.cleaned_up_files.append(tmp_dir) # if topology needs to be launched in deactivated state, do it so if cl_args['deploy_deactivated']: initial_state = topology_pb2.TopologyState.Name(topology_pb2.PAUSED) else: initial_state = topology_pb2.TopologyState.Name(topology_pb2.RUNNING) # set the tmp dir and deactivated state in global options opts.set_config('cmdline.topologydefn.tmpdirectory', tmp_dir) opts.set_config('cmdline.topology.initial.state', initial_state) opts.set_config('cmdline.topology.role', cl_args['role']) opts.set_config('cmdline.topology.environment', cl_args['environ']) # Use CLI release yaml file if the release_yaml_file config is empty if not cl_args['release_yaml_file']: cl_args['release_yaml_file'] = config.get_heron_release_file() # check the extension of the file name to see if it is tar/jar file. if jar_type: return submit_fatjar(cl_args, unknown_args, tmp_dir) elif tar_type: return submit_tar(cl_args, unknown_args, tmp_dir) elif cpp_type: return submit_cpp(cl_args, unknown_args, tmp_dir) else: return submit_pex(cl_args, unknown_args, tmp_dir)
[ "def", "run", "(", "command", ",", "parser", ",", "cl_args", ",", "unknown_args", ")", ":", "Log", ".", "debug", "(", "\"Submit Args %s\"", ",", "cl_args", ")", "# get the topology file name", "topology_file", "=", "cl_args", "[", "'topology-file-name'", "]", "if", "urlparse", ".", "urlparse", "(", "topology_file", ")", ".", "scheme", ":", "cl_args", "[", "'topology-file-name'", "]", "=", "download", "(", "topology_file", ",", "cl_args", "[", "'cluster'", "]", ")", "topology_file", "=", "cl_args", "[", "'topology-file-name'", "]", "Log", ".", "debug", "(", "\"download uri to local file: %s\"", ",", "topology_file", ")", "# check to see if the topology file exists", "if", "not", "os", ".", "path", ".", "isfile", "(", "topology_file", ")", ":", "err_context", "=", "\"Topology file '%s' does not exist\"", "%", "topology_file", "return", "SimpleResult", "(", "Status", ".", "InvocationError", ",", "err_context", ")", "# check if it is a valid file type", "jar_type", "=", "topology_file", ".", "endswith", "(", "\".jar\"", ")", "tar_type", "=", "topology_file", ".", "endswith", "(", "\".tar\"", ")", "or", "topology_file", ".", "endswith", "(", "\".tar.gz\"", ")", "pex_type", "=", "topology_file", ".", "endswith", "(", "\".pex\"", ")", "cpp_type", "=", "topology_file", ".", "endswith", "(", "\".dylib\"", ")", "or", "topology_file", ".", "endswith", "(", "\".so\"", ")", "if", "not", "(", "jar_type", "or", "tar_type", "or", "pex_type", "or", "cpp_type", ")", ":", "_", ",", "ext_name", "=", "os", ".", "path", ".", "splitext", "(", "topology_file", ")", "err_context", "=", "\"Unknown file type '%s'. Please use .tar \"", "\"or .tar.gz or .jar or .pex or .dylib or .so file\"", "%", "ext_name", "return", "SimpleResult", "(", "Status", ".", "InvocationError", ",", "err_context", ")", "# check if extra launch classpath is provided and if it is validate", "if", "cl_args", "[", "'extra_launch_classpath'", "]", ":", "valid_classpath", "=", "classpath", ".", "valid_java_classpath", "(", "cl_args", "[", "'extra_launch_classpath'", "]", ")", "if", "not", "valid_classpath", ":", "err_context", "=", "\"One of jar or directory in extra launch classpath does not exist: %s\"", "%", "cl_args", "[", "'extra_launch_classpath'", "]", "return", "SimpleResult", "(", "Status", ".", "InvocationError", ",", "err_context", ")", "# create a temporary directory for topology definition file", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "opts", ".", "cleaned_up_files", ".", "append", "(", "tmp_dir", ")", "# if topology needs to be launched in deactivated state, do it so", "if", "cl_args", "[", "'deploy_deactivated'", "]", ":", "initial_state", "=", "topology_pb2", ".", "TopologyState", ".", "Name", "(", "topology_pb2", ".", "PAUSED", ")", "else", ":", "initial_state", "=", "topology_pb2", ".", "TopologyState", ".", "Name", "(", "topology_pb2", ".", "RUNNING", ")", "# set the tmp dir and deactivated state in global options", "opts", ".", "set_config", "(", "'cmdline.topologydefn.tmpdirectory'", ",", "tmp_dir", ")", "opts", ".", "set_config", "(", "'cmdline.topology.initial.state'", ",", "initial_state", ")", "opts", ".", "set_config", "(", "'cmdline.topology.role'", ",", "cl_args", "[", "'role'", "]", ")", "opts", ".", "set_config", "(", "'cmdline.topology.environment'", ",", "cl_args", "[", "'environ'", "]", ")", "# Use CLI release yaml file if the release_yaml_file config is empty", "if", "not", "cl_args", "[", "'release_yaml_file'", "]", ":", "cl_args", "[", "'release_yaml_file'", "]", "=", "config", ".", "get_heron_release_file", "(", ")", "# check the extension of the file name to see if it is tar/jar file.", "if", "jar_type", ":", "return", "submit_fatjar", "(", "cl_args", ",", "unknown_args", ",", "tmp_dir", ")", "elif", "tar_type", ":", "return", "submit_tar", "(", "cl_args", ",", "unknown_args", ",", "tmp_dir", ")", "elif", "cpp_type", ":", "return", "submit_cpp", "(", "cl_args", ",", "unknown_args", ",", "tmp_dir", ")", "else", ":", "return", "submit_pex", "(", "cl_args", ",", "unknown_args", ",", "tmp_dir", ")" ]
42.217949
0.011573
def getOrphanParticleInfos(self, swarmId, genIdx):
    """Return a list of particleStates for all particles in the given
    swarm generation that have been orphaned.

    Parameters:
    ---------------------------------------------------------------------
    swarmId:  A string representation of the sorted list of encoders in this
              swarm. For example '__address_encoder.__gym_encoder'

    genIdx:   If not None, only return particles at this specific generation
              index.

    retval:   (particleStates, modelIds, errScores, completed, matured)
              particleStates: list of particleStates
              modelIds: list of modelIds
              errScores: list of errScores, numpy.inf is plugged in
                         if we don't have a result yet
              completed: list of completed booleans
              matured: list of matured booleans
    """
    entryIdxs = range(len(self._allResults))
    if len(entryIdxs) == 0:
        return ([], [], [], [], [])

    # Get the particles of interest
    particleStates = []
    modelIds = []
    errScores = []
    completedFlags = []
    maturedFlags = []
    for idx in entryIdxs:
        # Get info on this model
        entry = self._allResults[idx]
        if not entry['hidden']:
            continue
        modelParams = entry['modelParams']
        if modelParams['particleState']['swarmId'] != swarmId:
            continue
        isCompleted = entry['completed']
        isMatured = entry['matured']
        particleState = modelParams['particleState']
        particleGenIdx = particleState['genIdx']
        particleId = particleState['id']

        if genIdx is not None and particleGenIdx != genIdx:
            continue

        # Incorporate into return values
        particleStates.append(particleState)
        modelIds.append(entry['modelID'])
        errScores.append(entry['errScore'])
        completedFlags.append(isCompleted)
        maturedFlags.append(isMatured)

    return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
[ "def", "getOrphanParticleInfos", "(", "self", ",", "swarmId", ",", "genIdx", ")", ":", "entryIdxs", "=", "range", "(", "len", "(", "self", ".", "_allResults", ")", ")", "if", "len", "(", "entryIdxs", ")", "==", "0", ":", "return", "(", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "# Get the particles of interest", "particleStates", "=", "[", "]", "modelIds", "=", "[", "]", "errScores", "=", "[", "]", "completedFlags", "=", "[", "]", "maturedFlags", "=", "[", "]", "for", "idx", "in", "entryIdxs", ":", "# Get info on this model", "entry", "=", "self", ".", "_allResults", "[", "idx", "]", "if", "not", "entry", "[", "'hidden'", "]", ":", "continue", "modelParams", "=", "entry", "[", "'modelParams'", "]", "if", "modelParams", "[", "'particleState'", "]", "[", "'swarmId'", "]", "!=", "swarmId", ":", "continue", "isCompleted", "=", "entry", "[", "'completed'", "]", "isMatured", "=", "entry", "[", "'matured'", "]", "particleState", "=", "modelParams", "[", "'particleState'", "]", "particleGenIdx", "=", "particleState", "[", "'genIdx'", "]", "particleId", "=", "particleState", "[", "'id'", "]", "if", "genIdx", "is", "not", "None", "and", "particleGenIdx", "!=", "genIdx", ":", "continue", "# Incorporate into return values", "particleStates", ".", "append", "(", "particleState", ")", "modelIds", ".", "append", "(", "entry", "[", "'modelID'", "]", ")", "errScores", ".", "append", "(", "entry", "[", "'errScore'", "]", ")", "completedFlags", ".", "append", "(", "isCompleted", ")", "maturedFlags", ".", "append", "(", "isMatured", ")", "return", "(", "particleStates", ",", "modelIds", ",", "errScores", ",", "completedFlags", ",", "maturedFlags", ")" ]
33.457627
0.00935
def parse_plays_stream(self):
    """Generate and yield a stream of parsed plays. Useful for per play processing."""
    lx_doc = self.html_doc()
    if lx_doc is not None:
        parser = PlayParser(self.game_key.season, self.game_key.game_type)
        plays = lx_doc.xpath('//tr[@class = "evenColor"]')
        for p in plays:
            p_obj = parser.build_play(p)
            self.plays.append(p_obj)
            yield p_obj
[ "def", "parse_plays_stream", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "if", "lx_doc", "is", "not", "None", ":", "parser", "=", "PlayParser", "(", "self", ".", "game_key", ".", "season", ",", "self", ".", "game_key", ".", "game_type", ")", "plays", "=", "lx_doc", ".", "xpath", "(", "'//tr[@class = \"evenColor\"]'", ")", "for", "p", "in", "plays", ":", "p_obj", "=", "parser", ".", "build_play", "(", "p", ")", "self", ".", "plays", ".", "append", "(", "p_obj", ")", "yield", "p_obj" ]
40.25
0.010121
def upgrade():
    """Upgrade database."""
    op.create_table(
        'communities_community',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('id', sa.String(length=100), nullable=False),
        sa.Column('id_user', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=255), nullable=False),
        sa.Column('description', sa.Text(), nullable=False),
        sa.Column('page', sa.Text(), nullable=False),
        sa.Column('curation_policy', sa.Text(), nullable=False),
        sa.Column('last_record_accepted', sa.DateTime(), nullable=False),
        sa.Column('logo_ext', sa.String(length=4), nullable=True),
        sa.Column('ranking', sa.Integer(), nullable=False),
        sa.Column('fixed_points', sa.Integer(), nullable=False),
        sa.Column('deleted_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['id_user'], [u'accounts_user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table(
        'communities_community_record',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('id_community', sa.String(length=100), nullable=False),
        sa.Column('id_record', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('id_user', sa.Integer(), nullable=True),
        sa.Column('expires_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(
            ['id_community'],
            ['communities_community.id'],
            name='fk_communities_community_record_id_community',
        ),
        sa.ForeignKeyConstraint(['id_record'], [u'records_metadata.id'], ),
        sa.ForeignKeyConstraint(['id_user'], [u'accounts_user.id'], ),
        sa.PrimaryKeyConstraint('id_community', 'id_record')
    )
    op.create_table(
        'communities_featured_community',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('id_community', sa.String(length=100), nullable=False),
        sa.Column('start_date', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(
            ['id_community'],
            [u'communities_community.id'],
            name='fk_communities_featured_community_id_community',
        ),
        sa.PrimaryKeyConstraint('id')
    )
[ "def", "upgrade", "(", ")", ":", "op", ".", "create_table", "(", "'communities_community'", ",", "sa", ".", "Column", "(", "'created'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'updated'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'id'", ",", "sa", ".", "String", "(", "length", "=", "100", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'id_user'", ",", "sa", ".", "Integer", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'title'", ",", "sa", ".", "String", "(", "length", "=", "255", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'description'", ",", "sa", ".", "Text", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'page'", ",", "sa", ".", "Text", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'curation_policy'", ",", "sa", ".", "Text", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'last_record_accepted'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'logo_ext'", ",", "sa", ".", "String", "(", "length", "=", "4", ")", ",", "nullable", "=", "True", ")", ",", "sa", ".", "Column", "(", "'ranking'", ",", "sa", ".", "Integer", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'fixed_points'", ",", "sa", ".", "Integer", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'deleted_at'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "True", ")", ",", "sa", ".", "ForeignKeyConstraint", "(", "[", "'id_user'", "]", ",", "[", "u'accounts_user.id'", "]", ",", ")", ",", "sa", ".", "PrimaryKeyConstraint", "(", "'id'", ")", ")", "op", ".", "create_table", "(", "'communities_community_record'", ",", "sa", ".", "Column", "(", "'created'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'updated'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'id_community'", ",", "sa", ".", "String", "(", "length", "=", "100", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'id_record'", ",", "sqlalchemy_utils", ".", "types", ".", "uuid", ".", "UUIDType", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'id_user'", ",", "sa", ".", "Integer", "(", ")", ",", "nullable", "=", "True", ")", ",", "sa", ".", "Column", "(", "'expires_at'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "True", ")", ",", "sa", ".", "ForeignKeyConstraint", "(", "[", "'id_community'", "]", ",", "[", "'communities_community.id'", "]", ",", "name", "=", "'fk_communities_community_record_id_community'", ",", ")", ",", "sa", ".", "ForeignKeyConstraint", "(", "[", "'id_record'", "]", ",", "[", "u'records_metadata.id'", "]", ",", ")", ",", "sa", ".", "ForeignKeyConstraint", "(", "[", "'id_user'", "]", ",", "[", "u'accounts_user.id'", "]", ",", ")", ",", "sa", ".", "PrimaryKeyConstraint", "(", "'id_community'", ",", "'id_record'", ")", ")", "op", ".", "create_table", "(", "'communities_featured_community'", ",", "sa", ".", "Column", "(", "'created'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'updated'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", 
".", "Column", "(", "'id'", ",", "sa", ".", "Integer", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'id_community'", ",", "sa", ".", "String", "(", "length", "=", "100", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'start_date'", ",", "sa", ".", "DateTime", "(", ")", ",", "nullable", "=", "False", ")", ",", "sa", ".", "ForeignKeyConstraint", "(", "[", "'id_community'", "]", ",", "[", "u'communities_community.id'", "]", ",", "name", "=", "'fk_communities_featured_community_id_community'", ",", ")", ",", "sa", ".", "PrimaryKeyConstraint", "(", "'id'", ")", ")" ]
48.92
0.000401
def step(self, substeps=2):
    '''Step the world forward by one frame.

    Parameters
    ----------
    substeps : int, optional
        Split the step into this many sub-steps. This helps to prevent the
        time delta for an update from being too large.
    '''
    self.frame_no += 1
    dt = self.dt / substeps
    for _ in range(substeps):
        self.ode_contactgroup.empty()
        self.ode_space.collide(None, self.on_collision)
        self.ode_world.step(dt)
[ "def", "step", "(", "self", ",", "substeps", "=", "2", ")", ":", "self", ".", "frame_no", "+=", "1", "dt", "=", "self", ".", "dt", "/", "substeps", "for", "_", "in", "range", "(", "substeps", ")", ":", "self", ".", "ode_contactgroup", ".", "empty", "(", ")", "self", ".", "ode_space", ".", "collide", "(", "None", ",", "self", ".", "on_collision", ")", "self", ".", "ode_world", ".", "step", "(", "dt", ")" ]
34.266667
0.003788
def send(node_name):
    """ Send our information to a remote nago instance

    Arguments:
        node -- node_name or token for the node this data belongs to
    """
    my_data = nago.core.get_my_info()
    if not node_name:
        node_name = nago.settings.get('server')
    node = nago.core.get_node(node_name)
    json_params = {}
    json_params['node_name'] = node_name
    json_params['key'] = "node_info"
    for k, v in my_data.items():
        nago.core.log("sending %s to %s" % (k, node['host_name']), level="notice")
        json_params[k] = v
    return node.send_command('info', 'post', node_name=node.token, key="node_info", **my_data)
[ "def", "send", "(", "node_name", ")", ":", "my_data", "=", "nago", ".", "core", ".", "get_my_info", "(", ")", "if", "not", "node_name", ":", "node_name", "=", "nago", ".", "settings", ".", "get", "(", "'server'", ")", "node", "=", "nago", ".", "core", ".", "get_node", "(", "node_name", ")", "json_params", "=", "{", "}", "json_params", "[", "'node_name'", "]", "=", "node_name", "json_params", "[", "'key'", "]", "=", "\"node_info\"", "for", "k", ",", "v", "in", "my_data", ".", "items", "(", ")", ":", "nago", ".", "core", ".", "log", "(", "\"sending %s to %s\"", "%", "(", "k", ",", "node", "[", "'host_name'", "]", ")", ",", "level", "=", "\"notice\"", ")", "json_params", "[", "k", "]", "=", "v", "return", "node", ".", "send_command", "(", "'info'", ",", "'post'", ",", "node_name", "=", "node", ".", "token", ",", "key", "=", "\"node_info\"", ",", "*", "*", "my_data", ")" ]
37.529412
0.004587
def backlink(node):
    """Given a CFG with outgoing links, create incoming links."""
    seen = set()
    to_see = [node]
    while to_see:
        node = to_see.pop()
        seen.add(node)
        for succ in node.next:
            succ.prev.add(node)
            if succ not in seen:
                to_see.append(succ)
[ "def", "backlink", "(", "node", ")", ":", "seen", "=", "set", "(", ")", "to_see", "=", "[", "node", "]", "while", "to_see", ":", "node", "=", "to_see", ".", "pop", "(", ")", "seen", ".", "add", "(", "node", ")", "for", "succ", "in", "node", ".", "next", ":", "succ", ".", "prev", ".", "add", "(", "node", ")", "if", "succ", "not", "in", "seen", ":", "to_see", ".", "append", "(", "succ", ")" ]
26.636364
0.016502
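For illustration, a minimal sketch of how a backlinking pass like the one above fills in incoming edges; the tiny Node class and the three-node graph are invented for the example and assume backlink is in scope:

class Node(object):
    """Stand-in CFG node with outgoing (next) and incoming (prev) edge sets."""
    def __init__(self, name):
        self.name = name
        self.next = set()
        self.prev = set()

a, b, c = Node('a'), Node('b'), Node('c')
a.next = {b, c}
b.next = {c}
backlink(a)
print(sorted(n.name for n in c.prev))  # ['a', 'b']: both a and b point at c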
def select_down(self):
    """move cursor down"""
    r, c = self._index
    self._select_index(r+1, c)
[ "def", "select_down", "(", "self", ")", ":", "r", ",", "c", "=", "self", ".", "_index", "self", ".", "_select_index", "(", "r", "+", "1", ",", "c", ")" ]
28
0.017391
def process_der(self, data, name):
    """
    DER processing
    :param data:
    :param name:
    :return:
    """
    from cryptography.x509.base import load_der_x509_certificate
    try:
        x509 = load_der_x509_certificate(data, self.get_backend())
        self.num_der_certs += 1
        return self.process_x509(x509, name=name, pem=False, source='der-cert')
    except Exception as e:
        logger.debug('DER processing failed: %s : %s' % (name, e))
        self.trace_logger.log(e)
[ "def", "process_der", "(", "self", ",", "data", ",", "name", ")", ":", "from", "cryptography", ".", "x509", ".", "base", "import", "load_der_x509_certificate", "try", ":", "x509", "=", "load_der_x509_certificate", "(", "data", ",", "self", ".", "get_backend", "(", ")", ")", "self", ".", "num_der_certs", "+=", "1", "return", "self", ".", "process_x509", "(", "x509", ",", "name", "=", "name", ",", "pem", "=", "False", ",", "source", "=", "'der-cert'", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'DER processing failed: %s : %s'", "%", "(", "name", ",", "e", ")", ")", "self", ".", "trace_logger", ".", "log", "(", "e", ")" ]
33.625
0.005425
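For context, parsing a DER-encoded certificate with the cryptography package (which the method above wraps) looks roughly like the sketch below; the input file name is hypothetical:

from cryptography.x509 import load_der_x509_certificate
from cryptography.hazmat.backends import default_backend

with open("example-cert.der", "rb") as fh:  # hypothetical DER file
    cert = load_der_x509_certificate(fh.read(), default_backend())
print(cert.subject)  # parsed X.509 subject of the certificate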
def parse_redis_url(redis_url):
    '''
    >>> parse_redis_url('redis://:pass%20@localhost:1234/selecteddb%20')
    ('localhost', 1234, 'pass ', 'selecteddb ')
    >>> parse_redis_url('redis://:pass%20@localhost:1234/')
    ('localhost', 1234, 'pass ', None)
    >>> parse_redis_url('redis://localhost:1234/')
    ('localhost', 1234, None, None)
    >>> parse_redis_url('redis://localhost:1234')
    ('localhost', 1234, None, None)
    >>> parse_redis_url('redis://localhost/')
    ('localhost', 6379, None, None)
    >>> parse_redis_url('redis://')
    ('localhost', 6379, None, None)
    >>> parse_redis_url('redis://')
    ('localhost', 6379, None, None)
    '''
    (use, pas, hos, por, pat) = _parse_url(redis_url, 'redis://', def_port=6379)
    return (hos, por, pas, pat)
[ "def", "parse_redis_url", "(", "redis_url", ")", ":", "(", "use", ",", "pas", ",", "hos", ",", "por", ",", "pat", ")", "=", "_parse_url", "(", "redis_url", ",", "'redis://'", ",", "def_port", "=", "6379", ")", "return", "(", "hos", ",", "por", ",", "pas", ",", "pat", ")" ]
40.052632
0.002567
def sentinel_get_master_ip(master, host=None, port=None, password=None):
    '''
    Get ip for sentinel master

    .. versionadded: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' redis.sentinel_get_master_ip 'mymaster'
    '''
    server = _sconnect(host, port, password)
    ret = server.sentinel_get_master_addr_by_name(master)
    return dict(list(zip(('master_host', 'master_port'), ret)))
[ "def", "sentinel_get_master_ip", "(", "master", ",", "host", "=", "None", ",", "port", "=", "None", ",", "password", "=", "None", ")", ":", "server", "=", "_sconnect", "(", "host", ",", "port", ",", "password", ")", "ret", "=", "server", ".", "sentinel_get_master_addr_by_name", "(", "master", ")", "return", "dict", "(", "list", "(", "zip", "(", "(", "'master_host'", ",", "'master_port'", ")", ",", "ret", ")", ")", ")" ]
27
0.002387
def find_lexer_class_for_filename(_fn, code=None):
    """Get a lexer for a filename.

    If multiple lexers match the filename pattern, use ``analyse_text()`` to
    figure out which one is more appropriate.

    Returns None if not found.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in itervalues(LEXERS):
        for filename in filenames:
            if _fn_matches(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if _fn_matches(fn, filename):
                matches.append((cls, filename))

    if sys.version_info > (3,) and isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = guess_decode(code)

    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class. The default implementation returns None which
        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus, cls.__name__
        return cls.priority + bonus, cls.__name__

    if matches:
        matches.sort(key=get_rating)
        # print "Possible lexers, after sort:", matches
        return matches[-1][0]
[ "def", "find_lexer_class_for_filename", "(", "_fn", ",", "code", "=", "None", ")", ":", "matches", "=", "[", "]", "fn", "=", "basename", "(", "_fn", ")", "for", "modname", ",", "name", ",", "_", ",", "filenames", ",", "_", "in", "itervalues", "(", "LEXERS", ")", ":", "for", "filename", "in", "filenames", ":", "if", "_fn_matches", "(", "fn", ",", "filename", ")", ":", "if", "name", "not", "in", "_lexer_cache", ":", "_load_lexers", "(", "modname", ")", "matches", ".", "append", "(", "(", "_lexer_cache", "[", "name", "]", ",", "filename", ")", ")", "for", "cls", "in", "find_plugin_lexers", "(", ")", ":", "for", "filename", "in", "cls", ".", "filenames", ":", "if", "_fn_matches", "(", "fn", ",", "filename", ")", ":", "matches", ".", "append", "(", "(", "cls", ",", "filename", ")", ")", "if", "sys", ".", "version_info", ">", "(", "3", ",", ")", "and", "isinstance", "(", "code", ",", "bytes", ")", ":", "# decode it, since all analyse_text functions expect unicode", "code", "=", "guess_decode", "(", "code", ")", "def", "get_rating", "(", "info", ")", ":", "cls", ",", "filename", "=", "info", "# explicit patterns get a bonus", "bonus", "=", "'*'", "not", "in", "filename", "and", "0.5", "or", "0", "# The class _always_ defines analyse_text because it's included in", "# the Lexer class. The default implementation returns None which", "# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py", "# to find lexers which need it overridden.", "if", "code", ":", "return", "cls", ".", "analyse_text", "(", "code", ")", "+", "bonus", ",", "cls", ".", "__name__", "return", "cls", ".", "priority", "+", "bonus", ",", "cls", ".", "__name__", "if", "matches", ":", "matches", ".", "sort", "(", "key", "=", "get_rating", ")", "# print \"Possible lexers, after sort:\", matches", "return", "matches", "[", "-", "1", "]", "[", "0", "]" ]
38.04878
0.000625
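A hedged usage sketch for the lexer lookup above, assuming a reasonably recent Pygments that re-exports the helper from pygments.lexers:

from pygments.lexers import find_lexer_class_for_filename

cls = find_lexer_class_for_filename('example.py', code='import os\n')
print(cls.__name__)  # e.g. 'PythonLexer'; note a lexer class is returned, not an instance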
def create_ca_file(anchor_list, filename):
    """
    Concatenate all the certificates (PEM format for the export) in
    'anchor_list' and write the result to file 'filename'. On success
    'filename' is returned, None otherwise.

    If you are used to OpenSSL tools, this function builds a CAfile that
    can be used for certificate and CRL check.

    Also see create_temporary_ca_file().
    """
    try:
        f = open(filename, "w")
        for a in anchor_list:
            s = a.output(fmt="PEM")
            f.write(s)
        f.close()
    except:
        return None
    return filename
[ "def", "create_ca_file", "(", "anchor_list", ",", "filename", ")", ":", "try", ":", "f", "=", "open", "(", "filename", ",", "\"w\"", ")", "for", "a", "in", "anchor_list", ":", "s", "=", "a", ".", "output", "(", "fmt", "=", "\"PEM\"", ")", "f", ".", "write", "(", "s", ")", "f", ".", "close", "(", ")", "except", ":", "return", "None", "return", "filename" ]
29.2
0.003317
def create_bug(self, request):
    """
    Create a bugzilla bug with passed params
    """
    if settings.BUGFILER_API_KEY is None:
        return Response({"failure": "Bugzilla API key not set!"},
                        status=HTTP_400_BAD_REQUEST)

    params = request.data

    # Arbitrarily cap crash signatures at 2048 characters to prevent perf issues on bmo
    crash_signature = params.get("crash_signature")
    if crash_signature and len(crash_signature) > 2048:
        return Response({"failure": "Crash signature can't be more than 2048 characters."},
                        status=HTTP_400_BAD_REQUEST)

    description = u"**Filed by:** {}\n{}".format(
        request.user.email.replace('@', " [at] "),
        params.get("comment", "")
    ).encode("utf-8")
    summary = params.get("summary").encode("utf-8").strip()
    url = settings.BUGFILER_API_URL + "/rest/bug"
    headers = {
        'x-bugzilla-api-key': settings.BUGFILER_API_KEY,
        'Accept': 'application/json'
    }
    data = {
        'product': params.get("product"),
        'component': params.get("component"),
        'summary': summary,
        'keywords': params.get("keywords"),
        'blocks': params.get("blocks"),
        'depends_on': params.get("depends_on"),
        'see_also': params.get("see_also"),
        'version': params.get("version"),
        'cf_crash_signature': params.get("crash_signature"),
        'severity': params.get("severity"),
        'priority': params.get("priority"),
        'description': description,
        'comment_tags': "treeherder",
    }

    try:
        response = make_request(url, method='POST', headers=headers, json=data)
    except requests.exceptions.HTTPError as e:
        try:
            message = e.response.json()['message']
        except (ValueError, KeyError):
            message = e.response.text
        return Response({"failure": message}, status=HTTP_400_BAD_REQUEST)

    return Response({"success": response.json()["id"]})
[ "def", "create_bug", "(", "self", ",", "request", ")", ":", "if", "settings", ".", "BUGFILER_API_KEY", "is", "None", ":", "return", "Response", "(", "{", "\"failure\"", ":", "\"Bugzilla API key not set!\"", "}", ",", "status", "=", "HTTP_400_BAD_REQUEST", ")", "params", "=", "request", ".", "data", "# Arbitrarily cap crash signatures at 2048 characters to prevent perf issues on bmo", "crash_signature", "=", "params", ".", "get", "(", "\"crash_signature\"", ")", "if", "crash_signature", "and", "len", "(", "crash_signature", ")", ">", "2048", ":", "return", "Response", "(", "{", "\"failure\"", ":", "\"Crash signature can't be more than 2048 characters.\"", "}", ",", "status", "=", "HTTP_400_BAD_REQUEST", ")", "description", "=", "u\"**Filed by:** {}\\n{}\"", ".", "format", "(", "request", ".", "user", ".", "email", ".", "replace", "(", "'@'", ",", "\" [at] \"", ")", ",", "params", ".", "get", "(", "\"comment\"", ",", "\"\"", ")", ")", ".", "encode", "(", "\"utf-8\"", ")", "summary", "=", "params", ".", "get", "(", "\"summary\"", ")", ".", "encode", "(", "\"utf-8\"", ")", ".", "strip", "(", ")", "url", "=", "settings", ".", "BUGFILER_API_URL", "+", "\"/rest/bug\"", "headers", "=", "{", "'x-bugzilla-api-key'", ":", "settings", ".", "BUGFILER_API_KEY", ",", "'Accept'", ":", "'application/json'", "}", "data", "=", "{", "'product'", ":", "params", ".", "get", "(", "\"product\"", ")", ",", "'component'", ":", "params", ".", "get", "(", "\"component\"", ")", ",", "'summary'", ":", "summary", ",", "'keywords'", ":", "params", ".", "get", "(", "\"keywords\"", ")", ",", "'blocks'", ":", "params", ".", "get", "(", "\"blocks\"", ")", ",", "'depends_on'", ":", "params", ".", "get", "(", "\"depends_on\"", ")", ",", "'see_also'", ":", "params", ".", "get", "(", "\"see_also\"", ")", ",", "'version'", ":", "params", ".", "get", "(", "\"version\"", ")", ",", "'cf_crash_signature'", ":", "params", ".", "get", "(", "\"crash_signature\"", ")", ",", "'severity'", ":", "params", ".", "get", "(", "\"severity\"", ")", ",", "'priority'", ":", "params", ".", "get", "(", "\"priority\"", ")", ",", "'description'", ":", "description", ",", "'comment_tags'", ":", "\"treeherder\"", ",", "}", "try", ":", "response", "=", "make_request", "(", "url", ",", "method", "=", "'POST'", ",", "headers", "=", "headers", ",", "json", "=", "data", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "try", ":", "message", "=", "e", ".", "response", ".", "json", "(", ")", "[", "'message'", "]", "except", "(", "ValueError", ",", "KeyError", ")", ":", "message", "=", "e", ".", "response", ".", "text", "return", "Response", "(", "{", "\"failure\"", ":", "message", "}", ",", "status", "=", "HTTP_400_BAD_REQUEST", ")", "return", "Response", "(", "{", "\"success\"", ":", "response", ".", "json", "(", ")", "[", "\"id\"", "]", "}", ")" ]
40.846154
0.002299
def concat_batch_variantcalls(items, region_block=True, skip_jointcheck=False):
    """CWL entry point: combine variant calls from regions into single VCF.
    """
    items = [utils.to_single_data(x) for x in items]
    batch_name = _get_batch_name(items, skip_jointcheck)
    variantcaller = _get_batch_variantcaller(items)
    # Pre-called input variant files
    if not variantcaller and all(d.get("vrn_file") for d in items):
        return {"vrn_file": items[0]["vrn_file"]}
    out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, "%s.vcf.gz" % (batch_name))
    utils.safe_makedir(os.path.dirname(out_file))
    if region_block:
        regions = [_region_to_coords(rs[0]) for rs in items[0]["region_block"]]
    else:
        regions = [_region_to_coords(r) for r in items[0]["region"]]
    vrn_file_regions = items[0]["vrn_file_region"]
    out_file = vcfutils.concat_variant_files(vrn_file_regions, out_file, regions,
                                             dd.get_ref_file(items[0]), items[0]["config"])
    return {"vrn_file": out_file}
[ "def", "concat_batch_variantcalls", "(", "items", ",", "region_block", "=", "True", ",", "skip_jointcheck", "=", "False", ")", ":", "items", "=", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "items", "]", "batch_name", "=", "_get_batch_name", "(", "items", ",", "skip_jointcheck", ")", "variantcaller", "=", "_get_batch_variantcaller", "(", "items", ")", "# Pre-called input variant files", "if", "not", "variantcaller", "and", "all", "(", "d", ".", "get", "(", "\"vrn_file\"", ")", "for", "d", "in", "items", ")", ":", "return", "{", "\"vrn_file\"", ":", "items", "[", "0", "]", "[", "\"vrn_file\"", "]", "}", "out_file", "=", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "items", "[", "0", "]", ")", ",", "variantcaller", ",", "\"%s.vcf.gz\"", "%", "(", "batch_name", ")", ")", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "dirname", "(", "out_file", ")", ")", "if", "region_block", ":", "regions", "=", "[", "_region_to_coords", "(", "rs", "[", "0", "]", ")", "for", "rs", "in", "items", "[", "0", "]", "[", "\"region_block\"", "]", "]", "else", ":", "regions", "=", "[", "_region_to_coords", "(", "r", ")", "for", "r", "in", "items", "[", "0", "]", "[", "\"region\"", "]", "]", "vrn_file_regions", "=", "items", "[", "0", "]", "[", "\"vrn_file_region\"", "]", "out_file", "=", "vcfutils", ".", "concat_variant_files", "(", "vrn_file_regions", ",", "out_file", ",", "regions", ",", "dd", ".", "get_ref_file", "(", "items", "[", "0", "]", ")", ",", "items", "[", "0", "]", "[", "\"config\"", "]", ")", "return", "{", "\"vrn_file\"", ":", "out_file", "}" ]
55.210526
0.003749
def _split_line(s, parts):
    """
    Parameters
    ----------
    s: string
        Fixed-length string to split
    parts: list of (name, length) pairs
        Used to break up string, name '_' will be filtered from output.

    Returns
    -------
    Dict of name:contents of string at given location.
    """
    out = {}
    start = 0
    for name, length in parts:
        out[name] = s[start:start + length].strip()
        start += length
    del out['_']
    return out
[ "def", "_split_line", "(", "s", ",", "parts", ")", ":", "out", "=", "{", "}", "start", "=", "0", "for", "name", ",", "length", "in", "parts", ":", "out", "[", "name", "]", "=", "s", "[", "start", ":", "start", "+", "length", "]", ".", "strip", "(", ")", "start", "+=", "length", "del", "out", "[", "'_'", "]", "return", "out" ]
23.1
0.002079
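A small worked example of the fixed-width splitting above; the field layout is invented for illustration and assumes _split_line is in scope:

record = "Ada     036  "                      # 8-char name, 3-char age, 2-char filler
layout = [("name", 8), ("age", 3), ("_", 2)]  # '_' fields are dropped from the result
print(_split_line(record, layout))            # {'name': 'Ada', 'age': '036'}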
def analyzeThing(originalThing2):
    """analyze an object and all its attributes. Returns a dictionary."""
    originalThing = copy.copy(originalThing2)
    things = {}
    for name in sorted(dir(originalThing)):
        print("analyzing", name)
        thing = copy.copy(originalThing)
        if name in webinspect.blacklist or name.lower() in webinspect.blacklist:
            item = "DID NOT EVALUATE (this will appear as a string)"
        else:
            item = getattr(thing, name)
        itemType = type(item).__name__
        itemStr = thingToString(item)
        itemEval = ""
        if "method" in itemStr:
            if name in webinspect.blacklist or name.lower() in webinspect.blacklist:
                itemEval = "DID NOT EVALUATE"
            else:
                print("executing %s()" % name)
                print("I'm about to try...")
                try:
                    itemEval = thingToString(getattr(thing, name)())
                except Exception as e:
                    exceptionToString(e)
        #print("[%s] (%s) %s {%s}"%(name,itemType,itemStr,itemEval))
        things[name] = [itemType, itemStr, itemEval]
    return things
[ "def", "analyzeThing", "(", "originalThing2", ")", ":", "originalThing", "=", "copy", ".", "copy", "(", "originalThing2", ")", "things", "=", "{", "}", "for", "name", "in", "sorted", "(", "dir", "(", "originalThing", ")", ")", ":", "print", "(", "\"analyzing\"", ",", "name", ")", "thing", "=", "copy", ".", "copy", "(", "originalThing", ")", "if", "name", "in", "webinspect", ".", "blacklist", "or", "name", ".", "lower", "(", ")", "in", "webinspect", ".", "blacklist", ":", "item", "=", "\"DID NOT EVALUATE (this will appear as a string)\"", "else", ":", "item", "=", "getattr", "(", "thing", ",", "name", ")", "itemType", "=", "type", "(", "item", ")", ".", "__name__", "itemStr", "=", "thingToString", "(", "item", ")", "itemEval", "=", "\"\"", "if", "\"method\"", "in", "itemStr", ":", "if", "name", "in", "webinspect", ".", "blacklist", "or", "name", ".", "lower", "(", ")", "in", "webinspect", ".", "blacklist", ":", "itemEval", "=", "\"DID NOT EVALUATE\"", "else", ":", "print", "(", "\"executing %s()\"", "%", "name", ")", "print", "(", "\"I'm about to try...\"", ")", "try", ":", "itemEval", "=", "thingToString", "(", "getattr", "(", "thing", ",", "name", ")", "(", ")", ")", "except", "Exception", "as", "e", ":", "exceptionToString", "(", "e", ")", "#print(\"[%s] (%s) %s {%s}\"%(name,itemType,itemStr,itemEval))\r", "things", "[", "name", "]", "=", "[", "itemType", ",", "itemStr", ",", "itemEval", "]", "return", "things" ]
39.655172
0.016978
def minlen(min_length,
           strict=False  # type: bool
           ):
    """
    'Minimum length' validation_function generator.
    Returns a validation_function to check that len(x) >= min_length (strict=False, default)
    or len(x) > min_length (strict=True)

    :param min_length: minimum length for x
    :param strict: Boolean flag to switch between len(x) >= min_length (strict=False) and
        len(x) > min_length (strict=True)
    :return:
    """
    if strict:
        def minlen_(x):
            if len(x) > min_length:
                return True
            else:
                # raise Failure('minlen: len(x) > ' + str(min_length) + ' does not hold for x=' + str(x))
                raise TooShort(wrong_value=x, min_length=min_length, strict=True)
    else:
        def minlen_(x):
            if len(x) >= min_length:
                return True
            else:
                # raise Failure('minlen: len(x) >= ' + str(min_length) + ' does not hold for x=' + str(x))
                raise TooShort(wrong_value=x, min_length=min_length, strict=False)

    minlen_.__name__ = 'length_{}greater_than_{}'.format('strictly_' if strict else '', min_length)
    return minlen_
[ "def", "minlen", "(", "min_length", ",", "strict", "=", "False", "# type: bool", ")", ":", "if", "strict", ":", "def", "minlen_", "(", "x", ")", ":", "if", "len", "(", "x", ")", ">", "min_length", ":", "return", "True", "else", ":", "# raise Failure('minlen: len(x) > ' + str(min_length) + ' does not hold for x=' + str(x))", "raise", "TooShort", "(", "wrong_value", "=", "x", ",", "min_length", "=", "min_length", ",", "strict", "=", "True", ")", "else", ":", "def", "minlen_", "(", "x", ")", ":", "if", "len", "(", "x", ")", ">=", "min_length", ":", "return", "True", "else", ":", "# raise Failure('minlen: len(x) >= ' + str(min_length) + ' does not hold for x=' + str(x))", "raise", "TooShort", "(", "wrong_value", "=", "x", ",", "min_length", "=", "min_length", ",", "strict", "=", "False", ")", "minlen_", ".", "__name__", "=", "'length_{}greater_than_{}'", ".", "format", "(", "'strictly_'", "if", "strict", "else", "''", ",", "min_length", ")", "return", "minlen_" ]
38.966667
0.006678
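A brief illustration of the generated validator's behaviour, assuming minlen and its TooShort failure type come from the same validation library as the record above:

check_min3 = minlen(3)            # len(x) >= 3, non-strict by default
print(check_min3("abc"))          # True
try:
    check_min3("ab")              # too short: raises TooShort
except Exception as err:          # catching broadly since TooShort's base class is not shown here
    print(type(err).__name__)     # 'TooShort'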
def comparison_operator_query(comparison_operator):
    """Generate comparison operator checking function."""
    def _comparison_operator_query(expression):
        """Apply binary operator to expression."""
        def _apply_comparison_operator(index, expression=expression):
            """Return store key for documents that satisfy expression."""
            ev = expression() if callable(expression) else expression
            return [
                store_key
                for value, store_keys in index.get_index().items()
                if comparison_operator(value, ev)
                for store_key in store_keys
            ]
        return _apply_comparison_operator
    return _comparison_operator_query
[ "def", "comparison_operator_query", "(", "comparison_operator", ")", ":", "def", "_comparison_operator_query", "(", "expression", ")", ":", "\"\"\"Apply binary operator to expression.\"\"\"", "def", "_apply_comparison_operator", "(", "index", ",", "expression", "=", "expression", ")", ":", "\"\"\"Return store key for documents that satisfy expression.\"\"\"", "ev", "=", "expression", "(", ")", "if", "callable", "(", "expression", ")", "else", "expression", "return", "[", "store_key", "for", "value", ",", "store_keys", "in", "index", ".", "get_index", "(", ")", ".", "items", "(", ")", "if", "comparison_operator", "(", "value", ",", "ev", ")", "for", "store_key", "in", "store_keys", "]", "return", "_apply_comparison_operator", "return", "_comparison_operator_query" ]
45.3125
0.001351
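A self-contained sketch of how the three nested layers above compose; the FakeIndex class is a stand-in invented so the example runs on its own, assuming comparison_operator_query is in scope:

import operator

class FakeIndex(object):
    """Minimal stand-in exposing get_index() -> {indexed value: [store keys]}."""
    def get_index(self):
        return {10: ["a"], 25: ["b", "c"], 40: ["d"]}

greater_than = comparison_operator_query(operator.gt)  # build a '>' query factory
query = greater_than(20)                               # fix the right-hand side of the comparison
print(query(FakeIndex()))                              # ['b', 'c', 'd']: values 25 and 40 satisfy > 20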
def pdf_saver(filehandle, *args, **kwargs):
    "Uses werkzeug.FileStorage instance to save the converted image."
    fullpath = get_save_path(filehandle.filename)
    filehandle.save(fullpath, buffer_size=kwargs.get('buffer_size', 16384))
[ "def", "pdf_saver", "(", "filehandle", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "fullpath", "=", "get_save_path", "(", "filehandle", ".", "filename", ")", "filehandle", ".", "save", "(", "fullpath", ",", "buffer_size", "=", "kwargs", ".", "get", "(", "'buffer_size'", ",", "16384", ")", ")" ]
59
0.004184
def set_permissions(username, permissions, uid=None):
    '''
    Configure users permissions

    CLI Example:

    .. code-block:: bash

        salt dell drac.set_permissions [USERNAME] [PRIVILEGES] [USER INDEX - optional]
        salt dell drac.set_permissions diana login,test_alerts,clear_logs 4

    DRAC Privileges
      * login : Login to iDRAC
      * drac : Configure iDRAC
      * user_management : Configure Users
      * clear_logs : Clear Logs
      * server_control_commands : Execute Server Control Commands
      * console_redirection : Access Console Redirection
      * virtual_media : Access Virtual Media
      * test_alerts : Test Alerts
      * debug_commands : Execute Debug Commands
    '''
    privileges = {'login': '0x0000001',
                  'drac': '0x0000002',
                  'user_management': '0x0000004',
                  'clear_logs': '0x0000008',
                  'server_control_commands': '0x0000010',
                  'console_redirection': '0x0000020',
                  'virtual_media': '0x0000040',
                  'test_alerts': '0x0000080',
                  'debug_commands': '0x0000100'}

    permission = 0

    # When users don't provide a user ID we need to search for this
    if uid is None:
        user = list_users()
        uid = user[username]['index']

    # Generate privilege bit mask
    for i in permissions.split(','):
        perm = i.strip()
        if perm in privileges:
            permission += int(privileges[perm], 16)

    return __execute_cmd('config -g cfgUserAdmin -o \
        cfgUserAdminPrivilege -i {0} 0x{1:08X}'.format(uid, permission))
[ "def", "set_permissions", "(", "username", ",", "permissions", ",", "uid", "=", "None", ")", ":", "privileges", "=", "{", "'login'", ":", "'0x0000001'", ",", "'drac'", ":", "'0x0000002'", ",", "'user_management'", ":", "'0x0000004'", ",", "'clear_logs'", ":", "'0x0000008'", ",", "'server_control_commands'", ":", "'0x0000010'", ",", "'console_redirection'", ":", "'0x0000020'", ",", "'virtual_media'", ":", "'0x0000040'", ",", "'test_alerts'", ":", "'0x0000080'", ",", "'debug_commands'", ":", "'0x0000100'", "}", "permission", "=", "0", "# When users don't provide a user ID we need to search for this", "if", "uid", "is", "None", ":", "user", "=", "list_users", "(", ")", "uid", "=", "user", "[", "username", "]", "[", "'index'", "]", "# Generate privilege bit mask", "for", "i", "in", "permissions", ".", "split", "(", "','", ")", ":", "perm", "=", "i", ".", "strip", "(", ")", "if", "perm", "in", "privileges", ":", "permission", "+=", "int", "(", "privileges", "[", "perm", "]", ",", "16", ")", "return", "__execute_cmd", "(", "'config -g cfgUserAdmin -o \\\n cfgUserAdminPrivilege -i {0} 0x{1:08X}'", ".", "format", "(", "uid", ",", "permission", ")", ")" ]
34.979167
0.001159
def _apply_scope(self, config, tags):
    """Add locally scoped tags to config"""
    if isinstance(config, dict):
        # Recursively _apply_scope for each item in the config
        for val in config.values():
            self._apply_scope(val, tags)
    elif isinstance(config, list):
        # Recursively _apply_scope for each item in the config
        for item in config:
            self._apply_scope(item, tags)
    elif isinstance(config, TaggedValue):
        tagged_value = config
        # add all local tags
        tagged_value.tags |= set(tags)
        for tag in tags:
            if not hasattr(tagged_value, tag):
                setattr(tagged_value, tag, False)
[ "def", "_apply_scope", "(", "self", ",", "config", ",", "tags", ")", ":", "if", "isinstance", "(", "config", ",", "dict", ")", ":", "# Recursively _apply_scope for each item in the config", "for", "val", "in", "config", ".", "values", "(", ")", ":", "self", ".", "_apply_scope", "(", "val", ",", "tags", ")", "elif", "isinstance", "(", "config", ",", "list", ")", ":", "# Recursively _apply_scope for each item in the config", "for", "item", "in", "config", ":", "self", ".", "_apply_scope", "(", "item", ",", "tags", ")", "elif", "isinstance", "(", "config", ",", "TaggedValue", ")", ":", "tagged_value", "=", "config", "# add all local tags", "tagged_value", ".", "tags", "|=", "set", "(", "tags", ")", "for", "tag", "in", "tags", ":", "if", "not", "hasattr", "(", "tagged_value", ",", "tag", ")", ":", "setattr", "(", "tagged_value", ",", "tag", ",", "False", ")" ]
36.6
0.002663
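To make the recursion above concrete, here is a stand-alone sketch with invented TaggedValue and Scoper classes that mirror the structure of the method; it is illustrative only, not the original implementation:

class TaggedValue(object):
    """Stand-in for a config value that accumulates tags."""
    def __init__(self, value):
        self.value = value
        self.tags = set()

class Scoper(object):
    def _apply_scope(self, config, tags):
        # same traversal as above: dicts and lists recurse, TaggedValues collect tags
        if isinstance(config, dict):
            for val in config.values():
                self._apply_scope(val, tags)
        elif isinstance(config, list):
            for item in config:
                self._apply_scope(item, tags)
        elif isinstance(config, TaggedValue):
            config.tags |= set(tags)
            for tag in tags:
                if not hasattr(config, tag):
                    setattr(config, tag, False)

cfg = {"db": [TaggedValue("postgres")], "debug": TaggedValue(True)}
Scoper()._apply_scope(cfg, ["prod", "eu"])
print(sorted(cfg["db"][0].tags))  # ['eu', 'prod']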
def prepare_mvpa_data(images, conditions, mask):
    """Prepare data for activity-based model training and prediction.

    Average the activity within epochs and z-score within subject.

    Parameters
    ----------
    images: Iterable[SpatialImage]
        Data.
    conditions: List[UniqueLabelConditionSpec]
        Condition specification.
    mask: np.ndarray
        Mask to apply to each image.

    Returns
    -------
    processed_data: 2D array in shape [num_voxels, num_epochs]
        averaged epoch by epoch processed data
    labels: 1D array
        contains labels of the data
    """
    activity_data = list(mask_images(images, mask, np.float32))
    epoch_info = generate_epochs_info(conditions)
    num_epochs = len(epoch_info)
    (d1, _) = activity_data[0].shape
    processed_data = np.empty([d1, num_epochs])
    labels = np.empty(num_epochs)
    subject_count = [0]  # counting the epochs per subject for z-scoring
    cur_sid = -1
    # averaging
    for idx, epoch in enumerate(epoch_info):
        labels[idx] = epoch[0]
        if cur_sid != epoch[1]:
            subject_count.append(0)
            cur_sid = epoch[1]
        subject_count[-1] += 1
        processed_data[:, idx] = \
            np.mean(activity_data[cur_sid][:, epoch[2]:epoch[3]], axis=1)
    # z-scoring
    cur_epoch = 0
    for i in subject_count:
        if i > 1:
            processed_data[:, cur_epoch:cur_epoch + i] = \
                zscore(processed_data[:, cur_epoch:cur_epoch + i], axis=1, ddof=0)
        cur_epoch += i
    # if zscore fails (standard deviation is zero),
    # set all values to be zero
    processed_data = np.nan_to_num(processed_data)

    return processed_data, labels
[ "def", "prepare_mvpa_data", "(", "images", ",", "conditions", ",", "mask", ")", ":", "activity_data", "=", "list", "(", "mask_images", "(", "images", ",", "mask", ",", "np", ".", "float32", ")", ")", "epoch_info", "=", "generate_epochs_info", "(", "conditions", ")", "num_epochs", "=", "len", "(", "epoch_info", ")", "(", "d1", ",", "_", ")", "=", "activity_data", "[", "0", "]", ".", "shape", "processed_data", "=", "np", ".", "empty", "(", "[", "d1", ",", "num_epochs", "]", ")", "labels", "=", "np", ".", "empty", "(", "num_epochs", ")", "subject_count", "=", "[", "0", "]", "# counting the epochs per subject for z-scoring", "cur_sid", "=", "-", "1", "# averaging", "for", "idx", ",", "epoch", "in", "enumerate", "(", "epoch_info", ")", ":", "labels", "[", "idx", "]", "=", "epoch", "[", "0", "]", "if", "cur_sid", "!=", "epoch", "[", "1", "]", ":", "subject_count", ".", "append", "(", "0", ")", "cur_sid", "=", "epoch", "[", "1", "]", "subject_count", "[", "-", "1", "]", "+=", "1", "processed_data", "[", ":", ",", "idx", "]", "=", "np", ".", "mean", "(", "activity_data", "[", "cur_sid", "]", "[", ":", ",", "epoch", "[", "2", "]", ":", "epoch", "[", "3", "]", "]", ",", "axis", "=", "1", ")", "# z-scoring", "cur_epoch", "=", "0", "for", "i", "in", "subject_count", ":", "if", "i", ">", "1", ":", "processed_data", "[", ":", ",", "cur_epoch", ":", "cur_epoch", "+", "i", "]", "=", "zscore", "(", "processed_data", "[", ":", ",", "cur_epoch", ":", "cur_epoch", "+", "i", "]", ",", "axis", "=", "1", ",", "ddof", "=", "0", ")", "cur_epoch", "+=", "i", "# if zscore fails (standard deviation is zero),", "# set all values to be zero", "processed_data", "=", "np", ".", "nan_to_num", "(", "processed_data", ")", "return", "processed_data", ",", "labels" ]
32.692308
0.000571
def install(name, minimum_version=None, required_version=None, scope=None, repository=None):
    '''
    Install a Powershell module from powershell gallery on the system.

    :param name: Name of a Powershell module
    :type  name: ``str``

    :param minimum_version: The minimum version to install, e.g. 1.23.2
    :type  minimum_version: ``str``

    :param required_version: Install a specific version
    :type  required_version: ``str``

    :param scope: The scope to install the module to, e.g. CurrentUser, Computer
    :type  scope: ``str``

    :param repository: The friendly name of a private repository, e.g. MyRepo
    :type  repository: ``str``

    CLI Example:

    .. code-block:: bash

        salt 'win01' psget.install PowerPlan
    '''
    # Putting quotes around the parameter protects against command injection
    flags = [('Name', name)]

    if minimum_version is not None:
        flags.append(('MinimumVersion', minimum_version))
    if required_version is not None:
        flags.append(('RequiredVersion', required_version))
    if scope is not None:
        flags.append(('Scope', scope))
    if repository is not None:
        flags.append(('Repository', repository))
    params = ''
    for flag, value in flags:
        params += '-{0} {1} '.format(flag, value)
    cmd = 'Install-Module {0} -Force'.format(params)
    _pshell(cmd)
    return name in list_modules()
[ "def", "install", "(", "name", ",", "minimum_version", "=", "None", ",", "required_version", "=", "None", ",", "scope", "=", "None", ",", "repository", "=", "None", ")", ":", "# Putting quotes around the parameter protects against command injection", "flags", "=", "[", "(", "'Name'", ",", "name", ")", "]", "if", "minimum_version", "is", "not", "None", ":", "flags", ".", "append", "(", "(", "'MinimumVersion'", ",", "minimum_version", ")", ")", "if", "required_version", "is", "not", "None", ":", "flags", ".", "append", "(", "(", "'RequiredVersion'", ",", "required_version", ")", ")", "if", "scope", "is", "not", "None", ":", "flags", ".", "append", "(", "(", "'Scope'", ",", "scope", ")", ")", "if", "repository", "is", "not", "None", ":", "flags", ".", "append", "(", "(", "'Repository'", ",", "repository", ")", ")", "params", "=", "''", "for", "flag", ",", "value", "in", "flags", ":", "params", "+=", "'-{0} {1} '", ".", "format", "(", "flag", ",", "value", ")", "cmd", "=", "'Install-Module {0} -Force'", ".", "format", "(", "params", ")", "_pshell", "(", "cmd", ")", "return", "name", "in", "list_modules", "(", ")" ]
31.953488
0.001412
def build(self, filename, bytecode_compile=True):
    """Package the PEX into a zipfile.

    :param filename: The filename where the PEX should be stored.
    :param bytecode_compile: If True, precompile .py files into .pyc files.

    If the PEXBuilder is not yet frozen, it will be frozen by ``build``. This renders the
    PEXBuilder immutable.
    """
    if not self._frozen:
        self.freeze(bytecode_compile=bytecode_compile)
    try:
        os.unlink(filename + '~')
        self._logger.warn('Previous binary unexpectedly exists, cleaning: %s' % (filename + '~'))
    except OSError:
        # The expectation is that the file does not exist, so continue
        pass
    if os.path.dirname(filename):
        safe_mkdir(os.path.dirname(filename))
    with open(filename + '~', 'ab') as pexfile:
        assert os.path.getsize(pexfile.name) == 0
        pexfile.write(to_bytes('%s\n' % self._shebang))
    self._chroot.zip(filename + '~', mode='a')
    if os.path.exists(filename):
        os.unlink(filename)
    os.rename(filename + '~', filename)
    chmod_plus_x(filename)
[ "def", "build", "(", "self", ",", "filename", ",", "bytecode_compile", "=", "True", ")", ":", "if", "not", "self", ".", "_frozen", ":", "self", ".", "freeze", "(", "bytecode_compile", "=", "bytecode_compile", ")", "try", ":", "os", ".", "unlink", "(", "filename", "+", "'~'", ")", "self", ".", "_logger", ".", "warn", "(", "'Previous binary unexpectedly exists, cleaning: %s'", "%", "(", "filename", "+", "'~'", ")", ")", "except", "OSError", ":", "# The expectation is that the file does not exist, so continue", "pass", "if", "os", ".", "path", ".", "dirname", "(", "filename", ")", ":", "safe_mkdir", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "with", "open", "(", "filename", "+", "'~'", ",", "'ab'", ")", "as", "pexfile", ":", "assert", "os", ".", "path", ".", "getsize", "(", "pexfile", ".", "name", ")", "==", "0", "pexfile", ".", "write", "(", "to_bytes", "(", "'%s\\n'", "%", "self", ".", "_shebang", ")", ")", "self", ".", "_chroot", ".", "zip", "(", "filename", "+", "'~'", ",", "mode", "=", "'a'", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "os", ".", "unlink", "(", "filename", ")", "os", ".", "rename", "(", "filename", "+", "'~'", ",", "filename", ")", "chmod_plus_x", "(", "filename", ")" ]
38.777778
0.011184
def run_shell_command(
    state, host, command,
    get_pty=False, timeout=None, print_output=False,
    **command_kwargs
):
    '''
    Execute a command on the local machine.

    Args:
        state (``pyinfra.api.State`` obj): state object for this command
        hostname (string): hostname of the target
        command (string): actual command to execute
        sudo (boolean): whether to wrap the command with sudo
        sudo_user (string): user to sudo to
        get_pty (boolean): whether to get a PTY before executing the command
        env (dict): environment variables to set
        timeout (int): timeout for this command to complete before erroring

    Returns:
        tuple: (exit_code, stdout, stderr)
        stdout and stderr are both lists of strings from each buffer.
    '''

    command = make_command(command, **command_kwargs)

    logger.debug('--> Running command on localhost: {0}'.format(command))

    if print_output:
        print('{0}>>> {1}'.format(host.print_prefix, command))

    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)

    # Iterate through outputs to get an exit status and generate desired list
    # output, done in two greenlets so stdout isn't printed before stderr. Not
    # attached to state.pool to avoid blocking it with 2x n-hosts greenlets.
    stdout_reader = gevent.spawn(
        read_buffer, process.stdout,
        print_output=print_output,
        print_func=lambda line: '{0}{1}'.format(host.print_prefix, line),
    )
    stderr_reader = gevent.spawn(
        read_buffer, process.stderr,
        print_output=print_output,
        print_func=lambda line: '{0}{1}'.format(
            host.print_prefix, click.style(line, 'red'),
        ),
    )

    # Wait on output, with our timeout (or None)
    greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)

    # Timeout doesn't raise an exception, but gevent.wait returns the greenlets
    # which did complete. So if both haven't completed, we kill them and fail
    # with a timeout.
    if len(greenlets) != 2:
        stdout_reader.kill()
        stderr_reader.kill()

        raise timeout_error()

    # Read the buffers into a list of lines
    stdout = stdout_reader.get()
    stderr = stderr_reader.get()

    logger.debug('--> Waiting for exit status...')
    process.wait()

    # Close any open file descriptor
    process.stdout.close()

    logger.debug('--> Command exit status: {0}'.format(process.returncode))

    return process.returncode == 0, stdout, stderr
[ "def", "run_shell_command", "(", "state", ",", "host", ",", "command", ",", "get_pty", "=", "False", ",", "timeout", "=", "None", ",", "print_output", "=", "False", ",", "*", "*", "command_kwargs", ")", ":", "command", "=", "make_command", "(", "command", ",", "*", "*", "command_kwargs", ")", "logger", ".", "debug", "(", "'--> Running command on localhost: {0}'", ".", "format", "(", "command", ")", ")", "if", "print_output", ":", "print", "(", "'{0}>>> {1}'", ".", "format", "(", "host", ".", "print_prefix", ",", "command", ")", ")", "process", "=", "Popen", "(", "command", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "# Iterate through outputs to get an exit status and generate desired list", "# output, done in two greenlets so stdout isn't printed before stderr. Not", "# attached to state.pool to avoid blocking it with 2x n-hosts greenlets.", "stdout_reader", "=", "gevent", ".", "spawn", "(", "read_buffer", ",", "process", ".", "stdout", ",", "print_output", "=", "print_output", ",", "print_func", "=", "lambda", "line", ":", "'{0}{1}'", ".", "format", "(", "host", ".", "print_prefix", ",", "line", ")", ",", ")", "stderr_reader", "=", "gevent", ".", "spawn", "(", "read_buffer", ",", "process", ".", "stderr", ",", "print_output", "=", "print_output", ",", "print_func", "=", "lambda", "line", ":", "'{0}{1}'", ".", "format", "(", "host", ".", "print_prefix", ",", "click", ".", "style", "(", "line", ",", "'red'", ")", ",", ")", ",", ")", "# Wait on output, with our timeout (or None)", "greenlets", "=", "gevent", ".", "wait", "(", "(", "stdout_reader", ",", "stderr_reader", ")", ",", "timeout", "=", "timeout", ")", "# Timeout doesn't raise an exception, but gevent.wait returns the greenlets", "# which did complete. So if both haven't completed, we kill them and fail", "# with a timeout.", "if", "len", "(", "greenlets", ")", "!=", "2", ":", "stdout_reader", ".", "kill", "(", ")", "stderr_reader", ".", "kill", "(", ")", "raise", "timeout_error", "(", ")", "# Read the buffers into a list of lines", "stdout", "=", "stdout_reader", ".", "get", "(", ")", "stderr", "=", "stderr_reader", ".", "get", "(", ")", "logger", ".", "debug", "(", "'--> Waiting for exit status...'", ")", "process", ".", "wait", "(", ")", "# Close any open file descriptor", "process", ".", "stdout", ".", "close", "(", ")", "logger", ".", "debug", "(", "'--> Command exit status: {0}'", ".", "format", "(", "process", ".", "returncode", ")", ")", "return", "process", ".", "returncode", "==", "0", ",", "stdout", ",", "stderr" ]
34.277778
0.000394
def dispatch(self, receiver):
    ''' Dispatch handling of this event to a receiver.

    This method will invoke ``receiver._session_callback_removed`` if it exists.

    '''
    super(SessionCallbackRemoved, self).dispatch(receiver)
    if hasattr(receiver, '_session_callback_removed'):
        receiver._session_callback_removed(self)
[ "def", "dispatch", "(", "self", ",", "receiver", ")", ":", "super", "(", "SessionCallbackRemoved", ",", "self", ")", ".", "dispatch", "(", "receiver", ")", "if", "hasattr", "(", "receiver", ",", "'_session_callback_removed'", ")", ":", "receiver", ".", "_session_callback_removed", "(", "self", ")" ]
36.1
0.005405
def train(dataset, nef, ndf, ngf, nc, batch_size, Z, lr, beta1, epsilon, ctx, check_point, g_dl_weight,
          output_path, checkpoint_path, data_path, activation, num_epoch, save_after_every,
          visualize_after_every, show_after_every):
    '''adversarial training of the VAE
    '''
    #encoder
    z_mu, z_lv, z = encoder(nef, Z, batch_size)
    symE = mx.sym.Group([z_mu, z_lv, z])

    #generator
    symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=Z, activation=activation)

    #discriminator
    h = discriminator1(ndf)
    dloss = discriminator2(ndf)
    symD1 = h
    symD2 = dloss

    # ==============data==============
    X_train, _ = get_data(data_path, activation)
    train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size, shuffle=True)
    rand_iter = RandIter(batch_size, Z)
    label = mx.nd.zeros((batch_size,), ctx=ctx)

    # =============module E=============
    modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
    modE.bind(data_shapes=train_iter.provide_data)
    modE.init_params(initializer=mx.init.Normal(0.02))
    modE.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-6,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods = [modE]

    # =============module G=============
    modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
    modG.bind(data_shapes=rand_iter.provide_data, inputs_need_grad=True)
    modG.init_params(initializer=mx.init.Normal(0.02))
    modG.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-6,
            'beta1': beta1,
            'epsilon': epsilon,
        })
    mods.append(modG)

    # =============module D=============
    modD1 = mx.mod.Module(symD1, label_names=[], context=ctx)
    modD2 = mx.mod.Module(symD2, label_names=('label',), context=ctx)
    modD = mx.mod.SequentialModule()
    modD.add(modD1).add(modD2, take_labels=True, auto_wiring=True)
    modD.bind(data_shapes=train_iter.provide_data,
              label_shapes=[('label', (batch_size,))],
              inputs_need_grad=True)
    modD.init_params(initializer=mx.init.Normal(0.02))
    modD.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-3,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods.append(modD)

    # =============module DL=============
    symDL = DiscriminatorLayerLoss()
    modDL = mx.mod.Module(symbol=symDL, data_names=('data',), label_names=('label',), context=ctx)
    modDL.bind(data_shapes=[('data', (batch_size, nef * 4, 4, 4))],  ################ fix 512 here
               label_shapes=[('label', (batch_size, nef * 4, 4, 4))],
               inputs_need_grad=True)
    modDL.init_params(initializer=mx.init.Normal(0.02))
    modDL.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 0.,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })

    # =============module KL=============
    symKL = KLDivergenceLoss()
    modKL = mx.mod.Module(symbol=symKL, data_names=('data',), label_names=None, context=ctx)
    modKL.bind(data_shapes=[('data', (batch_size*2, Z))], inputs_need_grad=True)
    modKL.init_params(initializer=mx.init.Normal(0.02))
    modKL.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 0.,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods.append(modKL)

    def norm_stat(d):
        return mx.nd.norm(d)/np.sqrt(d.size)
    mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
    mon = None
    if mon is not None:
        for mod in mods:
            pass

    def facc(label, pred):
        '''calculating prediction accuracy
        '''
        pred = pred.ravel()
        label = label.ravel()
        return ((pred > 0.5) == label).mean()

    def fentropy(label, pred):
        '''calculating binary cross-entropy loss
        '''
        pred = pred.ravel()
        label = label.ravel()
        return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()

    def kldivergence(label, pred):
        '''calculating KL divergence loss
        '''
        mean, log_var = np.split(pred, 2, axis=0)
        var = np.exp(log_var)
        KLLoss = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
        KLLoss = KLLoss / nElements
        return KLLoss

    mG = mx.metric.CustomMetric(fentropy)
    mD = mx.metric.CustomMetric(fentropy)
    mE = mx.metric.CustomMetric(kldivergence)
    mACC = mx.metric.CustomMetric(facc)

    print('Training...')
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')

    # =============train===============
    for epoch in range(num_epoch):
        train_iter.reset()
        for t, batch in enumerate(train_iter):

            rbatch = rand_iter.next()

            if mon is not None:
                mon.tic()

            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()

            # update discriminator on fake
            label[:] = 0
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            gradD11 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD12 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]

            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])

            #update discriminator on decoded
            modE.forward(batch, is_train=True)
            mu, lv, z = modE.get_outputs()
            z = z.reshape((batch_size, Z, 1, 1))
            sample = mx.io.DataBatch([z], label=None, provide_data=[('rand', (batch_size, Z, 1, 1))])
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 0
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()

            #modD.update()
            gradD21 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD22 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]

            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])

            # update discriminator on real
            label[:] = 1
            batch.label = [label]
            modD.forward(batch, is_train=True)
            lx = [out.copyto(out.context) for out in modD1.get_outputs()]
            modD.backward()
            for gradsr, gradsf, gradsd in zip(modD1._exec_group.grad_arrays, gradD11, gradD21):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    gradr += 0.5 * (gradf + gradd)
            for gradsr, gradsf, gradsd in zip(modD2._exec_group.grad_arrays, gradD12, gradD22):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    gradr += 0.5 * (gradf + gradd)
            modD.update()

            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])

            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)

            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
            modG.update()
            mG.update([label], modD.get_outputs())

            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)

            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
            modG.update()
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()

            #update generator
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            DLloss = modDL.get_outputs()
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)

            #update encoder
            nElements = batch_size
            modKL.forward(mx.io.DataBatch([mx.ndarray.concat(mu, lv, dim=0)]), is_train=True)
            KLloss = modKL.get_outputs()
            modKL.backward()
            gradKLLoss = modKL.get_input_grads()
            diffG = modG.get_input_grads()
            diffG = diffG[0].reshape((batch_size, Z))
            modE.backward(mx.ndarray.split(gradKLLoss[0], num_outputs=2, axis=0) + [diffG])
            modE.update()
            pred = mx.ndarray.concat(mu, lv, dim=0)
            mE.update([pred], [pred])

            if mon is not None:
                mon.toc_print()

            t += 1
            if t % show_after_every == 0:
                print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get(), mE.get(),
                      KLloss[0].asnumpy(), DLloss[0].asnumpy())
                mACC.reset()
                mG.reset()
                mD.reset()
                mE.reset()

        if epoch % visualize_after_every == 0:
            visual(output_path + 'gout' + str(epoch), outG[0].asnumpy(), activation)
            visual(output_path + 'data' + str(epoch), batch.data[0].asnumpy(), activation)

        if check_point and epoch % save_after_every == 0:
            print('Saving...')
            modG.save_params(checkpoint_path + '/%s_G-%04d.params' % (dataset, epoch))
            modD.save_params(checkpoint_path + '/%s_D-%04d.params' % (dataset, epoch))
            modE.save_params(checkpoint_path + '/%s_E-%04d.params' % (dataset, epoch))
[ "def", "train", "(", "dataset", ",", "nef", ",", "ndf", ",", "ngf", ",", "nc", ",", "batch_size", ",", "Z", ",", "lr", ",", "beta1", ",", "epsilon", ",", "ctx", ",", "check_point", ",", "g_dl_weight", ",", "output_path", ",", "checkpoint_path", ",", "data_path", ",", "activation", ",", "num_epoch", ",", "save_after_every", ",", "visualize_after_every", ",", "show_after_every", ")", ":", "#encoder", "z_mu", ",", "z_lv", ",", "z", "=", "encoder", "(", "nef", ",", "Z", ",", "batch_size", ")", "symE", "=", "mx", ".", "sym", ".", "Group", "(", "[", "z_mu", ",", "z_lv", ",", "z", "]", ")", "#generator", "symG", "=", "generator", "(", "ngf", ",", "nc", ",", "no_bias", "=", "True", ",", "fix_gamma", "=", "True", ",", "eps", "=", "1e-5", "+", "1e-12", ",", "z_dim", "=", "Z", ",", "activation", "=", "activation", ")", "#discriminator", "h", "=", "discriminator1", "(", "ndf", ")", "dloss", "=", "discriminator2", "(", "ndf", ")", "symD1", "=", "h", "symD2", "=", "dloss", "# ==============data==============", "X_train", ",", "_", "=", "get_data", "(", "data_path", ",", "activation", ")", "train_iter", "=", "mx", ".", "io", ".", "NDArrayIter", "(", "X_train", ",", "batch_size", "=", "batch_size", ",", "shuffle", "=", "True", ")", "rand_iter", "=", "RandIter", "(", "batch_size", ",", "Z", ")", "label", "=", "mx", ".", "nd", ".", "zeros", "(", "(", "batch_size", ",", ")", ",", "ctx", "=", "ctx", ")", "# =============module E=============", "modE", "=", "mx", ".", "mod", ".", "Module", "(", "symbol", "=", "symE", ",", "data_names", "=", "(", "'data'", ",", ")", ",", "label_names", "=", "None", ",", "context", "=", "ctx", ")", "modE", ".", "bind", "(", "data_shapes", "=", "train_iter", ".", "provide_data", ")", "modE", ".", "init_params", "(", "initializer", "=", "mx", ".", "init", ".", "Normal", "(", "0.02", ")", ")", "modE", ".", "init_optimizer", "(", "optimizer", "=", "'adam'", ",", "optimizer_params", "=", "{", "'learning_rate'", ":", "lr", ",", "'wd'", ":", "1e-6", ",", "'beta1'", ":", "beta1", ",", "'epsilon'", ":", "epsilon", ",", "'rescale_grad'", ":", "(", "1.0", "/", "batch_size", ")", "}", ")", "mods", "=", "[", "modE", "]", "# =============module G=============", "modG", "=", "mx", ".", "mod", ".", "Module", "(", "symbol", "=", "symG", ",", "data_names", "=", "(", "'rand'", ",", ")", ",", "label_names", "=", "None", ",", "context", "=", "ctx", ")", "modG", ".", "bind", "(", "data_shapes", "=", "rand_iter", ".", "provide_data", ",", "inputs_need_grad", "=", "True", ")", "modG", ".", "init_params", "(", "initializer", "=", "mx", ".", "init", ".", "Normal", "(", "0.02", ")", ")", "modG", ".", "init_optimizer", "(", "optimizer", "=", "'adam'", ",", "optimizer_params", "=", "{", "'learning_rate'", ":", "lr", ",", "'wd'", ":", "1e-6", ",", "'beta1'", ":", "beta1", ",", "'epsilon'", ":", "epsilon", ",", "}", ")", "mods", ".", "append", "(", "modG", ")", "# =============module D=============", "modD1", "=", "mx", ".", "mod", ".", "Module", "(", "symD1", ",", "label_names", "=", "[", "]", ",", "context", "=", "ctx", ")", "modD2", "=", "mx", ".", "mod", ".", "Module", "(", "symD2", ",", "label_names", "=", "(", "'label'", ",", ")", ",", "context", "=", "ctx", ")", "modD", "=", "mx", ".", "mod", ".", "SequentialModule", "(", ")", "modD", ".", "add", "(", "modD1", ")", ".", "add", "(", "modD2", ",", "take_labels", "=", "True", ",", "auto_wiring", "=", "True", ")", "modD", ".", "bind", "(", "data_shapes", "=", "train_iter", ".", "provide_data", ",", "label_shapes", "=", "[", "(", "'label'", ",", "(", 
"batch_size", ",", ")", ")", "]", ",", "inputs_need_grad", "=", "True", ")", "modD", ".", "init_params", "(", "initializer", "=", "mx", ".", "init", ".", "Normal", "(", "0.02", ")", ")", "modD", ".", "init_optimizer", "(", "optimizer", "=", "'adam'", ",", "optimizer_params", "=", "{", "'learning_rate'", ":", "lr", ",", "'wd'", ":", "1e-3", ",", "'beta1'", ":", "beta1", ",", "'epsilon'", ":", "epsilon", ",", "'rescale_grad'", ":", "(", "1.0", "/", "batch_size", ")", "}", ")", "mods", ".", "append", "(", "modD", ")", "# =============module DL=============", "symDL", "=", "DiscriminatorLayerLoss", "(", ")", "modDL", "=", "mx", ".", "mod", ".", "Module", "(", "symbol", "=", "symDL", ",", "data_names", "=", "(", "'data'", ",", ")", ",", "label_names", "=", "(", "'label'", ",", ")", ",", "context", "=", "ctx", ")", "modDL", ".", "bind", "(", "data_shapes", "=", "[", "(", "'data'", ",", "(", "batch_size", ",", "nef", "*", "4", ",", "4", ",", "4", ")", ")", "]", ",", "################################################################################################################################ fix 512 here", "label_shapes", "=", "[", "(", "'label'", ",", "(", "batch_size", ",", "nef", "*", "4", ",", "4", ",", "4", ")", ")", "]", ",", "inputs_need_grad", "=", "True", ")", "modDL", ".", "init_params", "(", "initializer", "=", "mx", ".", "init", ".", "Normal", "(", "0.02", ")", ")", "modDL", ".", "init_optimizer", "(", "optimizer", "=", "'adam'", ",", "optimizer_params", "=", "{", "'learning_rate'", ":", "lr", ",", "'wd'", ":", "0.", ",", "'beta1'", ":", "beta1", ",", "'epsilon'", ":", "epsilon", ",", "'rescale_grad'", ":", "(", "1.0", "/", "batch_size", ")", "}", ")", "# =============module KL=============", "symKL", "=", "KLDivergenceLoss", "(", ")", "modKL", "=", "mx", ".", "mod", ".", "Module", "(", "symbol", "=", "symKL", ",", "data_names", "=", "(", "'data'", ",", ")", ",", "label_names", "=", "None", ",", "context", "=", "ctx", ")", "modKL", ".", "bind", "(", "data_shapes", "=", "[", "(", "'data'", ",", "(", "batch_size", "*", "2", ",", "Z", ")", ")", "]", ",", "inputs_need_grad", "=", "True", ")", "modKL", ".", "init_params", "(", "initializer", "=", "mx", ".", "init", ".", "Normal", "(", "0.02", ")", ")", "modKL", ".", "init_optimizer", "(", "optimizer", "=", "'adam'", ",", "optimizer_params", "=", "{", "'learning_rate'", ":", "lr", ",", "'wd'", ":", "0.", ",", "'beta1'", ":", "beta1", ",", "'epsilon'", ":", "epsilon", ",", "'rescale_grad'", ":", "(", "1.0", "/", "batch_size", ")", "}", ")", "mods", ".", "append", "(", "modKL", ")", "def", "norm_stat", "(", "d", ")", ":", "return", "mx", ".", "nd", ".", "norm", "(", "d", ")", "/", "np", ".", "sqrt", "(", "d", ".", "size", ")", "mon", "=", "mx", ".", "mon", ".", "Monitor", "(", "10", ",", "norm_stat", ",", "pattern", "=", "\".*output|d1_backward_data\"", ",", "sort", "=", "True", ")", "mon", "=", "None", "if", "mon", "is", "not", "None", ":", "for", "mod", "in", "mods", ":", "pass", "def", "facc", "(", "label", ",", "pred", ")", ":", "'''calculating prediction accuracy\n '''", "pred", "=", "pred", ".", "ravel", "(", ")", "label", "=", "label", ".", "ravel", "(", ")", "return", "(", "(", "pred", ">", "0.5", ")", "==", "label", ")", ".", "mean", "(", ")", "def", "fentropy", "(", "label", ",", "pred", ")", ":", "'''calculating binary cross-entropy loss\n '''", "pred", "=", "pred", ".", "ravel", "(", ")", "label", "=", "label", ".", "ravel", "(", ")", "return", "-", "(", "label", "*", "np", ".", "log", "(", "pred", "+", "1e-12", ")", "+", "(", 
"1.", "-", "label", ")", "*", "np", ".", "log", "(", "1.", "-", "pred", "+", "1e-12", ")", ")", ".", "mean", "(", ")", "def", "kldivergence", "(", "label", ",", "pred", ")", ":", "'''calculating KL divergence loss\n '''", "mean", ",", "log_var", "=", "np", ".", "split", "(", "pred", ",", "2", ",", "axis", "=", "0", ")", "var", "=", "np", ".", "exp", "(", "log_var", ")", "KLLoss", "=", "-", "0.5", "*", "np", ".", "sum", "(", "1", "+", "log_var", "-", "np", ".", "power", "(", "mean", ",", "2", ")", "-", "var", ")", "KLLoss", "=", "KLLoss", "/", "nElements", "return", "KLLoss", "mG", "=", "mx", ".", "metric", ".", "CustomMetric", "(", "fentropy", ")", "mD", "=", "mx", ".", "metric", ".", "CustomMetric", "(", "fentropy", ")", "mE", "=", "mx", ".", "metric", ".", "CustomMetric", "(", "kldivergence", ")", "mACC", "=", "mx", ".", "metric", ".", "CustomMetric", "(", "facc", ")", "print", "(", "'Training...'", ")", "stamp", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y_%m_%d-%H_%M'", ")", "# =============train===============", "for", "epoch", "in", "range", "(", "num_epoch", ")", ":", "train_iter", ".", "reset", "(", ")", "for", "t", ",", "batch", "in", "enumerate", "(", "train_iter", ")", ":", "rbatch", "=", "rand_iter", ".", "next", "(", ")", "if", "mon", "is", "not", "None", ":", "mon", ".", "tic", "(", ")", "modG", ".", "forward", "(", "rbatch", ",", "is_train", "=", "True", ")", "outG", "=", "modG", ".", "get_outputs", "(", ")", "# update discriminator on fake", "label", "[", ":", "]", "=", "0", "modD", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "outG", ",", "[", "label", "]", ")", ",", "is_train", "=", "True", ")", "modD", ".", "backward", "(", ")", "gradD11", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modD1", ".", "_exec_group", ".", "grad_arrays", "]", "gradD12", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modD2", ".", "_exec_group", ".", "grad_arrays", "]", "modD", ".", "update_metric", "(", "mD", ",", "[", "label", "]", ")", "modD", ".", "update_metric", "(", "mACC", ",", "[", "label", "]", ")", "#update discriminator on decoded", "modE", ".", "forward", "(", "batch", ",", "is_train", "=", "True", ")", "mu", ",", "lv", ",", "z", "=", "modE", ".", "get_outputs", "(", ")", "z", "=", "z", ".", "reshape", "(", "(", "batch_size", ",", "Z", ",", "1", ",", "1", ")", ")", "sample", "=", "mx", ".", "io", ".", "DataBatch", "(", "[", "z", "]", ",", "label", "=", "None", ",", "provide_data", "=", "[", "(", "'rand'", ",", "(", "batch_size", ",", "Z", ",", "1", ",", "1", ")", ")", "]", ")", "modG", ".", "forward", "(", "sample", ",", "is_train", "=", "True", ")", "xz", "=", "modG", ".", "get_outputs", "(", ")", "label", "[", ":", "]", "=", "0", "modD", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "xz", ",", "[", "label", "]", ")", ",", "is_train", "=", "True", ")", "modD", ".", "backward", "(", ")", "#modD.update()", "gradD21", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modD1", ".", "_exec_group", ".", "grad_arrays", "]", "gradD22", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modD2", ".", "_exec_group", ".", "grad_arrays", "]", "modD", ".", "update_metric", "(", "mD", ",", "[", "label", "]", ")", "modD", ".", 
"update_metric", "(", "mACC", ",", "[", "label", "]", ")", "# update discriminator on real", "label", "[", ":", "]", "=", "1", "batch", ".", "label", "=", "[", "label", "]", "modD", ".", "forward", "(", "batch", ",", "is_train", "=", "True", ")", "lx", "=", "[", "out", ".", "copyto", "(", "out", ".", "context", ")", "for", "out", "in", "modD1", ".", "get_outputs", "(", ")", "]", "modD", ".", "backward", "(", ")", "for", "gradsr", ",", "gradsf", ",", "gradsd", "in", "zip", "(", "modD1", ".", "_exec_group", ".", "grad_arrays", ",", "gradD11", ",", "gradD21", ")", ":", "for", "gradr", ",", "gradf", ",", "gradd", "in", "zip", "(", "gradsr", ",", "gradsf", ",", "gradsd", ")", ":", "gradr", "+=", "0.5", "*", "(", "gradf", "+", "gradd", ")", "for", "gradsr", ",", "gradsf", ",", "gradsd", "in", "zip", "(", "modD2", ".", "_exec_group", ".", "grad_arrays", ",", "gradD12", ",", "gradD22", ")", ":", "for", "gradr", ",", "gradf", ",", "gradd", "in", "zip", "(", "gradsr", ",", "gradsf", ",", "gradsd", ")", ":", "gradr", "+=", "0.5", "*", "(", "gradf", "+", "gradd", ")", "modD", ".", "update", "(", ")", "modD", ".", "update_metric", "(", "mD", ",", "[", "label", "]", ")", "modD", ".", "update_metric", "(", "mACC", ",", "[", "label", "]", ")", "modG", ".", "forward", "(", "rbatch", ",", "is_train", "=", "True", ")", "outG", "=", "modG", ".", "get_outputs", "(", ")", "label", "[", ":", "]", "=", "1", "modD", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "outG", ",", "[", "label", "]", ")", ",", "is_train", "=", "True", ")", "modD", ".", "backward", "(", ")", "diffD", "=", "modD1", ".", "get_input_grads", "(", ")", "modG", ".", "backward", "(", "diffD", ")", "gradG1", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modG", ".", "_exec_group", ".", "grad_arrays", "]", "mG", ".", "update", "(", "[", "label", "]", ",", "modD", ".", "get_outputs", "(", ")", ")", "modG", ".", "forward", "(", "sample", ",", "is_train", "=", "True", ")", "xz", "=", "modG", ".", "get_outputs", "(", ")", "label", "[", ":", "]", "=", "1", "modD", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "xz", ",", "[", "label", "]", ")", ",", "is_train", "=", "True", ")", "modD", ".", "backward", "(", ")", "diffD", "=", "modD1", ".", "get_input_grads", "(", ")", "modG", ".", "backward", "(", "diffD", ")", "gradG2", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modG", ".", "_exec_group", ".", "grad_arrays", "]", "mG", ".", "update", "(", "[", "label", "]", ",", "modD", ".", "get_outputs", "(", ")", ")", "modG", ".", "forward", "(", "sample", ",", "is_train", "=", "True", ")", "xz", "=", "modG", ".", "get_outputs", "(", ")", "modD1", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "xz", ",", "[", "]", ")", ",", "is_train", "=", "True", ")", "outD1", "=", "modD1", ".", "get_outputs", "(", ")", "modDL", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "outD1", ",", "lx", ")", ",", "is_train", "=", "True", ")", "modDL", ".", "backward", "(", ")", "dlGrad", "=", "modDL", ".", "get_input_grads", "(", ")", "modD1", ".", "backward", "(", "dlGrad", ")", "diffD", "=", "modD1", ".", "get_input_grads", "(", ")", "modG", ".", "backward", "(", "diffD", ")", "for", "grads", ",", "gradsG1", ",", "gradsG2", "in", "zip", "(", "modG", ".", "_exec_group", ".", "grad_arrays", ",", "gradG1", ",", "gradG2", ")", ":", "for", "grad", ",", "gradg1", ",", 
"gradg2", "in", "zip", "(", "grads", ",", "gradsG1", ",", "gradsG2", ")", ":", "grad", "=", "g_dl_weight", "*", "grad", "+", "0.5", "*", "(", "gradg1", "+", "gradg2", ")", "modG", ".", "update", "(", ")", "mG", ".", "update", "(", "[", "label", "]", ",", "modD", ".", "get_outputs", "(", ")", ")", "modG", ".", "forward", "(", "rbatch", ",", "is_train", "=", "True", ")", "outG", "=", "modG", ".", "get_outputs", "(", ")", "label", "[", ":", "]", "=", "1", "modD", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "outG", ",", "[", "label", "]", ")", ",", "is_train", "=", "True", ")", "modD", ".", "backward", "(", ")", "diffD", "=", "modD1", ".", "get_input_grads", "(", ")", "modG", ".", "backward", "(", "diffD", ")", "gradG1", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modG", ".", "_exec_group", ".", "grad_arrays", "]", "mG", ".", "update", "(", "[", "label", "]", ",", "modD", ".", "get_outputs", "(", ")", ")", "modG", ".", "forward", "(", "sample", ",", "is_train", "=", "True", ")", "xz", "=", "modG", ".", "get_outputs", "(", ")", "label", "[", ":", "]", "=", "1", "modD", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "xz", ",", "[", "label", "]", ")", ",", "is_train", "=", "True", ")", "modD", ".", "backward", "(", ")", "diffD", "=", "modD1", ".", "get_input_grads", "(", ")", "modG", ".", "backward", "(", "diffD", ")", "gradG2", "=", "[", "[", "grad", ".", "copyto", "(", "grad", ".", "context", ")", "for", "grad", "in", "grads", "]", "for", "grads", "in", "modG", ".", "_exec_group", ".", "grad_arrays", "]", "mG", ".", "update", "(", "[", "label", "]", ",", "modD", ".", "get_outputs", "(", ")", ")", "modG", ".", "forward", "(", "sample", ",", "is_train", "=", "True", ")", "xz", "=", "modG", ".", "get_outputs", "(", ")", "modD1", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "xz", ",", "[", "]", ")", ",", "is_train", "=", "True", ")", "outD1", "=", "modD1", ".", "get_outputs", "(", ")", "modDL", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "outD1", ",", "lx", ")", ",", "is_train", "=", "True", ")", "modDL", ".", "backward", "(", ")", "dlGrad", "=", "modDL", ".", "get_input_grads", "(", ")", "modD1", ".", "backward", "(", "dlGrad", ")", "diffD", "=", "modD1", ".", "get_input_grads", "(", ")", "modG", ".", "backward", "(", "diffD", ")", "for", "grads", ",", "gradsG1", ",", "gradsG2", "in", "zip", "(", "modG", ".", "_exec_group", ".", "grad_arrays", ",", "gradG1", ",", "gradG2", ")", ":", "for", "grad", ",", "gradg1", ",", "gradg2", "in", "zip", "(", "grads", ",", "gradsG1", ",", "gradsG2", ")", ":", "grad", "=", "g_dl_weight", "*", "grad", "+", "0.5", "*", "(", "gradg1", "+", "gradg2", ")", "modG", ".", "update", "(", ")", "mG", ".", "update", "(", "[", "label", "]", ",", "modD", ".", "get_outputs", "(", ")", ")", "modG", ".", "forward", "(", "sample", ",", "is_train", "=", "True", ")", "xz", "=", "modG", ".", "get_outputs", "(", ")", "#update generator", "modD1", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "xz", ",", "[", "]", ")", ",", "is_train", "=", "True", ")", "outD1", "=", "modD1", ".", "get_outputs", "(", ")", "modDL", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "outD1", ",", "lx", ")", ",", "is_train", "=", "True", ")", "DLloss", "=", "modDL", ".", "get_outputs", "(", ")", "modDL", ".", "backward", "(", ")", "dlGrad", "=", "modDL", ".", "get_input_grads", "(", ")", "modD1", ".", "backward", "(", "dlGrad", ")", 
"diffD", "=", "modD1", ".", "get_input_grads", "(", ")", "modG", ".", "backward", "(", "diffD", ")", "#update encoder", "nElements", "=", "batch_size", "modKL", ".", "forward", "(", "mx", ".", "io", ".", "DataBatch", "(", "[", "mx", ".", "ndarray", ".", "concat", "(", "mu", ",", "lv", ",", "dim", "=", "0", ")", "]", ")", ",", "is_train", "=", "True", ")", "KLloss", "=", "modKL", ".", "get_outputs", "(", ")", "modKL", ".", "backward", "(", ")", "gradKLLoss", "=", "modKL", ".", "get_input_grads", "(", ")", "diffG", "=", "modG", ".", "get_input_grads", "(", ")", "diffG", "=", "diffG", "[", "0", "]", ".", "reshape", "(", "(", "batch_size", ",", "Z", ")", ")", "modE", ".", "backward", "(", "mx", ".", "ndarray", ".", "split", "(", "gradKLLoss", "[", "0", "]", ",", "num_outputs", "=", "2", ",", "axis", "=", "0", ")", "+", "[", "diffG", "]", ")", "modE", ".", "update", "(", ")", "pred", "=", "mx", ".", "ndarray", ".", "concat", "(", "mu", ",", "lv", ",", "dim", "=", "0", ")", "mE", ".", "update", "(", "[", "pred", "]", ",", "[", "pred", "]", ")", "if", "mon", "is", "not", "None", ":", "mon", ".", "toc_print", "(", ")", "t", "+=", "1", "if", "t", "%", "show_after_every", "==", "0", ":", "print", "(", "'epoch:'", ",", "epoch", ",", "'iter:'", ",", "t", ",", "'metric:'", ",", "mACC", ".", "get", "(", ")", ",", "mG", ".", "get", "(", ")", ",", "mD", ".", "get", "(", ")", ",", "mE", ".", "get", "(", ")", ",", "KLloss", "[", "0", "]", ".", "asnumpy", "(", ")", ",", "DLloss", "[", "0", "]", ".", "asnumpy", "(", ")", ")", "mACC", ".", "reset", "(", ")", "mG", ".", "reset", "(", ")", "mD", ".", "reset", "(", ")", "mE", ".", "reset", "(", ")", "if", "epoch", "%", "visualize_after_every", "==", "0", ":", "visual", "(", "output_path", "+", "'gout'", "+", "str", "(", "epoch", ")", ",", "outG", "[", "0", "]", ".", "asnumpy", "(", ")", ",", "activation", ")", "visual", "(", "output_path", "+", "'data'", "+", "str", "(", "epoch", ")", ",", "batch", ".", "data", "[", "0", "]", ".", "asnumpy", "(", ")", ",", "activation", ")", "if", "check_point", "and", "epoch", "%", "save_after_every", "==", "0", ":", "print", "(", "'Saving...'", ")", "modG", ".", "save_params", "(", "checkpoint_path", "+", "'/%s_G-%04d.params'", "%", "(", "dataset", ",", "epoch", ")", ")", "modD", ".", "save_params", "(", "checkpoint_path", "+", "'/%s_D-%04d.params'", "%", "(", "dataset", ",", "epoch", ")", ")", "modE", ".", "save_params", "(", "checkpoint_path", "+", "'/%s_E-%04d.params'", "%", "(", "dataset", ",", "epoch", ")", ")" ]
39.09816
0.005202
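A minimal NumPy-only sketch of the KL term computed by the `kldivergence` metric in the record above, -0.5 * sum(1 + log_var - mean^2 - var); dividing by `n_elements` mirrors the per-batch normalisation done via the `nElements` variable in the training loop and is included only for illustration.

import numpy as np

def kl_to_standard_normal(mean, log_var, n_elements):
    # KL( N(mean, exp(log_var)) || N(0, 1) ), summed then normalised
    var = np.exp(log_var)
    kl = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
    return kl / n_elements

mean = np.zeros((4, 100))      # a "perfect" posterior has zero KL
log_var = np.zeros((4, 100))
print(kl_to_standard_normal(mean, log_var, n_elements=4))  # -> 0.0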
def run_from_argv(self, argv):
        """
        Set the default Gherkin test runner for its options to be parsed.
        """

        self.test_runner = test_runner_class

        super(Command, self).run_from_argv(argv)
[ "def", "run_from_argv", "(", "self", ",", "argv", ")", ":", "self", ".", "test_runner", "=", "test_runner_class", "super", "(", "Command", ",", "self", ")", ".", "run_from_argv", "(", "argv", ")" ]
31
0.008969
def _buildStartOpts(self, streamUrl, playList=False):
        """ Builds the options to pass to subprocess."""

        """ Test for newer MPV versions as it supports different IPC flags. """
        p = subprocess.Popen([self.PLAYER_CMD, "--input-ipc-server"],
                             stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=False)
        out = p.communicate()
        if "not found" not in str(out[0]):
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("--input-ipc-server is supported.")
            newerMpv = 1;
        else:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("--input-ipc-server is not supported.")
            newerMpv = 0;

        if playList:
            if newerMpv:
                opts = [self.PLAYER_CMD, "--quiet", "--playlist", streamUrl, "--input-ipc-server=/tmp/mpvsocket"]
            else:
                opts = [self.PLAYER_CMD, "--quiet", "--playlist", streamUrl, "--input-unix-socket=/tmp/mpvsocket"]
        else:
            if newerMpv:
                opts = [self.PLAYER_CMD, "--quiet", streamUrl, "--input-ipc-server=/tmp/mpvsocket"]
            else:
                opts = [self.PLAYER_CMD, "--quiet", streamUrl, "--input-unix-socket=/tmp/mpvsocket"]

        if self.USE_PROFILE == -1:
            self.USE_PROFILE = self._configHasProfile()

        if self.USE_PROFILE == 1:
            opts.append("--profile=pyradio")
            if (logger.isEnabledFor(logging.DEBUG)):
                logger.debug("using profile [pyradio]")
        return opts
[ "def", "_buildStartOpts", "(", "self", ",", "streamUrl", ",", "playList", "=", "False", ")", ":", "\"\"\" Test for newer MPV versions as it supports different IPC flags. \"\"\"", "p", "=", "subprocess", ".", "Popen", "(", "[", "self", ".", "PLAYER_CMD", ",", "\"--input-ipc-server\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ")", "out", "=", "p", ".", "communicate", "(", ")", "if", "\"not found\"", "not", "in", "str", "(", "out", "[", "0", "]", ")", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "\"--input-ipc-server is supported.\"", ")", "newerMpv", "=", "1", "else", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "\"--input-ipc-server is not supported.\"", ")", "newerMpv", "=", "0", "if", "playList", ":", "if", "newerMpv", ":", "opts", "=", "[", "self", ".", "PLAYER_CMD", ",", "\"--quiet\"", ",", "\"--playlist\"", ",", "streamUrl", ",", "\"--input-ipc-server=/tmp/mpvsocket\"", "]", "else", ":", "opts", "=", "[", "self", ".", "PLAYER_CMD", ",", "\"--quiet\"", ",", "\"--playlist\"", ",", "streamUrl", ",", "\"--input-unix-socket=/tmp/mpvsocket\"", "]", "else", ":", "if", "newerMpv", ":", "opts", "=", "[", "self", ".", "PLAYER_CMD", ",", "\"--quiet\"", ",", "streamUrl", ",", "\"--input-ipc-server=/tmp/mpvsocket\"", "]", "else", ":", "opts", "=", "[", "self", ".", "PLAYER_CMD", ",", "\"--quiet\"", ",", "streamUrl", ",", "\"--input-unix-socket=/tmp/mpvsocket\"", "]", "if", "self", ".", "USE_PROFILE", "==", "-", "1", ":", "self", ".", "USE_PROFILE", "=", "self", ".", "_configHasProfile", "(", ")", "if", "self", ".", "USE_PROFILE", "==", "1", ":", "opts", ".", "append", "(", "\"--profile=pyradio\"", ")", "if", "(", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ")", ":", "logger", ".", "debug", "(", "\"using profile [pyradio]\"", ")", "return", "opts" ]
45.939394
0.005814
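A hedged, standalone sketch of the capability probe used in the record above: run the player once with the newer IPC flag and fall back to the older socket flag if the option is rejected. It only gives a meaningful answer when `mpv` is on PATH, and the /tmp socket path is the same hard-coded value as in the record.

import subprocess

def probe_ipc_flag(player_cmd='mpv'):
    try:
        p = subprocess.Popen([player_cmd, '--input-ipc-server'],
                             stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        out, _ = p.communicate()
    except OSError:
        return None  # player binary not installed
    # older mpv builds report the newer option as unknown ("not found")
    if b'not found' not in out:
        return '--input-ipc-server=/tmp/mpvsocket'
    return '--input-unix-socket=/tmp/mpvsocket'

print(probe_ipc_flag())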
def _set_survey_scenario(self, survey_scenario):
        """
        Set survey scenario

        :param survey_scenario: the survey scenario
        """
        self.survey_scenario = survey_scenario
        # TODO deal with baseline if reform is present
        if survey_scenario.simulation is None:
            survey_scenario.simulation = survey_scenario.new_simulation()
        period = self.period
        self.filter_by = filter_by = survey_scenario.calculate_variable(
            variable = self.filter_by_name, period = period)
        # TODO: shoud not be france specific
        self.weight_name = weight_name = self.survey_scenario.weight_column_name_by_entity['menage']
        self.initial_weight_name = weight_name + "_ini"
        self.initial_weight = initial_weight = survey_scenario.calculate_variable(
            variable = weight_name, period = period)
        self.initial_total_population = sum(initial_weight * filter_by)
        self.weight = survey_scenario.calculate_variable(variable = weight_name, period = period)
[ "def", "_set_survey_scenario", "(", "self", ",", "survey_scenario", ")", ":", "self", ".", "survey_scenario", "=", "survey_scenario", "# TODO deal with baseline if reform is present", "if", "survey_scenario", ".", "simulation", "is", "None", ":", "survey_scenario", ".", "simulation", "=", "survey_scenario", ".", "new_simulation", "(", ")", "period", "=", "self", ".", "period", "self", ".", "filter_by", "=", "filter_by", "=", "survey_scenario", ".", "calculate_variable", "(", "variable", "=", "self", ".", "filter_by_name", ",", "period", "=", "period", ")", "# TODO: shoud not be france specific", "self", ".", "weight_name", "=", "weight_name", "=", "self", ".", "survey_scenario", ".", "weight_column_name_by_entity", "[", "'menage'", "]", "self", ".", "initial_weight_name", "=", "weight_name", "+", "\"_ini\"", "self", ".", "initial_weight", "=", "initial_weight", "=", "survey_scenario", ".", "calculate_variable", "(", "variable", "=", "weight_name", ",", "period", "=", "period", ")", "self", ".", "initial_total_population", "=", "sum", "(", "initial_weight", "*", "filter_by", ")", "self", ".", "weight", "=", "survey_scenario", ".", "calculate_variable", "(", "variable", "=", "weight_name", ",", "period", "=", "period", ")" ]
51.8
0.016114
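The record above essentially caches survey weights and a filtered weighted total; a tiny NumPy sketch of that bookkeeping, with purely illustrative values (none of this is OpenFisca API):

import numpy as np

weights = np.array([1200.0, 800.0, 1500.0])   # household weights
in_scope = np.array([1, 0, 1])                # filter_by indicator
initial_total_population = np.sum(weights * in_scope)
print(initial_total_population)  # 2700.0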
def load_related_model(self, name, load_only=None, dont_load=None):
        '''Load the :class:`ForeignKey` field ``name`` if this is part of
        the fields of this model and if the related object is not already
        loaded. It is used by the lazy loading mechanism of
        :ref:`one-to-many <one-to-many>` relationships.

        :parameter name: the :attr:`Field.name` of the :class:`ForeignKey` to load.
        :parameter load_only: Optional parameters which specify the fields to load.
        :parameter dont_load: Optional parameters which specify the fields not to load.
        :return: the related :class:`StdModel` instance.
        '''
        field = self._meta.dfields.get(name)
        if not field:
            raise ValueError('Field "%s" not available' % name)
        elif not field.type == 'related object':
            raise ValueError('Field "%s" not a foreign key' % name)
        return self._load_related_model(field, load_only, dont_load)
[ "def", "load_related_model", "(", "self", ",", "name", ",", "load_only", "=", "None", ",", "dont_load", "=", "None", ")", ":", "field", "=", "self", ".", "_meta", ".", "dfields", ".", "get", "(", "name", ")", "if", "not", "field", ":", "raise", "ValueError", "(", "'Field \"%s\" not available'", "%", "name", ")", "elif", "not", "field", ".", "type", "==", "'related object'", ":", "raise", "ValueError", "(", "'Field \"%s\" not a foreign key'", "%", "name", ")", "return", "self", ".", "_load_related_model", "(", "field", ",", "load_only", ",", "dont_load", ")" ]
52.705882
0.002193
def svg_polygons_to_df(svg_source, xpath='//svg:polygon', namespaces=INKSCAPE_NSMAP):
    '''
    Construct a data frame with one row per vertex for all shapes (e.g.,
    ``svg:path``, ``svg:polygon``) in :data:`svg_source`.

    Arguments
    ---------
    svg_source : str or file-like
        A file path, URI, or file-like object.
    xpath : str, optional
        XPath path expression to select shape nodes.
    namespaces : dict, optional
        Key/value mapping of XML namespaces.

    Returns
    -------
    pandas.DataFrame
        Frame with one row per vertex for all shapes in :data:`svg_source`,
        with the following columns:
         - ``path_id``: The ``id`` attribute of the corresponding shape.
         - ``vertex_i``: The index of the vertex within the corresponding shape.
         - ``x``: The x-coordinate of the vertex.
         - ``y``: The y-coordinate of the vertex.

    .. note:: Deprecated in :mod:`svg_model` 0.5.post10
        :func:`svg_polygons_to_df` will be removed in :mod:`svg_model` 1.0, it
        is replaced by :func:`svg_shapes_to_df` because the latter is more
        general and works with ``svg:path`` and ``svg:polygon`` elements.
    '''
    warnings.warn("The `svg_polygons_to_df` function is deprecated. Use "
                  "`svg_shapes_to_df` instead.")
    result = svg_shapes_to_df(svg_source, xpath=xpath, namespaces=namespaces)

    return result[['id', 'vertex_i', 'x', 'y']].rename(columns={'id': 'path_id'})
[ "def", "svg_polygons_to_df", "(", "svg_source", ",", "xpath", "=", "'//svg:polygon'", ",", "namespaces", "=", "INKSCAPE_NSMAP", ")", ":", "warnings", ".", "warn", "(", "\"The `svg_polygons_to_df` function is deprecated. Use \"", "\"`svg_shapes_to_df` instead.\"", ")", "result", "=", "svg_shapes_to_df", "(", "svg_source", ",", "xpath", "=", "xpath", ",", "namespaces", "=", "namespaces", ")", "return", "result", "[", "[", "'id'", ",", "'vertex_i'", ",", "'x'", ",", "'y'", "]", "]", ".", "rename", "(", "columns", "=", "{", "'id'", ":", "'path_id'", "}", ")" ]
41.702703
0.000633
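A generic sketch of the deprecation-wrapper pattern used in the record above (warn, delegate to the replacement, then rename columns back to the legacy layout); pandas is the only dependency, and `new_api`/`old_api` are illustrative stand-ins rather than svg_model functions.

import warnings
import pandas as pd

def new_api():
    return pd.DataFrame({'id': ['p0', 'p0'], 'vertex_i': [0, 1],
                         'x': [0.0, 1.0], 'y': [0.0, 1.0]})

def old_api():
    warnings.warn("old_api is deprecated, use new_api instead")
    result = new_api()
    # restore the legacy column name expected by old callers
    return result[['id', 'vertex_i', 'x', 'y']].rename(columns={'id': 'path_id'})

print(old_api().columns.tolist())  # ['path_id', 'vertex_i', 'x', 'y']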
def create_role(self, role_name, role_type, host_id):
        """
        Create a role.

        @param role_name: Role name
        @param role_type: Role type
        @param host_id: ID of the host to assign the role to
        @return: An ApiRole object
        """
        return roles.create_role(self._get_resource_root(), self.name, role_type, role_name, host_id, self._get_cluster_name())
[ "def", "create_role", "(", "self", ",", "role_name", ",", "role_type", ",", "host_id", ")", ":", "return", "roles", ".", "create_role", "(", "self", ".", "_get_resource_root", "(", ")", ",", "self", ".", "name", ",", "role_type", ",", "role_name", ",", "host_id", ",", "self", ".", "_get_cluster_name", "(", ")", ")" ]
33
0.005362
def read_sudoers():
    """ Read the entries in the sudoers file.

    returns:
        list: sudoers entries.
    """
    sudoers_path = '/etc/sudoers'
    rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
    tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars)
    sudoers_entries = list()

    copy_result = execute_command(
        shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path))))
    result_message = copy_result[0][1].decode('UTF-8')
    if 'No such file or directory' not in result_message:
        execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_sudoers_path))))
        with open(tmp_sudoers_path) as tmp_sudoers_file:
            for line in tmp_sudoers_file:
                stripped = line.strip().replace(os.linesep, '')
                if stripped and not stripped.startswith('#'):
                    sudoers_entries.append(stripped)
        execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path))))
    return sudoers_entries
[ "def", "read_sudoers", "(", ")", ":", "sudoers_path", "=", "'/etc/sudoers'", "rnd_chars", "=", "random_string", "(", "length", "=", "RANDOM_FILE_EXT_LENGTH", ")", "tmp_sudoers_path", "=", "'/tmp/sudoers_{0}'", ".", "format", "(", "rnd_chars", ")", "sudoers_entries", "=", "list", "(", ")", "copy_result", "=", "execute_command", "(", "shlex", ".", "split", "(", "str", "(", "'{0} cp {1} {2}'", ".", "format", "(", "sudo_check", "(", ")", ",", "sudoers_path", ",", "tmp_sudoers_path", ")", ")", ")", ")", "result_message", "=", "copy_result", "[", "0", "]", "[", "1", "]", ".", "decode", "(", "'UTF-8'", ")", "if", "'No such file or directory'", "not", "in", "result_message", ":", "execute_command", "(", "shlex", ".", "split", "(", "str", "(", "'{0} chmod 755 {1}'", ".", "format", "(", "sudo_check", "(", ")", ",", "tmp_sudoers_path", ")", ")", ")", ")", "with", "open", "(", "tmp_sudoers_path", ")", "as", "tmp_sudoers_file", ":", "for", "line", "in", "tmp_sudoers_file", ":", "stripped", "=", "line", ".", "strip", "(", ")", ".", "replace", "(", "os", ".", "linesep", ",", "''", ")", "if", "stripped", "and", "not", "stripped", ".", "startswith", "(", "'#'", ")", ":", "sudoers_entries", ".", "append", "(", "stripped", ")", "execute_command", "(", "shlex", ".", "split", "(", "str", "(", "'{0} rm {1}'", ".", "format", "(", "sudo_check", "(", ")", ",", "tmp_sudoers_path", ")", ")", ")", ")", "return", "sudoers_entries" ]
44.04
0.003556
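The parsing step in the record above just keeps non-empty, non-comment lines; a self-contained sketch of that filter on an in-memory sample, so no sudo access or /etc/sudoers is needed to try it.

sample = """# /etc/sudoers
Defaults env_reset

root    ALL=(ALL:ALL) ALL
%sudo   ALL=(ALL:ALL) ALL
"""

entries = []
for line in sample.splitlines():
    stripped = line.strip()
    if stripped and not stripped.startswith('#'):
        entries.append(stripped)
print(entries)  # ['Defaults env_reset', 'root    ALL=(ALL:ALL) ALL', '%sudo   ALL=(ALL:ALL) ALL']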
def detect(self, text):
        """Detect language of the input text

        :param text: The source text(s) whose language you want to identify.
                     Batch detection is supported via sequence input.
        :type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)

        :rtype: Detected
        :rtype: :class:`list` (when a list is passed)

        Basic usage:
            >>> from googletrans import Translator
            >>> translator = Translator()
            >>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
            <Detected lang=ko confidence=0.27041003>
            >>> translator.detect('この文章は日本語で書かれました。')
            <Detected lang=ja confidence=0.64889508>
            >>> translator.detect('This sentence is written in English.')
            <Detected lang=en confidence=0.22348526>
            >>> translator.detect('Tiu frazo estas skribita en Esperanto.')
            <Detected lang=eo confidence=0.10538048>

        Advanced usage:
            >>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
            >>> for lang in langs:
            ...    print(lang.lang, lang.confidence)
            ko 1
            ja 0.92929292
            en 0.96954316
            fr 0.043500196
        """
        if isinstance(text, list):
            result = []
            for item in text:
                lang = self.detect(item)
                result.append(lang)
            return result

        data = self._translate(text, dest='en', src='auto')

        # actual source language that will be recognized by Google Translator when the
        # src passed is equal to auto.
        src = ''
        confidence = 0.0
        try:
            src = ''.join(data[8][0])
            confidence = data[8][-2][0]
        except Exception:  # pragma: nocover
            pass
        result = Detected(lang=src, confidence=confidence)

        return result
[ "def", "detect", "(", "self", ",", "text", ")", ":", "if", "isinstance", "(", "text", ",", "list", ")", ":", "result", "=", "[", "]", "for", "item", "in", "text", ":", "lang", "=", "self", ".", "detect", "(", "item", ")", "result", ".", "append", "(", "lang", ")", "return", "result", "data", "=", "self", ".", "_translate", "(", "text", ",", "dest", "=", "'en'", ",", "src", "=", "'auto'", ")", "# actual source language that will be recognized by Google Translator when the", "# src passed is equal to auto.", "src", "=", "''", "confidence", "=", "0.0", "try", ":", "src", "=", "''", ".", "join", "(", "data", "[", "8", "]", "[", "0", "]", ")", "confidence", "=", "data", "[", "8", "]", "[", "-", "2", "]", "[", "0", "]", "except", "Exception", ":", "# pragma: nocover", "pass", "result", "=", "Detected", "(", "lang", "=", "src", ",", "confidence", "=", "confidence", ")", "return", "result" ]
36.576923
0.00256
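A minimal sketch of the list-dispatch pattern `detect` uses above (recurse per item when given a sequence, otherwise handle a single value). `detect_one` is a toy stand-in for illustration only, not the googletrans API.

def detect_one(text):
    # toy heuristic: ASCII-only text is called "en", anything else "other"
    return {'lang': 'en' if text.isascii() else 'other', 'confidence': 1.0}

def detect(text):
    if isinstance(text, list):
        return [detect(item) for item in text]
    return detect_one(text)

print(detect('hello'))
print(detect(['hello', 'こんにちは']))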
def _dist_to_annot_donor_acceptor(df, d, strand, novel_feature):
    """Find nearest annotated upstream/downstream donor/acceptor for novel
    donor/acceptors.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe with observed splice junctions (novel and known) with
        columns 'chrom', 'first_bp_intron', 'last_bp_intron', 'strand',
        'intron_motif', 'annotated', 'ext_annotated', 'chrom:start',
        'chrom:end', 'gene_id', 'donor', 'acceptor', 'novel_donor',
        'novel_acceptor'.

    d : dict
        If df contains novel donors, should be a dict whose keys are acceptors
        and whose values are the locations (integers) of all associated
        donors. If df contains novel acceptors, should be a dict whose keys
        are donors and whose values are the locations (integers) of all
        associated acceptors.

    strand : str ('+' or '-')
        Strand that features are on.

    novel_feature : str ('donor' or 'acceptor')
        Whether the dataframe contains novel donors or novel acceptors.

    Returns
    -------
    up : list
        List of distances from novel feature to nearest feature (of same type)
        upstream. If upstream feature does not exist, the list will have a nan.

    down : list
        List of distances from novel feature to nearest feature (of same type)
        downstream. If downstream feature does not exist, the list will have a
        nan.

    """
    if df.shape[0] > 0:
        assert len(set(df.strand)) == 1
    if novel_feature == 'donor':
        assert df.novel_donor.sum() == df.shape[0]
    if novel_feature == 'acceptor':
        assert df.novel_acceptor.sum() == df.shape[0]

    # For a novel donor, we want to return the distance to the nearest upstream
    # and downstream donors that use the same acceptor. For a novel acceptor, we
    # want to return the distance to the nearest upstream and downstream
    # acceptors that use the same donor. In some cases there may not be one of
    # the upstream or downstream donors/acceptors. In that case we will just
    # return nan.
    if strand == '+':
        if novel_feature == 'donor':
            annot_feature = 'acceptor'
            novel_location = 'start'
        if novel_feature == 'acceptor':
            annot_feature = 'donor'
            novel_location = 'end'
    if strand == '-':
        if novel_feature == 'donor':
            annot_feature = 'acceptor'
            novel_location = 'end'
        if novel_feature == 'acceptor':
            annot_feature = 'donor'
            novel_location = 'start'

    upstream_dists = []
    downstream_dists = []
    for i in df.index:
        a = df.ix[i, annot_feature]
        diff = df.ix[i, novel_location] - d[a]
        pos = diff[diff > 0]
        neg = diff[diff < 0]
        if strand == '+':
            if pos.shape[0] == 0:
                upstream_dists.append(np.nan)
            else:
                upstream_dists.append(pos.min())
            if neg.shape[0] == 0:
                downstream_dists.append(np.nan)
            else:
                downstream_dists.append(np.abs(neg).min())
        if strand == '-':
            if pos.shape[0] == 0:
                downstream_dists.append(np.nan)
            else:
                downstream_dists.append(pos.min())
            if neg.shape[0] == 0:
                upstream_dists.append(np.nan)
            else:
                upstream_dists.append(np.abs(neg).min())
    return upstream_dists, downstream_dists
[ "def", "_dist_to_annot_donor_acceptor", "(", "df", ",", "d", ",", "strand", ",", "novel_feature", ")", ":", "if", "df", ".", "shape", "[", "0", "]", ">", "0", ":", "assert", "len", "(", "set", "(", "df", ".", "strand", ")", ")", "==", "1", "if", "novel_feature", "==", "'donor'", ":", "assert", "df", ".", "novel_donor", ".", "sum", "(", ")", "==", "df", ".", "shape", "[", "0", "]", "if", "novel_feature", "==", "'acceptor'", ":", "assert", "df", ".", "novel_acceptor", ".", "sum", "(", ")", "==", "df", ".", "shape", "[", "0", "]", "# For a novel donor, we want to return the distance to the nearest upstream", "# and downstream donors that use the same acceptor. For a novel acceptor, we", "# want to return the distance to the nearest upstream and downstream", "# acceptors that use the same donor. In some cases there may not be one of", "# the upstream or downstream donors/acceptors. In that case we will just", "# return nan.", "if", "strand", "==", "'+'", ":", "if", "novel_feature", "==", "'donor'", ":", "annot_feature", "=", "'acceptor'", "novel_location", "=", "'start'", "if", "novel_feature", "==", "'acceptor'", ":", "annot_feature", "=", "'donor'", "novel_location", "=", "'end'", "if", "strand", "==", "'-'", ":", "if", "novel_feature", "==", "'donor'", ":", "annot_feature", "=", "'acceptor'", "novel_location", "=", "'end'", "if", "novel_feature", "==", "'acceptor'", ":", "annot_feature", "=", "'donor'", "novel_location", "=", "'start'", "upstream_dists", "=", "[", "]", "downstream_dists", "=", "[", "]", "for", "i", "in", "df", ".", "index", ":", "a", "=", "df", ".", "ix", "[", "i", ",", "annot_feature", "]", "diff", "=", "df", ".", "ix", "[", "i", ",", "novel_location", "]", "-", "d", "[", "a", "]", "pos", "=", "diff", "[", "diff", ">", "0", "]", "neg", "=", "diff", "[", "diff", "<", "0", "]", "if", "strand", "==", "'+'", ":", "if", "pos", ".", "shape", "[", "0", "]", "==", "0", ":", "upstream_dists", ".", "append", "(", "np", ".", "nan", ")", "else", ":", "upstream_dists", ".", "append", "(", "pos", ".", "min", "(", ")", ")", "if", "neg", ".", "shape", "[", "0", "]", "==", "0", ":", "downstream_dists", ".", "append", "(", "np", ".", "nan", ")", "else", ":", "downstream_dists", ".", "append", "(", "np", ".", "abs", "(", "neg", ")", ".", "min", "(", ")", ")", "if", "strand", "==", "'-'", ":", "if", "pos", ".", "shape", "[", "0", "]", "==", "0", ":", "downstream_dists", ".", "append", "(", "np", ".", "nan", ")", "else", ":", "downstream_dists", ".", "append", "(", "pos", ".", "min", "(", ")", ")", "if", "neg", ".", "shape", "[", "0", "]", "==", "0", ":", "upstream_dists", ".", "append", "(", "np", ".", "nan", ")", "else", ":", "upstream_dists", ".", "append", "(", "np", ".", "abs", "(", "neg", ")", ".", "min", "(", ")", ")", "return", "upstream_dists", ",", "downstream_dists" ]
37.626374
0.001707
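The core computation in the record above is "distance to the nearest annotated position on each side": split the signed differences into positive and negative parts and take the smallest magnitude of each, with NaN when one side is empty (on the + strand, positive differences are the upstream side). A NumPy-only sketch:

import numpy as np

def nearest_up_down(novel_pos, annotated_positions):
    diff = novel_pos - np.asarray(annotated_positions)
    pos = diff[diff > 0]
    neg = diff[diff < 0]
    up = pos.min() if pos.size else np.nan
    down = np.abs(neg).min() if neg.size else np.nan
    return up, down

print(nearest_up_down(150, [100, 140, 200]))  # (10, 50)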
def prettify_xml(xml_root):
    """Returns pretty-printed string representation of element tree."""
    xml_string = etree.tostring(xml_root, encoding="utf-8", xml_declaration=True, pretty_print=True)
    return get_unicode_str(xml_string)
[ "def", "prettify_xml", "(", "xml_root", ")", ":", "xml_string", "=", "etree", ".", "tostring", "(", "xml_root", ",", "encoding", "=", "\"utf-8\"", ",", "xml_declaration", "=", "True", ",", "pretty_print", "=", "True", ")", "return", "get_unicode_str", "(", "xml_string", ")" ]
59
0.008368
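A hedged, standalone version of the record above using lxml directly (lxml must be installed; decoding the bytes stands in for the project-specific get_unicode_str helper):

from lxml import etree

root = etree.fromstring('<root><child name="a"/></root>')
xml_string = etree.tostring(root, encoding='utf-8',
                            xml_declaration=True, pretty_print=True)
print(xml_string.decode('utf-8'))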
def register(self, entry_point):
        """Register an extension

        :param str entry_point: extension to register (entry point syntax).
        :raise: ValueError if already registered.
        """
        if entry_point in self.registered_extensions:
            raise ValueError('Extension already registered')

        ep = EntryPoint.parse(entry_point)
        if ep.name in self.names():
            raise ValueError('An extension with the same name already exist')

        ext = self._load_one_plugin(ep, False, (), {}, False)
        self.extensions.append(ext)
        if self._extensions_by_name is not None:
            self._extensions_by_name[ext.name] = ext
        self.registered_extensions.insert(0, entry_point)
[ "def", "register", "(", "self", ",", "entry_point", ")", ":", "if", "entry_point", "in", "self", ".", "registered_extensions", ":", "raise", "ValueError", "(", "'Extension already registered'", ")", "ep", "=", "EntryPoint", ".", "parse", "(", "entry_point", ")", "if", "ep", ".", "name", "in", "self", ".", "names", "(", ")", ":", "raise", "ValueError", "(", "'An extension with the same name already exist'", ")", "ext", "=", "self", ".", "_load_one_plugin", "(", "ep", ",", "False", ",", "(", ")", ",", "{", "}", ",", "False", ")", "self", ".", "extensions", ".", "append", "(", "ext", ")", "if", "self", ".", "_extensions_by_name", "is", "not", "None", ":", "self", ".", "_extensions_by_name", "[", "ext", ".", "name", "]", "=", "ext", "self", ".", "registered_extensions", ".", "insert", "(", "0", ",", "entry_point", ")" ]
37.842105
0.002714
def load_data(self, filename, *args, **kwargs):
        """
        Load JSON data.

        :param filename: name of JSON file with data
        :type filename: str
        :return: data
        :rtype: dict
        """
        # append .json extension if needed
        if not filename.endswith('.json'):
            filename += '.json'  # append "json" to filename
        # open file and load JSON data
        with open(filename, 'r') as fid:
            json_data = json.load(fid)
        # if JSONReader is the original reader then apply units and return
        if (not self.orig_data_reader or
                isinstance(self, self.orig_data_reader)):
            return self.apply_units_to_cache(json_data['data'])
        # last modification since JSON file was saved
        utc_mod_time = json_data.get('utc_mod_time')
        # instance of original data reader with original parameters
        orig_data_reader_obj = self.orig_data_reader(self.parameters, self.meta)
        # check if file has been modified since saved as JSON file
        if utc_mod_time:
            # convert to ordered tuple
            utc_mod_time = time.struct_time(utc_mod_time)
            orig_filename = filename[:-5]  # original filename
            # use original file if it's been modified since JSON file saved
            if utc_mod_time < time.gmtime(os.path.getmtime(orig_filename)):
                os.remove(filename)  # delete JSON file
                return orig_data_reader_obj.load_data(orig_filename)
        # use JSON file if original file hasn't been modified
        return orig_data_reader_obj.apply_units_to_cache(json_data['data'])
[ "def", "load_data", "(", "self", ",", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# append .json extension if needed", "if", "not", "filename", ".", "endswith", "(", "'.json'", ")", ":", "filename", "+=", "'.json'", "# append \"json\" to filename", "# open file and load JSON data", "with", "open", "(", "filename", ",", "'r'", ")", "as", "fid", ":", "json_data", "=", "json", ".", "load", "(", "fid", ")", "# if JSONReader is the original reader then apply units and return", "if", "(", "not", "self", ".", "orig_data_reader", "or", "isinstance", "(", "self", ",", "self", ".", "orig_data_reader", ")", ")", ":", "return", "self", ".", "apply_units_to_cache", "(", "json_data", "[", "'data'", "]", ")", "# last modification since JSON file was saved", "utc_mod_time", "=", "json_data", ".", "get", "(", "'utc_mod_time'", ")", "# instance of original data reader with original parameters", "orig_data_reader_obj", "=", "self", ".", "orig_data_reader", "(", "self", ".", "parameters", ",", "self", ".", "meta", ")", "# check if file has been modified since saved as JSON file", "if", "utc_mod_time", ":", "# convert to ordered tuple", "utc_mod_time", "=", "time", ".", "struct_time", "(", "utc_mod_time", ")", "orig_filename", "=", "filename", "[", ":", "-", "5", "]", "# original filename", "# use original file if it's been modified since JSON file saved", "if", "utc_mod_time", "<", "time", ".", "gmtime", "(", "os", ".", "path", ".", "getmtime", "(", "orig_filename", ")", ")", ":", "os", ".", "remove", "(", "filename", ")", "# delete JSON file", "return", "orig_data_reader_obj", ".", "load_data", "(", "orig_filename", ")", "# use JSON file if original file hasn't been modified", "return", "orig_data_reader_obj", ".", "apply_units_to_cache", "(", "json_data", "[", "'data'", "]", ")" ]
47.470588
0.001821
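The freshness test in the record above compares a stored UTC timestamp against the source file's modification time; a small self-contained sketch of that check using a temporary file (struct_time values compare like tuples, which is what makes the `<` test work):

import os, time, tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('original data')
    orig_filename = f.name

utc_mod_time = list(time.gmtime(os.path.getmtime(orig_filename)))  # as it would be saved in JSON
cached = time.struct_time(utc_mod_time)                            # as it would be loaded back
stale = cached < time.gmtime(os.path.getmtime(orig_filename))
print('cache stale:', stale)  # False until the original file is modified again
os.remove(orig_filename)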
def _load_from_string(data):
    '''Loads the cache from the string'''
    global _CACHE
    if PYTHON_3:
        data = json.loads(data.decode("utf-8"))
    else:
        data = json.loads(data)
    _CACHE = _recursively_convert_unicode_to_str(data)['data']
[ "def", "_load_from_string", "(", "data", ")", ":", "global", "_CACHE", "if", "PYTHON_3", ":", "data", "=", "json", ".", "loads", "(", "data", ".", "decode", "(", "\"utf-8\"", ")", ")", "else", ":", "data", "=", "json", ".", "loads", "(", "data", ")", "_CACHE", "=", "_recursively_convert_unicode_to_str", "(", "data", ")", "[", "'data'", "]" ]
31.375
0.003876
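A small sketch of the Python 2/3 branch in the record above: json.loads needs text, so bytes are decoded first on Python 3 (recent versions of json.loads also accept bytes, but the explicit decode keeps behaviour identical across versions); `load_payload` is an illustrative name.

import json, sys

PYTHON_3 = sys.version_info[0] == 3

def load_payload(data):
    if PYTHON_3 and isinstance(data, bytes):
        data = data.decode('utf-8')
    return json.loads(data)

print(load_payload(b'{"data": {"key": "value"}}')['data'])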