input
stringlengths
11
5.29k
target
stringlengths
20
8.26k
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots']
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot']
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata']
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata']
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata']
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def read_current_from_device(self): collection = self.client.api.cm.shared.licensing.pools_s.get_collection( requests_params=dict( params="$filter=name+eq+'{0}'".format(self.want.name) ) ) resource = collection.pop() result = resource.attrs return Parameters(result)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def build_get_request( resource_group_name: str, managed_instance_name: str, database_name: str, query_id: str, subscription_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2020-11-01-preview" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}') path_format_arguments = { "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'), "databaseName": _SERIALIZER.url("database_name", database_name, 'str'), "queryId": _SERIALIZER.url("query_id", query_id, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs )
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def build_list_by_query_request( resource_group_name: str, managed_instance_name: str, database_name: str, query_id: str, subscription_id: str, *, start_time: Optional[str] = None, end_time: Optional[str] = None, interval: Optional[Union[str, "_models.QueryTimeGrainType"]] = None, **kwargs: Any ) -> HttpRequest: api_version = "2020-11-01-preview" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}/statistics') path_format_arguments = { "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'), "databaseName": _SERIALIZER.url("database_name", database_name, 'str'), "queryId": _SERIALIZER.url("query_id", query_id, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] if start_time is not None: query_parameters['startTime'] = _SERIALIZER.query("start_time", start_time, 'str') if end_time is not None: query_parameters['endTime'] = _SERIALIZER.query("end_time", end_time, 'str') if interval is not None: query_parameters['interval'] = _SERIALIZER.query("interval", interval, 'str') query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs )
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def app_settings(): app_settings = {'GRAPHITE_HOST': settings.GRAPHITE_HOST, 'OCULUS_HOST': settings.OCULUS_HOST, 'FULL_NAMESPACE': settings.FULL_NAMESPACE, } resp = json.dumps(app_settings) return resp, 200
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def data(): metric = request.args.get('metric', None) try: raw_series = REDIS_CONN.get(metric) if not raw_series: resp = json.dumps({'results': 'Error: No metric by that name'}) return resp, 404 else: unpacker = Unpacker(use_list = False) unpacker.feed(raw_series) timeseries = [item[:2] for item in unpacker] resp = json.dumps({'results': timeseries}) return resp, 200 except Exception as e: error = "Error: " + e resp = json.dumps({'results': error}) return resp, 500
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get( self, resource_group_name: str, managed_instance_name: str, database_name: str, query_id: str, **kwargs: Any ) -> "_models.ManagedInstanceQuery": """Get query by query id. :param resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :type resource_group_name: str :param managed_instance_name: The name of the managed instance. :type managed_instance_name: str :param database_name: The name of the database. :type database_name: str :param query_id: :type query_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ManagedInstanceQuery, or the result of cls(response) :rtype: ~azure.mgmt.sql.models.ManagedInstanceQuery :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceQuery"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def extract_data(pipeline_response): deserialized = self._deserialize("ManagedInstanceQueryStatistics", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_long_running_output(pipeline_response): deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def list_query_results_for_management_group( self, management_group_name: str, query_options: Optional["_models.QueryOptions"] = None, **kwargs: Any ) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]: """Queries policy tracked resources under the management group. :param management_group_name: Management group name. :type management_group_name: str :param query_options: Parameter group. :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
async def extract_data(pipeline_response): deserialized = self._deserialize('PolicyTrackedResourcesQueryResults', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def list_query_results_for_subscription( self, query_options: Optional["_models.QueryOptions"] = None, **kwargs: Any ) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]: """Queries policy tracked resources under the subscription. :param query_options: Parameter group. :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def list_query_results_for_resource_group( self, resource_group_name: str, query_options: Optional["_models.QueryOptions"] = None, **kwargs: Any ) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]: """Queries policy tracked resources under the resource group. :param resource_group_name: Resource group name. :type resource_group_name: str :param query_options: Parameter group. :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
async def extract_data(pipeline_response): deserialized = self._deserialize('PolicyTrackedResourcesQueryResults', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def list_query_results_for_resource( self, resource_id: str, query_options: Optional["_models.QueryOptions"] = None, **kwargs: Any ) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]: """Queries policy tracked resources under the resource. :param resource_id: Resource ID. :type resource_id: str :param query_options: Parameter group. :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def list_instances( self, ) -> Callable[[appengine.ListInstancesRequest], appengine.ListInstancesResponse]: r"""Return a callable for the list instances method over gRPC. Lists the instances of a version. Tip: To aggregate details about instances over time, see the `Stackdriver Monitoring API <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list>`__. Returns: Callable[[~.ListInstancesRequest], ~.ListInstancesResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: self._stubs["list_instances"] = self.grpc_channel.unary_unary( "/google.appengine.v1.Instances/ListInstances", request_serializer=appengine.ListInstancesRequest.serialize, response_deserializer=appengine.ListInstancesResponse.deserialize, ) return self._stubs["list_instances"]
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_instance( self, ) -> Callable[[appengine.GetInstanceRequest], instance.Instance]: r"""Return a callable for the get instance method over gRPC. Gets instance information. Returns: Callable[[~.GetInstanceRequest], ~.Instance]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: self._stubs["get_instance"] = self.grpc_channel.unary_unary( "/google.appengine.v1.Instances/GetInstance", request_serializer=appengine.GetInstanceRequest.serialize, response_deserializer=instance.Instance.deserialize, ) return self._stubs["get_instance"]
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def sm_section(name: str) -> str: """:return: section title used in .gitmodules configuration file""" return f'submodule "{name}"'
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def find_first_remote_branch(remotes: Sequence['Remote'], branch_name: str) -> 'RemoteReference': """Find the remote branch matching the name of the given branch or raise InvalidGitRepositoryError""" for remote in remotes: try: return remote.refs[branch_name] except IndexError: continue # END exception handling # END for remote raise InvalidGitRepositoryError("Didn't find remote branch '%r' in any of the given remotes" % branch_name)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_git_version(self): VERSION_PFX = 'git version ' version = self.run_command( ['version'], show_stdout=False, stdout_only=True ) if version.startswith(VERSION_PFX): version = version[len(VERSION_PFX):].split()[0] else: version = '' # get first 3 positions of the git version because # on windows it is x.y.z.windows.t, and this parses as # LegacyVersion which always smaller than a Version. version = '.'.join(version.split('.')[:3]) return parse_version(version)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def __call__(self, env, start_response): return env
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def GetLogs(region, stream_name, group_name, token=None): """Fetches the JSON formatted log stream starting at the token.""" get_cmd = util.AWS_PREFIX + [ '--region', region, 'logs', 'get-log-events', '--start-from-head', '--log-group-name', group_name, '--log-stream-name', stream_name, ] if token: get_cmd.extend(['--next-token', token]) stdout, _, _ = vm_util.IssueCommand(get_cmd) return json.loads(stdout)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def plugins(): plugins = get_all_plugins() return render_template("management/plugins.html", plugins=plugins)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def _checkProvenance(self, resp, item, version, user, eventType, matches=None, fileInfo=None, resource='item'): if resp is None: resp = self._getProvenance(item, user, resource=resource) self.assertStatusOk(resp) itemProvenance = resp.json self.assertEqual(itemProvenance['resourceId'], str(item['_id'])) provenance = itemProvenance['provenance'] self.assertEqual(provenance['eventType'], eventType) self.assertEqual(provenance['version'], version) self.assertEqual(str(provenance['eventUser']), str(user['_id'])) if matches: for key in matches: self.assertEqual(provenance[key], matches[key]) if fileInfo: for key in fileInfo: if isinstance(fileInfo[key], dict): for subkey in fileInfo[key]: self.assertEqual(provenance['file'][0][key][subkey], fileInfo[key][subkey]) else: self.assertEqual(provenance['file'][0][key], fileInfo[key])
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def _getProvenance(self, item, user, version=None, resource='item', checkOk=True): params = {} if version is not None: params = {'version': version} resp = self.request( path='/%s/%s/provenance' % (resource, item['_id']), method='GET', user=user, type='application/json', params=params) if checkOk: self.assertStatusOk(resp) return resp
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def setdata(self, request, response): self._calls.append(Call(request, response))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def _get_path(self, prefix, override_receipt_id=None): """ Returns the path to a resource with this instance's `receipt_id`. If `override_receipt_id` is given, the path to that resource will be retrieved instead. This allows us to retrieve images submitted in previous attempts (used for reverification, where we send a new face photo with the same photo ID from a previous attempt). """ receipt_id = self.receipt_id if override_receipt_id is None else override_receipt_id return os.path.join(prefix, receipt_id)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def retrieve(id, env=None, headers=None): return request.send('get', request.uri_path("plans",id), None, env, headers)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_tenant_type(self): """ Get the type of tenant. Will only work for multi type tenants :return: str """ return getattr(self, settings.MULTI_TYPE_DATABASE_FIELD)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def object_id(self): return ("ip-path-mtu-%d-%s-%d" % (self.table_id, self.nh, self.pmtu))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get(self): return json.loads(self._child_proxy.get())
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get(self): return self._child_proxy.get()[self._key]
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def getter(): return self._secret_client.get(name)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def getter(): return self._param_client.get(key)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def member_id(self): if self.device_is_address: # This range lookup is how you do lookups for single IP addresses. Weird. filter = "deviceAddress+eq+'{0}...{0}'".format(self.device) elif self.device_is_name: filter = "deviceName+eq+'{0}'".format(self.device) elif self.device_is_id: filter = "deviceMachineId+eq+'{0}'".format(self.device) else: raise F5ModuleError( "Unknown device format '{0}'".format(self.device) ) uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/' \ '?$filter={4}'.format(self.client.provider['server'], self.client.provider['server_port'], self.pool_id, self.key, filter) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status == 200 and response['totalItems'] == 0: return None elif 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp._content) result = response['items'][0]['id'] return result
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def cli_collect_summary(sys_argv): """Command Line Interface to collecting a summary of the model layers and weights. --m: Input model that is necessary to collect to the tensors, this is a required *option* --v: Verbose: Defines the logging level that the user will be exposed to """ parser = argparse.ArgumentParser( description='This script loads a pretrained model ' 'and prints names of weights and layers activations ' 'to use with other collect commands', prog='ludwig collect_summary', usage='%(prog)s [options]' ) # ---------------- # Model parameters # ---------------- parser.add_argument( '-m', '--model_path', help='model to load', required=True ) # ------------------ # Runtime parameters # ------------------ parser.add_argument( '-l', '--logging_level', default='info', help='the level of logging to use', choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'] ) args = parser.parse_args(sys_argv) args.logging_level = logging_level_registry[args.logging_level] logging.getLogger('ludwig').setLevel( args.logging_level ) global logger logger = logging.getLogger('ludwig.collect') print_ludwig('Collect Summary', LUDWIG_VERSION) print_model_summary(**vars(args))
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
async def _web_get_version(self, request: web.Request) -> web.Response: return web.Response(text=yacron.version.version)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def sample_create_context(): # Create a client client = aiplatform_v1.MetadataServiceClient() # Initialize request argument(s) request = aiplatform_v1.CreateContextRequest( parent="parent_value", ) # Make the request response = client.create_context(request=request) # Handle the response print(response)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def deserialize(self, request, data, format='application/json'): """ Given a request, data and a format, deserializes the given data.
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def build_schema(self): """ Returns a dictionary of all the fields on the resource and some properties about those fields.
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def obj_get_list(self, request=None, **kwargs): """ Fetches the list of objects available on the resource.
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def obj_get(self, request=None, **kwargs): """ Fetches an individual object on the resource.
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_list(self, request, **kwargs): """ Returns a serialized list of resources.
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_detail(self, request, **kwargs): """ Returns a single serialized resource.
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_schema(self, request, **kwargs): """ Returns a serialized form of the schema of the resource.
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def getInfo(self): return jsonified(self.updater.info())
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def getVersion(self): if not self.version: try: f = open(self.version_file, 'r') output = json.loads(f.read()) f.close() log.debug('Source version output: %s', output) self.version = output self.version['type'] = 'source' except Exception, e: log.error('Failed using source updater. %s', e) return {} return self.version
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def do_ReadPropertyRequest(self, apdu): """Return the value of some property of one of our objects.""" if _debug: ReadWritePropertyServices._debug("do_ReadPropertyRequest %r", apdu) # extract the object identifier objId = apdu.objectIdentifier # check for wildcard if (objId == ('device', 4194303)) and self.localDevice is not None: if _debug: ReadWritePropertyServices._debug(" - wildcard device identifier") objId = self.localDevice.objectIdentifier # get the object obj = self.get_object_id(objId) if _debug: ReadWritePropertyServices._debug(" - object: %r", obj) if not obj: raise ExecutionError(errorClass='object', errorCode='unknownObject') try: # get the datatype datatype = obj.get_datatype(apdu.propertyIdentifier) if _debug: ReadWritePropertyServices._debug(" - datatype: %r", datatype) # get the value value = obj.ReadProperty(apdu.propertyIdentifier, apdu.propertyArrayIndex) if _debug: ReadWritePropertyServices._debug(" - value: %r", value) if value is None: raise PropertyError(apdu.propertyIdentifier) # change atomic values into something encodeable if issubclass(datatype, Atomic) or (issubclass(datatype, (Array, List)) and isinstance(value, list)): value = datatype(value) elif issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None): if apdu.propertyArrayIndex == 0: value = Unsigned(value) elif issubclass(datatype.subtype, Atomic): value = datatype.subtype(value) elif not isinstance(value, datatype.subtype): raise TypeError("invalid result datatype, expecting {0} and got {1}" \ .format(datatype.subtype.__name__, type(value).__name__)) elif issubclass(datatype, List): value = datatype(value) elif not isinstance(value, datatype): raise TypeError("invalid result datatype, expecting {0} and got {1}" \ .format(datatype.__name__, type(value).__name__)) if _debug: ReadWritePropertyServices._debug(" - encodeable value: %r", value) # this is a ReadProperty ack resp = ReadPropertyACK(context=apdu) resp.objectIdentifier = objId resp.propertyIdentifier = 
apdu.propertyIdentifier resp.propertyArrayIndex = apdu.propertyArrayIndex # save the result in the property value resp.propertyValue = Any() resp.propertyValue.cast_in(value) if _debug: ReadWritePropertyServices._debug(" - resp: %r", resp) except PropertyError: raise ExecutionError(errorClass='property', errorCode='unknownProperty') # return the result self.response(resp)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def do_ReadPropertyMultipleRequest(self, apdu): """Respond to a ReadPropertyMultiple Request.""" if _debug: ReadWritePropertyMultipleServices._debug("do_ReadPropertyMultipleRequest %r", apdu) # response is a list of read access results (or an error) resp = None read_access_result_list = [] # loop through the request for read_access_spec in apdu.listOfReadAccessSpecs: # get the object identifier objectIdentifier = read_access_spec.objectIdentifier if _debug: ReadWritePropertyMultipleServices._debug(" - objectIdentifier: %r", objectIdentifier) # check for wildcard if (objectIdentifier == ('device', 4194303)) and self.localDevice is not None: if _debug: ReadWritePropertyMultipleServices._debug(" - wildcard device identifier") objectIdentifier = self.localDevice.objectIdentifier # get the object obj = self.get_object_id(objectIdentifier) if _debug: ReadWritePropertyMultipleServices._debug(" - object: %r", obj) # build a list of result elements read_access_result_element_list = [] # loop through the property references for prop_reference in read_access_spec.listOfPropertyReferences: # get the property identifier propertyIdentifier = prop_reference.propertyIdentifier if _debug: ReadWritePropertyMultipleServices._debug(" - propertyIdentifier: %r", propertyIdentifier) # get the array index (optional) propertyArrayIndex = prop_reference.propertyArrayIndex if _debug: ReadWritePropertyMultipleServices._debug(" - propertyArrayIndex: %r", propertyArrayIndex) # check for special property identifiers if propertyIdentifier in ('all', 'required', 'optional'): if not obj: # build a property access error read_result = ReadAccessResultElementChoice() read_result.propertyAccessError = ErrorType(errorClass='object', errorCode='unknownObject') # make an element for this error read_access_result_element = ReadAccessResultElement( propertyIdentifier=propertyIdentifier, propertyArrayIndex=propertyArrayIndex, readResult=read_result, ) # add it to the list 
read_access_result_element_list.append(read_access_result_element) else: for propId, prop in obj._properties.items(): if _debug: ReadWritePropertyMultipleServices._debug(" - checking: %r %r", propId, prop.optional)
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def fetch_pipeline_from_server(self): """ Method fetches pipeline from server/cloud """ # TODO pass
def __init__(self): self.filediff = None meldsettings.connect('changed', self.on_setting_changed)
def get_properties(autoscaling_group): properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES) # Ugly hack to make this JSON-serializable. We take a list of boto Tag # objects and replace them with a dict-representation. Needed because the # tags are included in ansible's return value (which is jsonified) if 'tags' in properties and isinstance(properties['tags'], list): serializable_tags = {} for tag in properties['tags']: serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch] properties['tags'] = serializable_tags properties['healthy_instances'] = 0 properties['in_service_instances'] = 0 properties['unhealthy_instances'] = 0 properties['pending_instances'] = 0 properties['viable_instances'] = 0 properties['terminating_instances'] = 0 instance_facts = {} if autoscaling_group.instances: properties['instances'] = [i.instance_id for i in autoscaling_group.instances] for i in autoscaling_group.instances: instance_facts[i.instance_id] = {'health_status': i.health_status, 'lifecycle_state': i.lifecycle_state, 'launch_config_name': i.launch_config_name } if i.health_status == 'Healthy' and i.lifecycle_state == 'InService': properties['viable_instances'] += 1 if i.health_status == 'Healthy': properties['healthy_instances'] += 1 else: properties['unhealthy_instances'] += 1 if i.lifecycle_state == 'InService': properties['in_service_instances'] += 1 if i.lifecycle_state == 'Terminating': properties['terminating_instances'] += 1 if i.lifecycle_state == 'Pending': properties['pending_instances'] += 1 properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.load_balancers if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) return properties
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def dump(lines):
    """Parse metadata `lines`, tag the result with a validity status, and
    return it with the internal "domain" key stripped."""
    metadata, parse_errors = parse(lines)
    if parse_errors:
        logging.warning(
            "Invalid metadata found errors=%s", parse_errors)
    metadata["status"] = sc.VOL_STATUS_INVALID if parse_errors else sc.VOL_STATUS_OK
    # Do not include domain in dump output.
    metadata.pop("domain", None)
    return metadata
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def _process_args(self):
    """Resolve the NewsItem referenced by the request URL, or 404."""
    RHManageNewsBase._process_args(self)
    self.item = NewsItem.get_or_404(request.view_args['news_id'])
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def _get(self, resource, name=None, selector=None):
    '''return a resource by name

       selector takes precedence over name; output is normalised so that
       rval['results'] is always a list.
    '''
    cmd = ['get', resource]
    if selector is not None:
        cmd.append('--selector={}'.format(selector))
    elif name is not None:
        cmd.append(name)
    cmd.extend(['-o', 'json'])
    rval = self.openshift_cmd(cmd, output=True)
    # Ensure results are returned in an array
    if 'items' in rval:
        rval['results'] = rval['items']
    elif not isinstance(rval['results'], list):
        rval['results'] = [rval['results']]
    return rval
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def _list_pods(self, node=None, selector=None, pod_selector=None):
    ''' perform oadm list pods

        node: the node in which to list pods
        selector: the label selector filter if provided
        pod_selector: the pod selector filter if provided
    '''
    cmd = ['manage-node']
    if node:
        # NOTE(review): extend() implies `node` is a list of node names —
        # a bare string would be split into characters; confirm callers.
        cmd.extend(node)
    else:
        # NOTE(review): if both node and selector are None this emits
        # '--selector=None' — confirm callers always pass one of them.
        cmd.append('--selector={}'.format(selector))
    if pod_selector:
        cmd.append('--pod-selector={}'.format(pod_selector))
    cmd.extend(['--list-pods', '-o', 'json'])
    return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def _version(self):
    ''' return the openshift version (raw `oc version` command output)'''
    return self.openshift_cmd(['version'], output=True, output_type='raw')
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def filter_versions(stdout):
    """Extract oc/openshift/kubernetes version strings from `oc version` output.

    Returns a dict mapping each recognised component name to the last
    whitespace-separated token of its line.
    """
    version_dict = {}
    search_terms = ['oc', 'openshift', 'kubernetes']
    for raw_line in stdout.strip().split('\n'):
        if not raw_line:
            continue
        for term in search_terms:
            if raw_line.startswith(term):
                version_dict[term] = raw_line.split()[-1]
    # horrible hack to get openshift version in Openshift 3.2:
    # "oc version" in 3.2 does not report an "openshift" line, so fall
    # back to the oc version.
    if "openshift" not in version_dict:
        version_dict["openshift"] = version_dict["oc"]
    return version_dict
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get_volume_mounts(self):
    '''return volume mount information (delegates to get_volumes with mounts=True)'''
    return self.get_volumes(mounts=True)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get_volumes(self, mounts=False):
    '''return volume definitions from the deployment config;
       with mounts=True, return the volume mounts instead.
       Always returns a list (empty when unset).
    '''
    if mounts:
        return self.get(DeploymentConfig.volume_mounts_path) or []
    return self.get(DeploymentConfig.volumes_path) or []
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def create_dict(self):
    ''' instantiate the v1 Service resource dict in self.data from
        the instance attributes; optional fields are only emitted when set '''
    metadata = {'name': self.name, 'namespace': self.namespace}
    if self.labels:
        metadata['labels'] = dict(self.labels.items())
    spec = {'ports': self.ports if self.ports else []}
    if self.selector:
        spec['selector'] = self.selector
    spec['sessionAffinity'] = self.session_affinity or 'None'
    if self.cluster_ip:
        spec['clusterIP'] = self.cluster_ip
    if self.portal_ip:
        spec['portalIP'] = self.portal_ip
    if self.service_type:
        spec['type'] = self.service_type
    if self.external_ips:
        spec['externalIPs'] = self.external_ips
    self.data['apiVersion'] = 'v1'
    self.data['kind'] = 'Service'
    self.data['metadata'] = metadata
    self.data['spec'] = spec
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get_ports(self):
    ''' get a list of ports from the service spec (empty list when unset)'''
    return self.get(Service.port_path) or []
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get_selector(self):
    ''' get the service selector (empty dict when unset)'''
    return self.get(Service.selector_path) or {}
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def create_volume_structure(volume_info):
    ''' return a properly structured (volume, volume_mount) pair for the
        given volume description; volume_mount is None for types that do
        not produce one (pvc, hostPath) '''
    info = volume_info
    mount = None
    vol = {'name': info['name']}
    kind = info['type'].lower()
    if kind == 'secret':
        vol['secret'] = {}
        # keyed by the original-case type string, exactly as provided
        vol[info['type']] = {'secretName': info['secret_name']}
        mount = {'mountPath': info['path'], 'name': info['name']}
    elif kind == 'emptydir':
        vol['emptyDir'] = {}
        mount = {'mountPath': info['path'], 'name': info['name']}
    elif kind in ('pvc', 'persistentvolumeclaim'):
        vol['persistentVolumeClaim'] = {'claimName': info['claimName'],
                                        'claimSize': info['claimSize']}
    elif kind == 'hostpath':
        vol['hostPath'] = {'path': info['path']}
    elif kind == 'configmap':
        vol['configMap'] = {'name': info['configmap_name']}
        mount = {'mountPath': info['path'], 'name': info['name']}
    return (vol, mount)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get(self):
    '''get and return version information

       Returns a dict merging the filtered `oc version` fields with the
       derived custom versions; raises OpenShiftCLIError when the version
       command fails.
    '''
    results = {}
    version_results = self._version()
    if version_results['returncode'] == 0:
        filtered_vers = Utils.filter_versions(version_results['results'])
        custom_vers = Utils.add_custom_versions(filtered_vers)
        results['returncode'] = version_results['returncode']
        results.update(filtered_vers)
        results.update(custom_vers)
        return results
    raise OpenShiftCLIError('Problem detecting openshift version.')
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def run_ansible(params):
    '''run the idempotent ansible code

       NOTE(review): implicitly returns None for any state other than
       'list' — confirm the caller treats that as an error.
    '''
    oc_version = OCVersion(params['kubeconfig'], params['debug'])
    if params['state'] == 'list':
        #pylint: disable=protected-access
        result = oc_version.get()
        return {'state': params['state'],
                'results': result,
                'changed': False}
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def handle_release_assets(assets):
    """Select the single x86_64 redhawk-ide asset and download it.

    Exits the process when no asset or more than one asset matches.
    """
    assets = [
        asset for asset in assets
        if re.match(r'redhawk-ide.+?(?=x86_64)', asset['name'])]
    if not assets:
        sys.exit('Failed to find the IDE asset')
    elif len(assets) > 1:
        sys.exit('Found too many IDE assets matching that description...?')
    return download_asset('downloads', assets[0]['browser_download_url'])
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def run(pv):
    """Download the IDE asset for the release tagged `pv`.

    Returns the path produced by handle_release_assets; exits the process
    (sys.exit) when the release cannot be found.

    Fix: the original wrapped the lookup in try/finally with
    `return ide_asset` inside the `finally` clause. A `return` in `finally`
    discards any in-flight exception — including the SystemExit raised by
    sys.exit — so every error path was silently swallowed and '' returned.
    """
    RELEASES_URL = 'http://api.github.com/repos/RedhawkSDR/redhawk/releases'
    releases = json.loads(urllib2.urlopen(RELEASES_URL).read())
    releases = [r for r in releases if r['tag_name'] == pv]
    if not releases:
        sys.exit('Failed to find the release: {0}'.format(pv))
    return handle_release_assets(releases[0]['assets'])
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def build_list_for_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists policy assignments in a resource group."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        # skip_quote: the $filter expression is passed through without URL-encoding
        query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def build_list_for_resource_request(
    resource_group_name: str,
    resource_provider_namespace: str,
    parent_resource_path: str,
    resource_type: str,
    resource_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists policy assignments for a specific resource."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
        "parentResourcePath": _SERIALIZER.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
        "resourceType": _SERIALIZER.url("resource_type", resource_type, 'str', skip_quote=True),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        # NOTE(review): unlike the resource-group variant, $filter is URL-encoded
        # here (no skip_quote) — confirm this matches the generated swagger.
        query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def extract_data(pipeline_response):
    """Deserialize one page and return (next_link_or_None, iterator_of_items).

    Uses `self` and `cls` from the enclosing scope: `cls` is an optional
    caller-supplied transform applied to the element list.
    """
    deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
    list_of_elem = deserialized.value
    if cls:
        list_of_elem = cls(list_of_elem)
    return deserialized.next_link or None, iter(list_of_elem)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def from_read_session(read_session):
    """Choose the stream parser (Avro or Arrow) matching the session's schema.

    Raises TypeError for any other schema oneof value.
    """
    schema_type = read_session._pb.WhichOneof("schema")
    if schema_type == "avro_schema":
        return _AvroStreamParser(read_session)
    elif schema_type == "arrow_schema":
        return _ArrowStreamParser(read_session)
    else:
        raise TypeError(
            "Unsupported schema type in read_session: {0}".format(schema_type)
        )
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def _get_aws_region():  # pragma: no cover
    """Return the region name of the default boto3 session."""
    default_session = boto3.session.Session()
    return default_session.region_name
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def _print_status(config):
    """Print the deployment status of the stack named in `config`.

    Shows function name, S3 bucket and per-stage versions, plus any extra
    status strings contributed by plugins.
    """
    cfn = boto3.client('cloudformation')
    lmb = boto3.client('lambda')
    try:
        stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0]
    except botocore.exceptions.ClientError:
        print('{} has not been deployed yet.'.format(config['name']))
    else:
        print('{} is deployed!'.format(config['name']))
        print(' Function name: {}'.format(
            _get_from_stack(stack, 'Output', 'FunctionArn').split(':')[-1]))
        print(' S3 bucket: {}'.format(config['aws']['s3_bucket']))
        print(' Stages:')
        stages = list(config['stage_environments'].keys())
        stages.sort()
        # Collect per-stage status strings from any plugin that reports one.
        plugin_status = {}
        for name, plugin in plugins.items():
            if name in config and hasattr(plugin, 'status'):
                statuses = plugin.status(config, stack)
                if statuses:
                    for s, status in statuses.items():
                        plugin_status.setdefault(s, []).append(status)
        for s in stages:
            fd = None
            try:
                fd = lmb.get_function(FunctionName=_get_from_stack(
                    stack, 'Output', 'FunctionArn'), Qualifier=s)
            except botocore.exceptions.ClientError:  # pragma: no cover
                # stage alias not published yet — skip it
                continue
            v = ':{}'.format(fd['Configuration']['Version'])
            if s in plugin_status and len(plugin_status[s]) > 0:
                print(' {}{}: {}'.format(s, v, ' '.join(plugin_status[s])))
            else:
                print(' {}{}'.format(s, v))
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get_all_coeffs():
    """Get all available calibration coefficients for the satellites."""
    coeffs = {}
    for platform, channel_urls in URLS.items():
        coeffs.setdefault(platform, {})
        for chan, url in channel_urls.items():
            print(url)
            coeffs[platform][chan] = get_coeffs(get_page(url))
    return coeffs
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def getCanonicalTrips(uid, get_representative=False):  # number returned isn't used
    """Return an iterator of canonical-trip dicts for the user.

    uid is a UUID object, not a string.
    """
    logging.debug('UUID for canonical %s' % uid)
    trip_dicts = []
    for (cluster, representative_trip) in get_clusters_info(uid):
        entry = dict()
        entry["representative_trip"] = representative_trip
        entry["start_point_distr"] = cluster[2]
        entry["end_point_distr"] = cluster[3]
        entry["start_time_distr"] = cluster[0]
        entry["end_time_distr"] = cluster[1]
        entry["confirmed_mode_list"] = cluster[4]
        trip_dicts.append(entry)
    return iter(trip_dicts)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def getNoAlternatives(uid):
    """Return 'move' trips for `uid` that have no pipelineFlags yet."""
    # If pipelineFlags exists then we have started alternatives, and so have
    # already scheduled the query. No need to reschedule unless the query fails.
    # TODO: If the query fails, then remove the pipelineFlags so that we will
    # reschedule.
    query = {'user_id': uid, 'type': 'move',
             'pipelineFlags': {'$exists': False}}
    return get_trip_db().find(query)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get_role_name(region, account_id, role):
    """Shortcut to insert the `account_id` and `role` into the iam string."""
    partition = ARN_PREFIXES.get(region, "aws")
    return f"arn:{partition}:iam::{account_id}:role/{role}"
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
    """Register and upload a function to AWS Lambda.

    With use_s3=True the code is referenced from the bucket/key pair,
    otherwise the zip bytes are uploaded inline.
    """
    print("Creating your new Lambda function")
    byte_stream = read(path_to_zip_file, binary_file=True)
    profile_name = cfg.get("profile")
    aws_access_key_id = cfg.get("aws_access_key_id")
    aws_secret_access_key = cfg.get("aws_secret_access_key")
    account_id = get_account_id(
        profile_name, aws_access_key_id, aws_secret_access_key,
        cfg.get("region",),
    )
    role = get_role_name(
        cfg.get("region"), account_id,
        cfg.get("role", "lambda_basic_execution"),
    )
    client = get_client(
        "lambda", profile_name, aws_access_key_id, aws_secret_access_key,
        cfg.get("region"),
    )
    # Do we prefer development variable over config?
    buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
    func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
        "function_name"
    )
    print("Creating lambda function with name: {}".format(func_name))
    # NOTE(review): the two kwargs dicts below differ only in "Code" —
    # candidates for consolidation.
    if use_s3:
        kwargs = {
            "FunctionName": func_name,
            "Runtime": cfg.get("runtime", "python2.7"),
            "Role": role,
            "Handler": cfg.get("handler"),
            "Code": {
                "S3Bucket": "{}".format(buck_name),
                "S3Key": "{}".format(s3_file),
            },
            "Description": cfg.get("description", ""),
            "Timeout": cfg.get("timeout", 15),
            "MemorySize": cfg.get("memory_size", 512),
            "VpcConfig": {
                "SubnetIds": cfg.get("subnet_ids", []),
                "SecurityGroupIds": cfg.get("security_group_ids", []),
            },
            "Publish": True,
        }
    else:
        kwargs = {
            "FunctionName": func_name,
            "Runtime": cfg.get("runtime", "python2.7"),
            "Role": role,
            "Handler": cfg.get("handler"),
            "Code": {"ZipFile": byte_stream},
            "Description": cfg.get("description", ""),
            "Timeout": cfg.get("timeout", 15),
            "MemorySize": cfg.get("memory_size", 512),
            "VpcConfig": {
                "SubnetIds": cfg.get("subnet_ids", []),
                "SecurityGroupIds": cfg.get("security_group_ids", []),
            },
            "Publish": True,
        }
    if "tags" in cfg:
        kwargs.update(
            Tags={key: str(value) for key, value in cfg.get("tags").items()}
        )
    if "environment_variables" in cfg:
        kwargs.update(
            Environment={
                "Variables": {
                    key: get_environment_variable_value(value)
                    for key, value in cfg.get("environment_variables").items()
                },
            },
        )
    client.create_function(**kwargs)
    # Optionally reserve concurrency for the new function.
    concurrency = get_concurrency(cfg)
    if concurrency > 0:
        client.put_function_concurrency(
            FunctionName=func_name, ReservedConcurrentExecutions=concurrency
        )
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def init_response_attributes(self, etree):
    """Extract the backend transaction id and status from an MSS_SignatureResp.

    Raises ResponseParseError when the expected nodes/attributes are missing
    or the document fails schema validation.
    """
    try:
        self.backend_transaction_id = etree.xpath('//MSS_SignatureResp')[0].attrib[
            'MSSP_TransID'
        ]
        self.status = etree.xpath(
            '//ns6:StatusCode', namespaces={'ns6': self.ns_namespace}
        )[0].attrib['Value']
    except (IndexError, KeyError, lxml.etree.XMLSchemaError) as e:
        raise ResponseParseError(
            'Cannot parse signature response: %s. Response content: %s'
            % (e, lxml.etree.tostring(etree))
        )
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def init_response_attributes(self, etree):
    """Extract status (and, when present, civil number) from a status response.

    Raises ResponseParseError when the status code cannot be read or the
    civil-number tag text is malformed.
    """
    try:
        status_code = etree.xpath(
            '//ns5:StatusCode', namespaces={'ns5': self.ns_namespace}
        )[0].attrib['Value']
    except (IndexError, KeyError, lxml.etree.XMLSchemaError) as e:
        raise ResponseParseError(
            'Cannot parse status response: %s. Response content: %s'
            % (e, lxml.etree.tostring(etree))
        )
    self.status = Statuses.map(status_code)
    try:
        civil_number_tag = etree.xpath(
            '//ns4:UserIdentifier', namespaces={'ns4': self.ns_namespace}
        )[0]
    except IndexError:
        # civil number tag does not exist - this is possible if request is still processing
        return
    else:
        try:
            # tag text has the form "<key>=<civil_number>"
            self.civil_number = civil_number_tag.text.split('=')[1]
        except IndexError:
            raise ResponseParseError(
                'Cannot get civil_number from tag text: %s' % civil_number_tag.text
            )
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def getRootNodeName(self, uriText):
    """Derive a root node name from a layer URI according to provider type:
    in-memory layers, database URIs (dbname=...), piped file URIs, or an
    'unrecognised_format' fallback."""
    if 'memory?' in uriText:
        return 'memory'
    if 'dbname' in uriText:
        return uriText.replace('dbname=', '').split(' ')[0]
    if '|' in uriText:
        return os.path.dirname(uriText.split(' ')[0].split('|')[0])
    return 'unrecognised_format'
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def name(self):
    """Fixed, non-localised algorithm identifier.

    Must be unique within the provider and contain only lowercase
    alphanumeric characters (no spaces or formatting characters).
    """
    return 'grouplayers'
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def displayName(self):
    """
    Returns the translated algorithm name, which should be used for any
    user-visible display of the algorithm name.
    """
    return self.tr('Group Layers')
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def postProcess(self):
    """Expand macros in each (pkg, content) pair and register its property data.

    Each pkg spec has the form "<pkgName>:<compName>" after macro expansion.
    """
    for pkg, content in self.contents:
        pkg = pkg % self.macros
        pkgName, compName = pkg.split(':')
        self._parsePropertyData(content, pkgName, compName)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def _parsePropertyData(self, xml, pkgName, compName):
    """Register `xml` property data for (pkgName, compName), once per pair."""
    key = (pkgName, compName)
    registered = self.propMap.setdefault(xml, set())
    if key in registered:
        return
    registered.add(key)
    self.recipe._addProperty(trove._PROPERTY_TYPE_SMARTFORM, pkgName,
                             compName, xml)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def step_impl(context, user_names_str):
    """Create a test user (with a fake token) for each name in the
    'and'/comma-separated `user_names_str`, stored on context.users."""
    if not hasattr(context, 'users'):
        context.users = {}
    user_names = [name.strip() for name in re.split('and|,', user_names_str)]
    for user_name in user_names:
        token = 'fake_token_' + user_name
        user_id = context.helpers.create_test_user(user_name, token)
        context.users[user_name] = {'token': token, 'id': user_id}
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def step_impl(context, user_name):
    """Use `user_name`'s token for subsequent requests."""
    context.token = context.users[user_name]['token']
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def step_impl(context):
    """Replace the "<name's id>" placeholder in the JSON body's 'receiver'
    field with that user's real id, then attach the data to the request."""
    data = json.loads(context.text)
    receiver_name = re.match(r"\<(\w+)'s id\>", data['receiver']).group(1)
    data['receiver'] = context.users[receiver_name]['id']
    context.request.add_data(data)
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def exit_submission(self):
    "Close the submission and return to the subreddit page"
    # NOTE(review): presumably the owning loop watches `active` and pops back
    # to the subreddit view when it goes False — confirm.
    self.active = False
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def test_when_no_users_returned_no_data_should_be_returned(verbose):
    """Empty pdbedit output should yield {} (verbose) or [] (non-verbose)."""
    expected_users = {} if verbose else []
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": "", "stderr": "", "retcode": 0}
            )
        },
    ):
        actual_users = pdbedit.list_users(verbose=verbose)
    assert actual_users == expected_users
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def test_when_verbose_and_retcode_is_nonzero_output_should_be_had():
    """A non-zero pdbedit retcode should be logged via log.error with stderr."""
    expected_stderr = "this is something fnord"
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": "", "stderr": expected_stderr, "retcode": 1}
            )
        },
    ), patch("salt.modules.pdbedit.log.error", autospec=True) as fake_error_log:
        pdbedit.list_users(verbose=True)
        # first positional arg of the first log.error call is the stderr text
        actual_error = fake_error_log.mock_calls[0].args[0]
        assert actual_error == expected_stderr
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def test_when_verbose_and_single_good_output_expected_data_should_be_parsed():
    """Well-formed verbose pdbedit output should parse into a per-user dict
    keyed by the unix username, with lowercased field names."""
    expected_data = {
        "roscivs": {
            "unix username": "roscivs",
            "nt username": "bottia",
            "full name": "Roscivs Bottia",
            "user sid": "42",
            "primary group sid": "99",
            "home directory": r"\\samba\roscivs",
            "account desc": "separators! xxx so long and thanks for all the fish",
            "logoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
            "kickoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
            "password must change": "never",
        }
    }
    pdb_output = dedent(
        r"""
        Unix username: roscivs
        NT username: bottia
        User SID: 42
        Primary Group SID: 99
        Full Name: Roscivs Bottia
        Home Directory: \\samba\roscivs
        Account desc: separators! xxx so long and thanks for all the fish
        Logoff time: Sat, 14 Aug 2010 15:06:39 UTC
        Kickoff time: Sat, 14 Aug 2010 15:06:39 UTC
        Password must change: never
        """
    ).strip()
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
            )
        },
    ):
        actual_data = pdbedit.list_users(verbose=True)
    assert actual_data == expected_data
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def get_active_users(): data = frappe.db.sql("""select name, (select count(*) from tabSessions where user=tabUser.name and timediff(now(), lastupdate) < time("01:00:00")) as has_session from tabUser where enabled=1 and ifnull(user_type, '')!='Website User' and name not in ({}) order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1) # make sure current user is at the top, using has_session = 100 users = [d.name for d in data] if frappe.session.user in users: data[users.index(frappe.session.user)]["has_session"] = 100 else: # in case of administrator data.append({"name": frappe.session.user, "has_session": 100}) return data
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def test_watch_task(client):
    """POSTing to tasks-watch as a project admin should succeed (200)."""
    user = f.UserFactory.create()
    task = f.create_task(owner=user, milestone=None)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    url = reverse("tasks-watch", args=(task.id,))
    client.login(user)
    response = client.post(url)
    assert response.status_code == 200
def __init__(self):
    """Initialise with no FileDiff and subscribe to Meld settings changes."""
    # Attached later by the owning component.
    self.filediff = None
    # NOTE(review): assumes a module-level `meldsettings` signal emitter — confirm import.
    meldsettings.connect('changed', self.on_setting_changed)
def test_unwatch_task(client):
    """POSTing to tasks-unwatch as a project admin should succeed (200).

    Fix: this test was a byte-for-byte copy of test_watch_task and posted
    to the "tasks-watch" route, so the unwatch endpoint was never
    exercised. It now targets "tasks-unwatch".
    """
    user = f.UserFactory.create()
    task = f.create_task(owner=user, milestone=None)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    url = reverse("tasks-unwatch", args=(task.id,))
    client.login(user)
    response = client.post(url)
    assert response.status_code == 200