def _readBodyCallback(self, dRes, body):
    self.busy = False
    dRes.callback(body)
    self._processQ()
def listen(self, event, handler):
    """
    Register an event handler. Returns True on success. Event names are
    defined in `commands`. Handlers must be callable.

    :param str event: The event name to listen for.
    :param callable handler: The callable event handler.
    """
    if event not in self.commands:
        return False
    if not callable(handler):
        return False
    self._handlers[event].append(handler)
    return True
def test__path_contained_by(self):
    p1 = '/foo//bar/baz/../stuff/'
    p2 = '/a/b/c/d/../../../../foo/bar/stufff'
    p3 = '/a/b/c/d/../../../../foo/bar/stuff/11'
    assert not build._path_contained_by(p1, p2)
    assert build._path_contained_by(p1, p3)
@send_self
@catch_errors
def run(self, *args, **varargs):
    this = yield
    window = sublime.active_window()
    view = window.active_view()
    view_name = os.path.split(view.file_name() or view.name())[1]

    frames = ['Running Nim Check' + f for f in busy_frames]
    stop_status_loop = loop_status_msg(frames, 0.15)

    # Save view text
    if view.is_dirty():
        view.run_command('save')

    # Run 'nim check' on the current view and retrieve the output.
    # project_file = get_nim_project(window, view) or view.file_name()
    process, stdout, stderr, error = yield run_nimcheck(
        view.file_name(), this.send, self.verbosity
    )
    yield stop_status_loop(get_next_method(this))

    if handle_process_error(error, 'Nim Check Failed', 'Nim'):
        yield

    messages = parse_nimcheck_output(stdout)
    sublime.status_message('Nim Check Finished.')
    self.highlight_and_list_messages(messages, window, view)

    if self.send_output:
        if self.raw_output:
            content = stdout
        else:
            gen = (m[5] for m in messages if view_name == m[0])
            content = '\n'.join(gen)
        self.write_to_output(content, window, view)
    yield
def __init__(self, ytid):
    self.ytid = ytid
    self.submissions = []
def __reverse_in_node(self, node_data):
    """Generates a string that matches an 'in' node from the regular
    expression AST. Such a node is an alternation between several variants.
    """
    chosen = random.choice(node_data)
    type_, data = chosen

    if type_ == 'range':  # TODO: add support for negation: [^...]
        min_char, max_char = data
        return chr(random.randint(min_char, max_char))
    if type_ == 'category':
        return self.__reverse_category_node(data)

    return self.__reverse_node(chosen)
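# Illustrative note (an assumption, not from the source): node_data appears to
# have the shape the Python 2 sre_parse module produces for a character class,
# where the constants are plain strings. For example, sre_parse.parse('[a-c_]')
# yields roughly
#     [('in', [('range', (97, 99)), ('literal', 95)])]
# so random.choice picks one alternative ('range', 'category', or a literal)
# and the method renders a single character that matches it.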
def _build_dict(self):
    if self.nested_path:
        self[self.nested_path] = self._nesting()
    else:
        self[self.agg_name] = {self.metric: {"field": self.field_name}}
        if self.metric == "terms":
            self[self.agg_name][self.metric].update({
                "size": self.size,
                "order": {self.order_type: self.order_dir},
                "min_doc_count": self.min_doc_count
            })

        if self.range_list:
            if not self.range_name:
                range_name = "{name}_ranges".format(name=self.field_name)
            else:
                range_name = self.range_name
            self[range_name] = {"range": {
                "field": self.field_name,
                "ranges": self._ranging()
            }}
            self.pop(self.agg_name)

        if self.interval:
            self[self.agg_name]["histogram"] = {
                "field": self.field_name,
                "interval": self.interval,
                "order": {self.order_type: self.order_dir},
                "min_doc_count": self.min_doc_count
            }
            self[self.agg_name].pop(self.metric)
        elif self.filter_val and self.filter_name:
            self[self.filter_name] = {'filter': self.filter_val, 'aggregations': {}}
            self[self.filter_name]['aggregations'][self.agg_name] = self.pop(self.agg_name)
        elif self.global_name:
            self[self.global_name] = {"global": {}, "aggregations": {}}
            self[self.global_name]['aggregations'][self.agg_name] = self.pop(self.agg_name)
def recognize_sphinx(self, audio_data, language="en-US", show_all=False):
    """
    Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.

    The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``.

    Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.

    Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.
    """
    assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
    assert isinstance(language, str), "`language` must be a string"

    # import the PocketSphinx speech recognition module
    try:
        from pocketsphinx import pocketsphinx
        from sphinxbase import sphinxbase
    except ImportError:
        raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.")
    except ValueError:
        raise RequestError("bad PocketSphinx installation detected; make sure you have PocketSphinx version 0.0.9 or better.")

    language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language)
    if not os.path.isdir(language_directory):
        raise RequestError("missing PocketSphinx language data directory: \"{0}\"".format(language_directory))
    acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model")
    if not os.path.isdir(acoustic_parameters_directory):
        raise RequestError("missing PocketSphinx language model parameters directory: \"{0}\"".format(acoustic_parameters_directory))
    language_model_file = os.path.join(language_directory, "language-model.lm.bin")
    if not os.path.isfile(language_model_file):
        raise RequestError("missing PocketSphinx language model file: \"{0}\"".format(language_model_file))
    phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict")
    if not os.path.isfile(phoneme_dictionary_file):
        raise RequestError("missing PocketSphinx phoneme dictionary file: \"{0}\"".format(phoneme_dictionary_file))

    # create decoder object
    config = pocketsphinx.Decoder.default_config()
    config.set_string("-hmm", acoustic_parameters_directory)  # set the path of the hidden Markov model (HMM) parameter files
    config.set_string("-lm", language_model_file)
    config.set_string("-dict", phoneme_dictionary_file)
    config.set_string("-logfn", os.devnull)  # disable logging (logging causes unwanted output in terminal)
    decoder = pocketsphinx.Decoder(config)

    # obtain audio data
    raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2)  # the included language models require audio to be 16-bit mono 16 kHz in little-endian format

    # obtain recognition results
    decoder.start_utt()  # begin utterance processing
    decoder.process_raw(raw_data, False, True)  # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
    decoder.end_utt()  # stop utterance processing

    if show_all:
        return decoder

    # return results
    hypothesis = decoder.hyp()
    if hypothesis is not None:
        return hypothesis.hypstr
    raise UnknownValueError()  # no transcriptions available
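# Hypothetical usage sketch (the standard speech_recognition calling pattern;
# the module alias and file name are illustrative, not from the source):
#
#   import speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile('speech.wav') as source:
#       audio = r.record(source)
#   print(r.recognize_sphinx(audio))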
@httpretty.activate
def test_queryset_getitem_with_post_query_action():
    """
    Fetch from QuerySet with __getitem__ and post query action
    """
    # When I create a query block
    t = QuerySet("localhost", index="bar")

    # And I have a post query action
    global my_global_var
    my_global_var = 1

    def action(self, results, start, stop):
        global my_global_var
        my_global_var += 1

    t.post_query_actions(action)

    # And I have records
    response = {
        "took": 12,
        "timed_out": False,
        "_shards": {
            "total": 5,
            "successful": 5,
            "failed": 0
        },
        "hits": {
            "total": 1,
            "max_score": 10,
            "hits": [
                {
                    "_index": "bar",
                    "_type": "baz",
                    "_id": "1",
                    "_score": 10,
                    "_source": {
                        "foo": "bar"
                    },
                    "sort": [
                        1395687078000
                    ]
                }
            ]
        }
    }
    httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
                           body=json.dumps(response),
                           content_type="application/json")

    results = t[0:1]
    len(results).should.equal(1)
    t.count().should.equal(1)

    # Then I see the correct results
    results[0]['_id'].should.equal('1')
    my_global_var.should.equal(2)
def test_add_agg_global():
    """
    Create an aggregations block that is global
    """
    # When I add a global agg block
    t = Aggregations("agg_name", "field_name", "metric",
                     global_name="global_agg")

    # Then I see correct json
    results = {
        "global_agg": {
            "global": {},
            "aggregations": {
                "agg_name": {"metric": {"field": "field_name"}}
            }
        }
    }

    homogeneous(t, results)
def run(self, edit, message, *args, **kwargs):
    # print('UpdatePanelCommand.run', args, kwargs)
    # logger.debug('UpdatePanelCommand was triggered with arguments: %s' % kwargs)
    self.view.erase(edit, sublime.Region(0, self.view.size()))
    self.view.insert(edit, self.view.size(), message)
    self.view.show(self.view.size())
@classmethod
def create(cls, responseType):
    return resourceTypes.get(responseType, OEmbedResponse)()
def __call__(self, random, population, args):
    with self._lock:
        evaluate_migrant = args.setdefault('evaluate_migrant', False)
        migrant_index = random.randint(0, len(population) - 1)
        old_migrant = population[migrant_index]
        try:
            migrant = self.migrants.get(block=False)
            if evaluate_migrant:
                fit = args["_ec"].evaluator([migrant.candidate], args)
                migrant.fitness = fit[0]
                args["_ec"].num_evaluations += 1
            population[migrant_index] = migrant
        except Queue.Empty:
            pass
        try:
            self.migrants.put(old_migrant, block=False)
        except Queue.Full:
            pass
        return population
def get_acl_feed(self, uri=None, auth_token=None, **kwargs):
    """Retrieves the acl feed containing a site's sharing permissions.

    Args:
      uri: string (optional) A full URI to query the acl feed.
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the user's data.
      kwargs: Other parameters to pass to self.get_feed().

    Returns:
      gdata.sites.data.AclFeed
    """
    if uri is None:
        uri = self.make_acl_feed_uri()
    return self.get_feed(uri, desired_class=gdata.sites.data.AclFeed,
                         auth_token=auth_token, **kwargs)
def delete_all(filename):
    """
    Delete all files that `find_all` returns

    :param filename: the absolute path file name to use
    :return: None
    """
    for file in find_all(filename):
        os.remove(file)
def __init__(self, uri=None, address=None, *args, **kwargs):
    """Constructs a new EmailSettingsDelegation object with the given
    arguments.

    Args:
      uri: string (optional) The uri of this object for HTTP requests.
      address: string The email address of the delegated user.
    """
    super(EmailSettingsDelegation, self).__init__(*args, **kwargs)
    if uri:
        self.uri = uri
    if address:
        self.address = address
@classmethod
def start(cls, view, working_directory, command):
    logger.debug('TestRunner start requested')
    if cls.worker and cls.worker.is_alive():
        logger.debug(' |- there is another worker alive...')
        if settings.get('test_override'):
            logger.debug(' |- overriding current worker...')
            cls.worker.stop()
        else:
            logger.debug(' |- ignoring request')
            return
    logger.debug(' |- starting a new worker for tests')
    cls.worker = TestRunnerWorker(view, working_directory, command)
def AuthSubTokenFromHttpBody(http_body):
    """Extracts the AuthSub token from an HTTP body string.

    Used to find the new session token after making a request to upgrade a
    single use AuthSub token.

    Args:
      http_body: str The response from the server which contains the AuthSub
          key. For example, this function would find the new session token
          from the server's response to an upgrade token request.

    Returns:
      The header value to use for Authorization which contains the AuthSub
      token.
    """
    token_value = token_from_http_body(http_body)
    if token_value:
        return '%s%s' % (AUTHSUB_AUTH_LABEL, token_value)
    return None
def addObservable(self, eventTypeId, callbackFunction):
    """ Add a function that will be invoked when the corresponding event is triggered.
    Ex: myWidget.addObservable(myWidget.EVENT_BEFORE_NEXT, self.onBeforeNextClicked)
    :param eventTypeId: public id of the event exposed by the class
    :param callbackFunction: function that will be invoked when the event is triggered
    :return: identifier for this observable (that can be used to remove it)
    """
    if eventTypeId not in self.events:
        raise Exception("Event not recognized. Make sure that the event belongs to the class and you called the function 'setEvents'")

    # Add the event to the list of functions that will be called when the matching event is triggered
    self.__eventsCallbacks__[self.__eventsCount__] = (eventTypeId, callbackFunction)
    self.__eventsCount__ += 1
    return self.__eventsCount__ - 1
def __init__(self, bbio_gpio):
    self.bbio_gpio = bbio_gpio
    # Define mapping of Adafruit GPIO library constants to RPi.GPIO constants.
    self._dir_mapping = {OUT: bbio_gpio.OUT,
                         IN: bbio_gpio.IN}
    self._pud_mapping = {PUD_OFF: bbio_gpio.PUD_OFF,
                         PUD_DOWN: bbio_gpio.PUD_DOWN,
                         PUD_UP: bbio_gpio.PUD_UP}
    self._edge_mapping = {RISING: bbio_gpio.RISING,
                          FALLING: bbio_gpio.FALLING,
                          BOTH: bbio_gpio.BOTH}
def retrieve_page_of_org_users(self, customer_id, startKey=None, **kwargs):
    """Retrieve one page of OrgUsers in the customer's domain.

    Args:
      customer_id: string The ID of the Google Apps customer.
      startKey: The string key to continue for pagination through all OrgUnits.

    Returns:
      gdata.apps.organization.data.OrgUserFeed object
    """
    uri = ''
    if startKey is not None:
        uri = self.MakeOrganizationUnitOrguserProvisioningUri(
            customer_id, params={'get': 'all', 'startKey': startKey}, **kwargs)
    else:
        uri = self.MakeOrganizationUnitOrguserProvisioningUri(
            customer_id, params={'get': 'all'})
    return self.GetFeed(
        uri, desired_class=gdata.apps.organization.data.OrgUserFeed, **kwargs)
def test_textfield(self):
    dickie = Author.objects.create(name="Dickens", bio="Aged 10, bald.")

    authors = Author.objects.filter(bio__case_exact="Aged 10, bald.")
    assert list(authors) == [dickie]

    authors = Author.objects.filter(bio__case_exact="Aged 10, BALD.")
    assert list(authors) == []
def test_constant(self):
    # If we keep achieving a rate of 100 rows in 0.5 seconds, it should
    # recommend that we keep that rate
    rate = WeightedAverageRate(0.5)
    assert rate.update(100, 0.5) == 100
    assert rate.update(100, 0.5) == 100
    assert rate.update(100, 0.5) == 100
def clean_TaxOverrideType(self):
    otype = getattr(self, 'TaxOverrideType', None)
    if otype is None:
        otype = 'None'
    if otype not in TaxOverride.OVERRIDE_TYPES:
        raise AvalaraValidationException(AvalaraException.CODE_BAD_OTYPE, 'TaxOverrideType is not one of the allowed types')
    setattr(self, 'TaxOverrideType', otype)
def __init__(self, results=None, more=None):
    self.results = results
    self.more = more
def writeList(self, register, data):
    """Write bytes to the specified register."""
    self._bus.write_i2c_block_data(self._address, register, data)
    self._logger.debug("Wrote to register 0x%02X: %s", register, data)
def callNoduleSegmentationCLI(self, inputVolumeID, maximumRadius, onCLISegmentationFinishedCallback=None):
    """ Invoke the Lesion Segmentation CLI for the specified volume and fiducials.
    Note: the fiducials will be retrieved directly from the scene
    :param inputVolumeID:
    :return:
    """
    # Try to load preexisting structures
    self.setActiveVolume(inputVolumeID)

    if self.cliOutputScalarNode is None:
        # Create the scalar node that will work as the CLI output
        self.cliOutputScalarNode = slicer.mrmlScene.CreateNodeByClass("vtkMRMLScalarVolumeNode")
        # segmentedNodeName = self.currentVolume.GetID() + '_segmentedlm'
        segmentedNodeName = self.__PREFIX_INPUTVOLUME__ + self.currentVolume.GetID()
        self.cliOutputScalarNode.SetName(segmentedNodeName)
        slicer.mrmlScene.AddNode(self.cliOutputScalarNode)

    parameters = {}
    print("Calling CLI...")
    parameters["inputImage"] = inputVolumeID
    parameters["outputLevelSet"] = self.cliOutputScalarNode
    parameters["seedsFiducials"] = self.getFiducialsListNode(inputVolumeID)
    parameters["maximumRadius"] = maximumRadius
    parameters["fullSizeOutput"] = True

    self.invokedCLI = False  # Semaphore to avoid duplicated events

    module = slicer.modules.generatelesionsegmentation
    result = slicer.cli.run(module, None, parameters)

    # Observer when the state of the process is modified
    result.AddObserver('ModifiedEvent', self.__onNoduleSegmentationCLIStateUpdated__)
    # Function that will be invoked when the CLI finishes
    self.onCLISegmentationFinishedCallback = onCLISegmentationFinishedCallback

    return result
def test_dumping(self):
    instance = NullBit1Model(flag=None)
    data = json.loads(serializers.serialize('json', [instance]))[0]
    fields = data['fields']
    assert fields['flag'] is None
def create_parser(self, prog_name, subcommand):
    """
    Create and return the ``OptionParser`` which will be used to
    parse the arguments to this command.
    """
    # hack __main__ so --help in dev_appserver_main works OK.
    sys.modules['__main__'] = dev_appserver_main
    return super(Command, self).create_parser(prog_name, subcommand)
def info_to_dict(value, delimiter=';'):
    """
    Simple function to convert string to dict
    """
    stat_dict = {}
    stat_param = itertools.imap(lambda sp: info_to_tuple(sp, "="),
                                info_to_list(value, delimiter))
    for g in itertools.groupby(stat_param, lambda x: x[0]):
        try:
            value = map(lambda v: v[1], g[1])
            value = ",".join(sorted(value)) if len(value) > 1 else value[0]
            stat_dict[g[0]] = value
        except:
            # NOTE: 3.0 had a bug in stats at least prior to 3.0.44. This will
            # ignore that bug.
            # Not sure if this bug is fixed or not.. removing this try/catch
            # results in things not working. TODO: investigate.
            pass
    return stat_dict
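# Hypothetical behaviour sketch (assumes the info_to_list/info_to_tuple helpers
# split on the delimiter and on "=" respectively, as their names suggest; the
# input string is made up for illustration):
#
#   info_to_dict("objects=10;objects=20;type=device")
#   # -> {'objects': '10,20', 'type': 'device'}
#   # consecutive duplicate keys are grouped and their values joined with ","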
@CommandHelp('Shows the distribution of TTLs for namespaces')
def do_time_to_live(self, line):
    return self._do_distribution('ttl', 'TTL Distribution', 'Seconds')
def items(self, obj):
    # Avoid shadowing the built-in `list` and the `obj` argument.
    items = [obj]
    for reply in obj.reply_set.all()[:10]:
        items.append(reply)
    return items
def try_upload_subtitles(self, params):
    '''Return True if the subtitle is already in the database, False if not.
    '''
    self.data = self.xmlrpc.TryUploadSubtitles(self.token, params)
    return self._get_from_data_or_none('alreadyindb') == 1
def prepare_publication(self, object):
    return object.publication.name
def compilelib(libpath):
    version = git_version(libpath)
    lines = []
    lines.append("EESchema-LIBRARY Version 2.3\n")
    lines.append("#encoding utf-8\n\n")
    lines.append("#" + "="*78 + "\n")
    lines.append("# Automatically generated by agg-kicad compile_lib.py\n")
    lines.append("# on {}\n".format(datetime.datetime.now()))
    lines.append("# using git version {}\n".format(version))
    lines.append("# See github.com/adamgreig/agg-kicad\n")
    lines.append("#" + "="*78 + "\n\n")

    for dirpath, dirnames, files in os.walk(libpath):
        dirnames.sort()
        for f in fnmatch.filter(sorted(files), "*.lib"):
            with open(os.path.join(dirpath, f)) as libf:
                part = libf.readlines()[2:-1]
                if len(part) > 2 and "agg-kicad compile_lib.py" not in part[2]:
                    lines.append("".join(part))

    lines.append("# End of library\n")
    return "".join(lines)
def assert_raises(self, exc_class, func, *args, **kwargs):
    '''Like assertRaises() but returns the exception'''
    try:
        func(*args, **kwargs)
    except exc_class as exc:
        return exc
    else:
        raise AssertionError('%s was not raised' % exc_class.__name__)
def _do_default(self, line):
    self.executeHelp(line)
def filter_features(model_results, significance=0.1):
    '''
    Returns a list of features whose p-values are above the given
    significance threshold, i.e. the statistically insignificant features.

    Parameters
    ----------
    model_results : Series
        a pandas series of the results.pvalues of your model
    significance : float
        significance level, default at 90% confidence.

    Returns
    -------
    list :
        a list of columns above the given significance level
    '''
    return [model_results.index[index]
            for index, pvalue in enumerate(model_results)
            if pvalue > significance]
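# Hypothetical usage sketch (the statsmodels fit and the variable names are
# illustrative assumptions, not from the source):
#
#   results = sm.OLS(y, X).fit()
#   to_drop = filter_features(results.pvalues, significance=0.05)
#   X_reduced = X.drop(columns=to_drop)  # keep only significant features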
def _move_root_node(self, node, target, position):
    """
    Moves root node ``node`` to a different tree, inserting it relative to
    the given ``target`` node as specified by ``position``.

    ``node`` will be modified to reflect its new tree state in the
    database.
    """
    left = getattr(node, self.left_attr)
    right = getattr(node, self.right_attr)
    level = getattr(node, self.level_attr)
    tree_id = getattr(node, self.tree_id_attr)
    new_tree_id = getattr(target, self.tree_id_attr)
    width = right - left + 1

    if node == target:
        raise InvalidMove(_('A node may not be made a child of itself.'))
    elif tree_id == new_tree_id:
        raise InvalidMove(_('A node may not be made a child of any of its descendants.'))

    space_target, level_change, left_right_change, parent = \
        self._calculate_inter_tree_move_values(node, target, position)

    # Create space for the tree which will be inserted
    self._create_space(width, space_target, new_tree_id)

    # Move the root node, making it a child node
    opts = self.model._meta
    move_tree_query = """
    UPDATE %(table)s
    SET %(level)s = %(level)s - %%s,
        %(left)s = %(left)s - %%s,
        %(right)s = %(right)s - %%s,
        %(tree_id)s = %%s,
        %(parent)s = CASE
            WHEN %(pk)s = %%s
                THEN %%s
            ELSE %(parent)s END
    WHERE %(left)s >= %%s AND %(left)s <= %%s
      AND %(tree_id)s = %%s""" % {
        'table': qn(opts.db_table),
        'level': qn(opts.get_field(self.level_attr).column),
        'left': qn(opts.get_field(self.left_attr).column),
        'right': qn(opts.get_field(self.right_attr).column),
        'tree_id': qn(opts.get_field(self.tree_id_attr).column),
        'parent': qn(opts.get_field(self.parent_attr).column),
        'pk': qn(opts.pk.column),
    }
    cursor = connection.cursor()
    cursor.execute(move_tree_query, [level_change, left_right_change,
                                     left_right_change, new_tree_id, node.pk,
                                     parent.pk, left, right, tree_id])

    # Update the former root node to be consistent with the updated
    # tree in the database.
    setattr(node, self.left_attr, left - left_right_change)
    setattr(node, self.right_attr, right - left_right_change)
    setattr(node, self.level_attr, level - level_change)
    setattr(node, self.tree_id_attr, new_tree_id)
    setattr(node, self.parent_attr, parent)
def revert_to(self, article, revision):
    (default_revision_manager.get_for_object(article)[revision]
     .revision.revert())
def test_fqdnurl_validation_without_host():
    """ test with empty host FQDN URL """
    schema = Schema({"url": FqdnUrl()})
    try:
        schema({"url": 'http://'})
    except MultipleInvalid as e:
        assert_equal(str(e),
                     "expected a Fully qualified domain name URL for dictionary value @ data['url']")
    else:
        assert False, "Did not raise Invalid for empty string url"
@raise_if_none('cookie', MagicError, 'object has already been closed')
@byte_args(positions=[1])
@str_return
def id_filename(self, filename):
    "Return a textual description of the contents of the file"
    return api.magic_file(self.cookie, filename)
def email(self, comment, content_object, request):
    # Notify all moderating editors that a comment awaits approval.
    moderators = [
        settings.EDITORS['chief'],
        settings.EDITORS['managing'],
        settings.EDITORS['online_dev'],
        settings.EDITORS['multimedia'],
        settings.EDITORS['online_assistant'],
    ]
    context = {'comment': comment, 'content_object': content_object}
    subject = 'New comment awaiting moderation on "%s"' % content_object
    render_email_and_send(
        context=context,
        message_template='multimedia/video_comment_notification_email.txt',
        subject=subject,
        recipients=moderators)
def longestConsecutive(self, root):
    self.longest(root)
    return self.gmax
def longestPalindrome_TLE(self, s):
    """
    Algorithm: dp, O(n^2)

    p[i,j] represents whether s[i:j] is a palindrome. (incl. i-th while excl. j-th)
    For example S = "abccb"
                     01234
    p[0,1] = True, p[1,2] = True, etc., since a single char is a palindrome
    p[0,2] = s[0]==s[1], p[0,3] = s[0]==s[2] && p[1,2]
    p[0,4] = s[0]==s[3] && p[1,3]
    p[0,5] = s[0]==s[4] && p[1,4]

    thus,
    p[i,j] = 1                          if i+1 == j
    p[i,j] = s[i]==s[j-1]               if i+1 == j-1
    p[i,j] = s[i]==s[j-1] && p[i+1,j-1] otherwise

    :param s: string
    :return: string
    """
    length = len(s)
    dp = [[False for _ in xrange(length+1)] for _ in xrange(length+1)]
    for i in xrange(length+1):
        dp[i][i] = True

    longest = [0, 0]
    for j in xrange(length+1):
        for i in xrange(j-1, -1, -1):
            if i+1 == j:
                dp[i][j] = True
            else:
                # pre-access? starting backward
                dp[i][j] = s[i] == s[j-1] and dp[i+1][j-1]

            if dp[i][j] and longest[1]-longest[0] < j-i:
                longest[0], longest[1] = i, j

    return s[longest[0]:longest[1]]
def __init__(self, source, *args, **kwargs):
    """Init."""
    super(IndexBatchIterator, self).__init__(*args, **kwargs)
    self.source = source
    if source is not None:
        # Tack on (SAMPLE_SIZE-1) copies of the first value so that it is
        # easy to grab SAMPLE_SIZE points even from the first location.
        x = source.data
        input_shape = [len(x) + (SAMPLE_SIZE - 1), N_ELECTRODES]
        self.augmented = np.zeros(input_shape, dtype=np.float32)
        self.augmented[SAMPLE_SIZE-1:] = x
        self.augmented[:SAMPLE_SIZE-1] = x[0]
    if filt2Dsize:
        input_shape = [self.batch_size, 1, N_ELECTRODES, TIME_POINTS]
        self.Xbuf = np.zeros(input_shape, np.float32)
    else:
        input_shape = [self.batch_size, N_ELECTRODES, TIME_POINTS]
        self.Xbuf = np.zeros(input_shape, np.float32)
    self.Ybuf = np.zeros([self.batch_size, N_EVENTS], np.float32)
def asdict(hdr, row, missing=None):
    flds = [text_type(f) for f in hdr]
    try:
        # list comprehension should be faster
        items = [(flds[i], row[i]) for i in range(len(flds))]
    except IndexError:
        # short row, fall back to slower for loop
        items = list()
        for i, f in enumerate(flds):
            try:
                v = row[i]
            except IndexError:
                v = missing
            items.append((f, v))
    return dict(items)
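# Illustrative behaviour sketch (the values are made up for demonstration):
#
#   asdict(('name', 'age'), ('alice', 30))  # -> {'name': 'alice', 'age': 30}
#   asdict(('name', 'age'), ('bob',))       # -> {'name': 'bob', 'age': None}
#   # a short row triggers the IndexError fallback, padding with `missing`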
def attribute_text_getter(attr, missing):
    def _get(v):
        if len(v) > 1:
            return tuple(e.get(attr) for e in v)
        elif len(v) == 1:
            return v[0].get(attr)
        else:
            return missing
    return _get
def save_to_json():
    table = _AGGREGATED_SALES_TABLE
    with open('{}.json'.format(table), 'w') as f:
        records = [row for row in scraperwiki.sqlite.select(
            '* FROM {}'.format(table))]
        f.write(json.dumps(records, cls=JsonEncoder, indent=1))
def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
    oprot.writeStructBegin('unpause_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def recordtree(table, start='start', stop='stop'):
    """
    Construct an interval tree for the given table, where each node in the
    tree is a row of the table represented as a record object.
    """
    import intervaltree
    getstart = attrgetter(start)
    getstop = attrgetter(stop)
    tree = intervaltree.IntervalTree()
    for rec in records(table):
        tree.addi(getstart(rec), getstop(rec), rec)
    return tree
def wrapper(*args, **kwargs):
    # The original mixed two loop constructs (a manual while-countdown and a
    # for-range); a single for loop over the retry budget is equivalent.
    current_delay = delay
    for current_try in range(max_tries):
        try:
            return f(*args, **kwargs)
        except RetryFailed:
            # Do not sleep after the last retry
            if current_try < max_tries - 1:
                sleep_func(current_delay)  # wait...
                current_delay *= backoff  # make future wait longer
    # No more retries
    raise TooManyRetries()
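# Hedged sketch of the decorator factory this wrapper presumably closes over
# (the outer function, its name, and its defaults are assumptions, not from
# the source):
#
#   def retry(max_tries=3, delay=1.0, backoff=2.0, sleep_func=time.sleep):
#       def decorator(f):
#           @functools.wraps(f)
#           def wrapper(*args, **kwargs):
#               ...  # body as above
#           return wrapper
#       return decorator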
def __init__(self, conf={}):
    logger.info("Creating decoder using conf: %s" % conf)
    self.create_pipeline(conf)
    self.outdir = conf.get("out-dir", None)
    if not os.path.exists(self.outdir):
        os.makedirs(self.outdir)
    elif not os.path.isdir(self.outdir):
        raise Exception("Output directory %s already exists as a file" % self.outdir)

    self.result_handler = None
    self.full_result_handler = None
    self.eos_handler = None
    self.error_handler = None
    self.request_id = "<undefined>"
def test_parses_data_correctly_when_v2(self):
    posted_data = [
        {
            "_id": "YnJvd3NlcnMyMDE0LTEwLTE0IDAwOj"
                   "AwOjAwKzAwOjAwTW96aWxsYQ==",
            "_timestamp": datetime.datetime(
                2014, 10, 14, 0, 0, tzinfo=pytz.UTC),
            "browser": "Mozilla",
            "dataType": "browsers",
            "humanId": "browsers2014-10-14 00:00:00+00:00Mozilla",
            "visitors": 1,
            "test": "field"
        },
        {
            "_id": "YnJvd3NlcnMyMDE0LTEwLTE0IDAwO"
                   "jAwOjAwKzAwOjAwR29vZ2xlIENocm9tZQ==",
            "_timestamp": datetime.datetime(
                2014, 10, 14, 0, 0, tzinfo=pytz.UTC),
            "browser": "Google Chrome",
            "dataType": "browsers",
            "humanId": "browsers2014-10-14 00:00:00+00:00Google Chrome",
            "visitors": 18,
            "test": "field"
        }
    ]
    options = {
        'row_type_name': 'browser',
        'mappings': {'Visits': 'visitors'},
        'additionalFields': {'test': 'field'},
        'idMapping': ["dataType", "_timestamp", "browser"]}
    data_type = "browsers"
    parser = V2Parser(options, data_type)
    results = list(parser.parse([get_fake_response()['data']]))
    assert_that(results[0], has_entries(posted_data[0]))
    assert_that(results[1], has_entries(posted_data[1]))
def _get_route_for(self, action):
    """Return the complete URL for this action.

    Basically:
     - get, update and delete need an id
     - add and list do not
    """
    route = self._route
    if action in self._NEED_ID:
        route += "/<%s>" % self._identifier
    return route
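# Illustrative behaviour sketch (assumes _route == '/books', _identifier ==
# 'book_id', and 'get' in _NEED_ID; all of these values are hypothetical):
#
#   self._get_route_for('get')   # -> '/books/<book_id>'
#   self._get_route_for('list')  # -> '/books'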
def dispatch(self, opcode, context):
    """Dispatches a context on a given opcode. Returns True if the context
    is done matching, False if it must be resumed when next encountered."""
    # if self.executing_contexts.has_key(id(context)):
    if id(context) in self.executing_contexts:
        generator = self.executing_contexts[id(context)]
        del self.executing_contexts[id(context)]
        has_finished = next(generator)
    else:
        method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
        has_finished = method(self, context)
        if hasattr(has_finished, "__next__"):  # avoid using the types module
            generator = has_finished
            has_finished = next(generator)
    if not has_finished:
        self.executing_contexts[id(context)] = generator
    return has_finished
def setEntityResolver(self, resolver):
    "Register an object to resolve external entities."
    self._ent_handler = resolver
def secure_connection(self):
    socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
                             certfile=CERTFILE, server_side=True,
                             do_handshake_on_connect=False,
                             ssl_version=ssl.PROTOCOL_SSLv23)
    self.del_channel()
    self.set_socket(socket)
    self._ssl_accepting = True
def op_in(self, ctx):
    # match set member (or non_member)
    # <IN> <skip> <set>
    # self._log(ctx, "OP_IN")
    self.general_op_in(ctx)
    return True
def averageWords(text_f="/afs/cs.stanford.edu/u/awni/swbd/data/eval2000/text_ctc"):
    with open(text_f, 'r') as fid:
        lines = [l.strip().split()[1:] for l in fid.readlines()]
    numUtts = float(len(lines))
    numWords = sum(len(l) for l in lines)
    return numWords / numUtts
def op_max_until(self, ctx):
    # maximizing repeat
    # <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
    repeat = ctx.state.repeat
    # print("op_max_until")  # , id(ctx.state.repeat))
    if repeat is None:
        # print(id(ctx), id(ctx.state))
        raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
    mincount = repeat.peek_code(2)
    maxcount = repeat.peek_code(3)
    ctx.state.string_position = ctx.string_position
    count = repeat.count + 1
    # self._log(ctx, "MAX_UNTIL", count)

    if count < mincount:
        # not enough matches
        repeat.count = count
        child_context = repeat.push_new_context(4)
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        yield True

    if (count < maxcount or maxcount == MAXREPEAT) \
            and ctx.state.string_position != repeat.last_position:
        # we may have enough matches, if we can match another item, do so
        repeat.count = count
        ctx.state.marks_push()
        save_last_position = repeat.last_position  # zero-width match protection
        repeat.last_position = ctx.state.string_position
        child_context = repeat.push_new_context(4)
        yield False
        repeat.last_position = save_last_position
        if child_context.has_matched:
            ctx.state.marks_pop_discard()
            ctx.has_matched = True
            yield True
        ctx.state.marks_pop()
        repeat.count = count - 1
        ctx.state.string_position = ctx.string_position

    # cannot match more repeated items here. make sure the tail matches
    ctx.state.repeat = repeat.previous
    child_context = ctx.push_new_context(1)
    # print("_sre.py:987:op_max_until")
    yield False
    ctx.has_matched = child_context.has_matched
    if not ctx.has_matched:
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
    yield True
def get_free_nodes(cluster, parallel=True):
    nodes = [cluster + str(node) for node in CLUSTER_NODES[cluster]]
    if parallel:
        is_free = joblib.Parallel(n_jobs=NUM_CPUS)(
            joblib.delayed(is_node_free)(node) for node in nodes)
    else:
        is_free = list()
        for node in nodes:
            is_free.append(is_node_free(node))
    free_nodes = [nodes[k] for k in range(len(nodes)) if is_free[k]]
    return free_nodes
def test_setup_class(self):
    class Test(unittest.TestCase):
        setUpCalled = 0

        @classmethod
        def setUpClass(cls):
            Test.setUpCalled += 1
            unittest.TestCase.setUpClass()

        def test_one(self):
            pass

        def test_two(self):
            pass

    result = self.runTests(Test)

    self.assertEqual(Test.setUpCalled, 1)
    self.assertEqual(result.testsRun, 2)
    self.assertEqual(len(result.errors), 0)
def emit(self, record):
    """
    Emit a record.

    If the stream was not opened because 'delay' was specified in the
    constructor, open it before calling the superclass's emit.
    """
    if self.stream is None:
        self.stream = self._open()
    StreamHandler.emit(self, record)
def onkeypress(self, fun, key=None):
    """Bind fun to key-press event of key if key is given,
    or to any key-press-event if no key is given.

    Arguments:
    fun -- a function with no arguments
    key -- a string: key (e.g. "a") or key-symbol (e.g. "space")

    In order to be able to register key-events, TurtleScreen
    must have focus. (See method listen.)

    Example (for a TurtleScreen instance named screen
    and a Turtle instance named turtle):

    >>> def f():
    ...     fd(50)
    ...     lt(60)
    ...
    >>> screen.onkeypress(f, "Up")
    >>> screen.listen()

    Subsequently the turtle can be moved by repeatedly pressing
    the up-arrow key, or by keeping the up-arrow key pressed,
    consequently drawing a hexagon.
    """
    if fun is None:
        if key in self._keys:
            self._keys.remove(key)
    elif key is not None and key not in self._keys:
        self._keys.append(key)
    self._onkeypress(fun, key)
def __radd__(self, val):
    return self.val + val
def test_loadTestsFromName__malformed_name(self):
    loader = unittest.TestLoader()

    # XXX Should this raise ValueError or ImportError?
    try:
        loader.loadTestsFromName('abc () //')
    except ValueError:
        pass
    except ImportError:
        pass
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
def test_process_awareness(self):
    # ensure that the random source differs between
    # child and parent.
    read_fd, write_fd = os.pipe()
    pid = None
    try:
        pid = os.fork()
        if not pid:
            os.close(read_fd)
            os.write(write_fd, next(self.r).encode("ascii"))
            os.close(write_fd)
            # bypass the normal exit handlers- leave those to
            # the parent.
            os._exit(0)
        parent_value = next(self.r)
        child_value = os.read(read_fd, len(parent_value)).decode("ascii")
    finally:
        if pid:
            # best effort to ensure the process can't bleed out
            # via any bugs above
            try:
                os.kill(pid, signal.SIGKILL)
            except EnvironmentError:
                pass
        os.close(read_fd)
        os.close(write_fd)
    self.assertNotEqual(child_value, parent_value)
def trace_return(self, frame):
    self.add_event('return', frame)
    self.stack.pop()
def test_adding_child_mock(self):
    for Klass in NonCallableMock, Mock, MagicMock, NonCallableMagicMock:
        mock = Klass()

        mock.foo = Mock()
        mock.foo()

        self.assertEqual(mock.method_calls, [call.foo()])
        self.assertEqual(mock.mock_calls, [call.foo()])

        mock = Klass()
        mock.bar = Mock(name='name')
        mock.bar()
        self.assertEqual(mock.method_calls, [])
        self.assertEqual(mock.mock_calls, [])

        # mock with an existing _new_parent but no name
        mock = Klass()
        mock.baz = MagicMock()()
        mock.baz()
        self.assertEqual(mock.method_calls, [])
        self.assertEqual(mock.mock_calls, [])
def put(self, item, block=True, timeout=None):
    '''Put an item into the queue.

    If optional args 'block' is true and 'timeout' is None (the default),
    block if necessary until a free slot is available. If 'timeout' is
    a non-negative number, it blocks at most 'timeout' seconds and raises
    the Full exception if no free slot was available within that time.
    Otherwise ('block' is false), put an item on the queue if a free slot
    is immediately available, else raise the Full exception ('timeout'
    is ignored in that case).
    '''
    with self.not_full:
        if self.maxsize > 0:
            if not block:
                if self._qsize() >= self.maxsize:
                    raise Full
            elif timeout is None:
                while self._qsize() >= self.maxsize:
                    self.not_full.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                endtime = time() + timeout
                while self._qsize() >= self.maxsize:
                    remaining = endtime - time()
                    if remaining <= 0.0:
                        raise Full
                    self.not_full.wait(remaining)
        self._put(item)
        self.unfinished_tasks += 1
        self.not_empty.notify()
def _test_module_encoding(self, path):
    path, _ = os.path.splitext(path)
    path += ".py"
    with codecs.open(path, 'r', 'utf-8') as f:
        f.read()
def test_unsupported_auth_basic_handler(self):
    # While using BasicAuthHandler
    opener = OpenerDirector()
    basic_auth_handler = urllib.request.HTTPBasicAuthHandler(None)
    http_handler = MockHTTPHandler(
        401, 'WWW-Authenticate: NTLM\r\n\r\n')
    opener.add_handler(basic_auth_handler)
    opener.add_handler(http_handler)
    self.assertRaises(ValueError, opener.open, "http://www.example.com")
def test_iadd(self):
    super().test_iadd()
    u = (0, 1)
    u2 = u
    u += (2, 3)
    self.assertTrue(u is not u2)
def addExpectedFailure(self, test, err):
    super(TextTestResult, self).addExpectedFailure(test, err)
    if self.showAll:
        self.stream.writeln("expected failure")
    elif self.dots:
        self.stream.write("x")
        self.stream.flush()
@classmethod
def _test_stderr_flush(cls, testfn):
    sys.stderr = open(testfn, 'w')
    1/0  # MARKER
def check_pickle(self, itorg, seq):
    d = pickle.dumps(itorg)
    it = pickle.loads(d)
    # Cannot assert type equality because dict iterators unpickle as list
    # iterators.
    # self.assertEqual(type(itorg), type(it))
    self.assertTrue(isinstance(it, collections.abc.Iterator))
    self.assertEqual(list(it), seq)

    it = pickle.loads(d)
    try:
        next(it)
    except StopIteration:
        return
    d = pickle.dumps(it)
    it = pickle.loads(d)
    self.assertEqual(list(it), seq[1:])
def test_compare_function_objects(self):
    f = eval('lambda: None')
    g = eval('lambda: None')
    self.assertNotEqual(f, g)

    f = eval('lambda a: a')
    g = eval('lambda a: a')
    self.assertNotEqual(f, g)

    f = eval('lambda a=1: a')
    g = eval('lambda a=1: a')
    self.assertNotEqual(f, g)

    f = eval('lambda: 0')
    g = eval('lambda: 1')
    self.assertNotEqual(f, g)

    f = eval('lambda: None')
    g = eval('lambda a: None')
    self.assertNotEqual(f, g)

    f = eval('lambda a: None')
    g = eval('lambda b: None')
    self.assertNotEqual(f, g)

    f = eval('lambda a: None')
    g = eval('lambda a=None: None')
    self.assertNotEqual(f, g)

    f = eval('lambda a=0: None')
    g = eval('lambda a=1: None')
    self.assertNotEqual(f, g)
def test_writable_readonly(self):
    # Issue #10451: memoryview incorrectly exposes a readonly
    # buffer as writable causing a segfault if using mmap
    tp = self.ro_type
    if tp is None:
        return
    b = tp(self._source)
    m = self._view(b)
    i = io.BytesIO(b'ZZZZ')
    self.assertRaises(TypeError, i.readinto, m)
def test_issue14725(self):
    l = self.connection.Listener()
    p = self.Process(target=self._test, args=(l.address,))
    p.daemon = True
    p.start()
    time.sleep(1)
    # On Windows the client process should by now have connected,
    # written data and closed the pipe handle. This causes
    # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue 14725.
    conn = l.accept()
    self.assertEqual(conn.recv(), 'hello')
    conn.close()
    p.join()
    l.close()
def test_failing_queue(self):
    # Test to make sure a queue is functioning correctly.
    # Done twice to the same instance.
    q = FailingQueue(QUEUE_SIZE)
    self.failing_queue_test(q)
    self.failing_queue_test(q)
def __init__(self, layers, loss):
    self.layers = layers
    self.loss = loss
    self.bprop_until = next((idx for idx, l in enumerate(self.layers)
                             if isinstance(l, ParamMixin)), 0)
    self.layers[self.bprop_until].bprop_to_x = False
    self.collection = self.layers
    self._initialized = False
@classmethod
def load_item_classes_from_file(cls, f):
    '''Load json items from a file and return a TaskFactory'''
    return cls.taskfactory_from_objects(json.load(f))
def train_epoch(self):
    batch_losses = []
    for batch in self.feed.batches():
        loss = np.array(ca.mean(self.model.update(*batch)))
        for param, state in zip(self.params, self.learn_rule_states):
            self.learn_rule.step(param, state)
        batch_losses.append(loss)
    epoch_loss = np.mean(batch_losses)
    return epoch_loss
def http_connect(method, params, api_key):
    conn = httplib.HTTPSConnection("api.africastalking.com")
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "application/json",
               "ApiKey": api_key}
    params = urllib.urlencode(params)
    conn.request(method, PATH, params, headers)
    res = conn.getresponse().read()
    return json.loads(res)
def y_shape(self, x_shape):
    return self.conv_op.output_shape(x_shape, self.n_filters,
                                     self.filter_shape)
def enable_colors(colors):
    for i in colors:
        CONF["COLORS"][i] = colors[i]
def seq_final_arrival(self, seq_num):
    '''
    Returns the time at which the seq number had fully arrived, that is,
    when all the data before it had also arrived.
    '''
    try:
        return self.final_arrival_data.find_le(seq_num)[1]
    except:
        return None
def set_fathers(self, f):
    self.fathers.append(f)
def visit_ins(self, ins):
    return ins.visit(self)
def decode(self, y):
    self._tmp_y = y
    x = ca.dot(y, self.weights.array.T) + self.bias_prime.array
    return self.activation_decode.fprop(x)
def Save(self):
    """Save this branch back into the configuration.
    """
    if self._config.HasSection('branch', self.name):
        if self.remote:
            self._Set('remote', self.remote.name)
        else:
            self._Set('remote', None)
        self._Set('merge', self.merge)
    else:
        fd = open(self._config.file, 'ab')
        try:
            fd.write('[branch "%s"]\n' % self.name)
            if self.remote:
                fd.write('\tremote = %s\n' % self.remote.name)
            if self.merge:
                fd.write('\tmerge = %s\n' % self.merge)
        finally:
            fd.close()
def Multiple_lines_are_printed__test():
    out = StringIO()
    csv = Csv(out, ("a", "b", "c"))
    csv.line((2, "x", 3.5))
    csv.line((4, "y", 5.5))
    assert_equal('''"a", "b", "c"
2, "x", 3.5
4, "y", 5.5
''', out.getvalue())
def get_id(self, package_name, rid, locale='\x00\x00'):
    self._analyse()

    try:
        for i in self.values[package_name][locale]["public"]:
            if i[2] == rid:
                return i
    except KeyError:
        return None
def process_and_show(self):
    for name, klass in sorted(self.classes.iteritems()):
        logger.info('Processing class: %s', name)
        if not isinstance(klass, DvClass):
            klass = DvClass(klass, self.vma)
        klass.process()
        klass.show_source()
def _Load(self):
    if not self._loaded:
        m = self.manifestProject
        b = m.GetBranch(m.CurrentBranch)
        if b.remote and b.remote.name:
            m.remote.name = b.remote.name
        b = b.merge
        if b is not None and b.startswith(R_HEADS):
            b = b[len(R_HEADS):]
        self.branch = b

        self._ParseManifest(True)

        local = os.path.join(self.repodir, LOCAL_MANIFEST_NAME)
        if os.path.exists(local):
            try:
                real = self._manifestFile
                self._manifestFile = local
                self._ParseManifest(False)
            finally:
                self._manifestFile = real

        if self.IsMirror:
            self._AddMetaProjectMirror(self.repoProject)
            self._AddMetaProjectMirror(self.manifestProject)

        self._loaded = True
def __init__(self, field):
    self.field = field
    self.xrefread = set()
    self.xrefwrite = set()
def convert(self, value, param, ctx):
    if hasattr(value, 'read') or hasattr(value, 'write'):
        return value
    value = os.path.expanduser(value)
    return super(File, self).convert(value, param, ctx)
def __init__(self, address):
    """
    Args:
        address: A string or integer representing the IP

          Additionally, an integer can be passed, so
          IPv4Address('192.0.2.1') == IPv4Address(3221225985).
          or, more generally
          IPv4Address(int(IPv4Address('192.0.2.1'))) ==
            IPv4Address('192.0.2.1')

    Raises:
        AddressValueError: If ipaddress isn't a valid IPv4 address.
    """
    # Efficient constructor from integer.
    if isinstance(address, _compat_int_types):
        self._check_int_address(address)
        self._ip = address
        return

    # Constructing from a packed address
    if isinstance(address, bytes):
        self._check_packed_address(address, 4)
        bvs = _compat_bytes_to_byte_vals(address)
        self._ip = _compat_int_from_byte_vals(bvs, 'big')
        return

    # Assume input argument to be string or any object representation
    # which converts into a formatted IP string.
    addr_str = _compat_str(address)
    if '/' in addr_str:
        raise AddressValueError("Unexpected '/' in %r" % address)
    self._ip = self._ip_int_from_string(addr_str)
def __contains__(self, dist):
    """True if `dist` is the active distribution for its project"""
    return self.by_key.get(dist.key) == dist