text
stringlengths
48
9.16k
def file_size(file_path):
    """Return the size in bytes of the regular file at ``file_path``.

    Raises:
        OSError: if the path does not exist or cannot be stat'd.
        ValueError: if the path refers to a directory rather than a file.
    """
    st = os.stat(file_path)
    # Raise a real exception instead of using ``assert`` (asserts are
    # stripped under ``python -O``), and avoid the ``== False`` anti-idiom.
    if stat_fn.S_ISDIR(st.st_mode):
        raise ValueError("expected a file, got a directory: %s" % file_path)
    return st.st_size
def _on_word(self, word):
    """Accumulate one decoded word and emit a partial or final result event.

    Ordinary words are appended to the running partial transcript and a
    non-final hypothesis is sent; the sentinel token ``"<#s>"`` marks the
    end of a segment and triggers the final hypothesis, after which the
    transcript is reset and the segment counter advanced.
    """
    # Record decoder liveness.
    self.last_decoder_message = time.time()
    if word != "<#s>":
        # Separate words with a single space.
        if len(self.partial_transcript) > 0:
            self.partial_transcript += " "
        self.partial_transcript += word

        logger.debug("%s: Postprocessing partial result.." % self.request_id)
        processed_transcript = self.post_process(self.partial_transcript)
        logger.debug("%s: Postprocessing done." % self.request_id)

        event = dict(status=common.STATUS_SUCCESS,
                     segment=self.num_segments,
                     result=dict(hypotheses=[dict(transcript=processed_transcript)],
                                 final=False))
        self.send(json.dumps(event))
    else:
        logger.info("%s: Postprocessing final result.." % self.request_id)
        processed_transcript = self.post_process(self.partial_transcript)
        logger.info("%s: Postprocessing done." % self.request_id)

        event = dict(status=common.STATUS_SUCCESS,
                     segment=self.num_segments,
                     result=dict(hypotheses=[dict(transcript=processed_transcript)],
                                 final=True))
        self.send(json.dumps(event))
        # Segment finished: start fresh for the next one.
        self.partial_transcript = ""
        self.num_segments += 1
def bin_based_cvm(y_pred, sample_weight, bin_indices):
    """Cramer-von Mises similarity, quite slow meanwhile"""
    # All three arrays describe the same events element-wise.
    assert len(y_pred) == len(sample_weight) == len(bin_indices)
    bin_weights = compute_bin_weights(bin_indices=bin_indices, sample_weight=sample_weight)
    result = 0.
    # Pooled (global) distribution that each bin's distribution is compared to.
    global_data, global_weight, global_F = prepare_distibution(y_pred, weights=sample_weight)
    for bin, bin_weight in enumerate(bin_weights):
        if bin_weight <= 0:
            # Empty bin contributes nothing.
            continue
        bin_mask = bin_indices == bin
        local_distribution = y_pred[bin_mask]
        local_weights = sample_weight[bin_mask]
        # Weighted sum of two-sample CvM statistics (bin vs. global).
        result += bin_weight * _cvm_2samp_fast(global_data, local_distribution,
                                               global_weight, local_weights, global_F)
    return result
def test_expressions():
    """Gradient-check a battery of three-argument arithmetic expressions.

    For every (shape, expression) pair the analytic gradient from the
    computation graph is compared against a finite-difference approximation.
    """
    exprs = [
        lambda a, b, c: a + b + c,
        lambda a, b, c: a - b - c,
        lambda a, b, c: a * b * c,
        lambda a, b, c: a / b / c,
        lambda a, b, c: a + b - c,
        lambda a, b, c: a - b + c,
        lambda a, b, c: a + b + (-c),
        lambda a, b, c: a + b*a + c,
        lambda a, b, c: a + b**2 + c,
        lambda a, b, c: a + b*b + a*b + c,
        lambda a, b, c: a + 2*(b + a) + c,
        lambda a, b, c: c / (a + ((b/1)*(c+1)) + a*b*c),
    ]
    confs = itertools.product(shapes, exprs)
    for shape, op in confs:
        # Random inputs in the framework's float dtype.
        a_array = np.random.normal(size=shape).astype(dp.float_)
        b_array = np.random.normal(size=shape).astype(dp.float_)
        c_array = np.random.normal(size=shape).astype(dp.float_)
        a_src = BPropableSource(a_array)
        b_src = BPropableSource(b_array)
        c_src = BPropableSource(c_array)
        sink = op(a_src, b_src, c_src)
        # Check d(sink)/d(src) for each of the three sources.
        for src, x0 in [(a_src, a_array), (b_src, b_array), (c_src, c_array)]:
            f, f_grad = graph_funs(src, sink)
            g_approx = approx_fprime(x0, f)
            g_true = f_grad(x0)
            assert gradclose(g_true, g_approx)
def norm_html_from_html(html):
    """Normalize (somewhat) Markdown'd HTML.

    Part of Markdown'ing involves obfuscating email links with
    randomize encoding. Undo that obfuscation.

    Also normalize EOLs.
    """
    # Python 2: coerce byte strings to unicode before regex work.
    if not isinstance(html, unicode):
        html = html.decode('utf-8')
    html = _markdown_email_link_re.sub(
        _markdown_email_link_sub, html)
    if sys.platform == "win32":
        # Normalize Windows line endings so comparisons are portable.
        html = html.replace('\r\n', '\n')
    return html
def require(self, env=None, installer=None):
    # Resolve this entry's distribution requirements (including extras)
    # and add every resolved distribution to the global working set so
    # they become importable.
    if self.extras and not self.dist:
        # Extras can only be resolved against a concrete distribution.
        raise UnknownExtra("Can't require() without a distribution", self)
    reqs = self.dist.requires(self.extras)
    items = working_set.resolve(reqs, env, installer)
    # list() forces the lazy map; add() is called purely for side effect.
    list(map(working_set.add, items))
def __setstate__(state):
    # Module-level state restore: for each saved variable, dispatch to the
    # registered setter function ('_sset_<kind>' in this module's globals,
    # with the kind looked up in _state_vars).
    g = globals()
    for k, v in state.items():
        # NOTE(review): setter receives (name, current value, saved value).
        g['_sset_'+_state_vars[k]](k, g[k], v)
    return state
def __repr__(self):
    """Show the class name, including ``tag=`` only when it differs from the class default."""
    cls = self.__class__
    if self.tag == cls.tag:
        return cls.__name__ + "()"
    return cls.__name__ + "(tag=%d)" % self.tag
def switch(self, dest, url, rev_options):
    # Repoint the existing checkout at a different URL/revision
    # ('svn switch'-style); rev_options are pre-built CLI arguments.
    self.run_command(['switch'] + rev_options + [url, dest])
def name_that_thing(thing):
    """
    Returns either the function/class path or just the object's repr
    """
    # Bound instance method (Python 2): name it through its class.
    if hasattr(thing, "im_class"):
        return name_that_thing(thing.im_class) + "." + thing.im_func.func_name
    # Anything that carries its own name.
    if hasattr(thing, "__name__"):
        plain_function = isinstance(thing, types.FunctionType)
        if hasattr(thing, "__class__") and not plain_function:
            if thing.__class__ is not type:
                return name_that_thing(thing.__class__)
        if hasattr(thing, "__module__"):
            return "%s.%s" % (thing.__module__, thing.__name__)
    # Generic instance of a class: name the class instead.
    if hasattr(thing, "__class__"):
        return name_that_thing(thing.__class__)
    return repr(thing)
def stop(self):
    # Finish the progress display: jump to 100% when a maximum is known,
    # then mark the bar as done.
    if self.max is not None:
        self.update(self.max)
    self.done = True
def add_filters(self, filterer, filters):
    """Add filters to a filterer from a list of names."""
    for f in filters:
        try:
            # Names refer to filter objects declared in the 'filters'
            # section of the logging configuration dict.
            filterer.addFilter(self.config['filters'][f])
        # NOTE(review): StandardError is Python 2 only.
        except StandardError as e:
            raise ValueError('Unable to add filter %r: %s' % (f, e))
def _version2fieldlist(version):
    """Map a metadata-version string onto its tuple of known field names."""
    fields_by_version = {
        '1.0': _241_FIELDS,
        '1.1': _314_FIELDS,
        '1.2': _345_FIELDS,
        '2.0': _426_FIELDS,
    }
    try:
        return fields_by_version[version]
    except KeyError:
        raise MetadataUnrecognizedVersionError(version)
def _get_index_urls_locations(self, project_name):
    """Returns the locations found via self.index_urls

    Checks the url_name on the main (first in the list) index and
    use this url_name to produce all locations
    """

    def mkurl_pypi_url(url):
        loc = posixpath.join(url, project_url_name)
        # For maximum compatibility with easy_install, ensure the path
        # ends in a trailing slash.  Although this isn't in the spec
        # (and PyPI can handle it without the slash) some other index
        # implementations might break if they relied on easy_install's
        # behavior.
        if not loc.endswith('/'):
            loc = loc + '/'
        return loc

    # Normalized name: lower-cased and URL-quoted.
    project_url_name = urllib_parse.quote(project_name.lower())

    if self.index_urls:
        # Check that we have the url_name correctly spelled:
        # Only check main index if index URL is given
        main_index_url = Link(
            mkurl_pypi_url(self.index_urls[0]),
            trusted=True,
        )

        page = self._get_page(main_index_url)
        if page is None and PyPI.netloc not in str(main_index_url):
            warnings.warn(
                "Failed to find %r at %s. It is suggested to upgrade "
                "your index to support normalized names as the name in "
                "/simple/{name}." % (project_name, main_index_url),
                RemovedInPip8Warning,
            )
            # Fall back to scraping the index page for the exact name.
            project_url_name = self._find_url_name(
                Link(self.index_urls[0], trusted=True),
                project_url_name,
            ) or project_url_name

    if project_url_name is not None:
        return [mkurl_pypi_url(url) for url in self.index_urls]
    return []
@classmethod
def run_file_cmd(cls, filename):
    """
    return a 3-tuple of strings containing:
        the command to run (list)
        environment variables to be passed to command (dictionary or None)
        data to be piped into stdin (file-like object or None)
    """
    port_number = str(cls.config.get('port', VerticaDb.DEFAULT_PORT))
    # vsql invocation: verbose output, autocommit, abort on first error,
    # and a 'schema' variable that scripts can reference as :schema.
    cmd = ['/opt/vertica/bin/vsql',
           '-h', cls.config['host'],
           '-U', cls.config['username'],
           '-p', port_number,
           '-v', 'VERBOSITY=verbose',
           '-v', 'AUTOCOMMIT=on',
           '-v', 'ON_ERROR_STOP=on',
           '-v', 'schema=%s' % cls.config['schema_name'],
           cls.config['db_name']]
    my_env = None
    if 'password' in cls.config:
        # Pass the password via the environment, not the command line.
        my_env = os.environ.copy()
        my_env['VSQL_PASSWORD'] = cls.config['password']
    # NOTE(review): caller owns (and must close) the returned file object.
    return cmd, my_env, open(filename)
def UP(self, n=1):
    """ANSI escape: move the cursor up ``n`` rows."""
    return "".join([CSI, str(n), "A"])
def test_pop(self):
    # pop() must return elements LIFO and shrink the list each time.
    x = DoubleList()
    for i in range(5):
        x.append(i)
    for i in range(5):
        a = x.pop()
        self.assertEqual(a, 5 - i - 1)
        self.assertEqual(len(x), 5 - i - 1)
    # Popping an empty list raises IndexError.
    with self.assertRaises(IndexError):
        x.pop()
def check_iter_range(self, runs, value):
    # Exercise the run iterator with every window size: each
    # (start, end, style) range it yields must match the per-element
    # styles recorded in ``value``.
    for interval in range(1, len(value)):
        it = runs.get_run_iterator()
        for start in range(0, len(value), interval):
            end = min(start + interval, len(value))
            for s, e, style in it.ranges(start, end):
                for v in value[s:e]:
                    self.assertTrue(v == style, (start, end, s, e, style))
def _parseUserPass(self):
    # RFC 1929 SOCKS5 username/password negotiation:
    #   VER | ULEN | UNAME | PLEN | PASSWD
    try:
        # Parse out data
        ver, ulen = struct.unpack('BB', self.buf[:2])
        uname, = struct.unpack('%ds' % ulen, self.buf[2:ulen + 2])
        plen, = struct.unpack('B', self.buf[ulen + 2])
        password, = struct.unpack('%ds' % plen, self.buf[ulen + 3:ulen + 3 + plen])
        # Trim off front of the buffer
        self.buf = self.buf[3 + ulen + plen:]
        # Fire event to authenticate user
        if self.authenticateUserPass(uname, password):
            # Signal success
            self.state = STATE_REQUEST
            self.transport.write(struct.pack('!BB', SOCKS5_VER, 0x00))
        else:
            # Signal failure
            self.transport.write(struct.pack('!BB', SOCKS5_VER, 0x01))
            self.transport.loseConnection()
    except struct.error:
        # Message not fully buffered yet: wait for more data.
        pass
def summary_pyval_repr(self, max_len=None): """ Return a single-line formatted representation of the Python object described by this C{ValueDoc}. This representation may include data from introspection or parsing, and is authorative as 'the best way to summarize a Python value.' If the representation takes more then L{SUMMARY_REPR_LINELEN} characters, then it will be truncated (with an ellipsis marker). This function will never return L{UNKNOWN} or C{None}. @rtype: L{ColorizedPyvalRepr} """ # If max_len is specified, then do *not* cache the result. if max_len is not None: return epydoc.markup.pyval_repr.colorize_pyval( self.pyval, self.parse_repr, self.REPR_MIN_SCORE, max_len, maxlines=1, linebreakok=False) # Use self.__summary_pyval_repr to cache the result. if not hasattr(self, '_ValueDoc__summary_pyval_repr'): self.__summary_pyval_repr = epydoc.markup.pyval_repr.colorize_pyval( self.pyval, self.parse_repr, self.REPR_MIN_SCORE, self.SUMMARY_REPR_LINELEN, maxlines=1, linebreakok=False) return self.__summary_pyval_repr
def update(self, dt):
    # Advance the wheel's rotation by ``dt`` seconds.
    if self.is_rotating or self.index_diff:
        increment = self.direction * self.speed * self.float_increment * dt
        self.float_index = (self.float_index + increment) % self.num_tiles

        if self.index_diff:
            # Rotating a fixed distance: consume it, and when exhausted
            # snap to the nearest tile and stop.
            self.index_diff -= abs(increment)
            if self.index_diff < 0:
                self.index_diff = 0
                self.float_index = round(self.float_index) % self.num_tiles
                self.index = int(self.float_index)
                self.is_rotating = False

        # Map the tile position onto [0, 360) degrees.
        self.angle = (self.float_index / self.num_tiles) * 360
@feature('cxx')
@after('apply_lib_vars')
def apply_defines_cxx(self):
    """after uselib is set for CXXDEFINES"""
    self.defines = getattr(self, 'defines', [])
    lst = self.to_list(self.defines) + self.to_list(self.env['CXXDEFINES'])
    milst = []

    # now process the local defines
    for defi in lst:
        if not defi in milst:
            # Keep first occurrence only, preserving order.
            milst.append(defi)

    # CXXDEFINES_USELIB
    libs = self.to_list(self.uselib)
    for l in libs:
        val = self.env['CXXDEFINES_'+l]
        if val:
            milst += self.to_list(val)

    # 'NAME VALUE' pairs, stripped of quoting, for config reporting.
    self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]]
    y = self.env['CXXDEFINES_ST']
    # Render each define through the compiler's flag template (e.g. '-D%s').
    self.env['_CXXDEFFLAGS'] = [y%x for x in milst]
def clone(self, env):
    ""
    # Duplicate this task generator, rebinding it to ``env`` (either an
    # environment name looked up in bld.all_envs, or an env object).
    newobj = task_gen(bld=self.bld)
    for x in self.__dict__:
        if x in ['env', 'bld']:
            # Handled explicitly below; never copied blindly.
            continue
        elif x in ["path", "features"]:
            # Shared by reference on purpose.
            setattr(newobj, x, getattr(self, x))
        else:
            setattr(newobj, x, copy.copy(getattr(self, x)))

    newobj.__class__ = self.__class__
    if isinstance(env, str):
        newobj.env = self.bld.all_envs[env].copy()
    else:
        newobj.env = env.copy()
    return newobj
def update_bars():
    # Randomly resize every bar, then repair the vertex strip and colour
    # data that the resize left stale.
    for bar in bars:
        old_length = bar.count
        length = random.randint(MIN_BAR_LENGTH, MAX_BAR_LENGTH)
        bar.resize(length)
        vertices = bar.vertices

        # Update new vertices (overwrite old degenerate)
        for i in range((old_length - 1) * 2, length * 2):
            if i & 1:
                # y
                vertices[i] = BAR_SEGMENT_HEIGHT * (i // 4)
            else:
                # x
                vertices[i] = vertices[i - 4]

        # Update top degenerate (first degenerate is never modified)
        vertices[-2:] = vertices[-4:-2]

        # Update colors
        if length > old_length:
            # Replicate the first vertex colour into the new vertices.
            bar.colors[old_length*3:length*3] = bar.colors[:3] * (length - old_length)
def _reconstruct_sub(self, offset, xstep, ystep):
    """
    Reverse sub filter.
    """
    # PNG 'Sub' filter: each stored byte is the difference to the pixel
    # one pixel-size to the left; add it back (mod 256) to reconstruct.
    pixels = self.pixels
    a_offset = offset
    offset += self.psize * xstep
    if xstep == 1:
        # Contiguous row: walk byte by byte.
        for index in range(self.psize, self.row_bytes):
            x = pixels[offset]
            a = pixels[a_offset]
            pixels[offset] = (x + a) & 0xff
            offset += 1
            a_offset += 1
    else:
        # Interlaced pass: only every xstep-th pixel belongs to this row.
        byte_step = self.psize * xstep
        for index in range(byte_step, self.row_bytes, byte_step):
            for i in range(self.psize):
                x = pixels[offset + i]
                a = pixels[a_offset + i]
                pixels[offset + i] = (x + a) & 0xff
            offset += self.psize * xstep
            a_offset += self.psize * xstep
def timestamp_to_avbin(timestamp):
    """Convert a timestamp in seconds to AVbin's integer microseconds."""
    microseconds_per_second = 1000000
    return int(microseconds_per_second * timestamp)
def visit_struct(self, struct):
    # Emit a ctypes wrapper class for a C struct/union.  self.structs
    # guards against infinite recursion on self-referential types.
    if struct.tag in self.structs:
        return
    self.structs.add(struct.tag)

    base = {True: 'Union', False: 'Structure'}[struct.is_union]
    print >> self.file, 'class struct_%s(%s):' % (struct.tag, base)
    print >> self.file, ' __slots__ = ['
    if not struct.opaque:
        for m in struct.members:
            print >> self.file, " '%s'," % m[0]
    print >> self.file, ' ]'

    # Set fields after completing class, so incomplete structs can be
    # referenced within struct.
    for name, typ in struct.members:
        self.emit_type(typ)

    print >> self.file, 'struct_%s._fields_ = [' % struct.tag
    if struct.opaque:
        print >> self.file, " ('_opaque_struct', c_int)"
        # Opaque: allow a later full definition to be emitted.
        self.structs.remove(struct.tag)
    else:
        for m in struct.members:
            print >> self.file, " ('%s', %s)," % (m[0], m[1])
    print >> self.file, ']'
    print >> self.file
def _u_in(self, u): return u >= 0.0 and u <= 1.0
def destroy(self):
    '''Release the context.

    The context will not be useable after being destroyed.  Each platform
    has its own convention for releasing the context and the buffer(s)
    that depend on it in the correct order; this should never be called
    by an application.
    '''
    self.detach()

    if gl.current_context is self:
        # This context was current: clear the global and its cached info.
        gl.current_context = None
        gl_info.remove_active_context()

        # Switch back to shadow context.
        if gl._shadow_window is not None:
            gl._shadow_window.switch_to()
def exec_test(self):
    # Run one compiled unit test with the build's library directories
    # prepended to the dynamic-loader search path, and record the tuple
    # (filename, returncode, stdout, stderr) on the build context.
    status = 0

    variant = self.env.variant()
    filename = self.inputs[0].abspath(self.env)

    # Build (once) and cache the environment used to launch tests.
    try:
        fu = getattr(self.generator.bld, 'all_test_paths')
    except AttributeError:
        fu = os.environ.copy()
        self.generator.bld.all_test_paths = fu

        # Collect the output directories of all linked artefacts.
        lst = []
        for obj in self.generator.bld.all_task_gen:
            link_task = getattr(obj, 'link_task', None)
            if link_task and link_task.env.variant() == variant:
                lst.append(link_task.outputs[0].parent.abspath(obj.env))

        def add_path(dct, path, var):
            dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])

        if sys.platform == 'win32':
            add_path(fu, lst, 'PATH')
        elif sys.platform == 'darwin':
            add_path(fu, lst, 'DYLD_LIBRARY_PATH')
            add_path(fu, lst, 'LD_LIBRARY_PATH')
        else:
            add_path(fu, lst, 'LD_LIBRARY_PATH')

    cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env)
    proc = Utils.pproc.Popen(filename, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
    (stdout, stderr) = proc.communicate()

    tup = (filename, proc.returncode, stdout, stderr)
    self.generator.utest_result = tup

    # utest_results is shared across test tasks; guard with the lock.
    testlock.acquire()
    try:
        bld = self.generator.bld
        Logs.debug("ut: %r", tup)
        try:
            bld.utest_results.append(tup)
        except AttributeError:
            bld.utest_results = [tup]
    finally:
        testlock.release()
def blit_to_texture(self, target, level, x, y, z):
    # Copy this buffer's rectangle into the currently bound texture at the
    # given mipmap level; the anchor point offsets the destination origin.
    # NOTE(review): ``z`` is accepted but unused by the 2D copy.
    glReadBuffer(self.gl_buffer)
    glCopyTexSubImage2D(target, level,
                        x - self.anchor_x, y - self.anchor_y,
                        self.x, self.y, self.width, self.height)
def must_introspect(self, name):
    """
    Decide whether the named module should be introspected under the
    current settings.

    @param name: The name of the module to test
    @type name: L{DottedName} or C{str}
    """
    # Preserve short-circuiting: the exclusion filter is only consulted
    # when introspection is enabled at all.
    if not self.introspect:
        return self.introspect
    return not self._matches_filter(name, self._introspect_regexp)
def t_error(t):
    """Lexer error callback: record the offending text and resume scanning.

    When the bad run contains a space, only the text up to that space is
    reported and skipped; otherwise a single character is skipped.
    """
    bad = t.value
    space_at = bad.find(" ")
    if space_at != -1:
        t.lexer.errors.append((bad[:space_at], t.lexer.lexpos, t.lexer.lineno))
        t.lexer.skip(space_at)
    else:
        t.lexer.errors.append((bad, t.lexer.lexpos, t.lexer.lineno))
        t.lexer.skip(1)
def on_recreate_immediate(self, changes):
    # The actual _recreate function.
    if ('context' in changes):
        # Drop the old GL context before building the replacement.
        self.context.detach()

    self._create()
def equality_rewrite(node, name, expr, assumed_result):
    """Rewrite comparisons in ``node`` against ``expr`` under the assumption
    that ``expr`` evaluated to ``assumed_result``."""
    # Get the literal and static compare values
    static_value = expr.right.value
    is_static = expr.right.static

    # Do we 'know' the value to be something
    # specific, or can we just eliminate a possible value.
    if expr.type in EQUALITY:
        known = assumed_result
    else:
        known = not assumed_result

    # Replace function to handle AST re-writes
    def replace_func(pattern, node):
        # Do the static comparison
        static_match = node.right.value == static_value
        is_static_node = node.right.static

        # If we are refactoring equality on a static
        # variable, then we can statically perform the comparisons
        # and do more aggressive rewrites of the AST.
        const = None
        if known and is_static and is_static_node:
            if node.type in EQUALITY:
                const = static_match
            else:
                const = not static_match

        # If we are refactoring equality on a non-static
        # variable, then we have a limit set of rewrites.
        # for example, if a = b, then a = c could also be true,
        # since b = c is possible.
        elif static_match:
            if node.type in EQUALITY:
                const = known
            else:
                const = not known

        # If we can't do a rewrite, just skip this node
        return ast.Constant(const) if const is not None else None

    # Tile to replace
    pattern = SimplePattern("types:CompareOperator AND ops:=,!=,is", ASTPattern(expr.left))
    return tile(node, [pattern], replace_func)
def decorate(func):
    # Register ``func`` as the default handler for the named event on
    # every selector in the comma-separated rule string.
    name = event_name or func.__name__
    if name not in GUIEventDispatcher.event_types:
        raise EventException('Unknown event "%s"' % name)
    for r in rule.split(','):
        selector = Selector.from_string(r.strip())
        GUIEventDispatcher.set_default_handler(name, selector, func)
    return func
def is_classmethod(instancemethod):
    " Determine if an instancemethod is a classmethod. "
    # Python 2: a bound classmethod carries its class in im_self, whereas
    # an unbound instance method has im_self set to None.
    return instancemethod.im_self is not None
def _propgetpixelwidth(self): return self.width * self._cellwidth
def mean(a, weights=None, dim=None):
    # Arithmetic mean of an arrayfire Array, optionally weighted.
    # With ``dim`` set, reduces along that dimension and returns an Array;
    # otherwise reduces over all elements and returns a Python scalar
    # (complex only when an imaginary component is present).
    if dim is not None:
        out = Array()
        if weights is None:
            safe_call(backend.get().af_mean(ct.pointer(out.arr), a.arr, ct.c_int(dim)))
        else:
            safe_call(backend.get().af_mean_weighted(ct.pointer(out.arr), a.arr, weights.arr, ct.c_int(dim)))
        return out
    else:
        real = ct.c_double(0)
        imag = ct.c_double(0)
        if weights is None:
            safe_call(backend.get().af_mean_all(ct.pointer(real), ct.pointer(imag), a.arr))
        else:
            safe_call(backend.get().af_mean_all_weighted(ct.pointer(real), ct.pointer(imag), a.arr, weights.arr))
        real = real.value
        imag = imag.value
        # Collapse to a plain float when there is no imaginary part.
        return real if imag == 0 else real + imag * 1j
def user_docfields(api_doc, docindex):
    """
    Return a list of user defined fields that can be used for the
    given object.  This list is taken from the given C{api_doc}, and
    any of its containing C{NamepaceDoc}s.

    @note: We assume here that a parent's docstring will always be
        parsed before its childrens'.  This is indeed the case when we
        are called via L{docbuilder.build_doc_index()}.  If a child's
        docstring is parsed before its parents, then its parent won't
        yet have had its C{extra_docstring_fields} attribute
        initialized.
    """
    docfields = []
    # Get any docfields from `api_doc` itself
    if api_doc.extra_docstring_fields not in (None, UNKNOWN):
        docfields += api_doc.extra_docstring_fields
    # Get any docfields from `api_doc`'s ancestors
    for i in range(len(api_doc.canonical_name)-1, 0, -1):
        # NOTE(review): loop variable ``i`` is unused and container() is
        # applied to the same name every iteration — this looks like it was
        # meant to walk successive ancestors; verify against upstream epydoc.
        ancestor = docindex.get_valdoc(api_doc.canonical_name.container())
        if ancestor is not None \
           and ancestor.extra_docstring_fields not in (None, UNKNOWN):
            docfields += ancestor.extra_docstring_fields
    return docfields
def test_last_option_updated_up_releated_api_params(index_data):
    # Build a minimal 'aws ec2 create-tags' command index with two options.
    index_data['aws']['commands'] = ['ec2']
    index_data['aws']['children'] = {
        'ec2': {
            'commands': ['create-tags'],
            'argument_metadata': {},
            'arguments': [],
            'children': {
                'create-tags': {
                    'commands': [],
                    'argument_metadata': {
                        '--resources': {'example': '', 'minidoc': 'foo'},
                        '--tags': {'example': 'bar', 'minidoc': 'baz'},
                    },
                    'arguments': ['--resources', '--tags'],
                    'children': {},
                }
            }
        }
    }
    completer = AWSCLIModelCompleter(index_data)
    completer.autocomplete('ec2 create-tags --resources ')
    assert completer.last_option == '--resources'
    completer.autocomplete('ec2 create-tags --resources f --tags ')
    # last_option should be updated.
    assert completer.last_option == '--tags'
def _set_text_property(self, name, value, allow_utf8=True):
    # Set an X11 text property (e.g. WM_NAME) on this window, preferring
    # UTF-8 encoding when the Xlib build supports it.
    atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
    if not atom:
        raise XlibException('Undefined atom "%s"' % name)
    assert type(value) in (str, unicode)
    property = xlib.XTextProperty()
    if _have_utf8 and allow_utf8:
        buf = create_string_buffer(value.encode('utf8'))
        result = xlib.Xutf8TextListToTextProperty(self._x_display,
            cast(pointer(buf), c_char_p), 1, xlib.XUTF8StringStyle,
            byref(property))
        if result < 0:
            raise XlibException('Could not create UTF8 text property')
    else:
        # Fallback: plain STRING style, silently dropping non-ASCII.
        buf = create_string_buffer(value.encode('ascii', 'ignore'))
        result = xlib.XStringListToTextProperty(
            cast(pointer(buf), c_char_p), 1, byref(property))
        if result < 0:
            raise XlibException('Could not create text property')
    xlib.XSetTextProperty(self._x_display, self._window, byref(property), atom)
def savememo(memo,good,bad,skipped):
    # Persist regrtest-style results to ``memo`` as an importable Python
    # module containing three lists: good, bad and skipped.
    f = open(memo,'w')
    try:
        for n,l in [('good',good),('bad',bad),('skipped',skipped)]:
            print >>f,"%s = [" % n
            for x in l:
                print >>f," %r," % x
            print >>f," ]"
    finally:
        f.close()
def __getattr__(self, attr):
    """Count accesses per attribute name; return a Bucket seeded with the
    number of times this attribute was requested before."""
    previous_count = self._d.setdefault(attr, 0)
    self._d[attr] = previous_count + 1
    return Bucket(previous_count)
@iterate(1000)
def testRandomContains(self):
    # Property test: membership in an IPSet must agree with membership in
    # at least one of the prefixes the set was built from.
    prefixes = [random_ipv4_prefix() for i in xrange(random.randrange(50))]
    question = random_ipv4_prefix()
    answer = any(question in pfx for pfx in prefixes)
    ipset = IPy.IPSet(prefixes)
    self.assertEqual(question in ipset, answer,
                     "%s in %s != %s (made from %s)" % (question, ipset, answer, prefixes))
def format_roman(self, case, counter):
    """Render ``counter`` as a roman numeral; ``case == 'I'`` upper-cases it."""
    ones = ['i', 'x', 'c', 'm']
    fives = ['v', 'l', 'd']
    label = ''
    index = 0
    # This will die of IndexError when counter is too big
    while counter > 0:
        counter, digit = divmod(counter, 10)
        if digit == 9:
            piece = ones[index] + ones[index + 1]
        elif digit == 4:
            piece = ones[index] + fives[index]
        else:
            if digit >= 5:
                piece = fives[index]
                digit -= 5
            else:
                piece = ''
            piece += ones[index] * digit
        label = piece + label
        index += 1
    return label.upper() if case == 'I' else label
def _propsetpixelheight(self, value): newheight = int(int(value) / self._cellheight) if newheight != self._height: self.resize(newheight=newheight)
def ddpop(self, bl=0):
    # Close a <dd> definition-description block: end the current paragraph
    # and, if we are inside a 'dd' entry, pop it and restore the margin.
    self.formatter.end_paragraph(bl)
    if self.list_stack:
        if self.list_stack[-1][0] == 'dd':
            del self.list_stack[-1]
            self.formatter.pop_margin()
def parse(self, source):
    # Incremental-parser driver: feed the byte stream to self.feed() in
    # 16 KB chunks, then close and reset the parser for reuse.
    source = prepare_input_source(source)
    self.prepareParser(source)

    self._cont_handler.startDocument()

    # FIXME: what about char-stream?
    inf = source.getByteStream()
    buffer = inf.read(16384)
    while buffer != "":
        self.feed(buffer)
        buffer = inf.read(16384)
    self.close()
    self.reset()

    self._cont_handler.endDocument()
def abspath(path):
    """Return the absolute version of a path."""
    # Jython/Windows variant: getcwdu() keeps unicode paths unicode.
    if not isabs(path):
        if isinstance(path, unicode):
            cwd = os.getcwdu()
        else:
            cwd = os.getcwd()
        path = join(cwd, path)
        if not splitunc(path)[0] and not splitdrive(path)[0]:
            # cwd lacks a UNC mount point, so it should have a drive
            # letter (but lacks one): determine it
            canon_path = newString(java.io.File(path).getCanonicalPath())
            drive = splitdrive(canon_path)[0]
            path = join(drive, path)
    return normpath(path)
def test_cmptypes(self):
    # Built-in tp_compare slots expect their arguments to have the
    # same type, but a user-defined __coerce__ doesn't have to obey.
    # SF #980352
    evil_coercer = CoerceTo(42)
    # Make sure these don't crash any more
    self.assertNotEqual(cmp(u'fish', evil_coercer), 0)
    self.assertNotEqual(cmp(slice(1), evil_coercer), 0)
    # ...but that this still works
    class WackyComparer(object):
        def __cmp__(slf, other):
            self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
            return 0
        __hash__ = None # Invalid cmp makes this unhashable
    self.assertEqual(cmp(WackyComparer(), evil_coercer), 0)
    # ...and classic classes too, since that code path is a little different
    class ClassicWackyComparer:
        def __cmp__(slf, other):
            self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
            return 0
    self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0)
def get_qualified_path(name):
    """ return a more qualified path to name"""
    import os
    import sys
    search_dirs = list(sys.path)
    try:
        # Prefer the directory this module lives in, when known.
        search_dirs.insert(0, os.path.dirname(__file__))
    except NameError:
        pass
    for directory in search_dirs:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    # Fall back to the bare name when nothing matches.
    return name
def monkeypatched_call_command(name, *args, **options):
    # XXX: Find a better way to detect a DB connection using
    # django-pydobc and do our monkeypatching only in such cases
    if name == 'loaddata' and Query.__name__ == 'PyOdbcSSQuery':
        # Swap in the SQL-Server-aware fixture loader.
        name = 'ss_loaddata'
    return real_call_command(name, *args, **options)
def test_poll(self):
    # Spawn a child that sleeps ~1s and poll it until it exits.
    p = subprocess.Popen([sys.executable,
                          "-c", "import time; time.sleep(1)"])
    count = 0
    while p.poll() is None:
        time.sleep(0.1)
        count += 1
    # We expect that the poll loop probably went around about 10 times,
    # but, based on system scheduling we can't control, it's possible
    # poll() never returned None.  It "should be" very rare that it
    # didn't go around at least twice.
    self.assert_(count >= 2)
    # Subsequent invocations should just return the returncode
    self.assertEqual(p.poll(), 0)
def __sub__(self, other):
    """Set difference; any iterable right-hand side is coerced via _from_iterable."""
    if not isinstance(other, Set):
        if not isinstance(other, Iterable):
            return NotImplemented
        other = self._from_iterable(other)
    remaining = (value for value in self if value not in other)
    return self._from_iterable(remaining)
@unittest.skipIf(test_support.is_jython, "FIXME: not working in Jython")
def test_contains(self):
    # Weak-set membership must mirror the plain container for each letter.
    for c in self.letters:
        self.assertEqual(c in self.s, c in self.d)
    # 1 is not weakref'able, but that TypeError is caught by __contains__
    self.assertNotIn(1, self.s)
    self.assertIn(self.obj, self.fs)
    del self.obj
    # Once the only strong reference is dropped, the entry disappears.
    self.assertNotIn(SomeClass('F'), self.fs)
def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element."""
    if type(object) in (list, tuple):
        # Recurse into nested lists/tuples, then let ``join`` combine them.
        return join([strseq(element, convert, join) for element in object])
    return convert(object)
def _cmp_(x, y): xt = (x.getHours(), x.getMinutes(), x.getSeconds()) yt = (y.getHours(), y.getMinutes(), y.getSeconds()) return not xt == yt
def testRegisterResult(self):
    # registerResult() must record the result in unittest.signals._results.
    result = unittest.TestResult()
    unittest.registerResult(result)
    # NOTE(review): the elif fires on the first non-matching entry, so this
    # effectively requires ``result`` to be the first element — confirm
    # that is the intended invariant.
    for ref in unittest.signals._results:
        if ref is result:
            break
        elif ref is not result:
            self.fail("odd object in result set")
    else:
        self.fail("result not found")
def emit(self, record):
    """
    Emit a record.

    First check if the underlying file has changed, and if it
    has, close the old stream and reopen the file to get the
    current stream.
    """
    if not os.path.exists(self.baseFilename):
        # File vanished (e.g. rotated away): force a reopen.
        stat = None
        changed = 1
    else:
        # Compare device/inode to detect replacement by an external rotator.
        stat = os.stat(self.baseFilename)
        changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
    if changed and self.stream is not None:
        self.stream.flush()
        self.stream.close()
        self.stream = self._open()
        if stat is None:
            stat = os.stat(self.baseFilename)
        # Remember the identity of the freshly opened file.
        self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
    logging.FileHandler.emit(self, record)
def handle_close(self):
    # Tear down the connection, logging in verbose test runs.
    self.close()
    if test_support.verbose:
        sys.stdout.write(" server: closed connection %s\n" % self.socket)
def test_setuptools_compat(self):
    # Verify get_ext_fullpath() still works when setuptools has patched
    # Distutils' Extension class; the original class is restored at the end.
    import distutils.core, distutils.extension, distutils.command.build_ext
    saved_ext = distutils.extension.Extension
    try:
        # on some platforms, it loads the deprecated "dl" module
        test_support.import_module('setuptools_build_ext', deprecated=True)

        # theses import patch Distutils' Extension class
        from setuptools_build_ext import build_ext as setuptools_build_ext
        from setuptools_extension import Extension

        etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
        etree_ext = Extension('lxml.etree', [etree_c])
        dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})

        cmd = setuptools_build_ext(dist)
        cmd.ensure_finalized()
        cmd.inplace = 1
        cmd.distribution.package_dir = {'': 'src'}
        cmd.distribution.packages = ['lxml', 'lxml.html']

        curdir = os.getcwd()
        ext = sysconfig.get_config_var("SO")
        wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)
    finally:
        # restoring Distutils' Extension class otherwise its broken
        distutils.extension.Extension = saved_ext
        distutils.core.Extension = saved_ext
        distutils.command.build_ext.Extension = saved_ext
def test_2(self):
    # Build a small on-disk package hierarchy (t2 / t2.sub / t2.sub.subsub)
    # and verify nested imports, 'from ... import *' and attribute access.
    hier = [
     ("t2", None),
     ("t2 __init__"+os.extsep+"py", "'doc for t2'"),
     ("t2 sub", None),
     ("t2 sub __init__"+os.extsep+"py", ""),
     ("t2 sub subsub", None),
     ("t2 sub subsub __init__"+os.extsep+"py", "spam = 1"),
    ]
    self.mkhier(hier)

    import t2.sub
    import t2.sub.subsub
    self.assertEqual(t2.__name__, "t2")
    self.assertEqual(t2.sub.__name__, "t2.sub")
    self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")

    # This exec crap is needed because Py3k forbids 'import *' outside
    # of module-scope and __import__() is insufficient for what we need.
    # NOTE(review): the embedded source below appears whitespace-flattened
    # in this copy; confirm line breaks against the upstream test.
    s = """ import t2 from t2 import * self.assertEqual(dir(), ['self', 'sub', 't2']) """
    self.run_code(s)

    from t2 import sub
    from t2.sub import subsub
    from t2.sub.subsub import spam
    self.assertEqual(sub.__name__, "t2.sub")
    self.assertEqual(subsub.__name__, "t2.sub.subsub")
    self.assertEqual(sub.subsub.__name__, "t2.sub.subsub")
    # NOTE(review): locals()["name"] looks up the literal key "name";
    # locals()[name] was probably intended — verify against upstream.
    for name in ['spam', 'sub', 'subsub', 't2']:
        self.assertTrue(locals()["name"], "Failed to import %s" % name)

    import t2.sub
    import t2.sub.subsub
    self.assertEqual(t2.__name__, "t2")
    self.assertEqual(t2.sub.__name__, "t2.sub")
    self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")

    s = """ from t2 import * self.assertTrue(dir(), ['self', 'sub']) """
    self.run_code(s)
def __eq__(self, other):
    """Equal only to another timedelta representing the same duration."""
    if not isinstance(other, timedelta):
        return False
    return self.__cmp(other) == 0
def handle_expt_event(self):
    # handle_expt_event() is called if there might be an error on the
    # socket, or if there is OOB data
    # check for the error condition first
    err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    if err != 0:
        # we can get here when select.select() says that there is an
        # exceptional condition on the socket
        # since there is an error, we'll go ahead and close the socket
        # like we would in a subclassed handle_read() that received no
        # data
        self.handle_close()
    else:
        self.handle_expt()
def __eq__(self, other):
    """Equal when ``other`` is an ``A`` sharing both coordinates."""
    if isinstance(other, A) and self.x == other.x and self.y == other.y:
        return True
    return False
def foo(self): pass

# NOTE(review): fragment of a function-attribute test — 'fy' and 'set' are
# presumably helper assertions defined elsewhere in the test module
# (checking/setting __name__ on bound and unbound methods of class C);
# confirm against the enclosing file.
fy(C.foo.__name__ == "foo")
fy(C().foo.__name__ == "foo")
set(C.foo, "__name__", "foo")
set(C().foo, "__name__", "foo")
def get_ext_fullname(self, ext_name):
    """Returns the fullname of a given extension name.

    Adds the `package.` prefix"""
    if self.package is None:
        return ext_name
    return self.package + '.' + ext_name
def formatweekday(self, day, width):
    """
    Returns a formatted week day name.
    """
    # Full names only fit when at least nine columns are available.
    names = day_name if width >= 9 else day_abbr
    return names[day][:width].center(width)
def test_boolean(self):
    # Exercise getboolean(): five true spellings, five false spellings,
    # and five invalid values that must raise ValueError.
    cf = self.fromstring(
        "[BOOLTEST]\n"
        "T1=1\n"
        "T2=TRUE\n"
        "T3=True\n"
        "T4=oN\n"
        "T5=yes\n"
        "F1=0\n"
        "F2=FALSE\n"
        "F3=False\n"
        "F4=oFF\n"
        "F5=nO\n"
        "E1=2\n"
        "E2=foo\n"
        "E3=-1\n"
        "E4=0.1\n"
        "E5=FALSE AND MORE"
        )
    # range(1, 6): the original range(1, 5) silently skipped the *5 rows,
    # leaving 'yes', 'nO' and 'FALSE AND MORE' untested.
    for x in range(1, 6):
        self.failUnless(cf.getboolean('BOOLTEST', 't%d' % x))
        self.failIf(cf.getboolean('BOOLTEST', 'f%d' % x))
        self.assertRaises(ValueError,
                          cf.getboolean, 'BOOLTEST', 'e%d' % x)
def _read(self, size, read_method):
    """Read size bytes using read_method, honoring start and stop."""
    remaining = self._stop - self._pos
    # Window exhausted: behave like EOF.
    if remaining <= 0:
        return ''
    # Clamp unbounded or out-of-window requests to what remains.
    if size is None or not 0 <= size <= remaining:
        size = remaining
    return _ProxyFile._read(self, size, read_method)
def replace_header(self, _name, _value):
    """Replace a header.

    Replace the first matching header found in the message, retaining
    header order and case.  If no matching header was found, a KeyError
    is raised.
    """
    target = _name.lower()
    for i, (k, v) in enumerate(self._headers):
        if k.lower() == target:
            # Keep the stored header's original casing.
            self._headers[i] = (k, _value)
            break
    else:
        raise KeyError(_name)
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in known_paths:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        # Directory unreadable or missing: nothing to process.
        return
    names.sort()
    for name in names:
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def ratio(self):
    """Return a measure of the sequences' similarity (float in [0,1]).

    Where T is the total number of elements in both sequences, and
    M is the number of matches, this is 2.0*M / T.
    Note that this is 1 if the sequences are identical, and 0 if
    they have nothing in common.

    .ratio() is expensive to compute if you haven't already computed
    .get_matching_blocks() or .get_opcodes(), in which case you may
    want to try .quick_ratio() or .real_quick_ratio() first to get an
    upper bound.

    >>> s = SequenceMatcher(None, "abcd", "bcde")
    >>> s.ratio()
    0.75
    >>> s.quick_ratio()
    0.75
    >>> s.real_quick_ratio()
    1.0
    """
    # The match count is the sum of the block sizes (last tuple element).
    matches = sum(block[-1] for block in self.get_matching_blocks())
    return _calculate_ratio(matches, len(self.a) + len(self.b))
def __repr__(self):
    # Debugging aid in unittest's repr convention: the (dotted) class name
    # plus the wrapped test function.
    return "<%s testFunc=%s>" % (_strclass(self.__class__), self.__testFunc)
def handle(self):
    """
    Handle multiple requests - each expected to be a 4-byte length,
    followed by the LogRecord in pickle format. Logs the record
    according to whatever policy is configured locally.
    """
    while True:
        header = self.connection.recv(4)
        if len(header) < 4:
            # Peer closed the connection (or sent a short header): stop.
            break
        slen = struct.unpack(">L", header)[0]
        # Keep reading until the whole pickled record has arrived.
        payload = self.connection.recv(slen)
        while len(payload) < slen:
            payload += self.connection.recv(slen - len(payload))
        record = logging.makeLogRecord(self.unPickle(payload))
        self.handleLogRecord(record)
def get_chromecasts(tries=None, retry_wait=None, timeout=None, **filters):
    """Search the network and return a list of Chromecast objects.

    Keyword filters narrow the results, e.g.
    ``get_chromecasts(friendly_name="Living Room")``.  An empty list is
    returned when nothing matches.

    Supported filters are DeviceStatus fields (friendly_name, model_name,
    manufacturer, api_version), AppStatus fields (app_id, description,
    state, service_url, service_protocols) or the ip address (``ip``).

    ``tries`` limits how often the underlying socket retries connecting
    when the connection is lost or never establishes; ``retry_wait`` sets
    the seconds between retries (default 5).
    """
    logger = logging.getLogger(__name__)
    cc_list = set(_get_all_chromecasts(tries, retry_wait, timeout))
    if not filters:
        return list(cc_list)

    excluded_cc = set()
    # The ip filter matches on the host attribute, not on device/app status.
    if 'ip' in filters:
        wanted_ip = filters.pop('ip')
        excluded_cc.update(cc for cc in cc_list if cc.host != wanted_ip)

    for key, val in filters.items():
        for chromecast in cc_list:
            for status_obj in (chromecast.device, chromecast.status):
                if hasattr(status_obj, key) and getattr(status_obj, key) != val:
                    excluded_cc.add(chromecast)

    filtered_cc = cc_list - excluded_cc
    # Shut down the socket clients of everything we filtered out.
    for cast in excluded_cc:
        logger.debug("Stopping excluded chromecast %s", cast)
        cast.socket_client.stop.set()
    return list(filtered_cc)
def _call_chain(self, chain, kind, meth_name, *args): # Handlers raise an exception if no one else should try to handle # the request, or return None if they can't but another handler # could. Otherwise, they return the response. handlers = chain.get(kind, ()) for handler in handlers: func = getattr(handler, meth_name) result = func(*args) if result is not None: return result
def _line_pair_iterator():
    """Yields from/to lines of text with a change indication.

    This function is an iterator.  It itself pulls lines from the line
    iterator.  Its difference from that iterator is that this function
    always yields a pair of from/to text lines (with the change
    indication).  If necessary it will collect single from/to lines
    until it has a matching pair from/to pair to yield.

    Note, this function is purposefully not defined at the module scope so
    that data it needs from its parent function (within whose context it
    is defined) does not need to be of module scope.
    """
    line_iterator = _line_iterator()
    fromlines,tolines=[],[]
    while True:
        # Collecting lines of text until we have a from/to pair
        while (len(fromlines)==0 or len(tolines)==0):
            # Py2-style .next(); a side may yield None when it has no line.
            from_line, to_line, found_diff =line_iterator.next()
            if from_line is not None:
                fromlines.append((from_line,found_diff))
            if to_line is not None:
                tolines.append((to_line,found_diff))
        # Once we have a pair, remove them from the collection and yield it
        from_line, fromDiff = fromlines.pop(0)
        to_line, to_diff = tolines.pop(0)
        # A pair is "changed" when either side carried a diff flag.
        yield (from_line,to_line,fromDiff or to_diff)
def chunk_it(l, chunks):
    # Split *l* into `chunks` strided groups: consecutive runs of `chunks`
    # elements are dealt out column-wise, so group i holds l[i], l[i+chunks],
    # l[i+2*chunks], ...
    # NOTE(review): izip_longest pads the final run with None, so when
    # len(l) is not a multiple of `chunks` the later group(s) contain
    # trailing None entries — confirm callers expect that.
    return list(zip(*izip_longest(*[iter(l)] * chunks)))
def default_output_device():
    """Return default output device index.

    Raises RuntimeError when the backend reports no usable default
    output device (a negative index).
    """
    index = _pa.Pa_GetDefaultOutputDevice()
    if index < 0:
        raise RuntimeError("No default output device available")
    return index
def get_prior_mean(self, node_id, param, settings):
    """Return the prior mean for *node_id*.

    Classification runs use the base measure at the root and the
    parent's predictive probability elsewhere; other optypes return
    None (smooth_hierarchically is False in that case).
    """
    if settings.optype != 'class':
        return None
    if node_id == self.root:
        return param.base_measure
    return self.pred_prob[node_id.parent]
def func(environ, start_response):
    # WSGI wrapper around the enclosed app `f`: gzip-compresses the response
    # body when the client advertises gzip in Accept-Encoding.
    content = f(environ, start_response)
    if 'gzip' in environ.get('HTTP_ACCEPT_ENCODING', ''):
        if type(content) is list:
            content = "".join(content)
        else:
            #this is a stream
            content = content.read()
        # Compress the whole body in memory (Py2 StringIO buffer).
        sio = StringIO.StringIO()
        comp_file = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=sio)
        comp_file.write(content)
        comp_file.close()
        # NOTE(review): start_response here is used as an object with an
        # add_header method rather than the standard WSGI callable — confirm
        # the server framework this targets.
        start_response.add_header('Content-Encoding', 'gzip')
        res = sio.getvalue()
        start_response.add_header('Content-Length', len(res))
        return [res]
    else:
        # Client can't take gzip: pass the original body through untouched.
        return content
def _prune_cond_tree(heads, min_support):
    # Prune a conditional FP-tree bottom-up: walk the header entries in
    # reverse order and let each node in the sibling chain decide whether to
    # merge/prune itself against nodes seen in the previous pass.
    merged_before = {}
    merged_now = {}
    for key in reversed(heads):
        (node, head_support) = heads[key]
        if head_support > 0:
            visited_parents = {}
            previous_node = None
            while node is not None:
                # If the node is merged, we lose the next_node
                next_node = node.next_node
                node.prune_me(
                    previous_node, visited_parents, merged_before,
                    merged_now, heads, min_support)
                if node.next_node is not None:
                    # Only change the previous node if it wasn't merged.
                    previous_node = node
                node = next_node
        # Merges recorded in this pass feed the next (earlier) header entry.
        merged_before = merged_now
        merged_now = {}
def forward(self, input_act):
    """Forward propagation (shape-checking wrapper around ``_forward``).

    Child classes should overwrite _forward rather than this method.

    Parameters
    ----------
    input_act : numpy array, activations from the layer below; shape must
        either be the same as self.input_shape, or
        (NUMBER_OF_EXAMPLES,) + self.input_shape

    Returns
    -------
    output_act : numpy array, output activations from this layer; shape
        will be self.output_shape or
        (NUMBER_OF_EXAMPLES,) + self.output_shape, depending on the input
    """
    expected_ndim = len(self.input_shape)
    assert input_act.shape[-expected_ndim:] == tuple(self.input_shape), 'wrong input shape'
    # Remember whether a batch dimension was supplied so we can mirror it
    # on the way out.
    batched = input_act.ndim > expected_ndim
    act = self._forward(atleast(input_act, expected_ndim + 1))
    assert act.shape[1:] == tuple(self.output_shape), 'wrong output shape'
    if batched:
        return act
    return act[0, ...]
def test_job_run():
    # Job.run must propagate the wrapped callable's return value.
    sentinel = 42
    job = Job(lambda: sentinel, Schedule(30))
    assert job.run() == sentinel
def it_should_raise_exception(self):
    # The finished task must expose the exception it raised.
    exc = self.task.exception()
    assert exc is not None
@app.route('/')
def home():
    # Code adapted from: http://stackoverflow.com/questions/168409/
    # Collect (ctime, path) for every regular file in the upload directory.
    image_infos = []
    for filename in os.listdir(DATA_DIR):
        filepath = os.path.join(DATA_DIR, filename)
        file_stat = os.stat(filepath)
        if S_ISREG(file_stat[ST_MODE]):
            image_infos.append((file_stat[ST_CTIME], filepath))
    # Newest first; keep at most MAX_IMAGES and delete anything older.
    images = []
    for i, (_, path) in enumerate(sorted(image_infos, reverse=True)):
        if i >= MAX_IMAGES:
            os.unlink(path)
            continue
        images.append('<div><img alt="User uploaded image" src="{0}" /></div>'
                      .format(path))
    # The template is %-formatted below, hence the doubled %% in the CSS.
    return """
<!doctype html>
<title>Image Uploader</title>
<meta charset="utf-8" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jqueryui/1.10.1/jquery-ui.min.js"></script>
<link rel="stylesheet" href="//ajax.googleapis.com/ajax/libs/jqueryui/1.10.1/themes/vader/jquery-ui.css" />
<style>
body { max-width: 800px; margin: auto; padding: 1em; background: black; color: #fff; font: 16px/1.6 menlo, monospace; text-align:center; }
a { color: #fff; }
.notice { font-size: 80%%; }
#drop { font-weight: bold; text-align: center; padding: 1em 0; margin: 1em 0; color: #555; border: 2px dashed #555; border-radius: 7px; cursor: default; }
#drop.hover { color: #f00; border-color: #f00; border-style: solid; box-shadow: inset 0 3px 4px #888; }
</style>
<h3>Image Uploader</h3>
<p>Upload an image for everyone to see. Valid images are pushed to everyone
currently connected, and only the most recent %s images are saved.</p>
<p>The complete source for this Flask web service can be found at:
<a href="https://github.com/bboe/flask-image-uploader">https://github.com/bboe/flask-image-uploader</a></p>
<p class="notice">Disclaimer: The author of this application accepts no
responsibility for the images uploaded to this web service. To discourage
the submission of obscene images, IP addresses with the last two octets
hidden will be visibly associated with uploaded images.</p>
<noscript>Note: You must have javascript enabled in order to upload and
dynamically view new images.</noscript>
<fieldset>
  <p id="status">Select an image</p>
  <div id="progressbar"></div>
  <input id="file" type="file" />
  <div id="drop">or drop image here</div>
</fieldset>
<h3>Uploaded Images (updated in real-time)</h3>
<div id="images">%s</div>
<script>
  function sse() {
      var source = new EventSource('/stream');
      source.onmessage = function(e) {
          if (e.data == '')
              return;
          var data = $.parseJSON(e.data);
          var upload_message = 'Image uploaded by ' + data['ip_addr'];
          var image = $('<img>', {alt: upload_message, src: data['src']});
          var container = $('<div>').hide();
          container.append($('<div>', {text: upload_message}));
          container.append(image);
          $('#images').prepend(container);
          image.load(function(){
              container.show('blind', {}, 1000);
          });
      };
  }
  function file_select_handler(to_upload) {
      var progressbar = $('#progressbar');
      var status = $('#status');
      var xhr = new XMLHttpRequest();
      xhr.upload.addEventListener('loadstart', function(e1){
          status.text('uploading image');
          progressbar.progressbar({max: e1.total});
      });
      xhr.upload.addEventListener('progress', function(e1){
          if (progressbar.progressbar('option', 'max') == 0)
              progressbar.progressbar('option', 'max', e1.total);
          progressbar.progressbar('value', e1.loaded);
      });
      xhr.onreadystatechange = function(e1) {
          if (this.readyState == 4)  {
              if (this.status == 200)
                  var text = 'upload complete: ' + this.responseText;
              else
                  var text = 'upload failed: code ' + this.status;
              status.html(text + '<br/>Select an image');
              progressbar.progressbar('destroy');
          }
      };
      xhr.open('POST', '/post', true);
      xhr.send(to_upload);
  };
  function handle_hover(e) {
      e.originalEvent.stopPropagation();
      e.originalEvent.preventDefault();
      e.target.className = (e.type == 'dragleave' || e.type == 'drop') ? '' : 'hover';
  }
  $('#drop').bind('drop', function(e) {
      handle_hover(e);
      if (e.originalEvent.dataTransfer.files.length < 1) {
          return;
      }
      file_select_handler(e.originalEvent.dataTransfer.files[0]);
  }).bind('dragenter dragleave dragover', handle_hover);
  $('#file').change(function(e){
      file_select_handler(e.target.files[0]);
      e.target.value = '';
  });
  sse();
  var _gaq = _gaq || [];
  _gaq.push(['_setAccount', 'UA-510348-17']);
  _gaq.push(['_trackPageview']);
  (function() {
      var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
      ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
      var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
  })();
</script>
""" % (MAX_IMAGES, '\n'.join(images))
def test_supplied_feature_directory_no_steps(self):
    """setup_paths must raise ConfigError when the supplied feature
    directory contains no "steps" subdirectory."""
    config = create_mock_config()
    config.paths = ["features/group1"]
    config.verbose = True
    r = runner.Runner(config)

    fs = FsMock(
        "features/",
        "features/group1/",
        "features/group1/foo.feature",
    )
    with patch("os.path", fs), patch("os.walk", fs.walk):
        assert_raises(ConfigError, r.setup_paths)

    # The runner must have probed for the missing steps directory.
    ok_(("isdir", os.path.join(fs.base, "features", "steps")) in fs.calls)
def __eq__(self, other):
    """Two Match objects are equal when func and location both agree."""
    if isinstance(other, Match):
        return (self.func, self.location) == (other.func, other.location)
    return False
def it_should_not_try_to_decode_the_body(self):
    # The raw body must be passed through to the message untouched.
    assert self.message.body == self.body
def get_output_shape(self):
    """Output shape as (n_filters, width, height, mb_size).

    The width axis is a circular convolution, so it is simply the input
    width divided by the stride; the height axis is a 'valid'
    convolution: (input_height - filter_size + stride) // stride.
    """
    out_width = self.input_shape[1] // self.stride
    out_height = (self.input_shape[2] - self.filter_size + self.stride) // self.stride
    return (self.n_filters, out_width, out_height, self.mb_size)
def __init__(self, file=None, name=u'', url='', size=None):
    """Constructor.

    file: File object. Typically an io.StringIO.
    name: File basename.
    url: File URL.
    size: Optional explicit size in bytes; stored as ``_size`` when given.
    """
    super(VirtualFile, self).__init__(file, name)
    self.url = url
    # Only override the inherited size handling when given explicitly.
    if size is not None:
        self._size = size
@contract
def mad(data):
    """
    Calculate the Median Absolute Deviation from the data.

    :param data: The data to analyze.
    :type data: list(number)

    :return: The calculated MAD.
    :rtype: float
    """
    # MAD = median of absolute deviations from the data's median.
    center = median(data)
    deviations = [abs(center - value) for value in data]
    return float(median(deviations))
def _add_removed_links(self, section, removed_links):
    """Record a LinkChange for every removed link found in *section*.

    Each change is saved and appended to *removed_links* (mutated in
    place); None entries from the link scan are skipped.
    """
    for link in self._get_links(section):
        if link is None:
            continue
        link_change = LinkChange(diff=self.docdiff, link_from=link)
        link_change.save()
        removed_links.append(link_change)
@classmethod
def get_by_key(cls, key, content_type=None):
    """Fetch the notification type for *key*, creating it on demand.

    Results are memoized in the module-level cache so repeated lookups
    avoid database round-trips.
    """
    try:
        return _notification_type_cache[key]
    except KeyError:
        pass
    try:
        nt = cls.objects.get(key=key)
    except cls.DoesNotExist:
        nt = cls.objects.create(key=key, content_type=content_type)
    _notification_type_cache[key] = nt
    return nt
def escape():
    # Close auxiliary (non-file) windows, skipping the command-line window,
    # then restore focus to the window the user started in.
    if len(vim.windows) < 2:
        return

    cur = vfunc.winnr()
    # Walk windows bottom-up so closing one doesn't shift the numbers of
    # windows we have not visited yet.
    for n, w in reversed(list(enumerate(vim.windows, 1))):
        if not buffer_with_file(w.buffer):
            if not '[Command Line]'in w.buffer.name:
                focus_window(n)
                vim.command('q')
                if n != cur:
                    if cur > n:
                        # A window above the original was closed, so the
                        # original's window number shifted down by one.
                        cur -= 1
                    focus_window(cur)
                return
def peek(self, offset=0):
    """Return the character *offset* positions ahead without consuming it.

    Position validity is delegated to ``checkPos``.
    """
    target = self._pos + offset
    self.checkPos(target)
    return self._src[target]
def loop(self):
    """
    main game loop. returns the final score.
    """
    pause_key = self.board.PAUSE
    margins = {'left': 4, 'top': 4, 'bottom': 4}

    # Make sure the terminal cursor is restored however we exit.
    atexit.register(self.showCursor)

    try:
        self.hideCursor()
        while True:
            self.clearScreen()
            print(self.__str__(margins=margins))
            # Stop when the game is decided (won or no move possible).
            if self.board.won() or not self.board.canMove():
                break
            m = self.readMove()

            if (m == pause_key):
                # Pause: persist best score and try to save the game state.
                self.saveBestScore()
                if self.store():
                    print("Game successfully saved. "
                          "Resume it with `term2048 --resume`.")
                    return self.score
                print("An error ocurred while saving your game.")
                return
            self.incScore(self.board.move(m))

    except KeyboardInterrupt:
        # Ctrl-C: keep the best score but return no final score.
        self.saveBestScore()
        return

    self.saveBestScore()
    print('You won!' if self.board.won() else 'Game Over')
    return self.score
def spawn_workers(self):
    """\
    Spawn new workers as needed.

    This is where a worker process leaves the main loop
    of the master process.

    Spawns one worker for each slot between the configured
    ``num_workers`` and the number of currently tracked workers.
    """
    # len(dict) directly, instead of len(dict.keys()) which builds an
    # intermediate sequence for no benefit.
    for _ in range(self.num_workers - len(self.WORKERS)):
        self.spawn_worker()