def dist(a, b):
    return sum((i - j) ** 2 for i, j in zip(a, b))
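A quick usage sketch (not part of the original snippet): dist returns the squared Euclidean distance between two equal-length sequences.

# Example: 3**2 + 4**2 = 25, i.e. the squared distance, not its root.
assert dist([0, 0], [3, 4]) == 25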
def get_repository_root(cls, location):
    loc = super(Git, cls).get_repository_root(location)
    if loc:
        return loc
    try:
        r = cls.run_command(
            ['rev-parse', '--show-toplevel'],
            cwd=location,
            show_stdout=False,
            stdout_only=True,
            on_returncode='raise',
            log_failed_cmd=False,
        )
    except BadCommand:
        logger.debug("could not determine if %s is under git control "
                     "because git is not available", location)
        return None
    except InstallationError:
        return None
    return os.path.normpath(r.rstrip('\r\n'))
def get_spec(field, limit=10, query='', query_dsl=''):
    """Returns aggregation specs for a term of filtered events.

    The aggregation spec will summarize values of an attribute
    whose events fall under a filter.

    Args:
        field (str): this denotes the event attribute that is used
            for aggregation.
        limit (int): How many buckets to return, defaults to 10.
        query (str): the query field to run on all documents prior to
            aggregating the results.
        query_dsl (str): the query DSL field to run on all documents prior
            to aggregating the results (optional). Either a query string
            or a query DSL has to be present.

    Raises:
        ValueError: if neither query nor query_dsl is provided.

    Returns:
        a dict value that can be used as an aggregation spec.
    """
    if query:
        query_filter = {
            'bool': {
                'must': [
                    {
                        'query_string': {
                            'query': query
                        }
                    }
                ]
            }
        }
    elif query_dsl:
        query_filter = query_dsl
    else:
        raise ValueError('Neither query nor query DSL provided.')

    return {
        'query': query_filter,
        'aggs': {
            'aggregation': {
                'terms': {
                    'field': field,
                    'size': limit
                }
            }
        }
    }
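For illustration, a hedged sketch of the spec get_spec produces for a query string; the field name and query below are made up.

# Hypothetical call: aggregate the 'domain' field over events matching a query.
spec = get_spec(field='domain', limit=5, query='tag:"phishing"')
# spec == {
#     'query': {'bool': {'must': [{'query_string': {'query': 'tag:"phishing"'}}]}},
#     'aggs': {'aggregation': {'terms': {'field': 'domain', 'size': 5}}},
# }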
def __call__(self, env, start_response):
    return env


def chart_title(self):
    """Returns a title for the chart."""
    if self.field:
        return 'Top filtered results for "{0:s}"'.format(self.field)
    return 'Top results for an unknown field after filtering'


def read(self, path):
    raise Exception('read called with %r' % path)
def run(
        self, field, query_string='', query_dsl='',
        supported_charts='table', start_time='', end_time='', limit=10):
    """Run the aggregation.

    Args:
        field (str): this denotes the event attribute that is used
            for aggregation.
        query_string (str): the query field to run on all documents prior
            to aggregating the results.
        query_dsl (str): the query DSL field to run on all documents prior
            to aggregating the results. Either a query string or a query
            DSL has to be present.
        supported_charts: Chart type to render. Defaults to table.
        start_time: Optional ISO formatted date string that limits the time
            range for the aggregation.
        end_time: Optional ISO formatted date string that limits the time
            range for the aggregation.
        limit (int): How many buckets to return, defaults to 10.

    Returns:
        Instance of interface.AggregationResult with aggregation result.

    Raises:
        ValueError: if neither query_string nor query_dsl is provided.
    """
    if not (query_string or query_dsl):
        raise ValueError('Both query_string and query_dsl are missing')

    self.field = field
    formatted_field_name = self.format_field_by_type(field)

    aggregation_spec = get_spec(
        field=formatted_field_name, limit=limit, query=query_string,
        query_dsl=query_dsl)
    aggregation_spec = self._add_query_to_aggregation_spec(
        aggregation_spec, start_time=start_time, end_time=end_time)

    # Encoding information for Vega-Lite.
    encoding = {
        'x': {
            'field': field,
            'type': 'nominal',
            'sort': {
                'op': 'sum',
                'field': 'count',
                'order': 'descending'
            }
        },
        'y': {'field': 'count', 'type': 'quantitative'},
        'tooltip': [
            {'field': field, 'type': 'nominal'},
            {'field': 'count', 'type': 'quantitative'}],
    }

    response = self.opensearch_aggregation(aggregation_spec)
    aggregations = response.get('aggregations', {})
    aggregation = aggregations.get('aggregation', {})

    buckets = aggregation.get('buckets', [])
    values = []
    for bucket in buckets:
        d = {
            field: bucket.get('key', 'N/A'),
            'count': bucket.get('doc_count', 0)
        }
        values.append(d)

    if query_string:
        extra_query_url = 'AND {0:s}'.format(query_string)
    else:
        extra_query_url = ''

    return interface.AggregationResult(
        encoding=encoding, values=values, chart_type=supported_charts,
        sketch_url=self._sketch_url, field=field,
        extra_query_url=extra_query_url)
def read(self, path):
    return False


def items(self, section_name):
    if section_name != section:
        raise NoSectionError(section_name)
    return {
        'memcache_servers': memcache_servers,
        'memcache_serialization_support': memcache_serialization_support,
        'memcache_max_connections': memcache_max_connections,
    }


def read(self, path):
    return True
def get(self, section, option):
    if _section == section:
        if option == 'memcache_servers':
            if _srvs == 'error':
                raise NoOptionError(option, section)
            return _srvs
        elif option == 'memcache_serialization_support':
            if _sers == 'error':
                raise NoOptionError(option, section)
            return _sers
        elif option in ('memcache_max_connections', 'max_connections'):
            if _maxc == 'error':
                raise NoOptionError(option, section)
            return _maxc
        else:
            raise NoOptionError(option, section)
    else:
        raise NoSectionError(option)


def start_response(*args):
    pass


def setUp(self):
    self.app = memcache.MemcacheMiddleware(FakeApp(), {})
def test_cache_middleware(self):
    req = Request.blank('/something', environ={'REQUEST_METHOD': 'GET'})
    resp = self.app(req.environ, start_response)
    self.assertTrue('swift.cache' in resp)
    self.assertTrue(isinstance(resp['swift.cache'], MemcacheRing))


def test_conf_inline_ratelimiting(self):
    with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
        app = memcache.MemcacheMiddleware(
            FakeApp(),
            {'error_suppression_limit': '5',
             'error_suppression_interval': '2.5'})
    self.assertEqual(app.memcache._error_limit_count, 5)
    self.assertEqual(app.memcache._error_limit_time, 2.5)
    self.assertEqual(app.memcache._error_limit_duration, 2.5)


def test_conf_inline_tls(self):
    fake_context = mock.Mock()
    with mock.patch.object(ssl, 'create_default_context',
                           return_value=fake_context):
        with mock.patch.object(memcache, 'ConfigParser',
                               get_config_parser()):
            memcache.MemcacheMiddleware(
                FakeApp(),
                {'tls_enabled': 'true',
                 'tls_cafile': 'cafile',
                 'tls_certfile': 'certfile',
                 'tls_keyfile': 'keyfile'})
            ssl.create_default_context.assert_called_with(cafile='cafile')
            fake_context.load_cert_chain.assert_called_with('certfile',
                                                            'keyfile')


def test_filter_factory(self):
    factory = memcache.filter_factory({'max_connections': '3'},
                                      memcache_servers='10.10.10.10:10',
                                      memcache_serialization_support='1')
    thefilter = factory('myapp')
    self.assertEqual(thefilter.app, 'myapp')
    self.assertEqual(thefilter.memcache_servers, '10.10.10.10:10')
    self.assertEqual(thefilter.memcache._allow_pickle, False)
    self.assertEqual(thefilter.memcache._allow_unpickle, True)
    self.assertEqual(
        thefilter.memcache._client_cache['10.10.10.10:10'].max_size, 3)


def _loadapp(self, proxy_config_path):
    """
    Load a proxy from an app.conf to get the memcache_ring

    :returns: the memcache_ring of the memcache middleware filter
    """
    with mock.patch('swift.proxy.server.Ring'):
        app = loadapp(proxy_config_path)

    memcache_ring = None
    while True:
        memcache_ring = getattr(app, 'memcache', None)
        if memcache_ring:
            break
        app = app.app
    return memcache_ring


def test_real_config(self, tempdir):
    config = """
    [pipeline:main]
    pipeline = cache proxy-server

    [app:proxy-server]
    use = egg:swift#proxy

    [filter:cache]
    use = egg:swift#memcache
    """
    config_path = os.path.join(tempdir, 'test.conf')
    with open(config_path, 'w') as f:
        f.write(dedent(config))
    memcache_ring = self._loadapp(config_path)
    # only one server by default
    self.assertEqual(list(memcache_ring._client_cache.keys()),
                     ['127.0.0.1:11211'])
    # extra options
    self.assertEqual(memcache_ring._connect_timeout, 0.3)
    self.assertEqual(memcache_ring._pool_timeout, 1.0)
    # tries is limited to server count
    self.assertEqual(memcache_ring._tries, 1)
    self.assertEqual(memcache_ring._io_timeout, 2.0)


def test_real_config_with_options(self, tempdir):
    config = """
    [pipeline:main]
    pipeline = cache proxy-server

    [app:proxy-server]
    use = egg:swift#proxy

    [filter:cache]
    use = egg:swift#memcache
    memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
        10.0.0.4:11211
    connect_timeout = 1.0
    pool_timeout = 0.5
    tries = 4
    io_timeout = 1.0
    tls_enabled = true
    """
    config_path = os.path.join(tempdir, 'test.conf')
    with open(config_path, 'w') as f:
        f.write(dedent(config))
    memcache_ring = self._loadapp(config_path)
    self.assertEqual(sorted(memcache_ring._client_cache.keys()),
                     ['10.0.0.%d:11211' % i for i in range(1, 5)])
    # extra options
    self.assertEqual(memcache_ring._connect_timeout, 1.0)
    self.assertEqual(memcache_ring._pool_timeout, 0.5)
    # tries is limited to server count
    self.assertEqual(memcache_ring._tries, 4)
    self.assertEqual(memcache_ring._io_timeout, 1.0)
    self.assertEqual(memcache_ring._error_limit_count, 10)
    self.assertEqual(memcache_ring._error_limit_time, 60)
    self.assertEqual(memcache_ring._error_limit_duration, 60)
    self.assertIsInstance(
        list(memcache_ring._client_cache.values())[0]._tls_context,
        ssl.SSLContext)
def __init__(self):
    # Fix some key bindings
    self.bind("<Control-Key-a>", self.select_all)
    # We will need Ctrl-/ for the "stroke", but it cannot be unbound, so
    # let's prevent it from being passed to the standard handler
    self.bind("<Control-Key-/>", lambda event: "break")
    # Diacritical bindings
    for a, k in self.accents:
        # Little-known feature of Tk, it allows to bind an event to
        # multiple keystrokes
        self.bind("<Control-Key-%s><Key>" % k,
                  lambda event, a=a: self.insert_accented(event.char, a))


def call_vision_api(image_filename, api_keys):
    api_key = api_keys['microsoft']
    post_url = "https://api.projectoxford.ai/vision/v1.0/analyze?visualFeatures=Categories,Tags,Description,Faces,ImageType,Color,Adult&subscription-key=" + api_key
    image_data = open(image_filename, 'rb').read()
    result = requests.post(post_url, data=image_data,
                           headers={'Content-Type': 'application/octet-stream'})
    result.raise_for_status()
    return result.text
def insert_accented(self, c, accent):
    if c.isalpha():
        if c.isupper():
            cap = 'capital'
        else:
            cap = 'small'
        try:
            c = lookup("latin %s letter %c with %s" % (cap, c, accent))
            self.insert(INSERT, c)
            # Prevent plain letter from being inserted too, tell Tk to
            # stop handling this event
            return "break"
        except KeyError:
            pass
def get_standardized_result(api_result):
    output = {
        'tags': [],
        'captions': [],
def __init__(self, master=None, **kwargs):
    # Pass the given master through instead of hard-coding None.
    Entry.__init__(self, master, **kwargs)
    Diacritical.__init__(self)
def select_all(self, event=None):
    self.selection_range(0, END)
    return "break"
def __init__(self, master=None, **kwargs):
    # Pass the given master through instead of hard-coding None.
    ScrolledText.__init__(self, master, **kwargs)
    Diacritical.__init__(self)
def select_all(self, event=None):
    self.tag_add(SEL, "1.0", "end-1c")
    self.mark_set(INSERT, "1.0")
    self.see(INSERT)
    return "break"


def test():
    frame = Frame()
    frame.pack(fill=BOTH, expand=YES)
    if os.name == "nt":
        # Set default font for all widgets; use Windows typical default
        frame.option_add("*font", "Tahoma 8")
    # The editors
    entry = DiacriticalEntry(frame)
    entry.pack(fill=BOTH, expand=YES)
    text = DiacriticalText(frame, width=76, height=25, wrap=WORD)
    if os.name == "nt":
        # But this looks better than the default set above
        text.config(font="Arial 10")
    text.pack(fill=BOTH, expand=YES)
    text.focus()
    frame.master.title("Diacritical Editor")
    frame.mainloop()
def loaded(cls):
    return 'cudf' in sys.modules


def applies(cls, obj):
    if not cls.loaded():
        return False
    import cudf
    return isinstance(obj, (cudf.DataFrame, cudf.Series))
def init(cls, eltype, data, kdims, vdims):
    import cudf
    import pandas as pd

    element_params = eltype.param.objects()
    kdim_param = element_params['kdims']
    vdim_param = element_params['vdims']

    if isinstance(data, (cudf.Series, pd.Series)):
        data = data.to_frame()

    if not isinstance(data, cudf.DataFrame):
        data, _, _ = PandasInterface.init(eltype, data, kdims, vdims)
        data = cudf.from_pandas(data)

    columns = list(data.columns)
    ncols = len(columns)
    index_names = [data.index.name]
    if index_names == [None]:
        index_names = ['index']

    if eltype._auto_indexable_1d and ncols == 1 and kdims is None:
        kdims = list(index_names)

    if isinstance(kdim_param.bounds[1], int):
        ndim = min([kdim_param.bounds[1], len(kdim_param.default)])
    else:
        ndim = None
    nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None
    if kdims and vdims is None:
        vdims = [c for c in columns if c not in kdims]
    elif vdims and kdims is None:
        kdims = [c for c in columns if c not in vdims][:ndim]
    elif kdims is None:
        kdims = list(columns[:ndim])
        if vdims is None:
            vdims = [d for d in columns[ndim:((ndim+nvdim) if nvdim else None)]
                     if d not in kdims]
    elif kdims == [] and vdims is None:
        vdims = list(columns[:nvdim if nvdim else None])

    # Handle reset of index if kdims reference index by name
    for kd in kdims:
        kd = dimension_name(kd)
        if kd in columns:
            continue
        if any(kd == ('index' if name is None else name)
               for name in index_names):
            data = data.reset_index()
            break
    if any(isinstance(d, (np.int64, int)) for d in kdims+vdims):
        raise DataError("cudf DataFrame column names used as dimensions "
                        "must be strings not integers.", cls)

    if kdims:
        kdim = dimension_name(kdims[0])
        if eltype._auto_indexable_1d and ncols == 1 and kdim not in columns:
            data = data.copy()
            data.insert(0, kdim, np.arange(len(data)))

    for d in kdims+vdims:
        d = dimension_name(d)
        if len([c for c in columns if c == d]) > 1:
            raise DataError('Dimensions may not reference duplicated DataFrame '
                            'columns (found duplicate %r columns). If you want to plot '
                            'a column against itself simply declare two dimensions '
                            'with the same name. ' % d, cls)

    return data, {'kdims': kdims, 'vdims': vdims}, {}
def range(cls, dataset, dimension):
    dimension = dataset.get_dimension(dimension, strict=True)
    column = dataset.data[dimension.name]
    if dimension.nodata is not None:
        column = cls.replace_value(column, dimension.nodata)
    if column.dtype.kind == 'O':
        return np.NaN, np.NaN
    else:
        return finite_range(column, column.min(), column.max())


def values(cls, dataset, dim, expanded=True, flat=True, compute=True,
           keep_index=False):
    dim = dataset.get_dimension(dim, strict=True)
    data = dataset.data[dim.name]
    if not expanded:
        data = data.unique()
        return data.values_host if compute else data.values
    elif keep_index:
        return data
    elif compute:
        return data.values_host
    try:
        return data.values
    except Exception:
        return data.values_host
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
    # Get dimensions information
    dimensions = [dataset.get_dimension(d).name for d in dimensions]
    kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]

    # Update the kwargs appropriately for Element group types
    group_kwargs = {}
    group_type = dict if group_type == 'raw' else group_type
    if issubclass(group_type, Element):
        group_kwargs.update(util.get_param_values(dataset))
        group_kwargs['kdims'] = kdims
    group_kwargs.update(kwargs)

    # Propagate dataset
    group_kwargs['dataset'] = dataset.dataset

    # Find all the keys along supplied dimensions
    keys = product(*(dataset.data[d].unique().values_host
                     for d in dimensions))

    # Iterate over the unique entries applying selection masks
    grouped_data = []
    for unique_key in util.unique_iterator(keys):
        group_data = dataset.select(**dict(zip(dimensions, unique_key)))
        if not len(group_data):
            continue
        group_data = group_type(group_data, **group_kwargs)
        grouped_data.append((unique_key, group_data))

    if issubclass(container_type, NdMapping):
        with item_check(False), sorted_context(False):
            kdims = [dataset.get_dimension(d) for d in dimensions]
            return container_type(grouped_data, kdims=kdims)
    else:
        return container_type(grouped_data)
def select_mask(cls, dataset, selection):
    """
    Given a Dataset object and a dictionary with dimension keys and
    selection keys (i.e. tuple ranges, slices, sets, lists, or literals)
    return a boolean mask over the rows in the Dataset object that
    have been selected.
    """
    mask = None
    for dim, sel in selection.items():
        if isinstance(sel, tuple):
            sel = slice(*sel)
        arr = cls.values(dataset, dim, keep_index=True)
        if util.isdatetime(arr) and util.pd:
            try:
                sel = util.parse_datetime_selection(sel)
            except Exception:
                pass
        new_masks = []
        if isinstance(sel, slice):
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', r'invalid value encountered')
                if sel.start is not None:
                    new_masks.append(sel.start <= arr)
                if sel.stop is not None:
                    new_masks.append(arr < sel.stop)
            if not new_masks:
                continue
            new_mask = new_masks[0]
            for imask in new_masks[1:]:
                new_mask &= imask
        elif isinstance(sel, (set, list)):
            for v in sel:
                new_masks.append(arr == v)
            if not new_masks:
                continue
            new_mask = new_masks[0]
            for imask in new_masks[1:]:
                new_mask |= imask
        elif callable(sel):
            new_mask = sel(arr)
        else:
            new_mask = arr == sel

        if mask is None:
            mask = new_mask
        else:
            mask &= new_mask
    return mask
def select(cls, dataset, selection_mask=None, **selection):
    df = dataset.data
    if selection_mask is None:
        selection_mask = cls.select_mask(dataset, selection)

    indexed = cls.indexed(dataset, selection)
    if selection_mask is not None:
        df = df.loc[selection_mask]
    if indexed and len(df) == 1 and len(dataset.vdims) == 1:
        return df[dataset.vdims[0].name].iloc[0]
    return df


def concat_fn(cls, dataframes, **kwargs):
    import cudf
    return cudf.concat(dataframes, **kwargs)


def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
    data = dataset.data.copy()
    if dimension.name not in data:
        data[dimension.name] = values
    return data
def aggregate(cls, dataset, dimensions, function, **kwargs):
    data = dataset.data
    cols = [d.name for d in dataset.kdims if d in dimensions]
    vdims = dataset.dimensions('value', label='name')
    reindexed = data[cols + vdims]
    agg = function.__name__
    if len(dimensions):
        agg_map = {'amin': 'min', 'amax': 'max'}
        agg = agg_map.get(agg, agg)
        grouped = reindexed.groupby(cols, sort=False)
        if not hasattr(grouped, agg):
            raise ValueError('%s aggregation is not supported on cudf DataFrame.' % agg)
        df = getattr(grouped, agg)().reset_index()
    else:
        agg_map = {'amin': 'min', 'amax': 'max', 'size': 'count'}
        agg = agg_map.get(agg, agg)
        if not hasattr(reindexed, agg):
            raise ValueError('%s aggregation is not supported on cudf DataFrame.' % agg)
        agg = getattr(reindexed, agg)()
        data = dict(((col, [v]) for col, v in
                     zip(agg.index.values_host, agg.to_array())))
        df = util.pd.DataFrame(data, columns=list(agg.index.values_host))

    dropped = []
    for vd in vdims:
        if vd not in df.columns:
            dropped.append(vd)

    return df, dropped
def iloc(cls, dataset, index):
    import cudf

    rows, cols = index
    scalar = False
    columns = list(dataset.data.columns)
    if isinstance(cols, slice):
        cols = [d.name for d in dataset.dimensions()][cols]
    elif np.isscalar(cols):
        scalar = np.isscalar(rows)
        cols = [dataset.get_dimension(cols).name]
    else:
        cols = [dataset.get_dimension(d).name for d in index[1]]
    col_index = [columns.index(c) for c in cols]
    if np.isscalar(rows):
        rows = [rows]

    if scalar:
        return dataset.data[cols[0]].iloc[rows[0]]
    result = dataset.data.iloc[rows, col_index]

    # cuDF does not handle single rows and cols indexing correctly
    # as of cudf=0.10.0 so we have to convert Series back to DataFrame
    if isinstance(result, cudf.Series):
        if len(cols) == 1:
            result = result.to_frame(cols[0])
        else:
            result = result.to_frame().T
    return result


def sort(cls, dataset, by=[], reverse=False):
    cols = [dataset.get_dimension(d, strict=True).name for d in by]
    return dataset.data.sort_values(by=cols, ascending=not reverse)


def dframe(cls, dataset, dimensions):
    if dimensions:
        return dataset.data[dimensions].to_pandas()
    else:
        return dataset.data.to_pandas()
def __init__(self, key):
    self.key = key
    self.prio = random.randint(0, 1000000000)
    self.size = 1
    self.left = None
    self.right = None


def GetCompletions_Basic_test( app ):
    filepath = PathToTestFile( 'basic.py' )
    completion_data = BuildRequest( filepath = filepath,
                                    filetype = 'python',
                                    contents = ReadFile( filepath ),
                                    line_num = 7,
                                    column_num = 3 )

    results = app.post_json( '/completions',
                             completion_data ).json[ 'completions' ]
    assert_that( results,
                 has_items(
                   CompletionEntryMatcher( 'a' ),
                   CompletionEntryMatcher( 'b' ),
                   CompletionLocationMatcher( 'line_num', 3 ),
                   CompletionLocationMatcher( 'line_num', 4 ),
                   CompletionLocationMatcher( 'column_num', 10 ),
                   CompletionLocationMatcher( 'filepath', filepath ) ) )


def update(self):
    self.size = 1 + size(self.left) + size(self.right)


def GetCompletions_UnicodeDescription_test( app ):
    filepath = PathToTestFile( 'unicode.py' )
    completion_data = BuildRequest( filepath = filepath,
                                    filetype = 'python',
                                    contents = ReadFile( filepath ),
                                    force_semantic = True,
                                    line_num = 5,
                                    column_num = 3 )

    results = app.post_json( '/completions',
                             completion_data ).json[ 'completions' ]
    assert_that( results, has_item(
      has_entry( 'detailed_info', contains_string( u'aafäö' ) ) ) )


def size(treap):
    return 0 if treap is None else treap.size


def CombineRequest( request, data ):
    kw = request
    request.update( data )
    return BuildRequest( **kw )


def split(root, minRight):
    if root is None:
        return None, None
    if root.key >= minRight:
        left, right = split(root.left, minRight)
        root.left = right
        root.update()
        return left, root
    else:
        left, right = split(root.right, minRight)
        root.right = left
        root.update()
        return root, right


def GetCompletions_NoSuggestions_Fallback_test( app ):
    # Python completer doesn't raise NO_COMPLETIONS_MESSAGE, so this is a
    # different code path to the Clang completer cases

    # TESTCASE2 (general_fallback/lang_python.py)
    RunTest( app, {
      'description': 'param jedi does not know about (id). query="a_p"',
      'request': {
        'filetype'      : 'python',
        'filepath'      : PathToTestFile( 'general_fallback',
                                          'lang_python.py' ),
        'line_num'      : 28,
        'column_num'    : 20,
        'force_semantic': False,
      },
      'expect': {
        'response': http.client.OK,
        'data': has_entries( {
          'completions': contains(
            CompletionEntryMatcher( 'a_parameter', '[ID]' ),
            CompletionEntryMatcher( 'another_parameter', '[ID]' ),
          ),
          'errors': empty(),
        } )
      },
    } )


def merge(left, right):
    if left is None:
        return right
    if right is None:
        return left
    if left.prio > right.prio:
        left.right = merge(left.right, right)
        left.update()
        return left
    else:
        right.left = merge(left, right.left)
        right.update()
        return right


def insert(root, key):
    left, right = split(root, key)
    return merge(merge(left, Treap(key)), right)


def remove(root, key):
    left, right = split(root, key)
    return merge(left, split(right, key + 1)[1])


def kth(root, k):
    if k < size(root.left):
        return kth(root.left, k)
    elif k > size(root.left):
        return kth(root.right, k - size(root.left) - 1)
    return root.key


def dfs_print(root):
    if root is None:
        return
    dfs_print(root.left)
    print(str(root.key) + ' ', end='')
    dfs_print(root.right)
def test():
    start = time.time()
    treap = None
    s = set()
    for i in range(100000):
        key = random.randint(0, 10000)
        if random.randint(0, 1) == 0:
            if key in s:
                treap = remove(treap, key)
                s.remove(key)
        elif key not in s:
            treap = insert(treap, key)
            s.add(key)
    assert len(s) == size(treap)
    for i in range(size(treap)):
        assert kth(treap, i) in s
    print(time.time() - start)
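A minimal usage sketch for the treap functions above; it assumes only the Treap class and helpers defined in this section.

# Build a treap from a few keys, delete one, then read keys back in
# sorted order via the order-statistic query kth().
root = None
for key in (5, 1, 9, 3):
    root = insert(root, key)
root = remove(root, 9)
assert size(root) == 3
assert [kth(root, i) for i in range(size(root))] == [1, 3, 5]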
def __init__(self, region, name, retention_in_days=7):
    super(LogGroup, self).__init__()
    self.region = region
    self.name = name
    self.retention_in_days = retention_in_days


def _Create(self):
    """Create the log group."""
    create_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'create-log-group',
        '--log-group-name', self.name
    ]
    vm_util.IssueCommand(create_cmd)


def _Delete(self):
    """Delete the log group."""
    delete_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'delete-log-group',
        '--log-group-name', self.name
    ]
    vm_util.IssueCommand(delete_cmd, raise_on_failure=False)


def Exists(self):
    """Returns True if the log group exists."""
    describe_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'describe-log-groups',
        '--log-group-name-prefix', self.name,
        '--no-paginate'
    ]
    stdout, _, _ = vm_util.IssueCommand(describe_cmd)
    log_groups = json.loads(stdout)['logGroups']
    group = next((group for group in log_groups
                  if group['logGroupName'] == self.name), None)
    return bool(group)


def _PostCreate(self):
    """Set the retention policy."""
    put_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'put-retention-policy',
        '--log-group-name', self.name,
        '--retention-in-days', str(self.retention_in_days)
    ]
    vm_util.IssueCommand(put_cmd)
def GetLogs(region, stream_name, group_name, token=None):
    """Fetches the JSON formatted log stream starting at the token."""
    get_cmd = util.AWS_PREFIX + [
        '--region', region,
        'logs', 'get-log-events',
        '--start-from-head',
        '--log-group-name', group_name,
        '--log-stream-name', stream_name,
    ]
    if token:
        get_cmd.extend(['--next-token', token])
    stdout, _, _ = vm_util.IssueCommand(get_cmd)
    return json.loads(stdout)
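GetLogs fetches a single page; below is a minimal sketch (not from the original module) of draining a whole stream by following the nextForwardToken that get-log-events returns, which repeats unchanged once the stream is exhausted.

def GetAllLogEvents(region, stream_name, group_name):
    """Collects every event in a stream by paging through GetLogs."""
    events, token = [], None
    while True:
        response = GetLogs(region, stream_name, group_name, token)
        events.extend(response.get('events', []))
        next_token = response.get('nextForwardToken')
        # The API returns the same token once there is nothing left to read.
        if not next_token or next_token == token:
            return events
        token = next_token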
def begin(self):
    self.append({'cbs': [], 'dirty': False})


def check_fresh_login():
    """Checks if the login is fresh for the current user, otherwise the
    user has to reauthenticate."""
    if not login_fresh():
        return current_app.login_manager.needs_refresh()
def commit(self):
    context = self.pop()
    if self:
        # savepoint
        self[-1]['cbs'].extend(context['cbs'])
        self[-1]['dirty'] = self[-1]['dirty'] or context['dirty']
    else:
        # transaction
        for func, args, kwargs in context['cbs']:
            func(*args, **kwargs)


def overview():
    # user and group stats
    banned_users = User.query.filter(
        Group.banned == True,
        Group.id == User.primary_group_id
    ).count()
    if not current_app.config["REDIS_ENABLED"]:
        online_users = User.query.filter(User.lastseen >= time_diff()).count()
    else:
        online_users = len(get_online_users())

    stats = {
        # user stats
        "all_users": User.query.count(),
        "banned_users": banned_users,
        "online_users": online_users,
        "all_groups": Group.query.count(),
        # forum stats
        "report_count": Report.query.count(),
        "topic_count": Topic.query.count(),
        "post_count": Post.query.count(),
        # misc stats
        "plugins": get_all_plugins(),
        "python_version": "%s.%s" % (sys.version_info[0],
                                     sys.version_info[1]),
        "flask_version": flask_version,
        "flaskbb_version": flaskbb_version
    }

    return render_template("management/overview.html", **stats)


def rollback(self):
    self.pop()
def settings(slug=None):
    slug = slug if slug else "general"

    # get the currently active group
    active_group = SettingsGroup.query.filter_by(key=slug).first_or_404()
    # get all groups - used to build the navigation
    all_groups = SettingsGroup.query.all()

    SettingsForm = Setting.get_form(active_group)

    old_settings = Setting.get_settings(active_group)
    new_settings = {}

    form = SettingsForm()

    if form.validate_on_submit():
        for key, values in iteritems(old_settings):
            try:
                # check if the value has changed
                if values['value'] == form[key].data:
                    continue
                else:
                    new_settings[key] = form[key].data
            except KeyError:
                pass
        Setting.update(settings=new_settings, app=current_app)
        flash(_("Settings saved."), "success")
    else:
        for key, values in iteritems(old_settings):
            try:
                form[key].data = values['value']
            except (KeyError, ValueError):
                pass

    return render_template("management/settings.html", form=form,
                           all_groups=all_groups, active_group=active_group)


def push(self, item):
    self[-1]['cbs'].append(item)


def users():
    page = request.args.get("page", 1, type=int)
    search_form = UserSearchForm()

    if search_form.validate():
        users = search_form.get_results().\
            paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
        return render_template("management/users.html", users=users,
                               search_form=search_form)

    users = User.query.\
        order_by(User.id.asc()).\
        paginate(page, flaskbb_config['USERS_PER_PAGE'], False)

    return render_template("management/users.html", users=users,
                           search_form=search_form)


def mark_dirty(self):
    self[-1]['dirty'] = True
def edit_user(user_id):
    user = User.query.filter_by(id=user_id).first_or_404()

    if not Permission(CanEditUser, identity=current_user):
        flash(_("You are not allowed to edit this user."), "danger")
        return redirect(url_for("management.users"))

    member_group = db.and_(*[db.not_(getattr(Group, p)) for p in
                             ['admin', 'mod', 'super_mod', 'banned',
                              'guest']])

    filt = db.or_(
        Group.id.in_(g.id for g in current_user.groups), member_group
    )

    if Permission(IsAtleastSuperModerator, identity=current_user):
        filt = db.or_(filt, Group.mod)

    if Permission(IsAdmin, identity=current_user):
        filt = db.or_(filt, Group.admin, Group.super_mod)

    if Permission(CanBanUser, identity=current_user):
        filt = db.or_(filt, Group.banned)

    group_query = Group.query.filter(filt)

    form = EditUserForm(user)
    form.primary_group.query = group_query
    form.secondary_groups.query = group_query
    if form.validate_on_submit():
        form.populate_obj(user)
        user.primary_group_id = form.primary_group.data.id

        # Don't override the password
        if form.password.data:
            user.password = form.password.data

        user.save(groups=form.secondary_groups.data)

        flash(_("User updated."), "success")
        return redirect(url_for("management.edit_user", user_id=user.id))

    return render_template("management/user_form.html", form=form,
                           title=_("Edit User"))
def is_dirty(self):
    return any(context['dirty'] for context in self)
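A short sketch of how this callback stack behaves, assuming begin/commit/push above belong to a list subclass named TransactionState (as the defaultdict below suggests): callbacks queued under a savepoint only run when the outermost level commits.

state = TransactionState()
state.begin()                          # outer transaction
state.begin()                          # nested savepoint
state.push((print, ('queued',), {}))   # queued, not executed yet
state.commit()                         # savepoint: callbacks move to parent
state.commit()                         # transaction: print('queued') runs now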
def delete_user(user_id=None):
    # ajax request
    if request.is_xhr:
        ids = request.get_json()["ids"]

        data = []
        for user in User.query.filter(User.id.in_(ids)).all():
            # do not delete current user
            if current_user.id == user.id:
                continue

            if user.delete():
                data.append({
                    "id": user.id,
                    "type": "delete",
                    "reverse": False,
                    "reverse_name": None,
                    "reverse_url": None
                })

        return jsonify(
            message="{} users deleted.".format(len(data)),
            category="success",
            data=data,
            status=200
        )

    user = User.query.filter_by(id=user_id).first_or_404()

    if current_user.id == user.id:
        flash(_("You cannot delete yourself."), "danger")
        return redirect(url_for("management.users"))

    user.delete()
    flash(_("User deleted."), "success")
    return redirect(url_for("management.users"))
def __init__(self):
    super(TransactionStates, self).__init__()
    self._states = defaultdict(TransactionState)


def add_user():
    form = AddUserForm()
    if form.validate_on_submit():
        form.save()
        flash(_("User added."), "success")
        return redirect(url_for("management.users"))

    return render_template("management/user_form.html", form=form,
                           title=_("Add User"))


def __getitem__(self, key):
    return self._states[key or DEFAULT_DB_ALIAS]


def banned_users():
    page = request.args.get("page", 1, type=int)
    search_form = UserSearchForm()

    users = User.query.filter(
        Group.banned == True,
        Group.id == User.primary_group_id
    ).paginate(page, flaskbb_config['USERS_PER_PAGE'], False)

    if search_form.validate():
        users = search_form.get_results().\
            paginate(page, flaskbb_config['USERS_PER_PAGE'], False)

        return render_template("management/banned_users.html", users=users,
                               search_form=search_form)

    return render_template("management/banned_users.html", users=users,
                           search_form=search_form)


def is_dirty(self, dbs):
    return any(self[db].is_dirty() for db in dbs)
def ban_user(user_id=None):
    if not Permission(CanBanUser, identity=current_user):
        flash(_("You do not have the permissions to ban this user."),
              "danger")
        return redirect(url_for("management.overview"))

    # ajax request
    if request.is_xhr:
        ids = request.get_json()["ids"]

        data = []
        users = User.query.filter(User.id.in_(ids)).all()
        for user in users:
            # don't let a user ban himself and do not allow a moderator
            # to ban an admin user
            if (
                current_user.id == user.id or
                Permission(IsAdmin, identity=user) and
                Permission(Not(IsAdmin), current_user)
            ):
                continue
            elif user.ban():
                data.append({
                    "id": user.id,
                    "type": "ban",
                    "reverse": "unban",
                    "reverse_name": _("Unban"),
                    "reverse_url": url_for("management.unban_user",
                                           user_id=user.id)
                })

        return jsonify(
            message="{} users banned.".format(len(data)),
            category="success",
            data=data,
            status=200
        )

    user = User.query.filter_by(id=user_id).first_or_404()

    # Do not allow moderators to ban admins
    if Permission(IsAdmin, identity=user) and \
            Permission(Not(IsAdmin), identity=current_user):
        flash(_("A moderator cannot ban an admin user."), "danger")
        return redirect(url_for("management.overview"))

    if not current_user.id == user.id and user.ban():
        flash(_("User is now banned."), "success")
    else:
        flash(_("Could not ban user."), "danger")

    return redirect(url_for("management.banned_users"))
def queue_when_in_transaction(call):
    if transaction_states[call.using]:
        transaction_states[call.using].push((call, (), {}))
    else:
        return call()


def unban_user(user_id=None):
    if not Permission(CanBanUser, identity=current_user):
        flash(_("You do not have the permissions to unban this user."),
              "danger")
        return redirect(url_for("management.overview"))

    # ajax request
    if request.is_xhr:
        ids = request.get_json()["ids"]

        data = []
        for user in User.query.filter(User.id.in_(ids)).all():
            if user.unban():
                data.append({
                    "id": user.id,
                    "type": "unban",
                    "reverse": "ban",
                    "reverse_name": _("Ban"),
                    "reverse_url": url_for("management.ban_user",
                                           user_id=user.id)
                })

        return jsonify(
            message="{} users unbanned.".format(len(data)),
            category="success",
            data=data,
            status=200
        )

    user = User.query.filter_by(id=user_id).first_or_404()

    if user.unban():
        flash(_("User is now unbanned."), "success")
    else:
        flash(_("Could not unban user."), "danger")

    return redirect(url_for("management.banned_users"))


def __enter__(self):
    entering = not transaction_states[self.using]
    transaction_states[self.using].begin()
    self._no_monkey.__enter__(self)
    if entering:
        on_commit(transaction_states[self.using].commit, self.using)


def reports():
    page = request.args.get("page", 1, type=int)
    reports = Report.query.\
        order_by(Report.id.asc()).\
        paginate(page, flaskbb_config['USERS_PER_PAGE'], False)

    return render_template("management/reports.html", reports=reports)


def __exit__(self, exc_type, exc_value, traceback):
    connection = get_connection(self.using)
    try:
        self._no_monkey.__exit__(self, exc_type, exc_value, traceback)
    except DatabaseError:
        transaction_states[self.using].rollback()
    else:
        if not connection.closed_in_transaction and exc_type is None and \
                not connection.needs_rollback:
            if transaction_states[self.using]:
                transaction_states[self.using].commit()
        else:
            transaction_states[self.using].rollback()


def unread_reports():
    page = request.args.get("page", 1, type=int)
    reports = Report.query.\
        filter(Report.zapped == None).\
        order_by(Report.id.desc()).\
        paginate(page, flaskbb_config['USERS_PER_PAGE'], False)

    return render_template("management/unread_reports.html", reports=reports)
def callproc(self, procname, params=None):
    result = self._no_monkey.callproc(self, procname, params)
    if transaction_states[self.db.alias]:
        transaction_states[self.db.alias].mark_dirty()
    return result


def report_markread(report_id=None):
    # AJAX request
    if request.is_xhr:
        ids = request.get_json()["ids"]
        data = []

        for report in Report.query.filter(Report.id.in_(ids)).all():
            report.zapped_by = current_user.id
            report.zapped = time_utcnow()
            report.save()
            data.append({
                "id": report.id,
                "type": "read",
                "reverse": False,
                "reverse_name": None,
                "reverse_url": None
            })

        return jsonify(
            message="{} reports marked as read.".format(len(data)),
            category="success",
            data=data,
            status=200
        )

    # mark single report as read
    if report_id:
        report = Report.query.filter_by(id=report_id).first_or_404()
        if report.zapped:
            flash(_("Report %(id)s is already marked as read.",
                    id=report.id), "success")
            return redirect(url_for("management.reports"))

        report.zapped_by = current_user.id
        report.zapped = time_utcnow()
        report.save()

        flash(_("Report %(id)s marked as read.", id=report.id), "success")
        return redirect(url_for("management.reports"))

    # mark all as read
    reports = Report.query.filter(Report.zapped == None).all()
    report_list = []
    for report in reports:
        report.zapped_by = current_user.id
        report.zapped = time_utcnow()
        report_list.append(report)

    db.session.add_all(report_list)
    db.session.commit()

    flash(_("All reports were marked as read."), "success")
    return redirect(url_for("management.reports"))


def execute(self, sql, params=None):
    result = self._no_monkey.execute(self, sql, params)
    if transaction_states[self.db.alias] and is_sql_dirty(sql):
        transaction_states[self.db.alias].mark_dirty()
    return result


def groups():
    page = request.args.get("page", 1, type=int)

    groups = Group.query.\
        order_by(Group.id.asc()).\
        paginate(page, flaskbb_config['USERS_PER_PAGE'], False)

    return render_template("management/groups.html", groups=groups)


def executemany(self, sql, param_list):
    result = self._no_monkey.executemany(self, sql, param_list)
    if transaction_states[self.db.alias] and is_sql_dirty(sql):
        transaction_states[self.db.alias].mark_dirty()
    return result


def edit_group(group_id):
    group = Group.query.filter_by(id=group_id).first_or_404()

    form = EditGroupForm(group)

    if form.validate_on_submit():
        form.populate_obj(group)
        group.save()

        if group.guest:
            Guest.invalidate_cache()

        flash(_("Group updated."), "success")
        return redirect(url_for("management.groups", group_id=group.id))

    return render_template("management/group_form.html", form=form,
                           title=_("Edit Group"))
def is_sql_dirty(sql):
    # This should not happen as using bytes in Python 3 is against db protocol,
    # but some people will pass it anyway
    if isinstance(sql, bytes):
        sql = sql.decode()
    # NOTE: not using regex here for speed
    sql = sql.lower()
    for action in ('update', 'insert', 'delete'):
        p = sql.find(action)
        if p == -1:
            continue
        start, end = p - 1, p + len(action)
        if (start < 0 or sql[start] not in CHARS) \
                and (end >= len(sql) or sql[end] not in CHARS):
            return True
    else:
        return False
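A few illustrative calls, assuming the module-level CHARS used above is the set of identifier characters; the word-boundary check keeps an 'update' inside a longer identifier from marking the transaction dirty.

CHARS = set('abcdefghijklmnopqrstuvwxyz0123456789_')  # assumed definition

assert is_sql_dirty('INSERT INTO t VALUES (1)')        # bare write keyword
assert not is_sql_dirty('SELECT * FROM t')             # no write keyword
assert not is_sql_dirty('SELECT updated_at FROM t')    # part of an identifier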
def delete_group(group_id=None):
    if request.is_xhr:
        ids = request.get_json()["ids"]
        if not (set(ids) & set(["1", "2", "3", "4", "5"])):
            data = []
            for group in Group.query.filter(Group.id.in_(ids)).all():
                group.delete()
                data.append({
                    "id": group.id,
                    "type": "delete",
                    "reverse": False,
                    "reverse_name": None,
                    "reverse_url": None
                })

            return jsonify(
                message="{} groups deleted.".format(len(data)),
                category="success",
                data=data,
                status=200
            )
        return jsonify(
            message=_("You cannot delete one of the standard groups."),
            category="danger",
            data=None,
            status=404
        )

    if group_id is not None:
        if group_id <= 5:  # there are 5 standard groups
            flash(_("You cannot delete the standard groups. "
                    "Try renaming it instead."), "danger")
            return redirect(url_for("management.groups"))

        group = Group.query.filter_by(id=group_id).first_or_404()
        group.delete()
        flash(_("Group deleted."), "success")
        return redirect(url_for("management.groups"))

    flash(_("No group chosen."), "danger")
    return redirect(url_for("management.groups"))
def add_group():
    form = AddGroupForm()
    if form.validate_on_submit():
        form.save()
        flash(_("Group added."), "success")
        return redirect(url_for("management.groups"))

    return render_template("management/group_form.html", form=form,
                           title=_("Add Group"))


def forums():
    categories = Category.query.order_by(Category.position.asc()).all()
    return render_template("management/forums.html", categories=categories)


def edit_forum(forum_id):
    forum = Forum.query.filter_by(id=forum_id).first_or_404()

    form = EditForumForm(forum)
    if form.validate_on_submit():
        form.save()
        flash(_("Forum updated."), "success")
        return redirect(url_for("management.edit_forum", forum_id=forum.id))
    else:
        if forum.moderators:
            form.moderators.data = ",".join([
                user.username for user in forum.moderators
            ])
        else:
            form.moderators.data = None

    return render_template("management/forum_form.html", form=form,
                           title=_("Edit Forum"))


def delete_forum(forum_id):
    forum = Forum.query.filter_by(id=forum_id).first_or_404()

    involved_users = User.query.filter(Topic.forum_id == forum.id,
                                       Post.user_id == User.id).all()

    forum.delete(involved_users)

    flash(_("Forum deleted."), "success")
    return redirect(url_for("management.forums"))


def add_forum(category_id=None):
    form = AddForumForm()

    if form.validate_on_submit():
        form.save()
        flash(_("Forum added."), "success")
        return redirect(url_for("management.forums"))
    else:
        form.groups.data = Group.query.order_by(Group.id.asc()).all()
        if category_id:
            category = Category.query.filter_by(id=category_id).first()
            form.category.data = category

    return render_template("management/forum_form.html", form=form,
                           title=_("Add Forum"))
def add_category():
    form = CategoryForm()

    if form.validate_on_submit():
        form.save()
        flash(_("Category added."), "success")
        return redirect(url_for("management.forums"))

    return render_template("management/category_form.html", form=form,
                           title=_("Add Category"))