Use an event to build a one-to-many relationship on a class.
def one_to_many(clsname, **kw):
    """Use an event to build a one-to-many relationship on a class.

    This makes use of the :meth:`.References._reference_table` method
    to generate a full foreign key relationship from the remote table.
    """
    @declared_attr
    def o2m(cls):
        cls._references((clsname, cls.__name__))
        return relationship(clsname, **kw)
    return o2m
Djeffifies string_to_djeff
def djeffify_string(string_to_djeff):
    """
    Djeffifies string_to_djeff
    """
    string_to_djeff = re.sub(r'^(?=[jg])', 'd', string_to_djeff, flags=re.IGNORECASE)      # first
    string_to_djeff = re.sub(r'[ ](?=[jg])', ' d', string_to_djeff, flags=re.IGNORECASE)   # spaces
    string_to_djeff = re.sub(r'[\n](?=[jg])', '\nd', string_to_djeff, flags=re.IGNORECASE) # \n
    return string_to_djeff
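A quick usage sketch, assuming re is imported alongside the helper; the sample input is illustrative:

    print(djeffify_string('jeff goes jogging'))  # -> 'djeff dgoes djogging'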
Djeffify data between tags
def handle_data(self, data):
    """
    Djeffify data between tags
    """
    if data.strip():
        data = djeffify_string(data)
        self.djhtml += data
Create a foreign key reference from the local class to the given remote table.
def _reference_table(cls, ref_table):
    """Create a foreign key reference from the local class to the given remote
    table.

    Adds column references to the declarative class and adds a
    ForeignKeyConstraint.
    """
    # create pairs of (Foreign key column, primary key column)
    cols = [(sa.Column(), refcol) for refcol in ref_table.primary_key]

    # set "tablename_colname = Foreign key Column" on the local class
    for col, refcol in cols:
        setattr(cls, "%s_%s" % (ref_table.name, refcol.name), col)

    # add a ForeignKeyConstraint([local columns], [remote columns])
    cls.__table__.append_constraint(sa.ForeignKeyConstraint(*zip(*cols)))
Try to run __json__ on the given object. Raise TypeError if __json__ is missing
def __try_to_json(self, request, attr):
    """
    Try to run __json__ on the given object.
    Raise TypeError if __json__ is missing

    :param request: Pyramid Request object
    :type request: <Request>
    :param obj: Object to JSONify
    :type obj: any object that has __json__ method
    :exception: TypeError
    """
    # check for __json__ method and try to JSONify
    if hasattr(attr, '__json__'):
        return attr.__json__(request)

    # raise error otherwise
    raise TypeError('__json__ method missing on %s' % str(attr))
Path join helper method. Join paths if list passed
def prepare_path(path):
    """
    Path join helper method
    Join paths if list passed

    :type path: str|unicode|list
    :rtype: str|unicode
    """
    if type(path) == list:
        return os.path.join(*path)
    return path
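A minimal usage sketch (POSIX path separators assumed):

    prepare_path(['static', 'css', 'style.css'])  # -> 'static/css/style.css'
    prepare_path('static/css/style.css')          # -> unchanged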
Read helper method
def read_from_file(file_path, encoding="utf-8"):
    """
    Read helper method

    :type file_path: str|unicode
    :type encoding: str|unicode
    :rtype: str|unicode
    """
    with codecs.open(file_path, "r", encoding) as f:
        return f.read()
Write helper method
def write_to_file(file_path, contents, encoding="utf-8"):
    """
    Write helper method

    :type file_path: str|unicode
    :type contents: str|unicode
    :type encoding: str|unicode
    """
    with codecs.open(file_path, "w", encoding) as f:
        f.write(contents)
Copy file helper method
def copy_file(src, dest):
    """
    Copy file helper method

    :type src: str|unicode
    :type dest: str|unicode
    """
    dir_path = os.path.dirname(dest)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    shutil.copy2(src, dest)
Split file name and extension
def get_path_extension(path):
    """
    Split file name and extension

    :type path: str|unicode
    :rtype: str|unicode
    """
    file_path, file_ext = os.path.splitext(path)
    return file_ext.lstrip('.')
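For example:

    get_path_extension('bundles/app.min.js')  # -> 'js'
    get_path_extension('README')              # -> '' (no extension)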
Helper method for absolute and relative path resolution. Split passed path and return each directory part
def split_path(path):
    """
    Helper method for absolute and relative paths resolution
    Split passed path and return each directory parts

    example: "/usr/share/dir"
    return: ["usr", "share", "dir"]

    @type path: one of (unicode, str)
    @rtype: list
    """
    result_parts = []
    # todo: check loops
    while path != "/":
        parts = os.path.split(path)
        if parts[1] == path:
            result_parts.insert(0, parts[1])
            break
        elif parts[0] == path:
            result_parts.insert(0, parts[0])
            break
        else:
            path = parts[0]
            result_parts.insert(0, parts[1])
    return result_parts
Creates fully qualified endpoint URIs.
def _create_api_uri(self, *parts):
    """Creates fully qualified endpoint URIs.

    :param parts: the string parts that form the request URI
    """
    return urljoin(self.API_URI, '/'.join(map(quote, parts)))
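For illustration, assuming a hypothetical API_URI such as 'https://api.example.com' and a hypothetical client instance (neither is shown in the snippet):

    client._create_api_uri('orders', '123')
    # urljoin + quote -> 'https://api.example.com/orders/123'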
Makes sure we have proper ISO 8601 time.
def _format_iso_time(self, time):
    """Makes sure we have proper ISO 8601 time.

    :param time: either an ISO 8601 string or a datetime.datetime
    :returns: ISO 8601 time
    :rtype: str
    """
    if isinstance(time, str):
        return time
    elif isinstance(time, datetime):
        return time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    else:
        return None
Returns the given response or raises an APIError for non-2xx responses.
def _handle_response(self, response):
    """Returns the given response or raises an APIError for non-2xx responses.

    :param requests.Response response: HTTP response
    :returns: requested data
    :rtype: requests.Response
    :raises APIError: for non-2xx responses
    """
    if not str(response.status_code).startswith('2'):
        raise get_api_error(response)
    return response
Checks if a next message is possible.
def _check_next(self):
    """Checks if a next message is possible.

    :returns: True if a next message is possible, otherwise False
    :rtype: bool
    """
    if self.is_initial:
        return True
    if self.before:
        return bool(self.before_cursor)
    return bool(self.after_cursor)
Colors text with code and given format
def _wrap_color(self, code, text, format=None, style=None):
    """ Colors text with code and given format """
    color = None
    if code[:3] == self.bg.PREFIX:
        color = self.bg.COLORS.get(code, None)
    if not color:
        color = self.fg.COLORS.get(code, None)
    if not color:
        raise Exception('Color code not found')

    if format and format not in self.formats:
        raise Exception('Color format not found')

    fmt = "0;"
    if format == 'bold':
        fmt = "1;"
    elif format == 'underline':
        fmt = "4;"

    # Manage the format
    parts = color.split('[')
    color = '{0}[{1}{2}'.format(parts[0], fmt, parts[1])

    if self.has_colors and self.colors_enabled:
        # Set brightness
        st = ''
        if style:
            st = self.st.COLORS.get(style, '')
        return "{0}{1}{2}{3}".format(st, color, text, self.st.COLORS['reset_all'])
    else:
        return text
Registers the given message type in the local database.
def RegisterMessage(self, message):
    """Registers the given message type in the local database.

    Args:
      message: a message.Message, to be registered.

    Returns:
      The provided message.
    """
    desc = message.DESCRIPTOR
    self._symbols[desc.full_name] = message
    if desc.file.name not in self._symbols_by_file:
        self._symbols_by_file[desc.file.name] = {}
    self._symbols_by_file[desc.file.name][desc.full_name] = message
    self.pool.AddDescriptor(desc)
    return message
Gets all the messages from a specified file.
def GetMessages(self, files):
    """Gets all the messages from a specified file.

    This will find and resolve dependencies, failing if they are not
    registered in the symbol database.

    Args:
      files: The file names to extract messages from.

    Returns:
      A dictionary mapping proto names to the message classes. This will
      include any dependent messages as well as any messages defined in the
      same file as a specified message.

    Raises:
      KeyError: if a file could not be found.
    """
    result = {}
    for f in files:
        result.update(self._symbols_by_file[f])
    return result
Insert object before index.
def insert(self, index, value):
    """ Insert object before index.

    :param int index: index to insert in
    :param string value: path to insert
    """
    self._list.insert(index, value)
    self._sync()
Parse runtime path representation to list.
def parse(self, string):
    """ Parse runtime path representation to list.

    :param string string: runtime path string
    :return: list of runtime paths
    :rtype: list of string
    """
    var, eq, values = string.strip().partition('=')
    assert var == 'runtimepath'
    assert eq == '='
    return values.split(',')
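A usage sketch with an illustrative Vim runtimepath line:

    parse('runtimepath=~/.vim,/usr/share/vim/vimfiles')
    # -> ['~/.vim', '/usr/share/vim/vimfiles']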
Add some bundle to build group
def add_bundle(self, *args):
    """
    Add some bundle to build group

    :type bundle: static_bundle.bundles.AbstractBundle
    @rtype: BuildGroup
    """
    for bundle in args:
        if not self.multitype and self.has_bundles():
            first_bundle = self.get_first_bundle()
            if first_bundle.get_type() != bundle.get_type():
                raise Exception(
                    'Different bundle types for one Asset: %s[%s -> %s] '
                    'check types or set multitype parameter to True'
                    % (self.name, first_bundle.get_type(), bundle.get_type())
                )
        self.bundles.append(bundle)
    return self
Return collected file links
def collect_files(self):
    """
    Return collected file links

    :rtype: list[static_bundle.files.StaticFileResult]
    """
    self.files = []
    for bundle in self.bundles:
        bundle.init_build(self, self.builder)
        bundle_files = bundle.prepare()
        self.files.extend(bundle_files)
    return self
Asset minifier. Uses default minifier in bundle if it's not defined
def get_minifier(self):
    """
    Asset minifier
    Uses default minifier in bundle if it's not defined

    :rtype: static_bundle.minifiers.DefaultMinifier|None
    """
    if self.minifier is None:
        if not self.has_bundles():
            raise Exception("Unable to get default minifier, no bundles in build group")
        minifier = self.get_first_bundle().get_default_minifier()
    else:
        minifier = self.minifier
    if minifier:
        minifier.init_asset(self)
    return minifier
Create asset
def create_asset(self, name, **kwargs):
    """
    Create asset

    :type name: unicode|str
    :rtype: Asset
    """
    asset = Asset(self, name, **kwargs)
    self.assets[name] = asset
    return asset
Render all includes in asset by names
def render_asset(self, name):
    """
    Render all includes in asset by names

    :type name: str|unicode
    :rtype: str|unicode
    """
    result = ""
    if self.has_asset(name):
        asset = self.get_asset(name)
        if asset.files:
            for f in asset.files:
                result += f.render_include() + "\r\n"
    return result
Return links without build files
def collect_links(self, env=None):
    """
    Return links without build files
    """
    for asset in self.assets.values():
        if asset.has_bundles():
            asset.collect_files()
    if env is None:
        env = self.config.env
    if env == static_bundle.ENV_PRODUCTION:
        self._minify(emulate=True)
    self._add_url_prefix()
Move files / make static build
def make_build(self):
    """
    Move files / make static build
    """
    for asset in self.assets.values():
        if asset.has_bundles():
            asset.collect_files()
    if not os.path.exists(self.config.output_dir):
        os.makedirs(self.config.output_dir)
    if self.config.copy_only_bundles:
        for asset in self.assets.values():
            if not asset.minify and asset.files:
                for f in asset.files:
                    copy_file(f.abs_path, self._get_output_path(f.abs_path))
    else:
        copy_excludes = {}
        for asset in self.assets.values():
            if asset.minify and asset.files:
                for f in asset.files:
                    copy_excludes[f.abs_path] = f
        for root, dirs, files in os.walk(self.config.input_dir):
            for fpath in files:
                current_file_path = os.path.join(root, fpath)
                if current_file_path not in copy_excludes:
                    copy_file(current_file_path, self._get_output_path(current_file_path))
    self._minify()
Clear build output dir :type exclude: list|None
def clear(self, exclude=None):
    """
    Clear build output dir

    :type exclude: list|None
    """
    exclude = exclude or []
    for root, dirs, files in os.walk(self.config.output_dir):
        for f in files:
            if f not in exclude:
                os.unlink(os.path.join(root, f))
        for d in dirs:
            if d not in exclude:
                shutil.rmtree(os.path.join(root, d))
Coerce everything to strings. All objects representing time get output according to default_date_fmt.
def _default_json_default(obj):
    """ Coerce everything to strings.
    All objects representing time get output according to default_date_fmt.
    """
    if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
        return obj.strftime(default_date_fmt)
    else:
        return str(obj)
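A sketch of how this hook plugs into json.dumps; default_date_fmt is assumed here to be a strftime pattern like '%Y-%m-%d %H:%M:%S' (the real value is not shown in the snippet):

    import datetime
    import json

    default_date_fmt = '%Y-%m-%d %H:%M:%S'  # assumed value
    json.dumps({'now': datetime.datetime(2020, 1, 1)}, default=_default_json_default)
    # -> '{"now": "2020-01-01 00:00:00"}'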
Initialize the zlogger.
def init_logs(path=None,
              target=None,
              logger_name='root',
              level=logging.DEBUG,
              maxBytes=1*1024*1024,
              backupCount=5,
              application_name='default',
              server_hostname=None,
              fields=None):
    """Initialize the zlogger.

    Sets up a rotating file handler to the specified path and file with
    the given size and backup count limits, sets the default
    application_name, server_hostname, and default/whitelist fields.

    :param path: path to write the log file
    :param target: name of the log file
    :param logger_name: name of the logger (defaults to root)
    :param level: log level for this logger (defaults to logging.DEBUG)
    :param maxBytes: size of the file before rotation (default 1MB)
    :param backupCount: number of rotated log files to keep (default 5)
    :param application_name: app name to add to each log entry
    :param server_hostname: hostname to add to each log entry
    :param fields: default/whitelist fields.
    :type path: string
    :type target: string
    :type logger_name: string
    :type level: int
    :type maxBytes: int
    :type backupCount: int
    :type application_name: string
    :type server_hostname: string
    :type fields: dict
    """
    log_file = os.path.abspath(os.path.join(path, target))

    logger = logging.getLogger(logger_name)
    logger.setLevel(level)

    handler = logging.handlers.RotatingFileHandler(
        log_file, maxBytes=maxBytes, backupCount=backupCount)
    handler.setLevel(level)
    handler.setFormatter(
        JsonFormatter(
            application_name=application_name,
            server_hostname=server_hostname,
            fields=fields))

    logger.addHandler(handler)
formats a logging.Record into a standard json log entry
def format(self, record):
    """formats a logging.Record into a standard json log entry

    :param record: record to be formatted
    :type record: logging.Record
    :return: the formatted json string
    :rtype: string
    """
    record_fields = record.__dict__.copy()
    self._set_exc_info(record_fields)

    event_name = 'default'
    if record_fields.get('event_name'):
        event_name = record_fields.pop('event_name')

    log_level = 'INFO'
    if record_fields.get('log_level'):
        log_level = record_fields.pop('log_level')

    [record_fields.pop(k) for k in record_fields.keys()
     if k not in self.fields]

    defaults = self.defaults.copy()
    fields = self.fields.copy()
    fields.update(record_fields)
    filtered_fields = {}
    for k, v in fields.iteritems():
        if v is not None:
            filtered_fields[k] = v

    defaults.update({
        'event_timestamp': self._get_now(),
        'event_name': event_name,
        'log_level': log_level,
        'fields': filtered_fields})

    return json.dumps(defaults, default=self.json_default)
Initialize the model for a Pyramid app.
def includeme(config):
    """
    Initialize the model for a Pyramid app.

    Activate this setup using ``config.include('baka_model')``.
    """
    settings = config.get_settings()
    should_create = asbool(settings.get('baka_model.should_create_all', False))
    should_drop = asbool(settings.get('baka_model.should_drop_all', False))

    # Configure the transaction manager to support retrying retryable
    # exceptions. We also register the session factory with the thread-local
    # transaction manager, so that all sessions it creates are registered.
    #    "tm.attempts": 3,
    config.add_settings({
        "retry.attempts": 3,
        "tm.activate_hook": tm_activate_hook,
        "tm.annotate_user": False,
    })

    # use pyramid_retry because pyramid_tm disabled it
    config.include('pyramid_retry')

    # use pyramid_tm to hook the transaction lifecycle to the request
    config.include('pyramid_tm')

    engine = get_engine(settings)
    session_factory = get_session_factory(engine)
    config.registry['db_session_factory'] = session_factory

    # make request.db available for use in Pyramid
    config.add_request_method(
        # r.tm is the transaction manager used by pyramid_tm
        lambda r: get_tm_session(session_factory, r.tm),
        'db',
        reify=True
    )

    # service model factory
    config.include('.service')

    # Register a deferred action to bind the engine when the configuration is
    # committed. Deferring the action means that this module can be included
    # before model modules without ill effect.
    config.action(None, bind_engine, (engine,), {
        'should_create': should_create,
        'should_drop': should_drop
    }, order=10)
validate the passed values in kwargs based on the collection, store them in the mongodb collection
def store(self, collection, **kwargs):
    '''
    validate the passed values in kwargs based on the collection,
    store them in the mongodb collection
    '''
    key = validate(collection, **kwargs)
    if self.fetch(collection, **{key: kwargs[key]}):
        raise Proauth2Error('duplicate_key')
    self.db[collection].insert(kwargs)
Return absolute and relative path for file
def get_abs_and_rel_paths(self, root_path, file_name, input_dir):
    """
    Return absolute and relative path for file

    :type root_path: str|unicode
    :type file_name: str|unicode
    :type input_dir: str|unicode
    :rtype: tuple
    """
    # todo: change relative path resolving [bug on duplicate dir names in path]
    relative_dir = root_path.replace(input_dir, '')
    return os.path.join(root_path, file_name), relative_dir + '/' + file_name
:inheritdoc
def get_files(self):
    """ :inheritdoc """
    assert self.bundle, 'Cannot fetch file name with empty bundle'
    abs_path, rel_path = self.get_abs_and_rel_paths(
        self.bundle.path, self.file_path, self.bundle.input_dir)
    file_cls = self.bundle.get_file_cls()
    return [file_cls(rel_path, abs_path)]
:inheritdoc
def get_files(self):
    """ :inheritdoc """
    assert self.bundle, 'Cannot fetch directory name with empty bundle'
    result_files = []
    bundle_ext = self.bundle.get_extension()
    ext = "." + bundle_ext if bundle_ext else None
    if self.directory_path == "":
        root_path = self.bundle.path
    else:
        root_path = os.path.join(self.bundle.path, self.directory_path)
    for root, dirs, files in os.walk(root_path):
        for fpath in files:
            if (not ext or fpath.endswith(ext)) and \
                    (not self.exclusions or all(fpath != n for n in self.exclusions)):
                abs_path, rel_path = self.get_abs_and_rel_paths(
                    root, fpath, self.bundle.input_dir)
                file_cls = self.bundle.get_file_cls()
                result_files.append(file_cls(rel_path, abs_path))
    return result_files
Replicate an existing database to another existing database.
def replicate_existing(source_db, target_db):
    """Replicate an existing database to another existing database."""
    # Get the server from which to manage the replication.
    server = shortcuts.get_server()
    logger = logging.getLogger('relax.couchdb.replicate')
    logger.debug('POST ' + urlparse.urljoin(server.resource.uri, '/_replicate'))
    source, target = specifier_to_db(source_db), specifier_to_db(target_db)
    logger.debug('Source DB: %s' % (source,))
    logger.debug('Target DB: %s' % (target,))
    try:
        resp_headers, resp_body = server.resource.post(
            path='/_replicate',
            content=json.dumps({'source': source, 'target': target}))
    except couchdb.client.ServerError, exc:
        logger.error('Replication failed.')
        raise ReplicationError(exc.args)
    result = resp_body['history'][0]
    if resp_body['ok']:
        logger.info('Replication %s... successful!' % (
            resp_body['session_id'][:6],))
        logger.info('Replication started: ' + result['start_time'])
        logger.info('Replication finished: ' + result['end_time'])
        result['start_time'] = datetime.datetime.strptime(
            result['start_time'], '%a, %d %b %Y %H:%M:%S GMT')
        result['end_time'] = datetime.datetime.strptime(
            result['end_time'], '%a, %d %b %Y %H:%M:%S GMT')
        timedelta = result['end_time'] - result['start_time']
        if timedelta.days:
            logger.info('Replication took %d days and %.2f seconds.' % (
                timedelta.days,
                timedelta.seconds + (timedelta.microseconds * (1e-6))))
        else:
            logger.info('Replication took %.2f seconds.' % (
                timedelta.seconds + (timedelta.microseconds * (1e-6))))
        # Prepare the 'result' dictionary.
        result['ok'] = resp_body['ok']
        result['session_id'] = resp_body['session_id']
        result['source_last_seq'] = resp_body['source_last_seq']
        # Info-log the number of docs read/written and checked/found.
        if result['docs_read'] == 1:
            docs_read = '1 document read'
        else:
            docs_read = '%d documents read' % (result['docs_read'],)
        if result['docs_written'] == 1:
            docs_written = '1 document written'
        else:
            docs_written = '%d documents written' % (result['docs_written'],)
        if result['missing_checked'] == 1:
            missing_checked = 'Checked for 1 missing document, found %d.' % (
                result['missing_found'],)
        else:
            missing_checked = 'Checked for %d missing documents, found %d.' % (
                result['missing_checked'], result['missing_found'],)
        logging.info('%s, %s' % (docs_read, docs_written))
        logging.info(missing_checked)
        return result
    else:
        logger.error('Replication %s... failed.' % (
            resp_body['session_id'][:6],))
        result['ok'] = resp_body['ok']
        result['session_id'] = resp_body['session_id']
        result['source_last_seq'] = resp_body['source_last_seq']
        raise ReplicationFailure(resp_headers, result)
Generic Metropolis MCMC. Advances the chain by nsteps. Called by :func:`mcmc` :param adapt: enables adaptive stepwidth alteration (converges).
def mcmc_advance(start, stdevs, logp, nsteps=1e300, adapt=True, callback=None):
    """
    Generic Metropolis MCMC. Advances the chain by nsteps.
    Called by :func:`mcmc`

    :param adapt: enables adaptive stepwidth alteration (converges).
    """
    import scipy
    from numpy import log
    import progressbar

    prob = logp(start)
    chain = [start]
    accepts = [True]
    probs = [prob]
    assert not numpy.isnan(start).any()
    assert not numpy.isnan(stdevs).any()

    i = 0
    widgets = ['AR', progressbar.Percentage(), progressbar.Counter('%5d'),
               progressbar.Bar(), progressbar.ETA()]
    pbar = progressbar.ProgressBar(widgets=widgets, maxval=nsteps).start()

    prev = start
    prev_prob = prob
    print 'MCMC: start at prob', prob
    stepchange = 0.1
    while len(chain) < nsteps:
        i = i + 1
        next = scipy.random.normal(prev, stdevs)
        next[next > 1] = 1
        next[next < 0] = 0
        next_prob = logp(next)
        assert not numpy.isnan(next).any()
        assert not numpy.isnan(next_prob).any()
        delta = next_prob - prev_prob
        dice = log(scipy.random.uniform(0, 1))
        accept = delta > dice
        if accept:
            prev = next
            prev_prob = next_prob
            if adapt:
                stdevs *= (1 + stepchange)
        else:
            if adapt:
                stdevs *= (1 + stepchange)**(-0.4)  # aiming for 40% acceptance
        if callback:
            callback(prev_prob, prev, accept)
        chain.append(prev)
        accepts.append(accept)
        probs.append(prev_prob)
        if adapt:
            stepchange = min(0.1, 10. / i)
        # print 'STDEV', stdevs[:5], stepchange
        # compute stats
        widgets[0] = 'AR: %.03f' % numpy.mean(numpy.array(accepts[len(accepts)/3:]) + 0)
        pbar.update(pbar.currval + 1)
    pbar.finish()
    return chain, probs, accepts, stdevs
**Metropolis Hastings MCMC**
def mcmc(transform, loglikelihood, parameter_names, nsteps=40000, nburn=400,
         stdevs=0.1, start=0.5, **problem):
    """
    **Metropolis Hastings MCMC**

    with automatic step width adaption.
    Burnin period is also used to guess steps.

    :param nburn: number of burnin steps
    :param stdevs: step widths to start with
    """
    if 'seed' in problem:
        numpy.random.seed(problem['seed'])
    n_params = len(parameter_names)

    def like(cube):
        cube = numpy.array(cube)
        if (cube <= 1e-10).any() or (cube >= 1-1e-10).any():
            return -1e100
        params = transform(cube)
        return loglikelihood(params)

    start = start + numpy.zeros(n_params)
    stdevs = stdevs + numpy.zeros(n_params)

    def compute_stepwidths(chain):
        return numpy.std(chain, axis=0) / 3

    import matplotlib.pyplot as plt
    plt.figure(figsize=(7, 7))
    steps = numpy.array([0.1] * (n_params))

    print 'burn-in (1/2)...'
    chain, prob, _, steps_ = mcmc_advance(start, steps, like, nsteps=nburn / 2, adapt=True)
    steps = compute_stepwidths(chain)

    print 'burn-in (2/2)...'
    chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nburn / 2, adapt=True)
    steps = compute_stepwidths(chain)

    print 'recording chain ...'
    chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nsteps)
    chain = numpy.array(chain)

    i = numpy.argmax(prob)
    final = chain[-1]
    print 'postprocessing...'
    chain = numpy.array([transform(params) for params in chain])

    return dict(start=chain[-1], maximum=chain[i], seeds=[final, chain[i]],
                chain=chain, method='Metropolis MCMC')
**Ensemble MCMC** via `emcee <http://dan.iel.fm/emcee/>`_
def ensemble(transform, loglikelihood, parameter_names, nsteps=40000,
             nburn=400, start=0.5, **problem):
    """
    **Ensemble MCMC** via `emcee <http://dan.iel.fm/emcee/>`_
    """
    import emcee
    import progressbar

    if 'seed' in problem:
        numpy.random.seed(problem['seed'])
    n_params = len(parameter_names)
    nwalkers = 50 + n_params * 2
    if nwalkers > 200:
        nwalkers = 200
    p0 = [numpy.random.rand(n_params) for i in xrange(nwalkers)]
    start = start + numpy.zeros(n_params)
    p0[0] = start

    def like(cube):
        cube = numpy.array(cube)
        if (cube <= 1e-10).any() or (cube >= 1-1e-10).any():
            return -1e100
        params = transform(cube)
        return loglikelihood(params)

    sampler = emcee.EnsembleSampler(nwalkers, n_params, like,
                                    live_dangerously=True)

    print 'burn-in...'
    pos, prob, state = sampler.run_mcmc(p0, nburn / nwalkers)

    # Reset the chain to remove the burn-in samples.
    sampler.reset()

    print 'running ...'
    # Starting from the final position in the burn-in chain, sample
    pbar = progressbar.ProgressBar(
        widgets=[progressbar.Percentage(), progressbar.Counter('%5d'),
                 progressbar.Bar(), progressbar.ETA()],
        maxval=nsteps).start()
    for results in sampler.sample(pos, iterations=nsteps / nwalkers, rstate0=state):
        pbar.update(pbar.currval + 1)
    pbar.finish()

    print "Mean acceptance fraction:", numpy.mean(sampler.acceptance_fraction)

    chain = sampler.flatchain
    final = chain[-1]
    print 'postprocessing...'
    chain_post = numpy.array([transform(params) for params in chain])
    chain_prob = sampler.flatlnprobability

    return dict(start=final, chain=chain_post, chain_prior=chain,
                chain_prob=chain_prob, method='Ensemble MCMC')
search monits or works classes. Class must have 'name' attribute :param package_name: 'monits' or 'works' :param base_class: Monit or Work :return: tuple of tuples monit/work-name and class
def _get_classes(package_name, base_class):
    """
    search monits or works classes. Class must have 'name' attribute
    :param package_name: 'monits' or 'works'
    :param base_class: Monit or Work
    :return: tuple of tuples monit/work-name and class
    """
    classes = {}
    base_dir = os.getcwd()
    root_module_name = base_dir.split('/')[-1]
    package_dir = base_dir + '/%s' % package_name
    if os.path.isdir(package_dir):
        for module_path in os.listdir(package_dir):
            if not module_path.endswith('.py'):
                continue
            module_name = os.path.splitext(module_path)[0]
            module_full_name = '%s.%s.%s' % (root_module_name, package_name, module_name)
            __import__(module_full_name)
            work_module = sys.modules[module_full_name]
            for module_item in work_module.__dict__.values():
                if type(module_item) is type \
                        and issubclass(module_item, base_class) \
                        and module_item is not base_class \
                        and hasattr(module_item, 'name') and module_item.name:
                    classes.setdefault(module_item.name, []).append(module_item)

    # check no duplicated names
    for work_name, work_modules in classes.items():
        if len(work_modules) > 1:
            raise DuplicatedNameException(
                'Modules %s have same name "%s"' % (
                    ' and '.join(map(str, work_modules)), work_name))

    # create immutable list of modules
    return tuple([(work_name, work_modules[0])
                  for work_name, work_modules in classes.items()])
Adds an EnumDescriptor to the pool.
def AddEnumDescriptor(self, enum_desc):
    """Adds an EnumDescriptor to the pool.

    This method also registers the FileDescriptor associated with the message.

    Args:
      enum_desc: An EnumDescriptor.
    """
    if not isinstance(enum_desc, descriptor.EnumDescriptor):
        raise TypeError('Expected instance of descriptor.EnumDescriptor.')

    self._enum_descriptors[enum_desc.full_name] = enum_desc
    self.AddFileDescriptor(enum_desc.file)
Gets the FileDescriptor for the file containing the specified symbol.
def FindFileContainingSymbol(self, symbol):
    """Gets the FileDescriptor for the file containing the specified symbol.

    Args:
      symbol: The name of the symbol to search for.

    Returns:
      A FileDescriptor that contains the specified symbol.

    Raises:
      KeyError: if the file can not be found in the pool.
    """
    symbol = _NormalizeFullyQualifiedName(symbol)
    try:
        return self._descriptors[symbol].file
    except KeyError:
        pass

    try:
        return self._enum_descriptors[symbol].file
    except KeyError:
        pass

    try:
        file_proto = self._internal_db.FindFileContainingSymbol(symbol)
    except KeyError as error:
        if self._descriptor_db:
            file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
        else:
            raise error
    if not file_proto:
        raise KeyError('Cannot find a file containing %s' % symbol)
    return self._ConvertFileProtoToFileDescriptor(file_proto)
Loads the named descriptor from the pool.
def FindMessageTypeByName(self, full_name):
    """Loads the named descriptor from the pool.

    Args:
      full_name: The full name of the descriptor to load.

    Returns:
      The descriptor for the named type.
    """
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._descriptors:
        self.FindFileContainingSymbol(full_name)
    return self._descriptors[full_name]
Loads the named enum descriptor from the pool.
def FindEnumTypeByName(self, full_name):
    """Loads the named enum descriptor from the pool.

    Args:
      full_name: The full name of the enum descriptor to load.

    Returns:
      The enum descriptor for the named type.
    """
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._enum_descriptors:
        self.FindFileContainingSymbol(full_name)
    return self._enum_descriptors[full_name]
Loads the named extension descriptor from the pool.
def FindExtensionByName(self, full_name):
    """Loads the named extension descriptor from the pool.

    Args:
      full_name: The full name of the extension descriptor to load.

    Returns:
      A FieldDescriptor, describing the named extension.
    """
    full_name = _NormalizeFullyQualifiedName(full_name)
    message_name, _, extension_name = full_name.rpartition('.')
    try:
        # Most extensions are nested inside a message.
        scope = self.FindMessageTypeByName(message_name)
    except KeyError:
        # Some extensions are defined at file scope.
        scope = self.FindFileContainingSymbol(full_name)
    return scope.extensions_by_name[extension_name]
Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.
def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
                           containing_type=None, scope=None):
    """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.

    Args:
      enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message.
      package: Optional package name for the new message EnumDescriptor.
      file_desc: The file containing the enum descriptor.
      containing_type: The type containing this enum.
      scope: Scope containing available types.

    Returns:
      The added descriptor
    """
    if package:
        enum_name = '.'.join((package, enum_proto.name))
    else:
        enum_name = enum_proto.name

    if file_desc is None:
        file_name = None
    else:
        file_name = file_desc.name

    values = [self._MakeEnumValueDescriptor(value, index)
              for index, value in enumerate(enum_proto.value)]
    desc = descriptor.EnumDescriptor(name=enum_proto.name,
                                     full_name=enum_name,
                                     filename=file_name,
                                     file=file_desc,
                                     values=values,
                                     containing_type=containing_type,
                                     options=enum_proto.options)
    scope['.%s' % enum_name] = desc
    self._enum_descriptors[enum_name] = desc
    return desc
Creates a field descriptor from a FieldDescriptorProto.
def _MakeFieldDescriptor(self, field_proto, message_name, index,
                         is_extension=False):
    """Creates a field descriptor from a FieldDescriptorProto.

    For message and enum type fields, this method will do a look up
    in the pool for the appropriate descriptor for that type. If it
    is unavailable, it will fall back to the _source function to
    create it. If this type is still unavailable, construction will
    fail.

    Args:
      field_proto: The proto describing the field.
      message_name: The name of the containing message.
      index: Index of the field
      is_extension: Indication that this field is for an extension.

    Returns:
      An initialized FieldDescriptor object
    """
    if message_name:
        full_name = '.'.join((message_name, field_proto.name))
    else:
        full_name = field_proto.name

    return descriptor.FieldDescriptor(
        name=field_proto.name,
        full_name=full_name,
        index=index,
        number=field_proto.number,
        type=field_proto.type,
        cpp_type=None,
        message_type=None,
        enum_type=None,
        containing_type=None,
        label=field_proto.label,
        has_default_value=False,
        default_value=None,
        is_extension=is_extension,
        extension_scope=None,
        options=field_proto.options)
theoretically, any data store can be implemented to work with this package, which means basic data validation must be done in-package, so that weird stuff can't be stored in the data store. this function raises an exception if an invalid table name is passed, not all of the required fields are in the data kwargs, or if a field that was passed is not expected. it also returns the key field name, for ensuring uniqueness (again, that may not be built into whatever data store is implemented.)
def validate(table, **data):
    '''
    theoretically, any data store can be implemented to work with this
    package, which means basic data validation must be done in-package, so
    that weird stuff can't be stored in the data store.
    this function raises an exception if an invalid table name is passed, not
    all of the required fields are in the data kwargs, or if a field that was
    passed is not expected.
    it also returns the key field name, for ensuring uniqueness (again, that
    may not be built into whatever data store is implemented.)
    '''
    if table not in good.keys():
        raise Proauth2Error('invalid_request', 'invalid name: %s' % table)
    for req in good[table]['required']:
        if not data.get(req, None):
            raise Proauth2Error('invalid_request',
                                'missing required field: %s' % req)
    for key in data.keys():
        if key not in good[table]['required'] and \
                key not in good[table]['optional']:
            raise Proauth2Error('invalid_request', 'invalid field: %s' % key)
    return good[table]['key']
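The good registry used above is not part of the snippet; a minimal sketch of the shape the function expects might be:

    # Hypothetical schema registry; table and field names are illustrative.
    good = {
        'applications': {
            'required': ['client_id', 'client_secret', 'name', 'redirect_uri'],
            'optional': [],
            'key': 'client_id',
        },
    }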
Record the results of this experiment, by updating the tag. :param results: A dictionary containing the results of the experiment. :type results: dict
def record_results(self, results):
    """
    Record the results of this experiment, by updating the tag.

    :param results: A dictionary containing the results of the experiment.
    :type results: dict
    """
    repository = Repo(self.__repository_directory, search_parent_directories=True)
    for tag in repository.tags:
        if tag.name == self.__tag_name:
            tag_object = tag
            break
    else:
        raise Exception("Experiment tag has been deleted since experiment started")
    data = json.loads(tag_object.tag.message)
    data["results"] = results
    TagReference.create(repository, self.__tag_name,
                        message=json.dumps(data),
                        ref=tag_object.tag.object, force=True)
    self.__results_recorded = True
Tag the current repository. :param data: a dictionary containing the data about the experiment :type data: dict
def __tag_repo(self, data, repository):
    """
    Tag the current repository.

    :param data: a dictionary containing the data about the experiment
    :type data: dict
    """
    assert self.__tag_name not in [t.name for t in repository.tags]
    return TagReference.create(repository, self.__tag_name,
                               message=json.dumps(data))
:return: the files that have been modified and can be added
def __get_files_to_be_added(self, repository):
    """
    :return: the files that have been modified and can be added
    """
    for root, dirs, files in os.walk(repository.working_dir):
        for f in files:
            relative_path = os.path.join(root, f)[len(repository.working_dir) + 1:]
            try:
                repository.head.commit.tree[relative_path]  # will fail if not tracked
                yield relative_path
            except:
                pass
Start an experiment by capturing the state of the code :param parameters: a dictionary containing the parameters of the experiment :type parameters: dict :return: the tag representing this experiment :rtype: TagReference
def __start_experiment(self, parameters):
    """
    Start an experiment by capturing the state of the code

    :param parameters: a dictionary containing the parameters of the experiment
    :type parameters: dict
    :return: the tag representing this experiment
    :rtype: TagReference
    """
    repository = Repo(self.__repository_directory, search_parent_directories=True)
    if len(repository.untracked_files) > 0:
        logging.warning("Untracked files will not be recorded: %s",
                        repository.untracked_files)
    current_commit = repository.head.commit
    started_state_is_dirty = repository.is_dirty()

    if started_state_is_dirty:
        repository.index.add([p for p in self.__get_files_to_be_added(repository)])
        commit_obj = repository.index.commit("Temporary commit for experiment " +
                                             self.__experiment_name)
        sha = commit_obj.hexsha
    else:
        sha = repository.head.object.hexsha

    data = {"parameters": parameters, "started": time.time(),
            "description": self.__description, "commit_sha": sha}
    tag_object = self.__tag_repo(data, repository)

    if started_state_is_dirty:
        repository.head.reset(current_commit, working_tree=False, index=True)

    return tag_object
Get a sqlalchemy.orm.Session instance backed by a transaction.
def get_tm_session(session_factory, transaction_manager):
    """
    Get a ``sqlalchemy.orm.Session`` instance backed by a transaction.

    This function will hook the session to the transaction manager which
    will take care of committing any changes.

    - When using pyramid_tm it will automatically be committed or aborted
      depending on whether an exception is raised.

    - When using scripts you should wrap the session in a manager yourself.
      For example::

          import transaction

          engine = get_engine(settings)
          session_factory = get_session_factory(engine)
          with transaction.manager:
              dbsession = get_tm_session(session_factory, transaction.manager)
    """
    dbsession = session_factory()
    zope.sqlalchemy.register(
        dbsession, transaction_manager=transaction_manager)
    return dbsession
run your main spider here, and get a list/tuple of url as result, then make the instance of branch thread
def run(self):
    """run your main spider here, and get a list/tuple of url as result
    then make the instance of branch thread

    :return: None
    """
    global existed_urls_list
    config = config_creator()
    debug = config.debug
    main_thread_sleep = config.main_thread_sleep
    branch_thread_num = config.branch_thread_num
    while 1:
        url = self.main_queue.get()
        if debug:
            print('main thread-{} start'.format(url))
        main_spider = self.main_spider(url)
        sleep(random.randrange(*main_thread_sleep))
        links = main_spider.request_urls()
        try:
            assert type(links) in VALIDATE_URLS
        except AssertionError:
            error_message('except to return a list or tuple which contains url')
            links = list()
        branch_queue = queue.Queue(branch_thread_num)
        for i in range(branch_thread_num):
            branch_thread = BranchThread(branch_queue=branch_queue,
                                         branch_spider=self.branch_spider)
            branch_thread.daemon = True
            branch_thread.start()
        for link in links:
            if link not in existed_urls_list:
                existed_urls_list.append(link)
                branch_queue.put(link)
        branch_queue.join()
        if debug:
            print('main thread-{}\'s child threads is all finish'.format(url))
        self.main_queue.task_done()
Copied (and slightly adapted) from pypi.description_tools
def pypi_render(source):
    """
    Copied (and slightly adapted) from pypi.description_tools
    """
    ALLOWED_SCHEMES = '''file ftp gopher hdl http https imap mailto mms news
        nntp prospero rsync rtsp rtspu sftp shttp sip sips snews svn svn+ssh
        telnet wais irc'''.split()

    settings_overrides = {
        "raw_enabled": 0,             # no raw HTML code
        "file_insertion_enabled": 0,  # no file/URL access
        "halt_level": 2,              # at warnings or errors, raise an exception
        "report_level": 5,            # never report problems with the reST code
    }

    # capture publishing errors, they go to stderr
    old_stderr = sys.stderr
    sys.stderr = s = StringIO.StringIO()
    parts = None

    try:
        # Convert reStructuredText to HTML using Docutils.
        document = publish_doctree(source=source,
                                   settings_overrides=settings_overrides)

        for node in document.traverse():
            if node.tagname == '#text':
                continue
            if node.hasattr('refuri'):
                uri = node['refuri']
            elif node.hasattr('uri'):
                uri = node['uri']
            else:
                continue
            o = urlparse.urlparse(uri)
            if o.scheme not in ALLOWED_SCHEMES:
                raise TransformError('link scheme not allowed')

        # now turn the transformed document into HTML
        reader = readers.doctree.Reader(parser_name='null')
        pub = Publisher(reader, source=io.DocTreeInput(document),
                        destination_class=io.StringOutput)
        pub.set_writer('html')
        pub.process_programmatic_settings(None, settings_overrides, None)
        pub.set_destination(None, None)
        pub.publish()
        parts = pub.writer.parts
    except:
        pass

    sys.stderr = old_stderr

    # original text if publishing errors occur
    if parts is None or len(s.getvalue()) > 0:
        return None
    else:
        return parts['body']
Generate a random string of the specified length.
def generate(length=DEFAULT_LENGTH):
    """
    Generate a random string of the specified length.

    The returned string is composed of an alphabet that shouldn't include any
    characters that are easily mistakeable for one another (I, 1, O, 0), and
    hopefully won't accidentally contain any English-language curse words.
    """
    return ''.join(random.SystemRandom().choice(ALPHABET)
                   for _ in range(length))
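The ALPHABET and DEFAULT_LENGTH constants are not shown in the snippet; one plausible sketch, with the confusable characters removed as the docstring describes:

    import random

    DEFAULT_LENGTH = 12  # assumed value
    ALPHABET = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ23456789'  # assumed: no I, 1, O, 0

    generate()   # e.g. 'x7GmKq2RwNpZ'
    generate(6)  # e.g. 'Tk3vQm'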
Require that the named field has the right data_type
def require(name, field, data_type):
    """Require that the named `field` has the right `data_type`"""
    if not isinstance(field, data_type):
        msg = '{0} must have {1}, got: {2}'.format(name, data_type, field)
        raise AssertionError(msg)
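For example:

    require('timeout', 30, int)    # passes silently
    require('timeout', '30', int)  # raises AssertionError with a descriptive message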
Push a new `msg` onto the queue, return `(success, msg)`
def _enqueue(self, msg):
    """Push a new `msg` onto the queue, return `(success, msg)`"""
    self.log.debug('queueing: %s', msg)
    if self.queue.full():
        self.log.warn('librato_bg queue is full')
        return False, msg

    self.queue.put(msg)
    self.log.debug('enqueued %s.', msg)
    return True, msg
Forces a flush from the internal queue to the server
def flush(self):
    """Forces a flush from the internal queue to the server"""
    queue = self.queue
    size = queue.qsize()
    queue.join()
    self.log.debug('successfully flushed %s items.', size)
Use all possible decompressors to make the stream
def open(name=None, fileobj=None, closefd=True):
    """
    Use all possible decompressors to make the stream
    """
    return Guesser().open(name=name, fileobj=fileobj, closefd=closefd)
Manage a Marv site
def marv(ctx, config, loglevel, logfilter, verbosity):
    """Manage a Marv site"""
    if config is None:
        cwd = os.path.abspath(os.path.curdir)
        while cwd != os.path.sep:
            config = os.path.join(cwd, 'marv.conf')
            if os.path.exists(config):
                break
            cwd = os.path.dirname(cwd)
        else:
            config = '/etc/marv/marv.conf'
            if not os.path.exists(config):
                config = None
    ctx.obj = config
    setup_logging(loglevel, verbosity, logfilter)
This function returns a Pyramid WSGI application.
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    set_cache_regions_from_settings(settings)
    config = Configurator(settings=settings)
    config.include('cms')
    config.configure_celery(global_config['__file__'])
    return config.make_wsgi_app()
Like _VarintDecoder() but decodes signed values.
def _SignedVarintDecoder(mask, result_type):
    """Like _VarintDecoder() but decodes signed values."""

    def DecodeVarint(buffer, pos):
        result = 0
        shift = 0
        while 1:
            b = six.indexbytes(buffer, pos)
            result |= ((b & 0x7f) << shift)
            pos += 1
            if not (b & 0x80):
                if result > 0x7fffffffffffffff:
                    result -= (1 << 64)
                    result |= ~mask
                else:
                    result &= mask
                result = result_type(result)
                return (result, pos)
            shift += 7
            if shift >= 64:
                raise _DecodeError('Too many bytes when decoding varint.')
    return DecodeVarint
Returns a decoder for a MessageSet item.
def MessageSetItemDecoder(extensions_by_number):
    """Returns a decoder for a MessageSet item.

    The parameter is the _extensions_by_number map for the message class.

    The message set message looks like this:
      message MessageSet {
        repeated group Item = 1 {
          required int32 type_id = 2;
          required string message = 3;
        }
      }
    """
    type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
    message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
    item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)

    local_ReadTag = ReadTag
    local_DecodeVarint = _DecodeVarint
    local_SkipField = SkipField

    def DecodeItem(buffer, pos, end, message, field_dict):
        message_set_item_start = pos
        type_id = -1
        message_start = -1
        message_end = -1

        # Technically, type_id and message can appear in any order, so we need
        # a little loop here.
        while 1:
            (tag_bytes, pos) = local_ReadTag(buffer, pos)
            if tag_bytes == type_id_tag_bytes:
                (type_id, pos) = local_DecodeVarint(buffer, pos)
            elif tag_bytes == message_tag_bytes:
                (size, message_start) = local_DecodeVarint(buffer, pos)
                pos = message_end = message_start + size
            elif tag_bytes == item_end_tag_bytes:
                break
            else:
                pos = SkipField(buffer, pos, end, tag_bytes)
                if pos == -1:
                    raise _DecodeError('Missing group end tag.')

        if pos > end:
            raise _DecodeError('Truncated message.')
        if type_id == -1:
            raise _DecodeError('MessageSet item missing type_id.')
        if message_start == -1:
            raise _DecodeError('MessageSet item missing message.')

        extension = extensions_by_number.get(type_id)
        if extension is not None:
            value = field_dict.get(extension)
            if value is None:
                value = field_dict.setdefault(
                    extension, extension.message_type._concrete_class())
            if value._InternalParse(buffer, message_start, message_end) != message_end:
                # The only reason _InternalParse would return early is if it
                # encountered an end-group tag.
                raise _DecodeError('Unexpected end-group tag.')
        else:
            if not message._unknown_fields:
                message._unknown_fields = []
            message._unknown_fields.append(
                (MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos]))

        return pos

    return DecodeItem
Flask-like implementation of getting the application name via the filename of the including file
def get_app_name():
    """Flask-like implementation of getting the application name via
    the filename of the including file
    """
    fn = getattr(sys.modules['__main__'], '__file__', None)
    if fn is None:
        return '__main__'
    return os.path.splitext(os.path.basename(fn))[0]
Given a Python function name return the function it refers to.
def get_function(function_name):
    """
    Given a Python function name, return the function it refers to.
    """
    module, basename = str(function_name).rsplit('.', 1)
    try:
        return getattr(__import__(module, fromlist=[basename]), basename)
    except (ImportError, AttributeError):
        raise FunctionNotFound(function_name)
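For example:

    get_function('os.path.join')      # -> the os.path.join function
    get_function('not.a.real.func')   # raises FunctionNotFound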
Add a function to the function list in order.
def handle_add_fun(self, function_name):
    """Add a function to the function list, in order."""
    function_name = function_name.strip()
    try:
        function = get_function(function_name)
    except Exception, exc:
        self.wfile.write(js_error(exc) + NEWLINE)
        return
    # This tests to see if the function has been decorated with the view
    # server synchronisation decorator (``decorate_view``).
    if not getattr(function, 'view_decorated', None):
        self.functions[function_name] = (self.function_counter, function)
    # The decorator gets called with the logger function.
    else:
        self.functions[function_name] = (self.function_counter,
                                         function(self.log))
    self.function_counter += 1
    return True
Return the mapping of a document according to the function list.
def handle_map_doc(self, document):
    """Return the mapping of a document according to the function list."""
    # This uses the stored set of functions, sorted by order of addition.
    for function in sorted(self.functions.values(), key=lambda x: x[0]):
        try:
            # It has to be run through ``list``, because it may be a
            # generator function.
            yield [list(function(document))]
        except Exception, exc:
            # Otherwise, return an empty list and log the event.
            yield []
            self.log(repr(exc))
Reduce several mapped documents by several reduction functions.
def handle_reduce(self, reduce_function_names, mapped_docs):
    """Reduce several mapped documents by several reduction functions."""
    reduce_functions = []
    # This gets a large list of reduction functions, given their names.
    for reduce_function_name in reduce_function_names:
        try:
            reduce_function = get_function(reduce_function_name)
            if getattr(reduce_function, 'view_decorated', None):
                reduce_function = reduce_function(self.log)
            reduce_functions.append(reduce_function)
        except Exception, exc:
            self.log(repr(exc))
            reduce_functions.append(lambda *args, **kwargs: None)
    # Transform lots of (key, value) pairs into one (keys, values) pair.
    # The transpose needs ``zip(*...)``; passing the generator to ``zip``
    # directly would yield 1-tuples instead of two sequences.
    keys, values = zip(
        *((key, value) for ((key, doc_id), value) in mapped_docs))
    # This gets the list of results from the reduction functions.
    results = []
    for reduce_function in reduce_functions:
        try:
            results.append(reduce_function(keys, values, rereduce=False))
        except Exception, exc:
            self.log(repr(exc))
            results.append(None)
    return [True, results]
Re-reduce a set of values, with a list of rereduction functions.
def handle_rereduce(self, reduce_function_names, values):
    """Re-reduce a set of values, with a list of rereduction functions."""
    # This gets a large list of reduction functions, given their names.
    reduce_functions = []
    for reduce_function_name in reduce_function_names:
        try:
            reduce_function = get_function(reduce_function_name)
            if getattr(reduce_function, 'view_decorated', None):
                reduce_function = reduce_function(self.log)
            reduce_functions.append(reduce_function)
        except Exception, exc:
            self.log(repr(exc))
            reduce_functions.append(lambda *args, **kwargs: None)
    # This gets the list of results from those functions.
    results = []
    for reduce_function in reduce_functions:
        try:
            results.append(reduce_function(None, values, rereduce=True))
        except Exception, exc:
            self.log(repr(exc))
            results.append(None)
    return [True, results]
Validate... this function is undocumented but still in CouchDB.
def handle_validate(self, function_name, new_doc, old_doc, user_ctx):
    """Validate...this function is undocumented, but still in CouchDB."""
    try:
        function = get_function(function_name)
    except Exception, exc:
        self.log(repr(exc))
        return False
    try:
        return function(new_doc, old_doc, user_ctx)
    except Exception, exc:
        self.log(repr(exc))
        return repr(exc)
The main function called to handle a request.
def handle(self):
    """The main function called to handle a request."""
    while True:
        try:
            line = self.rfile.readline()
            try:
                # All input data are lines of JSON like the following:
                #   ["<cmd_name>" "<cmd_arg1>" "<cmd_arg2>" ...]
                # So I handle this by dispatching to various methods.
                cmd = json.loads(line)
            except Exception, exc:
                # Sometimes errors come up. Once again, I can't predict
                # anything, but can at least tell CouchDB about the error.
                self.wfile.write(repr(exc) + NEWLINE)
                continue
            else:
                # Automagically get the command handler.
                handler = getattr(self, 'handle_' + cmd[0], None)
                if not handler:
                    # We are ready to not find commands. It probably won't
                    # happen, but fortune favours the prepared.
                    self.wfile.write(
                        repr(CommandNotFound(cmd[0])) + NEWLINE)
                    continue
                return_value = handler(*cmd[1:])
                if not return_value:
                    continue
                # We write the output back to CouchDB.
                self.wfile.write(
                    one_lineify(json.dumps(return_value)) + NEWLINE)
        except Exception, exc:
            self.wfile.write(repr(exc) + NEWLINE)
            continue
Log an event on the CouchDB server.
def log(self, string):
    """Log an event on the CouchDB server."""
    self.wfile.write(json.dumps({'log': string}) + NEWLINE)
Generates a universally unique ID. Any arguments only create more randomness.
def guid(*args):
    """
    Generates a universally unique ID.
    Any arguments only create more randomness.
    """
    t = float(time.time() * 1000)
    r = float(random.random() * 10000000000000)
    a = random.random() * 10000000000000
    data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)
    data = hashlib.md5(data.encode()).hexdigest()[:10]
    return data
Return pages the GitModel knows about. :param int limit: The number of pages to return, defaults to 5. :param tuple order_by: The attributes to order on, defaults to ('position', '-modified_at')
def get_pages(self, limit=5, order_by=('position', '-modified_at')):
    """
    Return pages the GitModel knows about.

    :param int limit: The number of pages to return, defaults to 5.
    :param tuple order_by: The attributes to order on,
        defaults to ('position', '-modified_at')
    """
    return to_eg_objects(self.workspace.S(Page).filter(
        language=self.locale).order_by(*order_by)[:limit])
Return featured pages the GitModel knows about. :param str locale: The locale string, like `eng_UK`. :param int limit: The number of pages to return, defaults to 5. :param tuple order_by: The attributes to order on, defaults to ('position', '-modified_at').
def get_featured_pages(self, limit=5, order_by=('position', '-modified_at')):
    """
    Return featured pages the GitModel knows about.

    :param str locale: The locale string, like `eng_UK`.
    :param int limit: The number of pages to return, defaults to 5.
    :param tuple order_by: The attributes to order on,
        defaults to ('position', '-modified_at').
    """
    return self._get_featured_pages(self.locale, limit, order_by)
register_app takes an application name and redirect_uri. It generates client_id (client_key) and client_secret, then stores all of the above in the data_store, and returns a dictionary containing the client_id and client_secret.
def register_app(self, name, redirect_uri, callback):
    '''
    register_app takes an application name and redirect_uri
    It generates client_id (client_key) and client_secret,
    then stores all of the above in the data_store,
    and returns a dictionary containing the client_id and client_secret.
    '''
    client_id = self._generate_token()
    client_secret = self._generate_token(64)
    yield Task(self.data_store.store, 'applications', client_id=client_id,
               client_secret=client_secret, name=name,
               redirect_uri=redirect_uri)
    callback({'client_id': client_id, 'client_secret': client_secret})
request_authorization generates a nonce, and stores it in the data_store along with the client_id, user_id, and expiration timestamp. It then returns a dictionary containing the nonce as "code," and the passed state. --- response_type MUST be "code." this is directly from the OAuth2 spec. this probably doesn't need to be checked here, but if it's in the spec I guess it should be verified somewhere. scope has not been implemented here. it will be stored, but there is no scope-checking built in here at this time. if a redirect_uri is passed, it must match the registered redirect_uri. again, this is per spec.
def request_authorization(self, client_id, user_id, response_type,
                          redirect_uri=None, scope=None, state=None,
                          expires=600, callback=None):
    '''
    request_authorization generates a nonce, and stores it in the data_store
    along with the client_id, user_id, and expiration timestamp.
    It then returns a dictionary containing the nonce as "code," and the
    passed state.
    ---
    response_type MUST be "code." this is directly from the OAuth2 spec.
    this probably doesn't need to be checked here, but if it's in the spec I
    guess it should be verified somewhere.
    scope has not been implemented here. it will be stored, but there is no
    scope-checking built in here at this time.
    if a redirect_uri is passed, it must match the registered redirect_uri.
    again, this is per spec.
    '''
    if response_type != 'code':
        raise Proauth2Error('invalid_request',
                            'response_type must be "code"', state=state)
    client = yield Task(self.data_store.fetch, 'applications',
                        client_id=client_id)
    if not client:
        raise Proauth2Error('access_denied')
    if redirect_uri and client['redirect_uri'] != redirect_uri:
        raise Proauth2Error('invalid_request', "redirect_uris don't match")

    nonce_code = self._generate_token()
    expires = time() + expires
    try:
        yield Task(self.data_store.store, 'nonce_codes', code=nonce_code,
                   client_id=client_id, expires=expires, user_id=user_id,
                   scope=scope)
    except Proauth2Error as e:
        e.state = state
        raise e

    callback({'code': nonce_code, 'state': state})
request_access_token validates the client_id and client_secret, using the provided method, then generates an access_token, stores it with the user_id from the nonce, and returns a dictionary containing an access_token and bearer token. --- from the spec, it looks like there are different types of tokens, but i don't understand the distinctions, so someone else can fix this if need be. regarding the method: it appears that it is intended for there to be multiple ways to verify the client_id. my assumption is that you use the secret as the salt and pass the hash of the client_id or something, and then compare hashes on the server end. currently the only implemented method is direct comparison of the client_ids and client_secrets. additional methods can be added to proauth2.auth_methods
def request_access_token(self, client_id, key, code, grant_type,
                         redirect_uri=None, method='direct_auth',
                         callback=None):
    '''
    request_access_token validates the client_id and client_secret, using the
    provided method, then generates an access_token, stores it with the
    user_id from the nonce, and returns a dictionary containing an
    access_token and bearer token.
    ---
    from the spec, it looks like there are different types of tokens, but i
    don't understand the distinctions, so someone else can fix this if need
    be.
    regarding the method: it appears that it is intended for there to be
    multiple ways to verify the client_id. my assumption is that you use the
    secret as the salt and pass the hash of the client_id or something, and
    then compare hashes on the server end. currently the only implemented
    method is direct comparison of the client_ids and client_secrets.
    additional methods can be added to proauth2.auth_methods
    '''
    if grant_type != 'authorization_code':
        raise Proauth2Error('invalid_request',
                            'grant_type must be "authorization_code"')

    yield Task(self._auth, client_id, key, method)
    user_id = yield Task(self._validate_request_code, code, client_id)
    access_token = self._generate_token(64)
    yield Task(self.data_store.store, 'tokens', token=access_token,
               user_id=user_id, client_id=client_id)
    callback({'access_token': access_token, 'token_type': 'bearer'})
authenticate_token checks the passed token and returns the user_id it is associated with. it is assumed that this method won t be directly exposed to the oauth client but some kind of framework or wrapper. this allows the framework to have the user_id without doing additional DB calls.
def authenticate_token(self, token, callback):
    '''
    authenticate_token checks the passed token and returns the user_id it is
    associated with. it is assumed that this method won't be directly exposed
    to the oauth client, but to some kind of framework or wrapper. this
    allows the framework to have the user_id without doing additional DB
    calls.
    '''
    token_data = yield Task(self.data_store.fetch, 'tokens', token=token)
    if not token_data:
        raise Proauth2Error('access_denied',
                            'token does not exist or has been revoked')
    callback(token_data['user_id'])
revoke_token removes the access token from the data_store
def revoke_token(self, token, callback): ''' revoke_token removes the access token from the data_store ''' yield Task(self.data_store.remove, 'tokens', token=token) callback()
_auth - internal method to ensure the client_id and client_secret passed with the nonce match
def _auth(self, client_id, key, method, callback):
    '''
    _auth - internal method to ensure the client_id and client_secret passed
    with the nonce match
    '''
    available = auth_methods.keys()
    if method not in available:
        # Note: the two literals are concatenated, so a separator is needed
        # between the method name and the list of available methods.
        raise Proauth2Error('invalid_request',
                            'unsupported authentication method: %s\n'
                            'available methods: %s' %
                            (method, '\n'.join(available)))
    client = yield Task(self.data_store.fetch, 'applications',
                        client_id=client_id)
    if not client:
        raise Proauth2Error('access_denied')
    if not auth_methods[method](key, client['client_secret']):
        raise Proauth2Error('access_denied')
    callback()
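For reference, the auth_methods registry consulted above is presumably a dict mapping method names to callables taking (key, client_secret); a minimal sketch matching the documented direct-comparison behavior is below — anything beyond that would be speculation.

# Sketch of the assumed shape of proauth2.auth_methods: method name ->
# callable(key, client_secret) -> bool. Only the documented direct
# comparison is shown.
auth_methods = {
    'direct_auth': lambda key, client_secret: key == client_secret,
}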
_validate_request_code - internal method for verifying the given nonce. also removes the nonce from the data_store, as they are intended for one-time use.
def _validate_request_code(self, code, client_id, callback):
    '''
    _validate_request_code - internal method for verifying the given nonce.
    also removes the nonce from the data_store, as they are intended for
    one-time use.
    '''
    nonce = yield Task(self.data_store.fetch, 'nonce_codes', code=code)
    if not nonce:
        raise Proauth2Error('access_denied',
                            'invalid request code: %s' % code)
    if client_id != nonce['client_id']:
        raise Proauth2Error('access_denied',
                            'invalid request code: %s' % code)

    user_id = nonce['user_id']
    expires = nonce['expires']
    yield Task(self.data_store.remove, 'nonce_codes', code=code,
               client_id=client_id, user_id=user_id)

    if time() > expires:
        raise Proauth2Error('access_denied',
                            'request code %s expired' % code)

    callback(user_id)
_generate_token - internal function for generating randomized alphanumeric strings of a given length
def _generate_token(self, length=32):
    '''
    _generate_token - internal function for generating randomized
    alphanumeric strings of a given length
    '''
    return ''.join(choice(ascii_letters + digits) for _ in range(length))
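Note that random.choice is not a cryptographic source; if the tokens must be unguessable, a drop-in variant on the stdlib secrets module (Python 3.6+) could be swapped in. This is a sketch of an alternative, not something proauth2 ships.

# Sketch of a CSPRNG-backed variant using the stdlib 'secrets' module
# (Python 3.6+). Shown only to illustrate the swap; not part of proauth2.
from secrets import choice as secure_choice
from string import ascii_letters, digits

def generate_secure_token(length=32):
    # secrets.choice draws from the OS entropy pool rather than the
    # Mersenne Twister behind random.choice.
    return ''.join(secure_choice(ascii_letters + digits)
                   for _ in range(length))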
Merge multiple ordered sequences so that within-sequence order is preserved.
def merge_ordered(ordereds: typing.Sequence[typing.Sequence[typing.Any]]) -> typing.Iterable[typing.Any]:
    """Merge multiple ordered sequences so that within-sequence order is preserved"""
    seen_set = set()
    add_seen = seen_set.add
    return reversed(tuple(map(
        lambda obj: add_seen(obj) or obj,
        filterfalse(
            seen_set.__contains__,
            chain.from_iterable(map(reversed, reversed(ordereds))),
        ),
    )))
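Because the implementation reads back-to-front (reverse every input, chain them last-to-first, drop repeats, reverse the survivors), a worked example helps; it assumes list inputs, since reversed() requires sequences.

# Worked example (assumes sequence inputs, since reversed() needs them).
# Walking [["a", "b", "d"], ["b", "c", "d"]] from the back yields
# d, c, b, d, b, a; duplicate sightings are dropped, leaving (d, c, b, a);
# reversing that gives the merged order.
print(list(merge_ordered([["a", "b", "d"], ["b", "c", "d"]])))
# -> ['a', 'b', 'c', 'd']   (both a<b<d and b<c<d are preserved)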
Helps us validate the parameters for the request
def validate_params(required, optional, params):
    """
    Helps us validate the parameters for the request

    :param required: a list of strings, the fields that must be present in
        the api request
    :param optional: a list of strings, the fields that may be present in
        the api request
    :param params: a dict, the key-value store of the fields the user passed
        in for the API request
    :returns: None, or raises an exception if the validation fails
    """
    missing_fields = [x for x in required if x not in params]
    if missing_fields:
        field_strings = ", ".join(missing_fields)
        raise Exception("Missing fields: %s" % field_strings)
    disallowed_fields = [x for x in params if x not in optional and x not in required]
    if disallowed_fields:
        field_strings = ", ".join(disallowed_fields)
        raise Exception("Disallowed fields: %s" % field_strings)
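A quick usage sketch with made-up field names:

# Usage sketch; the field names are invented.
required = ["user_id", "amount"]
optional = ["memo"]

validate_params(required, optional, {"user_id": 1, "amount": 5})   # passes
validate_params(required, optional,
                {"user_id": 1, "amount": 5, "memo": "rent"})       # passes
# validate_params(required, optional, {"user_id": 1})
#   -> Exception: Missing fields: amount
# validate_params(required, optional, {"user_id": 1, "amount": 5, "x": 0})
#   -> Exception: Disallowed fields: x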
authenticate_token checks the passed token and returns the user_id it is associated with. it is assumed that this method won't be directly exposed to the oauth client, but to some kind of framework or wrapper. this allows the framework to have the user_id without doing additional DB calls.
def authenticate_token( self, token ):
    '''
    authenticate_token checks the passed token and returns the user_id it is
    associated with. it is assumed that this method won't be directly exposed
    to the oauth client, but to some kind of framework or wrapper. this
    allows the framework to have the user_id without doing additional DB
    calls.
    '''
    token_data = self.data_store.fetch( 'tokens', token=token )
    if not token_data:
        raise Proauth2Error( 'access_denied',
                             'token does not exist or has been revoked' )
    return token_data['user_id']
Register your own mode and handle method here.
def main(): """Register your own mode and handle method here.""" plugin = Register() if plugin.args.option == 'filenumber': plugin.filenumber_handle() elif plugin.args.option == 'fileage': plugin.fileage_handle() elif plugin.args.option == 'sqlserverlocks': plugin.sqlserverlocks_handle() else: plugin.unknown("Unknown actions.")
Get the number of files in the folder.
def filenumber_handle(self):
    """Get the number of files in the folder."""
    self.file_list = []
    self.count = 0
    status = self.ok

    if self.args.recursion:
        self.__result, self.__file_list = self.__get_folder(self.args.path)
    else:
        self.__result, self.__file_list = self.__get_file(self.args.path)

    # Compare the value.
    if self.__result > self.args.critical:
        status = self.critical
    elif self.__result > self.args.warning:
        status = self.warning
    else:
        status = self.ok

    # Output
    self.shortoutput = "Found {0} files in {1}.".format(self.__result,
                                                        self.args.path)
    self.logger.debug("file_list: {}".format(self.__file_list))
    [self.longoutput.append(file_data.get('Name'))
     for file_data in self.__file_list]
    self.perfdata.append("{path}={result};{warn};{crit};0;".format(
        crit=self.args.critical,
        warn=self.args.warning,
        result=self.__result,
        path=self.args.path))

    # Return status with message to Nagios.
    status(self.output(long_output_limit=None))
    self.logger.debug("Return status and exit to Nagios.")
Get current datetime for every file.
def __get_current_datetime(self):
    """Get current datetime for every file."""
    self.wql_time = "SELECT LocalDateTime FROM Win32_OperatingSystem"
    self.current_time = self.query(self.wql_time)
    # [{'LocalDateTime': '20160824161431.977000+480'}]
    self.current_time_string = str(
        self.current_time[0].get('LocalDateTime').split('.')[0])
    # '20160824161431'
    self.current_time_format = datetime.datetime.strptime(
        self.current_time_string, '%Y%m%d%H%M%S')
    # param: datetime.datetime(2016, 8, 24, 16, 14, 31) -> type:
    # datetime.datetime
    return self.current_time_format
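The string surgery above is easier to see on a literal value; this standalone sketch applies the same parse to the sample timestamp from the inline comment.

# Standalone illustration of the parse above, using the sample value from
# the inline comment. The fractional seconds and the '+480' suffix (a UTC
# offset in minutes) are discarded by the split.
import datetime

raw = '20160824161431.977000+480'   # WMI Win32_OperatingSystem.LocalDateTime
stamp = raw.split('.')[0]           # '20160824161431'
parsed = datetime.datetime.strptime(stamp, '%Y%m%d%H%M%S')
print(parsed)                       # 2016-08-24 16:14:31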
Check the age of the files in the folder.
def fileage_handle(self):
    """Check the age of the files in the folder."""
    self.file_list = []
    self.ok_file = []
    self.warn_file = []
    self.crit_file = []
    status = self.ok

    if self.args.recursion:
        self.__file_list = self.__get_folder(self.args.path)
    else:
        self.__file_list = self.__get_file(self.args.path)
    self.logger.debug("file_list: {}".format(self.__file_list))
    # [{'LastModified': '20160824142017.737101+480', 'Name': 'd:\\test\\1.txt'},
    # {'LastModified': '20160824142021.392101+480', 'Name': 'd:\\test\\2.txt'},
    # {'LastModified': '20160824142106.460101+480', 'Name': 'd:\\test\\test1\\21.txt'}]

    for file_dict in self.__file_list:
        self.filename = file_dict.get('Name')
        if self.filename and self.filename != 'Name':
            self.logger.debug(
                "===== start to compare {} =====".format(
                    self.filename))

            self.file_datetime_string = file_dict.get(
                'LastModified').split('.')[0]
            self.file_datetime = datetime.datetime.strptime(
                self.file_datetime_string, '%Y%m%d%H%M%S')
            self.logger.debug(
                "file_datetime: {}".format(
                    self.file_datetime))

            self.current_datetime = self.__get_current_datetime()
            self.logger.debug(
                "current_datetime: {}".format(
                    self.current_datetime))

            self.__delta_datetime = self.current_datetime - self.file_datetime
            self.logger.debug(
                "delta_datetime: {}".format(
                    self.__delta_datetime))
            self.logger.debug(
                "warn_datetime: {}".format(
                    datetime.timedelta(
                        minutes=self.args.warning)))
            self.logger.debug(
                "crit_datetime: {}".format(
                    datetime.timedelta(
                        minutes=self.args.critical)))

            if self.__delta_datetime > datetime.timedelta(
                    minutes=self.args.critical):
                self.crit_file.append(self.filename)
            elif self.__delta_datetime > datetime.timedelta(minutes=self.args.warning):
                self.warn_file.append(self.filename)
            else:
                self.ok_file.append(self.filename)

    # Compare the value.
    if self.crit_file:
        status = self.critical
    elif self.warn_file:
        status = self.warning
    else:
        status = self.ok

    # Output
    self.shortoutput = "Found {0} files out of date.".format(
        len(self.crit_file))
    if self.crit_file:
        self.longoutput.append("===== Critical File out of date ====")
    [self.longoutput.append(filename)
     for filename in self.crit_file if self.crit_file]
    if self.warn_file:
        self.longoutput.append("===== Warning File out of date ====")
    [self.longoutput.append(filename)
     for filename in self.warn_file if self.warn_file]
    if self.ok_file:
        self.longoutput.append("===== OK File out of date ====")
    [self.longoutput.append(filename)
     for filename in self.ok_file if self.ok_file]
    self.perfdata.append("{path}={result};{warn};{crit};0;".format(
        crit=self.args.critical,
        warn=self.args.warning,
        result=len(self.crit_file),
        path=self.args.drive + self.args.path))

    # Return status with message to Nagios.
    status(self.output(long_output_limit=None))
    self.logger.debug("Return status and exit to Nagios.")
Run your main spider here; as for branch spider result data, you can return everything or do whatever with it in your own code.
def run(self):
    """run your main spider here. as for branch spider result data,
    you can return everything or do whatever with it in your own code

    :return: None
    """
    config = config_creator()
    debug = config.debug
    branch_thread_sleep = config.branch_thread_sleep
    while 1:
        url = self.branch_queue.get()
        if debug:
            print('branch thread-{} start'.format(url))
        branch_spider = self.branch_spider(url)
        sleep(random.randrange(*branch_thread_sleep))
        branch_spider.request_page()
        if debug:
            print('branch thread-{} end'.format(url))
        self.branch_queue.task_done()
Read version info from a file without importing it
def get_version(relpath):
    """Read version info from a file without importing it"""
    from os.path import dirname, join

    if '__file__' not in globals():
        # Allow the function to be used interactively
        root = '.'
    else:
        root = dirname(__file__)

    # The code below reads a text file of unknown encoding in a
    # Python 2/3 compatible way. Reading this text file without
    # specifying an encoding will fail in Python 3 on some systems
    # (see http://goo.gl/5XmOH). Specifying the encoding as an open()
    # parameter is incompatible with Python 2.

    # cp437 is the encoding without missing points, safe against:
    # UnicodeDecodeError: 'charmap' codec can't decode byte...

    for line in open(join(root, relpath), 'rb'):
        line = line.decode('cp437')
        if '__version__' in line:
            if '"' in line:
                # __version__ = "0.9"
                return line.split('"')[1]
            elif "'" in line:
                return line.split("'")[1]
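Typical call site, e.g. from a setup.py; the package path here is hypothetical.

# Usage sketch; 'mypkg/__init__.py' is a made-up path containing a line
# like: __version__ = "0.9"
version = get_version('mypkg/__init__.py')
print(version)  # -> '0.9'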
Make a protobuf Descriptor given a DescriptorProto protobuf.
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True,
                   syntax=None):
    """Make a protobuf Descriptor given a DescriptorProto protobuf.

    Handles nested descriptors. Note that this is limited to the scope of
    defining a message inside of another message. Composite fields can
    currently only be resolved if the message is defined in the same scope
    as the field.

    Args:
      desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
      package: Optional package name for the new message Descriptor (string).
      build_file_if_cpp: Update the C++ descriptor pool if api matches.
                         Set to False on recursion, so no duplicates are
                         created.
      syntax: The syntax/semantics that should be used. Set to "proto3" to
              get proto3 field presence semantics.
    Returns:
      A Descriptor for protobuf messages.
    """
    if api_implementation.Type() == 'cpp' and build_file_if_cpp:
        # The C++ implementation requires all descriptors to be backed by the
        # same definition in the C++ descriptor pool. To do this, we build a
        # FileDescriptorProto with the same definition as this descriptor and
        # build it into the pool.
        from typy.google.protobuf import descriptor_pb2
        file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
        file_descriptor_proto.message_type.add().MergeFrom(desc_proto)

        # Generate a random name for this proto file to prevent conflicts
        # with any imported ones. We need to specify a file name so the
        # descriptor pool accepts our FileDescriptorProto, but it is not
        # important what that file name is actually set to.
        proto_name = str(uuid.uuid4())

        if package:
            file_descriptor_proto.name = os.path.join(
                package.replace('.', '/'), proto_name + '.proto')
            file_descriptor_proto.package = package
        else:
            file_descriptor_proto.name = proto_name + '.proto'
        _message.default_pool.Add(file_descriptor_proto)
        result = _message.default_pool.FindFileByName(
            file_descriptor_proto.name)

        if _USE_C_DESCRIPTORS:
            return result.message_types_by_name[desc_proto.name]

    full_message_name = [desc_proto.name]
    if package:
        full_message_name.insert(0, package)

    # Create Descriptors for enum types
    enum_types = {}
    for enum_proto in desc_proto.enum_type:
        full_name = '.'.join(full_message_name + [enum_proto.name])
        enum_desc = EnumDescriptor(
            enum_proto.name, full_name, None, [
                EnumValueDescriptor(enum_val.name, ii, enum_val.number)
                for ii, enum_val in enumerate(enum_proto.value)])
        enum_types[full_name] = enum_desc

    # Create Descriptors for nested types
    nested_types = {}
    for nested_proto in desc_proto.nested_type:
        full_name = '.'.join(full_message_name + [nested_proto.name])
        # Nested types are just those defined inside of the message, not all
        # types used by fields in the message, so no loops are possible here.
        nested_desc = MakeDescriptor(nested_proto,
                                     package='.'.join(full_message_name),
                                     build_file_if_cpp=False,
                                     syntax=syntax)
        nested_types[full_name] = nested_desc

    fields = []
    for field_proto in desc_proto.field:
        full_name = '.'.join(full_message_name + [field_proto.name])
        enum_desc = None
        nested_desc = None
        if field_proto.HasField('type_name'):
            type_name = field_proto.type_name
            full_type_name = '.'.join(
                full_message_name + [type_name[type_name.rfind('.')+1:]])
            if full_type_name in nested_types:
                nested_desc = nested_types[full_type_name]
            elif full_type_name in enum_types:
                enum_desc = enum_types[full_type_name]
            # Else type_name references a non-local type, which isn't
            # implemented
        field = FieldDescriptor(
            field_proto.name, full_name, field_proto.number - 1,
            field_proto.number, field_proto.type,
            FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
            field_proto.label, None, nested_desc, enum_desc, None, False,
            None, options=field_proto.options, has_default_value=False)
        fields.append(field)

    desc_name = '.'.join(full_message_name)
    return Descriptor(desc_proto.name, desc_name, None, None, fields,
                      list(nested_types.values()),
                      list(enum_types.values()), [],
                      options=desc_proto.options)
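A hedged usage sketch: building a two-field message descriptor from a hand-made DescriptorProto. The 'Point' message shape is invented; the enum constants are standard descriptor_pb2 values.

# Hypothetical usage: construct a descriptor for a made-up 'Point'
# message with two int32 fields.
from typy.google.protobuf import descriptor_pb2

proto = descriptor_pb2.DescriptorProto()
proto.name = 'Point'
for number, field_name in enumerate(('x', 'y'), start=1):
    field = proto.field.add()
    field.name = field_name
    field.number = number
    field.type = descriptor_pb2.FieldDescriptorProto.TYPE_INT32
    field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

desc = MakeDescriptor(proto, package='demo')
print(desc.full_name)                  # demo.Point
print([f.name for f in desc.fields])   # ['x', 'y']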
Returns the root if this is a nested type, or itself if it is the root.
def GetTopLevelContainingType(self):
    """Returns the root if this is a nested type, or itself if it is the root."""
    desc = self
    while desc.containing_type is not None:
        desc = desc.containing_type
    return desc
Searches for the specified method and returns its descriptor.
def FindMethodByName(self, name): """Searches for the specified method, and returns its descriptor.""" for method in self.methods: if name == method.name: return method return None
Register your own mode and handle method here.
def main(): """Register your own mode and handle method here.""" plugin = Register() if plugin.args.option == 'sqlserverlocks': plugin.sqlserverlocks_handle() else: plugin.unknown("Unknown actions.")
Converts protobuf message to JSON format.
def MessageToJson(message, including_default_value_fields=False): """Converts protobuf message to JSON format. Args: message: The protocol buffers message instance to serialize. including_default_value_fields: If True, singular primitive fields, repeated fields, and map fields will always be serialized. If False, only serialize non-empty fields. Singular message fields and oneof fields are not affected by this option. Returns: A string containing the JSON formatted protocol buffer message. """ js = _MessageToJsonObject(message, including_default_value_fields) return json.dumps(js, indent=2)
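Usage sketch with a hypothetical generated message class; any protoc-generated message would behave the same way.

# Usage sketch; 'myproto_pb2' and its 'Greeting' message are made-up
# stand-ins for any protoc-generated module.
from myproto_pb2 import Greeting

msg = Greeting(text='hello')
print(MessageToJson(msg))
# {
#   "text": "hello"
# }
# With including_default_value_fields=True, unset singular primitives,
# repeated fields, and map fields are serialized with their defaults too.
print(MessageToJson(msg, including_default_value_fields=True))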
Converts message to an object according to Proto3 JSON Specification.
def _MessageToJsonObject(message, including_default_value_fields): """Converts message to an object according to Proto3 JSON Specification.""" message_descriptor = message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): return _WrapperMessageToJsonObject(message) if full_name in _WKTJSONMETHODS: return _WKTJSONMETHODS[full_name][0]( message, including_default_value_fields) js = {} return _RegularMessageToJsonObject( message, js, including_default_value_fields)