Converts Struct message according to Proto3 JSON Specification.
def _StructMessageToJsonObject(message, unused_including_default=False): """Converts Struct message according to Proto3 JSON Specification.""" fields = message.fields ret = {} for key in fields: ret[key] = _ValueMessageToJsonObject(fields[key]) return ret
Parses a JSON representation of a protocol message into a message.
def Parse(text, message): """Parses a JSON representation of a protocol message into a message. Args: text: Message JSON representation. message: A protocol buffer message to merge into. Returns: The same message passed as argument. Raises: ParseError: On JSON parsing problems. """ if not isinstance(text, six.text_type): text = text.decode('utf-8') try: if sys.version_info < (2, 7): # object_pairs_hook is not supported before python2.7 js = json.loads(text) else: js = json.loads(text, object_pairs_hook=_DuplicateChecker) except ValueError as e: raise ParseError('Failed to load JSON: {0}.'.format(str(e))) _ConvertMessage(js, message) return message
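A minimal usage sketch for Parse, assuming these helpers live in protobuf's json_format module as the surrounding code suggests; my_pb2 and its MyMessage fields are hypothetical placeholders for any generated message class:

from google.protobuf import json_format
import my_pb2  # hypothetical generated module

msg = my_pb2.MyMessage()
json_format.Parse('{"name": "test", "id": 42}', msg)  # merges into msg, returns it
assert msg.name == 'test' and msg.id == 42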
Convert field value pairs into regular message.
def _ConvertFieldValuePair(js, message): """Convert field value pairs into regular message. Args: js: A JSON object to convert the field value pairs. message: A regular protocol message to record the data. Raises: ParseError: In case of problems converting. """ names = [] message_descriptor = message.DESCRIPTOR for name in js: try: field = message_descriptor.fields_by_camelcase_name.get(name, None) if not field: raise ParseError( 'Message type "{0}" has no field named "{1}".'.format( message_descriptor.full_name, name)) if name in names: raise ParseError( 'Message type "{0}" should not have multiple "{1}" fields.'.format( message.DESCRIPTOR.full_name, name)) names.append(name) # Check no other oneof field is parsed. if field.containing_oneof is not None: oneof_name = field.containing_oneof.name if oneof_name in names: raise ParseError('Message type "{0}" should not have multiple "{1}" ' 'oneof fields.'.format( message.DESCRIPTOR.full_name, oneof_name)) names.append(oneof_name) value = js[name] if value is None: message.ClearField(field.name) continue # Parse field value. if _IsMapEntry(field): message.ClearField(field.name) _ConvertMapFieldValue(value, message, field) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: message.ClearField(field.name) if not isinstance(value, list): raise ParseError('repeated field {0} must be in [] which is ' '{1}.'.format(name, value)) if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: # Repeated message field. for item in value: sub_message = getattr(message, field.name).add() # None is a null_value in Value. if (item is None and sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'): raise ParseError('null is not allowed to be used as an element' ' in a repeated field.') _ConvertMessage(item, sub_message) else: # Repeated scalar field. for item in value: if item is None: raise ParseError('null is not allowed to be used as an element' ' in a repeated field.') getattr(message, field.name).append( _ConvertScalarFieldValue(item, field)) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: sub_message = getattr(message, field.name) _ConvertMessage(value, sub_message) else: setattr(message, field.name, _ConvertScalarFieldValue(value, field)) except ParseError as e: if field and field.containing_oneof is None: raise ParseError('Failed to parse {0} field: {1}'.format(name, e)) else: raise ParseError(str(e)) except ValueError as e: raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) except TypeError as e: raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
Convert a JSON object into a message.
def _ConvertMessage(value, message): """Convert a JSON object into a message. Args: value: A JSON object. message: A WKT or regular protocol message to record the data. Raises: ParseError: In case of convert problems. """ message_descriptor = message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): _ConvertWrapperMessage(value, message) elif full_name in _WKTJSONMETHODS: _WKTJSONMETHODS[full_name][1](value, message) else: _ConvertFieldValuePair(value, message)
Convert a JSON representation into Value message.
def _ConvertValueMessage(value, message): """Convert a JSON representation into Value message.""" if isinstance(value, dict): _ConvertStructMessage(value, message.struct_value) elif isinstance(value, list): _ConvertListValueMessage(value, message.list_value) elif value is None: message.null_value = 0 elif isinstance(value, bool): message.bool_value = value elif isinstance(value, six.string_types): message.string_value = value elif isinstance(value, _INT_OR_FLOAT): message.number_value = value else: raise ParseError('Unexpected type for Value message.')
Convert a JSON representation into ListValue message.
def _ConvertListValueMessage(value, message): """Convert a JSON representation into ListValue message.""" if not isinstance(value, list): raise ParseError( 'ListValue must be in [] which is {0}.'.format(value)) message.ClearField('values') for item in value: _ConvertValueMessage(item, message.values.add())
Convert a JSON representation into Struct message.
def _ConvertStructMessage(value, message): """Convert a JSON representation into Struct message.""" if not isinstance(value, dict): raise ParseError( 'Struct must be in a dict which is {0}.'.format(value)) for key in value: _ConvertValueMessage(value[key], message.fields[key]) return
Update config options with the provided dictionary of options.
def update_config(new_config): """ Update config options with the provided dictionary of options. """ flask_app.base_config.update(new_config) # Check for changed working directory. if 'working_directory' in new_config: wd = os.path.abspath(new_config['working_directory']) if nbmanager.notebook_dir != wd: if not os.path.exists(wd): raise IOError('Path not found: %s' % wd) nbmanager.notebook_dir = wd
Reset config options to defaults, and then update (optionally) with the provided dictionary of options.
def set_config(new_config={}): """ Reset config options to defaults, and then update (optionally) with the provided dictionary of options. """ # The default base configuration. flask_app.base_config = dict(working_directory='.', template='collapse-input', debug=False, port=None) update_config(new_config)
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: ApplicationException: when execution fails for whatever reason.
def execute(self, correlation_id, args): """ Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: ApplicationException: when execution fails for whatever reason. """ # Validate arguments if self._schema is not None: self.validate_and_throw_exception(correlation_id, args) # Call the function try: return self._function(correlation_id, args) # Intercept unhandled errors except Exception as ex: raise InvocationException( correlation_id, "EXEC_FAILED", "Execution " + self._name + " failed: " + str(ex) ).with_details("command", self._name).wrap(ex)
**Optimization method based on Brent's method** First, a bracket (a, b, c) is sought that contains the minimum (b's value is smaller than both a's and c's). The bracket is then recursively halved. Here we apply some modifications to ensure our suggested point is not too close to either a or c, because that could be problematic with the local approximation. Also, if the bracket does not seem to include the minimum, it is expanded generously in the right direction until it covers it. Thus, this function is fail-safe, and will always find a local minimum.
def optimize(function, x0, cons=[], ftol=0.2, disp=0, plot=False): """ **Optimization method based on Brent's method** First, a bracket (a, b, c) is sought that contains the minimum (b's value is smaller than both a's and c's). The bracket is then recursively halved. Here we apply some modifications to ensure our suggested point is not too close to either a or c, because that could be problematic with the local approximation. Also, if the bracket does not seem to include the minimum, it is expanded generously in the right direction until it covers it. Thus, this function is fail-safe, and will always find a local minimum. """ if disp > 0: print print ' ===== custom 1d optimization routine ==== ' print print 'initial suggestion on', function, ':', x0 points = [] values = [] def recordfunction(x): v = function(x) points.append(x) values.append(v) return v (a, b, c), (va, vb, vc) = seek_minimum_bracket(recordfunction, x0, cons=cons, ftol=ftol, disp=disp, plot=plot) if disp > 0: print '---------------------------------------------------' print 'found usable minimum bracket after %d evaluations:' % len(points), (a, b, c), (va, vb, vc) if disp > 2: if plot: plot_values(values, points, lastpoint=-1, ftol=ftol) pause() result = brent(recordfunction, a, b, c, va, vb, vc, cons=cons, ftol=ftol, disp=disp, plot=plot) if disp > 0: print '---------------------------------------------------' print 'found minimum after %d evaluations:' % len(points), result if disp > 1 or len(points) > 20: if plot: plot_values(values, points, lastpoint=-1, ftol=ftol) if disp > 2: pause() if disp > 0: print '---------------------------------------------------' print print ' ===== end of custom 1d optimization routine ==== ' print global neval neval += len(points) return result
This function will attempt to identify 1-sigma errors, assuming your function is a chi^2. For this, the 1-sigma level is bracketed. If you were smart enough to build a cache list of [x, y] into your function, you can pass it here. The values bracketing 1 sigma will be used as starting values. If no such values exist, e.g. because all values were very close to the optimum (good starting values), the bracket is expanded.
def cache2errors(function, cache, disp=0, ftol=0.05): """ This function will attempt to identify 1 sigma errors, assuming your function is a chi^2. For this, the 1-sigma is bracketed. If you were smart enough to build a cache list of [x,y] into your function, you can pass it here. The values bracketing 1 sigma will be used as starting values. If no such values exist, e.g. because all values were very close to the optimum (good starting values), the bracket is expanded. """ vals = numpy.array(sorted(cache, key=lambda x: x[0])) if disp > 0: print ' --- cache2errors --- ', vals vi = vals[:,1].min() def renormedfunc(x): y = function(x) cache.append([x, y]) if disp > 1: print ' renormed:', x, y, y - (vi + 1) return y - (vi + 1) vals[:,1] -= vi + 1 lowmask = vals[:,1] < 0 highmask = vals[:,1] > 0 indices = numpy.arange(len(vals)) b, vb = vals[indices[lowmask][ 0],:] c, vc = vals[indices[lowmask][-1],:] if any(vals[:,0][highmask] < b): if disp > 0: print 'already have bracket' a, va = vals[indices[highmask][vals[:,0][highmask] < b][-1],:] else: a = b va = vb while b > -50: a = b - max(vals[-1,0] - vals[0,0], 1) va = renormedfunc(a) if disp > 0: print 'going further left: %.1f [%.1f] --> %.1f [%.1f]' % (b, vb, a, va) if va > 0: if disp > 0: print 'found outer part' break else: # need to go further b = a vb = va if disp > 0: print 'left bracket', a, b, va, vb if va > 0 and vb < 0: leftroot = scipy.optimize.brentq(renormedfunc, a, b, rtol=ftol) else: if disp > 0: print 'WARNING: border problem found.' leftroot = a if disp > 0: print 'left root', leftroot if any(vals[:,0][highmask] > c): if disp > 0: print 'already have bracket' d, vd = vals[indices[highmask][vals[:,0][highmask] > c][ 0],:] else: d = c vd = vc while c < 50: d = c + max(vals[-1,0] - vals[0,0], 1) vd = renormedfunc(d) if disp > 0: print 'going further right: %.1f [%.1f] --> %.1f [%.1f]' % (c, vc, d, vd) if vd > 0: if disp > 0: print 'found outer part' break else: # need to go further c = d vc = vd if disp > 0: print 'right bracket', c, d, vc, vd if vd > 0 and vc < 0: rightroot = scipy.optimize.brentq(renormedfunc, c, d, rtol=ftol) else: if disp > 0: print 'WARNING: border problem found.' rightroot = d if disp > 0: print 'right root', rightroot assert leftroot < rightroot if disp > 2: fullvals = numpy.array(sorted(cache, key=lambda x: x[0])) fullvals[:,1] -= vi + 1 plt.figure() plt.plot(fullvals[:,0], fullvals[:,1], 's') plt.plot(vals[:,0], vals[:,1], 'o') plt.xlim(a, d) plt.ylim(min(va, vb, vc, vd), max(va, vb, vc, vd)) ymin, ymax = plt.ylim() plt.vlines([leftroot, rightroot], ymin, ymax, linestyles='dotted') plt.savefig('cache_brent.pdf') return leftroot, rightroot
Completes measuring time interval and updates counter.
def end_timing(self): """ Completes measuring time interval and updates counter. """ if self._callback is not None: elapsed = time.clock() * 1000 - self._start self._callback.end_timing(self._counter, elapsed)
Converts Duration to string format.
def ToJsonString(self): """Converts Duration to string format. Returns: A string converted from self. The string format will contain 3, 6, or 9 fractional digits depending on the precision required to represent the exact Duration value. For example: "1s", "1.010s", "1.000000100s", "-3.100s" """ if self.seconds < 0 or self.nanos < 0: result = '-' seconds = - self.seconds + int((0 - self.nanos) // 1e9) nanos = (0 - self.nanos) % 1e9 else: result = '' seconds = self.seconds + int(self.nanos // 1e9) nanos = self.nanos % 1e9 result += '%d' % seconds if (nanos % 1e9) == 0: # If there are 0 fractional digits, the fractional # point '.' should be omitted when serializing. return result + 's' if (nanos % 1e6) == 0: # Serialize 3 fractional digits. return result + '.%03ds' % (nanos / 1e6) if (nanos % 1e3) == 0: # Serialize 6 fractional digits. return result + '.%06ds' % (nanos / 1e3) # Serialize 9 fractional digits. return result + '.%09ds' % nanos
Converts a string to Duration.
def FromJsonString(self, value): """Converts a string to Duration. Args: value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s" Raises: ParseError: On parsing problems. """ if len(value) < 1 or value[-1] != 's': raise ParseError( 'Duration must end with letter "s": {0}.'.format(value)) try: pos = value.find('.') if pos == -1: self.seconds = int(value[:-1]) self.nanos = 0 else: self.seconds = int(value[:pos]) if value[0] == '-': self.nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) else: self.nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) except ValueError: raise ParseError( 'Couldn\'t parse duration: {0}.'.format(value))
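A round-trip sketch for the two Duration methods above, assuming the generated class google.protobuf.duration_pb2.Duration carries these well-known-type helpers (as it does in recent protobuf releases):

from google.protobuf import duration_pb2

d = duration_pb2.Duration()
d.FromJsonString('1.010s')
print(d.seconds, d.nanos)  # 1 10000000
print(d.ToJsonString())    # '1.010s' (3 fractional digits)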
Converts string to FieldMask according to proto3 JSON spec.
def FromJsonString(self, value): """Converts string to FieldMask according to proto3 JSON spec.""" self.Clear() for path in value.split(','): self.paths.append(path)
Return a CouchDB document, given its ID, revision and database name.
def get_doc(doc_id, db_name, server_url='http://127.0.0.1:5984/', rev=None): """Return a CouchDB document, given its ID, revision and database name.""" db = get_server(server_url)[db_name] if rev: headers, response = db.resource.get(doc_id, rev=rev) return couchdb.client.Document(response) return db[doc_id]
Return an (optionally existing) CouchDB database instance.
def get_or_create_db(db_name, server_url='http://127.0.0.1:5984/'): """Return an (optionally existing) CouchDB database instance.""" server = get_server(server_url) if db_name in server: return server[db_name] return server.create(db_name)
Give reST-format README for PyPI.
def read(readme): """Give reST-format README for PyPI.""" extend = os.path.splitext(readme)[1] if (extend == '.rst'): import codecs return codecs.open(readme, 'r', 'utf-8').read() elif (extend == '.md'): import pypandoc return pypandoc.convert(readme, 'rst')
Register your own mode and handle method here.
def main(): """Register your own mode and handle method here.""" plugin = Register() if plugin.args.option == 'sql': plugin.sql_handle() elif plugin.args.option == 'database-used': plugin.database_used_handle() elif plugin.args.option == 'databaselog-used': plugin.database_log_used_handle() else: plugin.unknown("Unknown actions.")
:param args: arguments :type args: None or string or list of string :return: formatted arguments if specified, else ``self.default_args`` :rtype: list of string
def parse(self, args): """ :param args: arguments :type args: None or string or list of string :return: formatted arguments if specified else ``self.default_args`` :rtype: list of string """ if args is None: args = self._default_args if isinstance(args, six.string_types): args = shlex.split(args) return args
Sends an HTTP request to the REST API and receives the requested data.
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) if method == 'get': response = get(uri, auth=self.auth, params=kwargs.get('params', None)) elif method == 'post': response = post(uri, auth=self.auth, json=kwargs.get('data', None)) else: response = delete(uri, auth=self.auth, json=kwargs.get('data', None)) return self._handle_response(response).json()
`<https://docs.exchange.coinbase.com/#orders>`_
def _place_order(self, side, product_id='BTC-USD', client_oid=None, type=None, stp=None, price=None, size=None, funds=None, time_in_force=None, cancel_after=None, post_only=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" data = { 'side':side, 'product_id':product_id, 'client_oid':client_oid, 'type':type, 'stp':stp, 'price':price, 'size':size, 'funds':funds, 'time_in_force':time_in_force, 'cancel_after':cancel_after, 'post_only':post_only } return self._post('orders', data=data)
`<https://docs.exchange.coinbase.com/#orders>`_
def place_limit_order(self, side, price, size, product_id='BTC-USD', client_oid=None, stp=None, time_in_force=None, cancel_after=None, post_only=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" return self._place_order(side, product_id=product_id, client_oid=client_oid, type='limit', stp=stp, price=price, size=size, time_in_force=time_in_force, cancel_after=cancel_after, post_only=post_only)
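A hedged usage sketch for the order helpers above; the client class name and credentials are placeholders for however this authenticated client is actually constructed:

client = AuthenticatedClient(key='KEY', secret='SECRET', passphrase='PASS')  # hypothetical constructor
order = client.place_limit_order('buy', price='100.00', size='0.01', time_in_force='GTC')
print(order.get('id'))  # server-assigned order id on success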
`<https://docs.exchange.coinbase.com/#orders>`_
def place_market_order(self, side, product_id='BTC-USD', size=None, funds=None, client_oid=None, stp=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" return self._place_order(side, product_id=product_id, type='market', size=size, funds=funds, client_oid=client_oid, stp=stp)
`<https://docs.exchange.coinbase.com/#depositwithdraw>`_
def _deposit_withdraw(self, type, amount, coinbase_account_id): """`<https://docs.exchange.coinbase.com/#depositwithdraw>`_""" data = { 'type':type, 'amount':amount, 'coinbase_account_id':coinbase_account_id } return self._post('transfers', data=data)
`<https://docs.exchange.coinbase.com/#create-a-new-report>`_
def _new_report(self, type, start_date, end_date, product_id='BTC-USD', account_id=None, format=None, email=None): """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_""" data = { 'type':type, 'start_date':self._format_iso_time(start_date), 'end_date':self._format_iso_time(end_date), 'product_id':product_id, 'account_id':account_id, 'format':format, 'email':email } return self._post('reports', data=data)
`<https://docs.exchange.coinbase.com/#create-a-new-report>`_
def new_fills_report(self, start_date, end_date, account_id=None, product_id='BTC-USD', format=None, email=None): """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_""" return self._new_report('fills', start_date, end_date, product_id=product_id, account_id=account_id, format=format, email=email)
Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors.
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) if method == 'get': response = get(uri, auth=self.auth, params=kwargs.get('params', None)) elif method == 'post': response = post(uri, auth=self.auth, json=kwargs.get('data', None)) else: response = delete(uri, auth=self.auth, json=kwargs.get('data', None)) self.is_initial = False self.before_cursor = response.headers.get('cb-before', None) self.after_cursor = response.headers.get('cb-after', None) return self._handle_response(response).json()
return one record from the collection whose parameters match kwargs --- kwargs should be a dictionary whose keys match column names (in traditional SQL / fields in NoSQL) and whose values are the values of those fields, e.g. kwargs={name='my application name', client_id=12345}
def fetch(self, collection, **kwargs): ''' return one record from the collection whose parameters match kwargs --- kwargs should be a dictionary whose keys match column names (in traditional SQL / fields in NoSQL) and whose values are the values of those fields. e.g. kwargs={name='my application name',client_id=12345} ''' callback = kwargs.pop('callback') data = yield Op(self.db[collection].find_one, kwargs) callback(data)
remove records from collection whose parameters match kwargs
def remove(self, collection, **kwargs): ''' remove records from collection whose parameters match kwargs ''' callback = kwargs.pop('callback') yield Op(self.db[collection].remove, kwargs) callback()
validate the passed values in kwargs based on the collection store them in the mongodb collection
def store(self, collection, **kwargs): ''' validate the passed values in kwargs based on the collection, store them in the mongodb collection ''' callback = kwargs.pop('callback') key = validate(collection, **kwargs) data = yield Task(self.fetch, collection, **{key: kwargs[key]}) if data is not None: raise Proauth2Error('duplicate_key') yield Op(self.db[collection].insert, kwargs) callback()
Generates a factory function to instantiate the API with the given version.
def generate_api(version): """ Generates a factory function to instantiate the API with the given version. """ def get_partial_api(key, token=None): return TrelloAPI(ENDPOINTS[version], version, key, token=token) get_partial_api.__doc__ = \ """REST interface to Trello. Version {}""".format(version) return get_partial_api
Resolve the URL to this point.
def _url(self): """ Resolve the URL to this point. >>> trello = TrelloAPIV1('APIKEY') >>> trello.batch._url '1/batch' >>> trello.boards(board_id='BOARD_ID')._url '1/boards/BOARD_ID' >>> trello.boards(board_id='BOARD_ID')(field='FIELD')._url '1/boards/BOARD_ID/FIELD' >>> trello.boards(board_id='BOARD_ID').cards(filter='FILTER')._url '1/boards/BOARD_ID/cards/FILTER' """ if self._api_arg: mypart = str(self._api_arg) else: mypart = self._name if self._parent: return '/'.join(filter(None, [self._parent._url, mypart])) else: return mypart
Makes the HTTP request.
def _api_call(self, method_name, *args, **kwargs): """ Makes the HTTP request. """ params = kwargs.setdefault('params', {}) params.update({'key': self._apikey}) if self._token is not None: params.update({'token': self._token}) http_method = getattr(requests, method_name) return http_method(TRELLO_URL + self._url, *args, **kwargs)
Parses a text representation of a protocol message into a message.
def Merge(text, message, allow_unknown_extension=False, allow_field_number=False): """Parses a text representation of a protocol message into a message. Like Parse(), but allows repeated values for a non-repeated field, and uses the last one. Args: text: Message text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ return MergeLines(text.split('\n'), message, allow_unknown_extension, allow_field_number)
Parses a text representation of a protocol message into a message.
def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False): """Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ parser = _Parser(allow_unknown_extension, allow_field_number) return parser.ParseLines(lines, message)
Skips over a field value.
def _SkipFieldValue(tokenizer): """Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found. """ # String/bytes tokens can come in multiple adjacent string literals. # If we can consume one, consume as many as we can. if tokenizer.TryConsumeByteString(): while tokenizer.TryConsumeByteString(): pass return if (not tokenizer.TryConsumeIdentifier() and not tokenizer.TryConsumeInt64() and not tokenizer.TryConsumeUint64() and not tokenizer.TryConsumeFloat()): raise ParseError('Invalid field value: ' + tokenizer.token)
Parses an integer.
def ParseInteger(text, is_signed=False, is_long=False): """Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown if the text is not a valid integer. """ # Do the actual parsing. Exception handling is propagated to caller. try: # We force 32-bit values to int and 64-bit values to long to make # alternate implementations where the distinction is more significant # (e.g. the C++ implementation) simpler. if is_long: result = long(text, 0) else: result = int(text, 0) except ValueError: raise ValueError('Couldn\'t parse integer: %s' % text) # Check if the integer is sane. Exceptions handled by callers. checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] checker.CheckValue(result) return result
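Because ParseInteger delegates to int(text, 0) (or long(text, 0)), it accepts decimal, hexadecimal, and octal spellings; a quick standalone illustration of that base-0 behavior:

print(int('42', 0))    # 42 (decimal)
print(int('0x2a', 0))  # 42 (hexadecimal)
print(int('0o52', 0))  # 42 (octal)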
Convert protobuf message to text format.
def PrintMessage(self, message): """Convert protobuf message to text format. Args: message: The protocol buffers message. """ fields = message.ListFields() if self.use_index_order: fields.sort(key=lambda x: x[0].index) for field, value in fields: if _IsMapEntry(field): for key in sorted(value): # This is slow for maps with submessage entries because it copies the # entire tree. Unfortunately this would take significant refactoring # of this file to work around. # # TODO(haberman): refactor and optimize if this becomes an issue. entry_submsg = field.message_type._concrete_class( key=key, value=value[key]) self.PrintField(field, entry_submsg) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: for element in value: self.PrintField(field, element) else: self.PrintField(field, value)
Print a single field value ( not including name ).
def PrintFieldValue(self, field, value): """Print a single field value (not including name). For repeated fields, the value should be a single element. Args: field: The descriptor of the field to be printed. value: The value of the field. """ out = self.out if self.pointy_brackets: openb = '<' closeb = '>' else: openb = '{' closeb = '}' if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: if self.as_one_line: out.write(' %s ' % openb) self.PrintMessage(value) out.write(closeb) else: out.write(' %s\n' % openb) self.indent += 2 self.PrintMessage(value) self.indent -= 2 out.write(' ' * self.indent + closeb) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: enum_value = field.enum_type.values_by_number.get(value, None) if enum_value is not None: out.write(enum_value.name) else: out.write(str(value)) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: out.write('\"') if isinstance(value, six.text_type): out_value = value.encode('utf-8') else: out_value = value if field.type == descriptor.FieldDescriptor.TYPE_BYTES: # We need to escape non-UTF8 chars in TYPE_BYTES field. out_as_utf8 = False else: out_as_utf8 = self.as_utf8 out.write(text_encoding.CEscape(out_value, out_as_utf8)) out.write('\"') elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: if value: out.write('true') else: out.write('false') elif field.cpp_type in _FLOAT_TYPES and self.float_format is not None: out.write('{1:{0}}'.format(self.float_format, value)) else: out.write(str(value))
Converts an text representation of a protocol message into a message.
def _ParseOrMerge(self, lines, message): """Converts an text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems. """ tokenizer = _Tokenizer(lines) while not tokenizer.AtEnd(): self._MergeField(tokenizer, message)
Merges a single message field into a message.
def _MergeMessageField(self, tokenizer, message, field): """Merges a single message field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems. """ is_map_entry = _IsMapEntry(field) if tokenizer.TryConsume('<'): end_token = '>' else: tokenizer.Consume('{') end_token = '}' if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: if field.is_extension: sub_message = message.Extensions[field].add() elif is_map_entry: # pylint: disable=protected-access sub_message = field.message_type._concrete_class() else: sub_message = getattr(message, field.name).add() else: if field.is_extension: sub_message = message.Extensions[field] else: sub_message = getattr(message, field.name) sub_message.SetInParent() while not tokenizer.TryConsume(end_token): if tokenizer.AtEnd(): raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) self._MergeField(tokenizer, sub_message) if is_map_entry: value_cpptype = field.message_type.fields_by_name['value'].cpp_type if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: value = getattr(message, field.name)[sub_message.key] value.MergeFrom(sub_message.value) else: getattr(message, field.name)[sub_message.key] = sub_message.value
Consumes protocol message field identifier.
def ConsumeIdentifier(self): """Consumes protocol message field identifier. Returns: Identifier string. Raises: ParseError: If an identifier couldn't be consumed. """ result = self.token if not self._IDENTIFIER.match(result): raise self._ParseError('Expected identifier.') self.NextToken() return result
Consumes a signed 32bit integer number.
def ConsumeInt32(self): """Consumes a signed 32bit integer number. Returns: The integer parsed. Raises: ParseError: If a signed 32bit integer couldn't be consumed. """ try: result = ParseInteger(self.token, is_signed=True, is_long=False) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
Consumes a floating point number.
def ConsumeFloat(self): """Consumes a floating point number. Returns: The number parsed. Raises: ParseError: If a floating point number couldn't be consumed. """ try: result = ParseFloat(self.token) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
Consumes a boolean value.
def ConsumeBool(self): """Consumes a boolean value. Returns: The bool parsed. Raises: ParseError: If a boolean value couldn't be consumed. """ try: result = ParseBool(self.token) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
Consume one token of a string literal.
def _ConsumeSingleByteString(self): """Consume one token of a string literal. String literals (whether bytes or text) can come in multiple adjacent tokens which are automatically concatenated, like in C or Python. This method only consumes one token. Returns: The token parsed. Raises: ParseError: When the wrong format data is found. """ text = self.token if len(text) < 1 or text[0] not in _QUOTES: raise self._ParseError('Expected string but found: %r' % (text,)) if len(text) < 2 or text[-1] != text[0]: raise self._ParseError('String missing ending quote: %r' % (text,)) try: result = text_encoding.CUnescape(text[1:-1]) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
Returns a human-readable timestamp given a Unix timestamp 't' or for the current time. The Unix timestamp is the number of seconds since start of epoch (1970-01-01 00:00:00). When forfilename is True, then spaces and semicolons are replaced with hyphens. The returned string is usable as a (part of a) filename.
def timestamp(t = None, forfilename=False): """Returns a human-readable timestamp given a Unix timestamp 't' or for the current time. The Unix timestamp is the number of seconds since start of epoch (1970-01-01 00:00:00). When forfilename is True, then spaces and semicolons are replaced with hyphens. The returned string is usable as a (part of a) filename. """ datetimesep = ' ' timesep = ':' if forfilename: datetimesep = '-' timesep = '-' return time.strftime('%Y-%m-%d' + datetimesep + '%H' + timesep + '%M' + timesep + '%S', time.localtime(t))
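For illustration, two calls under the stated behavior (the exact output depends on the local time zone, since time.localtime is used):

print(timestamp(0))                 # e.g. '1970-01-01 01:00:00' in a UTC+1 zone
print(timestamp(forfilename=True))  # e.g. '2017-03-21-15-55-44', safe in filenames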
Returns a human-readable timestamp given an Ark timestamp 'arkt'. An Ark timestamp is the number of seconds since the genesis block, 2017-03-21 15:55:44.
def arktimestamp(arkt, forfilename=False): """Returns a human-readable timestamp given an Ark timestamp 'arkt'. An Ark timestamp is the number of seconds since the genesis block, 2017-03-21 15:55:44.""" t = arkt + time.mktime((2017, 3, 21, 15, 55, 44, 0, 0, 0)) return '%d %s' % (arkt, timestamp(t))
convert ark timestamp to unix timestamp
def arkt_to_unixt(ark_timestamp): """ convert ark timestamp to unix timestamp""" res = datetime.datetime(2017, 3, 21, 15, 55, 44) + datetime.timedelta(seconds=ark_timestamp) return res.timestamp()
Close the connection.
def close(self): """Close the connection.""" try: self.conn.close() self.logger.debug("Connection closed successfully.") except pymssql.Error as e: self.unknown("Error closing connection: %s" % e)
Extract package __version__
def get_version(): """Extract package __version__""" with open(VERSION_FILE, encoding='utf-8') as fp: content = fp.read() match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', content, re.M) if match: return match.group(1) raise RuntimeError("Could not extract package __version__")
**Differential evolution** via `inspyred <http://inspyred.github.io/>`_, specially tuned. Steady-state replacement, n-point crossover, pop size 20, gaussian mutation noise 0.01 & 1e-6. Stores intermediate results (can be used for resume, see seeds). :param start: start point :param seeds: list of start points :param vizfunc: callback to do visualization of current best solution :param printfunc: callback to summarize current best solution :param seed: RNG initialization (if set)
def de(output_basename, parameter_names, transform, loglikelihood, prior, nsteps=40000, vizfunc=None, printfunc=None, **problem): """ **Differential evolution** via `inspyred <http://inspyred.github.io/>`_, specially tuned. Steady-state replacement, n-point crossover, pop size 20, gaussian mutation noise 0.01 & 1e-6. Stores intermediate results (can be used for resume, see seeds). :param start: start point :param seeds: list of start points :param vizfunc: callback to do visualization of current best solution :param printfunc: callback to summarize current best solution :param seed: RNG initialization (if set) """ import json import inspyred import random prng = random.Random() if 'seed' in problem: prng.seed(problem['seed']) n_params = len(parameter_names) seeds = problem.get('seeds', []) if 'start' in problem: seeds.append(problem['start']) prefix = output_basename def viz(candidate, args): if vizfunc is not None: vizfunc(candidate) def print_candidate(candidate, l, args): if printfunc is not None: printfunc(cube=candidate, loglikelihood=l) else: print l, candidate def eval_candidate(candidate): params = transform(candidate) l = loglikelihood(params) p = prior(params) if numpy.isinf(p) and p < 0: print ' prior rejection' return -1e300, 0 if numpy.isnan(l): return -1e300, 0 return l, p @inspyred.ec.utilities.memoize @inspyred.ec.evaluators.evaluator def fitness(candidate, args): l, p = eval_candidate(candidate) #print_candidate(candidate, (l + p), args) return (l + p) cutoff_store = 10 def solution_archiver(random, population, archive, args): psize = len(population) population.sort(reverse=True) best = population[0].fitness #print 'BEST: ', best, all_candidates = sorted(population + archive, reverse=True) all_fitness = numpy.array([c.fitness for c in all_candidates]) mask = best - all_fitness > cutoff_store / 3 if mask.sum() < 20: mask = best - all_fitness > cutoff_store newarchive = [c for i, c in enumerate(all_candidates) if i == 0 or all_fitness[i - 1] != c.fitness] print 'ARCHIVE: ', len(archive), len(newarchive) json.dump([{'candidate': [float(f) for f in c.candidate], 'fitness':c.fitness} for c in newarchive], open(prefix + '_values.json', 'w'), indent=4) return newarchive def observer(population, num_generations, num_evaluations, args): population.sort(reverse=True) candidate = population[0] print ('{0} evaluations'.format(num_evaluations)), ' best:', print_candidate(candidate.candidate, candidate.fitness, args) if num_evaluations % len(population) == 0 or num_evaluations < len(population) or args.get('force_viz', False): # for each turnaround of a full generation viz(candidate.candidate, args) def generator(random, args): u = [random.uniform(0, 1) for _ in range(n_params)] u = [random.gauss(0.5, 0.1) for _ in range(n_params)] return bounder(u, args) ea = inspyred.ec.DEA(prng) ea.terminator = inspyred.ec.terminators.evaluation_termination ea.archiver = solution_archiver bounder = inspyred.ec.Bounder(lower_bound=1e-10, upper_bound=1-1e-10) #bounder = inspyred.ec.Bounder(lower_bound=-20, upper_bound=20) import copy from math import log @inspyred.ec.variators.mutator def double_exponential_mutation(random, candidate, args): mut_rate = args.setdefault('mutation_rate', 0.1) mean = args.setdefault('gaussian_mean', 0.0) stdev = args.setdefault('gaussian_stdev', 1.0) scale = log(0.5) / - (stdev) bounder = args['_ec'].bounder mutant = copy.copy(candidate) for i, m in enumerate(mutant): dice = random.random() if dice < mut_rate: sign = (dice < mut_rate / 2) * 2 - 1 delta = -log(random.random()) / scale mutant[i] += delta * sign mutant = bounder(mutant, args) return mutant def minute_gaussian_mutation(random, candidates, args): args = dict(args) args['mutation_rate'] = 1 args['gaussian_stdev'] = 1e-6 return inspyred.ec.variators.gaussian_mutation(random, candidates, args) ea.variator = [inspyred.ec.variators.n_point_crossover, inspyred.ec.variators.gaussian_mutation, minute_gaussian_mutation] #ea.variator = [inspyred.ec.variators.n_point_crossover, double_exponential_mutation] ea.replacer = inspyred.ec.replacers.steady_state_replacement ea.observer = observer pop_size = 20 final_pop = ea.evolve(pop_size=pop_size, max_evaluations=nsteps, maximize=True, seeds=seeds, gaussian_stdev=0.01, #mutation_rate=0.3, bounder=bounder, generator=generator, evaluator=fitness, ) best = max(final_pop) seeds = [c.candidate for c in ea.archive] print 'final candidate:', best return {'start': best.candidate, 'value': best.fitness, 'seeds': seeds, 'method': 'DE'}
Replace macros with content defined in the config.
def process_macros(self, content: str) -> str: '''Replace macros with content defined in the config. :param content: Markdown content :returns: Markdown content without macros ''' def _sub(macro): name = macro.group('body') params = self.get_options(macro.group('options')) return self.options['macros'].get(name, '').format_map(params) return self.pattern.sub(_sub, content)
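For orientation, a standalone sketch of the substitution idea with hypothetical data: a macros mapping as it might appear in the config, and params as get_options might parse them from the matched options group:

macros = {'version': 'v{number}'}   # hypothetical 'macros' config entry
params = {'number': '1.0'}          # as parsed from the macro's options
print(macros.get('version', '').format_map(params))  # 'v1.0'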
Sends an HTTP request to the REST API and receives the requested data.
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) response = get(uri, params=kwargs.get('params', None)) return self._handle_response(response).json()
`<https://docs.exchange.coinbase.com/#get-historic-rates>`_
def get_historic_trades(self, start, end, granularity, product_id='BTC-USD'): """`<https://docs.exchange.coinbase.com/#get-historic-rates>`_ :param start: either datetime.datetime or str in ISO 8601 :param end: either datetime.datetime or str in ISO 8601 :param int granularity: desired timeslice in seconds :returns: desired data """ params = { 'start':self._format_iso_time(start), 'end':self._format_iso_time(end), 'granularity':granularity } return self._get('products', product_id, 'candles', params=params)
Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors.
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) response = get(uri, params=self._get_params(**kwargs)) self.is_initial = False self.before_cursor = response.headers.get('cb-before', None) self.after_cursor = response.headers.get('cb-after', None) return self._handle_response(response).json()
Return a pathname possibly with a number appended to it so that it is unique in the directory.
def get_unique_pathname(path, root=''): """Return a pathname possibly with a number appended to it so that it is unique in the directory.""" path = os.path.join(root, path) # consider the path supplied, then the paths with numbers appended potentialPaths = itertools.chain((path,), __get_numbered_paths(path)) potentialPaths = six.moves.filterfalse(os.path.exists, potentialPaths) return next(potentialPaths)
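A usage sketch of get_unique_pathname (the file name and root are illustrative):

# returns '/tmp/report.txt' if free; otherwise '/tmp/report (1).txt', '(2)', ...
path = get_unique_pathname('report.txt', root='/tmp')
open(path, 'w').close()  # claim the name before asking for another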
Append numbers in sequential order to the filename or folder name. Numbers should be appended before the extension on a filename.
def __get_numbered_paths(filepath): """Append numbers in sequential order to the filename or folder name. Numbers should be appended before the extension on a filename.""" format = '%s (%%d)%s' % splitext_files_only(filepath) return map(lambda n: format % n, itertools.count(1))
Custom version of splitext that doesn't perform splitext on directories
def splitext_files_only(filepath): "Custom version of splitext that doesn't perform splitext on directories" return ( (filepath, '') if os.path.isdir(filepath) else os.path.splitext(filepath) )
Set the modified time of a file
def set_time(filename, mod_time): """ Set the modified time of a file """ log.debug('Setting modified time to %s', mod_time) mtime = calendar.timegm(mod_time.utctimetuple()) # utctimetuple discards microseconds, so restore it (for consistency) mtime += mod_time.microsecond / 1000000 atime = os.stat(filename).st_atime os.utime(filename, (atime, mtime))
Get the modified time for a file as a datetime instance
def get_time(filename): """ Get the modified time for a file as a datetime instance """ ts = os.stat(filename).st_mtime return datetime.datetime.utcfromtimestamp(ts)
Given a filename and some content, insert the content just before the extension. >>> insert_before_extension('pages.pdf', '-old') 'pages-old.pdf'
def insert_before_extension(filename, content): """ Given a filename and some content, insert the content just before the extension. >>> insert_before_extension('pages.pdf', '-old') 'pages-old.pdf' """ parts = list(os.path.splitext(filename)) parts[1:1] = [content] return ''.join(parts)
Like iglob, but recurse directories. >>> any('path.py' in result for result in recursive_glob('.', '*.py')) True >>> all(result.startswith('.') for result in recursive_glob('.', '*.py')) True >>> len(list(recursive_glob('.', '*.foo'))) 0
def recursive_glob(root, spec): """ Like iglob, but recurse directories >>> any('path.py' in result for result in recursive_glob('.', '*.py')) True >>> all(result.startswith('.') for result in recursive_glob('.', '*.py')) True >>> len(list(recursive_glob('.', '*.foo'))) 0 """ specs = ( os.path.join(dirpath, dirname, spec) for dirpath, dirnames, filenames in os.walk(root) for dirname in dirnames ) return itertools.chain.from_iterable( glob.iglob(spec) for spec in specs )
Encode the name for a suitable name in the given filesystem >>> encode('Test :1') 'Test _1'
def encode(name, system='NTFS'): """ Encode the name for a suitable name in the given filesystem >>> encode('Test :1') 'Test _1' """ assert system == 'NTFS', 'unsupported filesystem' special_characters = r'<>:"/\|?*' + ''.join(map(chr, range(32))) pattern = '|'.join(map(re.escape, special_characters)) pattern = re.compile(pattern) return pattern.sub('_', name)
wrap a function that returns a dir, making sure it exists
def ensure_dir_exists(func): "wrap a function that returns a dir, making sure it exists" @functools.wraps(func) def make_if_not_present(): dir = func() if not os.path.isdir(dir): os.makedirs(dir) return dir return make_if_not_present
Read file in chunks of size chunk_size (or smaller). If update_func is specified, call it on every chunk with the amount read.
def read_chunks(file, chunk_size=2048, update_func=lambda x: None): """ Read file in chunks of size chunk_size (or smaller). If update_func is specified, call it on every chunk with the amount read. """ while(True): res = file.read(chunk_size) if not res: break update_func(len(res)) yield res
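A short consumer sketch: copy a file while tracking progress through update_func (the file names are placeholders):

copied = []
with open('src.bin', 'rb') as src, open('dst.bin', 'wb') as dst:
    for chunk in read_chunks(src, chunk_size=4096, update_func=copied.append):
        dst.write(chunk)
print('bytes copied:', sum(copied))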
Check whether a file is presumed hidden, either because the pathname starts with a dot or because the platform indicates such.
def is_hidden(path): """ Check whether a file is presumed hidden, either because the pathname starts with dot or because the platform indicates such. """ full_path = os.path.abspath(path) name = os.path.basename(full_path) def no(path): return False platform_hidden = globals().get('is_hidden_' + platform.system(), no) return name.startswith('.') or platform_hidden(full_path)
Get closer to your EOL
def age(self): """ Get closer to your EOL """ # 0 means this composer will never decompose if self.rounds == 1: self.do_run = False elif self.rounds > 1: self.rounds -= 1
Open a connection over the serial line and receive data lines
def run(self): """ Open a connection over the serial line and receive data lines """ if not self.device: return try: data = "" while (self.do_run): try: if (self.device.inWaiting() > 1): l = self.device.readline()[:-2] l = l.decode("UTF-8") if (l == "["): # start recording data = "[" elif (l == "]") and (len(data) > 4) and (data[0] == "["): # now parse the input data = data + "]" self.store.register_json(data) self.age() elif (l[0:3] == " {"): # this is a data line data = data + " " + l else: # this is a slow interface - give it some time sleep(1) # then count down.. self.age() except (UnicodeDecodeError, ValueError): # only accepting unicode: throw away the whole bunch data = "" # and count down the exit condition self.age() except serial.serialutil.SerialException: print("Could not connect to the serial line at " + self.device_name)
create & start main thread
def append_main_thread(self): """create & start main thread :return: None """ thread = MainThread(main_queue=self.main_queue, main_spider=self.main_spider, branch_spider=self.branch_spider) thread.daemon = True thread.start()
Scans through all children of node and gathers the text. If node has non-text child nodes then NotTextNodeError is raised.
def getTextFromNode(node): """ Scans through all children of node and gathers the text. If node has non-text child-nodes then NotTextNodeError is raised. """ t = "" for n in node.childNodes: if n.nodeType == n.TEXT_NODE: t += n.nodeValue else: raise NotTextNodeError return t
Get the number of credits remaining at AmbientSMS
def getbalance(self, url='http://services.ambientmobile.co.za/credits'): """ Get the number of credits remaining at AmbientSMS """ postXMLList = [] postXMLList.append("<api-key>%s</api-key>" % self.api_key) postXMLList.append("<password>%s</password>" % self.password) postXML = '<sms>%s</sms>' % "".join(postXMLList) result = self.curl(url, postXML) if result.get("credits", None): return result["credits"] else: raise AmbientSMSError(result["status"])
Send a message via the AmbientSMS API server
def sendmsg(self, message, recipient_mobiles=[], url='http://services.ambientmobile.co.za/sms', concatenate_message=True, message_id=None, reply_path=None, allow_duplicates=True, allow_invalid_numbers=True, ): """ Send a message via the AmbientSMS API server """ if not recipient_mobiles or not(isinstance(recipient_mobiles, list) \ or isinstance(recipient_mobiles, tuple)): raise AmbientSMSError("Missing recipients") if not message or not len(message): raise AmbientSMSError("Missing message") if message_id is None: # generate per call; a def-time default would be frozen at import message_id = str(time()).replace(".", "") postXMLList = [] postXMLList.append("<api-key>%s</api-key>" % self.api_key) postXMLList.append("<password>%s</password>" % self.password) postXMLList.append("<recipients>%s</recipients>" % \ "".join(["<mobile>%s</mobile>" % \ m for m in recipient_mobiles])) postXMLList.append("<msg>%s</msg>" % message) postXMLList.append("<concat>%s</concat>" % \ (1 if concatenate_message else 0)) postXMLList.append("<message_id>%s</message_id>" % message_id) postXMLList.append("<allow_duplicates>%s</allow_duplicates>" % \ (1 if allow_duplicates else 0)) postXMLList.append( "<allow_invalid_numbers>%s</allow_invalid_numbers>" % \ (1 if allow_invalid_numbers else 0) ) if reply_path: postXMLList.append("<reply_path>%s</reply_path>" % reply_path) postXML = '<sms>%s</sms>' % "".join(postXMLList) result = self.curl(url, postXML) status = result.get("status", None) if status and int(status) in [0, 1, 2]: return result else: raise AmbientSMSError(status)
Interface for sending web requests to the AmbientSMS API Server
def curl(self, url, post): """ Interface for sending web requests to the AmbientSMS API Server """ try: req = urllib2.Request(url) req.add_header("Content-type", "application/xml") data = urllib2.urlopen(req, post.encode('utf-8')).read() except urllib2.URLError, v: raise AmbientSMSError(v) return dictFromXml(data)
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason.
def execute(self, correlation_id, args): """ Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason. """ return self._intercepter.execute(_next, correlation_id, args)
Called for each file. Must return file content. Can be wrapped.
def contents(self, f, text): """ Called for each file Must return file content Can be wrapped :type f: static_bundle.files.StaticFileResult :type text: str|unicode :rtype: str|unicode """ text += self._read(f.abs_path) + "\r\n" return text
Return True if the class is a date type.
def is_date_type(cls): """Return True if the class is a date type.""" if not isinstance(cls, type): return False return issubclass(cls, date) and not issubclass(cls, datetime)
Convert a date or time to a datetime. If when is a date then it sets the time to midnight. If when is a time it sets the date to the epoch. If when is None or a datetime it returns when. Otherwise a TypeError is raised. Returned datetimes have tzinfo set to None unless when is a datetime with tzinfo set in which case it remains the same.
def to_datetime(when): """ Convert a date or time to a datetime. If when is a date then it sets the time to midnight. If when is a time it sets the date to the epoch. If when is None or a datetime it returns when. Otherwise a TypeError is raised. Returned datetimes have tzinfo set to None unless when is a datetime with tzinfo set in which case it remains the same. """ if when is None or is_datetime(when): return when if is_time(when): return datetime.combine(epoch.date(), when) if is_date(when): return datetime.combine(when, time(0)) raise TypeError("unable to convert {} to datetime".format(when.__class__.__name__))
Return a date, time, or datetime converted to a datetime in the given timezone. If when is a datetime and has no timezone it is assumed to be local time. Date and time objects are also assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to a datetime.
def totz(when, tz=None): """ Return a date, time, or datetime converted to a datetime in the given timezone. If when is a datetime and has no timezone it is assumed to be local time. Date and time objects are also assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to a datetime. """ if when is None: return None when = to_datetime(when) if when.tzinfo is None: when = when.replace(tzinfo=localtz) return when.astimezone(tz or utc)
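A worked example of the conversion rules above, assuming pytz supplies tzinfo objects compatible with the localtz and utc names used in this module:

from datetime import datetime
import pytz  # assumed source of tzinfo objects

eastern = pytz.timezone('US/Eastern')
naive = datetime(2017, 3, 21, 15, 55, 44)  # no tzinfo: treated as local time
print(totz(naive))           # same instant, rendered in UTC
print(totz(naive, eastern))  # same instant, rendered in US/Eastern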
Return a datetime so much time ago. Takes the same arguments as timedelta().
def timeago(tz=None, *args, **kwargs): """Return a datetime so much time ago. Takes the same arguments as timedelta().""" return totz(datetime.now(), tz) - timedelta(*args, **kwargs)
Return a Unix timestamp in seconds for the provided datetime. The totz function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided.
def ts(when, tz=None): """ Return a Unix timestamp in seconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided. """ if not when: return None when = totz(when, tz) return calendar.timegm(when.timetuple())
Return a Unix timestamp in milliseconds for the provided datetime. The totz function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided.
def tsms(when, tz=None): """ Return a Unix timestamp in milliseconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided. """ if not when: return None when = totz(when, tz) return calendar.timegm(when.timetuple()) * 1000 + int(round(when.microsecond / 1000.0))
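The millisecond variant is the second-resolution timestamp scaled by 1000 plus the rounded microseconds; a worked example (utc as defined in this module):

from datetime import datetime
when = datetime(1970, 1, 1, 0, 0, 1, 500000, tzinfo=utc)
print(ts(when))    # 1
print(tsms(when))  # 1500 = 1 * 1000 + round(500000 / 1000.0)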
Return the datetime representation of the provided Unix timestamp. By default the timestamp is interpreted as UTC. If tzin is set, it will be interpreted in this timezone instead. By default the output datetime will have UTC time. If tzout is set, it will be converted to this timezone instead.
def fromts(ts, tzin=None, tzout=None): """ Return the datetime representation of the provided Unix timestamp. By default the timestamp is interpreted as UTC. If tzin is set, it will be interpreted in this timezone instead. By default the output datetime will have UTC time. If tzout is set, it will be converted to this timezone instead. """ if ts is None: return None when = datetime.utcfromtimestamp(ts).replace(tzinfo=tzin or utc) return totz(when, tzout)
Return the Unix timestamp in milliseconds as a datetime object. If tzout is set it will be converted to the requested timezone, otherwise it defaults to UTC.
def fromtsms(ts, tzin=None, tzout=None): """ Return the Unix timestamp in milliseconds as a datetime object. If tzout is set it will be converted to the requested timezone, otherwise it defaults to UTC. """ if ts is None: return None when = datetime.utcfromtimestamp(ts / 1000).replace(microsecond=ts % 1000 * 1000) when = when.replace(tzinfo=tzin or utc) return totz(when, tzout)
Return the datetime truncated to the precision of the provided unit.
def truncate(when, unit, week_start=mon):
    """Return the datetime truncated to the precision of the provided unit."""
    if is_datetime(when):
        if unit == millisecond:
            # floor to whole milliseconds; rounding could overflow to microsecond=1000000
            return when.replace(microsecond=when.microsecond // 1000 * 1000)
        elif unit == second:
            return when.replace(microsecond=0)
        elif unit == minute:
            return when.replace(second=0, microsecond=0)
        elif unit == hour:
            return when.replace(minute=0, second=0, microsecond=0)
        elif unit == day:
            return when.replace(hour=0, minute=0, second=0, microsecond=0)
        elif unit == week:
            weekday = prevweekday(when, week_start)
            return when.replace(year=weekday.year, month=weekday.month, day=weekday.day,
                                hour=0, minute=0, second=0, microsecond=0)
        elif unit == month:
            return when.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        elif unit == year:
            return when.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
    elif is_date(when):
        if unit == week:
            return prevweekday(when, week_start)
        elif unit == month:
            return when.replace(day=1)
        elif unit == year:
            return when.replace(month=1, day=1)
    elif is_time(when):
        if unit == millisecond:
            return when.replace(microsecond=when.microsecond // 1000 * 1000)
        elif unit == second:
            return when.replace(microsecond=0)
        elif unit == minute:
            return when.replace(second=0, microsecond=0)
    return when
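A short sketch (assuming the module's unit constants such as `hour` and `week` are in scope):

from datetime import datetime

d = datetime(2020, 5, 17, 14, 45, 30, 123456)
print(truncate(d, hour))    # 2020-05-17 14:00:00
print(truncate(d, week))    # midnight of the week's start day (Monday by default)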
Return the date of the given weekday within the same week as the given date.
def weekday(when, weekday, start=mon):
    """Return the date of the given weekday within the same week as the given date."""
    if isinstance(when, datetime):
        when = when.date()
    today = when.weekday()
    delta = weekday - today
    if weekday < start and today >= start:
        delta += 7
    elif weekday >= start and today < start:
        delta -= 7
    return when + timedelta(days=delta)
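For example, assuming the module's weekday constants (`mon` = 0 .. `sun` = 6):

from datetime import date

d = date(2020, 5, 17)       # a Sunday
print(weekday(d, mon))      # 2020-05-11, the Monday of the same week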
Return the date for the most recent day of the week. If inclusive is True (the default) today may count as the weekday we're looking for.
def prevweekday(when, weekday, inclusive=True): """ Return the date for the most recent day of the week. If inclusive is True (the default) today may count as the weekday we're looking for. """ if isinstance(when, datetime): when = when.date() delta = weekday - when.weekday() if (inclusive and delta > 0) or (not inclusive and delta >= 0): delta -= 7 return when + timedelta(days=delta)
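For example (again assuming the module's weekday constants):

from datetime import date

d = date(2020, 5, 17)                        # a Sunday
print(prevweekday(d, fri))                   # 2020-05-15, the most recent Friday
print(prevweekday(d, sun, inclusive=False))  # 2020-05-10, today excluded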
**optimization algorithm for scale variables (positive values of unknown magnitude)** Each parameter is a normalization of a feature and its value is sought. The parameters are handled in order (assumed to be independent) but a second round can be run. Various magnitudes of the normalization are tried. If the normalization converges to zero the largest value yielding a comparable value is used.
def opt_normalizations(params, func, limits, abandon_threshold=100, noimprovement_threshold=1e-3, disp=0):
    """
    **optimization algorithm for scale variables (positive values of unknown magnitude)**

    Each parameter is a normalization of a feature and its value is sought.
    The parameters are handled in order (assumed to be independent),
    but a second round can be run.
    Various magnitudes of the normalization are tried. If the normalization converges
    to zero, the largest value yielding a comparable value is used.

    Optimizes each normalization parameter in rough steps using multiples of 3 of the
    start point, to find reasonable starting values for another algorithm.

    parameters, minimization function, parameter space definition [(lo, hi) for i in params]

    :param abandon_threshold:
        if, in one direction, the function increases by this much over the best value,
        abort the search in this direction
    :param noimprovement_threshold:
        when decreasing the normalization, if the function increases by less than this
        amount, abort the search in this direction
    :param disp: verbosity
    """
    newparams = numpy.copy(params)
    lower = [lo for lo, hi in limits]
    upper = [hi for lo, hi in limits]
    for i, p in enumerate(params):
        startval = p
        beststat = func(newparams)
        bestval = startval
        if disp > 0:
            print('\t\tstart val = %e: %e' % (startval, beststat))
        go_up = True
        go_down = True
        # go up and down in multiples of 3
        # once that is done, refine in multiples of 1.1
        for n in list(3.**numpy.arange(1, 20)) + [None] + list(1.1**numpy.arange(1, 13)):
            if n is None:
                startval = bestval
                if disp > 0:
                    print('\t\trefining from %e' % (startval))
                go_up = True
                go_down = True
                continue
            if go_up and startval * n > upper[i]:
                if disp > 0:
                    print('\t\thit upper border (%e * %e > %e)' % (startval, n, upper[i]))
                go_up = False
            if go_down and startval / n < lower[i]:
                if disp > 0:
                    print('\t\thit lower border (%e / %e < %e)' % (startval, n, lower[i]))
                go_down = False
            if go_up:
                if disp > 1:
                    print('\t\ttrying %e ^' % (startval * n))
                newparams[i] = startval * n
                newstat = func(newparams)
                if disp > 1:
                    print('\t\tval = %e: %e' % (newparams[i], newstat))
                if newstat <= beststat:
                    bestval = newparams[i]
                    beststat = newstat
                    if disp > 0:
                        print('\t\t\timprovement: %e' % newparams[i])
                if newstat > beststat + abandon_threshold:
                    go_up = False
            if go_down:
                if disp > 1:
                    print('\t\ttrying %e v' % (startval / n))
                newparams[i] = startval / n
                newstat = func(newparams)
                if disp > 1:
                    print('\t\tval = %e: %e' % (newparams[i], newstat))
                if newstat + noimprovement_threshold < beststat:
                    # avoid zeros in normalizations
                    bestval = newparams[i]
                    beststat = newstat
                    if disp > 0:
                        print('\t\t\timprovement: %e' % newparams[i])
                if newstat > beststat + abandon_threshold:
                    go_down = False
        newparams[i] = bestval
        print('\tnew normalization of %d: %e' % (i, newparams[i]))
    print('optimization done, reached %.3f' % (beststat))
    return newparams
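A toy invocation sketch: a two-parameter objective whose first parameter is a scale variable. The objective and limits below are made up for illustration.

import numpy

def chi2(p):
    # toy objective with its minimum near norm = 1e-3, second parameter = 2.0
    return numpy.log10(p[0] / 1e-3) ** 2 + (p[1] - 2.0) ** 2

start = numpy.array([1.0, 1.0])
limits = [(1e-10, 1e10), (1e-10, 1e10)]
best = opt_normalizations(start, chi2, limits, disp=0)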
see :func:`optimize1d.optimize`, considers each parameter in order. :param ftol: difference in values at which the function can be considered flat. :param compute_errors: compute standard deviation of gaussian around optimum.
def opt_grid(params, func, limits, ftol=0.01, disp=0, compute_errors=True):
    """
    see :func:`optimize1d.optimize`, considers each parameter in order

    :param ftol:
        difference in values at which the function can be considered flat
    :param compute_errors:
        compute standard deviation of gaussian around optimum
    """
    caches = [[] for p in params]
    newparams = numpy.copy(params)
    errors = [[] for p in params]
    for i, p in enumerate(params):
        cache = []
        def func1(x0):
            newparams[i] = x0
            v = func(newparams)
            cache.append([x0, v])
            return v
        lo, hi = limits[i]
        bestval = optimize(func1, x0=p, cons=[lambda x: x - lo, lambda x: hi - x],
                           ftol=ftol, disp=disp - 1)
        beststat = func1(bestval)
        if compute_errors:
            errors[i] = cache2errors(func1, cache, disp=disp - 1)
        newparams[i] = bestval
        caches[i] = cache
        if disp > 0:
            if compute_errors:
                print('\tnew value of %d: %e [%e .. %e] yielded %e' % (
                    i, bestval, errors[i][0], errors[i][1], beststat))
            else:
                print('\tnew value of %d: %e yielded %e' % (i, bestval, beststat))
    beststat = func(newparams)
    if disp > 0:
        print('optimization done, reached %.3f' % (beststat))
    if compute_errors:
        return newparams, errors
    else:
        return newparams
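A usage sketch, assuming the `optimize` and `cache2errors` helpers from the optimize1d module are importable:

import numpy

def chi2(p):
    return (p[0] - 0.3) ** 2 + (p[1] - 0.7) ** 2

best, errs = opt_grid(numpy.array([0.5, 0.5]), chi2,
                      [(0.0, 1.0), (0.0, 1.0)], ftol=0.001)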
parallelized version of :func:`opt_grid`
def opt_grid_parallel(params, func, limits, ftol=0.01, disp=0, compute_errors=True):
    """ parallelized version of :func:`opt_grid` """
    import multiprocessing

    def spawn(f):
        def fun(q_in, q_out):
            while True:
                i, x = q_in.get()
                if i is None:
                    break
                q_out.put((i, f(x)))
        return fun

    def parmap(f, X, nprocs=multiprocessing.cpu_count()):
        q_in = multiprocessing.Queue(1)
        q_out = multiprocessing.Queue()
        proc = [multiprocessing.Process(target=spawn(f), args=(q_in, q_out))
                for _ in range(nprocs)]
        for p in proc:
            p.daemon = True
            p.start()
        sent = [q_in.put((i, x)) for i, x in enumerate(X)]
        [q_in.put((None, None)) for _ in range(nprocs)]
        res = [q_out.get() for _ in range(len(sent))]
        [p.join() for p in proc]
        return [x for i, x in sorted(res)]

    nthreads = multiprocessing.cpu_count()
    caches = [[] for p in params]
    newparams = numpy.copy(params)
    errors = [[] for p in params]
    k = 0
    while k < len(params):
        j = min(len(params), k + nthreads * 2)
        def run1d(args):
            # tuple-parameter unpacking is not valid Python 3 syntax; unpack explicitly
            i, curparams, curlimits = args
            cache = []
            def func1(x0):
                curparams[i] = x0
                v = func(curparams)
                cache.append([x0, v])
                return v
            lo, hi = curlimits
            # start from the current value of parameter i
            bestval = optimize(func1, x0=curparams[i],
                               cons=[lambda x: x - lo, lambda x: hi - x],
                               ftol=ftol, disp=disp - 1)
            beststat = func1(bestval)
            if compute_errors:
                errs = cache2errors(func1, cache, disp=disp - 1)
                return bestval, beststat, errs, cache
            return bestval, beststat, cache
        results = parmap(run1d, [(i, numpy.copy(newparams), limits[i]) for i in range(k, j)])
        for i, r in enumerate(results):
            if compute_errors:
                v, s, e, c = r
                if disp > 0:
                    print('\tnew value of %d: %e [%e .. %e] yielded %e' % (
                        i + k, v, e[0], e[1], s))
            else:
                v, s, c = r
                e = []
                if disp > 0:
                    print('\tnew value of %d: %e yielded %e' % (i + k, v, s))
            newparams[i + k] = v
            caches[i + k] = c
            errors[i + k] = e
        k = j
    beststat = func(newparams)
    if disp > 0:
        print('optimization done, reached %e' % (beststat))
    if compute_errors:
        return newparams, errors
    else:
        return newparams
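The queue-based `parmap` hands each worker `(index, argument)` pairs and re-sorts the results by index, so the returned order matches the input order; because `run1d` is a closure over `func`, this relies on fork-style process creation (as on Unix). The call signature is the same as `opt_grid`:

best, errs = opt_grid_parallel(numpy.array([0.5, 0.5]), chi2,
                               [(0.0, 1.0), (0.0, 1.0)], ftol=0.001)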
Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the current platform.
def _GetNativeEolStyle(platform=sys.platform): ''' Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the current platform. ''' _NATIVE_EOL_STYLE_MAP = { 'win32' : EOL_STYLE_WINDOWS, 'linux2' : EOL_STYLE_UNIX, 'linux' : EOL_STYLE_UNIX, 'darwin' : EOL_STYLE_MAC, } result = _NATIVE_EOL_STYLE_MAP.get(platform) if result is None: from ._exceptions import UnknownPlatformError raise UnknownPlatformError(platform) return result
Context manager for current directory (uses the with statement)
from contextlib import contextmanager

@contextmanager  # the yield-based body below only works under `with` when decorated
def Cwd(directory):
    '''
    Context manager for current directory (uses the with statement)

    e.g.:
        # working on some directory
        with Cwd('/home/new_dir'):
            # working on new_dir
        # working on the previous directory again

    :param unicode directory:
        Target directory to enter
    '''
    old_directory = six.moves.getcwd()
    if directory is not None:
        os.chdir(directory)
    try:
        yield directory
    finally:
        os.chdir(old_directory)
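Usage sketch (the directory below is hypothetical):

import os

with Cwd('/tmp'):
    print(os.getcwd())    # inside /tmp
print(os.getcwd())        # restored to the previous directory, even on error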
Normalizes a path maintaining the final slashes.
def NormalizePath(path): ''' Normalizes a path maintaining the final slashes. Some environment variables need the final slash in order to work. Ex. The SOURCES_DIR set by subversion must end with a slash because of the way it is used in the Visual Studio projects. :param unicode path: The path to normalize. :rtype: unicode :returns: Normalized path ''' if path.endswith('/') or path.endswith('\\'): slash = os.path.sep else: slash = '' return os.path.normpath(path) + slash
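For example, on a POSIX host:

assert NormalizePath('a/b/../c/') == 'a/c/'   # the trailing slash survives
assert NormalizePath('a/b/../c') == 'a/c'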
Returns a version of a path that is unique.
def CanonicalPath(path): ''' Returns a version of a path that is unique. Given two paths path1 and path2: CanonicalPath(path1) == CanonicalPath(path2) if and only if they represent the same file on the host OS. Takes account of case, slashes and relative paths. :param unicode path: The original path. :rtype: unicode :returns: The unique path. ''' path = os.path.normpath(path) path = os.path.abspath(path) path = os.path.normcase(path) return path
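For example, two spellings of the same file compare equal after canonicalization:

assert CanonicalPath('./foo//bar') == CanonicalPath('foo/bar')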
Replaces all slashes and backslashes with the target separator
def StandardizePath(path, strip=False):
    '''
    Replaces all slashes and backslashes with the target separator

    StandardPath:
        We are defining that the standard-path is the one with only forward slashes in
        it, either on Windows or any other platform.

    :param bool strip:
        If True, removes additional slashes from the end of the path.
    '''
    path = path.replace(SEPARATOR_WINDOWS, SEPARATOR_UNIX)
    if strip:
        path = path.rstrip(SEPARATOR_UNIX)
    return path
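Assuming `SEPARATOR_WINDOWS` is a backslash and `SEPARATOR_UNIX` a forward slash, as the names suggest:

assert StandardizePath('a\\b\\c') == 'a/b/c'
assert StandardizePath('a/b/', strip=True) == 'a/b'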
Normalizes a standard path (posixpath.normpath) maintaining any slashes at the end of the path.
def NormStandardPath(path):
    '''
    Normalizes a standard path (posixpath.normpath) maintaining any slashes at the end
    of the path.

    Normalize:
        Removes any local references in the path "/../"

    StandardPath:
        We are defining that the standard-path is the one with only forward slashes in
        it, either on Windows or any other platform.
    '''
    import posixpath
    if path.endswith('/'):
        slash = '/'
    else:
        slash = ''
    return posixpath.normpath(path) + slash
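For example:

assert NormStandardPath('a/b/../c/') == 'a/c/'   # trailing slash kept
assert NormStandardPath('/x/../y') == '/y'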
Creates an md5 file from a source file (contents are the MD5 hash of the source file)
def CreateMD5(source_filename, target_filename=None):
    '''
    Creates an md5 file from a source file (contents are the MD5 hash of the source file)

    :param unicode source_filename:
        Path to source file

    :type target_filename: unicode or None
    :param target_filename:
        Name of the target file with the md5 contents

        If None, defaults to source_filename + '.md5'
    '''
    if target_filename is None:
        target_filename = source_filename + '.md5'

    from six.moves.urllib.parse import urlparse
    source_url = urlparse(source_filename)

    # Obtain MD5 hex
    if _UrlIsLocal(source_url):
        # If using a local file, we can give Md5Hex the filename
        md5_contents = Md5Hex(filename=source_filename)
    else:
        # Md5Hex can't handle remote files, we open it and pray we won't run out of memory.
        md5_contents = Md5Hex(contents=GetFileContents(source_filename, binary=True))

    # Write MD5 hash to a file
    CreateFile(target_filename, md5_contents)
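A usage sketch with hypothetical paths (relies on the module's `CreateFile`, `Md5Hex`, and `GetFileContents` helpers):

CreateMD5('build/output.zip')                          # writes build/output.zip.md5
CreateMD5('build/output.zip', 'checksums/output.md5')  # explicit target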
Copy a file from source to target.
def CopyFile(source_filename, target_filename, override=True, md5_check=False, copy_symlink=True): ''' Copy a file from source to target. :param source_filename: @see _DoCopyFile :param target_filename: @see _DoCopyFile :param bool md5_check: If True, checks md5 files (of both source and target files), if they match, skip this copy and return MD5_SKIP Md5 files are assumed to be {source, target} + '.md5' If any file is missing (source, target or md5), the copy will always be made. :param copy_symlink: @see _DoCopyFile :raises FileAlreadyExistsError: If target_filename already exists, and override is False :raises NotImplementedProtocol: If file protocol is not accepted Protocols allowed are: source_filename: local, ftp, http target_filename: local, ftp :rtype: None | MD5_SKIP :returns: MD5_SKIP if the file was not copied because there was a matching .md5 file .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information ''' from ._exceptions import FileNotFoundError # Check override if not override and Exists(target_filename): from ._exceptions import FileAlreadyExistsError raise FileAlreadyExistsError(target_filename) # Don't do md5 check for md5 files themselves. md5_check = md5_check and not target_filename.endswith('.md5') # If we enabled md5 checks, ignore copy of files that haven't changed their md5 contents. if md5_check: source_md5_filename = source_filename + '.md5' target_md5_filename = target_filename + '.md5' try: source_md5_contents = GetFileContents(source_md5_filename) except FileNotFoundError: source_md5_contents = None try: target_md5_contents = GetFileContents(target_md5_filename) except FileNotFoundError: target_md5_contents = None if source_md5_contents is not None and \ source_md5_contents == target_md5_contents and \ Exists(target_filename): return MD5_SKIP # Copy source file _DoCopyFile(source_filename, target_filename, copy_symlink=copy_symlink) # If we have a source_md5, but no target_md5, create the target_md5 file if md5_check and source_md5_contents is not None and source_md5_contents != target_md5_contents: CreateFile(target_md5_filename, source_md5_contents)
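A usage sketch with hypothetical paths, showing the md5 short-circuit:

result = CopyFile('build/app.tar.gz', 'deploy/app.tar.gz', md5_check=True)
if result is MD5_SKIP:
    print('matching .md5 sidecars found; copy skipped')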