repo
stringlengths
1
29
path
stringlengths
24
332
code
stringlengths
39
579k
factory_boy-2.12.0
factory_boy-2.12.0//factory/base.pyclass:BaseFactory/create_batch
@classmethod
def create_batch(cls, size, **kwargs):
    """Build ``size`` instances of the factory's class with overridden attrs.

    Args:
        size (int): how many instances to build.
        **kwargs: attribute overrides forwarded to each ``create`` call.

    Returns:
        list: the newly created instances, in creation order.
    """
    batch = []
    for _ in range(size):
        batch.append(cls.create(**kwargs))
    return batch
PyQtPurchasing-5.14.0
PyQtPurchasing-5.14.0//configure.pyclass:ModuleConfiguration/get_qmake_configuration
@staticmethod
def get_qmake_configuration(target_configuration):
    """Return the qmake configuration values for this module.

    The returned dict may contain CONFIG, DEFINES, INCLUDEPATH, LIBS and
    QT keys; keys prefixed with 'Qt4' or 'Qt5' apply only to that Qt
    version.

    :param target_configuration: the target configuration (not consulted
        here -- this module's values are version-independent).
    :return: dict mapping qmake variable names to values.
    """
    qmake_config = {}
    qmake_config['QT'] = 'purchasing'
    return qmake_config
bitcoinX-0.2.4
bitcoinX-0.2.4//bitcoinx/keys.pyclass:PublicKey/from_hex
@classmethod
def from_hex(cls, hex_str):
    """Construct a PublicKey from its hexadecimal serialization.

    :param hex_str: hex-encoded public key bytes.
    :return: a new instance built via ``from_bytes``.
    """
    raw = bytes.fromhex(hex_str)
    return cls.from_bytes(raw)
Kivy-1.11.1
Kivy-1.11.1//kivy/parser.pyfile:/kivy/parser.py:function:parse_bool/parse_bool
def parse_bool(text):
    """Parse a string to a boolean, ignoring case.

    "true"/"1" is True, "false"/"0" is False. Anything else raises.

    :param text: the string to parse.
    :return: the parsed boolean.
    :raises ValueError: if ``text`` is not a recognized boolean literal.
        (ValueError subclasses Exception, so existing ``except Exception``
        handlers keep working.)
    """
    # Lowercase once instead of once per comparison.
    lowered = text.lower()
    if lowered in ('true', '1'):
        return True
    if lowered in ('false', '0'):
        return False
    # The original raised a bare Exception; ValueError is the idiomatic,
    # backward-compatible choice for a bad literal.
    raise ValueError('Invalid boolean: %s' % text)
dronin-pyqtgraph-20160825.3
dronin-pyqtgraph-20160825.3//dronin_pyqtgraph/exceptionHandling.pyfile:/dronin_pyqtgraph/exceptionHandling.py:function:setTracebackClearing/setTracebackClearing
def setTracebackClearing(clear=True):
    """Enable or disable clearing of unhandled-exception tracebacks.

    Python indefinitely stores the stack of the last unhandled exception;
    enabling clearing avoids the unexpected retention of large
    memory-consuming objects reachable from that stack. Clearing is
    disabled by default.

    :param clear: True to clear stored tracebacks, False to keep them.
    """
    global clear_tracebacks
    clear_tracebacks = clear
mercurial
mercurial//interfaces/repository.pyclass:imanifeststorage/strip
def strip(minlink, transaction):
    """Remove storage of items starting at a linkrev.

    Interface stub: implementations remove stored revisions whose linkrev
    is at or beyond ``minlink``, within the given ``transaction``.
    See the documentation in ``ifilemutation`` for more.
    """
wxPython-4.1.0
wxPython-4.1.0//wx/lib/agw/cubecolourdialog.pyfile:/wx/lib/agw/cubecolourdialog.py:function:FindC/FindC
def FindC(line):
    """Internal function: return the y-intercept ``c`` of ``line``.

    When ``line.slope`` is None the stored ``y`` is returned unchanged;
    otherwise ``c = y - slope * x``.
    """
    if line.slope is None:
        return line.y
    return line.y - line.slope * line.x
cluster_pack
cluster_pack//_version.pyfile:/_version.py:function:render_pep440_pre/render_pep440_pre
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces['closest-tag']
    distance = pieces['distance']
    if not tag:
        # No tag reachable: synthesize a version rooted at 0.
        return '0.post.dev%d' % distance
    if distance:
        return tag + '.post.dev%d' % distance
    return tag
memote-0.10.2
memote-0.10.2//src/memote/support/helpers.pyfile:/src/memote/support/helpers.py:function:open_exchanges/open_exchanges
def open_exchanges(model):
    """Open all exchange reactions to the full flux range.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation; every reaction in
        ``model.exchanges`` gets its bounds set to (-1000, 1000).
    """
    for exchange in model.exchanges:
        exchange.bounds = (-1000, 1000)
hapic_apispec-0.37.0
hapic_apispec-0.37.0//apispec/auto_ref_strategy.pyfile:/apispec/auto_ref_strategy.py:function:get_excluded_params/get_excluded_params
def get_excluded_params(schema):
    """Return the set of params excluded by this schema.

    If the schema instance defines ``only``, every declared field not
    listed in it is also treated as excluded.

    :param schema: schema instance (passing a schema *class* yields an
        empty set).
    :return: set of excluded param names.
    """
    if isinstance(schema, type):
        return set()
    excluded = set(getattr(schema, 'exclude', ()) or ())
    only_fields = set(getattr(schema, 'only', ()) or ())
    if only_fields:
        excluded.update(str(name) for name in schema._declared_fields
                        if name not in only_fields)
    return excluded
hg-evolve-9.3.1
hg-evolve-9.3.1//hgext3rd/topic/common.pyfile:/hgext3rd/topic/common.py:function:hastopicext/hastopicext
def hastopicext(repo):
    """Return whether ``repo`` uses the topic extension.

    Falls back to False when the repo object carries no ``hastopicext``
    attribute at all.
    """
    return getattr(repo, 'hastopicext', False)
zillion
zillion//sql_utils.pyfile:/sql_utils.py:function:column_fullname/column_fullname
def column_fullname(column, prefix=None):
    """Build a fully qualified name for a SQLAlchemy column.

    Parameters
    ----------
    column : SQLAlchemy column
        Column whose ``table.fullname`` and ``name`` are combined.
    prefix : str, optional
        A manual prefix to prepend, automatically separated with a ".".

    Returns
    -------
    str
        A fully qualified column name; the exact format depends on your
        SQLAlchemy metadata, e.g. ``schema.table.column``.
    """
    parts = ['%s.%s' % (column.table.fullname, column.name)]
    if prefix:
        parts.insert(0, prefix)
    return '.'.join(parts)
ppp_cas-0.8
ppp_cas-0.8//ppp_cas/calchasYacc.pyfile:/ppp_cas/calchasYacc.py:function:p_expression_parentheses/p_expression_parentheses
def p_expression_parentheses(p):
    """expression : LPAREN expression RPAREN"""
    # NOTE: the docstring above is the PLY/yacc grammar rule for this
    # production -- do not edit its text; changing it changes the parser.
    # A parenthesized expression reduces to the inner expression's value.
    p[0] = p[2]
abipy-0.7.0
abipy-0.7.0//abipy/tools/plotting.pyclass:GenericDataFilesPlotter/from_files
@classmethod
def from_files(cls, filepaths):
    """Build a plotter from a list of file paths.

    Each path is registered on a fresh instance via ``add_file``.
    """
    plotter = cls()
    for path in filepaths:
        plotter.add_file(path)
    return plotter
synapse_pay_rest_native-3.4.8
synapse_pay_rest_native-3.4.8//synapse_pay_rest/models/transactions/transaction.pyclass:Transaction/all
@classmethod
def all(cls, node=None, **kwargs):
    """Retrieve all trans records (limited by pagination) as Transactions.

    Args:
        node (BaseNode): the node from which to send funds
        per_page (int, str): (opt) number of records to retrieve
        page (int, str): (opt) page number to retrieve

    Returns:
        list: containing 0 or more Transaction instances
    """
    user = node.user
    response = user.client.trans.get(user.id, node.id, **kwargs)
    return cls.multiple_from_response(node, response['trans'])
pda
pda//utils.pyfile:/utils.py:function:print_header/print_header
def print_header():
    """Print a pretty header for the list content.

    The header has 5 columns, each padded to a different width, followed
    by a matching row of '=' underlines.
    """
    headers = ['TASK#', 'SUMMARY', 'LIST TYPE', 'DUE TIME', 'PRIORITY']
    print()
    print('{0:<5} {1:<60} {2:<9} {3:<8} {4:<8}'.format(*headers))
    underline = ['', '', '', '', '']
    print('{0:=<5} {1:=<60} {2:=<9} {3:=<8} {4:=<8}'.format(*underline))
plone.app.standardtiles-2.3.2
plone.app.standardtiles-2.3.2//plone/app/standardtiles/rss.pyclass:IFeed/update_failed
def update_failed():
    """Return whether the last feed update failed.

    Interface stub -- implementations return a boolean.
    """
nlpnet
nlpnet//arguments.pyfile:/arguments.py:function:fill_defaults/fill_defaults
def fill_defaults(args, defaults_per_task):
    """Fill arguments left as None with the chosen task's defaults.

    We can't rely on argparse for this: using subparsers with
    set_defaults and a parent parser overwrites the defaults.

    :param args: parsed argparse namespace with a ``task`` attribute.
    :param defaults_per_task: mapping of task name -> dict of defaults.
    """
    defaults = defaults_per_task[args.task]
    for name in vars(args):
        if name in defaults and getattr(args, name) is None:
            setattr(args, name, defaults[name])
pyflux_docker
pyflux_docker//families/normal.pyclass:Normal/reg_score_function
@staticmethod
def reg_score_function(X, y, mean, scale, shape, skewness):
    """GAS Normal regression update term, using the gradient only.

    Parameters
    ----------
    X : float
        Datapoint for the right-hand-side variable.
    y : float
        Datapoint for the time series.
    mean : float
        Location parameter of the Normal distribution.
    scale : float
        Scale parameter (not used by the gradient-only score).
    shape : float
        Tail-thickness parameter (not used for the Normal family).
    skewness : float
        Skewness parameter (not used for the Normal family).

    Returns
    -------
    float
        Score of the Normal family: ``X * (y - mean)``.
    """
    residual = y - mean
    return X * residual
wvpy-0.1.9
wvpy-0.1.9//wvpy/util.pyfile:/wvpy/util.py:function:cross_predict_model/cross_predict_model
def cross_predict_model(fitter, X, Y, plan):
    """Train Y~X under the cross-validation plan; return out-of-fold predictions.

    :param fitter: estimator exposing sklearn-style ``fit``/``predict``.
    :param X: pandas DataFrame of features.
    :param Y: pandas Series/DataFrame of targets.
    :param plan: list of dicts with integer 'train' and 'test' index lists.
    :return: list of predictions aligned with X's rows.
    """
    preds = [None] * X.shape[0]
    for fold in plan:
        train_idx = fold['train']
        test_idx = fold['test']
        model = fitter.fit(X.iloc[train_idx], Y.iloc[train_idx])
        fold_preds = model.predict(X.iloc[test_idx])
        for offset, row in enumerate(test_idx):
            preds[row] = fold_preds[offset]
    return preds
distributed
distributed//profile.pyfile:/profile.py:function:identifier/identifier
def identifier(frame):
    """Return a cheap string key identifying ``frame``.

    Strings are cheaper to use as indexes into dicts than tuples or
    dicts; ``None`` maps to the literal string 'None'.
    """
    if frame is None:
        return 'None'
    code = frame.f_code
    return ';'.join((code.co_name, code.co_filename,
                     str(code.co_firstlineno)))
pyNastran
pyNastran//bdf/cards/materials.pyclass:MAT8/export_to_hdf5
@classmethod
def export_to_hdf5(cls, h5_file, model, mids):
    """exports the materials in a vectorized way

    :param h5_file: open HDF5 group/file supporting ``create_dataset``.
    :param model: BDF model whose ``materials`` dict is indexed by mid.
    :param mids: material ids to export.
    """
    # (attribute name on the material, dataset name in the HDF5 file),
    # in the exact order the datasets were historically written.
    fields = [
        ('e11', 'E11'), ('e22', 'E22'), ('nu12', 'Nu12'), ('g12', 'G12'),
        ('g1z', 'G1z'), ('g2z', 'G2z'), ('a1', 'A1'), ('a2', 'A2'),
        ('rho', 'rho'), ('tref', 'tref'), ('ge', 'ge'), ('Xt', 'Xt'),
        ('Xc', 'Xc'), ('Yt', 'Yt'), ('Yc', 'Yc'), ('S', 'S'),
        ('F12', 'F12'), ('strn', 'strn'),
    ]
    materials = [model.materials[mid] for mid in mids]
    h5_file.create_dataset('mid', data=mids)
    for attr, dset_name in fields:
        values = [getattr(material, attr) for material in materials]
        h5_file.create_dataset(dset_name, data=values)
eo-learn-visualization-0.7.3
eo-learn-visualization-0.7.3//eolearn/visualization/eoexecutor_visualization.pyclass:EOExecutorVisualization/_format_timedelta
@staticmethod def _format_timedelta(value1, value2): """ Method for formatting time delta into report """ return str(value2 - value1)
systemrdl
systemrdl//rdltypes.pyclass:UserStruct/get_parent_scope
@classmethod
def get_parent_scope(cls):
    """Return the parent component containing this type definition.

    Returns None when no ``_parent_scope`` was recorded on the class.
    """
    scope = getattr(cls, '_parent_scope', None)
    return scope
plone.app.imagecropping-2.2.2
plone.app.imagecropping-2.2.2//src/plone/app/imagecropping/interfaces.pyclass:IImageCroppingUtils/get_image_field
def get_image_field(fieldname):
    """Return the image field named ``fieldname``.

    Interface stub -- implementations return the image field object for
    the given field name.
    """
mmvec-1.0.4
mmvec-1.0.4//mmvec/util.pyfile:/mmvec/util.py:function:embeddings2ranks/embeddings2ranks
def embeddings2ranks(embeddings):
    """ Converts embeddings to ranks"""
    # Split the long-form embedding table by entity type.
    microbes = embeddings.loc[embeddings.embed_type == 'microbe']
    metabolites = embeddings.loc[embeddings.embed_type == 'metabolite']
    # Wide matrices: one row per feature, one column per latent axis.
    U = microbes.pivot(index='feature_id', columns='axis', values='values')
    V = metabolites.pivot(index='feature_id', columns='axis', values='values')
    # Principal-component axis labels, excluding the bias column.
    pc_ids = sorted(list(set(U.columns) - {'bias'}))
    U['ones'] = 1
    V['ones'] = 1
    # NOTE(review): the column order is deliberately asymmetric so that
    # U's 'ones' column lines up with V's 'bias' column (and vice versa),
    # which adds both bias terms into the inner product U @ V.T.
    ranks = U[pc_ids + ['ones', 'bias']] @ V[pc_ids + ['bias', 'ones']].T
    # Center each microbe's ranks across the metabolites.
    ranks = ranks - ranks.mean(axis=1).values.reshape(-1, 1)
    return ranks
horae.subscription-1.0a1
horae.subscription-1.0a1//horae/subscription/interfaces.pyclass:IMessage/message
def message(html=False):
    """Return the notification message to be sent.

    Interface stub. The ``html`` flag presumably selects an HTML-formatted
    message over plain text -- confirm against implementations.
    """
ib_insync-0.9.60
ib_insync-0.9.60//ib_insync/util.pyfile:/ib_insync/util.py:function:isNan/isNan
def isNan(x: float) -> bool:
    """Not-a-number test.

    Relies on NaN being the only float value unequal to itself, which
    avoids the TypeError ``math.isnan`` would raise on non-floats.
    """
    return not x == x
failure
failure//failure.pyclass:Failure/from_dict
@classmethod
def from_dict(cls, data):
    """Build a Failure object from its dictionary form.

    A nested 'cause' dict, if present, is recursively converted first;
    the input dict is not mutated.
    """
    fields = dict(data)
    cause = fields.get('cause')
    if cause is not None:
        fields['cause'] = cls.from_dict(cause)
    return cls(**fields)
simfin-0.6.0
simfin-0.6.0//simfin/transform.pyfile:/simfin/transform.py:function:avg_ttm/avg_ttm
def avg_ttm(df, years):
    """Multi-year average of TTM data (4 data-points per year).

    Unlike a rolling average on TTM data, which over-weighs the most
    recent quarters, this averages the same quarter across ``years``
    consecutive years. Only use on a DataFrame for a single stock; use
    :obj:`~simfin.utils.apply` with this function for multiple stocks.

    :param df: DataFrame of TTM financial data sorted ascendingly by date.
    :param years: integer number of years to average over.
    :return: DataFrame with the averages.
    """
    total = df.copy()
    for lag in range(1, years):
        # Shift by 4 quarters per year of lag and accumulate.
        total += df.shift(4 * lag)
    return total / years
ftw.mopage-1.0
ftw.mopage-1.0//ftw/mopage/interfaces.pyclass:IMopageObjectLookup/get_objects
def get_objects():
    """Return the objects providing data for the XML export.

    Interface stub -- implementations return the exportable objects.
    """
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/iam.pyfile:/pyboto3/iam.py:function:update_open_id_connect_provider_thumbprint/update_open_id_connect_provider_thumbprint
def update_open_id_connect_provider_thumbprint(OpenIDConnectProviderArn=None,
        ThumbprintList=None):
    """Replace the server-certificate thumbprint list of an IAM OIDC provider.

    The list passed completely replaces the existing list of thumbprints
    (the lists are not merged). Typically you only need this when the
    identity provider's certificate changes; until the thumbprint is
    updated, assuming an IAM role that trusts the provider fails.
    See also: AWS API Documentation.

    :type OpenIDConnectProviderArn: string
    :param OpenIDConnectProviderArn: [REQUIRED] ARN of the IAM OIDC
        provider resource object to update. You can list provider ARNs
        with the ListOpenIDConnectProviders action.
    :type ThumbprintList: list
    :param ThumbprintList: [REQUIRED] Certificate thumbprints for the
        provider: each is the 40-character hex-encoded SHA-1 hash of the
        self-signed X.509 certificate used by the provider's domain.
    """
    # pyboto3 stub: documentation-only signature; boto3 performs the
    # actual API call at runtime.
    pass
skultrafast-2.0.5
skultrafast-2.0.5//skultrafast/data_io.pyfile:/skultrafast/data_io.py:function:messpy_example_path/messpy_example_path
def messpy_example_path():
    """Return the path to the messpy example data shipped with skultrafast.

    Returns
    -------
    str
        The full path to ``messpyv1_data.npz``.
    """
    import skultrafast
    package_dir = skultrafast.__path__[0]
    return package_dir + '/examples/data/messpyv1_data.npz'
sunriset
sunriset//calc.pyfile:/calc.py:function:sunset_float/sunset_float
def sunset_float(solar_noon_float, hour_angle_sunrise):
    """Return sunset as a fraction of the day.

    Computed from solar noon (as a day fraction) and the sunrise hour
    angle in degrees: minutes past midnight = noon * 1440 + 4 * angle
    (4 minutes per degree), then normalized back to a day fraction.
    """
    minutes = solar_noon_float * 1440 + hour_angle_sunrise * 4
    return minutes / 1440
network2tikz-0.1.8
network2tikz-0.1.8//network2tikz/units.pyclass:UnitConverter/mm_to_pt
@staticmethod
def mm_to_pt(measure):
    """Convert millimeters to points (1 mm = 2.83465 pt)."""
    MM_TO_PT = 2.83465
    return measure * MM_TO_PT
black_widow
black_widow//app/managers/sniffer/pcap_sniffer.pyclass:PcapSniffer/_merge_addr
@staticmethod def _merge_addr(host1: dict, host2: dict): """ Merge host1 and host2 by preferring host2 :param host1: { 'mac': <mac_addr>, 'mac_manufacturer': tuple, 'ip': <ip_addr>, 'ip_host': list } :param host2: // :return: The host1 merged with host2 """ if host1 is None: return host2 if host2 is None: return host1 host = host2.copy() for key, val in host2.items(): if val is not None: continue host[key] = host1.get(key) ip = host.get('ip') ip_host = host.get('ip_host') mac = host.get('mac') mac_manufacturer = host.get('mac_manufacturer') if ip is not None: host['label'] = ip host['title'] = ip_host elif mac_manufacturer is None: host['label'] = mac else: host['label'] = mac_manufacturer host['title'] = mac return host
ht-0.1.54
ht-0.1.54//ht/conv_external.pyfile:/ht/conv_external.py:function:Nu_cylinder_Fand/Nu_cylinder_Fand
def Nu_cylinder_Fand(Re, Pr):
    """Nusselt number for crossflow across a single tube (Fand correlation).

    .. math::
        Nu = (0.35 + 0.34Re^{0.5} + 0.15Re^{0.58})Pr^{0.3}

    Both `Re` and `Pr` are evaluated at the film temperature; no wall
    correction is needed. Developed with test results for water at Re of
    1E4 to 1E5; claimed validity is Re from 1E-1 to 1E5, covering both
    the laminar and turbulent regimes. Also shown in [2]_.

    Parameters
    ----------
    Re : float
        Reynolds number with respect to cylinder diameter, [-]
    Pr : float
        Prandtl number at film temperature, [-]

    Returns
    -------
    Nu : float
        Nusselt number with respect to cylinder diameter, [-]

    Examples
    --------
    >>> Nu_cylinder_Fand(6071, 0.7)
    45.19984325481126

    References
    ----------
    .. [1] Fand, R. M. "Heat Transfer by Forced Convection from a Cylinder
       to Water in Crossflow." International Journal of Heat and Mass
       Transfer 8, no. 7 (July 1, 1965): 995-1010.
       doi:10.1016/0017-9310(65)90084-0.
    .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat
       Transfer from a Circular Cylinder in Crossflow to Air and Liquids."
       International Journal of Heat and Mass Transfer 47, no. 22
       (October 2004): 4795-4805. doi:10.1016/j.ijheatmasstransfer.2004.05.012.
    """
    reynolds_term = 0.35 + 0.34 * Re ** 0.5 + 0.15 * Re ** 0.58
    return reynolds_term * Pr ** 0.3
plotly
plotly//basewidget.pyclass:BaseFigureWidget/_display_frames_error
@staticmethod
def _display_frames_error():
    """
    Display an informative error when user attempts to set frames on a
    FigureWidget

    Raises
    ------
    ValueError
        always
    """
    # NOTE(review): the exact line-wrapping of this message is a guess --
    # confirm against upstream; only whitespace may differ.
    msg = """ Frames are not supported by the plotly.graph_objs.FigureWidget class. Note: Frames are supported by the plotly.graph_objs.Figure class"""
    raise ValueError(msg)
pyRSD-0.1.19
pyRSD-0.1.19//pyRSD/rsd/hzpt/P11.pyclass:HaloZeldovichP11/default_parameters
@staticmethod
def default_parameters():
    """The default parameters

    References
    ----------
    These parameters are from:
    file: ``mcmc_fit_kmin-0.005_kmax-1.0.npz``
    directory: ``$RSD_DIR/SimCalibrations/P11HaloZeldovich/results``
    git hash: 8e1304e6
    """
    return {'_A0_amp': 658.9, '_A0_alpha': 3.91, '_A0_beta': 1.917,
            '_R_amp': 18.95, '_R_alpha': -0.3657, '_R_beta': -0.2585,
            '_R1h_amp': 0.8473, '_R1h_alpha': -0.1524, '_R1h_beta': 0.7769}
dama
dama//measures.pyfile:/measures.py:function:f1/f1
def f1(labels, predictions):
    """Macro-averaged F1 score (per-class harmonic mean of precision
    and recall, averaged without class weighting).
    """
    from sklearn.metrics import f1_score
    score = f1_score(labels, predictions, average='macro', pos_label=None)
    return score
FFC-2017.1.0
FFC-2017.1.0//ffc/cpp.pyfile:/ffc/cpp.py:function:indent/indent
def indent(block, num_spaces):
    """Indent each row of the given string block with ``num_spaces`` spaces."""
    pad = ' ' * num_spaces
    return '\n'.join(pad + row for row in block.split('\n'))
csv_detective
csv_detective//detection.pyfile:/detection.py:function:detect_trailing_columns/detect_trailing_columns
def detect_trailing_columns(file, sep, heading_columns):
    """Test the first 10 lines for empty trailing columns.

    For each line, counts the separator characters stripped from the
    line's ends (minus ``heading_columns``) and returns the minimum over
    the sampled lines; returns 0 early as soon as any line has none.
    """
    file.seek(0)
    trailing = float('Inf')
    for _ in range(10):
        raw = file.readline().replace('\n', '')
        count = len(raw) - len(raw.strip(sep)) - heading_columns
        trailing = min(trailing, count)
        if trailing == 0:
            return 0
    return trailing
marathon_acme
marathon_acme//marathon_util.pyfile:/marathon_util.py:function:_is_legacy_ip_per_task/_is_legacy_ip_per_task
def _is_legacy_ip_per_task(app): """ Return whether the application is using IP-per-task on Marathon < 1.5. :param app: The application to check. :return: True if using IP per task, False otherwise. """ return app.get('ipAddress') is not None
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/cognitoidentityprovider.pyfile:/pyboto3/cognitoidentityprovider.py:function:admin_disable_user/admin_disable_user
def admin_disable_user(UserPoolId=None, Username=None):
    """Disable the specified user as an administrator; works on any user.

    Requires developer credentials. See also: AWS API Documentation for
    the Cognito Identity Provider AdminDisableUser action.

    :type UserPoolId: string
    :param UserPoolId: [REQUIRED] The user pool ID for the user pool
        where you want to disable the user.
    :type Username: string
    :param Username: [REQUIRED] The user name of the user you wish to
        disable.
    :rtype: dict
    :return: {}
    """
    # pyboto3 stub: documentation-only signature; boto3 performs the
    # actual API call at runtime.
    pass
pyphinb-2.9.4
pyphinb-2.9.4//pyphinb/tpm.pyfile:/pyphinb/tpm.py:function:is_state_by_state/is_state_by_state
def is_state_by_state(tpm):
    """Return ``True`` if ``tpm`` is in state-by-state form (a square
    2-D matrix), otherwise ``False``.
    """
    if tpm.ndim != 2:
        return False
    rows, cols = tpm.shape
    return rows == cols
grimoire_elk
grimoire_elk//enriched/mattermost.pyclass:Mapping/get_elastic_mappings
@staticmethod
def get_elastic_mappings(es_major):
    """Get Elasticsearch mapping.

    :param es_major: major version of Elasticsearch, as string
    :returns: dictionary with a key, 'items', with the mapping
    """
    # Raw JSON mapping: index the message text as analyzed full-text with
    # fielddata enabled so it can be aggregated on.
    # NOTE(review): the literal's internal whitespace is reconstructed
    # from a flattened source -- only whitespace may differ; the JSON
    # content is unchanged and whitespace is irrelevant to Elasticsearch.
    mapping = """ { "properties": { "text_analyzed": { "type": "text", "fielddata": true, "index": true } } } """
    return {'items': mapping}
gns3-server-2.2.8
gns3-server-2.2.8//gns3server/controller/ports/gigabitethernet_port.pyclass:GigabitEthernetPort/long_name_type
@staticmethod
def long_name_type():
    """Return the long name type for this port.

    :returns: string
    """
    name = 'GigabitEthernet'
    return name
mtcli
mtcli//views.pyfile:/views.py:function:var_view/var_view
def var_view(ch_trend, var, num_bar):
    """Return the view with the percent variation between two bars."""
    return '{} {} {}'.format(num_bar, ch_trend, var)
handprint-1.2.2
handprint-1.2.2//handprint/services/amazon.pyfile:/handprint/services/amazon.py:function:corner_list/corner_list
def corner_list(polygon, width, height):
    """Condense a vision-API boundingBox polygon into [x y x y x y x y].

    The first x, y pair is the upper-left corner. Each coordinate is
    scaled by ``width``/``height`` and rounded to an int. Returns [] if
    any of the four vertices lacks an 'X' or 'Y' key.
    """
    corners = []
    for index in range(4):
        vertex = polygon[index]
        if 'X' not in vertex or 'Y' not in vertex:
            return []
        corners.append(int(round(vertex['X'] * width)))
        corners.append(int(round(vertex['Y'] * height)))
    return corners
lunar-0.0.1
lunar-0.0.1//lunar/template.pyfile:/lunar/template.py:function:unescape/unescape
def unescape(s):
    """Unescape HTML entity tokens.

    <p>{{ unescape(content) }}</p>

    NOTE: '&amp;' is replaced first, so doubly-escaped input such as
    '&amp;lt;' unescapes all the way down to '<'.
    """
    for entity, char in (('&amp;', '&'), ('&lt;', '<'), ('&gt;', '>'),
                         ('&quot;', '"'), ('&#039;', "'")):
        s = s.replace(entity, char)
    return s
process_improve
process_improve//plotting.pyfile:/plotting.py:function:get_plot_title/get_plot_title
def get_plot_title(main, model, prefix=''):
    """Construct a sensible plot title from the ``model``.

    :param main: caller-supplied base title, or None to fall back to
        ``prefix``.
    :param model: object whose ``get_title()`` supplies the suffix.
    :param prefix: fallback base title used when ``main`` is None.
    :return: the combined title string.
    """
    # BUG FIX: the original tested ``main is not None`` and overwrote a
    # caller-supplied title with ``prefix``; worse, when main WAS None it
    # crashed with a TypeError on ``None += ...``. Use prefix only as the
    # fallback.
    if main is None:
        main = prefix
    title = model.get_title()
    if title:
        main += f': {title}'
    return main
lifelib-0.0.14
lifelib-0.0.14//lifelib/projects/solvency2/projection.pyfile:/lifelib/projects/solvency2/projection.py:function:PolsAccHosp/PolsAccHosp
def PolsAccHosp(t):
    """Number of policies: Accidental Hospitalization.

    This product is not modeled, so the count is zero at every time ``t``.
    """
    return 0
inmembrane-0.95.0
inmembrane-0.95.0//inmembrane/protocols/gram_neg.pyfile:/inmembrane/protocols/gram_neg.py:function:summary_table/summary_table
def summary_table(params, proteins):
    """Return a simple summary table of protein classifications.

    :param params: unused here; kept for protocol signature compatibility.
    :param proteins: mapping of seqid -> dict with a 'category' key.
    :return: the formatted table as a string.
    """
    counts = {}
    for seqid in proteins:
        category = proteins[seqid]['category']
        counts[category] = counts.get(category, 0) + 1
    out = '\n\n# Number of proteins in each class:\n'
    for category, n in counts.items():
        out += '# %-15s %i\n' % (category, n)
    return out
openplc_editor
openplc_editor//graphics/GraphicCommons.pyfile:/graphics/GraphicCommons.py:function:GetScaledEventPosition/GetScaledEventPosition
def GetScaledEventPosition(event, dc, scaling):
    """Snap the event's logical position to the grid defined by ``scaling``.

    :param event: wx event exposing ``GetLogicalPosition(dc)``.
    :param dc: device context used for the logical-coordinate conversion.
    :param scaling: (x_step, y_step) grid pitch, or a falsy value for no
        snapping.
    :return: the (possibly snapped) position object.
    """
    position = event.GetLogicalPosition(dc)
    if scaling:
        x_step, y_step = scaling[0], scaling[1]
        position.x = round(position.x / x_step) * x_step
        position.y = round(position.y / y_step) * y_step
    return position
carpedm
carpedm//data/ops.pyfile:/data/ops.py:function:in_line/in_line
def in_line(xmin_line, xmax_line, ymin_line, xmin_new, xmax_new, ymax_new):
    """Heuristic for determining whether a character is in a line.

    The new character's horizontal span is compared against the line's
    *average* character span; the character must also reach down at least
    to the line's top edge.

    Note: currently dependent on the order in which characters are added
    (reading order); see upstream note about a future fix.

    Args:
        xmin_line (:obj:`list` of :obj:`int`): Minimum x-coordinates of
            characters already in the line.
        xmax_line (:obj:`list` of :obj:`int`): Maximum x-coordinates of
            characters already in the line.
        ymin_line (int): Minimum y-coordinate of the line.
        xmin_new (int): Minimum x-coordinate of the new character.
        xmax_new (int): Maximum x-coordinate of the new character.
        ymax_new (int): Maximum y-coordinate of the new character.

    Returns:
        bool: True when the new character vertically overlaps with the
        "average" character in the line.
    """
    avg_min = sum(xmin_line) / len(xmin_line)
    avg_max = sum(xmax_line) / len(xmax_line)
    overlaps_horizontally = avg_min <= xmax_new and avg_max >= xmin_new
    return overlaps_horizontally and ymax_new >= ymin_line
numpydoc-0.9.2
numpydoc-0.9.2//numpydoc/docscrape.pyfile:/numpydoc/docscrape.py:function:strip_blank_lines/strip_blank_lines
def strip_blank_lines(l):
    """Remove leading and trailing blank lines from a list of lines.

    Mutates ``l`` in place and also returns it.
    """
    while l and not l[0].strip():
        l.pop(0)
    while l and not l[-1].strip():
        l.pop()
    return l
cleo-0.8.1
cleo-0.8.1//cleo/parser.pyclass:Parser/_parameters
@classmethod def _parameters(cls, tokens): """ Extract all of the parameters from the tokens. :param tokens: The tokens to extract the parameters from :type tokens: list :rtype: dict """ arguments = [] options = [] for token in tokens: if not token.startswith('--'): arguments.append(cls._parse_argument(token)) else: options.append(cls._parse_option(token)) return {'arguments': arguments, 'options': options}
nipy
nipy//core/image/image.pyfile:/core/image/image.py:function:synchronized_order/synchronized_order
def synchronized_order(img, target_img, axes=True, reference=True):
    """ Reorder reference and axes of `img` to match target_img.

    Parameters
    ----------
    img : Image
    target_img : Image
    axes : bool, optional
        If True, synchronize the order of the axes.
    reference : bool, optional
        If True, synchronize the order of the reference coordinates.

    Returns
    -------
    newimg : Image
        An Image satisfying newimg.axes == target.axes (if axes == True),
        newimg.reference == target.reference (if reference == True).

    Examples
    --------
    >>> data = np.random.standard_normal((3,4,7,5))
    >>> im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))
    >>> im_scrambled = im.reordered_axes('iljk').reordered_reference('txyz')
    >>> im == im_scrambled
    False
    >>> im_unscrambled = synchronized_order(im_scrambled, im)
    >>> im == im_unscrambled
    True

    The images don't have to be the same shape

    >>> data2 = np.random.standard_normal((3,11,9,4))
    >>> im2 = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))
    >>> im_scrambled2 = im2.reordered_axes('iljk').reordered_reference('xtyz')
    >>> im_unscrambled2 = synchronized_order(im_scrambled2, im)
    >>> im_unscrambled2.coordmap == im.coordmap
    True

    or have the same coordmap

    >>> data3 = np.random.standard_normal((3,11,9,4))
    >>> im3 = Image(data3, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,9,3,-2,1])))
    >>> im_scrambled3 = im3.reordered_axes('iljk').reordered_reference('xtyz')
    >>> im_unscrambled3 = synchronized_order(im_scrambled3, im)
    >>> im_unscrambled3.axes == im.axes
    True
    >>> im_unscrambled3.reference == im.reference
    True
    >>> im_unscrambled4 = synchronized_order(im_scrambled3, im, axes=False)
    >>> im_unscrambled4.axes == im.axes
    False
    >>> im_unscrambled4.axes == im_scrambled3.axes
    True
    >>> im_unscrambled4.reference == im.reference
    True
    """
    # Pull the target's axis and reference coordinate systems once.
    target_axes = target_img.axes
    target_reference = target_img.coordmap.function_range
    # Each reordering returns a new Image; the original is not mutated.
    if axes:
        img = img.reordered_axes(target_axes.coord_names)
    if reference:
        img = img.reordered_reference(target_reference.coord_names)
    return img
CDS-1.0.1
CDS-1.0.1//cds/modules/deposit/api.pyfile:/cds/modules/deposit/api.py:function:is_deposit/is_deposit
def is_deposit(url):
    """Check whether ``url`` refers to a deposit rather than a record.

    Non-string inputs (e.g. None) yield False instead of raising.
    """
    try:
        return 'deposit' in url
    except TypeError:
        # `in` raised because url is not a string-like container.
        return False
ldapper
ldapper//utils.pyfile:/utils.py:function:dn_attribute/dn_attribute
def dn_attribute(dn, attr):
    """Return the value of ``attr`` from a full DN, or None if absent.

    Only the first matching RDN (comma-separated ``attr=value`` pair) is
    consulted.
    """
    prefix = '%s=' % attr
    for rdn in dn.split(','):
        if rdn.startswith(prefix):
            return rdn.split('=', 1)[1]
    return None
haggis
haggis//recipes.pyclass:KeyedSingleton/__call__
def __call__(cls, *args, **kwargs):
    """Return the singleton instance registered under the first argument.

    The constructor/initializer require at least one argument: the first
    positional argument is the singleton key. A new instance is allocated
    and initialized only the first time a key is seen; later calls with
    the same key return the cached instance without re-initializing it.

    :raises ValueError: if called with no positional arguments.
    """
    if not args:
        raise ValueError('Instance key must be first argument in constructor')
    key = args[0]
    try:
        return cls._instances[key]
    except KeyError:
        instance = super().__call__(*args, **kwargs)
        cls._instances[key] = instance
        return instance
rcgrep-0.1.0
rcgrep-0.1.0//versioneer.pyfile:/versioneer.py:function:render_git_describe/render_git_describe
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        if pieces['distance']:
            rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
    else:
        rendered = pieces['short']
    if pieces['dirty']:
        rendered += '-dirty'
    return rendered
maestro
maestro//utils/text.pyfile:/utils/text.py:function:make_string/make_string
def make_string(x):
    """Coerce ``x`` into a str.

    :param x: a str (returned unchanged) or bytes (decoded as UTF-8).
    :return: the string value.
    :raises TypeError: if ``x`` is neither str nor bytes.
    """
    if isinstance(x, str):
        # BUG FIX: the original returned the ``str`` *type object* here
        # instead of the value itself.
        return x
    elif isinstance(x, bytes):
        return x.decode('utf8')
    else:
        cls_name = type(x).__name__
        raise TypeError(f'expect string type, got {cls_name}')
xblock
xblock//mixins.pyclass:HandlersMixin/handler
@classmethod
def handler(cls, func):
    """A decorator to indicate a function is usable as a handler.

    The wrapped function must return a `webob.Response` object; this only
    tags the function, it does not wrap or replace it.
    """
    setattr(func, '_is_xblock_handler', True)
    return func
dkPYUtils-0.1.10
dkPYUtils-0.1.10//src/functionapi.pyclass:functionapi/getConfig
@staticmethod
def getConfig(conf, section, key):
    """Return the value of ``key`` under the given ``section``.

    :param conf: a ConfigParser-like object exposing ``get(section, key)``.
    :param section: section name to look in.
    :param key: option name within the section.
    :return: the configured value.
    """
    return conf.get(section, key)
pando-0.47
pando-0.47//pando/state_chain.pyfile:/pando/state_chain.py:function:request_available/request_available
def request_available():
    """No-op placeholder for easy hookage."""
    return None
fake-bpy-module-2.80-20200428
fake-bpy-module-2.80-20200428//bpy/ops/armature.pyfile:/bpy/ops/armature.py:function:select_more/select_more
def select_more():
    """Select those bones connected to the initial selection.

    fake-bpy stub: a no-op signature for IDE support outside Blender.
    """
    return None
kipoi
kipoi//readers.pyclass:ZarrReader/load
@classmethod
def load(cls, file_path, unflatten=True):
    """Open a Zarr file and load all of its data at once (classmethod).

    # Arguments
        file_path: Zarr file path
        unflatten: forwarded to `load_all`
    """
    with cls(file_path) as reader:
        data = reader.load_all(unflatten=unflatten)
    return data
mf_horizon_client-2.1.1.1
mf_horizon_client-2.1.1.1//versioneer.pyfile:/versioneer.py:function:render_git_describe/render_git_describe
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if not pieces['closest-tag']:
        # Untagged history: just the short hash.
        rendered = pieces['short']
    else:
        rendered = pieces['closest-tag']
        distance = pieces['distance']
        if distance:
            rendered = '%s-%d-g%s' % (rendered, distance, pieces['short'])
    if pieces['dirty']:
        rendered = rendered + '-dirty'
    return rendered
nmrglue
nmrglue//fileio/glue.pyfile:/fileio/glue.py:function:get_dic/get_dic
def get_dic(f, dataset='spectrum'):
    """Build a parameter dictionary from a dataset in a HDF5 file.

    Attribute names of the form ``<axis>_<name>`` are grouped into a
    per-axis sub-dictionary keyed by the integer axis number; all other
    attributes are copied to the top level.
    """
    dic = {}
    for name, value in f[dataset].attrs.items():
        if '_' not in name:
            dic[name] = value
            continue
        axis_str, param = name.split('_', 1)
        axis = int(axis_str)
        dic.setdefault(axis, {})[param] = value
    return dic
bpy
bpy//ops/mask.pyfile:/ops/mask.py:function:select_linked/select_linked
def select_linked():
    """Select all curve points linked to already selected ones.

    API stub; the body is intentionally empty.
    """
    return None
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/rds.pyfile:/pyboto3/rds.py:function:describe_source_regions/describe_source_regions
def describe_source_regions(RegionName=None, MaxRecords=None, Marker=None,
                            Filters=None):
    """List the source AWS regions from which the current region can
    create a Read Replica or copy a DB snapshot (RDS
    ``DescribeSourceRegions``).  Supports pagination.

    :param RegionName: source region name (must be a valid AWS region).
    :param MaxRecords: max records per page (20-100, default 100); when
        more exist a pagination marker is included in the response.
    :param Marker: pagination token from a previous call; results resume
        beyond it, up to ``MaxRecords``.
    :param Filters: not currently supported.
    :return: dict with ``'Marker'`` and ``'SourceRegions'`` (each region
        a dict of ``RegionName``, ``Endpoint`` and ``Status``).

    NOTE(review): pyboto3 ships documentation stubs only — this body is
    intentionally empty and returns None.
    """
    return None
scikit-discovery-0.9.18
scikit-discovery-0.9.18//skdiscovery/utilities/patterns/polygon_utils.pyfile:/skdiscovery/utilities/patterns/polygon_utils.py:function:findPolygon/findPolygon
def findPolygon(in_data, in_point):
    """Find the polygon that a point resides in.

    @param in_data: Input data containing polygons as read in by parseBasemapShape
    @param in_point: Shapely point
    @return: Index of the single shape in in_data that contains in_point,
        or -1 when no shape contains it
    @raise RuntimeError: if more than one polygon contains the point
    """
    result_num = None
    for index, data in enumerate(in_data):
        if data['polygon'].contains(in_point):
            # Idiom fix: identity comparison for None (`is`), not `==`.
            if result_num is None:
                result_num = index
            else:
                raise RuntimeError('Multiple polygons contains point')
    if result_num is None:
        return -1
    return result_num
ytrss-0.2.6
ytrss-0.2.6//ytrss/core/settings.pyclass:YTSettings/__print_url_name
@staticmethod
def __print_url_name(elem):
    """Format a display name for a url-description dictionary.

    @param elem: url information
    @type elem: L{dict}
    @return: formatted name of url ("<name> (<code>)" when a name is
        present, otherwise just the code)
    @rtype: str
    """
    name = ''
    try:
        name = '{} ({})'.format(elem['name'], elem['code'])
    except (KeyError, ValueError):
        # Bug fix: a missing 'name' key raises KeyError, which the
        # original ``except ValueError`` never caught, so the intended
        # fallback to the bare code was unreachable.
        name = elem['code']
    return name
pdftotree-0.4.0
pdftotree-0.4.0//pdftotree/utils/pdf/node.pyfile:/pdftotree/utils/pdf/node.py:function:_one_contains_other/_one_contains_other
def _one_contains_other(s1, s2):
    """Return True when one of the two sets fully contains the other."""
    # The smaller set is contained iff the intersection is that whole set.
    smaller_size = min(len(s1), len(s2))
    return smaller_size == len(s1 & s2)
hesong
hesong//utils/jsonrpc.pyclass:Local/get
@classmethod
def get(cls, method):
    """Look up the locally registered RPC implementation stub.

    :param str method: RPC method name.
    :rtype: Local or None
    """
    registry = cls._stubs
    return registry.get(method)
coala_utils
coala_utils//string_processing/Core.pyfile:/string_processing/Core.py:function:convert_to_raw/convert_to_raw
def convert_to_raw(string, exceptions=''):
    """Convert a string to its raw form.

    Every backslash is doubled, except when it escapes one of the
    characters listed in ``exceptions`` (then the pair is kept as-is).

    :param string: The string to convert.
    :param exceptions: Characters whose backslash-escapes must not be
        doubled.
    :return: The corresponding raw string.
    """
    chunks = []
    pos = 0
    end = len(string)
    while pos < end:
        char = string[pos]
        if char == '\\' and pos + 1 < end and string[pos + 1] not in exceptions:
            # Double the backslash...
            chunks.append('\\')
            if string[pos + 1] == '\\':
                # ...but treat an existing pair as one unit.
                pos += 1
                char = string[pos]
        chunks.append(char)
        pos += 1
    return ''.join(chunks)
dtrspnsy-0.0.2
dtrspnsy-0.0.2//dtrspnsy/parse_wikiRU.pyfile:/dtrspnsy/parse_wikiRU.py:function:get_hero_name/get_hero_name
def get_hero_name(hero_page):
    """Extract the hero name from a responses-page title.

    Response pages are titled ``Hero name/Responses``; only the part
    before the first slash is the hero name.

    :param hero_page: hero's responses page title as a string.
    :return: the hero name.
    """
    name, _, _ = hero_page.partition('/')
    return name
phenopy
phenopy//score.pyclass:Scorer/maximum
@staticmethod
def maximum(df):
    """Return the largest similarity value in the term-score matrix."""
    scores = df.values
    return scores.max()
rootpy-1.0.1
rootpy-1.0.1//rootpy/plotting/contrib/plot_corrcoef_matrix.pyfile:/rootpy/plotting/contrib/plot_corrcoef_matrix.py:function:cov/cov
def cov(m, y=None, rowvar=1, bias=0, ddof=None, weights=None,
        repeat_weights=0):
    """Estimate the (optionally weighted) covariance matrix of the data.

    Parameters
    ----------
    m : array_like
        1-D or 2-D array of variables and observations.  With the
        default ``rowvar=1`` each row is a variable and each column an
        observation.
    y : array_like, optional
        Extra variables/observations, stacked onto ``m``.
    rowvar : int, optional
        Non-zero (default): rows are variables.  Zero: columns are.
    bias : int, optional
        0 (default): normalize by ``N - 1``; 1: normalize by ``N``.
        Overridden by ``ddof`` when that is not None.
    ddof : int, optional
        Delta degrees of freedom; overrides ``bias``.
    weights : array-like, optional
        1-D array of observation weights (length = number of
        observations).
    repeat_weights : int, optional
        When 1, ``weights`` are integer repeat counts and the usual
        ``N - ddof`` normalization applies to their sum; when 0
        (default) the weights are normalized to unit sum and the biased
        weighted estimator with factor ``1 - sum(w**2)`` is used.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables, squeezed (a scalar for
        a single variable).

    Examples
    --------
    >>> cov([-2.1, -1, 4.3])
    11.709999999999999
    """
    import numpy as np
    if ddof is not None and ddof != int(ddof):
        raise ValueError('ddof must be integer')
    data = np.array(m, ndmin=2, dtype=float)
    if data.size == 0:
        # Empty input: hand back an (empty) array built from m.
        return np.array(m)
    if data.shape[0] == 1:
        # A single row is always treated as one variable.
        rowvar = 1
    if rowvar:
        axis = 0
        tup = slice(None), np.newaxis
    else:
        axis = 1
        tup = np.newaxis, slice(None)
    if y is not None:
        y = np.array(y, copy=False, ndmin=2, dtype=float)
        data = np.concatenate((data, y), axis)
    if ddof is None:
        ddof = 1 if bias == 0 else 0
    if weights is not None:
        weights = np.array(weights, dtype=float)
        weights_sum = weights.sum()
        if weights_sum <= 0:
            raise ValueError('sum of weights is non-positive')
        # Center on the weighted mean.
        data -= np.average(data, axis=1 - axis, weights=weights)[tup]
        if repeat_weights:
            # Weights are repeat counts: total sample size is their sum.
            fact = weights_sum - ddof
        else:
            # Normalized weights: biased weighted-covariance factor.
            weights /= weights_sum
            fact = 1.0 - np.power(weights, 2).sum()
    else:
        weights = 1
        data -= data.mean(axis=1 - axis)[tup]
        N = data.shape[1] if rowvar else data.shape[0]
        fact = float(N - ddof)
    if not rowvar:
        return (np.dot(weights * data.T, data.conj()) / fact).squeeze()
    return (np.dot(weights * data, data.T.conj()) / fact).squeeze()
dynamorm
dynamorm//types/base.pyclass:DynamORMSchema/base_field_type
@staticmethod
def base_field_type():
    """Return the class that all fields in the schema inherit from.

    Abstract here: concrete schema implementations must override it.
    """
    raise NotImplementedError('Child class must implement base_field_type')
rickshaw-1.5.3
rickshaw-1.5.3//rickshaw/simspec.pyfile:/rickshaw/simspec.py:function:def_archetypes/def_archetypes
def def_archetypes():
    """Produce the default niche-archetype links for a rickshaw simspec.

    Returns
    -------
    dict
        Mapping from niche name to the set of archetype specs.
    """
    # Build (niche, archetype) pairs in the canonical order, expanding
    # the sub-niche families from their common prefix.
    pairs = [
        ('mine', ':cycamore:Source'),
        ('conversion', ':cycamore:Storage'),
        ('enrichment', ':cycamore:Enrichment'),
    ]
    pairs += [('fuel_fab' + sfx, ':cycamore:FuelFab')
              for sfx in ('', ':uo2', ':triso', ':mox')]
    pairs += [('reactor' + sfx, ':cycamore:Reactor')
              for sfx in ('', ':fr', ':lwr', ':hwr', ':htgr', ':rbmk', ':pb')]
    pairs += [('storage' + sfx, ':cycamore:Storage')
              for sfx in ('', ':wet', ':dry', ':interim')]
    pairs += [
        ('separations', ':cycamore:Separations'),
        ('repository', ':cycamore:Sink'),
    ]
    return {niche: {spec} for niche, spec in pairs}
dropbox-10.1.2
dropbox-10.1.2//dropbox/team_log.pyclass:EventType/tfa_add_backup_phone
@classmethod
def tfa_add_backup_phone(cls, val):
    """Build an instance tagged ``tfa_add_backup_phone`` carrying ``val``.

    :param TfaAddBackupPhoneType val: payload for the tag.
    :rtype: EventType
    """
    tag = 'tfa_add_backup_phone'
    return cls(tag, val)
sdcflows-1.3.0
sdcflows-1.3.0//sdcflows/workflows/gre.pyfile:/sdcflows/workflows/gre.py:function:_demean/_demean
def _demean(in_file, in_mask=None, usemode=True):
    """Subtract the median (or mode) of a map from itself.

    Parameters
    ----------
    in_file : str
        Path of the input NIfTI image.
    in_mask : str, optional
        Path of a mask image; only voxels where the mask is >= 1e-4
        are centered.
    usemode : bool
        Use the mode instead of the median (should be even more robust
        against outliers).

    Returns
    -------
    str
        Path of the demeaned image, written into the current directory.
    """
    from os import getcwd
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix
    img = nb.load(in_file)
    volume = img.get_fdata(dtype='float32')
    mask = np.ones_like(volume, dtype=bool)
    if in_mask is not None:
        mask[nb.load(in_mask).get_fdata(dtype='float32') < 0.0001] = False
    if usemode:
        from scipy.stats import mode
        center = mode(volume[mask], axis=None)[0][0]
    else:
        center = np.median(volume[mask], axis=None)
    volume[mask] -= center
    out_file = fname_presuffix(in_file, suffix='_demean', newpath=getcwd())
    nb.Nifti1Image(volume, img.affine, img.header).to_filename(out_file)
    return out_file
pyphinb-2.9.4
pyphinb-2.9.4//pyphinb/validate.pyfile:/pyphinb/validate.py:function:is_network/is_network
def is_network(network):
    """Validate that the argument is a |Network|.

    Raises:
        ValueError: if ``network`` is not a |Network| instance.
    """
    from . import Network
    if not isinstance(network, Network):
        # Fixed: the original message was missing its closing parenthesis.
        raise ValueError(
            'Input must be a Network (perhaps you passed a Subsystem instead?)')
metaknowledge
metaknowledge//medline/tagProcessing/tagFunctions.pyfile:/medline/tagProcessing/tagFunctions.py:function:TT/TT
def TT(val):
    """MEDLINE ``TT`` (TransliteratedTitle) tag: pass the value through."""
    return val
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/apigateway.pyfile:/pyboto3/apigateway.py:function:get_request_validators/get_request_validators
def get_request_validators(restApiId=None, position=None, limit=None):
    """Fetch the RequestValidators collection of a RestApi (API Gateway
    ``GetRequestValidators``).

    :param restApiId: [REQUIRED] identifier of the RestApi owning the
        RequestValidators collection.
    :param position: current pagination position in the paged result set.
    :param limit: maximum number of returned results per page.
    :return: dict with ``'position'`` and ``'items'`` (each item carrying
        ``id``, ``name``, ``validateRequestBody`` and
        ``validateRequestParameters``).

    NOTE(review): pyboto3 ships documentation stubs only — this body is
    intentionally empty and returns None.
    """
    return None
pyphs-0.5.1
pyphs-0.5.1//pyphs/misc/tools.pyfile:/pyphs/misc/tools.py:function:remove_duplicates/remove_duplicates
def remove_duplicates(lis):
    """Return a copy of ``lis`` with duplicate entries removed, keeping
    the order of first appearance.

    Membership is tested with ``in`` (identity-or-equality), so the
    function also works for unhashable elements.
    """
    seen = []
    keep = seen.append
    for element in lis:
        if element not in seen:
            keep(element)
    return seen
fake-bpy-module-2.80-20200428
fake-bpy-module-2.80-20200428//bpy/ops/object.pyfile:/bpy/ops/object.py:function:posemode_toggle/posemode_toggle
def posemode_toggle():
    """Enable or disable posing/selecting bones.

    Auto-generated operator stub (fake-bpy-module); the body is
    intentionally empty — the real operator lives inside Blender.
    """
    return None
SpiffWorkflow
SpiffWorkflow//bpmn/parser/util.pyfile:/bpmn/parser/util.py:function:first/first
def first(nodes):
    """Return the first node in ``nodes``, or None when the list is empty."""
    if not nodes:
        return None
    return nodes[0]
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/iam.pyfile:/pyboto3/iam.py:function:delete_login_profile/delete_login_profile
def delete_login_profile(UserName=None):
    """Delete the console password of an IAM user (IAM
    ``DeleteLoginProfile``), terminating that user's AWS Management
    Console access.

    :param UserName: [REQUIRED] name of the user whose password to
        delete; alphanumerics plus ``=,.@-`` only, no spaces.
    :return: None.

    NOTE(review): pyboto3 ships documentation stubs only — this body is
    intentionally empty.
    """
    return None
win32comext
win32comext//axscript/client/framework.pyfile:/axscript/client/framework.py:function:trace/trace
def trace(*args):
    """Debugging replacement for ``print``.

    Writes each argument followed by a single space, then a newline
    (so a trailing space precedes the newline when args are given).
    """
    rendered = [str(arg) for arg in args]
    if rendered:
        print(' '.join(rendered) + ' ')
    else:
        print()
fake-bpy-module-2.78-20200428
fake-bpy-module-2.78-20200428//bpy/ops/clip.pyfile:/bpy/ops/clip.py:function:set_center_principal/set_center_principal
def set_center_principal():
    """Set optical center to center of footage.

    Auto-generated operator stub (fake-bpy-module); the body is
    intentionally empty — the real operator lives inside Blender.
    """
    return None
acitoolkit-0.4
acitoolkit-0.4//acitoolkit/acibaseobject.pyclass:BaseACIObject/get_table
@staticmethod
def get_table(aci_object, title=''):
    """Abstract hook that object-specific subclasses replace.

    :param aci_object: object(s) to tabulate.
    :param title: string containing the table title.
    :return: list of Table objects (placeholder ``[None]`` here).
    """
    return [None]
nlg_yongzhuo
nlg_yongzhuo//text_summarization/extractive_sum/graph_base/textrank/textrank_gensim.pyfile:/text_summarization/extractive_sum/graph_base/textrank/textrank_gensim.py:function:_get_sentences_with_word_count/_get_sentences_with_word_count
def _get_sentences_with_word_count(sentences, word_count):
    """Take a prefix of ``sentences`` whose total word count is as close
    as possible to ``word_count``.

    Parameters
    ----------
    sentences : list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Candidate sentences, most important first.
    word_count : int or None
        Target number of returned words.  If None, all sentences are
        returned.

    Returns
    -------
    list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Most important sentences.
    """
    if word_count is None:
        # Bug fix: the documented None case previously raised TypeError
        # in the arithmetic below; honor the contract and return all.
        return list(sentences)
    length = 0
    selected_sentences = []
    for sentence in sentences:
        words_in_sentence = len(sentence.text.split())
        # Stop as soon as adding the next sentence would move the total
        # farther from the target than it already is.
        if abs(word_count - length - words_in_sentence) > abs(word_count -
                length):
            return selected_sentences
        selected_sentences.append(sentence)
        length += words_in_sentence
    return selected_sentences
jqfactor_analyzer-1.0.6
jqfactor_analyzer-1.0.6//jqfactor_analyzer/performance.pyfile:/jqfactor_analyzer/performance.py:function:factor_autocorrelation/factor_autocorrelation
def factor_autocorrelation(factor_data, period=1, rank=True):
    """Compute the autocorrelation of mean factor rank/value over the
    specified horizon.

    This metric is very useful for measuring factor turnover.  If factor
    values change randomly from period to period, the expected
    autocorrelation is 0.

    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A DataFrame indexed by date (level 0) and asset (level 1) in a
        MultiIndex, whose values include the factor value, forward
        returns for each period, the factor quantile, and optionally
        the factor group and factor weight.
    period : int, optional
        Horizon (in periods) of the corresponding forward returns.
    rank : bool, optional
        If True, correlate cross-sectional factor *ranks*; otherwise
        correlate the raw factor values.

    Returns
    -------
    autocorr : pd.Series
        Factor autocorrelation at a lag of ``period``.
    """
    grouper = [factor_data.index.get_level_values('date')]
    if rank:
        # Cross-sectional (per-date) rank of the factor.
        ranks = factor_data.groupby(grouper)[['factor']].rank()
    else:
        ranks = factor_data[['factor']]
    # Reshape into a dates x assets matrix of ranks/values.
    asset_factor_rank = ranks.reset_index().pivot(index='date', columns=
        'asset', values='factor')
    # Row-wise (per-date) correlation with the matrix shifted by `period`.
    autocorr = asset_factor_rank.corrwith(asset_factor_rank.shift(period),
        axis=1)
    # Name the series after the lag so callers can identify the horizon.
    autocorr.name = period
    return autocorr
telethon
telethon//utils.pyfile:/utils.py:function:get_message_id/get_message_id
def get_message_id(message):
    """Sanitizes the 'reply_to' parameter a user may send.

    Accepts None, a bare integer ID, a wrapper object exposing
    ``original_message``, or a raw object carrying ``SUBCLASS_OF_ID``;
    anything else raises TypeError.
    """
    if message is None:
        return None
    if isinstance(message, int):
        return message
    if hasattr(message, 'original_message'):
        # Wrapper around the underlying message object.
        return message.original_message.id
    try:
        # 2030045667 marks the TL type family this ID scheme applies to
        # (presumably the generated 'Message' abstract type — verify).
        if message.SUBCLASS_OF_ID == 2030045667:
            return message.id
    except AttributeError:
        pass
    raise TypeError('Invalid message type: {}'.format(type(message)))
hyperparameter_hunter-3.0.0
hyperparameter_hunter-3.0.0//hyperparameter_hunter/keys/makers.pyclass:KeyMaker/_filter_parameters_to_hash
@staticmethod
def _filter_parameters_to_hash(parameters):
    """Drop values that must be ignored when hashing.

    This base implementation filters nothing: it returns the mapping
    unchanged (an identity pass-through).

    Parameters
    ----------
    parameters: Dict
        The full dictionary of initial parameters to be filtered.

    Returns
    -------
    parameters: Dict
        The filtered version of the given `parameters`.
    """
    return parameters
edx-sga-0.10.0
edx-sga-0.10.0//edx_sga/utils.pyfile:/edx_sga/utils.py:function:is_finalized_submission/is_finalized_submission
def is_finalized_submission(submission_data):
    """Tell whether a submission dict was finalized by the student.

    A submission counts as finalized when it exists, carries a non-None
    'answer', and that answer's 'finalized' flag is true (a missing
    flag defaults to True).
    """
    if not submission_data:
        return False
    answer = submission_data.get('answer')
    if answer is None:
        return False
    return answer.get('finalized', True)
bismuthclient-0.0.49
bismuthclient-0.0.49//bismuthclient/bismuthutil.pyclass:BismuthUtil/height_to_supply
@staticmethod
def height_to_supply(height):
    """Total coin supply at a given block height.

    Combines the base amount R0 with the accumulated PoS reward and a
    linearly decaying PoW reward, both scaled by the dev-reward
    multiplier (1.1); N counts blocks past height 800000.
    """
    genesis_amount = 11680000.4
    decay = 2e-06
    pos_reward = 0.8
    pow_reward = 12.6
    dev_multiplier = 1.1
    blocks = height - 800000.0
    return dev_multiplier * genesis_amount + blocks * (pos_reward +
        dev_multiplier * (pow_reward - blocks / 2 * decay))