column              type            min      max
nwo                 stringlengths   5        91
sha                 stringlengths   40       40
path                stringlengths   5        174
language            stringclasses   1 value
identifier          stringlengths   1        120
parameters          stringlengths   0        3.15k
argument_list       stringclasses   1 value
return_statement    stringlengths   0        24.1k
docstring           stringlengths   0        27.3k
docstring_summary   stringlengths   0        13.8k
docstring_tokens    sequence
function            stringlengths   22       139k
function_tokens     sequence
url                 stringlengths   87       283
marinho/geraldo
868ebdce67176d9b6205cddc92476f642c783fff
site/newsite/site-geraldo/django/utils/html.py
python
conditional_escape
(html)
Similar to escape(), except that it doesn't operate on pre-escaped strings.
Similar to escape(), except that it doesn't operate on pre-escaped strings.
[ "Similar", "to", "escape", "()", "except", "that", "it", "doesn", "t", "operate", "on", "pre", "-", "escaped", "strings", "." ]
def conditional_escape(html):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.
    """
    if isinstance(html, SafeData):
        return html
    else:
        return escape(html)
[ "def", "conditional_escape", "(", "html", ")", ":", "if", "isinstance", "(", "html", ",", "SafeData", ")", ":", "return", "html", "else", ":", "return", "escape", "(", "html", ")" ]
https://github.com/marinho/geraldo/blob/868ebdce67176d9b6205cddc92476f642c783fff/site/newsite/site-geraldo/django/utils/html.py#L35-L42
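The pass-through behavior described above is easy to demonstrate; a minimal sketch, assuming Django is installed (mark_safe is what produces SafeData strings):

# Usage sketch, not part of the record above; assumes a Django installation.
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe

print(conditional_escape("<b>hi</b>"))             # &lt;b&gt;hi&lt;/b&gt; -- plain strings are escaped
print(conditional_escape(mark_safe("<b>hi</b>")))  # <b>hi</b> -- pre-escaped strings pass through untouched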
deepgram/kur
fd0c120e50815c1e5be64e5dde964dcd47234556
kur/containers/operators/assertion.py
python
Assertion.__init__
(self, *args, **kwargs)
Create a new assertion container.
Create a new assertion container.
[ "Create", "a", "new", "assertion", "container", "." ]
def __init__(self, *args, **kwargs):
    """ Create a new assertion container.
    """
    super().__init__(*args, **kwargs)
    self.condition = None
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "condition", "=", "None" ]
https://github.com/deepgram/kur/blob/fd0c120e50815c1e5be64e5dde964dcd47234556/kur/containers/operators/assertion.py#L35-L39
duckduckgo/zeroclickinfo-fathead
477c5652f6576746618dbb8158b67f0960ae9f56
lib/fathead/reactjs/parse.py
python
OutputFileData.__init__
(self, api_data, output_file)
Initialize with parsed api data list and name of the output file
Initialize with parsed api data list and name of the output file
[ "Initialize", "with", "parsed", "api", "data", "list", "and", "name", "of", "the", "output", "file" ]
def __init__(self, api_data, output_file):
    """
    Initialize with parsed api data list and name of the output file
    """
    self.data = api_data
    self.output_file = output_file
[ "def", "__init__", "(", "self", ",", "api_data", ",", "output_file", ")", ":", "self", ".", "data", "=", "api_data", "self", ".", "output_file", "=", "output_file" ]
https://github.com/duckduckgo/zeroclickinfo-fathead/blob/477c5652f6576746618dbb8158b67f0960ae9f56/lib/fathead/reactjs/parse.py#L155-L161
musiKk/plyj
c27d159b2fffe241a2d091e1be3d79790b216732
plyj/parser.py
python
StatementParser.p_method_invocation
(self, p)
method_invocation : NAME '(' argument_list_opt ')'
method_invocation : NAME '(' argument_list_opt ')'
[ "method_invocation", ":", "NAME", "(", "argument_list_opt", ")" ]
def p_method_invocation(self, p):
    '''method_invocation : NAME '(' argument_list_opt ')' '''
    p[0] = MethodInvocation(p[1], arguments=p[3])
[ "def", "p_method_invocation", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "MethodInvocation", "(", "p", "[", "1", "]", ",", "arguments", "=", "p", "[", "3", "]", ")" ]
https://github.com/musiKk/plyj/blob/c27d159b2fffe241a2d091e1be3d79790b216732/plyj/parser.py#L600-L602
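In PLY, the docstring of a p_* method is the grammar production itself; a toy standalone sketch of the same mechanism (not the plyj grammar, which is far larger):

# Minimal ply example: the rule's docstring declares the production,
# and p[0]/p[1] carry semantic values, as in p_method_invocation above.
import ply.lex as lex
import ply.yacc as yacc

tokens = ("NAME", "LPAREN", "RPAREN")
t_NAME = r"[A-Za-z_]\w*"
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_ignore = " "

def t_error(t):
    t.lexer.skip(1)

def p_call(p):
    "call : NAME LPAREN RPAREN"
    p[0] = ("call", p[1])

def p_error(p):
    pass

lex.lex()
parser = yacc.yacc()
print(parser.parse("foo()"))  # ('call', 'foo')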
ARISE-Initiative/robosuite
a5dfaf03cd769170881a1931d8f19c8eb72f531a
robosuite/environments/manipulation/nut_assembly.py
python
NutAssembly._create_nut_sensors
(self, nut_name, modality="object")
return sensors, names
Helper function to create sensors for a given nut. This is abstracted in a separate function call so that we
don't have local function naming collisions during the _setup_observables() call.

Args:
    nut_name (str): Name of nut to create sensors for
    modality (str): Modality to assign to all sensors

Returns:
    2-tuple:
        sensors (list): Array of sensors for the given nut
        names (list): array of corresponding observable names
Helper function to create sensors for a given nut. This is abstracted in a separate function call so that we don't have local function naming collisions during the _setup_observables() call.
[ "Helper", "function", "to", "create", "sensors", "for", "a", "given", "nut", ".", "This", "is", "abstracted", "in", "a", "separate", "function", "call", "so", "that", "we", "don", "t", "have", "local", "function", "naming", "collisions", "during", "the", "_setup_observables", "()", "call", "." ]
def _create_nut_sensors(self, nut_name, modality="object"):
    """
    Helper function to create sensors for a given nut. This is abstracted in a separate function call so that we
    don't have local function naming collisions during the _setup_observables() call.

    Args:
        nut_name (str): Name of nut to create sensors for
        modality (str): Modality to assign to all sensors

    Returns:
        2-tuple:
            sensors (list): Array of sensors for the given nut
            names (list): array of corresponding observable names
    """
    pf = self.robots[0].robot_model.naming_prefix

    @sensor(modality=modality)
    def nut_pos(obs_cache):
        return np.array(self.sim.data.body_xpos[self.obj_body_id[nut_name]])

    @sensor(modality=modality)
    def nut_quat(obs_cache):
        return T.convert_quat(self.sim.data.body_xquat[self.obj_body_id[nut_name]], to="xyzw")

    @sensor(modality=modality)
    def nut_to_eef_pos(obs_cache):
        # Immediately return default value if cache is empty
        if any(
            [name not in obs_cache for name in [f"{nut_name}_pos", f"{nut_name}_quat", "world_pose_in_gripper"]]
        ):
            return np.zeros(3)
        obj_pose = T.pose2mat((obs_cache[f"{nut_name}_pos"], obs_cache[f"{nut_name}_quat"]))
        rel_pose = T.pose_in_A_to_pose_in_B(obj_pose, obs_cache["world_pose_in_gripper"])
        rel_pos, rel_quat = T.mat2pose(rel_pose)
        obs_cache[f"{nut_name}_to_{pf}eef_quat"] = rel_quat
        return rel_pos

    @sensor(modality=modality)
    def nut_to_eef_quat(obs_cache):
        return (
            obs_cache[f"{nut_name}_to_{pf}eef_quat"]
            if f"{nut_name}_to_{pf}eef_quat" in obs_cache
            else np.zeros(4)
        )

    sensors = [nut_pos, nut_quat, nut_to_eef_pos, nut_to_eef_quat]
    names = [f"{nut_name}_pos", f"{nut_name}_quat", f"{nut_name}_to_{pf}eef_pos", f"{nut_name}_to_{pf}eef_quat"]

    return sensors, names
[ "def", "_create_nut_sensors", "(", "self", ",", "nut_name", ",", "modality", "=", "\"object\"", ")", ":", "pf", "=", "self", ".", "robots", "[", "0", "]", ".", "robot_model", ".", "naming_prefix", "@", "sensor", "(", "modality", "=", "modality", ")", "def", "nut_pos", "(", "obs_cache", ")", ":", "return", "np", ".", "array", "(", "self", ".", "sim", ".", "data", ".", "body_xpos", "[", "self", ".", "obj_body_id", "[", "nut_name", "]", "]", ")", "@", "sensor", "(", "modality", "=", "modality", ")", "def", "nut_quat", "(", "obs_cache", ")", ":", "return", "T", ".", "convert_quat", "(", "self", ".", "sim", ".", "data", ".", "body_xquat", "[", "self", ".", "obj_body_id", "[", "nut_name", "]", "]", ",", "to", "=", "\"xyzw\"", ")", "@", "sensor", "(", "modality", "=", "modality", ")", "def", "nut_to_eef_pos", "(", "obs_cache", ")", ":", "# Immediately return default value if cache is empty", "if", "any", "(", "[", "name", "not", "in", "obs_cache", "for", "name", "in", "[", "f\"{nut_name}_pos\"", ",", "f\"{nut_name}_quat\"", ",", "\"world_pose_in_gripper\"", "]", "]", ")", ":", "return", "np", ".", "zeros", "(", "3", ")", "obj_pose", "=", "T", ".", "pose2mat", "(", "(", "obs_cache", "[", "f\"{nut_name}_pos\"", "]", ",", "obs_cache", "[", "f\"{nut_name}_quat\"", "]", ")", ")", "rel_pose", "=", "T", ".", "pose_in_A_to_pose_in_B", "(", "obj_pose", ",", "obs_cache", "[", "\"world_pose_in_gripper\"", "]", ")", "rel_pos", ",", "rel_quat", "=", "T", ".", "mat2pose", "(", "rel_pose", ")", "obs_cache", "[", "f\"{nut_name}_to_{pf}eef_quat\"", "]", "=", "rel_quat", "return", "rel_pos", "@", "sensor", "(", "modality", "=", "modality", ")", "def", "nut_to_eef_quat", "(", "obs_cache", ")", ":", "return", "(", "obs_cache", "[", "f\"{nut_name}_to_{pf}eef_quat\"", "]", "if", "f\"{nut_name}_to_{pf}eef_quat\"", "in", "obs_cache", "else", "np", ".", "zeros", "(", "4", ")", ")", "sensors", "=", "[", "nut_pos", ",", "nut_quat", ",", "nut_to_eef_pos", ",", "nut_to_eef_quat", "]", "names", "=", "[", "f\"{nut_name}_pos\"", ",", "f\"{nut_name}_quat\"", ",", "f\"{nut_name}_to_{pf}eef_pos\"", ",", "f\"{nut_name}_to_{pf}eef_quat\"", "]", "return", "sensors", ",", "names" ]
https://github.com/ARISE-Initiative/robosuite/blob/a5dfaf03cd769170881a1931d8f19c8eb72f531a/robosuite/environments/manipulation/nut_assembly.py#L540-L586
jcberquist/sublimetext-cfml
d1e37202eacbf4dd048f2822b7b9d9a93e8cebcf
src/cfml_view.py
python
CfmlView.get_dot_context
(self, pt, cachable=True)
return self._cache["get_dot_context"][pt]
[]
def get_dot_context(self, pt, cachable=True):
    if not cachable or pt not in self._cache["get_dot_context"]:
        self._cache["get_dot_context"][pt] = utils.get_dot_context(self.view, pt)
    return self._cache["get_dot_context"][pt]
[ "def", "get_dot_context", "(", "self", ",", "pt", ",", "cachable", "=", "True", ")", ":", "if", "not", "cachable", "or", "pt", "not", "in", "self", ".", "_cache", "[", "\"get_dot_context\"", "]", ":", "self", ".", "_cache", "[", "\"get_dot_context\"", "]", "[", "pt", "]", "=", "utils", ".", "get_dot_context", "(", "self", ".", "view", ",", "pt", ")", "return", "self", ".", "_cache", "[", "\"get_dot_context\"", "]", "[", "pt", "]" ]
https://github.com/jcberquist/sublimetext-cfml/blob/d1e37202eacbf4dd048f2822b7b9d9a93e8cebcf/src/cfml_view.py#L213-L217
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pkg_resources/_vendor/pyparsing.py
python
ParserElement.__call__
(self, name=None)
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.

If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.

If C{name} is omitted, same as calling C{L{copy}}.

Example::
    # these are equivalent
    userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
    userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be passed as C{True}. If C{name} is omitted, same as calling C{L{copy}}.
[ "Shortcut", "for", "C", "{", "L", "{", "setResultsName", "}}", "with", "C", "{", "listAllMatches", "=", "False", "}", ".", "If", "C", "{", "name", "}", "is", "given", "with", "a", "trailing", "C", "{", "*", "}", "character", "then", "C", "{", "listAllMatches", "}", "will", "be", "passed", "as", "C", "{", "True", "}", ".", "If", "C", "{", "name", "}", "is", "omitted", "same", "as", "calling", "C", "{", "L", "{", "copy", "}}", "." ]
def __call__(self, name=None):
    """
    Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.

    If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
    passed as C{True}.

    If C{name} is omitted, same as calling C{L{copy}}.

    Example::
        # these are equivalent
        userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
        userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
    """
    if name is not None:
        return self.setResultsName(name)
    else:
        return self.copy()
[ "def", "__call__", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "not", "None", ":", "return", "self", ".", "setResultsName", "(", "name", ")", "else", ":", "return", "self", ".", "copy", "(", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pkg_resources/_vendor/pyparsing.py#L1985-L2002
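The Example:: block in the docstring runs as written; a short sketch against the classic pyparsing API:

# Demonstrates the __call__ shortcut: element("name") == element.setResultsName("name").
from pyparsing import Word, alphas, nums

userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
result = userdata.parseString("Houston 123-45-6789")
print(result["name"], result["socsecno"])  # Houston 123-45-6789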
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-47/fabmetheus_utilities/geometry/manipulation_paths/overhang.py
python
OverhangWiddershinsLeft.getBottomLoop
( self, closestBottomIndex, insertedPoint )
return euclidean.getAroundLoop( self.alongAway.pointIndex, endIndex, self.alongAway.loop )
Get loop around bottom.
Get loop around bottom.
[ "Get", "loop", "around", "bottom", "." ]
def getBottomLoop( self, closestBottomIndex, insertedPoint ):
    "Get loop around bottom."
    endIndex = closestBottomIndex + len( self.alongAway.loop ) + 1
    return euclidean.getAroundLoop( self.alongAway.pointIndex, endIndex, self.alongAway.loop )
[ "def", "getBottomLoop", "(", "self", ",", "closestBottomIndex", ",", "insertedPoint", ")", ":", "endIndex", "=", "closestBottomIndex", "+", "len", "(", "self", ".", "alongAway", ".", "loop", ")", "+", "1", "return", "euclidean", ".", "getAroundLoop", "(", "self", ".", "alongAway", ".", "pointIndex", ",", "endIndex", ",", "self", ".", "alongAway", ".", "loop", ")" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-47/fabmetheus_utilities/geometry/manipulation_paths/overhang.py#L313-L316
MasoniteFramework/masonite
faa448377916e9e0f618ea6bdc82330fa6604efc
src/masonite/utils/collections.py
python
Collection.last
(self, callback=None)
return filtered[-1]
Takes the last result in the items.

If a callback is given then the last result will be the result after the filter.

Keyword Arguments:
    callback {callable} -- Used to filter the results before returning the last item. (default: {None})

Returns:
    mixed -- Returns whatever the last item is.
Takes the last result in the items.
[ "Takes", "the", "last", "result", "in", "the", "items", "." ]
def last(self, callback=None):
    """Takes the last result in the items.

    If a callback is given then the last result will be the result after the filter.

    Keyword Arguments:
        callback {callable} -- Used to filter the results before returning the last item. (default: {None})

    Returns:
        mixed -- Returns whatever the last item is.
    """
    filtered = self
    if callback:
        filtered = self.filter(callback)

    return filtered[-1]
[ "def", "last", "(", "self", ",", "callback", "=", "None", ")", ":", "filtered", "=", "self", "if", "callback", ":", "filtered", "=", "self", ".", "filter", "(", "callback", ")", "return", "filtered", "[", "-", "1", "]" ]
https://github.com/MasoniteFramework/masonite/blob/faa448377916e9e0f618ea6bdc82330fa6604efc/src/masonite/utils/collections.py#L50-L64
fossfreedom/alternative-toolbar
b43709bc5de20c7ea66c0e992f59361793874ed3
alternative-toolbar.py
python
AltToolbarPlugin.do_activate
(self)
Called by Rhythmbox when the plugin is activated. It creates the plugin's source and connects signals to manage the plugin's preferences.
Called by Rhythmbox when the plugin is activated. It creates the plugin's source and connects signals to manage the plugin's preferences.
[ "Called", "by", "Rhythmbox", "when", "the", "plugin", "is", "activated", ".", "It", "creates", "the", "plugin", "s", "source", "and", "connects", "signals", "to", "manage", "the", "plugin", "s", "preferences", "." ]
def do_activate(self):
    """
    Called by Rhythmbox when the plugin is activated. It creates the
    plugin's source and connects signals to manage the plugin's
    preferences.
    """
    self.shell = self.object
    self.db = self.shell.props.db
    self.shell_player = self.shell.props.shell_player

    # Prepare internal variables
    self.song_duration = 0
    self.entry = None
    self._plugin_dialog_width = 760
    self._plugin_dialog_height = 550

    # locale stuff
    cl = CoverLocale()
    cl.switch_locale(cl.Locale.LOCALE_DOMAIN)

    # for custom icons ensure we start looking in the plugin img folder
    # as a fallback
    theme = Gtk.IconTheme.get_default()
    theme.append_search_path(rb.find_plugin_file(self, 'img'))

    # Find the Rhythmbox Toolbar
    self.rb_toolbar = AltToolbarPlugin.find(self.shell.props.window,
                                            'main-toolbar', 'by_id')

    # get values from gsettings
    self.gs = GSetting()
    self.plugin_settings = self.gs.get_setting(self.gs.Path.PLUGIN)

    display_type = self.plugin_settings[self.gs.PluginKey.DISPLAY_TYPE]
    self.volume_control = self.plugin_settings[
        self.gs.PluginKey.VOLUME_CONTROL]
    self.show_compact_toolbar = self.plugin_settings[
        self.gs.PluginKey.SHOW_COMPACT]
    self.compact_toolbar_pos = self.plugin_settings[
        self.gs.PluginKey.COMPACT_POS]
    self.start_hidden = self.plugin_settings[
        self.gs.PluginKey.START_HIDDEN]
    self.inline_label = self.plugin_settings[
        self.gs.PluginKey.INLINE_LABEL]
    self.enhanced_sidebar = self.plugin_settings[
        self.gs.PluginKey.ENHANCED_SIDEBAR]
    self.show_tooltips = self.plugin_settings[
        self.gs.PluginKey.SHOW_TOOLTIPS]
    self.enhanced_plugins = self.plugin_settings[
        self.gs.PluginKey.ENHANCED_PLUGINS]
    self.horiz_categories = self.plugin_settings[
        self.gs.PluginKey.HORIZ_CATEGORIES]
    self.app_menu = self.plugin_settings[
        self.gs.PluginKey.APP_MENU]
    self.prefer_dark_theme = \
        self.plugin_settings[self.gs.PluginKey.DARK_THEME]

    # Add the various application view menus
    self.appshell = ApplicationShell(self.shell)
    self._add_menu_options()

    # Determine what type of toolbar is to be displayed
    if display_type == 0:
        if 'gnome' in os.environ['XDG_CURRENT_DESKTOP'].lower():
            display_type = 1
        else:
            display_type = 2
        self.plugin_settings[self.gs.PluginKey.DISPLAY_TYPE] = display_type

    self.toolbar_type = None
    if display_type == 1:
        self.toolbar_type = AltToolbarHeaderBar()
    elif self.show_compact_toolbar:
        self.toolbar_type = AltToolbarCompact()
    else:
        self.toolbar_type = AltToolbarStandard()

    self.toolbar_type.initialise(self)
    self.toolbar_type.post_initialise()

    try:
        process = Gio.Subprocess.new(['rhythmbox', '--version'],
                                     Gio.SubprocessFlags.STDOUT_PIPE)
        passval, buf, err = process.communicate_utf8(None)

        if passval:
            buf = buf[:-1]
            ver = buf.split(' ')[1]
    except:
        ver = "999.99.99"

    if self.enhanced_plugins and ver <= "3.4.3":
        # redirect plugins action to our implementation
        # after v3.4.3 plugins has been moved into
        # preferences so no need to activate our own
        # implementation
        action = Gio.SimpleAction.new('plugins', None)
        action.connect('activate', self._display_plugins)
        self.shell.props.application.add_action(action)

    self._connect_signals()
    self._connect_properties()

    # allow other plugins access to this toolbar
    self.shell.alternative_toolbar = self

    cl.switch_locale(cl.Locale.RB)
[ "def", "do_activate", "(", "self", ")", ":", "self", ".", "shell", "=", "self", ".", "object", "self", ".", "db", "=", "self", ".", "shell", ".", "props", ".", "db", "self", ".", "shell_player", "=", "self", ".", "shell", ".", "props", ".", "shell_player", "# Prepare internal variables", "self", ".", "song_duration", "=", "0", "self", ".", "entry", "=", "None", "self", ".", "_plugin_dialog_width", "=", "760", "self", ".", "_plugin_dialog_height", "=", "550", "# locale stuff", "cl", "=", "CoverLocale", "(", ")", "cl", ".", "switch_locale", "(", "cl", ".", "Locale", ".", "LOCALE_DOMAIN", ")", "# for custom icons ensure we start looking in the plugin img folder", "# as a fallback", "theme", "=", "Gtk", ".", "IconTheme", ".", "get_default", "(", ")", "theme", ".", "append_search_path", "(", "rb", ".", "find_plugin_file", "(", "self", ",", "'img'", ")", ")", "# Find the Rhythmbox Toolbar", "self", ".", "rb_toolbar", "=", "AltToolbarPlugin", ".", "find", "(", "self", ".", "shell", ".", "props", ".", "window", ",", "'main-toolbar'", ",", "'by_id'", ")", "# get values from gsettings", "self", ".", "gs", "=", "GSetting", "(", ")", "self", ".", "plugin_settings", "=", "self", ".", "gs", ".", "get_setting", "(", "self", ".", "gs", ".", "Path", ".", "PLUGIN", ")", "display_type", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "DISPLAY_TYPE", "]", "self", ".", "volume_control", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "VOLUME_CONTROL", "]", "self", ".", "show_compact_toolbar", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "SHOW_COMPACT", "]", "self", ".", "compact_toolbar_pos", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "COMPACT_POS", "]", "self", ".", "start_hidden", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "START_HIDDEN", "]", "self", ".", "inline_label", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "INLINE_LABEL", "]", "self", ".", "enhanced_sidebar", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "ENHANCED_SIDEBAR", "]", "self", ".", "show_tooltips", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "SHOW_TOOLTIPS", "]", "self", ".", "enhanced_plugins", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "ENHANCED_PLUGINS", "]", "self", ".", "horiz_categories", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "HORIZ_CATEGORIES", "]", "self", ".", "app_menu", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "APP_MENU", "]", "self", ".", "prefer_dark_theme", "=", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "DARK_THEME", "]", "# Add the various application view menus", "self", ".", "appshell", "=", "ApplicationShell", "(", "self", ".", "shell", ")", "self", ".", "_add_menu_options", "(", ")", "# Determine what type of toolbar is to be displayed", "if", "display_type", "==", "0", ":", "if", "'gnome'", "in", "os", ".", "environ", "[", "'XDG_CURRENT_DESKTOP'", "]", ".", "lower", "(", ")", ":", "display_type", "=", "1", "else", ":", "display_type", "=", "2", "self", ".", "plugin_settings", "[", "self", ".", "gs", ".", "PluginKey", ".", "DISPLAY_TYPE", "]", "=", "display_type", "self", ".", "toolbar_type", "=", "None", "if", "display_type", "==", "1", ":", "self", 
".", "toolbar_type", "=", "AltToolbarHeaderBar", "(", ")", "elif", "self", ".", "show_compact_toolbar", ":", "self", ".", "toolbar_type", "=", "AltToolbarCompact", "(", ")", "else", ":", "self", ".", "toolbar_type", "=", "AltToolbarStandard", "(", ")", "self", ".", "toolbar_type", ".", "initialise", "(", "self", ")", "self", ".", "toolbar_type", ".", "post_initialise", "(", ")", "try", ":", "process", "=", "Gio", ".", "Subprocess", ".", "new", "(", "[", "'rhythmbox'", ",", "'--version'", "]", ",", "Gio", ".", "SubprocessFlags", ".", "STDOUT_PIPE", ")", "passval", ",", "buf", ",", "err", "=", "process", ".", "communicate_utf8", "(", "None", ")", "if", "passval", ":", "buf", "=", "buf", "[", ":", "-", "1", "]", "ver", "=", "buf", ".", "split", "(", "' '", ")", "[", "1", "]", "except", ":", "ver", "=", "\"999.99.99\"", "if", "self", ".", "enhanced_plugins", "and", "ver", "<=", "\"3.4.3\"", ":", "# redirect plugins action to our implementation", "# after v3.4.3 plugins has been moved into", "# preferences so no need to activate our own", "# implementation", "action", "=", "Gio", ".", "SimpleAction", ".", "new", "(", "'plugins'", ",", "None", ")", "action", ".", "connect", "(", "'activate'", ",", "self", ".", "_display_plugins", ")", "self", ".", "shell", ".", "props", ".", "application", ".", "add_action", "(", "action", ")", "self", ".", "_connect_signals", "(", ")", "self", ".", "_connect_properties", "(", ")", "# allow other plugins access to this toolbar", "self", ".", "shell", ".", "alternative_toolbar", "=", "self", "cl", ".", "switch_locale", "(", "cl", ".", "Locale", ".", "RB", ")" ]
https://github.com/fossfreedom/alternative-toolbar/blob/b43709bc5de20c7ea66c0e992f59361793874ed3/alternative-toolbar.py#L96-L205
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/trusthub/v1/customer_profiles/__init__.py
python
CustomerProfilesList.get_page
(self, target_url)
return CustomerProfilesPage(self._version, response, self._solution)
Retrieve a specific page of CustomerProfilesInstance records from the API.
Request is executed immediately

:param str target_url: API-generated URL for the requested results page

:returns: Page of CustomerProfilesInstance
:rtype: twilio.rest.trusthub.v1.customer_profiles.CustomerProfilesPage
Retrieve a specific page of CustomerProfilesInstance records from the API. Request is executed immediately
[ "Retrieve", "a", "specific", "page", "of", "CustomerProfilesInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
def get_page(self, target_url):
    """
    Retrieve a specific page of CustomerProfilesInstance records from the API.
    Request is executed immediately

    :param str target_url: API-generated URL for the requested results page

    :returns: Page of CustomerProfilesInstance
    :rtype: twilio.rest.trusthub.v1.customer_profiles.CustomerProfilesPage
    """
    response = self._version.domain.twilio.request(
        'GET',
        target_url,
    )

    return CustomerProfilesPage(self._version, response, self._solution)
[ "def", "get_page", "(", "self", ",", "target_url", ")", ":", "response", "=", "self", ".", "_version", ".", "domain", ".", "twilio", ".", "request", "(", "'GET'", ",", "target_url", ",", ")", "return", "CustomerProfilesPage", "(", "self", ".", "_version", ",", "response", ",", "self", ".", "_solution", ")" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/trusthub/v1/customer_profiles/__init__.py#L151-L166
Theano/Theano
8fd9203edfeecebced9344b0c70193be292a9ade
theano/gof/cmodule.py
python
_get_ext_suffix
()
return dist_suffix
Get the suffix for compiled extensions
Get the suffix for compiled extensions
[ "Get", "the", "suffix", "for", "compiled", "extensions" ]
def _get_ext_suffix():
    """Get the suffix for compiled extensions"""
    dist_suffix = distutils.sysconfig.get_config_var("EXT_SUFFIX")
    if dist_suffix is None:
        dist_suffix = distutils.sysconfig.get_config_var("SO")
    return dist_suffix
[ "def", "_get_ext_suffix", "(", ")", ":", "dist_suffix", "=", "distutils", ".", "sysconfig", ".", "get_config_var", "(", "\"EXT_SUFFIX\"", ")", "if", "dist_suffix", "is", "None", ":", "dist_suffix", "=", "distutils", ".", "sysconfig", ".", "get_config_var", "(", "\"SO\"", ")", "return", "dist_suffix" ]
https://github.com/Theano/Theano/blob/8fd9203edfeecebced9344b0c70193be292a9ade/theano/gof/cmodule.py#L251-L256
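The same lookup works with the modern standard library, where sysconfig replaces the deprecated distutils.sysconfig; a sketch:

# Equivalent of the Theano helper above, using the stdlib sysconfig module.
import sysconfig

suffix = sysconfig.get_config_var("EXT_SUFFIX")
print(suffix)  # e.g. '.cpython-311-x86_64-linux-gnu.so' on CPython/Linux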
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/docutils/utils/math/math2html.py
python
CommandLineParser.parseoptions
(self, args)
return None
Parse command line options
Parse command line options
[ "Parse", "command", "line", "options" ]
def parseoptions(self, args):
    "Parse command line options"
    if len(args) == 0:
        return None
    while len(args) > 0 and args[0].startswith('--'):
        key, value = self.readoption(args)
        if not key:
            return 'Option ' + value + ' not recognized'
        if not value:
            return 'Option ' + key + ' needs a value'
        setattr(self.options, key, value)
    return None
[ "def", "parseoptions", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "==", "0", ":", "return", "None", "while", "len", "(", "args", ")", ">", "0", "and", "args", "[", "0", "]", ".", "startswith", "(", "'--'", ")", ":", "key", ",", "value", "=", "self", ".", "readoption", "(", "args", ")", "if", "not", "key", ":", "return", "'Option '", "+", "value", "+", "' not recognized'", "if", "not", "value", ":", "return", "'Option '", "+", "key", "+", "' needs a value'", "setattr", "(", "self", ".", "options", ",", "key", ",", "value", ")", "return", "None" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/docutils/utils/math/math2html.py#L911-L922
ctxis/canape
5f0e03424577296bcc60c2008a60a98ec5307e4b
CANAPE.Scripting/Lib/SimpleXMLRPCServer.py
python
SimpleXMLRPCRequestHandler.log_request
(self, code='-', size='-')
Selectively log an accepted request.
Selectively log an accepted request.
[ "Selectively", "log", "an", "accepted", "request", "." ]
def log_request(self, code='-', size='-'):
    """Selectively log an accepted request."""
    if self.server.logRequests:
        BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
[ "def", "log_request", "(", "self", ",", "code", "=", "'-'", ",", "size", "=", "'-'", ")", ":", "if", "self", ".", "server", ".", "logRequests", ":", "BaseHTTPServer", ".", "BaseHTTPRequestHandler", ".", "log_request", "(", "self", ",", "code", ",", "size", ")" ]
https://github.com/ctxis/canape/blob/5f0e03424577296bcc60c2008a60a98ec5307e4b/CANAPE.Scripting/Lib/SimpleXMLRPCServer.py#L560-L564
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/gzip.py
python
_GzipReader._read_eof
(self)
[]
def _read_eof(self):
    # We've read to the end of the file
    # We check that the computed CRC and size of the
    # uncompressed data matches the stored values.  Note that the size
    # stored is the true file size mod 2**32.
    crc32, isize = struct.unpack("<II", self._read_exact(8))
    if crc32 != self._crc:
        raise BadGzipFile("CRC check failed %s != %s" % (hex(crc32),
                                                         hex(self._crc)))
    elif isize != (self._stream_size & 0xffffffff):
        raise BadGzipFile("Incorrect length of data produced")

    # Gzip files can be padded with zeroes and still have archives.
    # Consume all zero bytes and set the file position to the first
    # non-zero byte. See http://www.gzip.org/#faq8
    c = b"\x00"
    while c == b"\x00":
        c = self._fp.read(1)
    if c:
        self._fp.prepend(c)
[ "def", "_read_eof", "(", "self", ")", ":", "# We've read to the end of the file", "# We check that the computed CRC and size of the", "# uncompressed data matches the stored values. Note that the size", "# stored is the true file size mod 2**32.", "crc32", ",", "isize", "=", "struct", ".", "unpack", "(", "\"<II\"", ",", "self", ".", "_read_exact", "(", "8", ")", ")", "if", "crc32", "!=", "self", ".", "_crc", ":", "raise", "BadGzipFile", "(", "\"CRC check failed %s != %s\"", "%", "(", "hex", "(", "crc32", ")", ",", "hex", "(", "self", ".", "_crc", ")", ")", ")", "elif", "isize", "!=", "(", "self", ".", "_stream_size", "&", "0xffffffff", ")", ":", "raise", "BadGzipFile", "(", "\"Incorrect length of data produced\"", ")", "# Gzip files can be padded with zeroes and still have archives.", "# Consume all zero bytes and set the file position to the first", "# non-zero byte. See http://www.gzip.org/#faq8", "c", "=", "b\"\\x00\"", "while", "c", "==", "b\"\\x00\"", ":", "c", "=", "self", ".", "_fp", ".", "read", "(", "1", ")", "if", "c", ":", "self", ".", "_fp", ".", "prepend", "(", "c", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/gzip.py#L518-L537
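The trailer this method validates is the last 8 bytes of a gzip member: the CRC32 and the uncompressed size mod 2**32, both little-endian. A standalone sketch:

# Unpack the gzip trailer by hand and confirm it matches what _read_eof checks.
import gzip
import struct
import zlib

payload = b"hello world"
blob = gzip.compress(payload)
crc32, isize = struct.unpack("<II", blob[-8:])
assert crc32 == zlib.crc32(payload)
assert isize == len(payload) & 0xffffffff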
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/docutils/utils/math/math2html.py
python
MacroDefinition.instantiate
(self)
return self.definition.clone()
Return an instance of the macro.
Return an instance of the macro.
[ "Return", "an", "instance", "of", "the", "macro", "." ]
def instantiate(self):
    "Return an instance of the macro."
    return self.definition.clone()
[ "def", "instantiate", "(", "self", ")", ":", "return", "self", ".", "definition", ".", "clone", "(", ")" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/docutils/utils/math/math2html.py#L5107-L5109
seppius-xbmc-repo/ru
d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2
script.module.YaTv/lib/YaTv.py
python
GetChannels
(Url)
[]
def GetChannels(Url):
    http = GET(Url, Url)
    ht = {}
    if http == None:
        # Russian: 'Server is not responding'
        showMessage('YaTV:', 'Сервер не отвечает', 1000)
        http = GET(Url, Url)
        if http == None:
            http = GET(Url, Url)
            if http == None:
                return None
            else:
                http = http.replace(':false', ':0').replace(':true', ':1')
                http = eval(http)
                for channel in http["schedules"]:
                    if xbmc.abortRequested:
                        break
                    else:
                        save_cache(channel, channel["channel"]["id"])
        else:
            http = http.replace(':false', ':0').replace(':true', ':1')
            http = eval(http)
            for channel in http["schedules"]:
                if xbmc.abortRequested:
                    break
                else:
                    save_cache(channel, channel["channel"]["id"])
    else:
        http = http.replace(':false', ':0').replace(':true', ':1')
        http = eval(http)
        for channel in http["schedules"]:
            if xbmc.abortRequested:
                break
            else:
                save_cache(channel, channel["channel"]["id"])
[ "def", "GetChannels", "(", "Url", ")", ":", "http", "=", "GET", "(", "Url", ",", "Url", ")", "ht", "=", "{", "}", "if", "http", "==", "None", ":", "showMessage", "(", "'YaTV:'", ",", "'Сервер не отвечает', 1000)", "", "", "", "http", "=", "GET", "(", "Url", ",", "Url", ")", "if", "http", "==", "None", ":", "http", "=", "GET", "(", "Url", ",", "Url", ")", "if", "http", "==", "None", ":", "return", "None", "else", ":", "http", "=", "http", ".", "replace", "(", "':false'", ",", "':0'", ")", ".", "replace", "(", "':true'", ",", "':1'", ")", "http", "=", "eval", "(", "http", ")", "for", "channel", "in", "http", "[", "\"schedules\"", "]", ":", "if", "xbmc", ".", "abortRequested", ":", "break", "else", ":", "save_cache", "(", "channel", ",", "channel", "[", "\"channel\"", "]", "[", "\"id\"", "]", ")", "else", ":", "http", "=", "http", ".", "replace", "(", "':false'", ",", "':0'", ")", ".", "replace", "(", "':true'", ",", "':1'", ")", "http", "=", "eval", "(", "http", ")", "for", "channel", "in", "http", "[", "\"schedules\"", "]", ":", "if", "xbmc", ".", "abortRequested", ":", "break", "else", ":", "save_cache", "(", "channel", ",", "channel", "[", "\"channel\"", "]", "[", "\"id\"", "]", ")", "else", ":", "http", "=", "http", ".", "replace", "(", "':false'", ",", "':0'", ")", ".", "replace", "(", "':true'", ",", "':1'", ")", "http", "=", "eval", "(", "http", ")", "for", "channel", "in", "http", "[", "\"schedules\"", "]", ":", "if", "xbmc", ".", "abortRequested", ":", "break", "else", ":", "save_cache", "(", "channel", ",", "channel", "[", "\"channel\"", "]", "[", "\"id\"", "]", ")" ]
https://github.com/seppius-xbmc-repo/ru/blob/d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2/script.module.YaTv/lib/YaTv.py#L469-L502
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v9/services/services/customer_user_access_service/client.py
python
CustomerUserAccessServiceClient.from_service_account_file
(cls, filename: str, *args, **kwargs)
return cls(*args, **kwargs)
Creates an instance of this client using the provided credentials
file.

Args:
    filename (str): The path to the service account private key json
        file.
    args: Additional arguments to pass to the constructor.
    kwargs: Additional arguments to pass to the constructor.

Returns:
    CustomerUserAccessServiceClient: The constructed client.
Creates an instance of this client using the provided credentials file.
[ "Creates", "an", "instance", "of", "this", "client", "using", "the", "provided", "credentials", "file", "." ]
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        CustomerUserAccessServiceClient: The constructed client.
    """
    credentials = service_account.Credentials.from_service_account_file(
        filename
    )
    kwargs["credentials"] = credentials
    return cls(*args, **kwargs)
[ "def", "from_service_account_file", "(", "cls", ",", "filename", ":", "str", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "credentials", "=", "service_account", ".", "Credentials", ".", "from_service_account_file", "(", "filename", ")", "kwargs", "[", "\"credentials\"", "]", "=", "credentials", "return", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v9/services/services/customer_user_access_service/client.py#L138-L155
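Usage is a one-liner; a hedged sketch, assuming the import path mirrors the file path in the record, with "key.json" as a placeholder for a real service-account key file:

# Construct a client directly from a service-account key file.
from google.ads.googleads.v9.services.services.customer_user_access_service.client import (
    CustomerUserAccessServiceClient,
)

client = CustomerUserAccessServiceClient.from_service_account_file("key.json")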
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/plat-atheos/TYPES.py
python
__sigisemptyset
(set)
return
[]
def __sigisemptyset(set): return
[ "def", "__sigisemptyset", "(", "set", ")", ":", "return" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/plat-atheos/TYPES.py#L132-L132
caserec/CaseRecommender
779bc5dd91ff704ce60c0f3fafd07e2eba689dd7
caserec/utils/split_database.py
python
SplitDatabase.k_fold_cross_validation
(self, shuffle=True, random_state=None)
k-fold cross-validation

In k-fold cross-validation, the original sample is randomly partitioned into
k equal sized subsamples. Of the k subsamples, a single subsample is retained
as the validation data for testing the model, and the remaining k − 1
subsamples are used as training data. The cross-validation process is then
repeated k times (the folds), with each of the k subsamples used exactly once
as the validation data. The k results from the folds can then be averaged
(or otherwise combined) to produce a single estimation.

Reference: https://en.wikipedia.org/wiki/Cross-validation_(statistics)

:param shuffle:
:type shuffle:

:param random_state:
:type random_state:

:return:
k-fold cross-validation
[ "k", "-", "fold", "cross", "-", "validation" ]
def k_fold_cross_validation(self, shuffle=True, random_state=None):
    """
    k-fold cross-validation

    In k-fold cross-validation, the original sample is randomly partitioned into
    k equal sized subsamples. Of the k subsamples, a single subsample is retained
    as the validation data for testing the model, and the remaining k − 1
    subsamples are used as training data. The cross-validation process is then
    repeated k times (the folds), with each of the k subsamples used exactly once
    as the validation data. The k results from the folds can then be averaged
    (or otherwise combined) to produce a single estimation.

    Reference: https://en.wikipedia.org/wiki/Cross-validation_(statistics)

    :param shuffle:
    :type shuffle:

    :param random_state:
    :type random_state:

    :return:
    """
    kfold = KFold(n_splits=self.n_splits, shuffle=shuffle, random_state=random_state)
    trained_model = list(kfold.split(self.df))

    if self.dir_folds is not None:
        self.write_files(trained_model)
[ "def", "k_fold_cross_validation", "(", "self", ",", "shuffle", "=", "True", ",", "random_state", "=", "None", ")", ":", "kfold", "=", "KFold", "(", "n_splits", "=", "self", ".", "n_splits", ",", "shuffle", "=", "shuffle", ",", "random_state", "=", "random_state", ")", "trained_model", "=", "list", "(", "kfold", ".", "split", "(", "self", ".", "df", ")", ")", "if", "self", ".", "dir_folds", "is", "not", "None", ":", "self", ".", "write_files", "(", "trained_model", ")" ]
https://github.com/caserec/CaseRecommender/blob/779bc5dd91ff704ce60c0f3fafd07e2eba689dd7/caserec/utils/split_database.py#L107-L133
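The KFold object this method wraps can be exercised directly; a minimal sketch with made-up data:

# Each of the 10 samples lands in exactly one test fold across the 5 splits.
import numpy as np
from sklearn.model_selection import KFold

X = np.arange(10)
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
for train_idx, test_idx in kfold.split(X):
    print(train_idx, test_idx)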
mozilla/elasticutils
b880cc5d51fb1079b0581255ec664c1ec934656e
elasticutils/contrib/django/__init__.py
python
Indexable.get_es
(cls, **overrides)
return get_es(**overrides)
Returns an ElasticSearch object using Django settings

Override this if you need special functionality.

:arg overrides: Allows you to override defaults to create the
    ElasticSearch object. You can override any of the arguments
    listed in :py:func:`elasticutils.get_es`.

:returns: a elasticsearch `Elasticsearch` instance
Returns an ElasticSearch object using Django settings
[ "Returns", "an", "ElasticSearch", "object", "using", "Django", "settings" ]
def get_es(cls, **overrides):
    """Returns an ElasticSearch object using Django settings

    Override this if you need special functionality.

    :arg overrides: Allows you to override defaults to create the
        ElasticSearch object. You can override any of the arguments
        listed in :py:func:`elasticutils.get_es`.

    :returns: a elasticsearch `Elasticsearch` instance
    """
    return get_es(**overrides)
[ "def", "get_es", "(", "cls", ",", "*", "*", "overrides", ")", ":", "return", "get_es", "(", "*", "*", "overrides", ")" ]
https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/contrib/django/__init__.py#L283-L295
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/rings/polynomial/omega.py
python
Omega_ge
(a, exponents)
return numerator, factors_denominator
r""" Return `\Omega_{\ge}` of the expression specified by the input. To be more precise, calculate .. MATH:: \Omega_{\ge} \frac{\mu^a}{ (1 - z_0 \mu^{e_0}) \dots (1 - z_{n-1} \mu^{e_{n-1}})} and return its numerator and a factorization of its denominator. Note that `z_0`, ..., `z_{n-1}` only appear in the output, but not in the input. INPUT: - ``a`` -- an integer - ``exponents`` -- a tuple of integers OUTPUT: A pair representing a quotient as follows: Its first component is the numerator as a Laurent polynomial, its second component a factorization of the denominator as a tuple of Laurent polynomials, where each Laurent polynomial `z` represents a factor `1 - z`. The parents of these Laurent polynomials is always a Laurent polynomial ring in `z_0`, ..., `z_{n-1}` over `\ZZ`, where `n` is the length of ``exponents``. EXAMPLES:: sage: from sage.rings.polynomial.omega import Omega_ge sage: Omega_ge(0, (1, -2)) (1, (z0, z0^2*z1)) sage: Omega_ge(0, (1, -3)) (1, (z0, z0^3*z1)) sage: Omega_ge(0, (1, -4)) (1, (z0, z0^4*z1)) sage: Omega_ge(0, (2, -1)) (z0*z1 + 1, (z0, z0*z1^2)) sage: Omega_ge(0, (3, -1)) (z0*z1^2 + z0*z1 + 1, (z0, z0*z1^3)) sage: Omega_ge(0, (4, -1)) (z0*z1^3 + z0*z1^2 + z0*z1 + 1, (z0, z0*z1^4)) sage: Omega_ge(0, (1, 1, -2)) (-z0^2*z1*z2 - z0*z1^2*z2 + z0*z1*z2 + 1, (z0, z1, z0^2*z2, z1^2*z2)) sage: Omega_ge(0, (2, -1, -1)) (z0*z1*z2 + z0*z1 + z0*z2 + 1, (z0, z0*z1^2, z0*z2^2)) sage: Omega_ge(0, (2, 1, -1)) (-z0*z1*z2^2 - z0*z1*z2 + z0*z2 + 1, (z0, z1, z0*z2^2, z1*z2)) :: sage: Omega_ge(0, (2, -2)) (-z0*z1 + 1, (z0, z0*z1, z0*z1)) sage: Omega_ge(0, (2, -3)) (z0^2*z1 + 1, (z0, z0^3*z1^2)) sage: Omega_ge(0, (3, 1, -3)) (-z0^3*z1^3*z2^3 + 2*z0^2*z1^3*z2^2 - z0*z1^3*z2 + z0^2*z2^2 - 2*z0*z2 + 1, (z0, z1, z0*z2, z0*z2, z0*z2, z1^3*z2)) :: sage: Omega_ge(0, (3, 6, -1)) (-z0*z1*z2^8 - z0*z1*z2^7 - z0*z1*z2^6 - z0*z1*z2^5 - z0*z1*z2^4 + z1*z2^5 - z0*z1*z2^3 + z1*z2^4 - z0*z1*z2^2 + z1*z2^3 - z0*z1*z2 + z0*z2^2 + z1*z2^2 + z0*z2 + z1*z2 + 1, (z0, z1, z0*z2^3, z1*z2^6)) TESTS:: sage: Omega_ge(0, (2, 2, 1, 1, 1, -1, -1))[0].number_of_terms() # long time 1695 sage: Omega_ge(0, (2, 2, 1, 1, 1, 1, 1, -1, -1))[0].number_of_terms() # not tested (too long, 1 min) 27837 :: sage: Omega_ge(1, (2,)) (1, (z0,))
r""" Return `\Omega_{\ge}` of the expression specified by the input.
[ "r", "Return", "\\", "Omega_", "{", "\\", "ge", "}", "of", "the", "expression", "specified", "by", "the", "input", "." ]
def Omega_ge(a, exponents):
    r"""
    Return `\Omega_{\ge}` of the expression specified by the input.

    To be more precise, calculate

    .. MATH::

        \Omega_{\ge} \frac{\mu^a}{
        (1 - z_0 \mu^{e_0}) \dots (1 - z_{n-1} \mu^{e_{n-1}})}

    and return its numerator and a factorization of its denominator.
    Note that `z_0`, ..., `z_{n-1}` only appear in the output, but not in the input.

    INPUT:

    - ``a`` -- an integer

    - ``exponents`` -- a tuple of integers

    OUTPUT:

    A pair representing a quotient as follows: Its first component is the
    numerator as a Laurent polynomial, its second component a factorization
    of the denominator as a tuple of Laurent polynomials, where each
    Laurent polynomial `z` represents a factor `1 - z`.

    The parents of these Laurent polynomials is always a Laurent polynomial
    ring in `z_0`, ..., `z_{n-1}` over `\ZZ`, where `n` is the length of
    ``exponents``.

    EXAMPLES::

        sage: from sage.rings.polynomial.omega import Omega_ge
        sage: Omega_ge(0, (1, -2))
        (1, (z0, z0^2*z1))
        sage: Omega_ge(0, (1, -3))
        (1, (z0, z0^3*z1))
        sage: Omega_ge(0, (1, -4))
        (1, (z0, z0^4*z1))

        sage: Omega_ge(0, (2, -1))
        (z0*z1 + 1, (z0, z0*z1^2))
        sage: Omega_ge(0, (3, -1))
        (z0*z1^2 + z0*z1 + 1, (z0, z0*z1^3))
        sage: Omega_ge(0, (4, -1))
        (z0*z1^3 + z0*z1^2 + z0*z1 + 1, (z0, z0*z1^4))

        sage: Omega_ge(0, (1, 1, -2))
        (-z0^2*z1*z2 - z0*z1^2*z2 + z0*z1*z2 + 1, (z0, z1, z0^2*z2, z1^2*z2))
        sage: Omega_ge(0, (2, -1, -1))
        (z0*z1*z2 + z0*z1 + z0*z2 + 1, (z0, z0*z1^2, z0*z2^2))
        sage: Omega_ge(0, (2, 1, -1))
        (-z0*z1*z2^2 - z0*z1*z2 + z0*z2 + 1, (z0, z1, z0*z2^2, z1*z2))

    ::

        sage: Omega_ge(0, (2, -2))
        (-z0*z1 + 1, (z0, z0*z1, z0*z1))
        sage: Omega_ge(0, (2, -3))
        (z0^2*z1 + 1, (z0, z0^3*z1^2))
        sage: Omega_ge(0, (3, 1, -3))
        (-z0^3*z1^3*z2^3 + 2*z0^2*z1^3*z2^2 - z0*z1^3*z2 + z0^2*z2^2 - 2*z0*z2 + 1,
         (z0, z1, z0*z2, z0*z2, z0*z2, z1^3*z2))

    ::

        sage: Omega_ge(0, (3, 6, -1))
        (-z0*z1*z2^8 - z0*z1*z2^7 - z0*z1*z2^6 - z0*z1*z2^5 - z0*z1*z2^4 +
         z1*z2^5 - z0*z1*z2^3 + z1*z2^4 - z0*z1*z2^2 + z1*z2^3 - z0*z1*z2 +
         z0*z2^2 + z1*z2^2 + z0*z2 + z1*z2 + 1,
         (z0, z1, z0*z2^3, z1*z2^6))

    TESTS::

        sage: Omega_ge(0, (2, 2, 1, 1, 1, -1, -1))[0].number_of_terms()  # long time
        1695
        sage: Omega_ge(0, (2, 2, 1, 1, 1, 1, 1, -1, -1))[0].number_of_terms()  # not tested (too long, 1 min)
        27837

    ::

        sage: Omega_ge(1, (2,))
        (1, (z0,))
    """
    import logging
    logger = logging.getLogger(__name__)
    logger.info('Omega_ge: a=%s, exponents=%s', a, exponents)

    from sage.arith.all import lcm, srange
    from sage.rings.integer_ring import ZZ
    from sage.rings.polynomial.laurent_polynomial_ring import LaurentPolynomialRing
    from sage.rings.number_field.number_field import CyclotomicField

    if not exponents or any(e == 0 for e in exponents):
        raise NotImplementedError

    rou = sorted(set(abs(e) for e in exponents) - set([1]))
    ellcm = lcm(rou)
    B = CyclotomicField(ellcm, 'zeta')
    zeta = B.gen()
    z_names = tuple('z{}'.format(i) for i in range(len(exponents)))
    L = LaurentPolynomialRing(B, ('t',) + z_names, len(z_names) + 1)
    t = L.gens()[0]
    Z = LaurentPolynomialRing(ZZ, z_names, len(z_names))
    powers = {i: L(zeta**(ellcm//i)) for i in rou}
    powers[2] = L(-1)
    powers[1] = L(1)
    exponents_and_values = tuple(
        (e, tuple(powers[abs(e)]**j * z for j in srange(abs(e))))
        for z, e in zip(L.gens()[1:], exponents))
    x = tuple(v for e, v in exponents_and_values if e > 0)
    y = tuple(v for e, v in exponents_and_values if e < 0)

    def subs_power(expression, var, exponent):
        r"""
        Substitute ``var^exponent`` by ``var`` in ``expression``.

        It is assumed that ``var`` only occurs with exponents
        divisible by ``exponent``.
        """
        p = tuple(var.dict().popitem()[0]).index(1)  # var is the p-th generator

        def subs_e(e):
            e = list(e)
            assert e[p] % exponent == 0
            e[p] = e[p] // exponent
            return tuple(e)

        parent = expression.parent()
        result = parent({subs_e(e): c for e, c in expression.dict().items()})
        return result

    def de_power(expression):
        expression = Z(expression)
        for e, var in zip(exponents, Z.gens()):
            if abs(e) == 1:
                continue
            expression = subs_power(expression, var, abs(e))
        return expression

    logger.debug('Omega_ge: preparing denominator')
    factors_denominator = tuple(de_power(1 - factor)
                                for factor in _Omega_factors_denominator_(x, y))

    logger.debug('Omega_ge: preparing numerator')
    numerator = de_power(_Omega_numerator_(a, x, y, t))

    logger.info('Omega_ge: completed')

    return numerator, factors_denominator
[ "def", "Omega_ge", "(", "a", ",", "exponents", ")", ":", "import", "logging", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "info", "(", "'Omega_ge: a=%s, exponents=%s'", ",", "a", ",", "exponents", ")", "from", "sage", ".", "arith", ".", "all", "import", "lcm", ",", "srange", "from", "sage", ".", "rings", ".", "integer_ring", "import", "ZZ", "from", "sage", ".", "rings", ".", "polynomial", ".", "laurent_polynomial_ring", "import", "LaurentPolynomialRing", "from", "sage", ".", "rings", ".", "number_field", ".", "number_field", "import", "CyclotomicField", "if", "not", "exponents", "or", "any", "(", "e", "==", "0", "for", "e", "in", "exponents", ")", ":", "raise", "NotImplementedError", "rou", "=", "sorted", "(", "set", "(", "abs", "(", "e", ")", "for", "e", "in", "exponents", ")", "-", "set", "(", "[", "1", "]", ")", ")", "ellcm", "=", "lcm", "(", "rou", ")", "B", "=", "CyclotomicField", "(", "ellcm", ",", "'zeta'", ")", "zeta", "=", "B", ".", "gen", "(", ")", "z_names", "=", "tuple", "(", "'z{}'", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "len", "(", "exponents", ")", ")", ")", "L", "=", "LaurentPolynomialRing", "(", "B", ",", "(", "'t'", ",", ")", "+", "z_names", ",", "len", "(", "z_names", ")", "+", "1", ")", "t", "=", "L", ".", "gens", "(", ")", "[", "0", "]", "Z", "=", "LaurentPolynomialRing", "(", "ZZ", ",", "z_names", ",", "len", "(", "z_names", ")", ")", "powers", "=", "{", "i", ":", "L", "(", "zeta", "**", "(", "ellcm", "//", "i", ")", ")", "for", "i", "in", "rou", "}", "powers", "[", "2", "]", "=", "L", "(", "-", "1", ")", "powers", "[", "1", "]", "=", "L", "(", "1", ")", "exponents_and_values", "=", "tuple", "(", "(", "e", ",", "tuple", "(", "powers", "[", "abs", "(", "e", ")", "]", "**", "j", "*", "z", "for", "j", "in", "srange", "(", "abs", "(", "e", ")", ")", ")", ")", "for", "z", ",", "e", "in", "zip", "(", "L", ".", "gens", "(", ")", "[", "1", ":", "]", ",", "exponents", ")", ")", "x", "=", "tuple", "(", "v", "for", "e", ",", "v", "in", "exponents_and_values", "if", "e", ">", "0", ")", "y", "=", "tuple", "(", "v", "for", "e", ",", "v", "in", "exponents_and_values", "if", "e", "<", "0", ")", "def", "subs_power", "(", "expression", ",", "var", ",", "exponent", ")", ":", "r\"\"\"\n Substitute ``var^exponent`` by ``var`` in ``expression``.\n\n It is assumed that ``var`` only occurs with exponents\n divisible by ``exponent``.\n \"\"\"", "p", "=", "tuple", "(", "var", ".", "dict", "(", ")", ".", "popitem", "(", ")", "[", "0", "]", ")", ".", "index", "(", "1", ")", "# var is the p-th generator", "def", "subs_e", "(", "e", ")", ":", "e", "=", "list", "(", "e", ")", "assert", "e", "[", "p", "]", "%", "exponent", "==", "0", "e", "[", "p", "]", "=", "e", "[", "p", "]", "//", "exponent", "return", "tuple", "(", "e", ")", "parent", "=", "expression", ".", "parent", "(", ")", "result", "=", "parent", "(", "{", "subs_e", "(", "e", ")", ":", "c", "for", "e", ",", "c", "in", "expression", ".", "dict", "(", ")", ".", "items", "(", ")", "}", ")", "return", "result", "def", "de_power", "(", "expression", ")", ":", "expression", "=", "Z", "(", "expression", ")", "for", "e", ",", "var", "in", "zip", "(", "exponents", ",", "Z", ".", "gens", "(", ")", ")", ":", "if", "abs", "(", "e", ")", "==", "1", ":", "continue", "expression", "=", "subs_power", "(", "expression", ",", "var", ",", "abs", "(", "e", ")", ")", "return", "expression", "logger", ".", "debug", "(", "'Omega_ge: preparing denominator'", ")", "factors_denominator", "=", "tuple", 
"(", "de_power", "(", "1", "-", "factor", ")", "for", "factor", "in", "_Omega_factors_denominator_", "(", "x", ",", "y", ")", ")", "logger", ".", "debug", "(", "'Omega_ge: preparing numerator'", ")", "numerator", "=", "de_power", "(", "_Omega_numerator_", "(", "a", ",", "x", ",", "y", ",", "t", ")", ")", "logger", ".", "info", "(", "'Omega_ge: completed'", ")", "return", "numerator", ",", "factors_denominator" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/polynomial/omega.py#L469-L619
twopirllc/pandas-ta
b92e45c0b8f035ac76292f8f130be32ec49b2ef4
pandas_ta/utils/_core.py
python
_camelCase2Title
(x: str)
return re_.sub("([a-z])([A-Z])","\g<1> \g<2>", x).title()
https://stackoverflow.com/questions/5020906/python-convert-camel-case-to-space-delimited-using-regex-and-taking-acronyms-in
https://stackoverflow.com/questions/5020906/python-convert-camel-case-to-space-delimited-using-regex-and-taking-acronyms-in
[ "https", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "5020906", "/", "python", "-", "convert", "-", "camel", "-", "case", "-", "to", "-", "space", "-", "delimited", "-", "using", "-", "regex", "-", "and", "-", "taking", "-", "acronyms", "-", "in" ]
def _camelCase2Title(x: str):
    """https://stackoverflow.com/questions/5020906/python-convert-camel-case-to-space-delimited-using-regex-and-taking-acronyms-in"""
    return re_.sub("([a-z])([A-Z])", "\g<1> \g<2>", x).title()
[ "def", "_camelCase2Title", "(", "x", ":", "str", ")", ":", "return", "re_", ".", "sub", "(", "\"([a-z])([A-Z])\"", ",", "\"\\g<1> \\g<2>\"", ",", "x", ")", ".", "title", "(", ")" ]
https://github.com/twopirllc/pandas-ta/blob/b92e45c0b8f035ac76292f8f130be32ec49b2ef4/pandas_ta/utils/_core.py#L12-L14
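The regex inserts a space at every lowercase-to-uppercase boundary before title-casing; a standalone sketch using the stdlib re module (written with a raw string, which avoids the invalid-escape warning the original "\g<1>" form triggers):

# 'movingAverageConvergenceDivergence' -> 'Moving Average Convergence Divergence'
import re

x = "movingAverageConvergenceDivergence"
spaced = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", x)
print(spaced.title())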
dgilland/sqlservice
369dfa5d1f95aae7129ff793006d70ce666d0dbc
src/sqlservice/model.py
python
ModelBase.relationships
(cls)
return cls.class_mapper().relationships
Return ORM relationships.
Return ORM relationships.
[ "Return", "ORM", "relationships", "." ]
def relationships(cls):
    """Return ORM relationships."""
    return cls.class_mapper().relationships
[ "def", "relationships", "(", "cls", ")", ":", "return", "cls", ".", "class_mapper", "(", ")", ".", "relationships" ]
https://github.com/dgilland/sqlservice/blob/369dfa5d1f95aae7129ff793006d70ce666d0dbc/src/sqlservice/model.py#L150-L152
modoboa/modoboa
9065b7a5679fee149fc6f6f0e1760699c194cf89
modoboa/lib/form_utils.py
python
TabForms.is_valid
(self, mandatory_only=False, optional_only=False)
return True
Check if the form is valid.

:param boolean mandatory_only:
:param boolean optional_only:
Check if the form is valid.
[ "Check", "if", "the", "form", "is", "valid", "." ]
def is_valid(self, mandatory_only=False, optional_only=False):
    """Check if the form is valid.

    :param boolean mandatory_only:
    :param boolean optional_only:
    """
    to_remove = []
    for f in self.forms:
        if mandatory_only and \
           ("mandatory" not in f or not f["mandatory"]):
            continue
        elif optional_only and ("mandatory" in f and f["mandatory"]):
            continue
        if not self._before_is_valid(f):
            to_remove.append(f)
            continue
        if not f["instance"].is_valid():
            self.active_id = f["id"]
            return False
    self.forms = [f for f in self.forms if f not in to_remove]
    return True
[ "def", "is_valid", "(", "self", ",", "mandatory_only", "=", "False", ",", "optional_only", "=", "False", ")", ":", "to_remove", "=", "[", "]", "for", "f", "in", "self", ".", "forms", ":", "if", "mandatory_only", "and", "(", "\"mandatory\"", "not", "in", "f", "or", "not", "f", "[", "\"mandatory\"", "]", ")", ":", "continue", "elif", "optional_only", "and", "(", "\"mandatory\"", "in", "f", "and", "f", "[", "\"mandatory\"", "]", ")", ":", "continue", "if", "not", "self", ".", "_before_is_valid", "(", "f", ")", ":", "to_remove", ".", "append", "(", "f", ")", "continue", "if", "not", "f", "[", "\"instance\"", "]", ".", "is_valid", "(", ")", ":", "self", ".", "active_id", "=", "f", "[", "\"id\"", "]", "return", "False", "self", ".", "forms", "=", "[", "f", "for", "f", "in", "self", ".", "forms", "if", "f", "not", "in", "to_remove", "]", "return", "True" ]
https://github.com/modoboa/modoboa/blob/9065b7a5679fee149fc6f6f0e1760699c194cf89/modoboa/lib/form_utils.py#L287-L307
PennyLaneAI/pennylane
1275736f790ced1d778858ed383448d4a43a4cdd
pennylane/drawer/charsets.py
python
CharSet.BOTTOM_MULTI_LINE_GATE_CONNECTOR
(self)
The string used to connect to the bottommost wire of a multi-wire gate.
The string used to connect to the bottommost wire of a multi-wire gate.
[ "The", "string", "used", "to", "connect", "to", "the", "bottommost", "wire", "of", "a", "multi", "-", "wire", "gate", "." ]
def BOTTOM_MULTI_LINE_GATE_CONNECTOR(self):
    """The string used to connect to the bottommost wire of a multi-wire gate."""
[ "def", "BOTTOM_MULTI_LINE_GATE_CONNECTOR", "(", "self", ")", ":" ]
https://github.com/PennyLaneAI/pennylane/blob/1275736f790ced1d778858ed383448d4a43a4cdd/pennylane/drawer/charsets.py#L47-L48
uqfoundation/multiprocess
028cc73f02655e6451d92e5147d19d8c10aebe50
py3.1/multiprocess/__init__.py
python
log_to_stderr
(level=None)
return log_to_stderr(level)
Turn on logging and add a handler which prints to stderr
Turn on logging and add a handler which prints to stderr
[ "Turn", "on", "logging", "and", "add", "a", "handler", "which", "prints", "to", "stderr" ]
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    from multiprocess.util import log_to_stderr
    return log_to_stderr(level)
[ "def", "log_to_stderr", "(", "level", "=", "None", ")", ":", "from", "multiprocess", ".", "util", "import", "log_to_stderr", "return", "log_to_stderr", "(", "level", ")" ]
https://github.com/uqfoundation/multiprocess/blob/028cc73f02655e6451d92e5147d19d8c10aebe50/py3.1/multiprocess/__init__.py#L156-L161
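multiprocess mirrors the stdlib multiprocessing API, so the equivalent stdlib call behaves the same; a sketch:

# Messages go to stderr, prefixed with the process name and level.
import logging
import multiprocessing

logger = multiprocessing.log_to_stderr(logging.INFO)
logger.info("visible on stderr")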
canonical/cloud-init
dc1aabfca851e520693c05322f724bd102c76364
cloudinit/sources/DataSourceOpenStack.py
python
detect_openstack
(accept_oracle=False)
return False
Return True when a potential OpenStack platform is detected.
Return True when a potential OpenStack platform is detected.
[ "Return", "True", "when", "a", "potential", "OpenStack", "platform", "is", "detected", "." ]
def detect_openstack(accept_oracle=False):
    """Return True when a potential OpenStack platform is detected."""
    if not util.is_x86():
        return True  # Non-Intel cpus don't properly report dmi product names
    product_name = dmi.read_dmi_data("system-product-name")
    if product_name in VALID_DMI_PRODUCT_NAMES:
        return True
    elif dmi.read_dmi_data("chassis-asset-tag") in VALID_DMI_ASSET_TAGS:
        return True
    elif accept_oracle and oracle._is_platform_viable():
        return True
    elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
        return True
    return False
[ "def", "detect_openstack", "(", "accept_oracle", "=", "False", ")", ":", "if", "not", "util", ".", "is_x86", "(", ")", ":", "return", "True", "# Non-Intel cpus don't properly report dmi product names", "product_name", "=", "dmi", ".", "read_dmi_data", "(", "\"system-product-name\"", ")", "if", "product_name", "in", "VALID_DMI_PRODUCT_NAMES", ":", "return", "True", "elif", "dmi", ".", "read_dmi_data", "(", "\"chassis-asset-tag\"", ")", "in", "VALID_DMI_ASSET_TAGS", ":", "return", "True", "elif", "accept_oracle", "and", "oracle", ".", "_is_platform_viable", "(", ")", ":", "return", "True", "elif", "util", ".", "get_proc_env", "(", "1", ")", ".", "get", "(", "\"product_name\"", ")", "==", "DMI_PRODUCT_NOVA", ":", "return", "True", "return", "False" ]
https://github.com/canonical/cloud-init/blob/dc1aabfca851e520693c05322f724bd102c76364/cloudinit/sources/DataSourceOpenStack.py#L260-L273
declare-lab/conv-emotion
0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e
DialogueGCN/model.py
python
classify_node_features
(emotions, seq_lengths, umask, matchatt_layer, linear_layer, dropout_layer, smax_fc_layer, nodal_attn, avec, no_cuda)
Function for the final classification, as in Equation 7, 8, 9. in the paper.
Function for the final classification, as in Equation 7, 8, 9. in the paper.
[ "Function", "for", "the", "final", "classification", "as", "in", "Equation", "7", "8", "9", ".", "in", "the", "paper", "." ]
def classify_node_features(emotions, seq_lengths, umask, matchatt_layer, linear_layer, dropout_layer, smax_fc_layer, nodal_attn, avec, no_cuda): """ Function for the final classification, as in Equation 7, 8, 9. in the paper. """ if nodal_attn: emotions = attentive_node_features(emotions, seq_lengths, umask, matchatt_layer, no_cuda) hidden = F.relu(linear_layer(emotions)) hidden = dropout_layer(hidden) hidden = smax_fc_layer(hidden) if avec: return torch.cat([hidden[:, j, :][:seq_lengths[j]] for j in range(len(seq_lengths))]) log_prob = F.log_softmax(hidden, 2) log_prob = torch.cat([log_prob[:, j, :][:seq_lengths[j]] for j in range(len(seq_lengths))]) return log_prob else: hidden = F.relu(linear_layer(emotions)) hidden = dropout_layer(hidden) hidden = smax_fc_layer(hidden) if avec: return hidden log_prob = F.log_softmax(hidden, 1) return log_prob
[ "def", "classify_node_features", "(", "emotions", ",", "seq_lengths", ",", "umask", ",", "matchatt_layer", ",", "linear_layer", ",", "dropout_layer", ",", "smax_fc_layer", ",", "nodal_attn", ",", "avec", ",", "no_cuda", ")", ":", "if", "nodal_attn", ":", "emotions", "=", "attentive_node_features", "(", "emotions", ",", "seq_lengths", ",", "umask", ",", "matchatt_layer", ",", "no_cuda", ")", "hidden", "=", "F", ".", "relu", "(", "linear_layer", "(", "emotions", ")", ")", "hidden", "=", "dropout_layer", "(", "hidden", ")", "hidden", "=", "smax_fc_layer", "(", "hidden", ")", "if", "avec", ":", "return", "torch", ".", "cat", "(", "[", "hidden", "[", ":", ",", "j", ",", ":", "]", "[", ":", "seq_lengths", "[", "j", "]", "]", "for", "j", "in", "range", "(", "len", "(", "seq_lengths", ")", ")", "]", ")", "log_prob", "=", "F", ".", "log_softmax", "(", "hidden", ",", "2", ")", "log_prob", "=", "torch", ".", "cat", "(", "[", "log_prob", "[", ":", ",", "j", ",", ":", "]", "[", ":", "seq_lengths", "[", "j", "]", "]", "for", "j", "in", "range", "(", "len", "(", "seq_lengths", ")", ")", "]", ")", "return", "log_prob", "else", ":", "hidden", "=", "F", ".", "relu", "(", "linear_layer", "(", "emotions", ")", ")", "hidden", "=", "dropout_layer", "(", "hidden", ")", "hidden", "=", "smax_fc_layer", "(", "hidden", ")", "if", "avec", ":", "return", "hidden", "log_prob", "=", "F", ".", "log_softmax", "(", "hidden", ",", "1", ")", "return", "log_prob" ]
https://github.com/declare-lab/conv-emotion/blob/0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e/DialogueGCN/model.py#L757-L786
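As a rough illustration of the non-attentive branch above (linear -> ReLU -> dropout -> linear -> log-softmax, Equations 7-9), here is a minimal PyTorch sketch; the layer sizes and class count are made-up assumptions, not taken from the paper.

import torch
import torch.nn.functional as F

linear_layer = torch.nn.Linear(100, 64)   # hypothetical: 100-dim node features
dropout_layer = torch.nn.Dropout(0.5)
smax_fc_layer = torch.nn.Linear(64, 6)    # hypothetical: 6 emotion classes

emotions = torch.randn(5, 100)            # 5 utterance nodes
hidden = smax_fc_layer(dropout_layer(F.relu(linear_layer(emotions))))
log_prob = F.log_softmax(hidden, dim=1)
print(log_prob.shape)                     # torch.Size([5, 6])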
bjmayor/hacker
e3ce2ad74839c2733b27dac6c0f495e0743e1866
venv/lib/python3.5/site-packages/setuptools/msvc.py
python
SystemInfo.WindowsSDKExecutablePath
(self)
return execpath
Microsoft Windows SDK executable directory.
Microsoft Windows SDK executable directory.
[ "Microsoft", "Windows", "SDK", "executable", "directory", "." ]
def WindowsSDKExecutablePath(self): """ Microsoft Windows SDK executable directory. """ # Find WinSDK NetFx Tools registry dir name if self.vc_ver <= 11.0: netfxver = 35 arch = '' else: netfxver = 40 hidex86 = True if self.vc_ver <= 12.0 else False arch = self.pi.current_dir(x64=True, hidex86=hidex86) fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) # liste all possibles registry paths regpaths = [] if self.vc_ver >= 14.0: for ver in self.NetFxSdkVersion: regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)] for ver in self.WindowsSdkVersion: regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)] # Return installation folder from the more recent path for path in regpaths: execpath = self.ri.lookup(path, 'installationfolder') if execpath: break return execpath
[ "def", "WindowsSDKExecutablePath", "(", "self", ")", ":", "# Find WinSDK NetFx Tools registry dir name", "if", "self", ".", "vc_ver", "<=", "11.0", ":", "netfxver", "=", "35", "arch", "=", "''", "else", ":", "netfxver", "=", "40", "hidex86", "=", "True", "if", "self", ".", "vc_ver", "<=", "12.0", "else", "False", "arch", "=", "self", ".", "pi", ".", "current_dir", "(", "x64", "=", "True", ",", "hidex86", "=", "hidex86", ")", "fx", "=", "'WinSDK-NetFx%dTools%s'", "%", "(", "netfxver", ",", "arch", ".", "replace", "(", "'\\\\'", ",", "'-'", ")", ")", "# liste all possibles registry paths", "regpaths", "=", "[", "]", "if", "self", ".", "vc_ver", ">=", "14.0", ":", "for", "ver", "in", "self", ".", "NetFxSdkVersion", ":", "regpaths", "+=", "[", "os", ".", "path", ".", "join", "(", "self", ".", "ri", ".", "netfx_sdk", ",", "ver", ",", "fx", ")", "]", "for", "ver", "in", "self", ".", "WindowsSdkVersion", ":", "regpaths", "+=", "[", "os", ".", "path", ".", "join", "(", "self", ".", "ri", ".", "windows_sdk", ",", "'v%sA'", "%", "ver", ",", "fx", ")", "]", "# Return installation folder from the more recent path", "for", "path", "in", "regpaths", ":", "execpath", "=", "self", ".", "ri", ".", "lookup", "(", "path", ",", "'installationfolder'", ")", "if", "execpath", ":", "break", "return", "execpath" ]
https://github.com/bjmayor/hacker/blob/e3ce2ad74839c2733b27dac6c0f495e0743e1866/venv/lib/python3.5/site-packages/setuptools/msvc.py#L602-L630
ImagingLab/Colorizing-with-GANs
c10b11ba85bd6c5a3350f7637607a1348fc497da
src/models.py
python
BaseModel.eval_outputs
(self, feed_dic)
return lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc, step
evaluates the loss and accuracy returns (D loss, D_fake loss, D_real loss, G loss, G_L1 loss, G_gan loss, accuracy, step)
evaluates the loss and accuracy returns (D loss, D_fake loss, D_real loss, G loss, G_L1 loss, G_gan loss, accuracy, step)
[ "evaluates", "the", "loss", "and", "accuracy", "returns", "(", "D", "loss", "D_fake", "loss", "D_real", "loss", "G", "loss", "G_L1", "loss", "G_gan", "loss", "accuracy", "step", ")" ]
def eval_outputs(self, feed_dic): ''' evaluates the loss and accuracy returns (D loss, D_fake loss, D_real loss, G loss, G_L1 loss, G_gan loss, accuracy, step) ''' lossD_fake = self.dis_loss_fake.eval(feed_dict=feed_dic) lossD_real = self.dis_loss_real.eval(feed_dict=feed_dic) lossD = self.dis_loss.eval(feed_dict=feed_dic) lossG_l1 = self.gen_loss_l1.eval(feed_dict=feed_dic) lossG_gan = self.gen_loss_gan.eval(feed_dict=feed_dic) lossG = lossG_l1 + lossG_gan acc = self.accuracy.eval(feed_dict=feed_dic) step = self.sess.run(self.global_step) return lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc, step
[ "def", "eval_outputs", "(", "self", ",", "feed_dic", ")", ":", "lossD_fake", "=", "self", ".", "dis_loss_fake", ".", "eval", "(", "feed_dict", "=", "feed_dic", ")", "lossD_real", "=", "self", ".", "dis_loss_real", ".", "eval", "(", "feed_dict", "=", "feed_dic", ")", "lossD", "=", "self", ".", "dis_loss", ".", "eval", "(", "feed_dict", "=", "feed_dic", ")", "lossG_l1", "=", "self", ".", "gen_loss_l1", ".", "eval", "(", "feed_dict", "=", "feed_dic", ")", "lossG_gan", "=", "self", ".", "gen_loss_gan", ".", "eval", "(", "feed_dict", "=", "feed_dic", ")", "lossG", "=", "lossG_l1", "+", "lossG_gan", "acc", "=", "self", ".", "accuracy", ".", "eval", "(", "feed_dict", "=", "feed_dic", ")", "step", "=", "self", ".", "sess", ".", "run", "(", "self", ".", "global_step", ")", "return", "lossD", ",", "lossD_fake", ",", "lossD_real", ",", "lossG", ",", "lossG_l1", ",", "lossG_gan", ",", "acc", ",", "step" ]
https://github.com/ImagingLab/Colorizing-with-GANs/blob/c10b11ba85bd6c5a3350f7637607a1348fc497da/src/models.py#L252-L268
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/SimpleHTTPServer.py
python
SimpleHTTPRequestHandler.guess_type
(self, path)
Guess the type of a file. Argument is a PATH (a filename). Return value is a string of the form type/subtype, usable for a MIME Content-type header. The default implementation looks the file's extension up in the table self.extensions_map, using application/octet-stream as a default; however it would be permissible (if slow) to look inside the data to make a better guess.
Guess the type of a file.
[ "Guess", "the", "type", "of", "a", "file", "." ]
def guess_type(self, path): """Guess the type of a file. Argument is a PATH (a filename). Return value is a string of the form type/subtype, usable for a MIME Content-type header. The default implementation looks the file's extension up in the table self.extensions_map, using application/octet-stream as a default; however it would be permissible (if slow) to look inside the data to make a better guess. """ base, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() if ext in self.extensions_map: return self.extensions_map[ext] else: return self.extensions_map['']
[ "def", "guess_type", "(", "self", ",", "path", ")", ":", "base", ",", "ext", "=", "posixpath", ".", "splitext", "(", "path", ")", "if", "ext", "in", "self", ".", "extensions_map", ":", "return", "self", ".", "extensions_map", "[", "ext", "]", "ext", "=", "ext", ".", "lower", "(", ")", "if", "ext", "in", "self", ".", "extensions_map", ":", "return", "self", ".", "extensions_map", "[", "ext", "]", "else", ":", "return", "self", ".", "extensions_map", "[", "''", "]" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/SimpleHTTPServer.py#L194-L216
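The lookup order above (exact extension, then lowercased extension, then a catch-all keyed by the empty string) can be reproduced standalone; the table below is a hypothetical stand-in for self.extensions_map.

import posixpath

extensions_map = {
    '': 'application/octet-stream',  # catch-all default, as in the handler
    '.html': 'text/html',
    '.png': 'image/png',
}

def guess_type(path):
    base, ext = posixpath.splitext(path)
    if ext in extensions_map:
        return extensions_map[ext]
    return extensions_map.get(ext.lower(), extensions_map[''])

print(guess_type('logo.PNG'))  # image/png
print(guess_type('README'))    # application/octet-stream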
OpenZWave/python-openzwave
8be4c070294348f3fc268bc1d7ad2c535f352f5a
src-api/openzwave/network.py
python
ZWaveNetwork._handle_delete_button
(self, args)
Handheld controller button event deleted. dispatcher.send(self.SIGNAL_DELETE_BUTTON, **{'network': self, 'node' : self.nodes[args['nodeId']]}) :param args: data sent by the notification :type args: dict()
Handheld controller button event deleted.
[ "Handheld", "controller", "button", "event", "deleted", "." ]
def _handle_delete_button(self, args): """ Handheld controller button event deleted. dispatcher.send(self.SIGNAL_DELETE_BUTTON, **{'network': self, 'node' : self.nodes[args['nodeId']]}) :param args: data sent by the notification :type args: dict() """ logger.debug(u'Z-Wave Notification DeleteButton : %s', args) dispatcher.send(self.SIGNAL_DELETE_BUTTON, \ **{'network': self, 'node' : self.nodes[args['nodeId']]})
[ "def", "_handle_delete_button", "(", "self", ",", "args", ")", ":", "logger", ".", "debug", "(", "u'Z-Wave Notification DeleteButton : %s'", ",", "args", ")", "dispatcher", ".", "send", "(", "self", ".", "SIGNAL_DELETE_BUTTON", ",", "*", "*", "{", "'network'", ":", "self", ",", "'node'", ":", "self", ".", "nodes", "[", "args", "[", "'nodeId'", "]", "]", "}", ")" ]
https://github.com/OpenZWave/python-openzwave/blob/8be4c070294348f3fc268bc1d7ad2c535f352f5a/src-api/openzwave/network.py#L1398-L1410
thmoa/octopus
cb9e6b68b9d995241c3d30538d4f33740a446353
lib/io.py
python
write_mesh
(filename, v, f)
[]
def write_mesh(filename, v, f): with open(filename, 'w') as fp: fp.write(('v {:f} {:f} {:f}\n' * len(v)).format(*v.reshape(-1))) fp.write(('f {:d} {:d} {:d}\n' * len(f)).format(*(f.reshape(-1) + 1)))
[ "def", "write_mesh", "(", "filename", ",", "v", ",", "f", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fp", ":", "fp", ".", "write", "(", "(", "'v {:f} {:f} {:f}\\n'", "*", "len", "(", "v", ")", ")", ".", "format", "(", "*", "v", ".", "reshape", "(", "-", "1", ")", ")", ")", "fp", ".", "write", "(", "(", "'f {:d} {:d} {:d}\\n'", "*", "len", "(", "f", ")", ")", ".", "format", "(", "*", "(", "f", ".", "reshape", "(", "-", "1", ")", "+", "1", ")", ")", ")" ]
https://github.com/thmoa/octopus/blob/cb9e6b68b9d995241c3d30538d4f33740a446353/lib/io.py#L54-L57
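A small usage sketch, assuming write_mesh is imported from the module above: vertices become 'v x y z' lines and 0-based face indices become the 1-based 'f i j k' lines of a Wavefront OBJ file.

import numpy as np

# One triangle: three vertices and a single 0-based face index triple.
v = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])
f = np.array([[0, 1, 2]])

write_mesh('triangle.obj', v, f)  # emits three 'v' lines and 'f 1 2 3'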
facebookresearch/SlowFast
39ef35c9a086443209b458cceaec86a02e27b369
slowfast/utils/ava_evaluation/np_box_ops.py
python
intersection
(boxes1, boxes2)
return intersect_heights * intersect_widths
Compute pairwise intersection areas between boxes. Args: boxes1: a numpy array with shape [N, 4] holding N boxes boxes2: a numpy array with shape [M, 4] holding M boxes Returns: a numpy array with shape [N*M] representing pairwise intersection area
Compute pairwise intersection areas between boxes.
[ "Compute", "pairwise", "intersection", "areas", "between", "boxes", "." ]
def intersection(boxes1, boxes2): """Compute pairwise intersection areas between boxes. Args: boxes1: a numpy array with shape [N, 4] holding N boxes boxes2: a numpy array with shape [M, 4] holding M boxes Returns: a numpy array with shape [N*M] representing pairwise intersection area """ [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1) [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1) all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2)) all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2)) intersect_heights = np.maximum( np.zeros(all_pairs_max_ymin.shape), all_pairs_min_ymax - all_pairs_max_ymin, ) all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2)) all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2)) intersect_widths = np.maximum( np.zeros(all_pairs_max_xmin.shape), all_pairs_min_xmax - all_pairs_max_xmin, ) return intersect_heights * intersect_widths
[ "def", "intersection", "(", "boxes1", ",", "boxes2", ")", ":", "[", "y_min1", ",", "x_min1", ",", "y_max1", ",", "x_max1", "]", "=", "np", ".", "split", "(", "boxes1", ",", "4", ",", "axis", "=", "1", ")", "[", "y_min2", ",", "x_min2", ",", "y_max2", ",", "x_max2", "]", "=", "np", ".", "split", "(", "boxes2", ",", "4", ",", "axis", "=", "1", ")", "all_pairs_min_ymax", "=", "np", ".", "minimum", "(", "y_max1", ",", "np", ".", "transpose", "(", "y_max2", ")", ")", "all_pairs_max_ymin", "=", "np", ".", "maximum", "(", "y_min1", ",", "np", ".", "transpose", "(", "y_min2", ")", ")", "intersect_heights", "=", "np", ".", "maximum", "(", "np", ".", "zeros", "(", "all_pairs_max_ymin", ".", "shape", ")", ",", "all_pairs_min_ymax", "-", "all_pairs_max_ymin", ",", ")", "all_pairs_min_xmax", "=", "np", ".", "minimum", "(", "x_max1", ",", "np", ".", "transpose", "(", "x_max2", ")", ")", "all_pairs_max_xmin", "=", "np", ".", "maximum", "(", "x_min1", ",", "np", ".", "transpose", "(", "x_min2", ")", ")", "intersect_widths", "=", "np", ".", "maximum", "(", "np", ".", "zeros", "(", "all_pairs_max_xmin", ".", "shape", ")", ",", "all_pairs_min_xmax", "-", "all_pairs_max_xmin", ",", ")", "return", "intersect_heights", "*", "intersect_widths" ]
https://github.com/facebookresearch/SlowFast/blob/39ef35c9a086443209b458cceaec86a02e27b369/slowfast/utils/ava_evaluation/np_box_ops.py#L43-L68
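A quick numeric check of intersection above, assuming it is imported from the module; boxes are [y_min, x_min, y_max, x_max].

import numpy as np

boxes1 = np.array([[0.0, 0.0, 2.0, 2.0]])   # one 2x2 box
boxes2 = np.array([[1.0, 1.0, 3.0, 3.0],    # overlaps boxes1 in a unit square
                   [5.0, 5.0, 6.0, 6.0]])   # disjoint from boxes1

print(intersection(boxes1, boxes2))         # [[1. 0.]]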
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /tools/inject/thirdparty/oset/pyoset.py
python
OrderedSet.__init__
(self, iterable=None)
[]
def __init__(self, iterable=None): self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable
[ "def", "__init__", "(", "self", ",", "iterable", "=", "None", ")", ":", "self", ".", "end", "=", "end", "=", "[", "]", "end", "+=", "[", "None", ",", "end", ",", "end", "]", "# sentinel node for doubly linked list", "self", ".", "map", "=", "{", "}", "# key --> [key, prev, next]", "if", "iterable", "is", "not", "None", ":", "self", "|=", "iterable" ]
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/inject/thirdparty/oset/pyoset.py#L20-L25
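The __init__ above sets up the classic ordered-set recipe: end is a circular sentinel [None, prev, next] node and self.map gives O(1) membership checks and removals. The add method is not part of this record; the sketch below shows how insertion would splice a node in front of the sentinel under that design.

def add(self, key):
    # Splice [key, prev, next] between the current last node and the
    # sentinel, and remember it in self.map for O(1) lookup.
    if key not in self.map:
        end = self.end
        curr = end[1]  # current last node (the sentinel's prev pointer)
        curr[2] = end[1] = self.map[key] = [key, curr, end]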
LiberAI/NSpM
cc352dbbda6751e8cf19769c9440c03e31687829
gsoc/zheyuan/pipeline/paraphrase_questions.py
python
paraphrase_questions
(tokenizer, device, model, sentence)
return final_outputs
@param tokenizer: Tokenizer is in charge of preparing the inputs for a model @param device: Device the model will be run on @param model: The pre-trained model @param sentence: The sentence need to be templates @return: final_outputs: the candidates of templates questions
[]
def paraphrase_questions(tokenizer, device, model, sentence): """ @param tokenizer: Tokenizer is in charge of preparing the inputs for a model @param device: Device the model will be run on @param model: The pre-trained model @param sentence: The sentence need to be templates @return: final_outputs: the candidates of templates questions """ sentence = sentence.replace("<A>", "XYZ") text = "paraphrase: " + sentence + " </s>" max_len = 256 encoding = tokenizer.encode_plus(text, pad_to_max_length=True, return_tensors="pt") input_ids, attention_masks = encoding["input_ids"].to(device), encoding["attention_mask"].to(device) beam_outputs = model.generate( input_ids=input_ids, attention_mask=attention_masks, do_sample=True, max_length=256, top_k=120, top_p=0.98, early_stopping=True, num_return_sequences=10 ) print("\nOriginal Question ::") print(sentence) print("\n") print("Paraphrased Questions :: ") final_outputs = [] for beam_output in beam_outputs: sent = tokenizer.decode(beam_output, skip_special_tokens=True, clean_up_tokenization_spaces=True) if sent.replace("?", " ?").lower() != sentence.lower() and sent.replace("?", " ?") not in final_outputs: if has_NNP(sent.replace("?", " ?"), count_NNP(sent.replace("?", " ?"))): sent = re.sub('XYZ', '<A>', sent, flags=re.IGNORECASE) final_outputs.append(sent.replace("?", " ?")) else: print("******************", sent.replace("?", " ?")) sentence = sentence.replace("XYZ", "<A>") return final_outputs
[ "def", "paraphrase_questions", "(", "tokenizer", ",", "device", ",", "model", ",", "sentence", ")", ":", "sentence", "=", "sentence", ".", "replace", "(", "\"<A>\"", ",", "\"XYZ\"", ")", "text", "=", "\"paraphrase: \"", "+", "sentence", "+", "\" </s>\"", "max_len", "=", "256", "encoding", "=", "tokenizer", ".", "encode_plus", "(", "text", ",", "pad_to_max_length", "=", "True", ",", "return_tensors", "=", "\"pt\"", ")", "input_ids", ",", "attention_masks", "=", "encoding", "[", "\"input_ids\"", "]", ".", "to", "(", "device", ")", ",", "encoding", "[", "\"attention_mask\"", "]", ".", "to", "(", "device", ")", "beam_outputs", "=", "model", ".", "generate", "(", "input_ids", "=", "input_ids", ",", "attention_mask", "=", "attention_masks", ",", "do_sample", "=", "True", ",", "max_length", "=", "256", ",", "top_k", "=", "120", ",", "top_p", "=", "0.98", ",", "early_stopping", "=", "True", ",", "num_return_sequences", "=", "10", ")", "print", "(", "\"\\nOriginal Question ::\"", ")", "print", "(", "sentence", ")", "print", "(", "\"\\n\"", ")", "print", "(", "\"Paraphrased Questions :: \"", ")", "final_outputs", "=", "[", "]", "for", "beam_output", "in", "beam_outputs", ":", "sent", "=", "tokenizer", ".", "decode", "(", "beam_output", ",", "skip_special_tokens", "=", "True", ",", "clean_up_tokenization_spaces", "=", "True", ")", "if", "sent", ".", "replace", "(", "\"?\"", ",", "\" ?\"", ")", ".", "lower", "(", ")", "!=", "sentence", ".", "lower", "(", ")", "and", "sent", ".", "replace", "(", "\"?\"", ",", "\" ?\"", ")", "not", "in", "final_outputs", ":", "if", "has_NNP", "(", "sent", ".", "replace", "(", "\"?\"", ",", "\" ?\"", ")", ",", "count_NNP", "(", "sent", ".", "replace", "(", "\"?\"", ",", "\" ?\"", ")", ")", ")", ":", "sent", "=", "re", ".", "sub", "(", "'XYZ'", ",", "'<A>'", ",", "sent", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "final_outputs", ".", "append", "(", "sent", ".", "replace", "(", "\"?\"", ",", "\" ?\"", ")", ")", "else", ":", "print", "(", "\"******************\"", ",", "sent", ".", "replace", "(", "\"?\"", ",", "\" ?\"", ")", ")", "sentence", "=", "sentence", ".", "replace", "(", "\"XYZ\"", ",", "\"<A>\"", ")", "return", "final_outputs" ]
https://github.com/LiberAI/NSpM/blob/cc352dbbda6751e8cf19769c9440c03e31687829/gsoc/zheyuan/pipeline/paraphrase_questions.py#L61-L101
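Wiring up the arguments that paraphrase_questions above expects would look roughly like this; the 't5-base' checkpoint is an assumption for illustration, not the project's actual fine-tuned model.

import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = T5Tokenizer.from_pretrained('t5-base')  # hypothetical checkpoint
model = T5ForConditionalGeneration.from_pretrained('t5-base').to(device)

candidates = paraphrase_questions(tokenizer, device, model,
                                  'what is the population of <A> ?')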
OpenXenManager/openxenmanager
1cb5c1cb13358ba584856e99a94f9669d17670ff
src/OXM/window_storage.py
python
oxcWindowStorage.on_cancelnewstorage_clicked
(self, widget, data=None)
Function called when you press "Cancel" on new storage wizard
Function called when you press "Cancel" on new storage wizard
[ "Function", "called", "when", "you", "press", "Cancel", "on", "new", "storage", "wizard" ]
def on_cancelnewstorage_clicked(self, widget, data=None): """ Function called when you press "Cancel" on new storage wizard """ self.builder.get_object("newstorage").hide()
[ "def", "on_cancelnewstorage_clicked", "(", "self", ",", "widget", ",", "data", "=", "None", ")", ":", "self", ".", "builder", ".", "get_object", "(", "\"newstorage\"", ")", ".", "hide", "(", ")" ]
https://github.com/OpenXenManager/openxenmanager/blob/1cb5c1cb13358ba584856e99a94f9669d17670ff/src/OXM/window_storage.py#L423-L427
travisgoodspeed/goodfet
1750cc1e8588af5470385e52fa098ca7364c2863
client/GoodFET.py
python
GoodFET.peek32
(self,address, memory="vn")
return (self.peek16(address,memory)+ (self.peek16(address+2,memory)<<16))
Peek 32 bits.
Peek 32 bits.
[ "Peek", "32", "bits", "." ]
def peek32(self,address, memory="vn"): """Peek 32 bits.""" return (self.peek16(address,memory)+ (self.peek16(address+2,memory)<<16));
[ "def", "peek32", "(", "self", ",", "address", ",", "memory", "=", "\"vn\"", ")", ":", "return", "(", "self", ".", "peek16", "(", "address", ",", "memory", ")", "+", "(", "self", ".", "peek16", "(", "address", "+", "2", ",", "memory", ")", "<<", "16", ")", ")" ]
https://github.com/travisgoodspeed/goodfet/blob/1750cc1e8588af5470385e52fa098ca7364c2863/client/GoodFET.py#L773-L776
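The composition above is little-endian: the 16-bit word at address is the low half and the word at address+2 the high half, as this check with plain integers shows.

low, high = 0xBEEF, 0xDEAD  # hypothetical results of two peek16 reads
assert low + (high << 16) == 0xDEADBEEF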
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/stats/plm.py
python
PanelOLS._df_raw
(self)
return df
Returns the degrees of freedom.
Returns the degrees of freedom.
[ "Returns", "the", "degrees", "of", "freedom", "." ]
def _df_raw(self): """Returns the degrees of freedom.""" df = math.rank(self._x_trans.values) if self._time_effects: df += self._total_times return df
[ "def", "_df_raw", "(", "self", ")", ":", "df", "=", "math", ".", "rank", "(", "self", ".", "_x_trans", ".", "values", ")", "if", "self", ".", "_time_effects", ":", "df", "+=", "self", ".", "_total_times", "return", "df" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/stats/plm.py#L332-L338
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/videos/apps.py
python
VideosConfig.ready
(self)
[]
def ready(self): super(VideosConfig, self).ready() from tendenci.apps.videos.signals import init_signals, create_notice_types init_signals() post_migrate.connect(create_notice_types, sender=self)
[ "def", "ready", "(", "self", ")", ":", "super", "(", "VideosConfig", ",", "self", ")", ".", "ready", "(", ")", "from", "tendenci", ".", "apps", ".", "videos", ".", "signals", "import", "init_signals", ",", "create_notice_types", "init_signals", "(", ")", "post_migrate", ".", "connect", "(", "create_notice_types", ",", "sender", "=", "self", ")" ]
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/videos/apps.py#L9-L13
huntfx/MouseTracks
4dfab6386f9461be77cb19b54c9c498d74fb4ef6
mousetracks/api/web.py
python
close_connection
(port_type=None)
return jsonify(False)
[]
def close_connection(port_type=None): if port_type.lower() == 'message': app.config['PIPE_CONTROL_SEND'].send(CLOSE_MESSAGE_CONNECTIONS) return jsonify(True) return jsonify(False)
[ "def", "close_connection", "(", "port_type", "=", "None", ")", ":", "if", "port_type", ".", "lower", "(", ")", "==", "'message'", ":", "app", ".", "config", "[", "'PIPE_CONTROL_SEND'", "]", ".", "send", "(", "CLOSE_MESSAGE_CONNECTIONS", ")", "return", "jsonify", "(", "True", ")", "return", "jsonify", "(", "False", ")" ]
https://github.com/huntfx/MouseTracks/blob/4dfab6386f9461be77cb19b54c9c498d74fb4ef6/mousetracks/api/web.py#L95-L99
ethereum/trinity
6383280c5044feb06695ac2f7bc1100b7bcf4fe0
trinity/db/manager.py
python
_run
()
[]
def _run() -> None: from eth.db.backends.level import LevelDB from eth.db.chain import ChainDB from trinity.cli_parser import parser from trinity.config import Eth1AppConfig, TrinityConfig from trinity.constants import APP_IDENTIFIER_ETH1 from trinity.initialization import ( initialize_data_dir, is_data_dir_initialized, is_database_initialized, initialize_database, ensure_eth1_dirs, ) # Require a root dir to be specified as we don't want to mess with the default one. for action in parser._actions: if action.dest == 'trinity_root_dir': action.required = True break args = parser.parse_args() # FIXME: Figure out a way to avoid having to set this. args.sync_mode = "full" logging.basicConfig( level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S') for name, level in args.log_levels.items(): logging.getLogger(name).setLevel(level) trinity_config = TrinityConfig.from_parser_args(args, APP_IDENTIFIER_ETH1, (Eth1AppConfig,)) trinity_config.trinity_root_dir.mkdir(exist_ok=True) if not is_data_dir_initialized(trinity_config): initialize_data_dir(trinity_config) with trinity_config.process_id_file('database'): app_config = trinity_config.get_app_config(Eth1AppConfig) ensure_eth1_dirs(app_config) base_db = LevelDB(db_path=app_config.database_dir) chaindb = ChainDB(base_db) if not is_database_initialized(chaindb): chain_config = app_config.get_chain_config() initialize_database(chain_config, chaindb, base_db) manager = DBManager(base_db) with manager.run(trinity_config.database_ipc_path): try: manager.wait_stopped() except KeyboardInterrupt: pass
[ "def", "_run", "(", ")", "->", "None", ":", "from", "eth", ".", "db", ".", "backends", ".", "level", "import", "LevelDB", "from", "eth", ".", "db", ".", "chain", "import", "ChainDB", "from", "trinity", ".", "cli_parser", "import", "parser", "from", "trinity", ".", "config", "import", "Eth1AppConfig", ",", "TrinityConfig", "from", "trinity", ".", "constants", "import", "APP_IDENTIFIER_ETH1", "from", "trinity", ".", "initialization", "import", "(", "initialize_data_dir", ",", "is_data_dir_initialized", ",", "is_database_initialized", ",", "initialize_database", ",", "ensure_eth1_dirs", ",", ")", "# Require a root dir to be specified as we don't want to mess with the default one.", "for", "action", "in", "parser", ".", "_actions", ":", "if", "action", ".", "dest", "==", "'trinity_root_dir'", ":", "action", ".", "required", "=", "True", "break", "args", "=", "parser", ".", "parse_args", "(", ")", "# FIXME: Figure out a way to avoid having to set this.", "args", ".", "sync_mode", "=", "\"full\"", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "'%(asctime)s %(levelname)s: %(message)s'", ",", "datefmt", "=", "'%H:%M:%S'", ")", "for", "name", ",", "level", "in", "args", ".", "log_levels", ".", "items", "(", ")", ":", "logging", ".", "getLogger", "(", "name", ")", ".", "setLevel", "(", "level", ")", "trinity_config", "=", "TrinityConfig", ".", "from_parser_args", "(", "args", ",", "APP_IDENTIFIER_ETH1", ",", "(", "Eth1AppConfig", ",", ")", ")", "trinity_config", ".", "trinity_root_dir", ".", "mkdir", "(", "exist_ok", "=", "True", ")", "if", "not", "is_data_dir_initialized", "(", "trinity_config", ")", ":", "initialize_data_dir", "(", "trinity_config", ")", "with", "trinity_config", ".", "process_id_file", "(", "'database'", ")", ":", "app_config", "=", "trinity_config", ".", "get_app_config", "(", "Eth1AppConfig", ")", "ensure_eth1_dirs", "(", "app_config", ")", "base_db", "=", "LevelDB", "(", "db_path", "=", "app_config", ".", "database_dir", ")", "chaindb", "=", "ChainDB", "(", "base_db", ")", "if", "not", "is_database_initialized", "(", "chaindb", ")", ":", "chain_config", "=", "app_config", ".", "get_chain_config", "(", ")", "initialize_database", "(", "chain_config", ",", "chaindb", ",", "base_db", ")", "manager", "=", "DBManager", "(", "base_db", ")", "with", "manager", ".", "run", "(", "trinity_config", ".", "database_ipc_path", ")", ":", "try", ":", "manager", ".", "wait_stopped", "(", ")", "except", "KeyboardInterrupt", ":", "pass" ]
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/trinity/db/manager.py#L376-L424
lad1337/XDM
0c1b7009fe00f06f102a6f67c793478f515e7efe
site-packages/pylint/checkers/variables.py
python
VariablesChecker.leave_genexpr
(self, _)
leave genexpr: update consumption analysis variable
leave genexpr: update consumption analysis variable
[ "leave", "genexpr", ":", "update", "consumption", "analysis", "variable" ]
def leave_genexpr(self, _): """leave genexpr: update consumption analysis variable """ # do not check for not used locals here self._to_consume.pop()
[ "def", "leave_genexpr", "(", "self", ",", "_", ")", ":", "# do not check for not used locals here", "self", ".", "_to_consume", ".", "pop", "(", ")" ]
https://github.com/lad1337/XDM/blob/0c1b7009fe00f06f102a6f67c793478f515e7efe/site-packages/pylint/checkers/variables.py#L236-L240
mckinziebrandon/DeepChatModels
4fef8a6ce00d92235a2fb0e427d2ec60833022d2
chatbot/_models.py
python
BucketModel.compile
(self)
Configure training process. Name was inspired by Keras. <3
Configure training process. Name was inspired by Keras. <3
[ "Configure", "training", "process", ".", "Name", "was", "inspired", "by", "Keras", ".", "<3" ]
def compile(self): """ Configure training process. Name was inspired by Keras. <3 """ if self.losses is None: raise ValueError("Tried compiling model before defining losses.") print("Configuring training operations. This may take some time . . . ") # Note: variables are trainable=True by default. params = tf.trainable_variables() # train_op will store the parameter (S)GD train_op. self.apply_gradients = [] optimizer = OPTIMIZERS[self.optimizer](self.learning_rate) for b in range(len(self.buckets)): gradients = tf.gradients(self.losses[b], params) # Gradient clipping is actually extremely simple, it basically just # checks if L2Norm(gradients) > max_gradient, and if it is, # it returns (gradients / L2Norm(gradients)) * max_grad. clipped_gradients, _ = tf.clip_by_global_norm( gradients, self.max_gradient) self.apply_gradients.append(optimizer.apply_gradients( zip(clipped_gradients, params),global_step=self.global_step)) super(BucketModel, self).compile()
[ "def", "compile", "(", "self", ")", ":", "if", "self", ".", "losses", "is", "None", ":", "raise", "ValueError", "(", "\"Tried compiling model before defining losses.\"", ")", "print", "(", "\"Configuring training operations. This may take some time . . . \"", ")", "# Note: variables are trainable=True by default.", "params", "=", "tf", ".", "trainable_variables", "(", ")", "# train_op will store the parameter (S)GD train_op.", "self", ".", "apply_gradients", "=", "[", "]", "optimizer", "=", "OPTIMIZERS", "[", "self", ".", "optimizer", "]", "(", "self", ".", "learning_rate", ")", "for", "b", "in", "range", "(", "len", "(", "self", ".", "buckets", ")", ")", ":", "gradients", "=", "tf", ".", "gradients", "(", "self", ".", "losses", "[", "b", "]", ",", "params", ")", "# Gradient clipping is actually extremely simple, it basically just", "# checks if L2Norm(gradients) > max_gradient, and if it is,", "# it returns (gradients / L2Norm(gradients)) * max_grad.", "clipped_gradients", ",", "_", "=", "tf", ".", "clip_by_global_norm", "(", "gradients", ",", "self", ".", "max_gradient", ")", "self", ".", "apply_gradients", ".", "append", "(", "optimizer", ".", "apply_gradients", "(", "zip", "(", "clipped_gradients", ",", "params", ")", ",", "global_step", "=", "self", ".", "global_step", ")", ")", "super", "(", "BucketModel", ",", "self", ")", ".", "compile", "(", ")" ]
https://github.com/mckinziebrandon/DeepChatModels/blob/4fef8a6ce00d92235a2fb0e427d2ec60833022d2/chatbot/_models.py#L247-L269
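The comment in the record sketches what tf.clip_by_global_norm does; here is that rule in plain NumPy, as a rough equivalent rather than the TensorFlow implementation.

import numpy as np

def clip_by_global_norm(grads, max_norm):
    # Joint L2 norm over all gradients; rescale only when it exceeds max_norm.
    global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    if global_norm > max_norm:
        grads = [g * (max_norm / global_norm) for g in grads]
    return grads, global_norm

clipped, norm = clip_by_global_norm([np.array([3.0, 4.0])], max_norm=1.0)
print(clipped[0], norm)  # [0.6 0.8] 5.0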
MicrosoftResearch/Azimuth
84eb013b8dde7132357a9b69206e99a4c65e2b89
azimuth/load_data.py
python
mergeV1_V2
(data_file, data_file2, learn_options)
return Xdf, Y, gene_position, target_genes
ground_truth_label, etc. are taken to correspond to the V2 data, and then the V1 is appropriately matched based on semantics
ground_truth_label, etc. are taken to correspond to the V2 data, and then the V1 is appropriately matched based on semantics
[ "ground_truth_label", "etc", ".", "are", "taken", "to", "correspond", "to", "the", "V2", "data", "and", "then", "the", "V1", "is", "appropriately", "matched", "based", "on", "semantics" ]
def mergeV1_V2(data_file, data_file2, learn_options): ''' ground_truth_label, etc. are taken to correspond to the V2 data, and then the V1 is appropriately matched based on semantics ''' assert not learn_options['include_strand'], "don't currently have 'Strand' column in V1 data" annotations, gene_position1, target_genes1, Xdf1, Y1 = read_V1_data(data_file, learn_options) Xdf2, drugs_to_genes, target_genes2, Y2, gene_position2 = read_V2_data(data_file2) Y1.rename(columns={'average rank': learn_options["rank-transformed target name"]}, inplace=True) Y1.rename(columns={'average threshold': learn_options["binary target name"]}, inplace=True) # rename columns, and add a dummy "drug" to V1 so can join the data sets Y1["drug"] = ["nodrug" for x in range(Y1.shape[0])] Y1 = Y1.set_index('drug', append=True) Y1.index.names = ['Sequence', 'Target gene', 'drug'] Y_cols_to_keep = np.unique(['Target gene', 'test', 'score_drug_gene_rank', 'score_drug_gene_threshold']) Y1 = Y1[Y_cols_to_keep] Y2 = Y2[Y_cols_to_keep] Xdf1["drug"] = ["nodrug" for x in range(Xdf1.shape[0])] Xdf1 = Xdf1.set_index('drug', append=True) X_cols_to_keep = ['30mer', 'Strand'] Xdf1 = Xdf1[X_cols_to_keep] Xdf2 = Xdf2[X_cols_to_keep] gene_position1["drug"] = ["nodrug" for x in range(gene_position1.shape[0])] gene_position1 = gene_position1.set_index('drug', append=True) gene_position1.index.names = ['Sequence', 'Target gene', 'drug'] cols_to_keep = [u'Percent Peptide', u'Amino Acid Cut position'] gene_position1 = gene_position1[cols_to_keep] gene_position2 = gene_position2[cols_to_keep] Y = pandas.concat((Y1, Y2), axis=0) Xdf = pandas.concat((Xdf1, Xdf2), axis=0) gene_position = pandas.concat((gene_position1, gene_position2)) # target_genes = target_genes1 + target_genes2 target_genes = np.concatenate((target_genes1, target_genes2)) save_to_file = False if save_to_file: Y.index.names = ['Sequence', 'Target', 'drug'] assert np.all(Xdf.index.values==Y.index.values), "rows don't match up" onedupind = np.where(Y.index.duplicated())[0][0] alldupind = np.where(Y.index.get_level_values(0).values==Y.index[onedupind][0])[0] #arbitrarily set one of these to have "nodrug2" as the third level index #so that they are not repeated, and the joints therefore do not augment the data set assert len(alldupind)==2, "expected only duplicates" newindex = Y.index.tolist() newindex[onedupind] = (newindex[onedupind][0], newindex[onedupind][1], "nodrug2") Y.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names) Xdf.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names) # there seems to be a duplicate index, and thus this increases the data set size, so doing it the hacky way... XandY = pandas.merge(Xdf, Y, how="inner", left_index=True, right_index=True) gene_position_tmp = gene_position.copy() gene_position_tmp.index.names = ['Sequence', 'Target', 'drug'] gene_position_tmp.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names) XandY = pandas.merge(XandY, gene_position_tmp, how="inner", left_index=True, right_index=True) # truncate to 30mers XandY["30mer"] = XandY["30mer"].apply(lambda x: x[0:30]) XandY.to_csv(r'D:\Source\CRISPR\data\tmp\V3.csv') return Xdf, Y, gene_position, target_genes
[ "def", "mergeV1_V2", "(", "data_file", ",", "data_file2", ",", "learn_options", ")", ":", "assert", "not", "learn_options", "[", "'include_strand'", "]", ",", "\"don't currently have 'Strand' column in V1 data\"", "annotations", ",", "gene_position1", ",", "target_genes1", ",", "Xdf1", ",", "Y1", "=", "read_V1_data", "(", "data_file", ",", "learn_options", ")", "Xdf2", ",", "drugs_to_genes", ",", "target_genes2", ",", "Y2", ",", "gene_position2", "=", "read_V2_data", "(", "data_file2", ")", "Y1", ".", "rename", "(", "columns", "=", "{", "'average rank'", ":", "learn_options", "[", "\"rank-transformed target name\"", "]", "}", ",", "inplace", "=", "True", ")", "Y1", ".", "rename", "(", "columns", "=", "{", "'average threshold'", ":", "learn_options", "[", "\"binary target name\"", "]", "}", ",", "inplace", "=", "True", ")", "# rename columns, and add a dummy \"drug\" to V1 so can join the data sets", "Y1", "[", "\"drug\"", "]", "=", "[", "\"nodrug\"", "for", "x", "in", "range", "(", "Y1", ".", "shape", "[", "0", "]", ")", "]", "Y1", "=", "Y1", ".", "set_index", "(", "'drug'", ",", "append", "=", "True", ")", "Y1", ".", "index", ".", "names", "=", "[", "'Sequence'", ",", "'Target gene'", ",", "'drug'", "]", "Y_cols_to_keep", "=", "np", ".", "unique", "(", "[", "'Target gene'", ",", "'test'", ",", "'score_drug_gene_rank'", ",", "'score_drug_gene_threshold'", "]", ")", "Y1", "=", "Y1", "[", "Y_cols_to_keep", "]", "Y2", "=", "Y2", "[", "Y_cols_to_keep", "]", "Xdf1", "[", "\"drug\"", "]", "=", "[", "\"nodrug\"", "for", "x", "in", "range", "(", "Xdf1", ".", "shape", "[", "0", "]", ")", "]", "Xdf1", "=", "Xdf1", ".", "set_index", "(", "'drug'", ",", "append", "=", "True", ")", "X_cols_to_keep", "=", "[", "'30mer'", ",", "'Strand'", "]", "Xdf1", "=", "Xdf1", "[", "X_cols_to_keep", "]", "Xdf2", "=", "Xdf2", "[", "X_cols_to_keep", "]", "gene_position1", "[", "\"drug\"", "]", "=", "[", "\"nodrug\"", "for", "x", "in", "range", "(", "gene_position1", ".", "shape", "[", "0", "]", ")", "]", "gene_position1", "=", "gene_position1", ".", "set_index", "(", "'drug'", ",", "append", "=", "True", ")", "gene_position1", ".", "index", ".", "names", "=", "[", "'Sequence'", ",", "'Target gene'", ",", "'drug'", "]", "cols_to_keep", "=", "[", "u'Percent Peptide'", ",", "u'Amino Acid Cut position'", "]", "gene_position1", "=", "gene_position1", "[", "cols_to_keep", "]", "gene_position2", "=", "gene_position2", "[", "cols_to_keep", "]", "Y", "=", "pandas", ".", "concat", "(", "(", "Y1", ",", "Y2", ")", ",", "axis", "=", "0", ")", "Xdf", "=", "pandas", ".", "concat", "(", "(", "Xdf1", ",", "Xdf2", ")", ",", "axis", "=", "0", ")", "gene_position", "=", "pandas", ".", "concat", "(", "(", "gene_position1", ",", "gene_position2", ")", ")", "# target_genes = target_genes1 + target_genes2", "target_genes", "=", "np", ".", "concatenate", "(", "(", "target_genes1", ",", "target_genes2", ")", ")", "save_to_file", "=", "False", "if", "save_to_file", ":", "Y", ".", "index", ".", "names", "=", "[", "'Sequence'", ",", "'Target'", ",", "'drug'", "]", "assert", "np", ".", "all", "(", "Xdf", ".", "index", ".", "values", "==", "Y", ".", "index", ".", "values", ")", ",", "\"rows don't match up\"", "onedupind", "=", "np", ".", "where", "(", "Y", ".", "index", ".", "duplicated", "(", ")", ")", "[", "0", "]", "[", "0", "]", "alldupind", "=", "np", ".", "where", "(", "Y", ".", "index", ".", "get_level_values", "(", "0", ")", ".", "values", "==", "Y", ".", "index", "[", "onedupind", "]", "[", "0", "]", ")", "[", "0", "]", "#arbitrarily set 
one of these to have \"nodrug2\" as the third level index", "#so that they are not repeated, and the joints therefore do not augment the data set", "assert", "len", "(", "alldupind", ")", "==", "2", ",", "\"expected only duplicates\"", "newindex", "=", "Y", ".", "index", ".", "tolist", "(", ")", "newindex", "[", "onedupind", "]", "=", "(", "newindex", "[", "onedupind", "]", "[", "0", "]", ",", "newindex", "[", "onedupind", "]", "[", "1", "]", ",", "\"nodrug2\"", ")", "Y", ".", "index", "=", "pandas", ".", "MultiIndex", ".", "from_tuples", "(", "newindex", ",", "names", "=", "Y", ".", "index", ".", "names", ")", "Xdf", ".", "index", "=", "pandas", ".", "MultiIndex", ".", "from_tuples", "(", "newindex", ",", "names", "=", "Y", ".", "index", ".", "names", ")", "# there seems to be a duplicate index, and thus this increases the data set size, so doing it the hacky way...", "XandY", "=", "pandas", ".", "merge", "(", "Xdf", ",", "Y", ",", "how", "=", "\"inner\"", ",", "left_index", "=", "True", ",", "right_index", "=", "True", ")", "gene_position_tmp", "=", "gene_position", ".", "copy", "(", ")", "gene_position_tmp", ".", "index", ".", "names", "=", "[", "'Sequence'", ",", "'Target'", ",", "'drug'", "]", "gene_position_tmp", ".", "index", "=", "pandas", ".", "MultiIndex", ".", "from_tuples", "(", "newindex", ",", "names", "=", "Y", ".", "index", ".", "names", ")", "XandY", "=", "pandas", ".", "merge", "(", "XandY", ",", "gene_position_tmp", ",", "how", "=", "\"inner\"", ",", "left_index", "=", "True", ",", "right_index", "=", "True", ")", "# truncate to 30mers", "XandY", "[", "\"30mer\"", "]", "=", "XandY", "[", "\"30mer\"", "]", ".", "apply", "(", "lambda", "x", ":", "x", "[", "0", ":", "30", "]", ")", "XandY", ".", "to_csv", "(", "r'D:\\Source\\CRISPR\\data\\tmp\\V3.csv'", ")", "return", "Xdf", ",", "Y", ",", "gene_position", ",", "target_genes" ]
https://github.com/MicrosoftResearch/Azimuth/blob/84eb013b8dde7132357a9b69206e99a4c65e2b89/azimuth/load_data.py#L380-L452
HazyResearch/pdftotree
0686a1845c7901aa975544a9107fc10594523986
pdftotree/utils/img_utils.py
python
normalize_bbox
(coords, ymax, scaler=2)
return [ coords[0] * scaler, ymax - (coords[3] * scaler), coords[2] * scaler, ymax - (coords[1] * scaler), ]
scales all coordinates and flip y axis due to different origin coordinates (top left vs. bottom left)
scales all coordinates and flip y axis due to different origin coordinates (top left vs. bottom left)
[ "scales", "all", "coordinates", "and", "flip", "y", "axis", "due", "to", "different", "origin", "coordinates", "(", "top", "left", "vs", ".", "bottom", "left", ")" ]
def normalize_bbox(coords, ymax, scaler=2): """ scales all coordinates and flip y axis due to different origin coordinates (top left vs. bottom left) """ return [ coords[0] * scaler, ymax - (coords[3] * scaler), coords[2] * scaler, ymax - (coords[1] * scaler), ]
[ "def", "normalize_bbox", "(", "coords", ",", "ymax", ",", "scaler", "=", "2", ")", ":", "return", "[", "coords", "[", "0", "]", "*", "scaler", ",", "ymax", "-", "(", "coords", "[", "3", "]", "*", "scaler", ")", ",", "coords", "[", "2", "]", "*", "scaler", ",", "ymax", "-", "(", "coords", "[", "1", "]", "*", "scaler", ")", ",", "]" ]
https://github.com/HazyResearch/pdftotree/blob/0686a1845c7901aa975544a9107fc10594523986/pdftotree/utils/img_utils.py#L39-L49
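A numeric check of normalize_bbox above, assuming it is imported from the module: x values are scaled, and y values are scaled and flipped against ymax to move from a bottom-left to a top-left origin.

# Box [x0, y0, x1, y1] with bottom-left origin; ymax is the scaled page height.
print(normalize_bbox([10, 20, 30, 40], ymax=200, scaler=2))
# [20, 120, 60, 160]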
rcorcs/NatI
fdf014f4292afdc95250add7b6658468043228e1
en/parser/nltk_lite/semantics/utilities.py
python
text_evaluate
(inputs, grammar, model, assignment)
return evaluations
Add the truth-in-a-model value to each semantic representation for each syntactic parse of each input sentences.
Add the truth-in-a-model value to each semantic representation for each syntactic parse of each input sentences.
[ "Add", "the", "truth", "-", "in", "-", "a", "-", "model", "value", "to", "each", "semantic", "representation", "for", "each", "syntactic", "parse", "of", "each", "input", "sentences", "." ]
def text_evaluate(inputs, grammar, model, assignment): """ Add the truth-in-a-model value to each semantic representation for each syntactic parse of each input sentences. """ g = assignment m = model semreps = text_interpret(inputs, grammar) evaluations = {} for sent in inputs: syn_sem_val = \ [(syn, sem, m.evaluate(str(sem), g)) for (syn, sem) in semreps[sent]] evaluations[sent] = syn_sem_val return evaluations
[ "def", "text_evaluate", "(", "inputs", ",", "grammar", ",", "model", ",", "assignment", ")", ":", "g", "=", "assignment", "m", "=", "model", "semreps", "=", "text_interpret", "(", "inputs", ",", "grammar", ")", "evaluations", "=", "{", "}", "for", "sent", "in", "inputs", ":", "syn_sem_val", "=", "[", "(", "syn", ",", "sem", ",", "m", ".", "evaluate", "(", "str", "(", "sem", ")", ",", "g", ")", ")", "for", "(", "syn", ",", "sem", ")", "in", "semreps", "[", "sent", "]", "]", "evaluations", "[", "sent", "]", "=", "syn_sem_val", "return", "evaluations" ]
https://github.com/rcorcs/NatI/blob/fdf014f4292afdc95250add7b6658468043228e1/en/parser/nltk_lite/semantics/utilities.py#L89-L102
Trusted-AI/adversarial-robustness-toolbox
9fabffdbb92947efa1ecc5d825d634d30dfbaf29
art/defences/preprocessor/mp3_compression.py
python
Mp3Compression.__call__
(self, x: np.ndarray, y: Optional[np.ndarray] = None)
return x_mp3, y
Apply MP3 compression to sample `x`. :param x: Sample to compress with shape `(batch_size, length, channel)` or an array of sample arrays with shape (length,) or (length, channel). :param y: Labels of the sample `x`. This function does not affect them in any way. :return: Compressed sample.
Apply MP3 compression to sample `x`.
[ "Apply", "MP3", "compression", "to", "sample", "x", "." ]
def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: """ Apply MP3 compression to sample `x`. :param x: Sample to compress with shape `(batch_size, length, channel)` or an array of sample arrays with shape (length,) or (length, channel). :param y: Labels of the sample `x`. This function does not affect them in any way. :return: Compressed sample. """ def wav_to_mp3(x, sample_rate): """ Apply MP3 compression to audio input of shape (samples, channel). """ from pydub import AudioSegment from scipy.io.wavfile import write x_dtype = x.dtype normalized = bool(x.min() >= -1.0 and x.max() <= 1.0) if x_dtype != np.int16 and not normalized: # input is not of type np.int16 and seems to be unnormalized. Therefore casting to np.int16. x = x.astype(np.int16) elif x_dtype != np.int16 and normalized: # x is not of type np.int16 and seems to be normalized. Therefore undoing normalization and # casting to np.int16. x = (x * 2 ** 15).astype(np.int16) tmp_wav, tmp_mp3 = BytesIO(), BytesIO() write(tmp_wav, sample_rate, x) AudioSegment.from_wav(tmp_wav).export(tmp_mp3) audio_segment = AudioSegment.from_mp3(tmp_mp3) tmp_wav.close() tmp_mp3.close() x_mp3 = np.array(audio_segment.get_array_of_samples()).reshape((-1, audio_segment.channels)) # WARNING: Sometimes we *still* need to manually resize x_mp3 to original length. # This should not be the case, e.g. see https://github.com/jiaaro/pydub/issues/474 if x.shape[0] != x_mp3.shape[0]: # pragma: no cover logger.warning( "Lengths original input and compressed output don't match. Truncating compressed result." ) x_mp3 = x_mp3[: x.shape[0]] if normalized: # x was normalized. Therefore normalizing x_mp3. x_mp3 = x_mp3 * 2 ** -15 return x_mp3.astype(x_dtype) if x.dtype != object and x.ndim != 3: raise ValueError("Mp3 compression can only be applied to temporal data across at least one channel.") if x.dtype != object and self.channels_first: x = np.swapaxes(x, 1, 2) # apply mp3 compression per audio item x_mp3 = x.copy() for i, x_i in enumerate(tqdm(x, desc="MP3 compression", disable=not self.verbose)): x_i_ndim_0 = x_i.ndim if x.dtype == object: if x_i.ndim == 1: x_i = np.expand_dims(x_i, axis=1) if x_i_ndim_0 == 2 and self.channels_first: x_i = np.swapaxes(x_i, 0, 1) x_i = wav_to_mp3(x_i, self.sample_rate) if x.dtype == object: if x_i_ndim_0 == 2 and self.channels_first: x_i = np.swapaxes(x_i, 0, 1) if x_i_ndim_0 == 1: x_i = np.squeeze(x_i) x_mp3[i] = x_i if x.dtype != object and self.channels_first: x_mp3 = np.swapaxes(x_mp3, 1, 2) return x_mp3, y
[ "def", "__call__", "(", "self", ",", "x", ":", "np", ".", "ndarray", ",", "y", ":", "Optional", "[", "np", ".", "ndarray", "]", "=", "None", ")", "->", "Tuple", "[", "np", ".", "ndarray", ",", "Optional", "[", "np", ".", "ndarray", "]", "]", ":", "def", "wav_to_mp3", "(", "x", ",", "sample_rate", ")", ":", "\"\"\"\n Apply MP3 compression to audio input of shape (samples, channel).\n \"\"\"", "from", "pydub", "import", "AudioSegment", "from", "scipy", ".", "io", ".", "wavfile", "import", "write", "x_dtype", "=", "x", ".", "dtype", "normalized", "=", "bool", "(", "x", ".", "min", "(", ")", ">=", "-", "1.0", "and", "x", ".", "max", "(", ")", "<=", "1.0", ")", "if", "x_dtype", "!=", "np", ".", "int16", "and", "not", "normalized", ":", "# input is not of type np.int16 and seems to be unnormalized. Therefore casting to np.int16.", "x", "=", "x", ".", "astype", "(", "np", ".", "int16", ")", "elif", "x_dtype", "!=", "np", ".", "int16", "and", "normalized", ":", "# x is not of type np.int16 and seems to be normalized. Therefore undoing normalization and", "# casting to np.int16.", "x", "=", "(", "x", "*", "2", "**", "15", ")", ".", "astype", "(", "np", ".", "int16", ")", "tmp_wav", ",", "tmp_mp3", "=", "BytesIO", "(", ")", ",", "BytesIO", "(", ")", "write", "(", "tmp_wav", ",", "sample_rate", ",", "x", ")", "AudioSegment", ".", "from_wav", "(", "tmp_wav", ")", ".", "export", "(", "tmp_mp3", ")", "audio_segment", "=", "AudioSegment", ".", "from_mp3", "(", "tmp_mp3", ")", "tmp_wav", ".", "close", "(", ")", "tmp_mp3", ".", "close", "(", ")", "x_mp3", "=", "np", ".", "array", "(", "audio_segment", ".", "get_array_of_samples", "(", ")", ")", ".", "reshape", "(", "(", "-", "1", ",", "audio_segment", ".", "channels", ")", ")", "# WARNING: Sometimes we *still* need to manually resize x_mp3 to original length.", "# This should not be the case, e.g. see https://github.com/jiaaro/pydub/issues/474", "if", "x", ".", "shape", "[", "0", "]", "!=", "x_mp3", ".", "shape", "[", "0", "]", ":", "# pragma: no cover", "logger", ".", "warning", "(", "\"Lengths original input and compressed output don't match. Truncating compressed result.\"", ")", "x_mp3", "=", "x_mp3", "[", ":", "x", ".", "shape", "[", "0", "]", "]", "if", "normalized", ":", "# x was normalized. 
Therefore normalizing x_mp3.", "x_mp3", "=", "x_mp3", "*", "2", "**", "-", "15", "return", "x_mp3", ".", "astype", "(", "x_dtype", ")", "if", "x", ".", "dtype", "!=", "object", "and", "x", ".", "ndim", "!=", "3", ":", "raise", "ValueError", "(", "\"Mp3 compression can only be applied to temporal data across at least one channel.\"", ")", "if", "x", ".", "dtype", "!=", "object", "and", "self", ".", "channels_first", ":", "x", "=", "np", ".", "swapaxes", "(", "x", ",", "1", ",", "2", ")", "# apply mp3 compression per audio item", "x_mp3", "=", "x", ".", "copy", "(", ")", "for", "i", ",", "x_i", "in", "enumerate", "(", "tqdm", "(", "x", ",", "desc", "=", "\"MP3 compression\"", ",", "disable", "=", "not", "self", ".", "verbose", ")", ")", ":", "x_i_ndim_0", "=", "x_i", ".", "ndim", "if", "x", ".", "dtype", "==", "object", ":", "if", "x_i", ".", "ndim", "==", "1", ":", "x_i", "=", "np", ".", "expand_dims", "(", "x_i", ",", "axis", "=", "1", ")", "if", "x_i_ndim_0", "==", "2", "and", "self", ".", "channels_first", ":", "x_i", "=", "np", ".", "swapaxes", "(", "x_i", ",", "0", ",", "1", ")", "x_i", "=", "wav_to_mp3", "(", "x_i", ",", "self", ".", "sample_rate", ")", "if", "x", ".", "dtype", "==", "object", ":", "if", "x_i_ndim_0", "==", "2", "and", "self", ".", "channels_first", ":", "x_i", "=", "np", ".", "swapaxes", "(", "x_i", ",", "0", ",", "1", ")", "if", "x_i_ndim_0", "==", "1", ":", "x_i", "=", "np", ".", "squeeze", "(", "x_i", ")", "x_mp3", "[", "i", "]", "=", "x_i", "if", "x", ".", "dtype", "!=", "object", "and", "self", ".", "channels_first", ":", "x_mp3", "=", "np", ".", "swapaxes", "(", "x_mp3", ",", "1", ",", "2", ")", "return", "x_mp3", ",", "y" ]
https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/9fabffdbb92947efa1ecc5d825d634d30dfbaf29/art/defences/preprocessor/mp3_compression.py#L70-L149
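The int16 round-trip used inside wav_to_mp3 above, isolated: normalized floats in [-1, 1] are scaled by 2**15 into the int16 range for the codec and scaled back by 2**-15 afterwards.

import numpy as np

x = np.array([-1.0, 0.0, 0.5], dtype=np.float32)  # normalized audio samples
ints = (x * 2 ** 15).astype(np.int16)             # to int16 for the codec
back = ints * 2 ** -15                            # undo after compression
print(ints)  # [-32768      0  16384]
print(back)  # [-1.   0.   0.5]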
donnemartin/gitsome
d7c57abc7cb66e9c910a844f15d4536866da3310
xonsh/environ.py
python
Env.get_ensurer
(self, key, default=None)
return ensurer
Gets an ensurer for the given key.
Gets an ensurer for the given key.
[ "Gets", "an", "ensurer", "for", "the", "given", "key", "." ]
def get_ensurer(self, key, default=None): """Gets an ensurer for the given key.""" if key in self._ensurers: return self._ensurers[key] for k, ensurer in self._ensurers.items(): if isinstance(k, str): continue if k.match(key) is not None: break else: ensurer = self._get_default_ensurer(default=default) self._ensurers[key] = ensurer return ensurer
[ "def", "get_ensurer", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "if", "key", "in", "self", ".", "_ensurers", ":", "return", "self", ".", "_ensurers", "[", "key", "]", "for", "k", ",", "ensurer", "in", "self", ".", "_ensurers", ".", "items", "(", ")", ":", "if", "isinstance", "(", "k", ",", "str", ")", ":", "continue", "if", "k", ".", "match", "(", "key", ")", "is", "not", "None", ":", "break", "else", ":", "ensurer", "=", "self", ".", "_get_default_ensurer", "(", "default", "=", "default", ")", "self", ".", "_ensurers", "[", "key", "]", "=", "ensurer", "return", "ensurer" ]
https://github.com/donnemartin/gitsome/blob/d7c57abc7cb66e9c910a844f15d4536866da3310/xonsh/environ.py#L1392-L1404
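The lookup order above (exact string key, then any regex key whose match succeeds, then a default) is easy to reproduce standalone; the registry contents here are hypothetical.

import re

registry = {
    'PATH': 'path-ensurer',                   # exact-string key
    re.compile(r'.*_DIRS$'): 'dirs-ensurer',  # pattern key
}

def lookup(key, default='default-ensurer'):
    if key in registry:
        return registry[key]
    for k, value in registry.items():
        if not isinstance(k, str) and k.match(key) is not None:
            return value
    return default

print(lookup('XDG_DATA_DIRS'))  # dirs-ensurer
print(lookup('HOME'))           # default-ensurer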
af/djangbone
22ca5f29588c8a4e1d372661e2372a2f2be9fe73
djangbone/views.py
python
DjangboneJSONEncoder.default
(self, obj)
Converts datetime objects to ISO-compatible strings during json serialization. Converts Decimal objects to floats during json serialization.
Converts datetime objects to ISO-compatible strings during json serialization. Converts Decimal objects to floats during json serialization.
[ "Converts", "datetime", "objects", "to", "ISO", "-", "compatible", "strings", "during", "json", "serialization", ".", "Converts", "Decimal", "objects", "to", "floats", "during", "json", "serialization", "." ]
def default(self, obj): """ Converts datetime objects to ISO-compatible strings during json serialization. Converts Decimal objects to floats during json serialization. """ if isinstance(obj, datetime.datetime): return obj.isoformat() elif isinstance(obj, decimal.Decimal): return float(obj) else: return None
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "elif", "isinstance", "(", "obj", ",", "decimal", ".", "Decimal", ")", ":", "return", "float", "(", "obj", ")", "else", ":", "return", "None" ]
https://github.com/af/djangbone/blob/22ca5f29588c8a4e1d372661e2372a2f2be9fe73/djangbone/views.py#L14-L24
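Assuming the class above subclasses json.JSONEncoder (its default override suggests it does), it plugs into the standard entry point via cls=. Note that returning None for unrecognized types serializes them as null instead of raising, which the stock JSONEncoder.default would do.

import datetime
import decimal
import json

payload = {'when': datetime.datetime(2020, 1, 2, 3, 4, 5),
           'price': decimal.Decimal('9.99')}
print(json.dumps(payload, cls=DjangboneJSONEncoder))
# {"when": "2020-01-02T03:04:05", "price": 9.99}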
sighingnow/parsec.py
3b7daa0b93483d6de5817fd445664aeff2f5721a
src/parsec/__init__.py
python
try_choice
(pa, pb)
return pa.try_choice(pb)
Choice one from two parsers with backtrack, implements the operator of `(^)`.
Choice one from two parsers with backtrack, implements the operator of `(^)`.
[ "Choice", "one", "from", "two", "parsers", "with", "backtrack", "implements", "the", "operator", "of", "(", "^", ")", "." ]
def try_choice(pa, pb): '''Choice one from two parsers with backtrack, implements the operator of `(^)`.''' return pa.try_choice(pb)
[ "def", "try_choice", "(", "pa", ",", "pb", ")", ":", "return", "pa", ".", "try_choice", "(", "pb", ")" ]
https://github.com/sighingnow/parsec.py/blob/3b7daa0b93483d6de5817fd445664aeff2f5721a/src/parsec/__init__.py#L338-L340
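A short demonstration of the backtracking described above, assuming parsec's string parser: with '^' a failing left branch rewinds the input before the right branch runs, whereas plain '|' would fail here because string('ab') consumes the shared 'a'.

from parsec import string

p = string('ab') ^ string('ac')  # '^' is the operator form of try_choice
print(p.parse('ac'))             # 'ac' -- the left branch is undone on failure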
dvlab-research/3DSSD
8bc7605d4d3a6ec9051e7689e96a23bdac4c4cd9
lib/np_functions/gt_sampler.py
python
vote_targets_np
(vote_base, gt_boxes_3d)
return vote_mask, vote_target
Generating vote_targets for each vote_base point vote_base: [bs, points_num, 3] gt_boxes_3d: [bs, gt_num, 7] Return: vote_mask: [bs, points_num] vote_target: [bs, points_num, 3]
Generating vote_targets for each vote_base point vote_base: [bs, points_num, 3] gt_boxes_3d: [bs, gt_num, 7]
[ "Generating", "vote_targets", "for", "each", "vote_base", "point", "vote_base", ":", "[", "bs", "points_num", "3", "]", "gt_boxes_3d", ":", "[", "bs", "gt_num", "7", "]" ]
def vote_targets_np(vote_base, gt_boxes_3d): """ Generating vote_targets for each vote_base point vote_base: [bs, points_num, 3] gt_boxes_3d: [bs, gt_num, 7] Return: vote_mask: [bs, points_num] vote_target: [bs, points_num, 3] """ bs, points_num, _ = vote_base.shape vote_mask = np.zeros([bs, points_num], dtype=np.float32) vote_target = np.zeros([bs, points_num, 3], dtype=np.float32) for i in range(bs): cur_vote_base = vote_base[i] cur_gt_boxes_3d = gt_boxes_3d[i] filter_idx = np.where(np.any(np.not_equal(cur_gt_boxes_3d, 0), axis=-1))[0] cur_gt_boxes_3d = cur_gt_boxes_3d[filter_idx] cur_expand_boxes_3d = cur_gt_boxes_3d.copy() cur_expand_boxes_3d[:, 3:-1] += cfg.TRAIN.AUGMENTATIONS.EXPAND_DIMS_LENGTH cur_points_mask = check_inside_points(cur_vote_base, cur_expand_boxes_3d) # [pts_num, gt_num] cur_vote_mask = np.max(cur_points_mask, axis=1).astype(np.float32) vote_mask[i] = cur_vote_mask cur_vote_target_idx = np.argmax(cur_points_mask, axis=1) # [pts_num] cur_vote_target = cur_gt_boxes_3d[cur_vote_target_idx] cur_vote_target[:, 1] = cur_vote_target[:, 1] - cur_vote_target[:, 4] / 2. cur_vote_target = cur_vote_target[:, :3] - cur_vote_base vote_target[i] = cur_vote_target return vote_mask, vote_target
[ "def", "vote_targets_np", "(", "vote_base", ",", "gt_boxes_3d", ")", ":", "bs", ",", "points_num", ",", "_", "=", "vote_base", ".", "shape", "vote_mask", "=", "np", ".", "zeros", "(", "[", "bs", ",", "points_num", "]", ",", "dtype", "=", "np", ".", "float32", ")", "vote_target", "=", "np", ".", "zeros", "(", "[", "bs", ",", "points_num", ",", "3", "]", ",", "dtype", "=", "np", ".", "float32", ")", "for", "i", "in", "range", "(", "bs", ")", ":", "cur_vote_base", "=", "vote_base", "[", "i", "]", "cur_gt_boxes_3d", "=", "gt_boxes_3d", "[", "i", "]", "filter_idx", "=", "np", ".", "where", "(", "np", ".", "any", "(", "np", ".", "not_equal", "(", "cur_gt_boxes_3d", ",", "0", ")", ",", "axis", "=", "-", "1", ")", ")", "[", "0", "]", "cur_gt_boxes_3d", "=", "cur_gt_boxes_3d", "[", "filter_idx", "]", "cur_expand_boxes_3d", "=", "cur_gt_boxes_3d", ".", "copy", "(", ")", "cur_expand_boxes_3d", "[", ":", ",", "3", ":", "-", "1", "]", "+=", "cfg", ".", "TRAIN", ".", "AUGMENTATIONS", ".", "EXPAND_DIMS_LENGTH", "cur_points_mask", "=", "check_inside_points", "(", "cur_vote_base", ",", "cur_expand_boxes_3d", ")", "# [pts_num, gt_num]", "cur_vote_mask", "=", "np", ".", "max", "(", "cur_points_mask", ",", "axis", "=", "1", ")", ".", "astype", "(", "np", ".", "float32", ")", "vote_mask", "[", "i", "]", "=", "cur_vote_mask", "cur_vote_target_idx", "=", "np", ".", "argmax", "(", "cur_points_mask", ",", "axis", "=", "1", ")", "# [pts_num]", "cur_vote_target", "=", "cur_gt_boxes_3d", "[", "cur_vote_target_idx", "]", "cur_vote_target", "[", ":", ",", "1", "]", "=", "cur_vote_target", "[", ":", ",", "1", "]", "-", "cur_vote_target", "[", ":", ",", "4", "]", "/", "2.", "cur_vote_target", "=", "cur_vote_target", "[", ":", ",", ":", "3", "]", "-", "cur_vote_base", "vote_target", "[", "i", "]", "=", "cur_vote_target", "return", "vote_mask", ",", "vote_target" ]
https://github.com/dvlab-research/3DSSD/blob/8bc7605d4d3a6ec9051e7689e96a23bdac4c4cd9/lib/np_functions/gt_sampler.py#L28-L61
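A toy NumPy illustration of the vote-target idea from the record above, simplified to one batch, one axis-aligned box [x, y, z, l, h, w], and a plain bounds test in place of check_inside_points and the cfg-driven box expansion (both simplifications are assumptions of this sketch):

import numpy as np

points = np.array([[0.5, 0.5, 0.5], [5.0, 5.0, 5.0]])  # [pts_num, 3]
box = np.array([0.0, 1.0, 0.0, 2.0, 2.0, 2.0])         # center + dims
low, high = box[:3] - box[3:] / 2.0, box[:3] + box[3:] / 2.0
inside = np.all((points >= low) & (points <= high), axis=1)  # vote mask
bottom_center = box[:3].copy()
bottom_center[1] -= box[4] / 2.0         # same y - h/2 shift as above
target = bottom_center - points          # offset each point should vote
print(inside)           # [ True False]
print(target[inside])   # [[-0.5 -0.5 -0.5]]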
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Tools/pybench/Strings.py
python
CreateStringsWithConcat.test
(self)
[]
def test(self): for i in xrange(self.rounds): s = 'om' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex' s = s + 'xax' s = s + 'xbx' s = s + 'xcx' s = s + 'xdx' s = s + 'xex'
[ "def", "test", "(", "self", ")", ":", "for", "i", "in", "xrange", "(", "self", ".", "rounds", ")", ":", "s", "=", "'om'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'", "s", "=", "s", "+", "'xax'", "s", "=", "s", "+", "'xbx'", "s", "=", "s", "+", "'xcx'", "s", "=", "s", "+", "'xdx'", "s", "=", "s", "+", "'xex'" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Tools/pybench/Strings.py#L256-L317
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/genericpath.py
python
commonprefix
(m)
return s1
Given a list of pathnames, returns the longest common leading component
Given a list of pathnames, returns the longest common leading component
[ "Given", "a", "list", "of", "pathnames", "returns", "the", "longest", "common", "leading", "component" ]
def commonprefix(m): "Given a list of pathnames, returns the longest common leading component" if not m: return '' # Some people pass in a list of pathname parts to operate in an OS-agnostic # fashion; don't try to translate in that case as that's an abuse of the # API and they are already doing what they need to be OS-agnostic and so # they most likely won't be using an os.PathLike object in the sublists. if not isinstance(m[0], (list, tuple)): m = tuple(map(os.fspath, m)) s1 = min(m) s2 = max(m) for i, c in enumerate(s1): if c != s2[i]: return s1[:i] return s1
[ "def", "commonprefix", "(", "m", ")", ":", "if", "not", "m", ":", "return", "''", "# Some people pass in a list of pathname parts to operate in an OS-agnostic", "# fashion; don't try to translate in that case as that's an abuse of the", "# API and they are already doing what they need to be OS-agnostic and so", "# they most likely won't be using an os.PathLike object in the sublists.", "if", "not", "isinstance", "(", "m", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "m", "=", "tuple", "(", "map", "(", "os", ".", "fspath", ",", "m", ")", ")", "s1", "=", "min", "(", "m", ")", "s2", "=", "max", "(", "m", ")", "for", "i", ",", "c", "in", "enumerate", "(", "s1", ")", ":", "if", "c", "!=", "s2", "[", "i", "]", ":", "return", "s1", "[", ":", "i", "]", "return", "s1" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/genericpath.py#L69-L83
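A quick demonstration of the character-wise semantics documented above; note the result can end mid-component, which is why os.path.commonpath exists separately:

import os.path

print(os.path.commonprefix(['/usr/lib', '/usr/local/lib']))  # '/usr/l'
print(os.path.commonprefix([]))                              # ''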
numenta/nupic
b9ebedaf54f49a33de22d8d44dff7c765cdb5548
external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py
python
contiguous_regions
(mask)
return boundaries
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is True and we cover all such regions TODO: this is a pure python implementation which probably has a much faster numpy impl
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is True and we cover all such regions
[ "return", "a", "list", "of", "(", "ind0", "ind1", ")", "such", "that", "mask", "[", "ind0", ":", "ind1", "]", ".", "all", "()", "is", "True", "and", "we", "cover", "all", "such", "regions" ]
def contiguous_regions(mask): """ return a list of (ind0, ind1) such that mask[ind0:ind1].all() is True and we cover all such regions TODO: this is a pure python implementation which probably has a much faster numpy impl """ in_region = None boundaries = [] for i, val in enumerate(mask): if in_region is None and val: in_region = i elif in_region is not None and not val: boundaries.append((in_region, i)) in_region = None if in_region is not None: boundaries.append((in_region, i+1)) return boundaries
[ "def", "contiguous_regions", "(", "mask", ")", ":", "in_region", "=", "None", "boundaries", "=", "[", "]", "for", "i", ",", "val", "in", "enumerate", "(", "mask", ")", ":", "if", "in_region", "is", "None", "and", "val", ":", "in_region", "=", "i", "elif", "in_region", "is", "not", "None", "and", "not", "val", ":", "boundaries", ".", "append", "(", "(", "in_region", ",", "i", ")", ")", "in_region", "=", "None", "if", "in_region", "is", "not", "None", ":", "boundaries", ".", "append", "(", "(", "in_region", ",", "i", "+", "1", ")", ")", "return", "boundaries" ]
https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py#L3207-L3226
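The docstring's TODO hints at a vectorized variant; a sketch of one possible NumPy equivalent (not matplotlib code) that reproduces the (ind0, ind1) pairs:

import numpy as np

mask = np.array([False, True, True, False, True])
d = np.diff(np.concatenate(([False], mask, [False])).astype(int))
starts, ends = np.where(d == 1)[0], np.where(d == -1)[0]
print([(int(s), int(e)) for s, e in zip(starts, ends)])  # [(1, 3), (4, 5)]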
severb/graypy
a5a82c535002db6015473cb59a7476361060d358
graypy/handler.py
python
GELFTCPHandler.makePickle
(self, record)
return super(GELFTCPHandler, self).makePickle(record) + b"\x00"
Add a null terminator to generated pickles as TCP frame objects need to be null terminated :param record: :class:`logging.LogRecord` to create a null terminated GELF log. :type record: logging.LogRecord :return: Null terminated bytes representing a GELF log. :rtype: bytes
Add a null terminator to generated pickles as TCP frame objects need to be null terminated
[ "Add", "a", "null", "terminator", "to", "generated", "pickles", "as", "TCP", "frame", "objects", "need", "to", "be", "null", "terminated" ]
def makePickle(self, record): """Add a null terminator to generated pickles as TCP frame objects need to be null terminated :param record: :class:`logging.LogRecord` to create a null terminated GELF log. :type record: logging.LogRecord :return: Null terminated bytes representing a GELF log. :rtype: bytes """ return super(GELFTCPHandler, self).makePickle(record) + b"\x00"
[ "def", "makePickle", "(", "self", ",", "record", ")", ":", "return", "super", "(", "GELFTCPHandler", ",", "self", ")", ".", "makePickle", "(", "record", ")", "+", "b\"\\x00\"" ]
https://github.com/severb/graypy/blob/a5a82c535002db6015473cb59a7476361060d358/graypy/handler.py#L639-L650
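For context, a toy GELF-over-TCP sender (not graypy's API; host and port are placeholders) showing why the trailing null byte matters: it is the frame delimiter TCP GELF receivers split on.

import socket

def send_gelf_tcp(host, port, gelf_bytes):
    # Each uncompressed GELF JSON message must end with \x00 on TCP.
    with socket.create_connection((host, port)) as sock:
        sock.sendall(gelf_bytes + b"\x00")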
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/api/v2010/account/queue/member.py
python
MemberPage.__repr__
(self)
return '<Twilio.Api.V2010.MemberPage>'
Provide a friendly representation :returns: Machine friendly representation :rtype: str
Provide a friendly representation
[ "Provide", "a", "friendly", "representation" ]
def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Api.V2010.MemberPage>'
[ "def", "__repr__", "(", "self", ")", ":", "return", "'<Twilio.Api.V2010.MemberPage>'" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/queue/member.py#L190-L197
PySimpleGUI/PySimpleGUI
6c0d1fb54f493d45e90180b322fbbe70f7a5af3c
DemoPrograms/Demo_System_Tray_GUI_Window_Design_Pattern.py
python
time_as_int
()
return int(round(time.time()))
[]
def time_as_int(): return int(round(time.time()))
[ "def", "time_as_int", "(", ")", ":", "return", "int", "(", "round", "(", "time", ".", "time", "(", ")", ")", ")" ]
https://github.com/PySimpleGUI/PySimpleGUI/blob/6c0d1fb54f493d45e90180b322fbbe70f7a5af3c/DemoPrograms/Demo_System_Tray_GUI_Window_Design_Pattern.py#L34-L35
aws/aws-parallelcluster
f1fe5679a01c524e7ea904c329bd6d17318c6cd9
cli/src/pcluster/schemas/cluster_schema.py
python
SchedulerPluginComputeResourceSchema.make_resource
(self, data, **kwargs)
return SchedulerPluginComputeResource(**data)
Generate resource.
Generate resource.
[ "Generate", "resource", "." ]
def make_resource(self, data, **kwargs): """Generate resource.""" return SchedulerPluginComputeResource(**data)
[ "def", "make_resource", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "return", "SchedulerPluginComputeResource", "(", "*", "*", "data", ")" ]
https://github.com/aws/aws-parallelcluster/blob/f1fe5679a01c524e7ea904c329bd6d17318c6cd9/cli/src/pcluster/schemas/cluster_schema.py#L1060-L1062
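In marshmallow, a make_resource hook like the one above is normally registered with @post_load (the decorator is not visible in this record, so treat that as an assumption); a self-contained sketch of the pattern:

from marshmallow import Schema, fields, post_load

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

class PointSchema(Schema):
    x = fields.Int()
    y = fields.Int()

    @post_load
    def make_resource(self, data, **kwargs):
        # load() now returns a Point instead of a plain dict
        return Point(**data)

print(PointSchema().load({'x': 1, 'y': 2}).x)  # 1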
karanchahal/distiller
a17ec06cbeafcdd2aea19d7c7663033c951392f5
models/vision/mobilenet.py
python
MobileNetV2._forward_impl
(self, x)
return x
[]
def _forward_impl(self, x): # This exists since TorchScript doesn't support inheritance, so the superclass method # (this one) needs to have a name other than `forward` that can be accessed in a subclass x = self.features(x) x = x.mean([2, 3]) x = self.classifier(x) return x
[ "def", "_forward_impl", "(", "self", ",", "x", ")", ":", "# This exists since TorchScript doesn't support inheritance, so the superclass method", "# (this one) needs to have a name other than `forward` that can be accessed in a subclass", "x", "=", "self", ".", "features", "(", "x", ")", "x", "=", "x", ".", "mean", "(", "[", "2", ",", "3", "]", ")", "x", "=", "self", ".", "classifier", "(", "x", ")", "return", "x" ]
https://github.com/karanchahal/distiller/blob/a17ec06cbeafcdd2aea19d7c7663033c951392f5/models/vision/mobilenet.py#L150-L156
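The x.mean([2, 3]) step above is global average pooling; a quick shape check with PyTorch:

import torch

x = torch.randn(2, 1280, 7, 7)      # (N, C, H, W) feature map
pooled = x.mean([2, 3])             # collapse H and W
print(pooled.shape)                 # torch.Size([2, 1280])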
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/notebook-4.0.6-py3.3.egg/notebook/base/handlers.py
python
IPythonHandler.write_error
(self, status_code, **kwargs)
render custom error pages
render custom error pages
[ "render", "custom", "error", "pages" ]
def write_error(self, status_code, **kwargs): """render custom error pages""" exc_info = kwargs.get('exc_info') message = '' status_message = responses.get(status_code, 'Unknown HTTP Error') if exc_info: exception = exc_info[1] # get the custom message, if defined try: message = exception.log_message % exception.args except Exception: pass # construct the custom reason, if defined reason = getattr(exception, 'reason', '') if reason: status_message = reason # build template namespace ns = dict( status_code=status_code, status_message=status_message, message=message, exception=exception, ) self.set_header('Content-Type', 'text/html') # render the template try: html = self.render_template('%s.html' % status_code, **ns) except TemplateNotFound: self.log.debug("No template for %d", status_code) html = self.render_template('error.html', **ns) self.write(html)
[ "def", "write_error", "(", "self", ",", "status_code", ",", "*", "*", "kwargs", ")", ":", "exc_info", "=", "kwargs", ".", "get", "(", "'exc_info'", ")", "message", "=", "''", "status_message", "=", "responses", ".", "get", "(", "status_code", ",", "'Unknown HTTP Error'", ")", "if", "exc_info", ":", "exception", "=", "exc_info", "[", "1", "]", "# get the custom message, if defined", "try", ":", "message", "=", "exception", ".", "log_message", "%", "exception", ".", "args", "except", "Exception", ":", "pass", "# construct the custom reason, if defined", "reason", "=", "getattr", "(", "exception", ",", "'reason'", ",", "''", ")", "if", "reason", ":", "status_message", "=", "reason", "# build template namespace", "ns", "=", "dict", "(", "status_code", "=", "status_code", ",", "status_message", "=", "status_message", ",", "message", "=", "message", ",", "exception", "=", "exception", ",", ")", "self", ".", "set_header", "(", "'Content-Type'", ",", "'text/html'", ")", "# render the template", "try", ":", "html", "=", "self", ".", "render_template", "(", "'%s.html'", "%", "status_code", ",", "*", "*", "ns", ")", "except", "TemplateNotFound", ":", "self", ".", "log", ".", "debug", "(", "\"No template for %d\"", ",", "status_code", ")", "html", "=", "self", ".", "render_template", "(", "'error.html'", ",", "*", "*", "ns", ")", "self", ".", "write", "(", "html", ")" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/notebook-4.0.6-py3.3.egg/notebook/base/handlers.py#L281-L315
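The try-specific-template-then-generic-fallback pattern above, sketched with jinja2 outside of tornado (names and templates here are illustrative):

from jinja2 import DictLoader, Environment, TemplateNotFound

env = Environment(loader=DictLoader({'error.html': 'Error {{ status_code }}'}))

def render_error(status_code, **ns):
    try:
        return env.get_template('%s.html' % status_code).render(**ns)
    except TemplateNotFound:
        return env.get_template('error.html').render(status_code=status_code, **ns)

print(render_error(404))  # Error 404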
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/preview/hosted_numbers/authorization_document/__init__.py
python
AuthorizationDocumentList.get
(self, sid)
return AuthorizationDocumentContext(self._version, sid=sid, )
Constructs an AuthorizationDocumentContext :param sid: AuthorizationDocument sid. :returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
Constructs an AuthorizationDocumentContext
[ "Constructs", "an", "AuthorizationDocumentContext" ]
def get(self, sid): """ Constructs an AuthorizationDocumentContext :param sid: AuthorizationDocument sid. :returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext """ return AuthorizationDocumentContext(self._version, sid=sid, )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "AuthorizationDocumentContext", "(", "self", ".", "_version", ",", "sid", "=", "sid", ",", ")" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/preview/hosted_numbers/authorization_document/__init__.py#L158-L167
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/templates/historic/RLPPTM/helpers.py
python
TestFacilityInfo.apply_method
(self, r, **attr)
return output
Report test facility information @param r: the S3Request instance @param attr: controller attributes
Report test facility information
[ "Report", "test", "facility", "information" ]
def apply_method(self, r, **attr): """ Report test facility information @param r: the S3Request instance @param attr: controller attributes """ if r.http == "POST": if r.representation == "json": output = self.facility_info(r, **attr) else: r.error(415, current.ERROR.BAD_FORMAT) else: r.error(405, current.ERROR.BAD_METHOD) return output
[ "def", "apply_method", "(", "self", ",", "r", ",", "*", "*", "attr", ")", ":", "if", "r", ".", "http", "==", "\"POST\"", ":", "if", "r", ".", "representation", "==", "\"json\"", ":", "output", "=", "self", ".", "facility_info", "(", "r", ",", "*", "*", "attr", ")", "else", ":", "r", ".", "error", "(", "415", ",", "current", ".", "ERROR", ".", "BAD_FORMAT", ")", "else", ":", "r", ".", "error", "(", "405", ",", "current", ".", "ERROR", ".", "BAD_METHOD", ")", "return", "output" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/templates/historic/RLPPTM/helpers.py#L2256-L2272
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/keystone/keystone/common/controller.py
python
filterprotected
(*filters)
return _filterprotected
Wraps filtered API calls with role based access controls (RBAC).
Wraps filtered API calls with role based access controls (RBAC).
[ "Wraps", "filtered", "API", "calls", "with", "role", "based", "access", "controls", "(", "RBAC", ")", "." ]
def filterprotected(*filters): """Wraps filtered API calls with role based access controls (RBAC).""" def _filterprotected(f): @functools.wraps(f) def wrapper(self, context, **kwargs): if not context['is_admin']: action = 'identity:%s' % f.__name__ creds = _build_policy_check_credentials(self, action, context, kwargs) # Now, build the target dict for policy check. We include: # # - Any query filter parameters # - Data from the main url (which will be in the kwargs # parameter) and would typically include the prime key # of a get/update/delete call # # First any query filter parameters target = dict() if len(filters) > 0: for filter in filters: if filter in context['query_string']: target[filter] = context['query_string'][filter] LOG.debug(_('RBAC: Adding query filter params (%s)') % ( ', '.join(['%s=%s' % (filter, target[filter]) for filter in target]))) # Now any formal url parameters for key in kwargs: target[key] = kwargs[key] self.policy_api.enforce(context, creds, action, flatten(target)) LOG.debug(_('RBAC: Authorization granted')) else: LOG.warning(_('RBAC: Bypassing authorization')) return f(self, context, filters, **kwargs) return wrapper return _filterprotected
[ "def", "filterprotected", "(", "*", "filters", ")", ":", "def", "_filterprotected", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "self", ",", "context", ",", "*", "*", "kwargs", ")", ":", "if", "not", "context", "[", "'is_admin'", "]", ":", "action", "=", "'identity:%s'", "%", "f", ".", "__name__", "creds", "=", "_build_policy_check_credentials", "(", "self", ",", "action", ",", "context", ",", "kwargs", ")", "# Now, build the target dict for policy check. We include:", "#", "# - Any query filter parameters", "# - Data from the main url (which will be in the kwargs", "# parameter) and would typically include the prime key", "# of a get/update/delete call", "#", "# First any query filter parameters", "target", "=", "dict", "(", ")", "if", "len", "(", "filters", ")", ">", "0", ":", "for", "filter", "in", "filters", ":", "if", "filter", "in", "context", "[", "'query_string'", "]", ":", "target", "[", "filter", "]", "=", "context", "[", "'query_string'", "]", "[", "filter", "]", "LOG", ".", "debug", "(", "_", "(", "'RBAC: Adding query filter params (%s)'", ")", "%", "(", "', '", ".", "join", "(", "[", "'%s=%s'", "%", "(", "filter", ",", "target", "[", "filter", "]", ")", "for", "filter", "in", "target", "]", ")", ")", ")", "# Now any formal url parameters", "for", "key", "in", "kwargs", ":", "target", "[", "key", "]", "=", "kwargs", "[", "key", "]", "self", ".", "policy_api", ".", "enforce", "(", "context", ",", "creds", ",", "action", ",", "flatten", "(", "target", ")", ")", "LOG", ".", "debug", "(", "_", "(", "'RBAC: Authorization granted'", ")", ")", "else", ":", "LOG", ".", "warning", "(", "_", "(", "'RBAC: Bypassing authorization'", ")", ")", "return", "f", "(", "self", ",", "context", ",", "filters", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "_filterprotected" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/keystone/keystone/common/controller.py#L108-L148
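A stripped-down sketch of the same decorator-factory shape: capture *filters, build a target dict from whitelisted query parameters plus URL kwargs, then hand filters through to the wrapped view (the policy enforcement itself is elided):

import functools

def filterprotected_sketch(*filters):
    def _decorator(f):
        @functools.wraps(f)
        def wrapper(self, context, **kwargs):
            target = {k: context['query_string'][k]
                      for k in filters if k in context['query_string']}
            target.update(kwargs)  # a real version enforces policy on target
            return f(self, context, filters, **kwargs)
        return wrapper
    return _decorator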
Calysto/calysto_scheme
15bf81987870bcae1264e5a0a06feb9a8ee12b8b
calysto_scheme/src/Scheme.py
python
length
(lyst)
return count
[]
def length(lyst): current = lyst count = 0 while isinstance(current, cons): current = current.cdr count += 1 if current != symbol_emptylist: raise Exception("not a proper list") return count
[ "def", "length", "(", "lyst", ")", ":", "current", "=", "lyst", "count", "=", "0", "while", "isinstance", "(", "current", ",", "cons", ")", ":", "current", "=", "current", ".", "cdr", "count", "+=", "1", "if", "current", "!=", "symbol_emptylist", ":", "raise", "Exception", "(", "\"not a proper list\"", ")", "return", "count" ]
https://github.com/Calysto/calysto_scheme/blob/15bf81987870bcae1264e5a0a06feb9a8ee12b8b/calysto_scheme/src/Scheme.py#L245-L253
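A self-contained check of the proper-list semantics, with minimal stand-ins for the module's cons cells and empty-list symbol (the body is a condensed copy of the function above):

class cons:
    def __init__(self, car, cdr):
        self.car, self.cdr = car, cdr

symbol_emptylist = None  # stand-in for the module's empty-list symbol

def length(lyst):
    current, count = lyst, 0
    while isinstance(current, cons):
        current, count = current.cdr, count + 1
    if current is not symbol_emptylist:
        raise Exception("not a proper list")
    return count

print(length(cons(1, cons(2, cons(3, symbol_emptylist)))))  # 3
# length(cons(1, 2)) raises: improper list (cdr is neither cons nor empty)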
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/setuptools/sandbox.py
python
run_setup
(setup_script, args)
Run a distutils setup script, sandboxed in its directory
Run a distutils setup script, sandboxed in its directory
[ "Run", "a", "distutils", "setup", "script", "sandboxed", "in", "its", "directory" ]
def run_setup(setup_script, args): """Run a distutils setup script, sandboxed in its directory""" setup_dir = os.path.abspath(os.path.dirname(setup_script)) with setup_context(setup_dir): try: sys.argv[:] = [setup_script] + list(args) sys.path.insert(0, setup_dir) # reset to include setup dir, w/clean callback list working_set.__init__() working_set.callbacks.append(lambda dist: dist.activate()) # __file__ should be a byte string on Python 2 (#712) dunder_file = ( setup_script if isinstance(setup_script, str) else setup_script.encode(sys.getfilesystemencoding()) ) with DirectorySandbox(setup_dir): ns = dict(__file__=dunder_file, __name__='__main__') _execfile(setup_script, ns) except SystemExit as v: if v.args and v.args[0]: raise
[ "def", "run_setup", "(", "setup_script", ",", "args", ")", ":", "setup_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "setup_script", ")", ")", "with", "setup_context", "(", "setup_dir", ")", ":", "try", ":", "sys", ".", "argv", "[", ":", "]", "=", "[", "setup_script", "]", "+", "list", "(", "args", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "setup_dir", ")", "# reset to include setup dir, w/clean callback list", "working_set", ".", "__init__", "(", ")", "working_set", ".", "callbacks", ".", "append", "(", "lambda", "dist", ":", "dist", ".", "activate", "(", ")", ")", "# __file__ should be a byte string on Python 2 (#712)", "dunder_file", "=", "(", "setup_script", "if", "isinstance", "(", "setup_script", ",", "str", ")", "else", "setup_script", ".", "encode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ")", ")", "with", "DirectorySandbox", "(", "setup_dir", ")", ":", "ns", "=", "dict", "(", "__file__", "=", "dunder_file", ",", "__name__", "=", "'__main__'", ")", "_execfile", "(", "setup_script", ",", "ns", ")", "except", "SystemExit", "as", "v", ":", "if", "v", ".", "args", "and", "v", ".", "args", "[", "0", "]", ":", "raise" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/setuptools/sandbox.py#L234-L257
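Typical invocation, with the path as a placeholder (guarded so the snippet is harmless when the file does not exist); note the SystemExit handling above means a clean sys.exit(0) inside setup.py does not propagate:

import os
from setuptools.sandbox import run_setup

setup_py = '/path/to/pkg/setup.py'  # placeholder
if os.path.exists(setup_py):
    run_setup(setup_py, ['--version'])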
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/lib2to3/fixes/fix_metaclass.py
python
fixup_parse_tree
(cls_node)
one-line classes don't get a suite in the parse tree so we add one to normalize the tree
one-line classes don't get a suite in the parse tree so we add one to normalize the tree
[ "one", "-", "line", "classes", "don", "t", "get", "a", "suite", "in", "the", "parse", "tree", "so", "we", "add", "one", "to", "normalize", "the", "tree" ]
def fixup_parse_tree(cls_node): """ one-line classes don't get a suite in the parse tree so we add one to normalize the tree """ for node in cls_node.children: if node.type == syms.suite: # already in the preferred format, do nothing return # !%@#! oneliners have no suite node, we have to fake one up for i, node in enumerate(cls_node.children): if node.type == token.COLON: break else: raise ValueError("No class suite and no ':'!") # move everything into a suite node suite = Node(syms.suite, []) while cls_node.children[i+1:]: move_node = cls_node.children[i+1] suite.append_child(move_node.clone()) move_node.remove() cls_node.append_child(suite) node = suite
[ "def", "fixup_parse_tree", "(", "cls_node", ")", ":", "for", "node", "in", "cls_node", ".", "children", ":", "if", "node", ".", "type", "==", "syms", ".", "suite", ":", "# already in the preferred format, do nothing", "return", "# !%@#! oneliners have no suite node, we have to fake one up", "for", "i", ",", "node", "in", "enumerate", "(", "cls_node", ".", "children", ")", ":", "if", "node", ".", "type", "==", "token", ".", "COLON", ":", "break", "else", ":", "raise", "ValueError", "(", "\"No class suite and no ':'!\"", ")", "# move everything into a suite node", "suite", "=", "Node", "(", "syms", ".", "suite", ",", "[", "]", ")", "while", "cls_node", ".", "children", "[", "i", "+", "1", ":", "]", ":", "move_node", "=", "cls_node", ".", "children", "[", "i", "+", "1", "]", "suite", ".", "append_child", "(", "move_node", ".", "clone", "(", ")", ")", "move_node", ".", "remove", "(", ")", "cls_node", ".", "append_child", "(", "suite", ")", "node", "=", "suite" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/lib2to3/fixes/fix_metaclass.py#L45-L68
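To see the one-liner case the fixer normalizes, parse class A: pass with lib2to3's driver (lib2to3 was removed in Python 3.13, so this assumes an older interpreter); before fixup, the classdef has no suite child:

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string("class A: pass\n")
print(tree)  # str(tree) round-trips the source; no suite node exists yet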
Lonero-Team/Decentralized-Internet
3cb157834fcc19ff8c2316e66bf07b103c137068
clusterpost/bigchaindb/bigchaindb/lib.py
python
BigchainDB.post_transaction
(self, transaction, mode)
return requests.post(self.endpoint, json=payload)
Submit a valid transaction to the mempool.
Submit a valid transaction to the mempool.
[ "Submit", "a", "valid", "transaction", "to", "the", "mempool", "." ]
def post_transaction(self, transaction, mode): """Submit a valid transaction to the mempool.""" if not mode or mode not in self.mode_list: raise ValidationError('Mode must be one of the following {}.' .format(', '.join(self.mode_list))) tx_dict = transaction.tx_dict if transaction.tx_dict else transaction.to_dict() payload = { 'method': mode, 'jsonrpc': '2.0', 'params': [encode_transaction(tx_dict)], 'id': str(uuid4()) } # TODO: handle connection errors! return requests.post(self.endpoint, json=payload)
[ "def", "post_transaction", "(", "self", ",", "transaction", ",", "mode", ")", ":", "if", "not", "mode", "or", "mode", "not", "in", "self", ".", "mode_list", ":", "raise", "ValidationError", "(", "'Mode must be one of the following {}.'", ".", "format", "(", "', '", ".", "join", "(", "self", ".", "mode_list", ")", ")", ")", "tx_dict", "=", "transaction", ".", "tx_dict", "if", "transaction", ".", "tx_dict", "else", "transaction", ".", "to_dict", "(", ")", "payload", "=", "{", "'method'", ":", "mode", ",", "'jsonrpc'", ":", "'2.0'", ",", "'params'", ":", "[", "encode_transaction", "(", "tx_dict", ")", "]", ",", "'id'", ":", "str", "(", "uuid4", "(", ")", ")", "}", "# TODO: handle connection errors!", "return", "requests", ".", "post", "(", "self", ".", "endpoint", ",", "json", "=", "payload", ")" ]
https://github.com/Lonero-Team/Decentralized-Internet/blob/3cb157834fcc19ff8c2316e66bf07b103c137068/clusterpost/bigchaindb/bigchaindb/lib.py#L80-L94
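The envelope above is plain JSON-RPC 2.0 aimed at a Tendermint broadcast endpoint; a runnable sketch of the payload shape ('broadcast_tx_async' is assumed here as one of the accepted mode strings):

import json
from uuid import uuid4

payload = {
    'method': 'broadcast_tx_async',           # illustrative mode string
    'jsonrpc': '2.0',
    'params': ['<base64-encoded transaction>'],
    'id': str(uuid4()),
}
print(json.dumps(payload, indent=2))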
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/yedit.py
python
Yedit.remove_entry
(data, key, index=None, value=None, sep='.')
remove data at location key
remove data at location key
[ "remove", "data", "at", "location", "key" ]
def remove_entry(data, key, index=None, value=None, sep='.'): ''' remove data at location key ''' if key == '' and isinstance(data, dict): if value is not None: data.pop(value) elif index is not None: raise YeditException("remove_entry for a dictionary does not have an index {}".format(index)) else: data.clear() return True elif key == '' and isinstance(data, list): ind = None if value is not None: try: ind = data.index(value) except ValueError: return False elif index is not None: ind = index else: del data[:] if ind is not None: data.pop(ind) return True if not (key and Yedit.valid_key(key, sep)) and \ isinstance(data, (list, dict)): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key and isinstance(data, dict): data = data.get(dict_key) elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None # process last index for remove # expected list entry if key_indexes[-1][0]: if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 del data[int(key_indexes[-1][0])] return True # expected dict entry elif key_indexes[-1][1]: if isinstance(data, dict): del data[key_indexes[-1][1]] return True
[ "def", "remove_entry", "(", "data", ",", "key", ",", "index", "=", "None", ",", "value", "=", "None", ",", "sep", "=", "'.'", ")", ":", "if", "key", "==", "''", "and", "isinstance", "(", "data", ",", "dict", ")", ":", "if", "value", "is", "not", "None", ":", "data", ".", "pop", "(", "value", ")", "elif", "index", "is", "not", "None", ":", "raise", "YeditException", "(", "\"remove_entry for a dictionary does not have an index {}\"", ".", "format", "(", "index", ")", ")", "else", ":", "data", ".", "clear", "(", ")", "return", "True", "elif", "key", "==", "''", "and", "isinstance", "(", "data", ",", "list", ")", ":", "ind", "=", "None", "if", "value", "is", "not", "None", ":", "try", ":", "ind", "=", "data", ".", "index", "(", "value", ")", "except", "ValueError", ":", "return", "False", "elif", "index", "is", "not", "None", ":", "ind", "=", "index", "else", ":", "del", "data", "[", ":", "]", "if", "ind", "is", "not", "None", ":", "data", ".", "pop", "(", "ind", ")", "return", "True", "if", "not", "(", "key", "and", "Yedit", ".", "valid_key", "(", "key", ",", "sep", ")", ")", "and", "isinstance", "(", "data", ",", "(", "list", ",", "dict", ")", ")", ":", "return", "None", "key_indexes", "=", "Yedit", ".", "parse_key", "(", "key", ",", "sep", ")", "for", "arr_ind", ",", "dict_key", "in", "key_indexes", "[", ":", "-", "1", "]", ":", "if", "dict_key", "and", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "data", ".", "get", "(", "dict_key", ")", "elif", "(", "arr_ind", "and", "isinstance", "(", "data", ",", "list", ")", "and", "int", "(", "arr_ind", ")", "<=", "len", "(", "data", ")", "-", "1", ")", ":", "data", "=", "data", "[", "int", "(", "arr_ind", ")", "]", "else", ":", "return", "None", "# process last index for remove", "# expected list entry", "if", "key_indexes", "[", "-", "1", "]", "[", "0", "]", ":", "if", "isinstance", "(", "data", ",", "list", ")", "and", "int", "(", "key_indexes", "[", "-", "1", "]", "[", "0", "]", ")", "<=", "len", "(", "data", ")", "-", "1", ":", "# noqa: E501", "del", "data", "[", "int", "(", "key_indexes", "[", "-", "1", "]", "[", "0", "]", ")", "]", "return", "True", "# expected dict entry", "elif", "key_indexes", "[", "-", "1", "]", "[", "1", "]", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "del", "data", "[", "key_indexes", "[", "-", "1", "]", "[", "1", "]", "]", "return", "True" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/yedit.py#L287-L341
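A condensed sketch of the dotted key-path walk remove_entry performs, covering only the plain 'a.b.c' dictionary case (no [n] list indexes, unlike the full version above):

def remove_dotted(data, key, sep='.'):
    parts = key.split(sep)
    for p in parts[:-1]:
        data = data.get(p)
        if not isinstance(data, dict):
            return False
    if parts[-1] in data:
        del data[parts[-1]]
        return True
    return False

doc = {'a': {'b': {'c': 1, 'd': 2}}}
print(remove_dotted(doc, 'a.b.c'), doc)  # True {'a': {'b': {'d': 2}}}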
nosmokingbandit/Watcher3
0217e75158b563bdefc8e01c3be7620008cf3977
lib/infi/pkg_resources/_vendor/pyparsing.py
python
traceParseAction
(f)
return z
Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens)))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <<leaving remove_duplicate_chars (ret: 'dfjkls') ['dfjkls']
Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
[ "Decorator", "for", "debugging", "parse", "actions", ".", "When", "the", "parse", "action", "is", "called", "this", "decorator", "will", "print", "C", "{", ">>", "entering", "I", "{", "method", "-", "name", "}", "(", "line", ":", "I", "{", "current_source_line", "}", "I", "{", "parse_location", "}", "I", "{", "matched_tokens", "}", ")", ".", "}", "When", "the", "parse", "action", "completes", "the", "decorator", "will", "print", "C", "{", "<<", "}", "followed", "by", "the", "returned", "value", "or", "any", "exception", "that", "the", "parse", "action", "raised", "." ]
def traceParseAction(f): """ Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens)))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <<leaving remove_duplicate_chars (ret: 'dfjkls') ['dfjkls'] """ f = _trim_arity(f) def z(*paArgs): thisFunc = f.__name__ s,l,t = paArgs[-3:] if len(paArgs)>3: thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) try: ret = f(*paArgs) except Exception as exc: sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) raise sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) return ret try: z.__name__ = f.__name__ except AttributeError: pass return z
[ "def", "traceParseAction", "(", "f", ")", ":", "f", "=", "_trim_arity", "(", "f", ")", "def", "z", "(", "*", "paArgs", ")", ":", "thisFunc", "=", "f", ".", "__name__", "s", ",", "l", ",", "t", "=", "paArgs", "[", "-", "3", ":", "]", "if", "len", "(", "paArgs", ")", ">", "3", ":", "thisFunc", "=", "paArgs", "[", "0", "]", ".", "__class__", ".", "__name__", "+", "'.'", "+", "thisFunc", "sys", ".", "stderr", ".", "write", "(", "\">>entering %s(line: '%s', %d, %r)\\n\"", "%", "(", "thisFunc", ",", "line", "(", "l", ",", "s", ")", ",", "l", ",", "t", ")", ")", "try", ":", "ret", "=", "f", "(", "*", "paArgs", ")", "except", "Exception", "as", "exc", ":", "sys", ".", "stderr", ".", "write", "(", "\"<<leaving %s (exception: %s)\\n\"", "%", "(", "thisFunc", ",", "exc", ")", ")", "raise", "sys", ".", "stderr", ".", "write", "(", "\"<<leaving %s (ret: %r)\\n\"", "%", "(", "thisFunc", ",", "ret", ")", ")", "return", "ret", "try", ":", "z", ".", "__name__", "=", "f", ".", "__name__", "except", "AttributeError", ":", "pass", "return", "z" ]
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/infi/pkg_resources/_vendor/pyparsing.py#L4365-L4404
getsentry/sentry
83b1f25aac3e08075e0e2495bc29efaf35aca18a
src/sentry/models/release.py
python
Release.clear_commits
(self)
Delete all release-specific commit data associated to this release. We will not delete the Commit model values because other releases may use these commits.
Delete all release-specific commit data associated to this release. We will not delete the Commit model values because other releases may use these commits.
[ "Delete", "all", "release", "-", "specific", "commit", "data", "associated", "to", "this", "release", ".", "We", "will", "not", "delete", "the", "Commit", "model", "values", "because", "other", "releases", "may", "use", "these", "commits", "." ]
def clear_commits(self): """ Delete all release-specific commit data associated to this release. We will not delete the Commit model values because other releases may use these commits. """ with sentry_sdk.start_span(op="clear_commits"): from sentry.models import ReleaseCommit, ReleaseHeadCommit ReleaseHeadCommit.objects.get( organization_id=self.organization_id, release=self ).delete() ReleaseCommit.objects.filter( organization_id=self.organization_id, release=self ).delete() self.authors = [] self.commit_count = 0 self.last_commit_id = None self.save()
[ "def", "clear_commits", "(", "self", ")", ":", "with", "sentry_sdk", ".", "start_span", "(", "op", "=", "\"clear_commits\"", ")", ":", "from", "sentry", ".", "models", "import", "ReleaseCommit", ",", "ReleaseHeadCommit", "ReleaseHeadCommit", ".", "objects", ".", "get", "(", "organization_id", "=", "self", ".", "organization_id", ",", "release", "=", "self", ")", ".", "delete", "(", ")", "ReleaseCommit", ".", "objects", ".", "filter", "(", "organization_id", "=", "self", ".", "organization_id", ",", "release", "=", "self", ")", ".", "delete", "(", ")", "self", ".", "authors", "=", "[", "]", "self", ".", "commit_count", "=", "0", "self", ".", "last_commit_id", "=", "None", "self", ".", "save", "(", ")" ]
https://github.com/getsentry/sentry/blob/83b1f25aac3e08075e0e2495bc29efaf35aca18a/src/sentry/models/release.py#L1087-L1104
easezyc/deep-transfer-learning
9af0921f4f21bc2ccea61be53cf8e8a49873d613
UDA/pytorch1.0/DeepCoral/ResNet.py
python
Bottleneck.forward
(self, x)
return out
[]
def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out
[ "def", "forward", "(", "self", ",", "x", ")", ":", "residual", "=", "x", "out", "=", "self", ".", "conv1", "(", "x", ")", "out", "=", "self", ".", "bn1", "(", "out", ")", "out", "=", "self", ".", "relu", "(", "out", ")", "out", "=", "self", ".", "conv2", "(", "out", ")", "out", "=", "self", ".", "bn2", "(", "out", ")", "out", "=", "self", ".", "relu", "(", "out", ")", "out", "=", "self", ".", "conv3", "(", "out", ")", "out", "=", "self", ".", "bn3", "(", "out", ")", "if", "self", ".", "downsample", "is", "not", "None", ":", "residual", "=", "self", ".", "downsample", "(", "x", ")", "out", "+=", "residual", "out", "=", "self", ".", "relu", "(", "out", ")", "return", "out" ]
https://github.com/easezyc/deep-transfer-learning/blob/9af0921f4f21bc2ccea61be53cf8e8a49873d613/UDA/pytorch1.0/DeepCoral/ResNet.py#L74-L94
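A shape check of the residual add, using torchvision's equivalent Bottleneck for a runnable snippet (the record's class mirrors it; with no downsample, input channels must equal planes * expansion = 256):

import torch
from torchvision.models.resnet import Bottleneck

block = Bottleneck(inplanes=256, planes=64)   # identity shortcut
y = block(torch.randn(1, 256, 56, 56))
print(y.shape)  # torch.Size([1, 256, 56, 56])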
Blockstream/satellite
ceb46a00e176c43a6b4170359f6948663a0616bb
blocksatcli/api/demorx.py
python
DemoRx.__init__
(self, server, socks, kbps, tx_event, channel, regions=None, tls_cert=None, tls_key=None)
DemoRx Constructor Args: server : API server address where the order lives. socks : Instances of UdpSock over which to send the packets kbps : Target bit rate in kbps. tx_event : SSE event to use as trigger for transmissions. channel : API channel number. regions : Regions to process and potentially confirm Tx. tls_key : API client key (for Tx confirmation). tls_cert : API client certificate (for Tx confirmation).
DemoRx Constructor
[ "DemoRx", "Constructor" ]
def __init__(self, server, socks, kbps, tx_event, channel, regions=None, tls_cert=None, tls_key=None): """ DemoRx Constructor Args: server : API server address where the order lives. socks : Instances of UdpSock over which to send the packets kbps : Target bit rate in kbps. tx_event : SSE event to use as trigger for transmissions. channel : API channel number. regions : Regions to process and potentially confirm Tx. tls_key : API client key (for Tx confirmation). tls_cert : API client certificate (for Tx confirmation). """ # Validate args assert (isinstance(socks, list)) assert (all([isinstance(x, net.UdpSock) for x in socks])) # Configs self.server = server self.socks = socks self.kbps = kbps self.tx_event = tx_event self.channel = channel self.regions = regions self.tls_cert = tls_cert self.tls_key = tls_key
[ "def", "__init__", "(", "self", ",", "server", ",", "socks", ",", "kbps", ",", "tx_event", ",", "channel", ",", "regions", "=", "None", ",", "tls_cert", "=", "None", ",", "tls_key", "=", "None", ")", ":", "# Validate args", "assert", "(", "isinstance", "(", "socks", ",", "list", ")", ")", "assert", "(", "all", "(", "[", "isinstance", "(", "x", ",", "net", ".", "UdpSock", ")", "for", "x", "in", "socks", "]", ")", ")", "# Configs", "self", ".", "server", "=", "server", "self", ".", "socks", "=", "socks", "self", ".", "kbps", "=", "kbps", "self", ".", "tx_event", "=", "tx_event", "self", ".", "channel", "=", "channel", "self", ".", "regions", "=", "regions", "self", ".", "tls_cert", "=", "tls_cert", "self", ".", "tls_key", "=", "tls_key" ]
https://github.com/Blockstream/satellite/blob/ceb46a00e176c43a6b4170359f6948663a0616bb/blocksatcli/api/demorx.py#L25-L60
Esri/ArcREST
ab240fde2b0200f61d4a5f6df033516e53f2f416
src/arcrest/manageorg/_community.py
python
Group.__init
(self)
loads the property data into the class
loads the property data into the class
[ "loads", "the", "property", "data", "into", "the", "class" ]
def __init(self): """loads the property data into the class""" params = { "f" : "json" } json_dict = self._get(url=self._url, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url) self._json_dict = json_dict self._json = json.dumps(json_dict) attributes = [attr for attr in dir(self) if not attr.startswith('__') and \ not attr.startswith('_')] for k,v in json_dict.items(): if k in attributes: setattr(self, "_"+ k, json_dict[k]) else: print (k, " - attribute not implemented in Group class.")
[ "def", "__init", "(", "self", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "json_dict", "=", "self", ".", "_get", "(", "url", "=", "self", ".", "_url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "proxy_url", "=", "self", ".", "_proxy_url", ")", "self", ".", "_json_dict", "=", "json_dict", "self", ".", "_json", "=", "json", ".", "dumps", "(", "json_dict", ")", "attributes", "=", "[", "attr", "for", "attr", "in", "dir", "(", "self", ")", "if", "not", "attr", ".", "startswith", "(", "'__'", ")", "and", "not", "attr", ".", "startswith", "(", "'_'", ")", "]", "for", "k", ",", "v", "in", "json_dict", ".", "items", "(", ")", ":", "if", "k", "in", "attributes", ":", "setattr", "(", "self", ",", "\"_\"", "+", "k", ",", "json_dict", "[", "k", "]", ")", "else", ":", "print", "(", "k", ",", "\" - attribute not implemented in Group class.\"", ")" ]
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_community.py#L476-L495