Dataset schema (one record per source file; observed value ranges):

    field          dtype    range / classes
    -------------  -------  ----------------------------------------------------------
    repo_name      string   lengths 6 .. 100
    path           string   lengths 4 .. 294
    copies         string   lengths 1 .. 5
    size           string   lengths 4 .. 6
    content        string   lengths 606 .. 896k
    license        string   15 classes
    var_hash       int64    -9,223,186,179,200,150,000 .. 9,223,291,175B
    doc_hash       int64    -9,223,304,365,658,930,000 .. 9,223,309,051B
    line_mean      float64  3.5 .. 99.8
    line_max       int64    13 .. 999
    alpha_frac     float64  0.25 .. 0.97
    autogenerated  bool     1 class
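Records with this schema can be loaded and filtered programmatically. A minimal sketch, assuming the dump has been exported as a local JSON-lines file (the filename code_dump.jsonl is a placeholder, not part of this dump):

    import json

    # Load the per-file records from a JSON-lines export (hypothetical filename).
    records = []
    with open('code_dump.jsonl') as fh:
        for line in fh:
            records.append(json.loads(line))

    # Example filter: keep permissively licensed, human-written files.
    keep = [r for r in records
            if r['license'] == 'bsd-3-clause' and not r['autogenerated']]
    print(len(keep), 'records kept')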

repo_name: librasungirl/openthread
path: tools/harness-automation/cases_R140/leader_9_2_4.py
copies: 18
size: 1877
content:
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
#    names of its contributors may be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

import unittest

from autothreadharness.harness_case import HarnessCase


class Leader_9_2_4(HarnessCase):
    role = HarnessCase.ROLE_LEADER
    case = '9 2 4'
    golden_devices_required = 1

    def on_dialog(self, dialog, title):
        pass


if __name__ == '__main__':
    unittest.main()
license: bsd-3-clause
var_hash: -3,294,191,595,287,015,000
doc_hash: -2,981,387,187,416,250,000
line_mean: 40.711111
line_max: 77
alpha_frac: 0.761854
autogenerated: false
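The per-record statistics can be recomputed from `content`. A sketch under the assumption (not stated anywhere in this dump) that `line_mean` and `line_max` are the mean and maximum line lengths and `alpha_frac` is the fraction of alphabetic characters:

    def content_stats(content):
        """Recompute the assumed per-file statistics from raw source text."""
        lines = content.split('\n')
        lengths = [len(line) for line in lines]
        # Assumed meaning of line_mean / line_max: per-line length statistics.
        line_mean = sum(lengths) / float(len(lengths))
        line_max = max(lengths)
        # Assumed meaning of alpha_frac: share of alphabetic characters.
        alpha_frac = sum(ch.isalpha() for ch in content) / float(len(content))
        return line_mean, line_max, alpha_frac

If that assumption holds, running this on the record above should land near its stored values (40.71, 77, 0.76).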

repo_name: barbagroup/PetIBM
path: examples/ibpm/cylinder2dRe40/scripts/plotVorticity.py
copies: 4
size: 1401
content:
""" Computes, plots, and saves the 2D vorticity field from a PetIBM simulation after 2000 time steps (20 non-dimensional time-units). """ import pathlib import h5py import numpy from matplotlib import pyplot simu_dir = pathlib.Path(__file__).absolute().parents[1] data_dir = simu_dir / 'output' # Read vorticity field and its grid from files. name = 'wz' filepath = data_dir / 'grid.h5' f = h5py.File(filepath, 'r') x, y = f[name]['x'][:], f[name]['y'][:] X, Y = numpy.meshgrid(x, y) timestep = 2000 filepath = data_dir / '{:0>7}.h5'.format(timestep) f = h5py.File(filepath, 'r') wz = f[name][:] # Read body coordinates from file. filepath = simu_dir / 'circle.body' with open(filepath, 'r') as infile: xb, yb = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True, skiprows=1) pyplot.rc('font', family='serif', size=16) # Plot the filled contour of the vorticity. fig, ax = pyplot.subplots(figsize=(6.0, 6.0)) ax.grid() ax.set_xlabel('x') ax.set_ylabel('y') levels = numpy.linspace(-3.0, 3.0, 16) ax.contour(X, Y, wz, levels=levels, colors='black') ax.plot(xb, yb, color='red') ax.set_xlim(-1.0, 4.0) ax.set_ylim(-2.0, 2.0) ax.set_aspect('equal') fig.tight_layout() pyplot.show() # Save figure. fig_dir = simu_dir / 'figures' fig_dir.mkdir(parents=True, exist_ok=True) filepath = fig_dir / 'wz{:0>7}.png'.format(timestep) fig.savefig(str(filepath), dpi=300)
license: bsd-3-clause
var_hash: 5,372,482,012,220,502,000
doc_hash: -2,541,821,106,238,948,400
line_mean: 25.433962
line_max: 74
alpha_frac: 0.665953
autogenerated: false

repo_name: zstyblik/infernal-twin
path: sql_insert.py
copies: 1
size: 3025
content:
import MySQLdb
import db_connect_creds
from datetime import datetime

username, password = db_connect_creds.read_creds()

cxn = MySQLdb.connect('localhost', user=username, passwd=password)
date = datetime.now()
cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
cxn.commit()
cxn.close()

cxn = MySQLdb.connect(db='InfernalWireless')
cur = cxn.cursor()
current_project_id = 0

#~ cxn = MySQLdb.connect('localhost','root',"")
#~
#~ date = datetime.now()
#~
#~ cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
#~
#~ cxn.commit()
#~ cxn.close()
#~
#~ cxn = MySQLdb.connect(db='InfernalWireless')
#~
#~ cur = cxn.cursor()
#~
#~ current_project_id = 0


def create_project_table():
    ############## THIS IS GOING TO CREATE A TABLE FOR PROJECTS
    #~ cur.execute("CREATE TABLE mytable (id AUTO_INCREMENT")
    PROJECT_TITLE = '''CREATE TABLE IF NOT EXISTS Projects (
        ProjectId MEDIUMINT NOT NULL AUTO_INCREMENT,
        ProjectName TEXT,
        PRIMARY KEY (ProjectId),
        AuditorName TEXT,
        TargetName TEXT,
        date TEXT)'''
    cur.execute(PROJECT_TITLE)

create_project_table()


def project_details(projectname, Authors_name, TargetName, date):
    PROJECT_DETAILS = 'INSERT INTO Projects \
(ProjectName, AuditorName, TargetName, date) VALUES \
("%s","%s","%s","%s")' % (projectname, Authors_name, TargetName, date)
    cur.execute(PROJECT_DETAILS)
    current_project_id_tmp = cur.lastrowid
    current_project_id = current_project_id_tmp
    print "report is generated"
    return current_project_id_tmp


def create_report_table():
    ############## THIS IS GOING TO CREATE A TABLE FOR REPORTS
    report_table = '''CREATE TABLE IF NOT EXISTS Reports (
        findingID MEDIUMINT NOT NULL AUTO_INCREMENT,
        finding_name TEXT,
        phase TEXT,
        PRIMARY KEY (findingID),
        risk_level TEXT,
        risk_category TEXT,
        Findings_detail TEXT,
        Notes TEXT,
        Project_fk_Id MEDIUMINT,
        FOREIGN KEY (Project_fk_Id) REFERENCES Projects (ProjectId))'''
    cur.execute(report_table)

create_report_table()


# Note: originally declared with a stray 'self' first parameter, which made
# the module-level example call below raise a TypeError; removed here.
def create_report(finding_name, phase, risk_level, risk_category,
                  Findings_detail, Notes, Project_fk_Id):
    ########## THIS IS GOING TO INSERT DATA INTO FINDINGS TABLE
    pID = current_project_id
    REPORT_DETAILS = 'INSERT INTO Reports \
(finding_name, phase, risk_level, risk_category, Findings_detail, Notes, \
Project_fk_Id) VALUES ("%s","%s","%s","%s","%s","%s","%s")' % (
        finding_name, phase, risk_level, risk_category, Findings_detail,
        Notes, Project_fk_Id)
    cur.execute(REPORT_DETAILS)
    print pID


def print_hello(test_data):
    print test_data

################ DB POPULATE DATABASE ###########
#~ prID = project_details('test','est','23s','12/12/12')
#~
#~ create_report('Title of the finding','Choose a phase','Choose a category','Choose risk level','Enter the findings details','Notes on the findings',int(prID))

################################################ DUMMY DATABASE QUERIES ######
#~ print type(prID)

cur.close()
cxn.commit()
cxn.close()
print "DB has been updated"
license: gpl-3.0
var_hash: 2,102,736,526,878,153,200
doc_hash: -8,710,712,311,219,156,000
line_mean: 25.077586
line_max: 315
alpha_frac: 0.676694
autogenerated: false
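A note on the record above: sql_insert.py builds its INSERT statements with `%` string interpolation, which breaks on embedded quotes and is open to SQL injection. A safer variant of `project_details` using MySQLdb's parameterized `execute` (a sketch, not part of the original file):

    def project_details(projectname, authors_name, target_name, date):
        # Let the driver escape the values instead of interpolating them
        # into the SQL string.
        cur.execute(
            'INSERT INTO Projects (ProjectName, AuditorName, TargetName, date) '
            'VALUES (%s, %s, %s, %s)',
            (projectname, authors_name, target_name, date))
        return cur.lastrowid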

repo_name: javierag/samba
path: python/samba/tests/__init__.py
copies: 3
size: 8238
content:
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

"""Samba Python tests."""

import os
import ldb
import samba
import samba.auth
from samba import param
from samba.samdb import SamDB
import subprocess
import tempfile

samba.ensure_external_module("mimeparse", "mimeparse")
samba.ensure_external_module("extras", "extras")
samba.ensure_external_module("testtools", "testtools")

# Other modules import these two classes from here, for convenience:
from testtools.testcase import (
    TestCase as TesttoolsTestCase,
    TestSkipped,
    )


class TestCase(TesttoolsTestCase):
    """A Samba test case."""

    def setUp(self):
        super(TestCase, self).setUp()
        test_debug_level = os.getenv("TEST_DEBUG_LEVEL")
        if test_debug_level is not None:
            test_debug_level = int(test_debug_level)
            self._old_debug_level = samba.get_debug_level()
            samba.set_debug_level(test_debug_level)
            self.addCleanup(samba.set_debug_level, test_debug_level)

    def get_loadparm(self):
        return env_loadparm()

    def get_credentials(self):
        return cmdline_credentials


class LdbTestCase(TesttoolsTestCase):
    """Trivial test case for running tests against a LDB."""

    def setUp(self):
        super(LdbTestCase, self).setUp()
        self.filename = os.tempnam()
        self.ldb = samba.Ldb(self.filename)

    def set_modules(self, modules=[]):
        """Change the modules for this Ldb."""
        m = ldb.Message()
        m.dn = ldb.Dn(self.ldb, "@MODULES")
        m["@LIST"] = ",".join(modules)
        self.ldb.add(m)
        self.ldb = samba.Ldb(self.filename)


class TestCaseInTempDir(TestCase):

    def setUp(self):
        super(TestCaseInTempDir, self).setUp()
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(self._remove_tempdir)

    def _remove_tempdir(self):
        self.assertEquals([], os.listdir(self.tempdir))
        os.rmdir(self.tempdir)
        self.tempdir = None


def env_loadparm():
    lp = param.LoadParm()
    try:
        lp.load(os.environ["SMB_CONF_PATH"])
    except KeyError:
        raise KeyError("SMB_CONF_PATH not set")
    return lp


def env_get_var_value(var_name):
    """Returns value for variable in os.environ

    Function throws AssertionError if variable is undefined.
    Unit-test based python tests require certain input params
    to be set in environment, otherwise they can't be run.
    """
    assert var_name in os.environ.keys(), "Please supply %s in environment" % var_name
    return os.environ[var_name]


cmdline_credentials = None


class RpcInterfaceTestCase(TestCase):
    """DCE/RPC Test case."""


class ValidNetbiosNameTests(TestCase):

    def test_valid(self):
        self.assertTrue(samba.valid_netbios_name("FOO"))

    def test_too_long(self):
        self.assertFalse(samba.valid_netbios_name("FOO"*10))

    def test_invalid_characters(self):
        self.assertFalse(samba.valid_netbios_name("*BLA"))


class BlackboxProcessError(Exception):
    """This is raised when check_output() process returns a non-zero exit
    status.

    Exception instance should contain the exact exit code (S.returncode),
    command line (S.cmd), process output (S.stdout) and process error stream
    (S.stderr).
    """

    def __init__(self, returncode, cmd, stdout, stderr):
        self.returncode = returncode
        self.cmd = cmd
        self.stdout = stdout
        self.stderr = stderr

    def __str__(self):
        return "Command '%s'; exit status %d; stdout: '%s'; stderr: '%s'" % (
            self.cmd, self.returncode, self.stdout, self.stderr)


class BlackboxTestCase(TestCase):
    """Base test case for blackbox tests."""

    def _make_cmdline(self, line):
        bindir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "../../../../bin"))
        parts = line.split(" ")
        if os.path.exists(os.path.join(bindir, parts[0])):
            parts[0] = os.path.join(bindir, parts[0])
        line = " ".join(parts)
        return line

    def check_run(self, line):
        line = self._make_cmdline(line)
        p = subprocess.Popen(line, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True)
        retcode = p.wait()
        if retcode:
            raise BlackboxProcessError(retcode, line, p.stdout.read(),
                                       p.stderr.read())

    def check_output(self, line):
        line = self._make_cmdline(line)
        p = subprocess.Popen(line, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True,
                             close_fds=True)
        retcode = p.wait()
        if retcode:
            raise BlackboxProcessError(retcode, line, p.stdout.read(),
                                       p.stderr.read())
        return p.stdout.read()


def connect_samdb(samdb_url, lp=None, session_info=None, credentials=None,
                  flags=0, ldb_options=None, ldap_only=False,
                  global_schema=True):
    """Create SamDB instance and connects to samdb_url database.

    :param samdb_url: Url for database to connect to.
    :param lp: Optional loadparm object
    :param session_info: Optional session information
    :param credentials: Optional credentials, defaults to anonymous.
    :param flags: Optional LDB flags
    :param ldap_only: If set, only remote LDAP connection will be created.
    :param global_schema: Whether to use global schema.

    Added value for tests is that we have a shorthand function
    to make proper URL for ldb.connect() while using default
    parameters for connection based on test environment.
    """
    samdb_url = samdb_url.lower()
    if not "://" in samdb_url:
        if not ldap_only and os.path.isfile(samdb_url):
            samdb_url = "tdb://%s" % samdb_url
        else:
            samdb_url = "ldap://%s" % samdb_url
    # use 'paged_search' module when connecting remotely
    if samdb_url.startswith("ldap://"):
        ldb_options = ["modules:paged_searches"]
    elif ldap_only:
        raise AssertionError("Trying to connect to %s while remote "
                             "connection is required" % samdb_url)

    # set defaults for test environment
    if lp is None:
        lp = env_loadparm()
    if session_info is None:
        session_info = samba.auth.system_session(lp)
    if credentials is None:
        credentials = cmdline_credentials

    return SamDB(url=samdb_url,
                 lp=lp,
                 session_info=session_info,
                 credentials=credentials,
                 flags=flags,
                 options=ldb_options,
                 global_schema=global_schema)


def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,
                     flags=0, ldb_options=None, ldap_only=False):
    """Connects to samdb_url database

    :param samdb_url: Url for database to connect to.
    :param lp: Optional loadparm object
    :param session_info: Optional session information
    :param credentials: Optional credentials, defaults to anonymous.
    :param flags: Optional LDB flags
    :param ldap_only: If set, only remote LDAP connection will be created.
    :return: (sam_db_connection, rootDse_record) tuple
    """
    sam_db = connect_samdb(samdb_url, lp, session_info, credentials,
                           flags, ldb_options, ldap_only)
    # fetch RootDse
    res = sam_db.search(base="", expression="", scope=ldb.SCOPE_BASE,
                        attrs=["*"])
    return (sam_db, res[0])


def delete_force(samdb, dn):
    try:
        samdb.delete(dn)
    except ldb.LdbError, (num, _):
        assert(num == ldb.ERR_NO_SUCH_OBJECT)
license: gpl-3.0
var_hash: -4,313,334,816,687,023,000
doc_hash: -6,662,180,440,986,177,000
line_mean: 33.041322
line_max: 110
alpha_frac: 0.645909
autogenerated: false

repo_name: martinbuc/missionplanner
path: packages/IronPython.StdLib.2.7.4/content/Lib/rlcompleter.py
copies: 61
size: 6036
content:
"""Word completion for GNU readline 2.0. This requires the latest extension to the readline module. The completer completes keywords, built-ins and globals in a selectable namespace (which defaults to __main__); when completing NAME.NAME..., it evaluates (!) the expression up to the last dot and completes its attributes. It's very cool to do "import sys" type "sys.", hit the completion key (twice), and see the list of names defined by the sys module! Tip: to use the tab key as the completion key, call readline.parse_and_bind("tab: complete") Notes: - Exceptions raised by the completer function are *ignored* (and generally cause the completion to fail). This is a feature -- since readline sets the tty device in raw (or cbreak) mode, printing a traceback wouldn't work well without some complicated hoopla to save, reset and restore the tty state. - The evaluation of the NAME.NAME... form may cause arbitrary application defined code to be executed if an object with a __getattr__ hook is found. Since it is the responsibility of the application (or the user) to enable this feature, I consider this an acceptable risk. More complicated expressions (e.g. function calls or indexing operations) are *not* evaluated. - GNU readline is also used by the built-in functions input() and raw_input(), and thus these also benefit/suffer from the completer features. Clearly an interactive application can benefit by specifying its own completer function and using raw_input() for all its input. - When the original stdin is not a tty device, GNU readline is never used, and this module (and the readline module) are silently inactive. """ import __builtin__ import __main__ __all__ = ["Completer"] class Completer: def __init__(self, namespace = None): """Create a new completer for the command line. Completer([namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete) """ if namespace and not isinstance(namespace, dict): raise TypeError,'namespace must be a dictionary' # Don't bind to namespace quite yet, but flag whether the user wants a # specific namespace or to use __main__.__dict__. This will allow us # to bind to __main__.__dict__ at completion time, not now. if namespace is None: self.use_main_ns = 1 else: self.use_main_ns = 0 self.namespace = namespace def complete(self, text, state): """Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'. """ if self.use_main_ns: self.namespace = __main__.__dict__ if state == 0: if "." in text: self.matches = self.attr_matches(text) else: self.matches = self.global_matches(text) try: return self.matches[state] except IndexError: return None def _callable_postfix(self, val, word): if hasattr(val, '__call__'): word = word + "(" return word def global_matches(self, text): """Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace that match. 
""" import keyword matches = [] n = len(text) for word in keyword.kwlist: if word[:n] == text: matches.append(word) for nspace in [__builtin__.__dict__, self.namespace]: for word, val in nspace.items(): if word[:n] == text and word != "__builtins__": matches.append(self._callable_postfix(val, word)) return matches def attr_matches(self, text): """Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated. """ import re m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) if not m: return [] expr, attr = m.group(1, 3) try: thisobject = eval(expr, self.namespace) except Exception: return [] # get the content of the object, except __builtins__ words = dir(thisobject) if "__builtins__" in words: words.remove("__builtins__") if hasattr(thisobject, '__class__'): words.append('__class__') words.extend(get_class_members(thisobject.__class__)) matches = [] n = len(attr) for word in words: if word[:n] == attr and hasattr(thisobject, word): val = getattr(thisobject, word) word = self._callable_postfix(val, "%s.%s" % (expr, word)) matches.append(word) return matches def get_class_members(klass): ret = dir(klass) if hasattr(klass,'__bases__'): for base in klass.__bases__: ret = ret + get_class_members(base) return ret try: import readline except ImportError: pass else: readline.set_completer(Completer().complete)
license: gpl-3.0
var_hash: 2,260,912,191,611,114,000
doc_hash: 7,484,371,646,620,429,000
line_mean: 33.505882
line_max: 78
alpha_frac: 0.604374
autogenerated: false

repo_name: jonyroda97/redbot-amigosprovaveis
path: lib/matplotlib/units.py
copies: 2
size: 6084
content:
""" The classes here provide support for using custom classes with matplotlib, e.g., those that do not expose the array interface but know how to convert themselves to arrays. It also supports classes with units and units conversion. Use cases include converters for custom objects, e.g., a list of datetime objects, as well as for objects that are unit aware. We don't assume any particular units implementation; rather a units implementation must provide the register with the Registry converter dictionary and a ConversionInterface. For example, here is a complete implementation which supports plotting with native datetime objects:: import matplotlib.units as units import matplotlib.dates as dates import matplotlib.ticker as ticker import datetime class DateConverter(units.ConversionInterface): @staticmethod def convert(value, unit, axis): 'convert value to a scalar or array' return dates.date2num(value) @staticmethod def axisinfo(unit, axis): 'return major and minor tick locators and formatters' if unit!='date': return None majloc = dates.AutoDateLocator() majfmt = dates.AutoDateFormatter(majloc) return AxisInfo(majloc=majloc, majfmt=majfmt, label='date') @staticmethod def default_units(x, axis): 'return the default unit for x or None' return 'date' # finally we register our object type with a converter units.registry[datetime.date] = DateConverter() """ from __future__ import (absolute_import, division, print_function, unicode_literals) import six from matplotlib.cbook import iterable, is_numlike, safe_first_element import numpy as np class AxisInfo(object): """information to support default axis labeling and tick labeling, and default limits""" def __init__(self, majloc=None, minloc=None, majfmt=None, minfmt=None, label=None, default_limits=None): """ majloc and minloc: TickLocators for the major and minor ticks majfmt and minfmt: TickFormatters for the major and minor ticks label: the default axis label default_limits: the default min, max of the axis if no data is present If any of the above are None, the axis will simply use the default """ self.majloc = majloc self.minloc = minloc self.majfmt = majfmt self.minfmt = minfmt self.label = label self.default_limits = default_limits class ConversionInterface(object): """ The minimal interface for a converter to take custom instances (or sequences) and convert them to values mpl can use """ @staticmethod def axisinfo(unit, axis): 'return an units.AxisInfo instance for axis with the specified units' return None @staticmethod def default_units(x, axis): 'return the default unit for x or None for the given axis' return None @staticmethod def convert(obj, unit, axis): """ convert obj using unit for the specified axis. If obj is a sequence, return the converted sequence. The output must be a sequence of scalars that can be used by the numpy array layer """ return obj @staticmethod def is_numlike(x): """ The matplotlib datalim, autoscaling, locators etc work with scalars which are the units converted to floats given the current unit. The converter may be passed these floats, or arrays of them, even when units are set. Derived conversion interfaces may opt to pass plain-ol unitless numbers through the conversion interface and this is a helper function for them. 
""" if iterable(x): for thisx in x: return is_numlike(thisx) else: return is_numlike(x) class Registry(dict): """ register types with conversion interface """ def __init__(self): dict.__init__(self) self._cached = {} def get_converter(self, x): 'get the converter interface instance for x, or None' if not len(self): return None # nothing registered # DISABLED idx = id(x) # DISABLED cached = self._cached.get(idx) # DISABLED if cached is not None: return cached converter = None classx = getattr(x, '__class__', None) if classx is not None: converter = self.get(classx) if isinstance(x, np.ndarray) and x.size: xravel = x.ravel() try: # pass the first value of x that is not masked back to # get_converter if not np.all(xravel.mask): # some elements are not masked converter = self.get_converter( xravel[np.argmin(xravel.mask)]) return converter except AttributeError: # not a masked_array # Make sure we don't recurse forever -- it's possible for # ndarray subclasses to continue to return subclasses and # not ever return a non-subclass for a single element. next_item = xravel[0] if (not isinstance(next_item, np.ndarray) or next_item.shape != x.shape): converter = self.get_converter(next_item) return converter if converter is None: try: thisx = safe_first_element(x) except (TypeError, StopIteration): pass else: if classx and classx != getattr(thisx, '__class__', None): converter = self.get_converter(thisx) return converter # DISABLED self._cached[idx] = converter return converter registry = Registry()
license: gpl-3.0
var_hash: 5,679,433,629,467,777,000
doc_hash: -8,865,394,975,748,336,000
line_mean: 33.965517
line_max: 78
alpha_frac: 0.607824
autogenerated: false

repo_name: dkerwin/ansible-modules-core
path: network/cumulus/cl_bond.py
copies: 5
size: 15552
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: cl_bond
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bond port on Cumulus Linux
description:
    - Configures a bond interface on Cumulus Linux. To configure a bridge
      port use the cl_bridge module. To configure any other type of interface
      use the cl_interface module. Follow the guidelines for bonding found in
      the Cumulus User Guide at http://docs.cumulusnetworks.com
options:
    name:
        description:
            - name of the interface
        required: true
    alias_name:
        description:
            - add a port description
    ipv4:
        description:
            - list of IPv4 addresses to configure on the interface.
              use X.X.X.X/YY syntax.
    ipv6:
        description:
            - list of IPv6 addresses to configure on the interface.
              use X:X:X::X/YYY syntax
    addr_method:
        description:
            - configures the port to use DHCP.
              To enable this feature use the option 'dhcp'
        choices: ['dhcp']
    mtu:
        description:
            - set MTU. Configure Jumbo Frame by setting MTU to 9000.
    virtual_ip:
        description:
            - define IPv4 virtual IP used by the Cumulus Linux VRR feature
    virtual_mac:
        description:
            - define Ethernet mac associated with Cumulus Linux VRR feature
    vids:
        description:
            - in vlan aware mode, lists vlans defined under the interface
    mstpctl_bpduguard:
        description:
            - Enables BPDU Guard on a port in vlan-aware mode
    mstpctl_portnetwork:
        description:
            - Enables bridge assurance in vlan-aware mode
    mstpctl_portadminedge:
        description:
            - Enables admin edge port
    clag_id:
        description:
            - specify a unique clag_id for every dual connected bond on each
              peer switch. The value must be between 1 and 65535 and must be
              the same on both peer switches in order for the bond to be
              considered dual-connected
    pvid:
        description:
            - in vlan aware mode, defines vlan that is the untagged vlan
    miimon:
        description:
            - mii link monitoring interval
        default: 100
    mode:
        description:
            - bond mode. as of Cumulus Linux 2.5 only LACP bond mode is
              supported
        default: '802.3ad'
    min_links:
        description:
            - minimum number of links
        default: 1
    lacp_bypass_allow:
        description:
            - Enable LACP bypass.
    lacp_bypass_period:
        description:
            - Period for enabling LACP bypass. Max value is 900.
    lacp_bypass_priority:
        description:
            - List of ports and priorities. Example "swp1=10, swp2=20"
    lacp_bypass_all_active:
        description:
            - Activate all interfaces for bypass.
              It is recommended to configure all_active instead
              of using bypass_priority.
    lacp_rate:
        description:
            - lacp rate
        default: 1
    slaves:
        description:
            - bond members
        required: True
    xmit_hash_policy:
        description:
            - transmit load balancing algorithm. As of Cumulus Linux 2.5 only
              layer3+4 policy is supported
        default: layer3+4
    location:
        description:
            - interface directory location
        default:
            - /etc/network/interfaces.d

requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
    - because the module writes the interface directory location, ensure that
      ``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/*'
      or whatever path is mentioned in the ``location`` attribute.

    - For the config to be activated, i.e. installed in the kernel,
      "service networking reload" needs to be executed. See EXAMPLES section.
'''

EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bond interface with IP address
cl_bond: name=bond0 slaves="swp4-5" ipv4=10.1.1.1/24
notify: reload networking

# configure bond as a dual-connected clag bond
cl_bond: name=bond1 slaves="swp1s0 swp2s0" clag_id=1
notify: reload networking

# define cl_bond once in tasks file
# then write interface config in variables file
# with just the options you want.
cl_bond:
  name: "{{ item.key }}"
  slaves: "{{ item.value.slaves }}"
  clag_id: "{{ item.value.clag_id|default(omit) }}"
  ipv4: "{{ item.value.ipv4|default(omit) }}"
  ipv6: "{{ item.value.ipv6|default(omit) }}"
  alias_name: "{{ item.value.alias_name|default(omit) }}"
  addr_method: "{{ item.value.addr_method|default(omit) }}"
  mtu: "{{ item.value.mtu|default(omit) }}"
  vids: "{{ item.value.vids|default(omit) }}"
  virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
  virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
  mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
  mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
  mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
with_dict: cl_bonds
notify: reload networking

# In vars file
# ============
cl_bonds:
  bond0:
    alias_name: 'uplink to isp'
    slaves: ['swp1', 'swp3']
    ipv4: '10.1.1.1/24'
  bond2:
    vids: [1, 50]
    clag_id: 1
'''

RETURN = '''
changed:
    description: whether the interface was changed
    returned: changed
    type: bool
    sample: True

msg:
    description: human-readable report of success or failure
    returned: always
    type: string
    sample: "interface bond0 config updated"
'''


# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
    (_rc, out, _err) = module.run_command(exec_path)
    if _rc > 0:
        if re.search('cannot find interface', _err):
            return '[{}]'
        failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
        module.fail_json(msg=failure_msg)
    else:
        return out


def current_iface_config(module):
    # due to a bug in ifquery, have to check for presence of interface file
    # and not rely solely on ifquery. when bug is fixed, this check can be
    # removed
    _ifacename = module.params.get('name')
    _int_dir = module.params.get('location')
    module.custom_current_config = {}
    if os.path.exists(_int_dir + '/' + _ifacename):
        _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
        module.custom_current_config = module.from_json(
            run_cmd(module, _cmd))[0]


def build_address(module):
    # if addr_method == 'dhcp', dont add IP address
    if module.params.get('addr_method') == 'dhcp':
        return
    _ipv4 = module.params.get('ipv4')
    _ipv6 = module.params.get('ipv6')
    _addresslist = []
    if _ipv4 and len(_ipv4) > 0:
        _addresslist += _ipv4
    if _ipv6 and len(_ipv6) > 0:
        _addresslist += _ipv6
    if len(_addresslist) > 0:
        module.custom_desired_config['config']['address'] = ' '.join(
            _addresslist)


def build_vids(module):
    _vids = module.params.get('vids')
    if _vids and len(_vids) > 0:
        module.custom_desired_config['config']['bridge-vids'] = \
            ' '.join(_vids)


def build_pvid(module):
    _pvid = module.params.get('pvid')
    if _pvid:
        module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)


def conv_bool_to_str(_value):
    if isinstance(_value, bool):
        if _value is True:
            return 'yes'
        else:
            return 'no'
    return _value


def conv_array_to_str(_value):
    if isinstance(_value, list):
        return ' '.join(_value)
    return _value


def build_generic_attr(module, _attr):
    _value = module.params.get(_attr)
    _value = conv_bool_to_str(_value)
    _value = conv_array_to_str(_value)
    if _value:
        module.custom_desired_config['config'][
            re.sub('_', '-', _attr)] = str(_value)


def build_alias_name(module):
    alias_name = module.params.get('alias_name')
    if alias_name:
        module.custom_desired_config['config']['alias'] = alias_name


def build_addr_method(module):
    _addr_method = module.params.get('addr_method')
    if _addr_method:
        module.custom_desired_config['addr_family'] = 'inet'
        module.custom_desired_config['addr_method'] = _addr_method


def build_vrr(module):
    _virtual_ip = module.params.get('virtual_ip')
    _virtual_mac = module.params.get('virtual_mac')
    vrr_config = []
    if _virtual_ip:
        vrr_config.append(_virtual_mac)
        vrr_config.append(_virtual_ip)
        module.custom_desired_config.get('config')['address-virtual'] = \
            ' '.join(vrr_config)


def add_glob_to_array(_bondmems):
    """ goes through each bond member if it sees a dash add glob
    before it
    """
    result = []
    if isinstance(_bondmems, list):
        for _entry in _bondmems:
            if re.search('-', _entry):
                _entry = 'glob ' + _entry
            result.append(_entry)
        return ' '.join(result)
    return _bondmems


def build_bond_attr(module, _attr):
    _value = module.params.get(_attr)
    _value = conv_bool_to_str(_value)
    _value = add_glob_to_array(_value)
    if _value:
        module.custom_desired_config['config'][
            'bond-' + re.sub('_', '-', _attr)] = str(_value)


def build_desired_iface_config(module):
    """
    take parameters defined and build ifupdown2 compatible hash
    """
    module.custom_desired_config = {
        'addr_family': None,
        'auto': True,
        'config': {},
        'name': module.params.get('name')
    }

    for _attr in ['slaves', 'mode', 'xmit_hash_policy',
                  'miimon', 'lacp_rate', 'lacp_bypass_allow',
                  'lacp_bypass_period', 'lacp_bypass_all_active',
                  'min_links']:
        build_bond_attr(module, _attr)

    build_addr_method(module)
    build_address(module)
    build_vids(module)
    build_pvid(module)
    build_alias_name(module)
    build_vrr(module)

    # Note: the original list was missing the comma after
    # 'mstpctl_portadminedge', silently concatenating it with
    # 'mstpctl_bpduguard' into one bogus attribute name; fixed here.
    for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
                  'mstpctl_bpduguard', 'clag_id',
                  'lacp_bypass_priority']:
        build_generic_attr(module, _attr)


def config_dict_changed(module):
    """
    return true if 'config' dict in hash is different
    between desired and current config
    """
    current_config = module.custom_current_config.get('config')
    desired_config = module.custom_desired_config.get('config')
    return current_config != desired_config


def config_changed(module):
    """
    returns true if config has changed
    """
    if config_dict_changed(module):
        return True
    # check if addr_method is changed
    return module.custom_desired_config.get('addr_method') != \
        module.custom_current_config.get('addr_method')


def replace_config(module):
    temp = tempfile.NamedTemporaryFile()
    desired_config = module.custom_desired_config
    # by default it will be something like /etc/network/interfaces.d/swp1
    final_location = module.params.get('location') + '/' + \
        module.params.get('name')
    final_text = ''
    _fh = open(final_location, 'w')
    # make sure to put hash in array or else ifquery will fail
    # write to temp file
    try:
        temp.write(module.jsonify([desired_config]))
        # need to seek to 0 so that data is written to tempfile.
        temp.seek(0)
        _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
        final_text = run_cmd(module, _cmd)
    finally:
        temp.close()

    try:
        _fh.write(final_text)
    finally:
        _fh.close()


def main():
    module = AnsibleModule(
        argument_spec=dict(
            slaves=dict(required=True, type='list'),
            name=dict(required=True, type='str'),
            ipv4=dict(type='list'),
            ipv6=dict(type='list'),
            alias_name=dict(type='str'),
            addr_method=dict(type='str',
                             choices=['', 'dhcp']),
            mtu=dict(type='str'),
            virtual_ip=dict(type='str'),
            virtual_mac=dict(type='str'),
            vids=dict(type='list'),
            pvid=dict(type='str'),
            mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS),
            mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS),
            mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS),
            clag_id=dict(type='str'),
            min_links=dict(type='int', default=1),
            mode=dict(type='str', default='802.3ad'),
            miimon=dict(type='int', default=100),
            xmit_hash_policy=dict(type='str', default='layer3+4'),
            lacp_rate=dict(type='int', default=1),
            lacp_bypass_allow=dict(type='int', choices=[0, 1]),
            lacp_bypass_all_active=dict(type='int', choices=[0, 1]),
            lacp_bypass_priority=dict(type='list'),
            lacp_bypass_period=dict(type='int'),
            location=dict(type='str',
                          default='/etc/network/interfaces.d')
        ),
        mutually_exclusive=[['lacp_bypass_priority',
                             'lacp_bypass_all_active']],
        required_together=[['virtual_ip', 'virtual_mac']]
    )

    # if using the jinja default filter, this resolves to
    # create a list with an empty string ['']. The following
    # checks all lists and removes it, so that functions expecting
    # an empty list, get this result. May upstream this fix into
    # the AnsibleModule code to have it check for this.
    for k, _param in module.params.iteritems():
        if isinstance(_param, list):
            module.params[k] = [x for x in _param if x]

    _location = module.params.get('location')
    if not os.path.exists(_location):
        _msg = "%s does not exist." % (_location)
        module.fail_json(msg=_msg)
        return  # for testing purposes only

    ifacename = module.params.get('name')
    _changed = False
    _msg = "interface %s config not changed" % (ifacename)
    current_iface_config(module)
    build_desired_iface_config(module)
    if config_changed(module):
        replace_config(module)
        _msg = "interface %s config updated" % (ifacename)
        _changed = True
    module.exit_json(changed=_changed, msg=_msg)

# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re

if __name__ == '__main__':
    main()
license: gpl-3.0
var_hash: -1,038,376,414,986,291,200
doc_hash: 6,541,446,850,788,097,000
line_mean: 32.230769
line_max: 80
alpha_frac: 0.615098
autogenerated: false

repo_name: JianyuWang/nova
path: nova/tests/unit/network/security_group/test_neutron_driver.py
copies: 9
size: 18614
content:
# Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
from mox3 import mox
from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from six.moves import range

from nova import context
from nova import exception
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import neutron_driver
from nova import test


class TestNeutronDriver(test.NoDBTestCase):
    def setUp(self):
        super(TestNeutronDriver, self).setUp()
        self.mox.StubOutWithMock(neutronapi, 'get_client')
        self.moxed_client = self.mox.CreateMock(client.Client)
        neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
            self.moxed_client)
        self.context = context.RequestContext('userid', 'my_tenantid')
        setattr(self.context,
                'auth_token',
                'bff4a5a6b9eb4ea2a6efec6eefb77936')

    def test_list_with_project(self):
        project_id = '0af70a4d22cf4652824ddc1f2435dd85'
        security_groups_list = {'security_groups': []}
        self.moxed_client.list_security_groups(
            tenant_id=project_id).AndReturn(security_groups_list)
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        sg_api.list(self.context, project=project_id)

    def test_list_with_all_tenants_and_admin_context(self):
        project_id = '0af70a4d22cf4652824ddc1f2435dd85'
        search_opts = {'all_tenants': 1}
        security_groups_list = {'security_groups': []}
        admin_context = context.RequestContext('user1', project_id, True)
        self.mox.ReplayAll()
        with mock.patch.object(
                self.moxed_client,
                'list_security_groups',
                return_value=security_groups_list) as mock_list_secgroup:
            sg_api = neutron_driver.SecurityGroupAPI()
            sg_api.list(admin_context,
                        project=project_id,
                        search_opts=search_opts)
            mock_list_secgroup.assert_called_once_with()

    def test_list_without_all_tenants_and_admin_context(self):
        project_id = '0af70a4d22cf4652824ddc1f2435dd85'
        security_groups_list = {'security_groups': []}
        admin_context = context.RequestContext('user1', project_id, True)
        self.mox.ReplayAll()
        with mock.patch.object(
                self.moxed_client,
                'list_security_groups',
                return_value=security_groups_list) as mock_list_secgroup:
            sg_api = neutron_driver.SecurityGroupAPI()
            sg_api.list(admin_context, project=project_id)
            mock_list_secgroup.assert_called_once_with(tenant_id=project_id)

    def test_list_with_all_tenants_sec_name_and_admin_context(self):
        project_id = '0af70a4d22cf4652824ddc1f2435dd85'
        search_opts = {'all_tenants': 1}
        security_group_names = ['secgroup_ssh']
        security_groups_list = {'security_groups': []}
        admin_context = context.RequestContext('user1', project_id, True)
        self.mox.ReplayAll()
        with mock.patch.object(
                self.moxed_client,
                'list_security_groups',
                return_value=security_groups_list) as mock_list_secgroup:
            sg_api = neutron_driver.SecurityGroupAPI()
            sg_api.list(admin_context, project=project_id,
                        names=security_group_names,
                        search_opts=search_opts)
            mock_list_secgroup.assert_called_once_with(
                name=security_group_names,
                tenant_id=project_id)

    def test_list_with_all_tenants_sec_name_ids_and_admin_context(self):
        project_id = '0af70a4d22cf4652824ddc1f2435dd85'
        search_opts = {'all_tenants': 1}
        security_group_names = ['secgroup_ssh']
        security_group_ids = ['id1']
        security_groups_list = {'security_groups': []}
        admin_context = context.RequestContext('user1', project_id, True)
        self.mox.ReplayAll()
        with mock.patch.object(
                self.moxed_client,
                'list_security_groups',
                return_value=security_groups_list) as mock_list_secgroup:
            sg_api = neutron_driver.SecurityGroupAPI()
            sg_api.list(admin_context, project=project_id,
                        names=security_group_names,
                        ids=security_group_ids,
                        search_opts=search_opts)
            mock_list_secgroup.assert_called_once_with(
                name=security_group_names,
                id=security_group_ids,
                tenant_id=project_id)

    def test_list_with_all_tenants_not_admin(self):
        search_opts = {'all_tenants': 1}
        security_groups_list = {'security_groups': []}
        self.mox.ReplayAll()
        with mock.patch.object(
                self.moxed_client,
                'list_security_groups',
                return_value=security_groups_list) as mock_list_secgroup:
            sg_api = neutron_driver.SecurityGroupAPI()
            sg_api.list(self.context, project=self.context.tenant,
                        search_opts=search_opts)
            mock_list_secgroup.assert_called_once_with(
                tenant_id=self.context.tenant)

    def test_get_with_name_duplicated(self):
        sg_name = 'web_server'
        expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
        list_security_groups = {'security_groups':
                                [{'name': sg_name,
                                  'id': expected_sg_id,
                                  'tenant_id': self.context.tenant,
                                  'description': 'server',
                                  'rules': []}
                                 ]}
        self.moxed_client.list_security_groups(
            name=sg_name, fields='id',
            tenant_id=self.context.tenant).AndReturn(list_security_groups)

        expected_sg = {'security_group': {'name': sg_name,
                                          'id': expected_sg_id,
                                          'tenant_id': self.context.tenant,
                                          'description': 'server',
                                          'rules': []}}
        self.moxed_client.show_security_group(expected_sg_id).AndReturn(
            expected_sg)
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        observed_sg = sg_api.get(self.context, name=sg_name)
        expected_sg['security_group']['project_id'] = self.context.tenant
        del expected_sg['security_group']['tenant_id']
        self.assertEqual(expected_sg['security_group'], observed_sg)

    def test_get_with_invalid_name(self):
        sg_name = 'invalid_name'
        expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
        list_security_groups = {'security_groups':
                                [{'name': sg_name,
                                  'id': expected_sg_id,
                                  'tenant_id': self.context.tenant,
                                  'description': 'server',
                                  'rules': []}
                                 ]}
        self.moxed_client.list_security_groups(
            name=sg_name, fields='id',
            tenant_id=self.context.tenant).AndReturn(list_security_groups)
        self.moxed_client.show_security_group(expected_sg_id).AndRaise(
            TypeError)
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        self.assertRaises(exception.SecurityGroupNotFound,
                          sg_api.get, self.context, name=sg_name)

    def test_create_security_group_with_bad_request(self):
        name = 'test-security-group'
        description = None
        body = {'security_group': {'name': name,
                                   'description': description}}
        message = "Invalid input. Reason: 'None' is not a valid string."
        self.moxed_client.create_security_group(
            body).AndRaise(n_exc.BadRequest(message=message))
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        self.assertRaises(exception.Invalid,
                          sg_api.create_security_group, self.context, name,
                          description)

    def test_create_security_group_exceed_quota(self):
        name = 'test-security-group'
        description = 'test-security-group'
        body = {'security_group': {'name': name,
                                   'description': description}}
        message = "Quota exceeded for resources: ['security_group']"
        self.moxed_client.create_security_group(
            body).AndRaise(n_exc.NeutronClientException(status_code=409,
                                                        message=message))
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        self.assertRaises(exception.SecurityGroupLimitExceeded,
                          sg_api.create_security_group, self.context, name,
                          description)

    def test_create_security_group_rules_exceed_quota(self):
        vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0',
                'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
                'group_id': None, 'from_port': 1025, 'to_port': 1025}
        body = {'security_group_rules': [{'remote_group_id': None,
                'direction': 'ingress', 'protocol': 'tcp',
                'ethertype': 'IPv4', 'port_range_max': 1025,
                'port_range_min': 1025,
                'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
                'remote_ip_prefix': '0.0.0.0/0'}]}
        name = 'test-security-group'
        message = "Quota exceeded for resources: ['security_group_rule']"
        self.moxed_client.create_security_group_rule(
            body).AndRaise(n_exc.NeutronClientException(status_code=409,
                                                        message=message))
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        self.assertRaises(exception.SecurityGroupLimitExceeded,
                          sg_api.add_rules, self.context, None, name, [vals])

    def test_create_security_group_rules_bad_request(self):
        vals = {'protocol': 'icmp', 'cidr': '0.0.0.0/0',
                'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
                'group_id': None, 'to_port': 255}
        body = {'security_group_rules': [{'remote_group_id': None,
                'direction': 'ingress', 'protocol': 'icmp',
                'ethertype': 'IPv4', 'port_range_max': 255,
                'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
                'remote_ip_prefix': '0.0.0.0/0'}]}
        name = 'test-security-group'
        message = "ICMP code (port-range-max) 255 is provided but ICMP type" \
                  " (port-range-min) is missing"
        self.moxed_client.create_security_group_rule(
            body).AndRaise(n_exc.NeutronClientException(status_code=400,
                                                        message=message))
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        self.assertRaises(exception.Invalid, sg_api.add_rules,
                          self.context, None, name, [vals])

    def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(
            self):
        sg1 = {'description': 'default',
               'id': '07f1362f-34f6-4136-819a-2dcde112269e',
               'name': 'default',
               'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
               'security_group_rules':
                   [{'direction': 'ingress',
                     'ethertype': 'IPv4',
                     'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
                     'port_range_max': None,
                     'port_range_min': None,
                     'protocol': '51',
                     'remote_group_id': None,
                     'remote_ip_prefix': None,
                     'security_group_id':
                         '07f1362f-34f6-4136-819a-2dcde112269e',
                     'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}

        self.moxed_client.list_security_groups().AndReturn(
            {'security_groups': [sg1]})
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        result = sg_api.list(self.context)
        expected = [{'rules':
                     [{'from_port': -1, 'protocol': '51', 'to_port': -1,
                       'parent_group_id':
                           '07f1362f-34f6-4136-819a-2dcde112269e',
                       'cidr': '0.0.0.0/0', 'group_id': None,
                       'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
                     'project_id': 'c166d9316f814891bcb66b96c4c891d6',
                     'id': '07f1362f-34f6-4136-819a-2dcde112269e',
                     'name': 'default', 'description': 'default'}]
        self.assertEqual(expected, result)

    def test_instances_security_group_bindings(self):
        server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
        port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
        port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44'
        sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
        sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
        servers = [{'id': server_id}]
        ports = [{'id': port1_id, 'device_id': server_id,
                  'security_groups': [sg1_id]},
                 {'id': port2_id, 'device_id': server_id,
                  'security_groups': [sg2_id]}]
        port_list = {'ports': ports}
        sg1 = {'id': sg1_id, 'name': 'wol'}
        sg2 = {'id': sg2_id, 'name': 'eor'}
        security_groups_list = {'security_groups': [sg1, sg2]}

        sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]}

        self.moxed_client.list_ports(device_id=[server_id]).AndReturn(
            port_list)
        self.moxed_client.list_security_groups(
            id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn(
                security_groups_list)
        self.mox.ReplayAll()

        sg_api = neutron_driver.SecurityGroupAPI()
        result = sg_api.get_instances_security_groups_bindings(
            self.context, servers)
        self.assertEqual(result, sg_bindings)

    def _test_instances_security_group_bindings_scale(self, num_servers):
        max_query = 150
        sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
        sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
        sg1 = {'id': sg1_id, 'name': 'wol'}
        sg2 = {'id': sg2_id, 'name': 'eor'}
        security_groups_list = {'security_groups': [sg1, sg2]}
        servers = []
        device_ids = []
        ports = []
        sg_bindings = {}
        for i in range(0, num_servers):
            server_id = "server-%d" % i
            port_id = "port-%d" % i
            servers.append({'id': server_id})
            device_ids.append(server_id)
            ports.append({'id': port_id,
                          'device_id': server_id,
                          'security_groups': [sg1_id, sg2_id]})
            sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]

        for x in range(0, num_servers, max_query):
            self.moxed_client.list_ports(
                device_id=device_ids[x:x + max_query]).\
                AndReturn({'ports': ports[x:x + max_query]})

        self.moxed_client.list_security_groups(
            id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn(
                security_groups_list)
        self.mox.ReplayAll()

        sg_api = neutron_driver.SecurityGroupAPI()
        result = sg_api.get_instances_security_groups_bindings(
            self.context, servers)
        self.assertEqual(result, sg_bindings)

    def test_instances_security_group_bindings_less_than_max(self):
        self._test_instances_security_group_bindings_scale(100)

    def test_instances_security_group_bindings_max(self):
        self._test_instances_security_group_bindings_scale(150)

    def test_instances_security_group_bindings_more_then_max(self):
        self._test_instances_security_group_bindings_scale(300)

    def test_instances_security_group_bindings_with_hidden_sg(self):
        servers = [{'id': 'server_1'}]
        ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']},
                 {'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
        port_list = {'ports': ports}
        sg1 = {'id': '1', 'name': 'wol'}
        # User doesn't have access to sg2
        security_groups_list = {'security_groups': [sg1]}

        sg_bindings = {'dev_1': [{'name': 'wol'}]}

        self.moxed_client.list_ports(device_id=['server_1']).AndReturn(
            port_list)
        self.moxed_client.\
            list_security_groups(id=mox.SameElementsAs(['1', '2'])).AndReturn(
                security_groups_list)
        self.mox.ReplayAll()

        sg_api = neutron_driver.SecurityGroupAPI()
        result = sg_api.get_instances_security_groups_bindings(
            self.context, servers)
        self.assertEqual(result, sg_bindings)

    def test_instance_empty_security_groups(self):
        port_list = {'ports': [{'id': 1, 'device_id': '1',
                                'security_groups': []}]}
        self.moxed_client.list_ports(device_id=['1']).AndReturn(port_list)
        self.mox.ReplayAll()
        sg_api = neutron_driver.SecurityGroupAPI()
        result = sg_api.get_instance_security_groups(self.context, '1')
        self.assertEqual([], result)


class TestNeutronDriverWithoutMock(test.NoDBTestCase):

    def test_validate_property(self):
        sg_api = neutron_driver.SecurityGroupAPI()

        sg_api.validate_property('foo', 'name', None)
        sg_api.validate_property('', 'name', None)
        self.assertRaises(exception.Invalid, sg_api.validate_property,
                          'a' * 256, 'name', None)
        self.assertRaises(exception.Invalid, sg_api.validate_property,
                          None, 'name', None)
license: apache-2.0
var_hash: 6,664,272,452,287,406,000
doc_hash: -2,631,396,089,757,433,000
line_mean: 43.961353
line_max: 79
alpha_frac: 0.568819
autogenerated: false

repo_name: kevinlee12/oppia
path: core/domain/draft_upgrade_services_test.py
copies: 1
size: 56055
content:
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for draft upgrade services."""

from __future__ import absolute_import  # pylint: disable=import-only-modules
from __future__ import unicode_literals  # pylint: disable=import-only-modules

from core.domain import draft_upgrade_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.tests import test_utils
import feconf
import python_utils
import utils


class DraftUpgradeUnitTests(test_utils.GenericTestBase):
    """Test the draft upgrade services module."""

    EXP_ID = 'exp_id'
    USER_ID = 'user_id'
    OTHER_CHANGE_LIST = [exp_domain.ExplorationChange({
        'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
        'property_name': 'title',
        'new_value': 'New title'
    })]
    EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
        'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
        'from_version': '0',
        'to_version': python_utils.UNICODE(
            feconf.CURRENT_STATE_SCHEMA_VERSION)
    })]
    DRAFT_CHANGELIST = [exp_domain.ExplorationChange({
        'cmd': 'edit_exploration_property',
        'property_name': 'title',
        'old_value': None,
        'new_value': 'Updated title'})]

    def setUp(self):
        super(DraftUpgradeUnitTests, self).setUp()
        self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)

    def test_try_upgrade_with_no_version_difference(self):
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, 1, self.EXP_ID))

    def test_try_upgrade_raises_exception_if_versions_are_invalid(self):
        with self.assertRaisesRegexp(
            utils.InvalidInputException,
            'Current draft version is greater than the exploration version.'):
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 2, 1, self.EXP_ID)

        exp_services.update_exploration(
            self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
            'Changed exploration title.')
        exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.version, 2)
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))

    def test_try_upgrade_failure_due_to_unsupported_commit_type(self):
        exp_services.update_exploration(
            self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
            'Changed exploration title.')
        exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.version, 2)
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))

    def test_try_upgrade_failure_due_to_unimplemented_upgrade_methods(self):
        exp_services.update_exploration(
            self.USER_ID, self.EXP_ID, self.EXP_MIGRATION_CHANGE_LIST,
            'Ran Exploration Migration job.')
        exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.version, 2)
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))

    def test_extract_html_from_draft_change_list(self):
        html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&amp;quot;raw_latex&amp;quot;: &amp;quot;+,-,-,+&amp;quot;, &'
            'amp;quot;svg_filename&amp;quot;: &amp;quot;&amp;quot;}"></oppia'
            '-noninteractive-math>')
        draft_change_list = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'state2',
                'property_name': 'widget_customization_args',
                'new_value': {
                    'choices': {
                        'value': [
                            '<p>1</p>', '<p>2</p>', html_content, '<p>4</p>'
                        ]
                    },
                    'maxAllowableSelectionCount': {'value': 1},
                    'minAllowableSelectionCount': {'value': 1}
                }
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': 'answer_groups',
                'state_name': 'State 1',
                'new_value': [{
                    'rule_specs': [
                        {'rule_type': 'Equals',
                         'inputs': {'x': [html_content]}},
                        {'rule_type': 'ContainsAtLeastOneOf',
                         'inputs': {'x': [html_content]}},
                        {'rule_type': 'IsProperSubsetOf',
                         'inputs': {'x': [html_content]}},
                        {'rule_type': 'DoesNotContainAtLeastOneOf',
                         'inputs': {'x': [html_content]}},
                        {'rule_type': 'Equals',
                         'inputs': {'x': 1}},
                        {'rule_type': 'HasElementXAtPositionY',
                         'inputs': {'x': html_content, 'y': 2}},
                        {'rule_type': 'IsEqualToOrdering',
                         'inputs': {'x': [[html_content]]}},
                        {'rule_type': 'HasElementXBeforeElementY',
                         'inputs': {'x': html_content, 'y': html_content}},
                        {'rule_type': (
                            'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
                         'inputs': {'x': [[html_content]]}}],
                    'outcome': {
                        'dest': 'Introduction',
                        'feedback': {
                            'content_id': 'feedback',
                            'html': html_content},
                        'param_changes': [],
                        'labelled_as_correct': False,
                        'refresher_exploration_id': None,
                        'missing_prerequisite_skill_id': None
                    },
                    'training_data': [],
                    'tagged_skill_misconception_id': None
                }]
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': {
                    'content_id': 'content',
                    'html': html_content}
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'written_translations',
                'new_value': {
                    'translations_mapping': {
                        'content1': {
                            'en': {
                                'data_format': 'html',
                                'translation': html_content,
                                'needs_update': True},
                            'hi': {
                                'data_format': 'html',
                                'translation': 'Hey!',
                                'needs_update': False}},
                        'feedback_1': {
                            'hi': {
                                'data_format': 'html',
                                'translation': html_content,
                                'needs_update': False},
                            'en': {
                                'data_format': 'html',
                                'translation': 'hello!',
                                'needs_update': False}}}}
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'solution',
                'new_value': {
                    'answer_is_exclusive': False,
                    'correct_answer': 'helloworld!',
                    'explanation': {
                        'content_id': 'solution',
                        'html': html_content},
                }
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'solution',
                'new_value': {
                    'answer_is_exclusive': True,
                    'correct_answer': [
                        [html_content], ['<p>2</p>'],
                        ['<p>3</p>'], ['<p>4</p>']],
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is solution for state1</p>'}
                }
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'default_outcome',
                'new_value': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_content},
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                }
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'hints',
                'new_value': [{
                    'hint_content': {
                        'content_id': 'hint1',
                        'html': html_content}
                }]
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_RENAME_STATE,
                'old_state_name': 'Intro',
                'new_state_name': 'Introduction',
            })
        ]
        list_of_html = (
            draft_upgrade_services.extract_html_from_draft_change_list(
                draft_change_list))
        self.assertEqual(len(list_of_html), 27)
        expected_html_strings = [
            html_content, '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>',
            '<p>This is solution for state1</p>', 'Hey!', 'hello!']
        for html in list_of_html:
            self.assertTrue(html in expected_html_strings)


class DraftUpgradeUtilUnitTests(test_utils.GenericTestBase):
    """Test the DraftUpgradeUtil module."""

    EXP_ID = 'exp_id'
    USER_ID = 'user_id'
    EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
        'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
        'from_version': '34',
        'to_version': '35'
    })]
    # EXP_ID and USER_ID used to create default explorations.
    EXP_ID = 'exp_id'
    USER_ID = 'user_id'

    def create_and_migrate_new_exploration(
            self, current_schema_version, target_schema_version):
        """Creates an exploration and applies a state schema migration to it.

        Creates an exploration and migrates its state schema from version
        current_schema_version to target_schema_version. Asserts that the
        exploration was successfully migrated.

        Args:
            current_schema_version: string. The current schema version of the
                exploration (eg. '29').
            target_schema_version: string. The schema version to upgrade the
                exploration to (eg. '30').
        """
        # Create an exploration change list with the command that will migrate
        # the schema from current_schema_version to target_schema_version.
        exp_migration_change_list = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
                'from_version': current_schema_version,
                'to_version': target_schema_version
            })
        ]

        # The migration will automatically migrate the exploration to the
        # latest state schema version, so we set the latest schema version to
        # be the target_schema_version.
        with self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION',
            int(target_schema_version)):

            # Create and migrate the exploration.
            self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
            exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
            exp_services.update_exploration(
                self.USER_ID, self.EXP_ID, exp_migration_change_list,
                'Ran Exploration Migration job.')

            # Assert that the update was applied and that the exploration
            # state schema was successfully updated.
            exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
            self.assertEqual(exploration.version, 2)
            self.assertEqual(
                python_utils.UNICODE(
                    exploration.states_schema_version),
                target_schema_version)

    def test_convert_to_latest_schema_version_implemented(self):
        state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION
        conversion_fn_name = '_convert_states_v%s_dict_to_v%s_dict' % (
            state_schema_version - 1, state_schema_version)
        self.assertTrue(
            hasattr(
                draft_upgrade_services.DraftUpgradeUtil, conversion_fn_name),
            msg='Current schema version is %d but DraftUpgradeUtil.%s is '
            'unimplemented.' % (state_schema_version, conversion_fn_name))

    def test_convert_states_v36_dict_to_v37_dict(self):
        draft_change_list_v36 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            }),
            exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': 'Intro',
                'property_name': 'answer_groups',
                'new_value': [{
                    'rule_specs': [{
                        'rule_type': 'CaseSensitiveEquals',
                        'inputs': {'x': 'test'}
                    }],
                    'outcome': {
                        'dest': 'Introduction',
                        'feedback': {
                            'content_id': 'feedback',
                            'html': '<p>Content</p>'},
                        'param_changes': [],
                        'labelled_as_correct': False,
                        'refresher_exploration_id': None,
                        'missing_prerequisite_skill_id': None
                    },
                    'training_data': [],
                    'tagged_skill_misconception_id': None
                }]
            })
        ]
        draft_change_list_v37 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            }),
            exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': 'Intro',
                'property_name': 'answer_groups',
                'new_value': [{
                    'rule_specs': [{
                        'rule_type': 'Equals',
                        'inputs': {'x': 'test'}
                    }],
                    'outcome': {
                        'dest': 'Introduction',
                        'feedback': {
                            'content_id': 'feedback',
                            'html': '<p>Content</p>'},
                        'param_changes': [],
                        'labelled_as_correct': False,
                        'refresher_exploration_id': None,
                        'missing_prerequisite_skill_id': None
                    },
                    'training_data': [],
                    'tagged_skill_misconception_id': None
                }]
            })
        ]
        # Migrate exploration to state schema version 37.
        self.create_and_migrate_new_exploration('36', '37')
        migrated_draft_change_list_v37 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v36, 1, 2, self.EXP_ID))
        # Change draft change lists into a list of dicts so that it is
        # easy to compare the whole draft change list.
        draft_change_list_v37_dict_list = [
            change.to_dict() for change in draft_change_list_v37
        ]
        migrated_draft_change_list_v37_dict_list = [
            change.to_dict() for change in migrated_draft_change_list_v37
        ]
        self.assertEqual(
            draft_change_list_v37_dict_list,
            migrated_draft_change_list_v37_dict_list)

    def test_convert_states_v35_dict_to_v36_dict(self):
        draft_change_list_1_v35 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'widget_id',
                'new_value': 'MathExpressionInput'
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'widget_customization_args',
                'new_value': {}
            })
        ]
        draft_change_list_2_v35 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'widget_id',
                'new_value': 'MathExpressionInput'
            })
        ]
        # Migrate exploration to state schema version 36.
self.create_and_migrate_new_exploration('35', '36') migrated_draft_change_list_1_v36 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_1_v35, 1, 2, self.EXP_ID)) self.assertIsNone(migrated_draft_change_list_1_v36) migrated_draft_change_list_2_v36 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_2_v35, 1, 2, self.EXP_ID)) # Change draft change lists into a list of dicts so that it is # easy to compare the whole draft change list. draft_change_list_2_v35_dict_list = [ change.to_dict() for change in draft_change_list_2_v35 ] migrated_draft_change_list_2_v36_dict_list = [ change.to_dict() for change in migrated_draft_change_list_2_v36 ] self.assertEqual( draft_change_list_2_v35_dict_list, migrated_draft_change_list_2_v36_dict_list) def test_convert_states_v34_dict_to_v35_dict(self): draft_change_list_1_v34 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'content', 'new_value': 'new value' }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'widget_id', 'new_value': 'MathExpressionInput' }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'answer_groups', 'new_value': [{ 'rule_specs': [{ 'rule_type': 'IsMathematicallyEquivalentTo', 'inputs': { 'x': 'x+y/2' } }], 'outcome': { 'dest': 'Introduction', 'feedback': { 'content_id': 'feedback', 'html': '<p>Content</p>' }, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None }, 'training_data': [], 'tagged_skill_misconception_id': None }] }) ] draft_change_list_2_v34 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'content', 'new_value': 'new value' }) ] # Migrate exploration to state schema version 35. self.create_and_migrate_new_exploration('34', '35') migrated_draft_change_list_1_v35 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_1_v34, 1, 2, self.EXP_ID)) self.assertIsNone(migrated_draft_change_list_1_v35) migrated_draft_change_list_2_v35 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_2_v34, 1, 2, self.EXP_ID)) # Change draft change lists into a list of dicts so that it is # easy to compare the whole draft change list. 
draft_change_list_2_v34_dict_list = [ change.to_dict() for change in draft_change_list_2_v34 ] migrated_draft_change_list_2_v35_dict_list = [ change.to_dict() for change in migrated_draft_change_list_2_v35 ] self.assertEqual( draft_change_list_2_v34_dict_list, migrated_draft_change_list_2_v35_dict_list) def test_convert_states_v33_dict_to_v34_dict(self): html_content = ( '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a' 'mp;quot;+,-,-,+&amp;quot;"></oppia-noninteractive-math>') expected_html_content = ( '<p>Value</p><oppia-noninteractive-math math_content-with-value=' '"{&amp;quot;raw_latex&amp;quot;: &amp;quot;+,-,-,+&amp;quot;, &' 'amp;quot;svg_filename&amp;quot;: &amp;quot;&amp;quot;}"></oppia' '-noninteractive-math>') draft_change_list = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'state2', 'property_name': 'widget_customization_args', 'new_value': { 'choices': { 'value': [ '<p>1</p>', '<p>2</p>', html_content, '<p>4</p>' ] }, 'maxAllowableSelectionCount': { 'value': 1 }, 'minAllowableSelectionCount': { 'value': 1 } } }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': 'answer_groups', 'state_name': 'State 1', 'new_value': [{ 'rule_specs': [{ 'rule_type': 'Equals', 'inputs': { 'x': [html_content] } }, { 'rule_type': 'ContainsAtLeastOneOf', 'inputs': { 'x': [html_content] } }, { 'rule_type': 'IsProperSubsetOf', 'inputs': { 'x': [html_content] } }, { 'rule_type': 'DoesNotContainAtLeastOneOf', 'inputs': { 'x': [html_content] } }, { 'rule_type': 'Equals', 'inputs': { 'x': 1 } }, { 'rule_type': 'HasElementXAtPositionY', 'inputs': { 'x': html_content, 'y': 2 } }, { 'rule_type': 'IsEqualToOrdering', 'inputs': { 'x': [[html_content]] } }, { 'rule_type': 'HasElementXBeforeElementY', 'inputs': { 'x': html_content, 'y': html_content } }, { 'rule_type': ( 'IsEqualToOrderingWithOneItemAtIncorrectPosition'), 'inputs': { 'x': [[html_content]] } }], 'outcome': { 'dest': 'Introduction', 'feedback': { 'content_id': 'feedback', 'html': html_content }, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None }, 'training_data': [], 'tagged_skill_misconception_id': None }] }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'content', 'new_value': { 'content_id': 'content', 'html': html_content } }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'written_translations', 'new_value': { 'translations_mapping': { 'content1': { 'en': { 'html': html_content, 'needs_update': True }, 'hi': { 'html': 'Hey!', 'needs_update': False } }, 'feedback_1': { 'hi': { 'html': html_content, 'needs_update': False }, 'en': { 'html': 'hello!', 'needs_update': False } } } } }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'solution', 'new_value': { 'answer_is_exclusive': False, 'correct_answer': 'helloworld!', 'explanation': { 'content_id': 'solution', 'html': html_content }, } }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'solution', 'new_value': { 'answer_is_exclusive': True, 'correct_answer': [ [html_content], ['<p>2</p>'], ['<p>3</p>'], ['<p>4</p>'] ], 'explanation': { 'content_id': 'solution', 'html': '<p>This is solution for state1</p>' } } }), exp_domain.ExplorationChange({ 'cmd': 
exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'default_outcome', 'new_value': { 'param_changes': [], 'feedback': { 'content_id': 'default_outcome', 'html': html_content }, 'dest': 'Introduction', 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False } }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'hints', 'new_value': [{ 'hint_content': { 'content_id': 'hint1', 'html': html_content } }] }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_RENAME_STATE, 'old_state_name': 'Intro', 'new_state_name': 'Introduction', }) ] self.create_and_migrate_new_exploration('33', '34') migrated_draft_change_list = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list, 1, 2, self.EXP_ID)) self.assertEqual( migrated_draft_change_list[0].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'state2', 'property_name': 'widget_customization_args', 'new_value': { 'choices': { 'value': [ '<p>1</p>', '<p>2</p>', expected_html_content, '<p>4</p>' ] }, 'maxAllowableSelectionCount': { 'value': 1 }, 'minAllowableSelectionCount': { 'value': 1 } } }).to_dict()) self.assertEqual( migrated_draft_change_list[1].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': 'answer_groups', 'state_name': 'State 1', 'new_value': [{ 'rule_specs': [{ 'rule_type': 'Equals', 'inputs': { 'x': [expected_html_content] } }, { 'rule_type': 'ContainsAtLeastOneOf', 'inputs': { 'x': [expected_html_content] } }, { 'rule_type': 'IsProperSubsetOf', 'inputs': { 'x': [expected_html_content] } }, { 'rule_type': 'DoesNotContainAtLeastOneOf', 'inputs': { 'x': [expected_html_content] } }, { 'rule_type': 'Equals', 'inputs': { 'x': 1 } }, { 'rule_type': 'HasElementXAtPositionY', 'inputs': { 'x': expected_html_content, 'y': 2 } }, { 'rule_type': 'IsEqualToOrdering', 'inputs': { 'x': [[expected_html_content]] } }, { 'rule_type': 'HasElementXBeforeElementY', 'inputs': { 'x': expected_html_content, 'y': expected_html_content } }, { 'rule_type': ( 'IsEqualToOrderingWithOneItemAtIncorrectPosition'), 'inputs': { 'x': [[expected_html_content]] } }], 'outcome': { 'dest': 'Introduction', 'feedback': { 'content_id': 'feedback', 'html': expected_html_content }, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None }, 'training_data': [], 'tagged_skill_misconception_id': None }] }).to_dict()) self.assertEqual( migrated_draft_change_list[2].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'content', 'new_value': { 'content_id': 'content', 'html': expected_html_content } }).to_dict()) self.assertEqual( migrated_draft_change_list[3].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'written_translations', 'new_value': { 'translations_mapping': { 'content1': { 'en': { 'html': expected_html_content, 'needs_update': True }, 'hi': { 'html': 'Hey!', 'needs_update': False } }, 'feedback_1': { 'hi': { 'html': expected_html_content, 'needs_update': False }, 'en': { 'html': 'hello!', 'needs_update': False } } } } }).to_dict()) self.assertEqual( migrated_draft_change_list[4].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 
'solution', 'new_value': { 'answer_is_exclusive': False, 'correct_answer': 'helloworld!', 'explanation': { 'content_id': 'solution', 'html': expected_html_content }, } }).to_dict()) self.assertEqual( migrated_draft_change_list[5].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'solution', 'new_value': { 'answer_is_exclusive': True, 'correct_answer': [ [expected_html_content], ['<p>2</p>'], ['<p>3</p>'], ['<p>4</p>'] ], 'explanation': { 'content_id': 'solution', 'html': '<p>This is solution for state1</p>' } } }).to_dict()) self.assertEqual( migrated_draft_change_list[6].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'default_outcome', 'new_value': { 'param_changes': [], 'feedback': { 'content_id': 'default_outcome', 'html': expected_html_content }, 'dest': 'Introduction', 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False } }).to_dict()) self.assertEqual( migrated_draft_change_list[7].to_dict(), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'hints', 'new_value': [{ 'hint_content': { 'content_id': 'hint1', 'html': expected_html_content } }] }).to_dict()) def test_convert_states_v32_dict_to_v33_dict(self): draft_change_list_v32 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'state1', 'property_name': 'widget_customization_args', 'new_value': { 'choices': { 'value': [ '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>' ] } } }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'state2', 'property_name': 'widget_customization_args', 'new_value': { 'choices': { 'value': [ '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>' ] }, 'maxAllowableSelectionCount': { 'value': 1 }, 'minAllowableSelectionCount': { 'value': 1 } } }) ] # Version 33 adds a showChoicesInShuffledOrder bool, which doesn't # impact the second ExplorationChange because it will only impact # it if 'choices' is the only key for new_value. expected_draft_change_list_v33 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'state1', 'property_name': 'widget_customization_args', 'new_value': { 'choices': { 'value': [ '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>' ] }, 'showChoicesInShuffledOrder': { 'value': False } } }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'state2', 'property_name': 'widget_customization_args', 'new_value': { 'choices': { 'value': [ '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>' ] }, 'maxAllowableSelectionCount': { 'value': 1 }, 'minAllowableSelectionCount': { 'value': 1 } } }) ] # Migrate exploration to state schema version 33. self.create_and_migrate_new_exploration('32', '33') # Migrate the draft change list's state schema to the migrated # exploration's schema. migrated_draft_change_list_v33 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_v32, 1, 2, self.EXP_ID) ) # Change draft change lists into a list of dicts so that it is # easy to compare the whole draft change list. 
expected_draft_change_list_v33_dict_list = [ change.to_dict() for change in expected_draft_change_list_v33 ] migrated_draft_change_list_v33_dict_list = [ change.to_dict() for change in migrated_draft_change_list_v33 ] self.assertEqual( expected_draft_change_list_v33_dict_list, migrated_draft_change_list_v33_dict_list) def test_convert_states_v31_dict_to_v32_dict(self): draft_change_list_v31 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'content', 'new_value': 'new value' }) ] # Migrate exploration to state schema version 32. self.create_and_migrate_new_exploration('31', '32') # Migrate the draft change list's state schema to the migrated # exploration's schema. In this case there are no changes to the # draft change list since version 32 adds a customization arg # for the "Add" button text in SetInput interaction for the # exploration, for which there should be no changes to drafts. migrated_draft_change_list_v32 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_v31, 1, 2, self.EXP_ID) ) # Change draft change lists into a list of dicts so that it is # easy to compare the whole draft change list. draft_change_list_v31_dict_list = [ change.to_dict() for change in draft_change_list_v31 ] migrated_draft_change_list_v32_dict_list = [ change.to_dict() for change in migrated_draft_change_list_v32 ] self.assertEqual( draft_change_list_v31_dict_list, migrated_draft_change_list_v32_dict_list) def test_convert_states_v30_dict_to_v31_dict(self): draft_change_list_v30 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'recorded_voiceovers', 'new_value': { 'voiceovers_mapping': { 'content': { 'en': { 'file_size_name': 100, 'filename': 'atest.mp3', 'needs_update': False } } } } }) ] # Version 31 adds the duration_secs property. expected_draft_change_list_v31 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'Intro', 'property_name': 'recorded_voiceovers', 'new_value': { 'voiceovers_mapping': { 'content': { 'en': { 'file_size_name': 100, 'filename': 'atest.mp3', 'needs_update': False, 'duration_secs': 0.0 } } } } }) ] # Migrate exploration to state schema version 31. self.create_and_migrate_new_exploration('30', '31') # Migrate the draft change list's state schema to the migrated # exploration's schema. migrated_draft_change_list_v31 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_v30, 1, 2, self.EXP_ID) ) # Change draft change lists into a list of dicts so that it is # easy to compare the whole draft change list. 
        expected_draft_change_list_v31_dict_list = [
            change.to_dict()
            for change in expected_draft_change_list_v31
        ]
        migrated_draft_change_list_v31_dict_list = [
            change.to_dict()
            for change in migrated_draft_change_list_v31
        ]
        self.assertEqual(
            expected_draft_change_list_v31_dict_list,
            migrated_draft_change_list_v31_dict_list)

    def test_convert_states_v29_dict_to_v30_dict(self):
        draft_change_list_v29 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': 'answer_groups',
                'state_name': 'State 1',
                'new_value': {
                    'rule_specs': [{
                        'rule_type': 'Equals',
                        'inputs': {'x': [
                            '<p>This is value1 for ItemSelection</p>'
                        ]}
                    }, {
                        'rule_type': 'Equals',
                        'inputs': {'x': [
                            '<p>This is value2 for ItemSelection</p>'
                        ]}
                    }],
                    'outcome': {
                        'dest': 'Introduction',
                        'feedback': {
                            'content_id': 'feedback',
                            'html': '<p>Outcome for state1</p>'
                        },
                        'param_changes': [],
                        'labelled_as_correct': False,
                        'refresher_exploration_id': None,
                        'missing_prerequisite_skill_id': None
                    },
                    'training_data': [],
                    'tagged_misconception_id': None
                }
            })
        ]
        # Version 30 replaces the tagged_misconception_id in version 29
        # with tagged_skill_misconception_id.
        expected_draft_change_list_v30 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': 'answer_groups',
                'state_name': 'State 1',
                'new_value': {
                    'rule_specs': [{
                        'rule_type': 'Equals',
                        'inputs': {'x': [
                            '<p>This is value1 for ItemSelection</p>'
                        ]}
                    }, {
                        'rule_type': 'Equals',
                        'inputs': {'x': [
                            '<p>This is value2 for ItemSelection</p>'
                        ]}
                    }],
                    'outcome': {
                        'dest': 'Introduction',
                        'feedback': {
                            'content_id': 'feedback',
                            'html': '<p>Outcome for state1</p>'
                        },
                        'param_changes': [],
                        'labelled_as_correct': False,
                        'refresher_exploration_id': None,
                        'missing_prerequisite_skill_id': None
                    },
                    'training_data': [],
                    'tagged_skill_misconception_id': None
                }
            })
        ]
        # Migrate exploration to state schema version 30.
        self.create_and_migrate_new_exploration('29', '30')
        # Migrate the draft change list's state schema to the migrated
        # exploration's schema.
        migrated_draft_change_list_v30 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v29, 1, 2, self.EXP_ID)
        )
        # Change draft change lists into a list of dicts so that it is
        # easy to compare the whole draft change list.
        expected_draft_change_list_v30_dict_list = [
            change.to_dict()
            for change in expected_draft_change_list_v30
        ]
        migrated_draft_change_list_v30_dict_list = [
            change.to_dict()
            for change in migrated_draft_change_list_v30
        ]
        self.assertEqual(
            expected_draft_change_list_v30_dict_list,
            migrated_draft_change_list_v30_dict_list)

    def test_convert_states_v28_dict_to_v29_dict(self):
        draft_change_list_v28 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            })
        ]
        # Migrate exploration to state schema version 29.
        self.create_and_migrate_new_exploration('28', '29')
        # Migrate the draft change list's state schema to the migrated
        # exploration's schema. In this case there are no changes to the
        # draft change list since version 29 adds the
        # solicit_answer_details boolean variable to the exploration
        # state, for which there should be no changes to drafts.
        migrated_draft_change_list_v29 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v28, 1, 2, self.EXP_ID)
        )
        # Change draft change lists into a list of dicts so that it is
        # easy to compare the whole draft change list.
draft_change_list_v28_dict_list = [ change.to_dict() for change in draft_change_list_v28 ] migrated_draft_change_list_v29_dict_list = [ change.to_dict() for change in migrated_draft_change_list_v29 ] self.assertEqual( draft_change_list_v28_dict_list, migrated_draft_change_list_v29_dict_list) def test_convert_states_v27_dict_to_v28_dict(self): draft_change_list_v27 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': 'content_ids_to_audio_translations', 'state_name': 'State B', 'new_value': 'new value', }) ] # Version 28 adds voiceovers_mapping. expected_draft_change_list_v28 = [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': 'recorded_voiceovers', 'state_name': 'State B', 'new_value': {'voiceovers_mapping': 'new value'} }) ] # Migrate exploration to state schema version 28. self.create_and_migrate_new_exploration('27', '28') # Migrate the draft change list's state schema to the migrated # exploration's schema. migrated_draft_change_list_v28 = ( draft_upgrade_services.try_upgrading_draft_to_exp_version( draft_change_list_v27, 1, 2, self.EXP_ID) ) # Change draft change lists into a list of dicts so that it is # easy to compare the whole draft change list. expected_draft_change_list_v28_dict_list = [ change.to_dict() for change in expected_draft_change_list_v28 ] migrated_draft_change_list_v28_dict_list = [ change.to_dict() for change in migrated_draft_change_list_v28 ] self.assertEqual( expected_draft_change_list_v28_dict_list, migrated_draft_change_list_v28_dict_list)
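# --- Editor's note: illustrative sketch, not part of the Oppia test file
# above. It models, with plain dicts, the single v29 -> v30 conversion that
# test_convert_states_v29_dict_to_v30_dict verifies (renaming
# 'tagged_misconception_id' to 'tagged_skill_misconception_id' inside
# 'answer_groups' edits). The real DraftUpgradeUtil operates on
# ExplorationChange domain objects and handles many more cases; the function
# name here is hypothetical.


def convert_draft_v29_dict_to_v30_dict(draft_change_list):
    """Upgrades a v29 draft change list (as plain dicts) to v30."""
    converted = []
    for change in draft_change_list:
        change = dict(change)  # Shallow copy is enough for this sketch.
        if (change.get('cmd') == 'edit_state_property'
                and change.get('property_name') == 'answer_groups'):
            new_value = dict(change['new_value'])
            # v30 renames the key; its value is preserved (None stays None).
            new_value['tagged_skill_misconception_id'] = new_value.pop(
                'tagged_misconception_id', None)
            change['new_value'] = new_value
        converted.append(change)
    return converted


if __name__ == '__main__':
    draft_v29 = [{
        'cmd': 'edit_state_property',
        'property_name': 'answer_groups',
        'state_name': 'State 1',
        'new_value': {'rule_specs': [], 'tagged_misconception_id': None},
    }]
    upgraded = convert_draft_v29_dict_to_v30_dict(draft_v29)
    assert 'tagged_skill_misconception_id' in upgraded[0]['new_value']
    assert 'tagged_misconception_id' not in upgraded[0]['new_value']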
apache-2.0
707,740,308,838,436,900
-4,049,900,894,241,809,400
40.399557
80
0.42758
false
axinging/chromium-crosswalk
third_party/protobuf/python/google/protobuf/internal/text_format_test.py
15
41879
#! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc.  All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Test for google.protobuf.text_format."""

__author__ = 'kenton@google.com (Kenton Varda)'

import re
import six
import string

try:
  import unittest2 as unittest
except ImportError:
  import unittest

from google.protobuf.internal import _parameterized

from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf import text_format


# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):

  # The members of _QUOTES are formatted into a regexp template that
  # expects single characters.  Therefore it's an error (in addition to being
  # non-sensical in the first place) to try to specify a "quote mark" that is
  # more than one character.
  def testQuoteMarksAreSingleChars(self):
    # Renamed from TestQuoteMarksAreSingleChars: unittest only discovers
    # methods whose names start with a lowercase 'test' prefix.
    for quote in text_format._QUOTES:
      self.assertEqual(1, len(quote))


# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):

  def ReadGolden(self, golden_filename):
    with test_util.GoldenFile(golden_filename) as f:
      return (f.readlines() if str is bytes else  # PY3
              [golden_line.decode('utf-8') for golden_line in f])

  def CompareToGoldenFile(self, text, golden_filename):
    golden_lines = self.ReadGolden(golden_filename)
    self.assertMultiLineEqual(text, ''.join(golden_lines))

  def CompareToGoldenText(self, text, golden_text):
    self.assertEqual(text, golden_text)

  def RemoveRedundantZeros(self, text):
    # Some platforms print 1e+5 as 1e+005.  This is fine, but we need to remove
    # these zeros in order to match the golden file.
    # str.replace() does not rescan replaced text, so each pattern is
    # applied twice to turn e.g. 'e+005' into 'e+05' and then into 'e+5'.
    text = text.replace('e+0','e+').replace('e+0','e+') \
               .replace('e-0','e-').replace('e-0','e-')
    # Floating point fields are printed with .0 suffix even if they are
    # actually integer numbers.
    text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
    return text


@_parameterized.Parameters(
    (unittest_pb2),
    (unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):

  def testPrintExotic(self, message_module):
    message = message_module.TestAllTypes()
    message.repeated_int64.append(-9223372036854775808)
    message.repeated_uint64.append(18446744073709551615)
    message.repeated_double.append(123.456)
    message.repeated_double.append(1.23e22)
    message.repeated_double.append(1.23e-18)
    message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
    message.repeated_string.append(u'\u00fc\ua71f')
    self.CompareToGoldenText(
        self.RemoveRedundantZeros(text_format.MessageToString(message)),
        'repeated_int64: -9223372036854775808\n'
        'repeated_uint64: 18446744073709551615\n'
        'repeated_double: 123.456\n'
        'repeated_double: 1.23e+22\n'
        'repeated_double: 1.23e-18\n'
        'repeated_string:'
        ' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
        'repeated_string: "\\303\\274\\352\\234\\237"\n')

  def testPrintExoticUnicodeSubclass(self, message_module):
    class UnicodeSub(six.text_type):
      pass
    message = message_module.TestAllTypes()
    message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
    self.CompareToGoldenText(
        text_format.MessageToString(message),
        'repeated_string: "\\303\\274\\352\\234\\237"\n')

  def testPrintNestedMessageAsOneLine(self, message_module):
    message = message_module.TestAllTypes()
    msg = message.repeated_nested_message.add()
    msg.bb = 42
    self.CompareToGoldenText(
        text_format.MessageToString(message, as_one_line=True),
        'repeated_nested_message { bb: 42 }')

  def testPrintRepeatedFieldsAsOneLine(self, message_module):
    message = message_module.TestAllTypes()
    message.repeated_int32.append(1)
    message.repeated_int32.append(1)
    message.repeated_int32.append(3)
    message.repeated_string.append('Google')
    message.repeated_string.append('Zurich')
    self.CompareToGoldenText(
        text_format.MessageToString(message, as_one_line=True),
        'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
        'repeated_string: "Google" repeated_string: "Zurich"')

  def testPrintNestedNewLineInStringAsOneLine(self, message_module):
    message = message_module.TestAllTypes()
    message.optional_string = 'a\nnew\nline'
    self.CompareToGoldenText(
        text_format.MessageToString(message, as_one_line=True),
        'optional_string: "a\\nnew\\nline"')

  def testPrintExoticAsOneLine(self, message_module):
    message = message_module.TestAllTypes()
    message.repeated_int64.append(-9223372036854775808)
    message.repeated_uint64.append(18446744073709551615)
    message.repeated_double.append(123.456)
    message.repeated_double.append(1.23e22)
    message.repeated_double.append(1.23e-18)
    message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
    message.repeated_string.append(u'\u00fc\ua71f')
    self.CompareToGoldenText(
        self.RemoveRedundantZeros(
            text_format.MessageToString(message, as_one_line=True)),
        'repeated_int64: -9223372036854775808'
        ' repeated_uint64: 18446744073709551615'
        ' repeated_double: 123.456'
        ' repeated_double: 1.23e+22'
        ' repeated_double: 1.23e-18'
        ' repeated_string: '
        '"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
        ' repeated_string: "\\303\\274\\352\\234\\237"')

  def testRoundTripExoticAsOneLine(self, message_module):
    message = message_module.TestAllTypes()
    message.repeated_int64.append(-9223372036854775808)
    message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456) message.repeated_double.append(1.23e22) message.repeated_double.append(1.23e-18) message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"') message.repeated_string.append(u'\u00fc\ua71f') # Test as_utf8 = False. wire_text = text_format.MessageToString( message, as_one_line=True, as_utf8=False) parsed_message = message_module.TestAllTypes() r = text_format.Parse(wire_text, parsed_message) self.assertIs(r, parsed_message) self.assertEqual(message, parsed_message) # Test as_utf8 = True. wire_text = text_format.MessageToString( message, as_one_line=True, as_utf8=True) parsed_message = message_module.TestAllTypes() r = text_format.Parse(wire_text, parsed_message) self.assertIs(r, parsed_message) self.assertEqual(message, parsed_message, '\n%s != %s' % (message, parsed_message)) def testPrintRawUtf8String(self, message_module): message = message_module.TestAllTypes() message.repeated_string.append(u'\u00fc\ua71f') text = text_format.MessageToString(message, as_utf8=True) self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n') parsed_message = message_module.TestAllTypes() text_format.Parse(text, parsed_message) self.assertEqual(message, parsed_message, '\n%s != %s' % (message, parsed_message)) def testPrintFloatFormat(self, message_module): # Check that float_format argument is passed to sub-message formatting. message = message_module.NestedTestAllTypes() # We use 1.25 as it is a round number in binary. The proto 32-bit float # will not gain additional imprecise digits as a 64-bit Python float and # show up in its str. 32-bit 1.2 is noisy when extended to 64-bit: # >>> struct.unpack('f', struct.pack('f', 1.2))[0] # 1.2000000476837158 # >>> struct.unpack('f', struct.pack('f', 1.25))[0] # 1.25 message.payload.optional_float = 1.25 # Check rounding at 15 significant digits message.payload.optional_double = -.000003456789012345678 # Check no decimal point. message.payload.repeated_float.append(-5642) # Check no trailing zeros. message.payload.repeated_double.append(.000078900) formatted_fields = ['optional_float: 1.25', 'optional_double: -3.45678901234568e-6', 'repeated_float: -5642', 'repeated_double: 7.89e-5'] text_message = text_format.MessageToString(message, float_format='.15g') self.CompareToGoldenText( self.RemoveRedundantZeros(text_message), 'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(*formatted_fields)) # as_one_line=True is a separate code branch where float_format is passed. 
text_message = text_format.MessageToString(message, as_one_line=True, float_format='.15g') self.CompareToGoldenText( self.RemoveRedundantZeros(text_message), 'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields)) def testMessageToString(self, message_module): message = message_module.ForeignMessage() message.c = 123 self.assertEqual('c: 123\n', str(message)) def testParseAllFields(self, message_module): message = message_module.TestAllTypes() test_util.SetAllFields(message) ascii_text = text_format.MessageToString(message) parsed_message = message_module.TestAllTypes() text_format.Parse(ascii_text, parsed_message) self.assertEqual(message, parsed_message) if message_module is unittest_pb2: test_util.ExpectAllFieldsSet(self, message) def testParseExotic(self, message_module): message = message_module.TestAllTypes() text = ('repeated_int64: -9223372036854775808\n' 'repeated_uint64: 18446744073709551615\n' 'repeated_double: 123.456\n' 'repeated_double: 1.23e+22\n' 'repeated_double: 1.23e-18\n' 'repeated_string: \n' '"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n' 'repeated_string: "foo" \'corge\' "grault"\n' 'repeated_string: "\\303\\274\\352\\234\\237"\n' 'repeated_string: "\\xc3\\xbc"\n' 'repeated_string: "\xc3\xbc"\n') text_format.Parse(text, message) self.assertEqual(-9223372036854775808, message.repeated_int64[0]) self.assertEqual(18446744073709551615, message.repeated_uint64[0]) self.assertEqual(123.456, message.repeated_double[0]) self.assertEqual(1.23e22, message.repeated_double[1]) self.assertEqual(1.23e-18, message.repeated_double[2]) self.assertEqual( '\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0]) self.assertEqual('foocorgegrault', message.repeated_string[1]) self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2]) self.assertEqual(u'\u00fc', message.repeated_string[3]) def testParseTrailingCommas(self, message_module): message = message_module.TestAllTypes() text = ('repeated_int64: 100;\n' 'repeated_int64: 200;\n' 'repeated_int64: 300,\n' 'repeated_string: "one",\n' 'repeated_string: "two";\n') text_format.Parse(text, message) self.assertEqual(100, message.repeated_int64[0]) self.assertEqual(200, message.repeated_int64[1]) self.assertEqual(300, message.repeated_int64[2]) self.assertEqual(u'one', message.repeated_string[0]) self.assertEqual(u'two', message.repeated_string[1]) def testParseRepeatedScalarShortFormat(self, message_module): message = message_module.TestAllTypes() text = ('repeated_int64: [100, 200];\n' 'repeated_int64: 300,\n' 'repeated_string: ["one", "two"];\n') text_format.Parse(text, message) self.assertEqual(100, message.repeated_int64[0]) self.assertEqual(200, message.repeated_int64[1]) self.assertEqual(300, message.repeated_int64[2]) self.assertEqual(u'one', message.repeated_string[0]) self.assertEqual(u'two', message.repeated_string[1]) def testParseEmptyText(self, message_module): message = message_module.TestAllTypes() text = '' text_format.Parse(text, message) self.assertEqual(message_module.TestAllTypes(), message) def testParseInvalidUtf8(self, message_module): message = message_module.TestAllTypes() text = 'repeated_string: "\\xc3\\xc3"' self.assertRaises(text_format.ParseError, text_format.Parse, text, message) def testParseSingleWord(self, message_module): message = message_module.TestAllTypes() text = 'foo' six.assertRaisesRegex(self, text_format.ParseError, (r'1:1 : Message type "\w+.TestAllTypes" has no field named ' r'"foo".'), text_format.Parse, text, message) def testParseUnknownField(self, message_module): message = 
message_module.TestAllTypes() text = 'unknown_field: 8\n' six.assertRaisesRegex(self, text_format.ParseError, (r'1:1 : Message type "\w+.TestAllTypes" has no field named ' r'"unknown_field".'), text_format.Parse, text, message) def testParseBadEnumValue(self, message_module): message = message_module.TestAllTypes() text = 'optional_nested_enum: BARR' six.assertRaisesRegex(self, text_format.ParseError, (r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" ' r'has no value named BARR.'), text_format.Parse, text, message) message = message_module.TestAllTypes() text = 'optional_nested_enum: 100' six.assertRaisesRegex(self, text_format.ParseError, (r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" ' r'has no value with number 100.'), text_format.Parse, text, message) def testParseBadIntValue(self, message_module): message = message_module.TestAllTypes() text = 'optional_int32: bork' six.assertRaisesRegex(self, text_format.ParseError, ('1:17 : Couldn\'t parse integer: bork'), text_format.Parse, text, message) def testParseStringFieldUnescape(self, message_module): message = message_module.TestAllTypes() text = r'''repeated_string: "\xf\x62" repeated_string: "\\xf\\x62" repeated_string: "\\\xf\\\x62" repeated_string: "\\\\xf\\\\x62" repeated_string: "\\\\\xf\\\\\x62" repeated_string: "\x5cx20"''' text_format.Parse(text, message) SLASH = '\\' self.assertEqual('\x0fb', message.repeated_string[0]) self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1]) self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2]) self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62', message.repeated_string[3]) self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b', message.repeated_string[4]) self.assertEqual(SLASH + 'x20', message.repeated_string[5]) def testMergeDuplicateScalars(self, message_module): message = message_module.TestAllTypes() text = ('optional_int32: 42 ' 'optional_int32: 67') r = text_format.Merge(text, message) self.assertIs(r, message) self.assertEqual(67, message.optional_int32) def testMergeDuplicateNestedMessageScalars(self, message_module): message = message_module.TestAllTypes() text = ('optional_nested_message { bb: 1 } ' 'optional_nested_message { bb: 2 }') r = text_format.Merge(text, message) self.assertTrue(r is message) self.assertEqual(2, message.optional_nested_message.bb) def testParseOneof(self, message_module): m = message_module.TestAllTypes() m.oneof_uint32 = 11 m2 = message_module.TestAllTypes() text_format.Parse(text_format.MessageToString(m), m2) self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field')) # These are tests that aren't fundamentally specific to proto2, but are at # the moment because of differences between the proto2 and proto3 test schemas. # Ideally the schemas would be made more similar so these tests could pass. 
class OnlyWorksWithProto2RightNowTests(TextFormatBase):

  def testParseGolden(self):
    golden_text = '\n'.join(self.ReadGolden('text_format_unittest_data.txt'))
    parsed_message = unittest_pb2.TestAllTypes()
    r = text_format.Parse(golden_text, parsed_message)
    self.assertIs(r, parsed_message)

    message = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(message)
    self.assertEqual(message, parsed_message)

  def testPrintAllFields(self):
    message = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(message)
    self.CompareToGoldenFile(
        self.RemoveRedundantZeros(text_format.MessageToString(message)),
        'text_format_unittest_data_oneof_implemented.txt')

  def testPrintAllFieldsPointy(self):
    message = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(message)
    self.CompareToGoldenFile(
        self.RemoveRedundantZeros(
            text_format.MessageToString(message, pointy_brackets=True)),
        'text_format_unittest_data_pointy_oneof.txt')

  def testPrintInIndexOrder(self):
    message = unittest_pb2.TestFieldOrderings()
    message.my_string = '115'
    message.my_int = 101
    message.my_float = 111
    message.optional_nested_message.oo = 0
    message.optional_nested_message.bb = 1
    self.CompareToGoldenText(
        self.RemoveRedundantZeros(text_format.MessageToString(
            message, use_index_order=True)),
        'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
        'optional_nested_message {\n  oo: 0\n  bb: 1\n}\n')
    self.CompareToGoldenText(
        self.RemoveRedundantZeros(text_format.MessageToString(
            message)),
        'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
        'optional_nested_message {\n  bb: 1\n  oo: 0\n}\n')

  def testMergeLinesGolden(self):
    opened = self.ReadGolden('text_format_unittest_data.txt')
    parsed_message = unittest_pb2.TestAllTypes()
    r = text_format.MergeLines(opened, parsed_message)
    self.assertIs(r, parsed_message)

    message = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(message)
    self.assertEqual(message, parsed_message)

  def testParseLinesGolden(self):
    opened = self.ReadGolden('text_format_unittest_data.txt')
    parsed_message = unittest_pb2.TestAllTypes()
    r = text_format.ParseLines(opened, parsed_message)
    self.assertIs(r, parsed_message)

    message = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(message)
    self.assertEqual(message, parsed_message)

  def testPrintMap(self):
    message = map_unittest_pb2.TestMap()

    message.map_int32_int32[-123] = -456
    message.map_int64_int64[-2**33] = -2**34
    message.map_uint32_uint32[123] = 456
    message.map_uint64_uint64[2**33] = 2**34
    message.map_string_string["abc"] = "123"
    message.map_int32_foreign_message[111].c = 5

    # Maps are serialized to text format using their underlying repeated
    # representation.
self.CompareToGoldenText( text_format.MessageToString(message), 'map_int32_int32 {\n' ' key: -123\n' ' value: -456\n' '}\n' 'map_int64_int64 {\n' ' key: -8589934592\n' ' value: -17179869184\n' '}\n' 'map_uint32_uint32 {\n' ' key: 123\n' ' value: 456\n' '}\n' 'map_uint64_uint64 {\n' ' key: 8589934592\n' ' value: 17179869184\n' '}\n' 'map_string_string {\n' ' key: "abc"\n' ' value: "123"\n' '}\n' 'map_int32_foreign_message {\n' ' key: 111\n' ' value {\n' ' c: 5\n' ' }\n' '}\n') def testMapOrderEnforcement(self): message = map_unittest_pb2.TestMap() for letter in string.ascii_uppercase[13:26]: message.map_string_string[letter] = 'dummy' for letter in reversed(string.ascii_uppercase[0:13]): message.map_string_string[letter] = 'dummy' golden = ''.join(( 'map_string_string {\n key: "%c"\n value: "dummy"\n}\n' % (letter,) for letter in string.ascii_uppercase)) self.CompareToGoldenText(text_format.MessageToString(message), golden) def testMapOrderSemantics(self): golden_lines = self.ReadGolden('map_test_data.txt') # The C++ implementation emits defaulted-value fields, while the Python # implementation does not. Adjusting for this is awkward, but it is # valuable to test against a common golden file. line_blacklist = (' key: 0\n', ' value: 0\n', ' key: false\n', ' value: false\n') golden_lines = [line for line in golden_lines if line not in line_blacklist] message = map_unittest_pb2.TestMap() text_format.ParseLines(golden_lines, message) candidate = text_format.MessageToString(message) # The Python implementation emits "1.0" for the double value that the C++ # implementation emits as "1". candidate = candidate.replace('1.0', '1', 2) self.assertMultiLineEqual(candidate, ''.join(golden_lines)) # Tests of proto2-only features (MessageSet, extensions, etc.). class Proto2Tests(TextFormatBase): def testPrintMessageSet(self): message = unittest_mset_pb2.TestMessageSetContainer() ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension message.message_set.Extensions[ext1].i = 23 message.message_set.Extensions[ext2].str = 'foo' self.CompareToGoldenText( text_format.MessageToString(message), 'message_set {\n' ' [protobuf_unittest.TestMessageSetExtension1] {\n' ' i: 23\n' ' }\n' ' [protobuf_unittest.TestMessageSetExtension2] {\n' ' str: \"foo\"\n' ' }\n' '}\n') message = message_set_extensions_pb2.TestMessageSet() ext = message_set_extensions_pb2.message_set_extension3 message.Extensions[ext].text = 'bar' self.CompareToGoldenText( text_format.MessageToString(message), '[google.protobuf.internal.TestMessageSetExtension3] {\n' ' text: \"bar\"\n' '}\n') def testPrintMessageSetAsOneLine(self): message = unittest_mset_pb2.TestMessageSetContainer() ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension message.message_set.Extensions[ext1].i = 23 message.message_set.Extensions[ext2].str = 'foo' self.CompareToGoldenText( text_format.MessageToString(message, as_one_line=True), 'message_set {' ' [protobuf_unittest.TestMessageSetExtension1] {' ' i: 23' ' }' ' [protobuf_unittest.TestMessageSetExtension2] {' ' str: \"foo\"' ' }' ' }') def testParseMessageSet(self): message = unittest_pb2.TestAllTypes() text = ('repeated_uint64: 1\n' 'repeated_uint64: 2\n') text_format.Parse(text, message) self.assertEqual(1, message.repeated_uint64[0]) self.assertEqual(2, message.repeated_uint64[1]) message = unittest_mset_pb2.TestMessageSetContainer() text = 
('message_set {\n' ' [protobuf_unittest.TestMessageSetExtension1] {\n' ' i: 23\n' ' }\n' ' [protobuf_unittest.TestMessageSetExtension2] {\n' ' str: \"foo\"\n' ' }\n' '}\n') text_format.Parse(text, message) ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension self.assertEqual(23, message.message_set.Extensions[ext1].i) self.assertEqual('foo', message.message_set.Extensions[ext2].str) def testPrintAllExtensions(self): message = unittest_pb2.TestAllExtensions() test_util.SetAllExtensions(message) self.CompareToGoldenFile( self.RemoveRedundantZeros(text_format.MessageToString(message)), 'text_format_unittest_extensions_data.txt') def testPrintAllExtensionsPointy(self): message = unittest_pb2.TestAllExtensions() test_util.SetAllExtensions(message) self.CompareToGoldenFile( self.RemoveRedundantZeros(text_format.MessageToString( message, pointy_brackets=True)), 'text_format_unittest_extensions_data_pointy.txt') def testParseGoldenExtensions(self): golden_text = '\n'.join(self.ReadGolden( 'text_format_unittest_extensions_data.txt')) parsed_message = unittest_pb2.TestAllExtensions() text_format.Parse(golden_text, parsed_message) message = unittest_pb2.TestAllExtensions() test_util.SetAllExtensions(message) self.assertEqual(message, parsed_message) def testParseAllExtensions(self): message = unittest_pb2.TestAllExtensions() test_util.SetAllExtensions(message) ascii_text = text_format.MessageToString(message) parsed_message = unittest_pb2.TestAllExtensions() text_format.Parse(ascii_text, parsed_message) self.assertEqual(message, parsed_message) def testParseAllowedUnknownExtension(self): # Skip over unknown extension correctly. message = unittest_mset_pb2.TestMessageSetContainer() text = ('message_set {\n' ' [unknown_extension] {\n' ' i: 23\n' ' [nested_unknown_ext]: {\n' ' i: 23\n' ' test: "test_string"\n' ' floaty_float: -0.315\n' ' num: -inf\n' ' multiline_str: "abc"\n' ' "def"\n' ' "xyz."\n' ' [nested_unknown_ext]: <\n' ' i: 23\n' ' i: 24\n' ' pointfloat: .3\n' ' test: "test_string"\n' ' floaty_float: -0.315\n' ' num: -inf\n' ' long_string: "test" "test2" \n' ' >\n' ' }\n' ' }\n' ' [unknown_extension]: 5\n' '}\n') text_format.Parse(text, message, allow_unknown_extension=True) golden = 'message_set {\n}\n' self.CompareToGoldenText(text_format.MessageToString(message), golden) # Catch parse errors in unknown extension. message = unittest_mset_pb2.TestMessageSetContainer() malformed = ('message_set {\n' ' [unknown_extension] {\n' ' i:\n' # Missing value. ' }\n' '}\n') six.assertRaisesRegex(self, text_format.ParseError, 'Invalid field value: }', text_format.Parse, malformed, message, allow_unknown_extension=True) message = unittest_mset_pb2.TestMessageSetContainer() malformed = ('message_set {\n' ' [unknown_extension] {\n' ' str: "malformed string\n' # Missing closing quote. ' }\n' '}\n') six.assertRaisesRegex(self, text_format.ParseError, 'Invalid field value: "', text_format.Parse, malformed, message, allow_unknown_extension=True) message = unittest_mset_pb2.TestMessageSetContainer() malformed = ('message_set {\n' ' [unknown_extension] {\n' ' str: "malformed\n multiline\n string\n' ' }\n' '}\n') six.assertRaisesRegex(self, text_format.ParseError, 'Invalid field value: "', text_format.Parse, malformed, message, allow_unknown_extension=True) message = unittest_mset_pb2.TestMessageSetContainer() malformed = ('message_set {\n' ' [malformed_extension] <\n' ' i: -5\n' ' \n' # Missing '>' here. 
                 '}\n')
    six.assertRaisesRegex(self,
        text_format.ParseError,
        '5:1 : Expected ">".',
        text_format.Parse, malformed, message,
        allow_unknown_extension=True)

    # Don't allow unknown fields with allow_unknown_extension=True.
    message = unittest_mset_pb2.TestMessageSetContainer()
    malformed = ('message_set {\n'
                 '  unknown_field: true\n'
                 '  \n'
                 '}\n')
    six.assertRaisesRegex(self,
        text_format.ParseError,
        ('2:3 : Message type '
         '"proto2_wireformat_unittest.TestMessageSet" has no'
         ' field named "unknown_field".'),
        text_format.Parse, malformed, message,
        allow_unknown_extension=True)

    # Parse known extension correctly.
    message = unittest_mset_pb2.TestMessageSetContainer()
    text = ('message_set {\n'
            '  [protobuf_unittest.TestMessageSetExtension1] {\n'
            '    i: 23\n'
            '  }\n'
            '  [protobuf_unittest.TestMessageSetExtension2] {\n'
            '    str: \"foo\"\n'
            '  }\n'
            '}\n')
    text_format.Parse(text, message, allow_unknown_extension=True)
    ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
    ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
    self.assertEqual(23, message.message_set.Extensions[ext1].i)
    self.assertEqual('foo', message.message_set.Extensions[ext2].str)

  def testParseBadExtension(self):
    message = unittest_pb2.TestAllExtensions()
    text = '[unknown_extension]: 8\n'
    six.assertRaisesRegex(self,
        text_format.ParseError,
        '1:2 : Extension "unknown_extension" not registered.',
        text_format.Parse, text, message)
    message = unittest_pb2.TestAllTypes()
    six.assertRaisesRegex(self,
        text_format.ParseError,
        ('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
         'extensions.'),
        text_format.Parse, text, message)

  def testMergeDuplicateExtensionScalars(self):
    message = unittest_pb2.TestAllExtensions()
    text = ('[protobuf_unittest.optional_int32_extension]: 42 '
            '[protobuf_unittest.optional_int32_extension]: 67')
    text_format.Merge(text, message)
    self.assertEqual(
        67,
        message.Extensions[unittest_pb2.optional_int32_extension])

  def testParseDuplicateExtensionScalars(self):
    message = unittest_pb2.TestAllExtensions()
    text = ('[protobuf_unittest.optional_int32_extension]: 42 '
            '[protobuf_unittest.optional_int32_extension]: 67')
    six.assertRaisesRegex(self,
        text_format.ParseError,
        ('1:96 : Message type "protobuf_unittest.TestAllExtensions" '
         'should not have multiple '
         '"protobuf_unittest.optional_int32_extension" extensions.'),
        text_format.Parse, text, message)

  def testParseDuplicateNestedMessageScalars(self):
    message = unittest_pb2.TestAllTypes()
    text = ('optional_nested_message { bb: 1 } '
            'optional_nested_message { bb: 2 }')
    six.assertRaisesRegex(self,
        text_format.ParseError,
        ('1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
         'should not have multiple "bb" fields.'),
        text_format.Parse, text, message)

  def testParseDuplicateScalars(self):
    message = unittest_pb2.TestAllTypes()
    text = ('optional_int32: 42 '
            'optional_int32: 67')
    six.assertRaisesRegex(self,
        text_format.ParseError,
        ('1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
         'have multiple "optional_int32" fields.'),
        text_format.Parse, text, message)

  def testParseGroupNotClosed(self):
    message = unittest_pb2.TestAllTypes()
    text = 'RepeatedGroup: <'
    six.assertRaisesRegex(self,
        text_format.ParseError, '1:16 : Expected ">".',
        text_format.Parse, text, message)
    text = 'RepeatedGroup: {'
    six.assertRaisesRegex(self,
        text_format.ParseError, '1:16 : Expected "}".',
        text_format.Parse, text, message)

  def testParseEmptyGroup(self):
    message = unittest_pb2.TestAllTypes()
    text = 'OptionalGroup: {}'
text_format.Parse(text, message) self.assertTrue(message.HasField('optionalgroup')) message.Clear() message = unittest_pb2.TestAllTypes() text = 'OptionalGroup: <>' text_format.Parse(text, message) self.assertTrue(message.HasField('optionalgroup')) # Maps aren't really proto2-only, but our test schema only has maps for # proto2. def testParseMap(self): text = ('map_int32_int32 {\n' ' key: -123\n' ' value: -456\n' '}\n' 'map_int64_int64 {\n' ' key: -8589934592\n' ' value: -17179869184\n' '}\n' 'map_uint32_uint32 {\n' ' key: 123\n' ' value: 456\n' '}\n' 'map_uint64_uint64 {\n' ' key: 8589934592\n' ' value: 17179869184\n' '}\n' 'map_string_string {\n' ' key: "abc"\n' ' value: "123"\n' '}\n' 'map_int32_foreign_message {\n' ' key: 111\n' ' value {\n' ' c: 5\n' ' }\n' '}\n') message = map_unittest_pb2.TestMap() text_format.Parse(text, message) self.assertEqual(-456, message.map_int32_int32[-123]) self.assertEqual(-2**34, message.map_int64_int64[-2**33]) self.assertEqual(456, message.map_uint32_uint32[123]) self.assertEqual(2**34, message.map_uint64_uint64[2**33]) self.assertEqual("123", message.map_string_string["abc"]) self.assertEqual(5, message.map_int32_foreign_message[111].c) class TokenizerTest(unittest.TestCase): def testSimpleTokenCases(self): text = ('identifier1:"string1"\n \n\n' 'identifier2 : \n \n123 \n identifier3 :\'string\'\n' 'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n' 'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n' 'ID9: 22 ID10: -111111111111111111 ID11: -22\n' 'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f ' 'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ') tokenizer = text_format._Tokenizer(text.splitlines()) methods = [(tokenizer.ConsumeIdentifier, 'identifier1'), ':', (tokenizer.ConsumeString, 'string1'), (tokenizer.ConsumeIdentifier, 'identifier2'), ':', (tokenizer.ConsumeInt32, 123), (tokenizer.ConsumeIdentifier, 'identifier3'), ':', (tokenizer.ConsumeString, 'string'), (tokenizer.ConsumeIdentifier, 'identifiER_4'), ':', (tokenizer.ConsumeFloat, 1.1e+2), (tokenizer.ConsumeIdentifier, 'ID5'), ':', (tokenizer.ConsumeFloat, -0.23), (tokenizer.ConsumeIdentifier, 'ID6'), ':', (tokenizer.ConsumeString, 'aaaa\'bbbb'), (tokenizer.ConsumeIdentifier, 'ID7'), ':', (tokenizer.ConsumeString, 'aa\"bb'), (tokenizer.ConsumeIdentifier, 'ID8'), ':', '{', (tokenizer.ConsumeIdentifier, 'A'), ':', (tokenizer.ConsumeFloat, float('inf')), (tokenizer.ConsumeIdentifier, 'B'), ':', (tokenizer.ConsumeFloat, -float('inf')), (tokenizer.ConsumeIdentifier, 'C'), ':', (tokenizer.ConsumeBool, True), (tokenizer.ConsumeIdentifier, 'D'), ':', (tokenizer.ConsumeBool, False), '}', (tokenizer.ConsumeIdentifier, 'ID9'), ':', (tokenizer.ConsumeUint32, 22), (tokenizer.ConsumeIdentifier, 'ID10'), ':', (tokenizer.ConsumeInt64, -111111111111111111), (tokenizer.ConsumeIdentifier, 'ID11'), ':', (tokenizer.ConsumeInt32, -22), (tokenizer.ConsumeIdentifier, 'ID12'), ':', (tokenizer.ConsumeUint64, 2222222222222222222), (tokenizer.ConsumeIdentifier, 'ID13'), ':', (tokenizer.ConsumeFloat, 1.23456), (tokenizer.ConsumeIdentifier, 'ID14'), ':', (tokenizer.ConsumeFloat, 1.2e+2), (tokenizer.ConsumeIdentifier, 'false_bool'), ':', (tokenizer.ConsumeBool, False), (tokenizer.ConsumeIdentifier, 'true_BOOL'), ':', (tokenizer.ConsumeBool, True), (tokenizer.ConsumeIdentifier, 'true_bool1'), ':', (tokenizer.ConsumeBool, True), (tokenizer.ConsumeIdentifier, 'false_BOOL1'), ':', (tokenizer.ConsumeBool, False)] i = 0 while not tokenizer.AtEnd(): m = methods[i] if type(m) == str: token = 
tokenizer.token self.assertEqual(token, m) tokenizer.NextToken() else: self.assertEqual(m[1], m[0]()) i += 1 def testConsumeIntegers(self): # This test only tests the failures in the integer parsing methods as well # as the '0' special cases. int64_max = (1 << 63) - 1 uint32_max = (1 << 32) - 1 text = '-1 %d %d' % (uint32_max + 1, int64_max + 1) tokenizer = text_format._Tokenizer(text.splitlines()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32) self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64) self.assertEqual(-1, tokenizer.ConsumeInt32()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32) self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32) self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64) self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64()) self.assertTrue(tokenizer.AtEnd()) text = '-0 -0 0 0' tokenizer = text_format._Tokenizer(text.splitlines()) self.assertEqual(0, tokenizer.ConsumeUint32()) self.assertEqual(0, tokenizer.ConsumeUint64()) self.assertEqual(0, tokenizer.ConsumeUint32()) self.assertEqual(0, tokenizer.ConsumeUint64()) self.assertTrue(tokenizer.AtEnd()) def testConsumeByteString(self): text = '"string1\'' tokenizer = text_format._Tokenizer(text.splitlines()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) text = 'string1"' tokenizer = text_format._Tokenizer(text.splitlines()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) text = '\n"\\xt"' tokenizer = text_format._Tokenizer(text.splitlines()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) text = '\n"\\"' tokenizer = text_format._Tokenizer(text.splitlines()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) text = '\n"\\x"' tokenizer = text_format._Tokenizer(text.splitlines()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString) def testConsumeBool(self): text = 'not-a-bool' tokenizer = text_format._Tokenizer(text.splitlines()) self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool) if __name__ == '__main__': unittest.main()
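
# Illustrative round-trip sketch (not part of the original suite): the tests
# above exercise the parser's error paths; the happy path through the same
# module looks like this, reusing the unittest_pb2 fixtures imported above.
def _example_text_format_round_trip():
  message = unittest_pb2.TestAllTypes()
  message.optional_int32 = 42
  text = text_format.MessageToString(message)
  parsed = unittest_pb2.TestAllTypes()
  text_format.Parse(text, parsed)
  assert parsed.optional_int32 == 42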
bsd-3-clause
-5,493,006,399,263,929,000
1,758,715,442,630,850,600
39.307026
81
0.633277
false
coderanger/pychef
chef/tests/test_search.py
5
2531
from unittest2 import skip

from chef import Search, Node
from chef.exceptions import ChefError
from chef.tests import ChefTestCase, mockSearch


class SearchTestCase(ChefTestCase):

    def test_search_all(self):
        s = Search('node')
        self.assertGreaterEqual(len(s), 3)
        self.assertIn('test_1', s)
        self.assertIn('test_2', s)
        self.assertIn('test_3', s)

    def test_search_query(self):
        s = Search('node', 'role:test_1')
        self.assertGreaterEqual(len(s), 2)
        self.assertIn('test_1', s)
        self.assertNotIn('test_2', s)
        self.assertIn('test_3', s)

    def test_list(self):
        searches = Search.list()
        self.assertIn('node', searches)
        self.assertIn('role', searches)

    def test_search_set_query(self):
        s = Search('node').query('role:test_1')
        self.assertGreaterEqual(len(s), 2)
        self.assertIn('test_1', s)
        self.assertNotIn('test_2', s)
        self.assertIn('test_3', s)

    def test_search_call(self):
        s = Search('node')('role:test_1')
        self.assertGreaterEqual(len(s), 2)
        self.assertIn('test_1', s)
        self.assertNotIn('test_2', s)
        self.assertIn('test_3', s)

    def test_rows(self):
        s = Search('node', rows=1)
        self.assertEqual(len(s), 1)
        self.assertGreaterEqual(s.total, 3)

    def test_start(self):
        s = Search('node', start=1)
        self.assertEqual(len(s), s.total - 1)
        self.assertGreaterEqual(s.total, 3)

    def test_slice(self):
        s = Search('node')[1:2]
        self.assertEqual(len(s), 1)
        self.assertGreaterEqual(s.total, 3)

        s2 = s[1:2]
        self.assertEqual(len(s2), 1)
        self.assertGreaterEqual(s2.total, 3)
        self.assertNotEqual(s[0]['name'], s2[0]['name'])

        s3 = Search('node')[2:3]
        self.assertEqual(len(s3), 1)
        self.assertGreaterEqual(s3.total, 3)
        self.assertEqual(s2[0]['name'], s3[0]['name'])

    def test_object(self):
        s = Search('node', 'name:test_1')
        self.assertEqual(len(s), 1)
        node = s[0].object
        self.assertEqual(node.name, 'test_1')
        self.assertEqual(node.run_list, ['role[test_1]'])


class MockSearchTestCase(ChefTestCase):

    @mockSearch({
        ('node', '*:*'): [Node('fake_1', skip_load=True).to_dict()]
    })
    def test_single_node(self, MockSearch):
        import chef.search
        s = chef.search.Search('node')
        self.assertEqual(len(s), 1)
        self.assertIn('fake_1', s)
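
# Hypothetical usage sketch (not part of the test suite): iterate search rows
# and load the backing objects, assuming an API client has been configured
# elsewhere (e.g. via chef.autoconfigure()).
def _example_print_matching_nodes():
    for row in Search('node', 'role:test_1'):
        print(row.object.name)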
apache-2.0
6,168,832,527,253,823,000
1,184,041,209,031,905,000
29.865854
67
0.581193
false
thesuperzapper/tensorflow
tensorflow/python/estimator/run_config.py
7
1949
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Environment configuration object for Estimators."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class TaskType(object):
  MASTER = 'master'
  PS = 'ps'
  WORKER = 'worker'


class RunConfig(object):
  """This class specifies the configurations for an `Estimator` run."""

  @property
  def cluster_spec(self):
    return None

  @property
  def evaluation_master(self):
    return ''

  @property
  def is_chief(self):
    return True

  @property
  def master(self):
    return ''

  @property
  def num_ps_replicas(self):
    return 0

  @property
  def num_worker_replicas(self):
    return 1

  @property
  def task_id(self):
    return 0

  @property
  def task_type(self):
    return TaskType.WORKER

  @property
  def tf_random_seed(self):
    return 1

  @property
  def save_summary_steps(self):
    return 100

  @property
  def save_checkpoints_secs(self):
    return 600

  @property
  def session_config(self):
    return None

  @property
  def save_checkpoints_steps(self):
    return None

  @property
  def keep_checkpoint_max(self):
    return 5

  @property
  def keep_checkpoint_every_n_hours(self):
    return 10000

  @property
  def model_dir(self):
    return None
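
# Illustrative sketch (an assumption for this example, not TensorFlow API):
# since every setting above is exposed as a read-only property, a custom
# configuration is expressed by subclassing RunConfig and overriding
# the relevant properties.
class _ExampleSeededRunConfig(RunConfig):

  @property
  def tf_random_seed(self):
    return 42  # hypothetical deterministic seed for this sketch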
apache-2.0
8,869,748,546,496,958,000
2,257,170,771,001,422,800
19.956989
80
0.672653
false
ksteinfe/decodes
src/decodes/core/dc_mesh.py
1
6004
from decodes.core import *
from . import dc_base, dc_vec, dc_point, dc_has_pts  # here we may only import modules that have been loaded before this one. see core/__init__.py for proper order
if VERBOSE_FS: print("mesh.py loaded")

import copy, collections


class Mesh(HasPts):
    """ a very simple mesh class """
    subclass_attr = []  # this list of props is unset any time this HasPts object changes

    def __init__(self, vertices=None, faces=None, basis=None):
        """ Mesh Constructor.

            :param vertices: The vertices of the mesh.
            :type vertices: [Point]
            :param faces: List of ordered faces.
            :type faces: [int]
            :param basis: The (optional) basis of the mesh.
            :type basis: Basis
            :result: Mesh object.
            :rtype: Mesh

            ::

                pts=[
                    Point(0,0,0),
                    Point(0,1,0),
                    Point(1,1,0),
                    Point(1,0,0),
                    Point(0,0,1),
                    Point(0,1,1),
                    Point(1,1,1),
                    Point(1,0,1),
                ]
                quad_faces=[[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]]
                quadmesh=Mesh(pts,quad_faces)
        """
        super(Mesh,self).__init__(vertices,basis)  # HasPts constructor handles initialization of verts and basis
        self._faces = [] if (faces is None) else faces

    @property
    def faces(self):
        """ Returns a list of mesh faces.

            :result: List of mesh faces.
            :rtype: list
        """
        return self._faces

    def add_face(self,a,b,c,d=-1):
        """ Adds a face to the mesh.

            :param a,b,c,d: Vertex indices of the face to be added to the list of faces.
            :type a,b,c,d: int
            :result: Modifies list of faces.
            :rtype: None

            ::

                quadmesh.add_face(4,5,6,7)
        """
        #TODO: add lists of faces just the same
        if max(a,b,c,d) < len(self.pts):
            if (d>=0) : self._faces.append([a,b,c,d])
            else: self._faces.append([a,b,c])

    def face_pts(self,index):
        """ Returns the points of a given face.

            :param index: Face's index
            :type index: int
            :returns: Vertices.
            :rtype: Point

            ::

                quadmesh.face_pts(0)
        """
        return [self.pts[i] for i in self.faces[index]]

    def face_centroid(self,index):
        """ Returns the centroid of an individual mesh face.

            :param index: Index of a face.
            :type index: int
            :returns: The centroid of a face.
            :rtype: Point

            ::

                quadmesh.face_centroid(0)
        """
        return Point.centroid(self.face_pts(index))

    def face_normal(self,index):
        """ Returns the normal vector of a face.

            :param index: Index of a face.
            :type index: int
            :returns: Normal vector.
            :rtype: Vec

            ::

                quadmesh.face_normal(0)
        """
        verts = self.face_pts(index)
        if len(verts) == 3 :
            return Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[2])).normalized()
        else :
            v0 = Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[3])).normalized()
            v1 = Vec(verts[2],verts[3]).cross(Vec(verts[2],verts[1])).normalized()
            return Vec.bisector(v0,v1).normalized()

    def __repr__(self):
        return "msh[{0}v,{1}f]".format(len(self._verts),len(self._faces))

    @staticmethod
    def explode(msh):
        """ Explodes a mesh into individual faces.

            :param msh: Mesh to explode.
            :type msh: Mesh
            :returns: List of meshes.
            :type: [Mesh]

            ::

                Mesh.explode(quadmesh)
        """
        exploded_meshes = []
        for face in msh.faces:
            pts = [msh.pts[v] for v in face]
            nface = [0,1,2] if len(face)==3 else [0,1,2,3]
            exploded_meshes.append(Mesh(pts,[nface]))
        return exploded_meshes

    def to_pt_graph(self):
        """ Returns a Graph representation of the mesh points by index.

            :returns: A Graph of point indexes.
            :rtype: Graph

            ::

                quadmesh.to_pt_graph()
        """
        graph = Graph()
        for index in range(len(self.pts)):
            for face in self.faces:
                for px in face:
                    if index in face and index!=px: graph.add_edge(index, px)
        return graph

    def to_face_graph(self, val=1):
        """ Returns a Graph representation of the mesh faces by index.

            :param val: Number of coincident points required for two faces to count as neighbors.
            :type val: int
            :returns: A Graph of face indexes.
            :rtype: Graph

            ::

                quadmesh.to_face_graph(2)
        """
        from decodes.extensions.graph import Graph
        graph = Graph()
        graph.naked_nodes = []
        for f1 in range(len(self.faces)):
            for f2 in range(len(self.faces)):
                if f1 != f2:
                    count = 0
                    for index in self.faces[f2]:
                        if index in self.faces[f1]: count+=1
                    if count >= val: graph.add_edge(f1,f2)
            if len(graph.edges[f1]) < len(self.faces[f1]):
                if f1 not in graph.naked_nodes: graph.naked_nodes.append(f1)
        return graph
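
# Illustrative usage sketch (mirrors the constructor docstring above): build
# the unit-cube quad mesh and query its first face.
def _example_quadmesh():
    pts = [Point(0,0,0), Point(0,1,0), Point(1,1,0), Point(1,0,0),
           Point(0,0,1), Point(0,1,1), Point(1,1,1), Point(1,0,1)]
    quadmesh = Mesh(pts, [[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]])
    return quadmesh.face_normal(0), quadmesh.face_centroid(0)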
gpl-3.0
-1,219,176,522,266,921,700
-7,503,219,643,950,952,000
30.276042
164
0.460693
false
louyihua/edx-platform
lms/djangoapps/mobile_api/video_outlines/tests.py
17
33728
# -*- coding: utf-8 -*- """ Tests for video outline API """ import itertools from uuid import uuid4 from collections import namedtuple import ddt from nose.plugins.attrib import attr from edxval import api from xmodule.modulestore.tests.factories import ItemFactory from xmodule.video_module import transcripts_utils from xmodule.modulestore.django import modulestore from xmodule.partitions.partitions import Group, UserPartition from milestones.tests.utils import MilestonesTestCaseMixin from mobile_api.models import MobileApiConfig from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, remove_user_from_cohort from mobile_api.testutils import MobileAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin class TestVideoAPITestCase(MobileAPITestCase): """ Base test class for video related mobile APIs """ def setUp(self): super(TestVideoAPITestCase, self).setUp() self.section = ItemFactory.create( parent=self.course, category="chapter", display_name=u"test factory section omega \u03a9", ) self.sub_section = ItemFactory.create( parent=self.section, category="sequential", display_name=u"test subsection omega \u03a9", ) self.unit = ItemFactory.create( parent=self.sub_section, category="vertical", metadata={'graded': True, 'format': 'Homework'}, display_name=u"test unit omega \u03a9", ) self.other_unit = ItemFactory.create( parent=self.sub_section, category="vertical", metadata={'graded': True, 'format': 'Homework'}, display_name=u"test unit omega 2 \u03a9", ) self.nameless_unit = ItemFactory.create( parent=self.sub_section, category="vertical", metadata={'graded': True, 'format': 'Homework'}, display_name=None, ) self.edx_video_id = 'testing-123' self.video_url = 'http://val.edx.org/val/video.mp4' self.video_url_high = 'http://val.edx.org/val/video_high.mp4' self.youtube_url = 'http://val.edx.org/val/youtube.mp4' self.html5_video_url = 'http://video.edx.org/html5/video.mp4' api.create_profile('youtube') api.create_profile('mobile_high') api.create_profile('mobile_low') # create the video in VAL api.create_video({ 'edx_video_id': self.edx_video_id, 'status': 'test', 'client_video_id': u"test video omega \u03a9", 'duration': 12, 'courses': [unicode(self.course.id)], 'encoded_videos': [ { 'profile': 'youtube', 'url': 'xyz123', 'file_size': 0, 'bitrate': 1500 }, { 'profile': 'mobile_low', 'url': self.video_url, 'file_size': 12345, 'bitrate': 250 }, { 'profile': 'mobile_high', 'url': self.video_url_high, 'file_size': 99999, 'bitrate': 250 }, ]}) # Set requested profiles MobileApiConfig(video_profiles="mobile_low,mobile_high,youtube").save() class TestVideoAPIMixin(object): """ Mixin class that provides helpers for testing video related mobile APIs """ def _create_video_with_subs(self, custom_subid=None): """ Creates and returns a video with stored subtitles. 
""" subid = custom_subid or uuid4().hex transcripts_utils.save_subs_to_store( { 'start': [100, 200, 240, 390, 1000], 'end': [200, 240, 380, 1000, 1500], 'text': [ 'subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5' ] }, subid, self.course) return ItemFactory.create( parent=self.unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test video omega \u03a9", sub=subid ) def _verify_paths(self, course_outline, path_list, outline_index=0): """ Takes a path_list and compares it against the course_outline Attributes: course_outline (list): A list of dictionaries that includes a 'path' and 'named_path' field which we will be comparing path_list to path_list (list): A list of the expected strings outline_index (int): Index into the course_outline list for which the path is being tested. """ path = course_outline[outline_index]['path'] self.assertEqual(len(path), len(path_list)) for i in range(len(path_list)): self.assertEqual(path_list[i], path[i]['name']) #named_path will be deprecated eventually named_path = course_outline[outline_index]['named_path'] self.assertEqual(len(named_path), len(path_list)) for i in range(len(path_list)): self.assertEqual(path_list[i], named_path[i]) def _setup_course_partitions(self, scheme_id='random', is_cohorted=False): """Helper method to configure the user partitions in the course.""" self.partition_id = 0 # pylint: disable=attribute-defined-outside-init self.course.user_partitions = [ UserPartition( self.partition_id, 'first_partition', 'First Partition', [Group(0, 'alpha'), Group(1, 'beta')], scheme=None, scheme_id=scheme_id ), ] self.course.cohort_config = {'cohorted': is_cohorted} self.store.update_item(self.course, self.user.id) def _setup_group_access(self, xblock, partition_id, group_ids): """Helper method to configure the partition and group mapping for the given xblock.""" xblock.group_access = {partition_id: group_ids} self.store.update_item(xblock, self.user.id) def _setup_split_module(self, sub_block_category): """Helper method to configure a split_test unit with children of type sub_block_category.""" self._setup_course_partitions() self.split_test = ItemFactory.create( # pylint: disable=attribute-defined-outside-init parent=self.unit, category="split_test", display_name=u"split test unit", user_partition_id=0, ) sub_block_a = ItemFactory.create( parent=self.split_test, category=sub_block_category, display_name=u"split test block a", ) sub_block_b = ItemFactory.create( parent=self.split_test, category=sub_block_category, display_name=u"split test block b", ) self.split_test.group_id_to_child = { str(index): url for index, url in enumerate([sub_block_a.location, sub_block_b.location]) } self.store.update_item(self.split_test, self.user.id) return sub_block_a, sub_block_b @attr(shard=2) class TestNonStandardCourseStructure(MobileAPITestCase, TestVideoAPIMixin, MilestonesTestCaseMixin): """ Tests /api/mobile/v0.5/video_outlines/courses/{course_id} with no course set """ REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']} def setUp(self): super(TestNonStandardCourseStructure, self).setUp() self.chapter_under_course = ItemFactory.create( parent=self.course, category="chapter", display_name=u"test factory chapter under course omega \u03a9", ) self.section_under_course = ItemFactory.create( parent=self.course, category="sequential", display_name=u"test factory section under course omega \u03a9", ) self.section_under_chapter = ItemFactory.create( parent=self.chapter_under_course, category="sequential", 
display_name=u"test factory section under chapter omega \u03a9", ) self.vertical_under_course = ItemFactory.create( parent=self.course, category="vertical", display_name=u"test factory vertical under course omega \u03a9", ) self.vertical_under_section = ItemFactory.create( parent=self.section_under_chapter, category="vertical", display_name=u"test factory vertical under section omega \u03a9", ) def test_structure_course_video(self): """ Tests when there is a video without a vertical directly under course """ self.login_and_enroll() ItemFactory.create( parent=self.course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches(section_url, r'courseware$') self.assertEqual(section_url, unit_url) self._verify_paths(course_outline, []) def test_structure_course_vert_video(self): """ Tests when there is a video under vertical directly under course """ self.login_and_enroll() ItemFactory.create( parent=self.vertical_under_course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, r'courseware/test_factory_vertical_under_course_omega_%CE%A9/$' ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory vertical under course omega \u03a9' ] ) def test_structure_course_chap_video(self): """ Tests when there is a video directly under chapter """ self.login_and_enroll() ItemFactory.create( parent=self.chapter_under_course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, r'courseware/test_factory_chapter_under_course_omega_%CE%A9/$' ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory chapter under course omega \u03a9', ] ) def test_structure_course_section_video(self): """ Tests when chapter is none, and video under section under course """ self.login_and_enroll() ItemFactory.create( parent=self.section_under_course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, r'courseware/test_factory_section_under_course_omega_%CE%A9/$' ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory section under course omega \u03a9', ] ) def test_structure_course_chap_section_video(self): """ Tests when chapter and sequential exists, with a video with no vertical. 
""" self.login_and_enroll() ItemFactory.create( parent=self.section_under_chapter, category="video", display_name=u"meow factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, ( r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' + 'test_factory_section_under_chapter_omega_%CE%A9/$' ) ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory chapter under course omega \u03a9', u'test factory section under chapter omega \u03a9', ] ) def test_structure_course_section_vert_video(self): """ Tests chapter->section->vertical->unit """ self.login_and_enroll() ItemFactory.create( parent=self.vertical_under_section, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, ( r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' + 'test_factory_section_under_chapter_omega_%CE%A9/$' ) ) self.assertRegexpMatches( unit_url, ( r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' + 'test_factory_section_under_chapter_omega_%CE%A9/1$' ) ) self._verify_paths( course_outline, [ u'test factory chapter under course omega \u03a9', u'test factory section under chapter omega \u03a9', u'test factory vertical under section omega \u03a9' ] ) @attr(shard=2) @ddt.ddt class TestVideoSummaryList(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin, TestVideoAPIMixin, MilestonesTestCaseMixin): """ Tests for /api/mobile/v0.5/video_outlines/courses/{course_id}.. 
""" REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']} def test_only_on_web(self): self.login_and_enroll() course_outline = self.api_response().data self.assertEqual(len(course_outline), 0) subid = uuid4().hex transcripts_utils.save_subs_to_store( { 'start': [100], 'end': [200], 'text': [ 'subs #1', ] }, subid, self.course) ItemFactory.create( parent=self.unit, category="video", display_name=u"test video", only_on_web=True, subid=subid ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertIsNone(course_outline[0]["summary"]["video_url"]) self.assertIsNone(course_outline[0]["summary"]["video_thumbnail_url"]) self.assertEqual(course_outline[0]["summary"]["duration"], 0) self.assertEqual(course_outline[0]["summary"]["size"], 0) self.assertEqual(course_outline[0]["summary"]["name"], "test video") self.assertEqual(course_outline[0]["summary"]["transcripts"], {}) self.assertIsNone(course_outline[0]["summary"]["language"]) self.assertEqual(course_outline[0]["summary"]["category"], "video") self.assertTrue(course_outline[0]["summary"]["only_on_web"]) def test_mobile_api_config(self): """ Tests VideoSummaryList with different MobileApiConfig video_profiles """ self.login_and_enroll() edx_video_id = "testing_mobile_high" api.create_video({ 'edx_video_id': edx_video_id, 'status': 'test', 'client_video_id': u"test video omega \u03a9", 'duration': 12, 'courses': [unicode(self.course.id)], 'encoded_videos': [ { 'profile': 'youtube', 'url': self.youtube_url, 'file_size': 2222, 'bitrate': 4444 }, { 'profile': 'mobile_high', 'url': self.video_url_high, 'file_size': 111, 'bitrate': 333 }, ]}) ItemFactory.create( parent=self.other_unit, category="video", display_name=u"testing mobile high video", edx_video_id=edx_video_id, ) expected_output = { 'category': u'video', 'video_thumbnail_url': None, 'language': u'en', 'name': u'testing mobile high video', 'video_url': self.video_url_high, 'duration': 12.0, 'transcripts': { 'en': 'http://testserver/api/mobile/v0.5/video_outlines/transcripts/{}/testing_mobile_high_video/en'.format(self.course.id) # pylint: disable=line-too-long }, 'only_on_web': False, 'encoded_videos': { u'mobile_high': { 'url': self.video_url_high, 'file_size': 111 }, u'youtube': { 'url': self.youtube_url, 'file_size': 2222 } }, 'size': 111 } # Testing when video_profiles='mobile_low,mobile_high,youtube' course_outline = self.api_response().data course_outline[0]['summary'].pop("id") self.assertEqual(course_outline[0]['summary'], expected_output) # Testing when there is no mobile_low, and that mobile_high doesn't show MobileApiConfig(video_profiles="mobile_low,youtube").save() course_outline = self.api_response().data expected_output['encoded_videos'].pop('mobile_high') expected_output['video_url'] = self.youtube_url expected_output['size'] = 2222 course_outline[0]['summary'].pop("id") self.assertEqual(course_outline[0]['summary'], expected_output) # Testing where youtube is the default video over mobile_high MobileApiConfig(video_profiles="youtube,mobile_high").save() course_outline = self.api_response().data expected_output['encoded_videos']['mobile_high'] = { 'url': self.video_url_high, 'file_size': 111 } course_outline[0]['summary'].pop("id") self.assertEqual(course_outline[0]['summary'], expected_output) def test_video_not_in_val(self): self.login_and_enroll() self._create_video_with_subs() ItemFactory.create( parent=self.other_unit, category="video", edx_video_id="some_non_existent_id_in_val", display_name=u"some non existent 
video in val", html5_sources=[self.html5_video_url] ) summary = self.api_response().data[1]['summary'] self.assertEqual(summary['name'], "some non existent video in val") self.assertIsNone(summary['encoded_videos']) self.assertIsNone(summary['duration']) self.assertEqual(summary['size'], 0) self.assertEqual(summary['video_url'], self.html5_video_url) def test_course_list(self): self.login_and_enroll() self._create_video_with_subs() ItemFactory.create( parent=self.other_unit, category="video", display_name=u"test video omega 2 \u03a9", html5_sources=[self.html5_video_url] ) ItemFactory.create( parent=self.other_unit, category="video", display_name=u"test video omega 3 \u03a9", source=self.html5_video_url ) ItemFactory.create( parent=self.unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega \u03a9", visible_to_staff_only=True, ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 3) vid = course_outline[0] self.assertIn('test_subsection_omega_%CE%A9', vid['section_url']) self.assertIn('test_subsection_omega_%CE%A9/1', vid['unit_url']) self.assertIn(u'test_video_omega_\u03a9', vid['summary']['id']) self.assertEqual(vid['summary']['video_url'], self.video_url) self.assertEqual(vid['summary']['size'], 12345) self.assertIn('en', vid['summary']['transcripts']) self.assertFalse(vid['summary']['only_on_web']) self.assertEqual(course_outline[1]['summary']['video_url'], self.html5_video_url) self.assertEqual(course_outline[1]['summary']['size'], 0) self.assertFalse(course_outline[1]['summary']['only_on_web']) self.assertEqual(course_outline[1]['path'][2]['name'], self.other_unit.display_name) self.assertEqual(course_outline[1]['path'][2]['id'], unicode(self.other_unit.location)) self.assertEqual(course_outline[2]['summary']['video_url'], self.html5_video_url) self.assertEqual(course_outline[2]['summary']['size'], 0) self.assertFalse(course_outline[2]['summary']['only_on_web']) def test_with_nameless_unit(self): self.login_and_enroll() ItemFactory.create( parent=self.nameless_unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega 2 \u03a9" ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertEqual(course_outline[0]['path'][2]['name'], self.nameless_unit.location.block_id) def test_with_video_in_sub_section(self): """ Tests a non standard xml format where a video is underneath a sequential We are expecting to return the same unit and section url since there is no unit vertical. 
""" self.login_and_enroll() ItemFactory.create( parent=self.sub_section, category="video", edx_video_id=self.edx_video_id, display_name=u"video in the sub section" ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertEqual(len(course_outline[0]['path']), 2) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertIn( u'courseware/test_factory_section_omega_%CE%A9/test_subsection_omega_%CE%A9', section_url ) self.assertTrue(section_url) self.assertTrue(unit_url) self.assertEqual(section_url, unit_url) @ddt.data( *itertools.product([True, False], ["video", "problem"]) ) @ddt.unpack def test_with_split_block(self, is_user_staff, sub_block_category): """Test with split_module->sub_block_category and for both staff and non-staff users.""" self.login_and_enroll() self.user.is_staff = is_user_staff self.user.save() self._setup_split_module(sub_block_category) video_outline = self.api_response().data num_video_blocks = 1 if sub_block_category == "video" else 0 self.assertEqual(len(video_outline), num_video_blocks) for block_index in range(num_video_blocks): self._verify_paths( video_outline, [ self.section.display_name, self.sub_section.display_name, self.unit.display_name, self.split_test.display_name ], block_index ) self.assertIn(u"split test block", video_outline[block_index]["summary"]["name"]) def test_with_split_vertical(self): """Test with split_module->vertical->video structure.""" self.login_and_enroll() split_vertical_a, split_vertical_b = self._setup_split_module("vertical") ItemFactory.create( parent=split_vertical_a, category="video", display_name=u"video in vertical a", ) ItemFactory.create( parent=split_vertical_b, category="video", display_name=u"video in vertical b", ) video_outline = self.api_response().data # user should see only one of the videos (a or b). 
self.assertEqual(len(video_outline), 1) self.assertIn(u"video in vertical", video_outline[0]["summary"]["name"]) a_or_b = video_outline[0]["summary"]["name"][-1:] self._verify_paths( video_outline, [ self.section.display_name, self.sub_section.display_name, self.unit.display_name, self.split_test.display_name, u"split test block " + a_or_b ], ) def _create_cohorted_video(self, group_id): """Creates a cohorted video block, giving access to only the given group_id.""" video_block = ItemFactory.create( parent=self.unit, category="video", display_name=u"video for group " + unicode(group_id), ) self._setup_group_access(video_block, self.partition_id, [group_id]) def _create_cohorted_vertical_with_video(self, group_id): """Creates a cohorted vertical with a child video block, giving access to only the given group_id.""" vertical_block = ItemFactory.create( parent=self.sub_section, category="vertical", display_name=u"vertical for group " + unicode(group_id), ) self._setup_group_access(vertical_block, self.partition_id, [group_id]) ItemFactory.create( parent=vertical_block, category="video", display_name=u"video for group " + unicode(group_id), ) @ddt.data("_create_cohorted_video", "_create_cohorted_vertical_with_video") def test_with_cohorted_content(self, content_creator_method_name): self.login_and_enroll() self._setup_course_partitions(scheme_id='cohort', is_cohorted=True) cohorts = [] for group_id in [0, 1]: getattr(self, content_creator_method_name)(group_id) cohorts.append(CohortFactory(course_id=self.course.id, name=u"Cohort " + unicode(group_id))) link = CourseUserGroupPartitionGroup( course_user_group=cohorts[group_id], partition_id=self.partition_id, group_id=group_id, ) link.save() for cohort_index in range(len(cohorts)): # add user to this cohort add_user_to_cohort(cohorts[cohort_index], self.user.username) # should only see video for this cohort video_outline = self.api_response().data self.assertEqual(len(video_outline), 1) self.assertEquals( u"video for group " + unicode(cohort_index), video_outline[0]["summary"]["name"] ) # remove user from this cohort remove_user_from_cohort(cohorts[cohort_index], self.user.username) # un-cohorted user should see no videos video_outline = self.api_response().data self.assertEqual(len(video_outline), 0) # staff user sees all videos self.user.is_staff = True self.user.save() video_outline = self.api_response().data self.assertEqual(len(video_outline), 2) def test_with_hidden_blocks(self): self.login_and_enroll() hidden_subsection = ItemFactory.create( parent=self.section, category="sequential", hide_from_toc=True, ) unit_within_hidden_subsection = ItemFactory.create( parent=hidden_subsection, category="vertical", ) hidden_unit = ItemFactory.create( parent=self.sub_section, category="vertical", hide_from_toc=True, ) ItemFactory.create( parent=unit_within_hidden_subsection, category="video", edx_video_id=self.edx_video_id, ) ItemFactory.create( parent=hidden_unit, category="video", edx_video_id=self.edx_video_id, ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 0) def test_language(self): self.login_and_enroll() video = ItemFactory.create( parent=self.nameless_unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega 2 \u03a9" ) language_case = namedtuple('language_case', ['transcripts', 'expected_language']) language_cases = [ # defaults to english language_case({}, "en"), # supports english language_case({"en": 1}, "en"), # supports another language language_case({"lang1": 1}, 
"lang1"), # returns first alphabetically-sorted language language_case({"lang1": 1, "en": 2}, "en"), language_case({"lang1": 1, "lang2": 2}, "lang1"), ] for case in language_cases: video.transcripts = case.transcripts modulestore().update_item(video, self.user.id) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertEqual(course_outline[0]['summary']['language'], case.expected_language) def test_transcripts(self): self.login_and_enroll() video = ItemFactory.create( parent=self.nameless_unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega 2 \u03a9" ) transcript_case = namedtuple('transcript_case', ['transcripts', 'english_subtitle', 'expected_transcripts']) transcript_cases = [ # defaults to english transcript_case({}, "", ["en"]), transcript_case({}, "en-sub", ["en"]), # supports english transcript_case({"en": 1}, "", ["en"]), transcript_case({"en": 1}, "en-sub", ["en"]), # keeps both english and other languages transcript_case({"lang1": 1, "en": 2}, "", ["lang1", "en"]), transcript_case({"lang1": 1, "en": 2}, "en-sub", ["lang1", "en"]), # adds english to list of languages only if english_subtitle is specified transcript_case({"lang1": 1, "lang2": 2}, "", ["lang1", "lang2"]), transcript_case({"lang1": 1, "lang2": 2}, "en-sub", ["lang1", "lang2", "en"]), ] for case in transcript_cases: video.transcripts = case.transcripts video.sub = case.english_subtitle modulestore().update_item(video, self.user.id) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertSetEqual( set(course_outline[0]['summary']['transcripts'].keys()), set(case.expected_transcripts) ) @attr(shard=2) class TestTranscriptsDetail(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin, TestVideoAPIMixin, MilestonesTestCaseMixin): """ Tests for /api/mobile/v0.5/video_outlines/transcripts/{course_id}.. """ REVERSE_INFO = {'name': 'video-transcripts-detail', 'params': ['course_id']} def setUp(self): super(TestTranscriptsDetail, self).setUp() self.video = self._create_video_with_subs() def reverse_url(self, reverse_args=None, **kwargs): reverse_args = reverse_args or {} reverse_args.update({ 'block_id': self.video.location.block_id, 'lang': kwargs.get('lang', 'en'), }) return super(TestTranscriptsDetail, self).reverse_url(reverse_args, **kwargs) def test_incorrect_language(self): self.login_and_enroll() self.api_response(expected_response_code=404, lang='pl') def test_transcript_with_unicode_file_name(self): self.video = self._create_video_with_subs(custom_subid=u'你好') self.login_and_enroll() self.api_response(expected_response_code=200, lang='en')
agpl-3.0
8,411,206,323,066,558,000
-8,285,709,377,944,376,000
36.680447
172
0.568853
false
tdtrask/ansible
lib/ansible/galaxy/token.py
102
2142
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
from stat import S_IRUSR, S_IWUSR

import yaml

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class GalaxyToken(object):
    ''' Class for storing and retrieving the token in ~/.ansible_galaxy '''

    def __init__(self):
        self.file = os.path.expanduser("~") + '/.ansible_galaxy'
        self.config = yaml.safe_load(self.__open_config_for_read())
        if not self.config:
            self.config = {}

    def __open_config_for_read(self):
        if os.path.isfile(self.file):
            display.vvv('Opened %s' % self.file)
            return open(self.file, 'r')
        # config.yml not found, create and chmod u+rw
        f = open(self.file, 'w')
        f.close()
        os.chmod(self.file, S_IRUSR | S_IWUSR)  # owner has +rw
        display.vvv('Created %s' % self.file)
        return open(self.file, 'r')

    def set(self, token):
        self.config['token'] = token
        self.save()

    def get(self):
        return self.config.get('token', None)

    def save(self):
        with open(self.file, 'w') as f:
            yaml.safe_dump(self.config, f, default_flow_style=False)
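
# Minimal usage sketch (illustrative, with a made-up token value): the class
# above lazily creates ~/.ansible_galaxy with owner-only permissions.
def _example_token_round_trip():
    token = GalaxyToken()
    token.set('abc123')    # persists to ~/.ansible_galaxy
    return token.get()     # -> 'abc123'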
gpl-3.0
-6,424,056,698,184,721,000
-5,441,114,119,183,585,000
31.953846
72
0.606909
false
xiaoxiamii/scikit-learn
benchmarks/bench_plot_svd.py
325
2899
"""Benchmarks of Singular Value Decomposition (Exact and Approximate) The data is mostly low rank but is a fat infinite tail. """ import gc from time import time import numpy as np from collections import defaultdict from scipy.linalg import svd from sklearn.utils.extmath import randomized_svd from sklearn.datasets.samples_generator import make_low_rank_matrix def compute_bench(samples_range, features_range, n_iter=3, rank=50): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') X = make_low_rank_matrix(n_samples, n_features, effective_rank=rank, tail_strength=0.2) gc.collect() print("benchmarking scipy svd: ") tstart = time() svd(X, full_matrices=False) results['scipy svd'].append(time() - tstart) gc.collect() print("benchmarking scikit-learn randomized_svd: n_iter=0") tstart = time() randomized_svd(X, rank, n_iter=0) results['scikit-learn randomized_svd (n_iter=0)'].append( time() - tstart) gc.collect() print("benchmarking scikit-learn randomized_svd: n_iter=%d " % n_iter) tstart = time() randomized_svd(X, rank, n_iter=n_iter) results['scikit-learn randomized_svd (n_iter=%d)' % n_iter].append(time() - tstart) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(2, 1000, 4).astype(np.int) features_range = np.linspace(2, 1000, 4).astype(np.int) results = compute_bench(samples_range, features_range) label = 'scikit-learn singular value decomposition benchmark results' fig = plt.figure(label) ax = fig.gca(projection='3d') for c, (label, timings) in zip('rbg', sorted(results.iteritems())): X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, color=c) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.legend() plt.show()
bsd-3-clause
-1,589,565,685,505,892,400
8,172,951,023,059,353,000
34.353659
75
0.575026
false
ruijie/quantum
quantum/plugins/cisco/l2network_plugin_configuration.py
7
2232
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Rohit Agarwalla, Cisco Systems, Inc.

from quantum.common.utils import find_config_file
from quantum.plugins.cisco.common import cisco_configparser as confp

CONF_FILE = find_config_file({'plugin': 'cisco'}, "l2network_plugin.ini")

CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)

# Read the conf for the l2network_plugin
SECTION_CONF = CONF_PARSER_OBJ['VLANS']
VLAN_NAME_PREFIX = SECTION_CONF['vlan_name_prefix']
VLAN_START = SECTION_CONF['vlan_start']
VLAN_END = SECTION_CONF['vlan_end']

SECTION_CONF = CONF_PARSER_OBJ['PORTS']
MAX_PORTS = SECTION_CONF['max_ports']

SECTION_CONF = CONF_PARSER_OBJ['PORTPROFILES']
MAX_PORT_PROFILES = SECTION_CONF['max_port_profiles']

SECTION_CONF = CONF_PARSER_OBJ['NETWORKS']
MAX_NETWORKS = SECTION_CONF['max_networks']

SECTION_CONF = CONF_PARSER_OBJ['MODEL']
MODEL_CLASS = SECTION_CONF['model_class']

CONF_FILE = find_config_file({'plugin': 'cisco'}, "cisco_plugins.ini")

SECTION_CONF = CONF_PARSER_OBJ['SEGMENTATION']
MANAGER_CLASS = SECTION_CONF['manager_class']

CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)

# Read the config for the device plugins
PLUGINS = CONF_PARSER_OBJ.walk(CONF_PARSER_OBJ.dummy)

CONF_FILE = find_config_file({'plugin': 'cisco'}, "db_conn.ini")

CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)

# Read DB config for the Quantum DB
SECTION_CONF = CONF_PARSER_OBJ['DATABASE']
DB_NAME = SECTION_CONF['name']
DB_USER = SECTION_CONF['user']
DB_PASS = SECTION_CONF['pass']
DB_HOST = SECTION_CONF['host']
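
# Illustrative sketch of the l2network_plugin.ini layout this module expects
# (all values below are assumptions for the example, not shipped defaults):
#
#   [VLANS]
#   vlan_name_prefix = q-
#   vlan_start = 100
#   vlan_end = 3000
#
#   [PORTS]
#   max_ports = 100
#
#   [PORTPROFILES]
#   max_port_profiles = 65568
#
#   [NETWORKS]
#   max_networks = 65568
#
#   [MODEL]
#   model_class = quantum.plugins.cisco.models.virt_phy_sw.VirtualPhySwModel  # hypothetical path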
apache-2.0
2,151,540,761,760,266,500
8,599,553,070,102,148,000
31.823529
78
0.732975
false
cluckmaster/MissionPlanner
Lib/site-packages/scipy/ndimage/info.py
55
2112
""" N-dimensional image package =========================== This package contains various functions for multi-dimensional image processing. Modules ------- .. autosummary:: :toctree: generated/ filters - fourier - interpolation - io - measurements - morphology - Functions (partial list) ------------------------ .. autosummary:: :toctree: generated/ affine_transform - Apply an affine transformation center_of_mass - The center of mass of the values of an array at labels convolve - Multi-dimensional convolution convolve1d - 1-D convolution along the given axis correlate - Multi-dimensional correlation correlate1d - 1-D correlation along the given axis extrema - Min's and max's of an array at labels, with their positions find_objects - Find objects in a labeled array generic_filter - Multi-dimensional filter using a given function generic_filter1d - 1-D generic filter along the given axis geometric_transform - Apply an arbritrary geometric transform histogram - Histogram of the values of an array, optionally at labels imread - Load an image from a file label - Label features in an array laplace - n-D Laplace filter based on approximate second derivatives map_coordinates - Map input array to new coordinates by interpolation mean - Mean of the values of an array at labels median_filter - Calculates a multi-dimensional median filter percentile_filter - Calculates a multi-dimensional percentile filter rank_filter - Calculates a multi-dimensional rank filter rotate - Rotate an array shift - Shift an array standard_deviation - Standard deviation of an n-D image array sum - Sum of the values of the array uniform_filter - Multi-dimensional uniform filter uniform_filter1d - 1-D uniform filter along the given axis variance - Variance of the values of an n-D image array zoom - Zoom an array Note: the above is only roughly half the functions available in this package Objects ------- .. autosummary:: :toctree: generated/ docdict - """ postpone_import = 1 depends = []
gpl-3.0
-8,504,033,259,194,115,000
5,914,654,475,336,473,000
29.171429
74
0.72017
false
codewarrior0/pytest
testing/test_recwarn.py
17
6579
import warnings

import py
import pytest
from _pytest.recwarn import WarningsRecorder


def test_recwarn_functional(testdir):
    reprec = testdir.inline_runsource("""
        import warnings
        oldwarn = warnings.showwarning
        def test_method(recwarn):
            assert warnings.showwarning != oldwarn
            warnings.warn("hello")
            warn = recwarn.pop()
            assert isinstance(warn.message, UserWarning)
        def test_finalized():
            assert warnings.showwarning == oldwarn
    """)
    res = reprec.countoutcomes()
    assert tuple(res) == (2, 0, 0), res


class TestWarningsRecorderChecker(object):
    def test_recording(self, recwarn):
        showwarning = py.std.warnings.showwarning
        rec = WarningsRecorder()
        with rec:
            assert py.std.warnings.showwarning != showwarning
            assert not rec.list
            py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
            assert len(rec.list) == 1
            py.std.warnings.warn(DeprecationWarning("hello"))
            assert len(rec.list) == 2
            warn = rec.pop()
            assert str(warn.message) == "hello"
            l = rec.list
            rec.clear()
            assert len(rec.list) == 0
            assert l is rec.list
            pytest.raises(AssertionError, "rec.pop()")

        assert showwarning == py.std.warnings.showwarning

    def test_typechecking(self):
        from _pytest.recwarn import WarningsChecker
        with pytest.raises(TypeError):
            WarningsChecker(5)
        with pytest.raises(TypeError):
            WarningsChecker(('hi', RuntimeWarning))
        with pytest.raises(TypeError):
            WarningsChecker([DeprecationWarning, RuntimeWarning])

    def test_invalid_enter_exit(self):
        # wrap this test in WarningsRecorder to ensure warning state gets reset
        with WarningsRecorder():
            with pytest.raises(RuntimeError):
                rec = WarningsRecorder()
                rec.__exit__(None, None, None)  # can't exit before entering

            with pytest.raises(RuntimeError):
                rec = WarningsRecorder()
                with rec:
                    with rec:
                        pass  # can't enter twice


#
# ============ test pytest.deprecated_call() ==============
#

def dep(i):
    if i == 0:
        py.std.warnings.warn("is deprecated", DeprecationWarning)
    return 42


reg = {}


def dep_explicit(i):
    if i == 0:
        py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
                                      filename="hello", lineno=3)


class TestDeprecatedCall(object):
    def test_deprecated_call_raises(self):
        excinfo = pytest.raises(AssertionError,
                                "pytest.deprecated_call(dep, 3)")
        assert str(excinfo).find("did not produce") != -1

    def test_deprecated_call(self):
        pytest.deprecated_call(dep, 0)

    def test_deprecated_call_ret(self):
        ret = pytest.deprecated_call(dep, 0)
        assert ret == 42

    def test_deprecated_call_preserves(self):
        onceregistry = py.std.warnings.onceregistry.copy()
        filters = py.std.warnings.filters[:]
        warn = py.std.warnings.warn
        warn_explicit = py.std.warnings.warn_explicit
        self.test_deprecated_call_raises()
        self.test_deprecated_call()
        assert onceregistry == py.std.warnings.onceregistry
        assert filters == py.std.warnings.filters
        assert warn is py.std.warnings.warn
        assert warn_explicit is py.std.warnings.warn_explicit

    def test_deprecated_explicit_call_raises(self):
        pytest.raises(AssertionError,
                      "pytest.deprecated_call(dep_explicit, 3)")

    def test_deprecated_explicit_call(self):
        pytest.deprecated_call(dep_explicit, 0)
        pytest.deprecated_call(dep_explicit, 0)


class TestWarns(object):
    def test_strings(self):
        # different messages, b/c Python suppresses multiple identical warnings
        source1 = "warnings.warn('w1', RuntimeWarning)"
        source2 = "warnings.warn('w2', RuntimeWarning)"
        source3 = "warnings.warn('w3', RuntimeWarning)"
        pytest.warns(RuntimeWarning, source1)
        pytest.raises(pytest.fail.Exception,
                      lambda: pytest.warns(UserWarning, source2))
        pytest.warns(RuntimeWarning, source3)

    def test_function(self):
        pytest.warns(SyntaxWarning,
                     lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")

    def test_warning_tuple(self):
        pytest.warns((RuntimeWarning, SyntaxWarning),
                     lambda: warnings.warn('w1', RuntimeWarning))
        pytest.warns((RuntimeWarning, SyntaxWarning),
                     lambda: warnings.warn('w2', SyntaxWarning))
        pytest.raises(pytest.fail.Exception,
                      lambda: pytest.warns(
                          (RuntimeWarning, SyntaxWarning),
                          lambda: warnings.warn('w3', UserWarning)))

    def test_as_contextmanager(self):
        with pytest.warns(RuntimeWarning):
            warnings.warn("runtime", RuntimeWarning)

        with pytest.raises(pytest.fail.Exception):
            with pytest.warns(RuntimeWarning):
                warnings.warn("user", UserWarning)

        with pytest.raises(pytest.fail.Exception):
            with pytest.warns(UserWarning):
                warnings.warn("runtime", RuntimeWarning)

        with pytest.warns(UserWarning):
            warnings.warn("user", UserWarning)

    def test_record(self):
        with pytest.warns(UserWarning) as record:
            warnings.warn("user", UserWarning)

        assert len(record) == 1
        assert str(record[0].message) == "user"

    def test_record_only(self):
        with pytest.warns(None) as record:
            warnings.warn("user", UserWarning)
            warnings.warn("runtime", RuntimeWarning)

        assert len(record) == 2
        assert str(record[0].message) == "user"
        assert str(record[1].message) == "runtime"

    def test_double_test(self, testdir):
        """If a test is run again, the warning should still be raised"""
        testdir.makepyfile('''
            import pytest
            import warnings

            @pytest.mark.parametrize('run', [1, 2])
            def test(run):
                with pytest.warns(RuntimeWarning):
                    warnings.warn("runtime", RuntimeWarning)
        ''')
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(['*2 passed in*'])
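
# Minimal usage sketch of the two APIs exercised above (illustrative only):
def _example_recwarn_and_warns():
    with pytest.warns(UserWarning) as record:
        warnings.warn("careful", UserWarning)
    assert "careful" in str(record[0].message)
    assert pytest.deprecated_call(dep, 0) == 42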
mit
7,187,272,901,912,553,000
6,360,752,591,394,437,000
34.755435
82
0.600395
false
pranner/CMPUT410-Lab6-Django
v1/lib/python2.7/site-packages/django/utils/deconstruct.py
70
2066
from __future__ import absolute_import  # Avoid importing `importlib` from this package.

from importlib import import_module


def deconstructible(*args, **kwargs):
    """
    Class decorator that allows the decorated class to be serialized
    by the migrations subsystem.

    Accepts an optional kwarg `path` to specify the import path.
    """
    path = kwargs.pop('path', None)

    def decorator(klass):
        def __new__(cls, *args, **kwargs):
            # We capture the arguments to make returning them trivial
            obj = super(klass, cls).__new__(cls)
            obj._constructor_args = (args, kwargs)
            return obj

        def deconstruct(obj):
            """
            Returns a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Python 2/fallback version
            if path:
                module_name, _, name = path.rpartition('.')
            else:
                module_name = obj.__module__
                name = obj.__class__.__name__
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values"
                    % (name, module_name))
            return (
                path or '%s.%s' % (obj.__class__.__module__, name),
                obj._constructor_args[0],
                obj._constructor_args[1],
            )

        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct

        return klass

    if not args:
        return decorator
    return decorator(*args, **kwargs)
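
# Illustrative sketch (hypothetical class, not part of Django): a callable
# object made serializable for migrations via the decorator above.
@deconstructible
class _ExampleMultiplier(object):
    def __init__(self, factor):
        self.factor = factor

    def __call__(self, value):
        return value * self.factor

# _ExampleMultiplier(3).deconstruct() returns something like:
#   ('<this module>._ExampleMultiplier', (3,), {})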
apache-2.0
4,300,048,876,210,967,600
-7,219,659,956,629,938,000
35.892857
97
0.53969
false
nyalldawson/QGIS
tests/src/python/test_qgsserver_accesscontrol_wfs.py
15
13735
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsServer. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Stephane Brunner' __date__ = '28/08/2015' __copyright__ = 'Copyright 2015, The QGIS Project' print('CTEST_FULL_OUTPUT') from qgis.testing import unittest import urllib.request import urllib.parse import urllib.error from test_qgsserver_accesscontrol import TestQgsServerAccessControl, XML_NS class TestQgsServerAccessControlWFS(TestQgsServerAccessControl): def test_wfs_getcapabilities(self): query_string = "&".join(["%s=%s" % i for i in list({ "MAP": urllib.parse.quote(self.projectPath), "SERVICE": "WFS", "VERSION": "1.0.0", "REQUEST": "GetCapabilities" }.items())]) response, headers = self._get_fullaccess(query_string) self.assertTrue( str(response).find("<Name>Hello</Name>") != -1, "No Hello layer in WFS/GetCapabilities\n%s" % response) self.assertTrue( str(response).find("<Name>Hello_OnOff</Name>") != -1, "No Hello layer in WFS/GetCapabilities\n%s" % response) self.assertTrue( str(response).find("<Name>Country</Name>") != -1, "No Country layer in WFS/GetCapabilities\n%s" % response) response, headers = self._get_restricted(query_string) self.assertTrue( str(response).find("<Name>Hello</Name>") != -1, "No Hello layer in WFS/GetCapabilities\n%s" % response) self.assertFalse( str(response).find("<Name>Country</Name>") != -1, "Unexpected Country layer in WFS/GetCapabilities\n%s" % response) def test_wfs_describefeaturetype_hello(self): query_string = "&".join(["%s=%s" % i for i in list({ "MAP": urllib.parse.quote(self.projectPath), "SERVICE": "WFS", "VERSION": "1.0.0", "REQUEST": "DescribeFeatureType", "TYPENAME": "Hello" }.items())]) response, headers = self._get_fullaccess(query_string) self.assertTrue( str(response).find('name="Hello"') != -1, "No Hello layer in DescribeFeatureType\n%s" % response) response, headers = self._get_restricted(query_string) self.assertTrue( str(response).find('name="Hello"') != -1, "No Hello layer in DescribeFeatureType\n%s" % response) def test_wfs_describefeaturetype_country(self): query_string = "&".join(["%s=%s" % i for i in list({ "MAP": urllib.parse.quote(self.projectPath), "SERVICE": "WFS", "VERSION": "1.0.0", "REQUEST": "DescribeFeatureType", "TYPENAME": "Country" }.items())]) response, headers = self._get_fullaccess(query_string) self.assertTrue( str(response).find('name="Country"') != -1, "No Country layer in DescribeFeatureType\n%s" % response) response, headers = self._get_restricted(query_string) self.assertFalse( str(response).find('name="Country"') != -1, "Unexpected Country layer in DescribeFeatureType\n%s" % response) def test_wfs_getfeature_hello(self): data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>1</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>1</qgs:pk>") != -1, "No result in GetFeature\n%s" % response) self.assertTrue( str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok "No color in result of GetFeature\n%s" % 
response) response, headers = self._post_restricted(data) self.assertTrue( str(response).find("<qgs:pk>1</qgs:pk>") != -1, "No result in GetFeature\n%s" % response) self.assertFalse( str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok "Unexpected color in result of GetFeature\n%s" % response) self.assertFalse( str(response).find("<qgs:color>NULL</qgs:color>") != -1, # spellok "Unexpected color NULL in result of GetFeature\n%s" % response) def test_wfs_getfeature_hello2(self): data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>2</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>2</qgs:pk>") != -1, "No result in GetFeature\n%s" % response) response, headers = self._post_restricted(data) self.assertFalse( str(response).find("<qgs:pk>2</qgs:pk>") != -1, "Unexpected result in GetFeature\n%s" % response) def test_wfs_getfeature_country(self): data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello_OnOff" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>1</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>1</qgs:pk>") != -1, "No result in GetFeature\n%s" % response) response, headers = self._post_restricted(data) self.assertFalse( str(response).find("<qgs:pk>1</qgs:pk>") != -1, "Unexpected result in GetFeature\n%s" % response) # spellok # # Subset String # # def test_wfs_getfeature_subsetstring(self): data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>1</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>") != -1, "No result in GetFeature\n%s" % response) self.assertTrue( str(response).find("<qgs:pk>1</qgs:pk>") != -1, "No good result in GetFeature\n%s" % response) response, headers = self._post_restricted(data) self.assertTrue( str(response).find("<qgs:pk>") != -1, "No result in GetFeature\n%s" % response) self.assertTrue( str(response).find("<qgs:pk>1</qgs:pk>") != -1, "No good result in GetFeature\n%s" % response) def test_wfs_getfeature_subsetstring2(self): data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>2</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>") != -1, 
"No result in GetFeature\n%s" % response) self.assertTrue( str(response).find("<qgs:pk>2</qgs:pk>") != -1, "No good result in GetFeature\n%s" % response) response, headers = self._post_restricted(data) self.assertFalse( str(response).find("<qgs:pk>") != -1, "Unexpected result in GetFeature\n%s" % response) def test_wfs_getfeature_project_subsetstring(self): """Tests access control with a subset string already applied to a layer in a project 'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)" This test checks for retrieving a feature which should be available in with/without access control """ data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>7</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) # should be one result response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>") != -1, "No result in GetFeature\n%s" % response) self.assertTrue( str(response).find("<qgs:pk>7</qgs:pk>") != -1, "Feature with pkuid=7 not found in GetFeature\n%s" % response) response, headers = self._post_restricted(data) self.assertTrue( str(response).find("<qgs:pk>") != -1, "No result in GetFeature\n%s" % response) self.assertTrue( str(response).find("<qgs:pk>7</qgs:pk>") != -1, "Feature with pkuid=7 not found in GetFeature, has been incorrectly filtered out by access controls\n%s" % response) def test_wfs_getfeature_project_subsetstring2(self): """Tests access control with a subset string already applied to a layer in a project 'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)" This test checks for a feature which should be filtered out by access controls """ data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>8</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) # should be one result response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>") != -1, "No result in GetFeature\n%s" % response) self.assertTrue( str(response).find("<qgs:pk>8</qgs:pk>") != -1, "Feature with pkuid=8 not found in GetFeature\n%s" % response) response, headers = self._post_restricted(data) self.assertFalse( str(response).find("<qgs:pk>") != -1, "Feature with pkuid=8 was found in GetFeature, but should have been filtered out by access controls\n%s" % response) def test_wfs_getfeature_project_subsetstring3(self): """Tests access control with a subset string already applied to a layer in a project 'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)" This test checks for a features which should be filtered out by project subsetStrings. 
For example, pkuid 6 passes the access control checks, but should not be shown because of project layer subsetString """ data = """<?xml version="1.0" encoding="UTF-8"?> <wfs:GetFeature {xml_ns}> <wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml"> <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo> <ogc:PropertyName>pkuid</ogc:PropertyName> <ogc:Literal>6</ogc:Literal> </ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS) # should be no results, since pkuid 6 should be filtered out by the project subsetString response, headers = self._post_fullaccess(data) self.assertTrue( str(response).find("<qgs:pk>") == -1, "Project based layer subsetString not respected in GetFeature\n%s" % response) response, headers = self._post_restricted(data) self.assertFalse( str(response).find("<qgs:pk>") != -1, "Project based layer subsetString not respected in GetFeature with restricted access\n%s" % response) if __name__ == "__main__": unittest.main()
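# A minimal helper sketch: the GetFeature payloads above repeat one XML
# skeleton in which only the typeName and the pkuid literal vary, so a
# builder like the following could remove that duplication. It is
# illustrative, not part of the original test suite; XML_NS is the
# namespace string defined earlier in this module.
def build_getfeature_request(type_name, pkuid):
    """Return a WFS GetFeature POST body filtering on a single pkuid."""
    return ("""<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="{type_name}" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>{pkuid}</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>"""
            .format(xml_ns=XML_NS, type_name=type_name, pkuid=pkuid))
# e.g. data = build_getfeature_request("Hello_Project_SubsetString", 7)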
gpl-2.0
-505,077,531,454,592,640
2,395,495,797,842,059,000
46.525952
128
0.601165
false
lxn2/mxnet
example/rcnn/demo.py
13
5637
import argparse import os import cv2 import mxnet as mx import numpy as np from rcnn.logger import logger from rcnn.config import config from rcnn.symbol import get_vgg_test, get_vgg_rpn_test from rcnn.io.image import resize, transform from rcnn.core.tester import Predictor, im_detect, im_proposal, vis_all_detection, draw_all_detection from rcnn.utils.load_model import load_param from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper CLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') config.TEST.HAS_RPN = True SHORT_SIDE = config.SCALES[0][0] LONG_SIDE = config.SCALES[0][1] PIXEL_MEANS = config.PIXEL_MEANS DATA_NAMES = ['data', 'im_info'] LABEL_NAMES = None DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))] LABEL_SHAPES = None # visualization CONF_THRESH = 0.7 NMS_THRESH = 0.3 nms = py_nms_wrapper(NMS_THRESH) def get_net(symbol, prefix, epoch, ctx): arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True) # infer shape data_shape_dict = dict(DATA_SHAPES) arg_names, aux_names = symbol.list_arguments(), symbol.list_auxiliary_states() arg_shape, _, aux_shape = symbol.infer_shape(**data_shape_dict) arg_shape_dict = dict(zip(arg_names, arg_shape)) aux_shape_dict = dict(zip(aux_names, aux_shape)) # check shapes for k in symbol.list_arguments(): if k in data_shape_dict or 'label' in k: continue assert k in arg_params, k + ' not initialized' assert arg_params[k].shape == arg_shape_dict[k], \ 'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape) for k in symbol.list_auxiliary_states(): assert k in aux_params, k + ' not initialized' assert aux_params[k].shape == aux_shape_dict[k], \ 'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape) predictor = Predictor(symbol, DATA_NAMES, LABEL_NAMES, context=ctx, provide_data=DATA_SHAPES, provide_label=LABEL_SHAPES, arg_params=arg_params, aux_params=aux_params) return predictor def generate_batch(im): """ preprocess image, return batch :param im: cv2.imread returns [height, width, channel] in BGR :return: data_batch: MXNet input batch data_names: names in data_batch im_scale: float number """ im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE) im_array = transform(im_array, PIXEL_MEANS) im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32) data = [mx.nd.array(im_array), mx.nd.array(im_info)] data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)] data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None) return data_batch, DATA_NAMES, im_scale def demo_net(predictor, image_name, vis=False): """ generate data_batch -> im_detect -> post process :param predictor: Predictor :param image_name: image name :param vis: if True, display the detections; otherwise save them to a new result image :return: None """ assert os.path.exists(image_name), image_name + ' not found' im = cv2.imread(image_name) data_batch, data_names, im_scale = generate_batch(im) scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale) all_boxes = [[] for _ in CLASSES] for cls in CLASSES: cls_ind = CLASSES.index(cls) cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)] cls_scores = scores[:, cls_ind, np.newaxis] keep = np.where(cls_scores >=
CONF_THRESH)[0] dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :] keep = nms(dets) all_boxes[cls_ind] = dets[keep, :] boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))] # print results logger.info('---class---') logger.info('[[x1, y1, x2, y2, confidence]]') for ind, boxes in enumerate(boxes_this_image): if len(boxes) > 0: logger.info('---%s---' % CLASSES[ind]) logger.info('%s' % boxes) if vis: vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale) else: result_file = image_name.replace('.', '_result.') logger.info('results saved to %s' % result_file) im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale) cv2.imwrite(result_file, im) def parse_args(): parser = argparse.ArgumentParser(description='Demonstrate a Faster R-CNN network') parser.add_argument('--image', help='custom image', type=str) parser.add_argument('--prefix', help='saved model prefix', type=str) parser.add_argument('--epoch', help='epoch of pretrained model', type=int) parser.add_argument('--gpu', help='GPU device to use', default=0, type=int) parser.add_argument('--vis', help='display result', action='store_true') args = parser.parse_args() return args def main(): args = parse_args() ctx = mx.gpu(args.gpu) symbol = get_vgg_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS) predictor = get_net(symbol, args.prefix, args.epoch, ctx) demo_net(predictor, args.image, args.vis) if __name__ == '__main__': main()
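# An illustrative, minimal pure-numpy version of the greedy non-maximum
# suppression performed by the `nms` callable above (py_nms_wrapper). It is a
# sketch for reference, not the implementation the demo imports. `dets` is an
# (N, 5) array of [x1, y1, x2, y2, score] rows, matching the
# np.hstack((cls_boxes, cls_scores)) layout used in demo_net; it relies on
# the module-level `import numpy as np`.
def nms_sketch(dets, thresh):
    """Greedily keep the highest-scoring boxes, dropping overlapping ones."""
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the current box with every remaining box
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # keep only the boxes whose overlap stays below the threshold
        order = order[1:][iou <= thresh]
    return keep
# e.g. keep = nms_sketch(dets, NMS_THRESH)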
apache-2.0
-5,829,092,192,247,430,000
735,478,366,764,272,800
38.41958
123
0.636331
false
hjoliver/cylc
tests/unit/tui/test_data.py
1
1331
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. # Copyright (C) NIWA & British Crown (Met Office) & Contributors. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import cylc.flow.tui.data from cylc.flow.tui.data import generate_mutation def test_generate_mutation(monkeypatch): """It should produce a GraphQL mutation with the args filled in.""" arg_types = { 'foo': 'String!', 'bar': '[Int]' } monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types) assert generate_mutation( 'my_mutation', ['foo', 'bar'] ) == ''' mutation($foo: String!, $bar: [Int]) { my_mutation (foos: $foo, bars: $bar) { result } } '''
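# The generated string is an ordinary GraphQL document. As an illustrative
# sketch, a client would execute it together with a matching variables
# mapping (the `client.execute` call below is hypothetical, not a real
# cylc API):
#
#     mutation = generate_mutation('my_mutation', ['foo', 'bar'])
#     variables = {'foo': 'abc', 'bar': [1, 2]}
#     client.execute(mutation, variables)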
gpl-3.0
-4,613,551,316,741,223,000
3,373,400,610,894,565,400
34.026316
72
0.664162
false
fosfataza/protwis
construct/migrations/0002_auto_20180117_1457.py
3
1640
# Generated by Django 2.0.1 on 2018-01-17 13:57 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('ligand', '0001_initial'), ('construct', '0001_initial'), ] operations = [ migrations.AddField( model_name='crystallizationligandconc', name='ligand', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.Ligand'), ), migrations.AddField( model_name='crystallizationligandconc', name='ligand_role', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.LigandRole'), ), migrations.AddField( model_name='crystallization', name='chemical_lists', field=models.ManyToManyField(to='construct.ChemicalList'), ), migrations.AddField( model_name='crystallization', name='crystal_method', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationMethods'), ), migrations.AddField( model_name='crystallization', name='crystal_type', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationTypes'), ), migrations.AddField( model_name='crystallization', name='ligands', field=models.ManyToManyField(to='construct.CrystallizationLigandConc'), ), ]
apache-2.0
307,718,519,941,981,760
-7,276,302,379,193,729,000
33.893617
131
0.615854
false
mcocdawc/chemopt
src/chemopt/utilities/_print_versions.py
2
4591
# The following code was taken from the pandas project and modified. # http://pandas.pydata.org/ import codecs import importlib import locale import os import platform import struct import sys def get_sys_info(): "Returns system information as a list of (key, value) tuples" blob = [] # commit = cc._git_hash # blob.append(('commit', commit)) try: (sysname, nodename, release, version, machine, processor) = platform.uname() blob.extend([ ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]), ("python-bits", struct.calcsize("P") * 8), ("OS", "%s" % (sysname)), ("OS-release", "%s" % (release)), # ("Version", "%s" % (version)), ("machine", "%s" % (machine)), ("processor", "%s" % (processor)), # ("byteorder", "%s" % sys.byteorder), ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")), ("LANG", "%s" % os.environ.get('LANG', "None")), ("LOCALE", "%s.%s" % locale.getlocale()), ]) except Exception: pass return blob def show_versions(as_json=False): sys_info = get_sys_info() deps = [ # (MODULE_NAME, f(mod) -> mod version) ("chemcoord", lambda mod: mod.__version__), ("numpy", lambda mod: mod.version.version), ("scipy", lambda mod: mod.version.version), ("pandas", lambda mod: mod.__version__), ("numba", lambda mod: mod.__version__), ("sortedcontainers", lambda mod: mod.__version__), ("sympy", lambda mod: mod.__version__), ("pytest", lambda mod: mod.__version__), ("pip", lambda mod: mod.__version__), ("setuptools", lambda mod: mod.__version__), ("IPython", lambda mod: mod.__version__), ("sphinx", lambda mod: mod.__version__), # ("tables", lambda mod: mod.__version__), # ("matplotlib", lambda mod: mod.__version__), # ("Cython", lambda mod: mod.__version__), # ("xarray", lambda mod: mod.__version__), # ("patsy", lambda mod: mod.__version__), # ("dateutil", lambda mod: mod.__version__), # ("pytz", lambda mod: mod.VERSION), # ("blosc", lambda mod: mod.__version__), # ("bottleneck", lambda mod: mod.__version__), # ("numexpr", lambda mod: mod.__version__), # ("feather", lambda mod: mod.__version__), # ("openpyxl", lambda mod: mod.__version__), # ("xlrd", lambda mod: mod.__VERSION__), # ("xlwt", lambda mod: mod.__VERSION__), # ("xlsxwriter", lambda mod: mod.__version__), # ("lxml", lambda mod: mod.etree.__version__), # ("bs4", lambda mod: mod.__version__), # ("html5lib", lambda mod: mod.__version__), # ("sqlalchemy", lambda mod: mod.__version__), # ("pymysql", lambda mod: mod.__version__), # ("psycopg2", lambda mod: mod.__version__), # ("jinja2", lambda mod: mod.__version__), # ("s3fs", lambda mod: mod.__version__), # ("pandas_gbq", lambda mod: mod.__version__), # ("pandas_datareader", lambda mod: mod.__version__) ] deps_blob = list() for (modname, ver_f) in deps: try: if modname in sys.modules: mod = sys.modules[modname] else: mod = importlib.import_module(modname) ver = ver_f(mod) deps_blob.append((modname, ver)) except Exception: deps_blob.append((modname, None)) if (as_json): try: import json except Exception: import simplejson as json j = dict(system=dict(sys_info), dependencies=dict(deps_blob)) if as_json is True: print(j) else: with codecs.open(as_json, "wb", encoding='utf8') as f: json.dump(j, f, indent=2) else: print("\nINSTALLED VERSIONS") print("------------------") for k, stat in sys_info: print("%s: %s" % (k, stat)) print("") for k, stat in deps_blob: print("%s: %s" % (k, stat)) def main(): from optparse import OptionParser parser = OptionParser() parser.add_option("-j", "--json", metavar="FILE", nargs=1, help="Save output as JSON into file, pass in " "'-' to output to stdout") options = parser.parse_args()[0] if
options.json == "-": options.json = True show_versions(as_json=options.json) return 0 if __name__ == "__main__": sys.exit(main())
lgpl-3.0
1,063,523,426,152,272,400
-8,525,865,971,111,888,000
31.104895
69
0.507297
false
jmschrei/scikit-learn
sklearn/cluster/k_means_.py
30
55793
"""K-means clustering""" # Authors: Gael Varoquaux <gael.varoquaux@normalesup.org> # Thomas Rueckstiess <ruecksti@in.tum.de> # James Bergstra <james.bergstra@umontreal.ca> # Jan Schlueter <scikit-learn@jan-schlueter.de> # Nelle Varoquaux # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Robert Layton <robertlayton@gmail.com> # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, ClusterMixin, TransformerMixin from ..metrics.pairwise import euclidean_distances from ..utils.extmath import row_norms, squared_norm from ..utils.sparsefuncs_fast import assign_rows_csr from ..utils.sparsefuncs import mean_variance_axis from ..utils.fixes import astype from ..utils import check_array from ..utils import check_random_state from ..utils import as_float_array from ..utils import gen_batches from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES from ..utils.random import choice from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..externals.six import string_types from . import _k_means ############################################################################### # Initialization heuristic def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None): """Init n_clusters seeds according to k-means++ Parameters ----------- X: array or sparse matrix, shape (n_samples, n_features) The data to pick seeds for. To avoid memory copy, the input data should be double precision (dtype=np.float64). n_clusters: integer The number of seeds to choose x_squared_norms: array, shape (n_samples,) Squared Euclidean norm of each data point. random_state: numpy.RandomState The generator used to initialize the centers. n_local_trials: integer, optional The number of seeding trials for each center (except the first), of which the one reducing inertia the most is greedily chosen. Set to None to make the number of trials depend logarithmically on the number of seeds (2+log(k)); this is the default. Notes ----- Selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. see: Arthur, D. and Vassilvitskii, S. "k-means++: the advantages of careful seeding". ACM-SIAM symposium on Discrete algorithms. 2007 Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip, which is the implementation used in the aforementioned paper. """ n_samples, n_features = X.shape centers = np.empty((n_clusters, n_features)) assert x_squared_norms is not None, 'x_squared_norms None in _k_init' # Set the number of local seeding trials if none is given if n_local_trials is None: # This is what Arthur/Vassilvitskii tried, but did not report # specific results for other than mentioning in the conclusion # that it helped. 
n_local_trials = 2 + int(np.log(n_clusters)) # Pick first center randomly center_id = random_state.randint(n_samples) if sp.issparse(X): centers[0] = X[center_id].toarray() else: centers[0] = X[center_id] # Initialize list of closest distances and calculate current potential closest_dist_sq = euclidean_distances( centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True) current_pot = closest_dist_sq.sum() # Pick the remaining n_clusters-1 points for c in range(1, n_clusters): # Choose center candidates by sampling with probability proportional # to the squared distance to the closest existing center rand_vals = random_state.random_sample(n_local_trials) * current_pot candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals) # Compute distances to center candidates distance_to_candidates = euclidean_distances( X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True) # Decide which candidate is the best best_candidate = None best_pot = None best_dist_sq = None for trial in range(n_local_trials): # Compute potential when including center candidate new_dist_sq = np.minimum(closest_dist_sq, distance_to_candidates[trial]) new_pot = new_dist_sq.sum() # Store result if it is the best local trial so far if (best_candidate is None) or (new_pot < best_pot): best_candidate = candidate_ids[trial] best_pot = new_pot best_dist_sq = new_dist_sq # Permanently add best center candidate found in local tries if sp.issparse(X): centers[c] = X[best_candidate].toarray() else: centers[c] = X[best_candidate] current_pot = best_pot closest_dist_sq = best_dist_sq return centers ############################################################################### # K-means batch estimation by EM (expectation maximization) def _validate_center_shape(X, n_centers, centers): """Check if centers is compatible with X and n_centers""" if len(centers) != n_centers: raise ValueError('The shape of the initial centers (%s) ' 'does not match the number of clusters %i' % (centers.shape, n_centers)) if centers.shape[1] != X.shape[1]: raise ValueError( "The number of features of the initial centers %s " "does not match the number of features of the data %s." % (centers.shape[1], X.shape[1])) def _tolerance(X, tol): """Return a tolerance which is independent of the dataset""" if sp.issparse(X): variances = mean_variance_axis(X, axis=0)[1] else: variances = np.var(X, axis=0) return np.mean(variances) * tol def k_means(X, n_clusters, init='k-means++', precompute_distances='auto', n_init=10, max_iter=300, verbose=False, tol=1e-4, random_state=None, copy_x=True, n_jobs=1, return_n_iter=False): """K-means clustering algorithm. Read more in the :ref:`User Guide <k_means>`. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The observations to cluster. n_clusters : int The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init : {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 
'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. If a callable is passed, it should take arguments X, k and a random state and return an initialization. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, optional The relative increment in the results before declaring convergence. verbose : boolean, optional Verbosity mode. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. copy_x : boolean, optional When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. return_n_iter : bool, optional Whether or not to return the number of iterations. Returns ------- centroid : float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label : integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia : float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). best_n_iter: int Number of iterations corresponding to the best results. Returned only if `return_n_iter` is set to True. """ if n_init <= 0: raise ValueError("Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError('Number of iterations should be a positive number,' ' got %d instead' % max_iter) best_inertia = np.infty X = as_float_array(X, copy=copy_x) tol = _tolerance(X, tol) # If the distances are precomputed every job will create a matrix of shape # (n_clusters, n_samples). To stop KMeans from eating up memory we only # activate this if the created matrix is guaranteed to be under 100MB. 12 # million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto': n_samples = X.shape[0] precompute_distances = (n_clusters * n_samples) < 12e6 elif isinstance(precompute_distances, bool): pass else: raise ValueError("precompute_distances should be 'auto' or True/False" ", but a value of %r was passed" % precompute_distances) # subtract mean of X for more accurate distance computations if not sp.issparse(X) or hasattr(init, '__array__'): X_mean = X.mean(axis=0) if not sp.issparse(X): # The copy was already done above X -= X_mean if hasattr(init, '__array__'): init = check_array(init, dtype=np.float64, copy=True) _validate_center_shape(X, n_clusters, init) init -= X_mean if n_init != 1: warnings.warn( 'Explicit initial center position passed: ' 'performing only one init in k-means instead of n_init=%d' % n_init, RuntimeWarning, stacklevel=2) n_init = 1 # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) best_labels, best_inertia, best_centers = None, None, None if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # run a k-means once labels, inertia, centers, n_iter_ = _kmeans_single( X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, precompute_distances=precompute_distances, tol=tol, x_squared_norms=x_squared_norms, random_state=random_state) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia best_n_iter = n_iter_ else: # parallelisation of k-means runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, tol=tol, precompute_distances=precompute_distances, x_squared_norms=x_squared_norms, # Change seed to ensure variety random_state=seed) for seed in seeds) # Get results with the lowest inertia labels, inertia, centers, n_iters = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_n_iter = n_iters[best] if not sp.issparse(X): if not copy_x: X += X_mean best_centers += X_mean if return_n_iter: return best_centers, best_labels, best_inertia, best_n_iter else: return best_centers, best_labels, best_inertia def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300, init='k-means++', verbose=False, random_state=None, tol=1e-4, precompute_distances=True): """A single run of k-means, assumes preparation completed prior. Parameters ---------- X: array-like of floats, shape (n_samples, n_features) The observations to cluster. n_clusters: int The number of clusters to form as well as the number of centroids to generate. max_iter: int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. init: {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (k, p) and gives the initial centers. If a callable is passed, it should take arguments X, k and a random state and return an initialization.
tol: float, optional The relative increment in the results before declaring convergence. verbose: boolean, optional Verbosity mode x_squared_norms: array Precomputed x_squared_norms. precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Returns ------- centroid: float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label: integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia: float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). n_iter : int Number of iterations run. """ random_state = check_random_state(random_state) best_labels, best_inertia, best_centers = None, None, None # init centers = _init_centroids(X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms) if verbose: print("Initialization complete") # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties distances = np.zeros(shape=(X.shape[0],), dtype=np.float64) # iterations for i in range(max_iter): centers_old = centers.copy() # labels assignment is also called the E-step of EM labels, inertia = \ _labels_inertia(X, x_squared_norms, centers, precompute_distances=precompute_distances, distances=distances) # computation of the means is also called the M-step of EM if sp.issparse(X): centers = _k_means._centers_sparse(X, labels, n_clusters, distances) else: centers = _k_means._centers_dense(X, labels, n_clusters, distances) if verbose: print("Iteration %2d, inertia %.3f" % (i, inertia)) if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia shift = squared_norm(centers_old - centers) if shift <= tol: if verbose: print("Converged at iteration %d" % i) break if shift > 0: # rerun E-step in case of non-convergence so that predicted labels # match cluster centers best_labels, best_inertia = \ _labels_inertia(X, x_squared_norms, best_centers, precompute_distances=precompute_distances, distances=distances) return best_labels, best_inertia, best_centers, i + 1 def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances): """Compute labels and inertia using a full distance matrix. This will overwrite the 'distances' array in-place. Parameters ---------- X : numpy array, shape (n_sample, n_features) Input data. x_squared_norms : numpy array, shape (n_samples,) Precomputed squared norms of X. centers : numpy array, shape (n_clusters, n_features) Cluster centers which data is assigned to. distances : numpy array, shape (n_samples,) Pre-allocated array in which distances are stored. Returns ------- labels : numpy array, dtype=np.int, shape (n_samples,) Indices of clusters that samples are assigned to. inertia : float Sum of distances of samples to their closest cluster center. 
""" n_samples = X.shape[0] k = centers.shape[0] all_distances = euclidean_distances(centers, X, x_squared_norms, squared=True) labels = np.empty(n_samples, dtype=np.int32) labels.fill(-1) mindist = np.empty(n_samples) mindist.fill(np.infty) for center_id in range(k): dist = all_distances[center_id] labels[dist < mindist] = center_id mindist = np.minimum(dist, mindist) if n_samples == distances.shape[0]: # distances will be changed in-place distances[:] = mindist inertia = mindist.sum() return labels, inertia def _labels_inertia(X, x_squared_norms, centers, precompute_distances=True, distances=None): """E step of the K-means EM algorithm. Compute the labels and the inertia of the given samples and centers. This will compute the distances in-place. Parameters ---------- X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features) The input samples to assign to the labels. x_squared_norms: array, shape (n_samples,) Precomputed squared euclidean norm of each data point, to speed up computations. centers: float64 array, shape (k, n_features) The cluster centers. precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). distances: float64 array, shape (n_samples,) Pre-allocated array to be filled in with each sample's distance to the closest center. Returns ------- labels: int array of shape(n) The resulting assignment inertia : float Sum of distances of samples to their closest cluster center. """ n_samples = X.shape[0] # set the default value of centers to -1 to be able to detect any anomaly # easily labels = -np.ones(n_samples, np.int32) if distances is None: distances = np.zeros(shape=(0,), dtype=np.float64) # distances will be changed in-place if sp.issparse(X): inertia = _k_means._assign_labels_csr( X, x_squared_norms, centers, labels, distances=distances) else: if precompute_distances: return _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances) inertia = _k_means._assign_labels_array( X, x_squared_norms, centers, labels, distances=distances) return labels, inertia def _init_centroids(X, k, init, random_state=None, x_squared_norms=None, init_size=None): """Compute the initial centroids Parameters ---------- X: array, shape (n_samples, n_features) k: int number of centroids init: {'k-means++', 'random' or ndarray or callable} optional Method for initialization random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. x_squared_norms: array, shape (n_samples,), optional Squared euclidean norm of each data point. Pass it if you have it at hands already to avoid it being recomputed here. Default: None init_size : int, optional Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than k. Returns ------- centers: array, shape(k, n_features) """ random_state = check_random_state(random_state) n_samples = X.shape[0] if x_squared_norms is None: x_squared_norms = row_norms(X, squared=True) if init_size is not None and init_size < n_samples: if init_size < k: warnings.warn( "init_size=%d should be larger than k=%d. 
" "Setting it to 3*k" % (init_size, k), RuntimeWarning, stacklevel=2) init_size = 3 * k init_indices = random_state.random_integers( 0, n_samples - 1, init_size) X = X[init_indices] x_squared_norms = x_squared_norms[init_indices] n_samples = X.shape[0] elif n_samples < k: raise ValueError( "n_samples=%d should be larger than k=%d" % (n_samples, k)) if isinstance(init, string_types) and init == 'k-means++': centers = _k_init(X, k, random_state=random_state, x_squared_norms=x_squared_norms) elif isinstance(init, string_types) and init == 'random': seeds = random_state.permutation(n_samples)[:k] centers = X[seeds] elif hasattr(init, '__array__'): centers = init elif callable(init): centers = init(X, k, random_state=random_state) else: raise ValueError("the init parameter for the k-means should " "be 'k-means++' or 'random' or an ndarray, " "'%s' (type '%s') was passed." % (init, type(init))) if sp.issparse(centers): centers = centers.toarray() _validate_center_shape(X, k, centers) return centers class KMeans(BaseEstimator, ClusterMixin, TransformerMixin): """K-Means clustering Read more in the :ref:`User Guide <k_means>`. Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. max_iter : int, default: 300 Maximum number of iterations of the k-means algorithm for a single run. n_init : int, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init : {'k-means++', 'random' or an ndarray} Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, default: 1e-4 Relative tolerance with regards to inertia to declare convergence n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. verbose : int, default 0 Verbosity mode. copy_x : boolean, default True When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. 
Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers labels_ : Labels of each point inertia_ : float Sum of distances of samples to their closest cluster center. Notes ------ The k-means problem is solved using Lloyd's algorithm. The average complexity is given by O(k n T), where n is the number of samples and T is the number of iterations. The worst case complexity is given by O(n^(k+2/p)) with n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii, 'How slow is the k-means method?' SoCG2006) In practice, the k-means algorithm is very fast (one of the fastest clustering algorithms available), but it can fall into local minima. That's why it can be useful to restart it several times. See also -------- MiniBatchKMeans: Alternative online implementation that does incremental updates of the centers positions using mini-batches. For large scale learning (say n_samples > 10k) MiniBatchKMeans is probably much faster than the default batch implementation. """ def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300, tol=1e-4, precompute_distances='auto', verbose=0, random_state=None, copy_x=True, n_jobs=1): self.n_clusters = n_clusters self.init = init self.max_iter = max_iter self.tol = tol self.precompute_distances = precompute_distances self.n_init = n_init self.verbose = verbose self.random_state = random_state self.copy_x = copy_x self.n_jobs = n_jobs def _check_fit_data(self, X): """Verify that the number of samples given is larger than k""" X = check_array(X, accept_sparse='csr', dtype=np.float64) if X.shape[0] < self.n_clusters: raise ValueError("n_samples=%d should be >= n_clusters=%d" % ( X.shape[0], self.n_clusters)) return X def _check_test_data(self, X): X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES, warn_on_dtype=True) n_samples, n_features = X.shape expected_n_features = self.cluster_centers_.shape[1] if not n_features == expected_n_features: raise ValueError("Incorrect number of features. " "Got %d features, expected %d" % ( n_features, expected_n_features)) return X def fit(self, X, y=None): """Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) """ random_state = check_random_state(self.random_state) X = self._check_fit_data(X) self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \ k_means( X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, return_n_iter=True, precompute_distances=self.precompute_distances, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs) return self def fit_predict(self, X, y=None): """Compute cluster centers and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X). """ return self.fit(X).labels_ def fit_transform(self, X, y=None): """Compute clustering and transform X to cluster-distance space. Equivalent to fit(X).transform(X), but more efficiently implemented. """ # Currently, this just skips a copy of the data if it is not in # np.array or CSR format already. # XXX This skips _check_test_data, which may change the dtype; # we should refactor the input validation. X = self._check_fit_data(X) return self.fit(X)._transform(X) def transform(self, X, y=None): """Transform X to a cluster-distance space. In the new space, each dimension is the distance to the cluster centers. 
Note that even if X is sparse, the array returned by `transform` will typically be dense. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to transform. Returns ------- X_new : array, shape [n_samples, k] X transformed in the new space. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._transform(X) def _transform(self, X): """guts of transform method; no input validation""" return euclidean_distances(X, self.cluster_centers_) def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0] def score(self, X, y=None): """Opposite of the value of X on the K-means objective. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data. Returns ------- score : float Opposite of the value of X on the K-means objective. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1] def _mini_batch_step(X, x_squared_norms, centers, counts, old_center_buffer, compute_squared_diff, distances, random_reassign=False, random_state=None, reassignment_ratio=.01, verbose=False): """Incremental update of the centers for the Minibatch K-Means algorithm. Parameters ---------- X : array, shape (n_samples, n_features) The original data array. x_squared_norms : array, shape (n_samples,) Squared euclidean norm of each data point. centers : array, shape (k, n_features) The cluster centers. This array is MODIFIED IN PLACE counts : array, shape (k,) The vector in which we keep track of the numbers of elements in a cluster. This array is MODIFIED IN PLACE distances : array, dtype float64, shape (n_samples), optional If not None, should be a pre-allocated array that will be used to store the distances of each sample to its closest center. May not be None when random_reassign is True. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. random_reassign : boolean, optional If True, centers with very low counts are randomly reassigned to observations. reassignment_ratio : float, optional Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more likely to be reassigned, which means that the model will take longer to converge, but should converge in a better clustering. verbose : bool, optional, default False Controls the verbosity. compute_squared_diff : bool If set to False, the squared diff computation is skipped. old_center_buffer : int Copy of old centers for monitoring convergence. Returns ------- inertia : float Sum of distances of samples to their closest cluster center. squared_diff : numpy array, shape (n_clusters,) Squared distances between previous and updated cluster centers. 
""" # Perform label assignment to nearest centers nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers, distances=distances) if random_reassign and reassignment_ratio > 0: random_state = check_random_state(random_state) # Reassign clusters that have very low counts to_reassign = counts < reassignment_ratio * counts.max() # pick at most .5 * batch_size samples as new centers if to_reassign.sum() > .5 * X.shape[0]: indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):] to_reassign[indices_dont_reassign] = False n_reassigns = to_reassign.sum() if n_reassigns: # Pick new clusters amongst observations with uniform probability new_centers = choice(X.shape[0], replace=False, size=n_reassigns, random_state=random_state) if verbose: print("[MiniBatchKMeans] Reassigning %i cluster centers." % n_reassigns) if sp.issparse(X) and not sp.issparse(centers): assign_rows_csr(X, astype(new_centers, np.intp), astype(np.where(to_reassign)[0], np.intp), centers) else: centers[to_reassign] = X[new_centers] # reset counts of reassigned centers, but don't reset them too small # to avoid instant reassignment. This is a pretty dirty hack as it # also modifies the learning rates. counts[to_reassign] = np.min(counts[~to_reassign]) # implementation for the sparse CSR representation completely written in # cython if sp.issparse(X): return inertia, _k_means._mini_batch_update_csr( X, x_squared_norms, centers, counts, nearest_center, old_center_buffer, compute_squared_diff) # dense variant in mostly numpy (not as memory efficient though) k = centers.shape[0] squared_diff = 0.0 for center_idx in range(k): # find points from minibatch that are assigned to this center center_mask = nearest_center == center_idx count = center_mask.sum() if count > 0: if compute_squared_diff: old_center_buffer[:] = centers[center_idx] # inplace remove previous count scaling centers[center_idx] *= counts[center_idx] # inplace sum with new points members of this cluster centers[center_idx] += np.sum(X[center_mask], axis=0) # update the count statistics for this center counts[center_idx] += count # inplace rescale to compute mean of all points (old and new) centers[center_idx] /= counts[center_idx] # update the squared diff if necessary if compute_squared_diff: diff = centers[center_idx].ravel() - old_center_buffer.ravel() squared_diff += np.dot(diff, diff) return inertia, squared_diff def _mini_batch_convergence(model, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, context, verbose=0): """Helper function to encapsulte the early stopping logic""" # Normalize inertia to be able to compare values when # batch_size changes batch_inertia /= model.batch_size centers_squared_diff /= model.batch_size # Compute an Exponentially Weighted Average of the squared # diff to monitor the convergence while discarding # minibatch-local stochastic variability: # https://en.wikipedia.org/wiki/Moving_average ewa_diff = context.get('ewa_diff') ewa_inertia = context.get('ewa_inertia') if ewa_diff is None: ewa_diff = centers_squared_diff ewa_inertia = batch_inertia else: alpha = float(model.batch_size) * 2.0 / (n_samples + 1) alpha = 1.0 if alpha > 1.0 else alpha ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha # Log progress to be able to monitor convergence if verbose: progress_msg = ( 'Minibatch iteration %d/%d:' ' mean batch inertia: %f, ewa inertia: %f ' % ( iteration_idx + 1, n_iter, batch_inertia, ewa_inertia)) 
print(progress_msg) # Early stopping based on absolute tolerance on squared change of # centers position (using EWA smoothing) if tol > 0.0 and ewa_diff <= tol: if verbose: print('Converged (small centers change) at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # Early stopping heuristic due to lack of improvement on smoothed inertia ewa_inertia_min = context.get('ewa_inertia_min') no_improvement = context.get('no_improvement', 0) if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min: no_improvement = 0 ewa_inertia_min = ewa_inertia else: no_improvement += 1 if (model.max_no_improvement is not None and no_improvement >= model.max_no_improvement): if verbose: print('Converged (lack of improvement in inertia)' ' at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # update the convergence context to maintain state across successive calls: context['ewa_diff'] = ewa_diff context['ewa_inertia'] = ewa_inertia context['ewa_inertia_min'] = ewa_inertia_min context['no_improvement'] = no_improvement return False class MiniBatchKMeans(KMeans): """Mini-Batch K-Means clustering Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional Maximum number of iterations over the complete dataset before stopping independently of any early stopping criterion heuristics. max_no_improvement : int, default: 10 Control early stopping based on the consecutive number of mini batches that do not yield an improvement on the smoothed inertia. To disable convergence detection based on inertia, set max_no_improvement to None. tol : float, default: 0.0 Control early stopping based on the relative center changes as measured by a smoothed, variance-normalized estimate of the mean center squared position changes. This early stopping heuristic is closer to the one used for the batch variant of the algorithms but induces a slight computational and memory overhead over the inertia heuristic. To disable convergence detection based on normalized center change, set tol to 0.0 (default). batch_size : int, optional, default: 100 Size of the mini batches. init_size : int, optional, default: 3 * batch_size Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than n_clusters. init : {'k-means++', 'random' or an ndarray}, default: 'k-means++' Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. n_init : int, default=3 Number of random initializations that are tried. In contrast to KMeans, the algorithm is only run once, using the best of the ``n_init`` initializations as measured by inertia. compute_labels : boolean, default=True Compute label assignment and inertia for the complete dataset once the minibatch optimization has converged in fit. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. 
reassignment_ratio : float, default: 0.01 Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more easily reassigned, which means that the model will take longer to converge, but should converge in a better clustering. verbose : boolean, optional Verbosity mode. Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers labels_ : Labels of each point (if compute_labels is set to True). inertia_ : float The value of the inertia criterion associated with the chosen partition (if compute_labels is set to True). The inertia is defined as the sum of square distances of samples to their nearest neighbor. Notes ----- See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf """ def __init__(self, n_clusters=8, init='k-means++', max_iter=100, batch_size=100, verbose=0, compute_labels=True, random_state=None, tol=0.0, max_no_improvement=10, init_size=None, n_init=3, reassignment_ratio=0.01): super(MiniBatchKMeans, self).__init__( n_clusters=n_clusters, init=init, max_iter=max_iter, verbose=verbose, random_state=random_state, tol=tol, n_init=n_init) self.max_no_improvement = max_no_improvement self.batch_size = batch_size self.compute_labels = compute_labels self.init_size = init_size self.reassignment_ratio = reassignment_ratio def fit(self, X, y=None): """Compute the centroids on X by chunking it into mini-batches. Parameters ---------- X : array-like, shape = [n_samples, n_features] Coordinates of the data points to cluster """ random_state = check_random_state(self.random_state) X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64) n_samples, n_features = X.shape if n_samples < self.n_clusters: raise ValueError("Number of samples smaller than number " "of clusters.") n_init = self.n_init if hasattr(self.init, '__array__'): self.init = np.ascontiguousarray(self.init, dtype=np.float64) if n_init != 1: warnings.warn( 'Explicit initial center position passed: ' 'performing only one init in MiniBatchKMeans instead of ' 'n_init=%d' % self.n_init, RuntimeWarning, stacklevel=2) n_init = 1 x_squared_norms = row_norms(X, squared=True) if self.tol > 0.0: tol = _tolerance(X, self.tol) # using tol-based early stopping needs the allocation of a # dedicated buffer, which can be expensive for high dim data: # hence we allocate it outside of the main loop old_center_buffer = np.zeros(n_features, np.double) else: tol = 0.0 # no need for the center buffer if tol-based early stopping is # disabled old_center_buffer = np.zeros(0, np.double) distances = np.zeros(self.batch_size, dtype=np.float64) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) n_iter = int(self.max_iter * n_batches) init_size = self.init_size if init_size is None: init_size = 3 * self.batch_size if init_size > n_samples: init_size = n_samples self.init_size_ = init_size validation_indices = random_state.random_integers( 0, n_samples - 1, init_size) X_valid = X[validation_indices] x_squared_norms_valid = x_squared_norms[validation_indices] # perform several inits with random sub-sets best_inertia = None for init_idx in range(n_init): if self.verbose: print("Init %d/%d with method: %s" % (init_idx + 1, n_init, self.init)) counts = np.zeros(self.n_clusters, dtype=np.int32) # TODO: once the `k_means` function works with sparse input we # should refactor the following init to use it instead. 
# Initialize the centers using only a fraction of the data as we # expect n_samples to be very large when using MiniBatchKMeans cluster_centers = _init_centroids( X, self.n_clusters, self.init, random_state=random_state, x_squared_norms=x_squared_norms, init_size=init_size) # Compute the label assignment on the init dataset batch_inertia, centers_squared_diff = _mini_batch_step( X_valid, x_squared_norms[validation_indices], cluster_centers, counts, old_center_buffer, False, distances=None, verbose=self.verbose) # Keep only the best cluster centers across independent inits on # the common validation set _, inertia = _labels_inertia(X_valid, x_squared_norms_valid, cluster_centers) if self.verbose: print("Inertia for init %d/%d: %f" % (init_idx + 1, n_init, inertia)) if best_inertia is None or inertia < best_inertia: self.cluster_centers_ = cluster_centers self.counts_ = counts best_inertia = inertia # Empty context to be used inplace by the convergence check routine convergence_context = {} # Perform the iterative optimization until the final convergence # criterion for iteration_idx in range(n_iter): # Sample a minibatch from the full dataset minibatch_indices = random_state.random_integers( 0, n_samples - 1, self.batch_size) # Perform the actual update step on the minibatch data batch_inertia, centers_squared_diff = _mini_batch_step( X[minibatch_indices], x_squared_norms[minibatch_indices], self.cluster_centers_, self.counts_, old_center_buffer, tol > 0.0, distances=distances, # Here we randomly choose whether to perform # random reassignment: the choice is done as a function # of the iteration index, and the minimum number of # counts, in order to force this reassignment to happen # every once in a while random_reassign=((iteration_idx + 1) % (10 + self.counts_.min()) == 0), random_state=random_state, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose) # Monitor convergence and do early stopping if necessary if _mini_batch_convergence( self, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, convergence_context, verbose=self.verbose): break self.n_iter_ = iteration_idx + 1 if self.compute_labels: self.labels_, self.inertia_ = self._labels_inertia_minibatch(X) return self def _labels_inertia_minibatch(self, X): """Compute labels and inertia using mini batches. This is slightly slower than doing everything at once but prevents memory errors / segfaults. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- labels : array, shape (n_samples,) Cluster labels for each point. inertia : float Sum of squared distances of points to nearest cluster. """ if self.verbose: print('Computing label assignment and total inertia') x_squared_norms = row_norms(X, squared=True) slices = gen_batches(X.shape[0], self.batch_size) results = [_labels_inertia(X[s], x_squared_norms[s], self.cluster_centers_) for s in slices] labels, inertia = zip(*results) return np.hstack(labels), np.sum(inertia) def partial_fit(self, X, y=None): """Update k means estimate on a single mini-batch X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Coordinates of the data points to cluster. 
""" X = check_array(X, accept_sparse="csr") n_samples, n_features = X.shape if hasattr(self.init, '__array__'): self.init = np.ascontiguousarray(self.init, dtype=np.float64) if n_samples == 0: return self x_squared_norms = row_norms(X, squared=True) self.random_state_ = getattr(self, "random_state_", check_random_state(self.random_state)) if (not hasattr(self, 'counts_') or not hasattr(self, 'cluster_centers_')): # this is the first call partial_fit on this object: # initialize the cluster centers self.cluster_centers_ = _init_centroids( X, self.n_clusters, self.init, random_state=self.random_state_, x_squared_norms=x_squared_norms, init_size=self.init_size) self.counts_ = np.zeros(self.n_clusters, dtype=np.int32) random_reassign = False distances = None else: # The lower the minimum count is, the more we do random # reassignment, however, we don't want to do random # reassignment too often, to allow for building up counts random_reassign = self.random_state_.randint( 10 * (1 + self.counts_.min())) == 0 distances = np.zeros(X.shape[0], dtype=np.float64) _mini_batch_step(X, x_squared_norms, self.cluster_centers_, self.counts_, np.zeros(0, np.double), 0, random_reassign=random_reassign, distances=distances, random_state=self.random_state_, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose) if self.compute_labels: self.labels_, self.inertia_ = _labels_inertia( X, x_squared_norms, self.cluster_centers_) return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._labels_inertia_minibatch(X)[0]
bsd-3-clause
7,049,185,211,904,784,000
-153,361,487,291,375,300
37.188227
79
0.612102
false
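A short usage sketch of the estimator defined in this row, assuming only the public API shown above (fit, partial_fit, predict); the blob data, batch size, and seed are illustrative choices, not values from the source.

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
# Two well-separated blobs, so the example converges in a few batches.
X = np.vstack([rng.normal(0.0, 0.5, size=(500, 2)),
               rng.normal(5.0, 0.5, size=(500, 2))])

# fit() chunks X into mini-batches internally.
mbk = MiniBatchKMeans(n_clusters=2, batch_size=100, random_state=0)
mbk.fit(X)
print(mbk.cluster_centers_)

# partial_fit() lets the caller drive the streaming loop instead,
# one mini-batch per call.
streaming = MiniBatchKMeans(n_clusters=2, random_state=0)
for start in range(0, X.shape[0], 100):
    streaming.partial_fit(X[start:start + 100])
print(streaming.predict(X[:5]))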
koparasy/faultinjection-gem5
src/arch/power/PowerTLB.py
20
1765
# -*- mode:python -*-

# Copyright (c) 2009 The University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Timothy M. Jones

from m5.SimObject import SimObject
from m5.params import *


class PowerTLB(SimObject):
    type = 'PowerTLB'
    cxx_class = 'PowerISA::TLB'
    size = Param.Int(64, "TLB size")
bsd-3-clause
2,630,697,995,441,791,500
-3,387,906,455,155,033,000
46.702703
72
0.77847
false
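For context, SimObject declarations like the one above are consumed from gem5 configuration scripts. A sketch of how the declared parameter would be overridden there; note that the m5 package is only importable inside a gem5 build, so this is not a standalone program.

from m5.objects import PowerTLB  # available only within gem5's Python env

tlb = PowerTLB()   # size defaults to the 64 entries declared above
tlb.size = 128     # Param.Int accepts a plain integer override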
JonathonReinhart/scuba
scuba/config.py
1
11648
import os import yaml import re import shlex from .constants import * from .utils import * class ConfigError(Exception): pass class ConfigNotFoundError(ConfigError): pass # http://stackoverflow.com/a/9577670 class Loader(yaml.SafeLoader): def __init__(self, stream): self._root = os.path.split(stream.name)[0] self._cache = dict() super().__init__(stream) def from_yaml(self, node): ''' Implementes a !from_yaml constructor with the following syntax: !from_yaml filename key Arguments: filename: Filename of external YAML document from which to load, relative to the current YAML file. key: Key from external YAML document to return, using a dot-separated syntax for nested keys. Examples: !from_yaml external.yml pop !from_yaml external.yml foo.bar.pop !from_yaml "another file.yml" "foo bar.snap crackle.pop" ''' # Load the content from the node, as a scalar content = self.construct_scalar(node) # Split on unquoted spaces parts = shlex.split(content) if len(parts) != 2: raise yaml.YAMLError('Two arguments expected to !from_yaml') filename, key = parts # path is relative to the current YAML document path = os.path.join(self._root, filename) # Load the other YAML document doc = self._cache.get(path) if not doc: with open(path, 'r') as f: doc = yaml.load(f, self.__class__) self._cache[path] = doc # Retrieve the key try: cur = doc # Use a negative look-behind to split the key on non-escaped '.' characters for k in re.split(r'(?<!\\)\.', key): cur = cur[k.replace('\\.', '.')] # Be sure to replace any escaped '.' characters with *just* the '.' except KeyError: raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename)) return cur Loader.add_constructor('!from_yaml', Loader.from_yaml) def find_config(): '''Search up the directory hierarchy for .scuba.yml Returns: path, rel, config on success, or None if not found path The absolute path of the directory where .scuba.yml was found rel The relative path from the directory where .scuba.yml was found to the current directory config The loaded configuration ''' cross_fs = 'SCUBA_DISCOVERY_ACROSS_FILESYSTEM' in os.environ path = os.getcwd() rel = '' while True: cfg_path = os.path.join(path, SCUBA_YML) if os.path.exists(cfg_path): return path, rel, load_config(cfg_path) if not cross_fs and os.path.ismount(path): msg = '{} not found here or any parent up to mount point {}'.format(SCUBA_YML, path) \ + '\nStopping at filesystem boundary (SCUBA_DISCOVERY_ACROSS_FILESYSTEM not set).' raise ConfigNotFoundError(msg) # Traverse up directory hierarchy path, rest = os.path.split(path) if not rest: raise ConfigNotFoundError('{} not found here or any parent directories'.format(SCUBA_YML)) # Accumulate the relative path back to where we started rel = os.path.join(rest, rel) def _process_script_node(node, name): '''Process a script-type node This handles nodes that follow the *Common script schema*, as outlined in doc/yaml-reference.md. 
''' if isinstance(node, str): # The script is just the text itself return [node] if isinstance(node, dict): # There must be a "script" key, which must be a list of strings script = node.get('script') if not script: raise ConfigError("{}: must have a 'script' subkey".format(name)) if isinstance(script, list): return script if isinstance(script, str): return [script] raise ConfigError("{}.script: must be a string or list".format(name)) raise ConfigError("{}: must be string or dict".format(name)) def _process_environment(node, name): # Environment can be either a list of strings ("KEY=VALUE") or a mapping # Environment keys and values are always strings result = {} if not node: pass elif isinstance(node, dict): for k, v in node.items(): if v is None: v = os.getenv(k, '') result[k] = str(v) elif isinstance(node, list): for e in node: k, v = parse_env_var(e) result[k] = v else: raise ConfigError("'{}' must be list or mapping, not {}".format( name, type(node).__name__)) return result def _get_entrypoint(data): # N.B. We can't use data.get() here, because that might return # None, leading to ambiguity between entrypoint being absent or set # to a null value. # # "Note that a null is different from an empty string and that a # mapping entry with some key and a null value is valid and # different from not having that key in the mapping." # - http://yaml.org/type/null.html key = 'entrypoint' if not key in data: return None ep = data[key] # We represent a null value as an empty string. if ep is None: ep = '' if not isinstance(ep, str): raise ConfigError("'{}' must be a string, not {}".format( key, type(ep).__name__)) return ep class ScubaAlias: def __init__(self, name, script, image, entrypoint, environment, shell, as_root): self.name = name self.script = script self.image = image self.entrypoint = entrypoint self.environment = environment self.shell = shell self.as_root = as_root @classmethod def from_dict(cls, name, node): script = _process_script_node(node, name) image = None entrypoint = None environment = None shell = None as_root = False if isinstance(node, dict): # Rich alias image = node.get('image') entrypoint = _get_entrypoint(node) environment = _process_environment( node.get('environment'), '{}.{}'.format(name, 'environment')) shell = node.get('shell') as_root = node.get('root', as_root) return cls(name, script, image, entrypoint, environment, shell, as_root) class ScubaContext: pass class ScubaConfig: def __init__(self, **data): optional_nodes = ('image','aliases','hooks','entrypoint','environment','shell') # Check for unrecognized nodes extra = [n for n in data if not n in optional_nodes] if extra: raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML, 's' if len(extra) > 1 else '', ', '.join(extra))) self._image = data.get('image') self._shell = data.get('shell', DEFAULT_SHELL) self._entrypoint = _get_entrypoint(data) self._load_aliases(data) self._load_hooks(data) self._environment = self._load_environment(data) def _load_aliases(self, data): self._aliases = {} for name, node in data.get('aliases', {}).items(): if ' ' in name: raise ConfigError('Alias names cannot contain spaces') self._aliases[name] = ScubaAlias.from_dict(name, node) def _load_hooks(self, data): self._hooks = {} for name in ('user', 'root',): node = data.get('hooks', {}).get(name) if node: hook = _process_script_node(node, name) self._hooks[name] = hook def _load_environment(self, data): return _process_environment(data.get('environment'), 'environment') @property def image(self): if not self._image: raise 
ConfigError("Top-level 'image' not set") return self._image @property def entrypoint(self): return self._entrypoint @property def aliases(self): return self._aliases @property def hooks(self): return self._hooks @property def environment(self): return self._environment @property def shell(self): return self._shell def process_command(self, command, image=None, shell=None): '''Processes a user command using aliases Arguments: command A user command list (e.g. argv) image Override the image from .scuba.yml shell Override the shell from .scuba.yml Returns: A ScubaContext object with the following attributes: script: a list of command line strings image: the docker image name to use ''' result = ScubaContext() result.script = None result.image = None result.entrypoint = self.entrypoint result.environment = self.environment.copy() result.shell = self.shell result.as_root = False if command: alias = self.aliases.get(command[0]) if not alias: # Command is not an alias; use it as-is. result.script = [shell_quote_cmd(command)] else: # Using an alias # Does this alias override the image and/or entrypoint? if alias.image: result.image = alias.image if alias.entrypoint is not None: result.entrypoint = alias.entrypoint if alias.shell is not None: result.shell = alias.shell if alias.as_root: result.as_root = True # Merge/override the environment if alias.environment: result.environment.update(alias.environment) if len(alias.script) > 1: # Alias is a multiline script; no additional # arguments are allowed in the scuba invocation. if len(command) > 1: raise ConfigError('Additional arguments not allowed with multi-line aliases') result.script = alias.script else: # Alias is a single-line script; perform substituion # and add user arguments. command.pop(0) result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)] result.script = flatten_list(result.script) # If a shell was given on the CLI, it should override the shell set by # the alias or top-level config if shell: result.shell = shell # If an image was given, it overrides what might have been set by an alias if image: result.image = image # If the image was still not set, then try to get it from the confg, # which will raise a ConfigError if it is not set if not result.image: result.image = self.image return result def load_config(path): try: with open(path, 'r') as f: data = yaml.load(f, Loader) except IOError as e: raise ConfigError('Error opening {}: {}'.format(SCUBA_YML, e)) except yaml.YAMLError as e: raise ConfigError('Error loading {}: {}'.format(SCUBA_YML, e)) return ScubaConfig(**(data or {}))
mit
-1,424,969,308,220,128,000
-4,107,151,512,313,176,000
31.088154
117
0.571171
false
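A sketch of the entry points above: load_config() plus alias resolution through process_command(). The YAML body and the temporary file are hypothetical test fixtures; the attribute and method names come from the module itself.

import os
import tempfile

from scuba.config import load_config

yml = """\
image: debian:9
environment:
  FOO: bar
aliases:
  build:
    script:
      - make all
"""

with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as f:
    f.write(yml)
    path = f.name

try:
    cfg = load_config(path)
    print(cfg.image)                      # debian:9
    ctx = cfg.process_command(['build'])  # alias lookup
    print(ctx.script)                     # the alias's script, ready to run
finally:
    os.unlink(path)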
badele/home-assistant
homeassistant/components/switch/modbus.py
9
4290
""" homeassistant.components.switch.modbus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for Modbus switches. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.modbus/ """ import logging import homeassistant.components.modbus as modbus from homeassistant.helpers.entity import ToggleEntity _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['modbus'] def setup_platform(hass, config, add_devices, discovery_info=None): """ Read configuration and create Modbus devices. """ switches = [] slave = config.get("slave", None) if modbus.TYPE == "serial" and not slave: _LOGGER.error("No slave number provided for serial Modbus") return False registers = config.get("registers") if registers: for regnum, register in registers.items(): bits = register.get("bits") for bitnum, bit in bits.items(): if bit.get("name"): switches.append(ModbusSwitch(bit.get("name"), slave, regnum, bitnum)) coils = config.get("coils") if coils: for coilnum, coil in coils.items(): switches.append(ModbusSwitch(coil.get("name"), slave, coilnum, 0, coil=True)) add_devices(switches) class ModbusSwitch(ToggleEntity): # pylint: disable=too-many-arguments """ Represents a Modbus switch. """ def __init__(self, name, slave, register, bit, coil=False): self._name = name self.slave = int(slave) if slave else 1 self.register = int(register) self.bit = int(bit) self._coil = coil self._is_on = None self.register_value = None def __str__(self): return "%s: %s" % (self.name, self.state) @property def should_poll(self): """ We should poll, because slaves are not allowed to initiate communication on Modbus networks. """ return True @property def unique_id(self): """ Returns a unique id. """ return "MODBUS-SWITCH-{}-{}-{}".format(self.slave, self.register, self.bit) @property def is_on(self): """ Returns True if switch is on. """ return self._is_on @property def name(self): """ Get the name of the switch. """ return self._name def turn_on(self, **kwargs): """ Set switch on. """ if self.register_value is None: self.update() if self._coil: modbus.NETWORK.write_coil(self.register, True) else: val = self.register_value | (0x0001 << self.bit) modbus.NETWORK.write_register(unit=self.slave, address=self.register, value=val) def turn_off(self, **kwargs): """ Set switch off. """ if self.register_value is None: self.update() if self._coil: modbus.NETWORK.write_coil(self.register, False) else: val = self.register_value & ~(0x0001 << self.bit) modbus.NETWORK.write_register(unit=self.slave, address=self.register, value=val) def update(self): """ Update the state of the switch. """ if self._coil: result = modbus.NETWORK.read_coils(self.register, 1) self.register_value = result.bits[0] self._is_on = self.register_value else: result = modbus.NETWORK.read_holding_registers( unit=self.slave, address=self.register, count=1) val = 0 for i, res in enumerate(result.registers): val += res * (2**(i*16)) self.register_value = val self._is_on = (val & (0x0001 << self.bit) > 0)
mit
-4,697,471,838,692,738,000
-3,026,212,605,060,166,700
32.779528
74
0.505361
false
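The register variant above flips one bit inside a 16-bit holding register. Here is the same masking logic, pulled out of the class so it can run without a Modbus network; the helper names are mine, not the component's.

def set_bit(register_value, bit):
    # mirrors turn_on(): OR in a one-hot mask
    return register_value | (0x0001 << bit)


def clear_bit(register_value, bit):
    # mirrors turn_off(): AND with the inverted mask
    return register_value & ~(0x0001 << bit)


val = 0b0000
val = set_bit(val, 2)        # -> 0b0100, the switch on bit 2 is "on"
assert val & (0x0001 << 2)
val = clear_bit(val, 2)      # -> 0b0000, the switch is "off" again
assert not val & (0x0001 << 2)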
RichardLitt/wyrd-django-dev
tests/regressiontests/admin_filters/tests.py
4
33533
from __future__ import absolute_import, unicode_literals import datetime from django.contrib.admin import (site, ModelAdmin, SimpleListFilter, BooleanFieldListFilter) from django.contrib.admin.views.main import ChangeList from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from django.core.exceptions import ImproperlyConfigured from django.test import TestCase, RequestFactory from django.test.utils import override_settings, six from django.utils.encoding import force_text from .models import Book, Department, Employee def select_by(dictlist, key, value): return [x for x in dictlist if x[key] == value][0] class DecadeListFilter(SimpleListFilter): def lookups(self, request, model_admin): return ( ('the 80s', "the 1980's"), ('the 90s', "the 1990's"), ('the 00s', "the 2000's"), ('other', "other decades"), ) def queryset(self, request, queryset): decade = self.value() if decade == 'the 80s': return queryset.filter(year__gte=1980, year__lte=1989) if decade == 'the 90s': return queryset.filter(year__gte=1990, year__lte=1999) if decade == 'the 00s': return queryset.filter(year__gte=2000, year__lte=2009) class DecadeListFilterWithTitleAndParameter(DecadeListFilter): title = 'publication decade' parameter_name = 'publication-decade' class DecadeListFilterWithoutTitle(DecadeListFilter): parameter_name = 'publication-decade' class DecadeListFilterWithoutParameter(DecadeListFilter): title = 'publication decade' class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter): def lookups(self, request, model_admin): pass class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter): def queryset(self, request, queryset): raise 1/0 class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter): def lookups(self, request, model_admin): qs = model_admin.queryset(request) if qs.filter(year__gte=1980, year__lte=1989).exists(): yield ('the 80s', "the 1980's") if qs.filter(year__gte=1990, year__lte=1999).exists(): yield ('the 90s', "the 1990's") if qs.filter(year__gte=2000, year__lte=2009).exists(): yield ('the 00s', "the 2000's") class DecadeListFilterParameterEndsWith__In(DecadeListFilter): title = 'publication decade' parameter_name = 'decade__in' # Ends with '__in" class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter): title = 'publication decade' parameter_name = 'decade__isnull' # Ends with '__isnull" class CustomUserAdmin(UserAdmin): list_filter = ('books_authored', 'books_contributed') class BookAdmin(ModelAdmin): list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no') ordering = ('-id',) class BookAdminWithTupleBooleanFilter(BookAdmin): list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no') class DecadeFilterBookAdmin(ModelAdmin): list_filter = ('author', DecadeListFilterWithTitleAndParameter) ordering = ('-id',) class DecadeFilterBookAdminWithoutTitle(ModelAdmin): list_filter = (DecadeListFilterWithoutTitle,) class DecadeFilterBookAdminWithoutParameter(ModelAdmin): list_filter = (DecadeListFilterWithoutParameter,) class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin): list_filter = (DecadeListFilterWithNoneReturningLookups,) class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin): list_filter = (DecadeListFilterWithFailingQueryset,) class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin): list_filter = (DecadeListFilterWithQuerysetBasedLookups,) class 
DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin): list_filter = (DecadeListFilterParameterEndsWith__In,) class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin): list_filter = (DecadeListFilterParameterEndsWith__Isnull,) class EmployeeAdmin(ModelAdmin): list_display = ['name', 'department'] list_filter = ['department'] class ListFiltersTests(TestCase): def setUp(self): self.today = datetime.date.today() self.tomorrow = self.today + datetime.timedelta(days=1) self.one_week_ago = self.today - datetime.timedelta(days=7) self.request_factory = RequestFactory() # Users self.alfred = User.objects.create_user('alfred', 'alfred@example.com') self.bob = User.objects.create_user('bob', 'bob@example.com') self.lisa = User.objects.create_user('lisa', 'lisa@example.com') # Books self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today) self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207) self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103) self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago) self.gipsy_book.contributors = [self.bob, self.lisa] self.gipsy_book.save() def get_changelist(self, request, model, modeladmin): return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links, modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields, modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin) def test_datefieldlistfilter(self): modeladmin = BookAdmin(Book, site) request = self.request_factory.get('/') changelist = self.get_changelist(request, Book, modeladmin) request = self.request_factory.get('/', {'date_registered__gte': self.today, 'date_registered__lt': self.tomorrow}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][4] self.assertEqual(force_text(filterspec.title), 'date registered') choice = select_by(filterspec.choices(changelist), "display", "Today") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?date_registered__gte=%s' '&date_registered__lt=%s' % (self.today, self.tomorrow)) request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1), 'date_registered__lt': self.tomorrow}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month): # In case one week ago is in the same month. 
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book]) else: self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][4] self.assertEqual(force_text(filterspec.title), 'date registered') choice = select_by(filterspec.choices(changelist), "display", "This month") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?date_registered__gte=%s' '&date_registered__lt=%s' % (self.today.replace(day=1), self.tomorrow)) request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1), 'date_registered__lt': self.tomorrow}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) if self.today.year == self.one_week_ago.year: # In case one week ago is in the same year. self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book]) else: self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][4] self.assertEqual(force_text(filterspec.title), 'date registered') choice = select_by(filterspec.choices(changelist), "display", "This year") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?date_registered__gte=%s' '&date_registered__lt=%s' % (self.today.replace(month=1, day=1), self.tomorrow)) request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago), 'date_registered__lt': str(self.tomorrow)}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][4] self.assertEqual(force_text(filterspec.title), 'date registered') choice = select_by(filterspec.choices(changelist), "display", "Past 7 days") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?date_registered__gte=%s' '&date_registered__lt=%s' % (str(self.one_week_ago), str(self.tomorrow))) @override_settings(USE_TZ=True) def test_datefieldlistfilter_with_time_zone_support(self): # Regression for #17830 self.test_datefieldlistfilter() def test_allvaluesfieldlistfilter(self): modeladmin = BookAdmin(Book, site) request = self.request_factory.get('/', {'year__isnull': 'True'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.django_book]) # Make sure the last choice is None and is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(force_text(filterspec.title), 'year') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[-1]['selected'], True) self.assertEqual(choices[-1]['query_string'], '?year__isnull=True') request = self.request_factory.get('/', {'year': '2002'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(force_text(filterspec.title), 'year') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]['selected'], True) 
self.assertEqual(choices[2]['query_string'], '?year=2002') def test_relatedfieldlistfilter_foreignkey(self): modeladmin = BookAdmin(Book, site) request = self.request_factory.get('/', {'author__isnull': 'True'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.gipsy_book]) # Make sure the last choice is None and is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'Verbose Author') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[-1]['selected'], True) self.assertEqual(choices[-1]['query_string'], '?author__isnull=True') request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'Verbose Author') # order of choices depends on User model, which has no order choice = select_by(filterspec.choices(changelist), "display", "alfred") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk) def test_relatedfieldlistfilter_manytomany(self): modeladmin = BookAdmin(Book, site) request = self.request_factory.get('/', {'contributors__isnull': 'True'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book]) # Make sure the last choice is None and is selected filterspec = changelist.get_filters(request)[0][2] self.assertEqual(force_text(filterspec.title), 'Verbose Contributors') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[-1]['selected'], True) self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True') request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][2] self.assertEqual(force_text(filterspec.title), 'Verbose Contributors') choice = select_by(filterspec.choices(changelist), "display", "bob") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk) def test_relatedfieldlistfilter_reverse_relationships(self): modeladmin = CustomUserAdmin(User, site) # FK relationship ----- request = self.request_factory.get('/', {'books_authored__isnull': 'True'}) changelist = self.get_changelist(request, User, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.lisa]) # Make sure the last choice is None and is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(force_text(filterspec.title), 'book') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[-1]['selected'], True) self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True') request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk}) changelist = self.get_changelist(request, User, modeladmin) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] 
self.assertEqual(force_text(filterspec.title), 'book') choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title) self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk) # M2M relationship ----- request = self.request_factory.get('/', {'books_contributed__isnull': 'True'}) changelist = self.get_changelist(request, User, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.alfred]) # Make sure the last choice is None and is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'book') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[-1]['selected'], True) self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True') request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk}) changelist = self.get_changelist(request, User, modeladmin) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'book') choice = select_by(filterspec.choices(changelist), "display", self.django_book.title) self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk) def test_booleanfieldlistfilter(self): modeladmin = BookAdmin(Book, site) self.verify_booleanfieldlistfilter(modeladmin) def test_booleanfieldlistfilter_tuple(self): modeladmin = BookAdminWithTupleBooleanFilter(Book, site) self.verify_booleanfieldlistfilter(modeladmin) def verify_booleanfieldlistfilter(self, modeladmin): request = self.request_factory.get('/') changelist = self.get_changelist(request, Book, modeladmin) request = self.request_factory.get('/', {'is_best_seller__exact': 0}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][3] self.assertEqual(force_text(filterspec.title), 'is best seller') choice = select_by(filterspec.choices(changelist), "display", "No") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?is_best_seller__exact=0') request = self.request_factory.get('/', {'is_best_seller__exact': 1}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][3] self.assertEqual(force_text(filterspec.title), 'is best seller') choice = select_by(filterspec.choices(changelist), "display", "Yes") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?is_best_seller__exact=1') request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.django_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][3] self.assertEqual(force_text(filterspec.title), 'is best seller') choice = 
select_by(filterspec.choices(changelist), "display", "Unknown") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True') def test_simplelistfilter(self): modeladmin = DecadeFilterBookAdmin(Book, site) # Make sure that the first option is 'All' --------------------------- request = self.request_factory.get('/', {}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id'))) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[0]['display'], 'All') self.assertEqual(choices[0]['selected'], True) self.assertEqual(choices[0]['query_string'], '?') # Look for books in the 1980s ---------------------------------------- request = self.request_factory.get('/', {'publication-decade': 'the 80s'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), []) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[1]['display'], 'the 1980\'s') self.assertEqual(choices[1]['selected'], True) self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s') # Look for books in the 1990s ---------------------------------------- request = self.request_factory.get('/', {'publication-decade': 'the 90s'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]['display'], 'the 1990\'s') self.assertEqual(choices[2]['selected'], True) self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s') # Look for books in the 2000s ---------------------------------------- request = self.request_factory.get('/', {'publication-decade': 'the 00s'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[3]['display'], 'the 2000\'s') self.assertEqual(choices[3]['selected'], True) self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s') # Combine multiple filters ------------------------------------------- request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), 
[self.djangonaut_book]) # Make sure the correct choices are selected filterspec = changelist.get_filters(request)[0][1] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[3]['display'], 'the 2000\'s') self.assertEqual(choices[3]['selected'], True) self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk) filterspec = changelist.get_filters(request)[0][0] self.assertEqual(force_text(filterspec.title), 'Verbose Author') choice = select_by(filterspec.choices(changelist), "display", "alfred") self.assertEqual(choice['selected'], True) self.assertEqual(choice['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk) def test_listfilter_without_title(self): """ Any filter must define a title. """ modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site) request = self.request_factory.get('/', {}) six.assertRaisesRegex(self, ImproperlyConfigured, "The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.", self.get_changelist, request, Book, modeladmin) def test_simplelistfilter_without_parameter(self): """ Any SimpleListFilter must define a parameter_name. """ modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site) request = self.request_factory.get('/', {}) six.assertRaisesRegex(self, ImproperlyConfigured, "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.", self.get_changelist, request, Book, modeladmin) def test_simplelistfilter_with_none_returning_lookups(self): """ A SimpleListFilter lookups method can return None but disables the filter completely. """ modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site) request = self.request_factory.get('/', {}) changelist = self.get_changelist(request, Book, modeladmin) filterspec = changelist.get_filters(request)[0] self.assertEqual(len(filterspec), 0) def test_filter_with_failing_queryset(self): """ Ensure that when a filter's queryset method fails, it fails loudly and the corresponding exception doesn't get swallowed. Refs #17828. """ modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site) request = self.request_factory.get('/', {}) self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin) def test_simplelistfilter_with_queryset_based_lookups(self): modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site) request = self.request_factory.get('/', {}) changelist = self.get_changelist(request, Book, modeladmin) filterspec = changelist.get_filters(request)[0][0] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(len(choices), 3) self.assertEqual(choices[0]['display'], 'All') self.assertEqual(choices[0]['selected'], True) self.assertEqual(choices[0]['query_string'], '?') self.assertEqual(choices[1]['display'], 'the 1990\'s') self.assertEqual(choices[1]['selected'], False) self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s') self.assertEqual(choices[2]['display'], 'the 2000\'s') self.assertEqual(choices[2]['selected'], False) self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s') def test_two_characters_long_field(self): """ Ensure that list_filter works with two-characters long field names. Refs #16080. 
""" modeladmin = BookAdmin(Book, site) request = self.request_factory.get('/', {'no': '207'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.bio_book]) filterspec = changelist.get_filters(request)[0][-1] self.assertEqual(force_text(filterspec.title), 'number') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]['selected'], True) self.assertEqual(choices[2]['query_string'], '?no=207') def test_parameter_ends_with__in__or__isnull(self): """ Ensure that a SimpleListFilter's parameter name is not mistaken for a model field if it ends with '__isnull' or '__in'. Refs #17091. """ # When it ends with '__in' ----------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site) request = self.request_factory.get('/', {'decade__in': 'the 90s'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]['display'], 'the 1990\'s') self.assertEqual(choices[2]['selected'], True) self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s') # When it ends with '__isnull' --------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site) request = self.request_factory.get('/', {'decade__isnull': 'the 90s'}) changelist = self.get_changelist(request, Book, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(force_text(filterspec.title), 'publication decade') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]['display'], 'the 1990\'s') self.assertEqual(choices[2]['selected'], True) self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s') def test_fk_with_to_field(self): """ Ensure that a filter on a FK respects the FK's to_field attribute. Refs #17972. 
""" modeladmin = EmployeeAdmin(Employee, site) dev = Department.objects.create(code='DEV', description='Development') design = Department.objects.create(code='DSN', description='Design') john = Employee.objects.create(name='John Blue', department=dev) jack = Employee.objects.create(name='Jack Red', department=design) request = self.request_factory.get('/', {}) changelist = self.get_changelist(request, Employee, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [jack, john]) filterspec = changelist.get_filters(request)[0][-1] self.assertEqual(force_text(filterspec.title), 'department') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[0]['display'], 'All') self.assertEqual(choices[0]['selected'], True) self.assertEqual(choices[0]['query_string'], '?') self.assertEqual(choices[1]['display'], 'Development') self.assertEqual(choices[1]['selected'], False) self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV') self.assertEqual(choices[2]['display'], 'Design') self.assertEqual(choices[2]['selected'], False) self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN') # Filter by Department=='Development' -------------------------------- request = self.request_factory.get('/', {'department__code__exact': 'DEV'}) changelist = self.get_changelist(request, Employee, modeladmin) # Make sure the correct queryset is returned queryset = changelist.get_query_set(request) self.assertEqual(list(queryset), [john]) filterspec = changelist.get_filters(request)[0][-1] self.assertEqual(force_text(filterspec.title), 'department') choices = list(filterspec.choices(changelist)) self.assertEqual(choices[0]['display'], 'All') self.assertEqual(choices[0]['selected'], False) self.assertEqual(choices[0]['query_string'], '?') self.assertEqual(choices[1]['display'], 'Development') self.assertEqual(choices[1]['selected'], True) self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV') self.assertEqual(choices[2]['display'], 'Design') self.assertEqual(choices[2]['selected'], False) self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
bsd-3-clause
1,429,587,300,866,723,300
-6,430,305,026,518,685,000
46.972818
168
0.653923
false
vpramo/contrail-controller
src/vnsw/opencontrail-vrouter-netns/opencontrail_vrouter_netns/linux/utils.py
15
4675
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juliano Martinez, Locaweb.

import fcntl
import os
import shlex
import signal
import socket
import struct
import tempfile
import sys

if sys.version_info[:2] == (2, 6):
    import subprocess
else:
    from eventlet.green import subprocess
    from eventlet import greenthread


def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None):
    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
                            stderr=stderr, preexec_fn=_subprocess_setup,
                            close_fds=True, env=env)


def create_process(cmd, root_helper=None, addl_env=None):
    """Create a process object for the given command.

    The return value will be a tuple of the process object and the
    list of command arguments used to create it.
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    cmd = map(str, cmd)

    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess_popen(cmd, shell=False,
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           env=env)

    return obj, cmd


def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False):
    try:
        obj, cmd = create_process(cmd, root_helper=root_helper,
                                  addl_env=addl_env)
        _stdout, _stderr = (process_input and
                            obj.communicate(process_input) or
                            obj.communicate())
        obj.stdin.close()
        m = ("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
             "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
                                      'stdout': _stdout, 'stderr': _stderr}
        if obj.returncode:
            if check_exit_code:
                raise RuntimeError(m)
    finally:
        if sys.version_info[:2] == (2, 6):
            pass
        else:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)

    return return_stderr and (_stdout, _stderr) or _stdout


def get_interface_mac(interface):
    DEVICE_NAME_LEN = 15
    MAC_START = 18
    MAC_END = 24
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927,
                       struct.pack('256s', interface[:DEVICE_NAME_LEN]))
    return ''.join(['%02x:' % ord(char)
                    for char in info[MAC_START:MAC_END]])[:-1]


def replace_file(file_name, data):
    """Replaces the contents of file_name with data in a safe manner.

    First write to a temp file and then rename. Since POSIX renames are
    atomic, the file is unlikely to be corrupted by competing writes.

    We create the tempfile on the same device to ensure that it can be
    renamed.
""" base_dir = os.path.dirname(os.path.abspath(file_name)) tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) tmp_file.write(data) tmp_file.close() os.chmod(tmp_file.name, 0o644) os.rename(tmp_file.name, file_name) def find_child_pids(pid): """Retrieve a list of the pids of child processes of the given pid.""" try: raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid=']) except RuntimeError as e: no_children_found = 'Exit code: 1' in str(e) if no_children_found: ctxt.reraise = False return [] raise return [x.strip() for x in raw_pids.split('\n') if x.strip()]
apache-2.0
-6,964,105,824,868,204,000
4,735,750,646,972,166,000
32.876812
79
0.611123
false
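A small usage sketch of the helpers above. The import path follows this row's file location and the echoed command is arbitrary; treat both as assumptions.

from opencontrail_vrouter_netns.linux.utils import execute, replace_file

# execute() returns stdout by default and raises RuntimeError on a
# non-zero exit status (unless check_exit_code=False).
out = execute(['echo', 'hello'])
print(out.strip())

stdout, stderr = execute(['echo', 'hello'], return_stderr=True)

# replace_file() writes via a same-directory temp file + atomic rename.
replace_file('/tmp/example.conf', 'key = value\n')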
lscheinkman/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/units.py
70
4810
""" The classes here provide support for using custom classes with matplotlib, eg those that do not expose the array interface but know how to converter themselves to arrays. It also supoprts classes with units and units conversion. Use cases include converters for custom objects, eg a list of datetime objects, as well as for objects that are unit aware. We don't assume any particular units implementation, rather a units implementation must provide a ConversionInterface, and the register with the Registry converter dictionary. For example, here is a complete implementation which support plotting with native datetime objects import matplotlib.units as units import matplotlib.dates as dates import matplotlib.ticker as ticker import datetime class DateConverter(units.ConversionInterface): def convert(value, unit): 'convert value to a scalar or array' return dates.date2num(value) convert = staticmethod(convert) def axisinfo(unit): 'return major and minor tick locators and formatters' if unit!='date': return None majloc = dates.AutoDateLocator() majfmt = dates.AutoDateFormatter(majloc) return AxisInfo(majloc=majloc, majfmt=majfmt, label='date') axisinfo = staticmethod(axisinfo) def default_units(x): 'return the default unit for x or None' return 'date' default_units = staticmethod(default_units) # finally we register our object type with a converter units.registry[datetime.date] = DateConverter() """ import numpy as np from matplotlib.cbook import iterable, is_numlike class AxisInfo: 'information to support default axis labeling and tick labeling' def __init__(self, majloc=None, minloc=None, majfmt=None, minfmt=None, label=None): """ majloc and minloc: TickLocators for the major and minor ticks majfmt and minfmt: TickFormatters for the major and minor ticks label: the default axis label If any of the above are None, the axis will simply use the default """ self.majloc = majloc self.minloc = minloc self.majfmt = majfmt self.minfmt = minfmt self.label = label class ConversionInterface: """ The minimal interface for a converter to take custom instances (or sequences) and convert them to values mpl can use """ def axisinfo(unit): 'return an units.AxisInfo instance for unit' return None axisinfo = staticmethod(axisinfo) def default_units(x): 'return the default unit for x or None' return None default_units = staticmethod(default_units) def convert(obj, unit): """ convert obj using unit. If obj is a sequence, return the converted sequence. The ouput must be a sequence of scalars that can be used by the numpy array layer """ return obj convert = staticmethod(convert) def is_numlike(x): """ The matplotlib datalim, autoscaling, locators etc work with scalars which are the units converted to floats given the current unit. The converter may be passed these floats, or arrays of them, even when units are set. Derived conversion interfaces may opt to pass plain-ol unitless numbers through the conversion interface and this is a helper function for them. 
""" if iterable(x): for thisx in x: return is_numlike(thisx) else: return is_numlike(x) is_numlike = staticmethod(is_numlike) class Registry(dict): """ register types with conversion interface """ def __init__(self): dict.__init__(self) self._cached = {} def get_converter(self, x): 'get the converter interface instance for x, or None' if not len(self): return None # nothing registered #DISABLED idx = id(x) #DISABLED cached = self._cached.get(idx) #DISABLED if cached is not None: return cached converter = None classx = getattr(x, '__class__', None) if classx is not None: converter = self.get(classx) if converter is None and iterable(x): # if this is anything but an object array, we'll assume # there are no custom units if isinstance(x, np.ndarray) and x.dtype != np.object: return None for thisx in x: converter = self.get_converter( thisx ) return converter #DISABLED self._cached[idx] = converter return converter registry = Registry()
agpl-3.0
-589,425,711,976,895,000
-6,477,258,654,007,834,000
32.402778
74
0.638462
false
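A sketch of the lookup path through the Registry above. Kelvin and its converter are invented here purely for illustration; only registry, get_converter, and ConversionInterface come from the module.

import matplotlib.units as munits


class Kelvin(float):
    """Stand-in for a unit-aware temperature value."""


class KelvinConverter(munits.ConversionInterface):
    @staticmethod
    def convert(value, unit):
        # strip the unit wrapper down to a plain float for the array layer
        return float(value)


munits.registry[Kelvin] = KelvinConverter()

# get_converter() checks the value's class first, then recurses into
# sequences, so a list of Kelvin values resolves to the same converter.
print(munits.registry.get_converter(Kelvin(273.15)))
print(munits.registry.get_converter([Kelvin(273.15), Kelvin(300.0)]))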
gmatteo/pymatgen
pymatgen/analysis/thermochemistry.py
5
3877
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""
A module to perform experimental thermochemical data analysis.
"""

__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 10, 2012"

from pymatgen.core.composition import Composition

STANDARD_TEMP = 298.0


class ThermoData:
    """
    An object container for experimental thermochemical data.
    """

    def __init__(
        self,
        data_type,
        cpdname,
        phaseinfo,
        formula,
        value,
        ref="",
        method="",
        temp_range=(298, 298),
        uncertainty=None,
    ):
        """
        Args:
            data_type: The thermochemical data type. Should be one of the
                following: fH - Formation enthalpy, S - Entropy,
                A, B, C, D, E, F, G, H - variables for use in the various
                equations for generating formation enthalpies or Cp at
                various temperatures.
            cpdname (str): A name for the compound. For example, hematite for
                Fe2O3.
            phaseinfo (str): Denoting the phase. For example, "solid",
                "liquid", "gas" or "tetragonal".
            formula (str): A proper string formula, e.g., Fe2O3
            value (float): The value of the data.
            ref (str): A reference, if any, for the data.
            method (str): The method by which the data was determined,
                if available.
            temp_range ([float, float]): Temperature range of validity for the
                data in Kelvin. Defaults to 298 K only.
            uncertainty (float):
                An uncertainty for the data, if available.
        """
        self.type = data_type
        self.formula = formula
        self.composition = Composition(self.formula)
        self.reduced_formula = self.composition.reduced_formula
        self.compound_name = cpdname
        self.phaseinfo = phaseinfo
        self.value = value
        self.temp_range = temp_range
        self.method = method
        self.ref = ref
        self.uncertainty = uncertainty

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict representation

        Returns:
            ThermoData
        """
        return ThermoData(
            d["type"],
            d["compound_name"],
            d["phaseinfo"],
            d["formula"],
            d["value"],
            d["ref"],
            d["method"],
            d["temp_range"],
            d.get("uncertainty", None),
        )

    def as_dict(self):
        """
        Returns: MSONable dict
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "type": self.type,
            "formula": self.formula,
            "compound_name": self.compound_name,
            "phaseinfo": self.phaseinfo,
            "value": self.value,
            "temp_range": self.temp_range,
            "method": self.method,
            "ref": self.ref,
            "uncertainty": self.uncertainty,
        }

    def __repr__(self):
        props = [
            "formula",
            "compound_name",
            "phaseinfo",
            "type",
            "temp_range",
            "value",
            "method",
            "ref",
            "uncertainty",
        ]
        output = ["{} : {}".format(k, getattr(self, k)) for k in props]
        return "\n".join(output)

    def __str__(self):
        return "{}_{}_{} = {}, Valid T : {}, Ref = {}".format(
            self.type,
            self.formula,
            self.phaseinfo,
            self.value,
            self.temp_range,
            self.ref,
        )
mit
-1,632,990,463,855,711,200
-860,107,804,327,596,300
27.507353
80
0.503224
false
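A round-trip sketch for the container above. The hematite formation-enthalpy value is illustrative only, not sourced data.

from pymatgen.analysis.thermochemistry import ThermoData

fe2o3 = ThermoData('fH', 'hematite', 'solid', 'Fe2O3', -825.5,
                   ref='illustrative value', temp_range=(298, 298))
print(fe2o3.reduced_formula)        # Fe2O3

# as_dict() / from_dict() round-trip the record losslessly.
copy = ThermoData.from_dict(fe2o3.as_dict())
print(copy)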
Akshay0724/scikit-learn
sklearn/model_selection/_split.py
12
63090
"""
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
#         Gael Varoquaux <gael.varoquaux@normalesup.org>,
#         Olivier Grisel <olivier.grisel@ensta.org>
#         Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause

from __future__ import print_function
from __future__ import division

import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod

import numpy as np

from scipy.misc import comb

from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint

__all__ = ['BaseCrossValidator',
           'KFold',
           'GroupKFold',
           'LeaveOneGroupOut',
           'LeaveOneOut',
           'LeavePGroupsOut',
           'LeavePOut',
           'ShuffleSplit',
           'GroupShuffleSplit',
           'StratifiedKFold',
           'StratifiedShuffleSplit',
           'PredefinedSplit',
           'train_test_split',
           'check_cv']


class BaseCrossValidator(with_metaclass(ABCMeta)):
    """Base class for all cross-validators

    Implementations must define `_iter_test_masks` or `_iter_test_indices`.
    """

    def __init__(self):
        # We need this for the build_repr to work properly in py2.7
        # see #6304
        pass

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, of length n_samples
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        indices = np.arange(_num_samples(X))
        for test_index in self._iter_test_masks(X, y, groups):
            train_index = indices[np.logical_not(test_index)]
            test_index = indices[test_index]
            yield train_index, test_index

    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self, X=None, y=None, groups=None):
        """Generates boolean masks corresponding to test sets.

        By default, delegates to _iter_test_indices(X, y, groups)
        """
        for test_index in self._iter_test_indices(X, y, groups):
            test_mask = np.zeros(_num_samples(X), dtype=np.bool)
            test_mask[test_index] = True
            yield test_mask

    def _iter_test_indices(self, X=None, y=None, groups=None):
        """Generates integer indices corresponding to test sets."""
        raise NotImplementedError

    @abstractmethod
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator"""

    def __repr__(self):
        return _build_repr(self)


class LeaveOneOut(BaseCrossValidator):
    """Leave-One-Out cross-validator

    Provides train/test indices to split data in train/test sets. Each
    sample is used once as a test set (singleton) while the remaining
    samples form the training set.

    Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
    ``LeavePOut(p=1)`` where ``n`` is the number of samples.

    Due to the high number of test sets (which is the same as the
    number of samples) this cross-validation method can be very costly.
    For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
    or :class:`StratifiedKFold`.

    Read more in the :ref:`User Guide <cross_validation>`.

    Examples
    --------
    >>> from sklearn.model_selection import LeaveOneOut
    >>> X = np.array([[1, 2], [3, 4]])
    >>> y = np.array([1, 2])
    >>> loo = LeaveOneOut()
    >>> loo.get_n_splits(X)
    2
    >>> print(loo)
    LeaveOneOut()
    >>> for train_index, test_index in loo.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [1] TEST: [0]
    [[3 4]] [[1 2]] [2] [1]
    TRAIN: [0] TEST: [1]
    [[1 2]] [[3 4]] [1] [2]

    See also
    --------
    LeaveOneGroupOut
        For splitting the data according to explicit, domain-specific
        stratification of the dataset.

    GroupKFold: K-fold iterator variant with non-overlapping groups.
    """

    def _iter_test_indices(self, X, y=None, groups=None):
        return range(_num_samples(X))

    def get_n_splits(self, X, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if X is None:
            raise ValueError("The X parameter should not be None")
        return _num_samples(X)


class LeavePOut(BaseCrossValidator):
    """Leave-P-Out cross-validator

    Provides train/test indices to split data in train/test sets. This results
    in testing on all distinct samples of size p, while the remaining n - p
    samples form the training set in each iteration.

    Note: ``LeavePOut(p)`` is NOT equivalent to
    ``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.

    Due to the high number of iterations which grows combinatorically with the
    number of samples this cross-validation method can be very costly. For
    large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
    or :class:`ShuffleSplit`.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    p : int
        Size of the test sets.

    Examples
    --------
    >>> from sklearn.model_selection import LeavePOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> lpo = LeavePOut(2)
    >>> lpo.get_n_splits(X)
    6
    >>> print(lpo)
    LeavePOut(p=2)
    >>> for train_index, test_index in lpo.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [1 2] TEST: [0 3]
    TRAIN: [0 3] TEST: [1 2]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 1] TEST: [2 3]
    """

    def __init__(self, p):
        self.p = p

    def _iter_test_indices(self, X, y=None, groups=None):
        for combination in combinations(range(_num_samples(X)), self.p):
            yield np.array(combination)

    def get_n_splits(self, X, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.
        """
        if X is None:
            raise ValueError("The X parameter should not be None")
        return int(comb(_num_samples(X), self.p, exact=True))


class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
    """Base class for KFold, GroupKFold, and StratifiedKFold"""

    @abstractmethod
    def __init__(self, n_splits, shuffle, random_state):
        if not isinstance(n_splits, numbers.Integral):
            raise ValueError('The number of folds must be of Integral type. '
                             '%s of type %s was passed.'
                             % (n_splits, type(n_splits)))
        n_splits = int(n_splits)

        if n_splits <= 1:
            raise ValueError(
                "k-fold cross-validation requires at least one"
                " train/test split by setting n_splits=2 or more,"
                " got n_splits={0}.".format(n_splits))

        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))

        self.n_splits = n_splits
        self.shuffle = shuffle
        self.random_state = random_state

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        if self.n_splits > n_samples:
            raise ValueError(
                ("Cannot have number of splits n_splits={0} greater"
                 " than the number of samples: {1}.").format(self.n_splits,
                                                             n_samples))

        for train, test in super(_BaseKFold, self).split(X, y, groups):
            yield train, test

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits


class KFold(_BaseKFold):
    """K-Folds cross-validator

    Provides train/test indices to split data in train/test sets. Split
    dataset into k consecutive folds (without shuffling by default).

    Each fold is then used once as a validation while the k - 1 remaining
    folds form the training set.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.

    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Examples
    --------
    >>> from sklearn.model_selection import KFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> kf = KFold(n_splits=2)
    >>> kf.get_n_splits(X)
    2
    >>> print(kf)  # doctest: +NORMALIZE_WHITESPACE
    KFold(n_splits=2, random_state=None, shuffle=False)
    >>> for train_index, test_index in kf.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [0 1] TEST: [2 3]

    Notes
    -----
    The first ``n_samples % n_splits`` folds have size
    ``n_samples // n_splits + 1``, other folds have size
    ``n_samples // n_splits``, where ``n_samples`` is the number of samples.

    See also
    --------
    StratifiedKFold
        Takes group information into account to avoid building folds with
        imbalanced class distributions (for binary or multiclass
        classification tasks).

    GroupKFold: K-fold iterator variant with non-overlapping groups.
    """

    def __init__(self, n_splits=3, shuffle=False,
                 random_state=None):
        super(KFold, self).__init__(n_splits, shuffle, random_state)

    def _iter_test_indices(self, X, y=None, groups=None):
        n_samples = _num_samples(X)
        indices = np.arange(n_samples)
        if self.shuffle:
            check_random_state(self.random_state).shuffle(indices)

        n_splits = self.n_splits
        fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
        fold_sizes[:n_samples % n_splits] += 1
        current = 0
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            yield indices[start:stop]
            current = stop


class GroupKFold(_BaseKFold):
    """K-fold iterator variant with non-overlapping groups.

    The same group will not appear in two different folds (the number of
    distinct groups has to be at least equal to the number of folds).

    The folds are approximately balanced in the sense that the number of
    distinct groups is approximately the same in each fold.

    Parameters
    ----------
    n_splits : int, default=3
        Number of folds. Must be at least 2.

    Examples
    --------
    >>> from sklearn.model_selection import GroupKFold
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> groups = np.array([0, 0, 2, 2])
    >>> group_kfold = GroupKFold(n_splits=2)
    >>> group_kfold.get_n_splits(X, y, groups)
    2
    >>> print(group_kfold)
    GroupKFold(n_splits=2)
    >>> for train_index, test_index in group_kfold.split(X, y, groups):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    ...
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [3 4]
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [3 4] [1 2]

    See also
    --------
    LeaveOneGroupOut
        For splitting the data according to explicit domain-specific
        stratification of the dataset.
    """

    def __init__(self, n_splits=3):
        super(GroupKFold, self).__init__(n_splits, shuffle=False,
                                         random_state=None)

    def _iter_test_indices(self, X, y, groups):
        if groups is None:
            raise ValueError("The groups parameter should not be None")
        groups = check_array(groups, ensure_2d=False, dtype=None)

        unique_groups, groups = np.unique(groups, return_inverse=True)
        n_groups = len(unique_groups)

        if self.n_splits > n_groups:
            raise ValueError("Cannot have number of splits n_splits=%d greater"
                             " than the number of groups: %d."
                             % (self.n_splits, n_groups))

        # Weight groups by their number of occurrences
        n_samples_per_group = np.bincount(groups)

        # Distribute the most frequent groups first
        indices = np.argsort(n_samples_per_group)[::-1]
        n_samples_per_group = n_samples_per_group[indices]

        # Total weight of each fold
        n_samples_per_fold = np.zeros(self.n_splits)

        # Mapping from group index to fold index
        group_to_fold = np.zeros(len(unique_groups))

        # Distribute samples by adding the largest weight to the lightest fold
        for group_index, weight in enumerate(n_samples_per_group):
            lightest_fold = np.argmin(n_samples_per_fold)
            n_samples_per_fold[lightest_fold] += weight
            group_to_fold[indices[group_index]] = lightest_fold

        indices = group_to_fold[groups]

        for f in range(self.n_splits):
            yield np.where(indices == f)[0]


class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross-validator

    Provides train/test indices to split data in train/test sets.

    This cross-validation object is a variation of KFold that returns
    stratified folds. The folds are made by preserving the percentage of
    samples for each class.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.

    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Examples
    --------
    >>> from sklearn.model_selection import StratifiedKFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = StratifiedKFold(n_splits=2)
    >>> skf.get_n_splits(X, y)
    2
    >>> print(skf)  # doctest: +NORMALIZE_WHITESPACE
    StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
    >>> for train_index, test_index in skf.split(X, y):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]

    Notes
    -----
    All the folds have size ``trunc(n_samples / n_splits)``, the last one has
    the complementary size.
    """

    def __init__(self, n_splits=3, shuffle=False, random_state=None):
        super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)

    def _make_test_folds(self, X, y=None, groups=None):
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_y, y_inversed = np.unique(y, return_inverse=True)
        y_counts = bincount(y_inversed)
        min_groups = np.min(y_counts)
        if np.all(self.n_splits > y_counts):
            raise ValueError("All the n_groups for individual classes"
                             " are less than n_splits=%d."
                             % (self.n_splits))
        if self.n_splits > min_groups:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of groups for any class cannot"
                           " be less than n_splits=%d."
                           % (min_groups, self.n_splits)), Warning)

        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each class so as to respect the balance of
        # classes
        # NOTE: Passing the data corresponding to ith class say X[y==class_i]
        # will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
        per_cls_cvs = [
            KFold(self.n_splits, shuffle=self.shuffle,
                  random_state=rng).split(np.zeros(max(count, self.n_splits)))
            for count in y_counts]

        test_folds = np.zeros(n_samples, dtype=np.int)
        for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
            for cls, (_, test_split) in zip(unique_y, per_cls_splits):
                cls_test_folds = test_folds[y == cls]
                # the test split can be too big because we used
                # KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
                # stratifiable for all the classes
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(cls_test_folds)]
                cls_test_folds[test_split] = test_fold_indices
                test_folds[y == cls] = cls_test_folds

        return test_folds

    def _iter_test_masks(self, X, y=None, groups=None):
        test_folds = self._make_test_folds(X, y)
        for i in range(self.n_splits):
            yield test_folds == i

    def split(self, X, y, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

            Note that providing ``y`` is sufficient to generate the splits and
            hence ``np.zeros(n_samples)`` may be used as a placeholder for
            ``X`` instead of actual training data.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
            Stratification is done based on the y labels.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        y = check_array(y, ensure_2d=False, dtype=None)
        return super(StratifiedKFold, self).split(X, y, groups)


class TimeSeriesSplit(_BaseKFold):
    """Time Series cross-validator

    Provides train/test indices to split time series data samples
    that are observed at fixed time intervals, in train/test sets.
    In each split, test indices must be higher than before, and thus shuffling
    in cross validator is inappropriate.

    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns first k folds as train set and the
    (k+1)th fold as test set.

    Note that unlike standard cross-validation methods, successive
    training sets are supersets of those that come before them.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=3
        Number of splits. Must be at least 1.

    Examples
    --------
    >>> from sklearn.model_selection import TimeSeriesSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> tscv = TimeSeriesSplit(n_splits=3)
    >>> print(tscv)  # doctest: +NORMALIZE_WHITESPACE
    TimeSeriesSplit(n_splits=3)
    >>> for train_index, test_index in tscv.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0] TEST: [1]
    TRAIN: [0 1] TEST: [2]
    TRAIN: [0 1 2] TEST: [3]

    Notes
    -----
    The training set has size ``i * n_samples // (n_splits + 1)
    + n_samples % (n_splits + 1)`` in the ``i``th split,
    with a test set of size ``n_samples//(n_splits + 1)``,
    where ``n_samples`` is the number of samples.
    """

    def __init__(self, n_splits=3):
        super(TimeSeriesSplit, self).__init__(n_splits,
                                              shuffle=False,
                                              random_state=None)

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Always ignored, exists for compatibility.

        groups : array-like, with shape (n_samples,), optional
            Always ignored, exists for compatibility.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        n_folds = n_splits + 1
        if n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds ={0} greater"
                 " than the number of samples: {1}.").format(n_folds,
                                                             n_samples))
        indices = np.arange(n_samples)
        test_size = (n_samples // n_folds)
        test_starts = range(test_size + n_samples % n_folds,
                            n_samples, test_size)
        for test_start in test_starts:
            yield (indices[:test_start],
                   indices[test_start:test_start + test_size])


class LeaveOneGroupOut(BaseCrossValidator):
    """Leave One Group Out cross-validator

    Provides train/test indices to split data according to a third-party
    provided group. This group information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the groups could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    Read more in the :ref:`User Guide <cross_validation>`.

    Examples
    --------
    >>> from sklearn.model_selection import LeaveOneGroupOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> groups = np.array([1, 1, 2, 2])
    >>> logo = LeaveOneGroupOut()
    >>> logo.get_n_splits(X, y, groups)
    2
    >>> print(logo)
    LeaveOneGroupOut()
    >>> for train_index, test_index in logo.split(X, y, groups):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [1 2] [1 2]
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [1 2]

    """

    def _iter_test_masks(self, X, y, groups):
        if groups is None:
            raise ValueError("The groups parameter should not be None")
        # We make a copy of groups to avoid side-effects during iteration
        groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
        unique_groups = np.unique(groups)
        if len(unique_groups) <= 1:
            raise ValueError(
                "The groups parameter contains fewer than 2 unique groups "
                "(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
        for i in unique_groups:
            yield groups == i

    def get_n_splits(self, X, y, groups):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if groups is None:
            raise ValueError("The groups parameter should not be None")
        return len(np.unique(groups))


class LeavePGroupsOut(BaseCrossValidator):
    """Leave P Group(s) Out cross-validator

    Provides train/test indices to split data according to a third-party
    provided group. This group information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the groups could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePGroupsOut and LeaveOneGroupOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups while the latter uses samples
    all assigned the same groups.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_groups : int
        Number of groups (``p``) to leave out in the test split.

    Examples
    --------
    >>> from sklearn.model_selection import LeavePGroupsOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([1, 2, 1])
    >>> groups = np.array([1, 2, 3])
    >>> lpgo = LeavePGroupsOut(n_groups=2)
    >>> lpgo.get_n_splits(X, y, groups)
    3
    >>> print(lpgo)
    LeavePGroupsOut(n_groups=2)
    >>> for train_index, test_index in lpgo.split(X, y, groups):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2] TEST: [0 1]
    [[5 6]] [[1 2]
     [3 4]] [1] [1 2]
    TRAIN: [1] TEST: [0 2]
    [[3 4]] [[1 2]
     [5 6]] [2] [1 1]
    TRAIN: [0] TEST: [1 2]
    [[1 2]] [[3 4]
     [5 6]] [1] [2 1]

    See also
    --------
    GroupKFold: K-fold iterator variant with non-overlapping groups.
    """

    def __init__(self, n_groups):
        self.n_groups = n_groups

    def _iter_test_masks(self, X, y, groups):
        if groups is None:
            raise ValueError("The groups parameter should not be None")
        groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
        unique_groups = np.unique(groups)
        if self.n_groups >= len(unique_groups):
            raise ValueError(
                "The groups parameter contains fewer than (or equal to) "
                "n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
                "expects that at least n_groups + 1 (%d) unique groups be "
                "present" % (self.n_groups, unique_groups, self.n_groups + 1))
        combi = combinations(range(len(unique_groups)), self.n_groups)
        for indices in combi:
            test_index = np.zeros(_num_samples(X), dtype=np.bool)
            for l in unique_groups[np.array(indices)]:
                test_index[groups == l] = True
            yield test_index

    def get_n_splits(self, X, y, groups):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
            ``np.zeros(n_samples)`` may be used as a placeholder.

        y : object
            Always ignored, exists for compatibility.
            ``np.zeros(n_samples)`` may be used as a placeholder.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if groups is None:
            raise ValueError("The groups parameter should not be None")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        X, y, groups = indexable(X, y, groups)
        return int(comb(len(np.unique(groups)), self.n_groups, exact=True))


class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n_splits=10, test_size=0.1, train_size=None,
                 random_state=None):
        _validate_shuffle_split_init(test_size, train_size)
        self.n_splits = n_splits
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        for train, test in self._iter_indices(X, y, groups):
            yield train, test

    @abstractmethod
    def _iter_indices(self, X, y=None, groups=None):
        """Generate (train, test) indices"""

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits

    def __repr__(self):
        return _build_repr(self)


class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validator

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float, int, or None, default 0.1
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.model_selection import ShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
    >>> rs.get_n_splits(X)
    3
    >>> print(rs)
    ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
    >>> for train_index, test_index in rs.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...  # doctest: +ELLIPSIS
    TRAIN: [3 1 0] TEST: [2]
    TRAIN: [2 1 3] TEST: [0]
    TRAIN: [0 2 1] TEST: [3]
    >>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
    ...                   random_state=0)
    >>> for train_index, test_index in rs.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...  # doctest: +ELLIPSIS
    TRAIN: [3 1] TEST: [2]
    TRAIN: [2 1] TEST: [0]
    TRAIN: [0 2] TEST: [3]
    """

    def _iter_indices(self, X, y=None, groups=None):
        n_samples = _num_samples(X)
        n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
                                                  self.train_size)
        rng = check_random_state(self.random_state)
        for i in range(self.n_splits):
            # random partition
            permutation = rng.permutation(n_samples)
            ind_test = permutation[:n_test]
            ind_train = permutation[n_test:(n_test + n_train)]
            yield ind_train, ind_test


class GroupShuffleSplit(ShuffleSplit):
    '''Shuffle-Group(s)-Out cross-validation iterator

    Provides randomized train/test indices to split data according to a
    third-party provided group. This group information can be used to encode
    arbitrary domain specific stratifications of the samples as integers.

    For instance the groups could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePGroupsOut and GroupShuffleSplit is that
    the former generates splits using all subsets of size ``p`` unique groups,
    whereas GroupShuffleSplit generates a user-determined number of random
    test splits, each with a user-determined fraction of unique groups.

    For example, a less computationally intensive alternative to
    ``LeavePGroupsOut(p=10)`` would be
    ``GroupShuffleSplit(test_size=10, n_splits=100)``.

    Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
    not to samples, as in ShuffleSplit.

    Parameters
    ----------
    n_splits : int (default 5)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.2), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the groups to include in the test split. If
        int, represents the absolute number of test groups. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the groups to include in the train split. If
        int, represents the absolute number of train groups. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    '''

    def __init__(self, n_splits=5, test_size=0.2, train_size=None,
                 random_state=None):
        super(GroupShuffleSplit, self).__init__(
            n_splits=n_splits,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)

    def _iter_indices(self, X, y, groups):
        if groups is None:
            raise ValueError("The groups parameter should not be None")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        classes, group_indices = np.unique(groups, return_inverse=True)
        for group_train, group_test in super(
                GroupShuffleSplit, self)._iter_indices(X=classes):
            # these are the indices of classes in the partition
            # invert them into data indices
            train = np.flatnonzero(np.in1d(group_indices, group_train))
            test = np.flatnonzero(np.in1d(group_indices, group_test))
            yield train, test


def _approximate_mode(class_counts, n_draws, rng):
    """Computes approximate mode of multivariate hypergeometric.

    This is an approximation to the mode of the multivariate
    hypergeometric given by class_counts and n_draws.
    It shouldn't be off by more than one.

    It is the most likely outcome of drawing n_draws many
    samples from the population given by class_counts.

    Parameters
    ----------
    class_counts : ndarray of int
        Population per class.

    n_draws : int
        Number of draws (samples to draw) from the overall population.

    rng : random state
        Used to break ties.

    Returns
    -------
    sampled_classes : ndarray of int
        Number of samples drawn from each class.
        np.sum(sampled_classes) == n_draws

    Examples
    --------
    >>> from sklearn.model_selection._split import _approximate_mode
    >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
    array([2, 1])
    >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
    array([3, 1])
    >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
    ...                   n_draws=2, rng=0)
    array([0, 1, 1, 0])
    >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
    ...                   n_draws=2, rng=42)
    array([1, 1, 0, 0])
    """
    # this computes a bad approximation to the mode of the
    # multivariate hypergeometric given by class_counts and n_draws
    continuous = n_draws * class_counts / class_counts.sum()
    # floored means we don't overshoot n_samples, but probably undershoot
    floored = np.floor(continuous)
    # we add samples according to how much "left over" probability
    # they had, until we arrive at n_samples
    need_to_add = int(n_draws - floored.sum())
    if need_to_add > 0:
        remainder = continuous - floored
        values = np.sort(np.unique(remainder))[::-1]
        # add according to remainder, but break ties
        # randomly to avoid biases
        for value in values:
            inds, = np.where(remainder == value)
            # if we need_to_add less than what's in inds
            # we draw randomly from them.
            # if we need to add more, we add them all and
            # go to the next value
            add_now = min(len(inds), need_to_add)
            inds = choice(inds, size=add_now, replace=False, random_state=rng)
            floored[inds] += 1
            need_to_add -= add_now
            if need_to_add == 0:
                break
    return floored.astype(np.int)


class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross-validator

    Provides train/test indices to split data in train/test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.model_selection import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
    >>> sss.get_n_splits(X, y)
    3
    >>> print(sss)       # doctest: +ELLIPSIS
    StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
    >>> for train_index, test_index in sss.split(X, y):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2] TEST: [3 0]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 2] TEST: [3 1]
    """

    def __init__(self, n_splits=10, test_size=0.1, train_size=None,
                 random_state=None):
        super(StratifiedShuffleSplit, self).__init__(
            n_splits, test_size, train_size, random_state)

    def _iter_indices(self, X, y, groups=None):
        n_samples = _num_samples(X)
        y = check_array(y, ensure_2d=False, dtype=None)
        n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
                                                  self.train_size)
        classes, y_indices = np.unique(y, return_inverse=True)
        n_classes = classes.shape[0]

        class_counts = bincount(y_indices)
        if np.min(class_counts) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of groups for any class cannot"
                             " be less than 2.")

        if n_train < n_classes:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_train, n_classes))
        if n_test < n_classes:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_test, n_classes))

        rng = check_random_state(self.random_state)

        for _ in range(self.n_splits):
            # if there are ties in the class-counts, we want
            # to make sure to break them anew in each iteration
            n_i = _approximate_mode(class_counts, n_train, rng)
            class_counts_remaining = class_counts - n_i
            t_i = _approximate_mode(class_counts_remaining, n_test, rng)

            train = []
            test = []

            for i, class_i in enumerate(classes):
                permutation = rng.permutation(class_counts[i])
                perm_indices_class_i = np.where((y == class_i))[0][permutation]

                train.extend(perm_indices_class_i[:n_i[i]])
                test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])

            train = rng.permutation(train)
            test = rng.permutation(test)

            yield train, test

    def split(self, X, y, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

            Note that providing ``y`` is sufficient to generate the splits and
            hence ``np.zeros(n_samples)`` may be used as a placeholder for
            ``X`` instead of actual training data.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
            Stratification is done based on the y labels.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        y = check_array(y, ensure_2d=False, dtype=None)
        return super(StratifiedShuffleSplit, self).split(X, y, groups)


def _validate_shuffle_split_init(test_size, train_size):
    """Validation helper to check the test_size and train_size at init

    NOTE This does not take into account the number of samples which is known
    only at split
    """
    if test_size is None and train_size is None:
        raise ValueError('test_size and train_size can not both be None')

    if test_size is not None:
        if np.asarray(test_size).dtype.kind == 'f':
            if test_size >= 1.:
                raise ValueError(
                    'test_size=%f should be smaller '
                    'than 1.0 or be an integer' % test_size)
        elif np.asarray(test_size).dtype.kind != 'i':
            # int values are checked during split based on the input
            raise ValueError("Invalid value for test_size: %r" % test_size)

    if train_size is not None:
        if np.asarray(train_size).dtype.kind == 'f':
            if train_size >= 1.:
                raise ValueError("train_size=%f should be smaller "
                                 "than 1.0 or be an integer" % train_size)
            elif (np.asarray(test_size).dtype.kind == 'f' and
                    (train_size + test_size) > 1.):
                raise ValueError('The sum of test_size and train_size = %f, '
                                 'should be smaller than 1.0. Reduce '
                                 'test_size and/or train_size.' %
                                 (train_size + test_size))
        elif np.asarray(train_size).dtype.kind != 'i':
            # int values are checked during split based on the input
            raise ValueError("Invalid value for train_size: %r" % train_size)


def _validate_shuffle_split(n_samples, test_size, train_size):
    """
    Validation helper to check if the train/test sizes are meaningful wrt the
    size of the data (n_samples)
    """
    if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
            test_size >= n_samples):
        raise ValueError('test_size=%d should be smaller than the number of '
                         'samples %d' % (test_size, n_samples))

    if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
            train_size >= n_samples):
        raise ValueError("train_size=%d should be smaller than the number of"
                         " samples %d" % (train_size, n_samples))

    if np.asarray(test_size).dtype.kind == 'f':
        n_test = ceil(test_size * n_samples)
    elif np.asarray(test_size).dtype.kind == 'i':
        n_test = float(test_size)

    if train_size is None:
        n_train = n_samples - n_test
    elif np.asarray(train_size).dtype.kind == 'f':
        n_train = floor(train_size * n_samples)
    else:
        n_train = float(train_size)

    if test_size is None:
        n_test = n_samples - n_train

    if n_train + n_test > n_samples:
        raise ValueError('The sum of train_size and test_size = %d, '
                         'should be smaller than the number of '
                         'samples %d. Reduce test_size and/or '
                         'train_size.' % (n_train + n_test, n_samples))

    return int(n_train), int(n_test)


class PredefinedSplit(BaseCrossValidator):
    """Predefined split cross-validator

    Splits the data into training/test set folds according to a predefined
    scheme. Each sample can be assigned to at most one test set fold, as
    specified by the user through the ``test_fold`` parameter.

    Read more in the :ref:`User Guide <cross_validation>`.

    Examples
    --------
    >>> from sklearn.model_selection import PredefinedSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> test_fold = [0, 1, -1, 1]
    >>> ps = PredefinedSplit(test_fold)
    >>> ps.get_n_splits()
    2
    >>> print(ps)       # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    PredefinedSplit(test_fold=array([ 0,  1, -1,  1]))
    >>> for train_index, test_index in ps.split():
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2 3] TEST: [0]
    TRAIN: [0 2] TEST: [1 3]
    """

    def __init__(self, test_fold):
        self.test_fold = np.array(test_fold, dtype=np.int)
        self.test_fold = column_or_1d(self.test_fold)
        self.unique_folds = np.unique(self.test_fold)
        self.unique_folds = self.unique_folds[self.unique_folds != -1]

    def split(self, X=None, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        ind = np.arange(len(self.test_fold))
        for test_index in self._iter_test_masks():
            train_index = ind[np.logical_not(test_index)]
            test_index = ind[test_index]
            yield train_index, test_index

    def _iter_test_masks(self):
        """Generates boolean masks corresponding to test sets."""
        for f in self.unique_folds:
            test_index = np.where(self.test_fold == f)[0]
            test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
            test_mask[test_index] = True
            yield test_mask

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return len(self.unique_folds)


class _CVIterableWrapper(BaseCrossValidator):
    """Wrapper class for old style cv objects and iterables."""

    def __init__(self, cv):
        self.cv = list(cv)

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return len(self.cv)

    def split(self, X=None, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        for train, test in self.cv:
            yield train, test


def check_cv(cv=3, y=None, classifier=False):
    """Input checker utility for building a cross-validator

    Parameters
    ----------
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, if classifier is True and ``y`` is either
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    y : array-like, optional
        The target variable for supervised learning problems.

    classifier : boolean, optional, default False
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv : a cross-validator instance.
        The return value is a cross-validator which generates the train/test
        splits via the ``split`` method.
    """
    if cv is None:
        cv = 3

    if isinstance(cv, numbers.Integral):
        if (classifier and (y is not None) and
                (type_of_target(y) in ('binary', 'multiclass'))):
            return StratifiedKFold(cv)
        else:
            return KFold(cv)

    if not hasattr(cv, 'split') or isinstance(cv, str):
        if not isinstance(cv, Iterable) or isinstance(cv, str):
            raise ValueError("Expected cv as an integer, cross-validation "
                             "object (from sklearn.model_selection) "
                             "or an iterable. Got %s." % cv)
        return _CVIterableWrapper(cv)

    return cv  # New style cv objects are passed without any modification


def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets

    Quick utility that wraps input validation and
    ``next(ShuffleSplit().split(X, y))`` and application to input data
    into a single call for splitting (and optionally subsampling) data in a
    oneliner.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    *arrays : sequence of indexables with same length / shape[0]
        Allowed inputs are lists, numpy arrays, scipy-sparse
        matrices or pandas dataframes.

    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    stratify : array-like or None (default is None)
        If not None, data is split in a stratified fashion, using this as
        the class labels.

    Returns
    -------
    splitting : list, length=2 * len(arrays)
        List containing train-test split of inputs.

        .. versionadded:: 0.16
            If the input is sparse, the output will be a
            ``scipy.sparse.csr_matrix``. Else, output type is the same as the
            input type.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = np.arange(10).reshape((5, 2)), range(5)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
           [8, 9]])
    >>> list(y)
    [0, 1, 2, 3, 4]

    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.33, random_state=42)
    ...
    >>> X_train
    array([[4, 5],
           [0, 1],
           [6, 7]])
    >>> y_train
    [2, 0, 3]
    >>> X_test
    array([[2, 3],
           [8, 9]])
    >>> y_test
    [1, 4]

    """
    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")
    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    stratify = options.pop('stratify', None)

    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))

    if test_size is None and train_size is None:
        test_size = 0.25
    arrays = indexable(*arrays)

    if stratify is not None:
        CVClass = StratifiedShuffleSplit
    else:
        CVClass = ShuffleSplit

    cv = CVClass(test_size=test_size,
                 train_size=train_size,
                 random_state=random_state)

    train, test = next(cv.split(X=arrays[0], y=stratify))
    return list(chain.from_iterable((safe_indexing(a, train),
                                     safe_indexing(a, test)) for a in arrays))


train_test_split.__test__ = False  # to avoid a pb with nosetests


def _build_repr(self):
    # XXX This is copied from BaseEstimator's get_params
    cls = self.__class__
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    # Ignore varargs, kw and default values and pop self
    init_signature = signature(init)
    # Consider the constructor parameters excluding 'self'
    if init is object.__init__:
        args = []
    else:
        args = sorted([p.name for p in init_signature.parameters.values()
                       if p.name != 'self' and p.kind != p.VAR_KEYWORD])
    class_name = self.__class__.__name__
    params = dict()
    for key in args:
        # We need deprecation warnings to always be on in order to
        # catch deprecated param values.
        # This is set in utils/__init__.py but it gets overwritten
        # when running under python3 somehow.
        warnings.simplefilter("always", DeprecationWarning)
        try:
            with warnings.catch_warnings(record=True) as w:
                value = getattr(self, key, None)
            if len(w) and w[0].category == DeprecationWarning:
                # if the parameter is deprecated, don't show it
                continue
        finally:
            warnings.filters.pop(0)
        params[key] = value
    return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
bsd-3-clause
-6,443,130,365,688,701,000
-1,912,117,979,299,768,000
35.342166
79
0.593295
false
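A minimal usage sketch for the splitters in the record above (not part of the original file); it assumes a scikit-learn build of the same vintage (~0.18) is installed as `sklearn`.

import numpy as np
from sklearn.model_selection import StratifiedKFold, train_test_split

X = np.arange(20).reshape(10, 2)
y = np.array([0] * 5 + [1] * 5)

# StratifiedKFold preserves the 50/50 class ratio in every test fold.
for train_idx, test_idx in StratifiedKFold(n_splits=5).split(X, y):
    assert y[test_idx].mean() == 0.5

# train_test_split wraps ShuffleSplit (or StratifiedShuffleSplit when
# stratify= is passed) for the common one-shot case.
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3,
                                          stratify=y, random_state=0)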
kmiller96/Shipping-Containers-Software
lib/core.py
1
8600
# AUTHOR: Kale Miller
# DESCRIPTION: The 'main brain' of the program is held in here.

# 50726f6772616d6d696e6720697320627265616b696e67206f66206f6e652062696720696d706f737369626c65207461736b20696e746f20736576
# 6572616c207665727920736d616c6c20706f737369626c65207461736b732e

# DEVELOPMENT LOG:
# 07/12/16: Initialized file. Moved IDGenerator class into the script. Added holding bay class.
# 12/12/16: Tweaked the IDGenerator class to help remove dependancy.
# 13/12/16: Fleshed out the NewHoldingBay class.
# 15/12/16: Added methods to add auxilary labels. Added method to generate information label. Small bug fixes.

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS/GLOBALS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os, time
import numpy as np
from lib import containers

CONTAINER_CLASSES = [
    containers.BasicContainer,
    containers.HeavyContainer,
    containers.RefrigeratedContainer,
    containers.LiquidContainer,
    containers.ExplosivesContainer,
    containers.ToxicContainer,
    containers.ChemicalContainer
]
CONTAINER_TYPES = ['basic', 'heavy', 'refrigerated', 'liquid', 'explosive', 'toxic', 'chemical']
SERIAL_CODES = ['B', 'H', 'R', 'L', 'E', 'T', 'C']

TAG_APPLICATION_TIME = 0.2
PRINTALL_TIME = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def processshipfile(filename, path):
    """Processes the csv file that the ship supplies."""
    def _deletenewline(string):
        """Deletes the \n symbol from a string if it exists."""
        try:
            truncatedstring = string[:string.index('\n')]
        except ValueError:
            truncatedstring = string
        finally:
            return truncatedstring

    try:
        home = os.getcwd()
        os.chdir(path)
    except WindowsError:  # Would this hold true on all machines?
        raise NameError, "The path specified does not exist."

    rawfile = open(filename, 'r')
    arylines = rawfile.readlines()
    basematrix = map(lambda x: _deletenewline(x).split(','), arylines)
    numpyarray = np.array(basematrix)
    return numpyarray


class IDGenerator:
    """Controls the assignment of id tags on the containers."""
    # TODO: Change the __init__ such that it works by reading a collection of tuples instead of two lists.

    def __init__(self):
        """Initialise the id generator."""
        self._COUNTERS = [0] * len(CONTAINER_TYPES)
        return

    def _findindex(self, container):
        """Determines the index in the lists the class should use."""
        return CONTAINER_TYPES.index(container)

    def _serialcode(self, index):
        """Fetches the serial code for a supplied index."""
        return SERIAL_CODES[index]

    def _counter(self, index):
        """Fetches the counter for a specific serial type and increments it by one."""
        self._COUNTERS[index] += 1
        return self._COUNTERS[index]

    def newid(self, containertype):
        """Generates a new id."""
        ii = self._findindex(containertype)
        idtag = self._serialcode(ii) + str(self._counter(ii)).zfill(5)
        return idtag


class NewHoldingBay:
    """Creates a new holding bay for the containers.

    Thus it contains all of the information about the containers along with
    the methods controlling unloading and loading them."""

    def __init__(self):
        self._path = os.getcwd()
        self.idgenerator = IDGenerator()
        self.containerlist = list()
        self._iOnship = 0
        self._iLoaded = 0
        self._iHolding = 0
        return None

    def _createcontainer(self, containerstr, parameters):
        """Creates a new container class based on the first column of the CSV."""
        # TODO: Fix this method up to catch more and print useful error messages.
        if not isinstance(containerstr, str):
            raise TypeError, "The parameter passed must be a string."
        elif len(containerstr) == 1:
            try:
                ii = SERIAL_CODES.index(containerstr)
            except ValueError:
                raise Exception("Bad input.")  # TODO: Fix this area up.
        elif len(containerstr) != 1:
            try:
                ii = CONTAINER_TYPES.index(containerstr)
            except ValueError:
                raise Exception("Bad input.")
        idtag = self.idgenerator.newid(CONTAINER_TYPES[ii])
        return CONTAINER_CLASSES[ii](idtag, *parameters)

    def defineship(self, file):
        """Pass in the CSV file of the ship in order to unload it."""
        shipdata = processshipfile(file, self._path)
        shipdata = shipdata[1::]  # Throw out the headers.
        for line in shipdata:
            newcontainer = self._createcontainer(line[0], (line[1], line[3]))
            self.containerlist.append(newcontainer)
            self._iOnship += 1

    def printcontainer(self, serial):
        """Prints the information about a specific container."""
        for container in self.containerlist:
            if container.id() == serial:
                container.information()
                return None
            else:
                continue
        raise NameError, "Unable to find container with serial code %s" % serial

    def printallinformation(self):
        """Prints the information of all the containers."""
        for container in self.containerlist:
            container.information()
            time.sleep(PRINTALL_TIME)
        return None

    def unloadall(self, debug=False):
        """Unloads all of the containers from the ship."""
        for container in self.containerlist:
            container.unload(debug=debug)
            self._iHolding += 1
            self._iOnship -= 1
        return None

    def loadall(self, debug=False):
        """Loads all of the containers into trucks and trains."""
        # TODO: Proper loading locations.
        ii = 1
        for container in self.containerlist:
            container.load('Truck ' + str(ii).zfill(3), debug=debug)
            self._iHolding -= 1
            self._iLoaded += 1
            ii += 1
        return None

    def printauditedload(self):
        """Prints information about the holding bay at this time."""
        iContainercount = [0] * len(CONTAINER_TYPES)
        for container in self.containerlist:
            try:
                ii = CONTAINER_TYPES.index(container._type)
                iContainercount[ii] += 1
            except ValueError:
                raise NameError, "One (or more) containers don't have a valid type."

        # Print the appropriate information.
        print "----------------------------------------------------------------------"
        print "TOTAL CONTAINERS: %i" % len(self.containerlist); time.sleep(0.3)
        print "CONTAINERS CURRENTLY STILL ON SHIP: %i" % self._iOnship; time.sleep(0.3)
        print "CONTAINERS LOADED ON TRUCKS AND TRAINS: %i" % self._iLoaded; time.sleep(0.3)
        print "CONTAINERS BEING HELD IN THE HOLDING BAY: %i" % self._iHolding; time.sleep(0.3)
        print ""
        print "THE NUMBER OF CONTAINERS FOR EACH TYPE:"; time.sleep(0.3)
        for ii in xrange(len(CONTAINER_TYPES)):
            if iContainercount[ii] == 0:
                continue
            print "\t%s: %i" % (CONTAINER_TYPES[ii], iContainercount[ii]); time.sleep(0.3)
        print "----------------------------------------------------------------------"
        return None

    def addidtags(self, debug=False):
        """Applies appropriate serial numbers to all of the containers."""
        for container in self.containerlist:
            print "Applying id tag to container %s" % container.id()
            if not debug:
                time.sleep(TAG_APPLICATION_TIME)
            container.addidtag()
        return None

    def applyauxilarylabels(self):
        """Applies the labels that should go on containers about their contents and handling."""
        for container in self.containerlist:
            print "Adding labels to container %s" % container.id()
            container.addauxilarylabels()
        return None

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mit
6,252,638,948,690,728,000
1,472,481,626,354,527,500
39.148325
120
0.570698
false
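A hypothetical driver for the NewHoldingBay class in the record above; the manifest filename and its column layout are assumptions for illustration, not part of the original repository.

from lib.core import NewHoldingBay

bay = NewHoldingBay()
bay.defineship('manifest.csv')  # assumed CSV: type code in column 0, params in columns 1 and 3
bay.unloadall(debug=True)       # debug=True is forwarded to skip simulated delays
bay.addidtags(debug=True)
bay.applyauxilarylabels()
bay.loadall(debug=True)
bay.printauditedload()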
ashhher3/invenio
modules/bibrank/lib/bibrank_downloads_similarity.py
19
4328
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

__revision__ = \
    "$Id$"

from invenio.config import \
     CFG_ACCESS_CONTROL_LEVEL_SITE, \
     CFG_CERN_SITE
from invenio.dbquery import run_sql
from invenio.bibrank_downloads_indexer import database_tuples_to_single_list
from invenio.search_engine_utils import get_fieldvalues


def record_exists(recID):
    """Return 1 if record RECID exists.
       Return 0 if it doesn't exist.
       Return -1 if it exists but is marked as deleted.
       Copy from search_engine"""
    out = 0
    query = "SELECT id FROM bibrec WHERE id='%s'" % recID
    res = run_sql(query, None, 1)
    if res:
        # record exists; now check whether it isn't marked as deleted:
        dbcollids = get_fieldvalues(recID, "980__%")
        if ("DELETED" in dbcollids) or (CFG_CERN_SITE and "DUMMY" in dbcollids):
            out = -1  # exists, but marked as deleted
        else:
            out = 1  # exists fine
    return out


### INTERFACE

def register_page_view_event(recid, uid, client_ip_address):
    """Register Detailed record page view event for record RECID
       consulted by user UID from machine CLIENT_HOST_IP.
       To be called by the search engine.
    """
    if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        # do not register access if we are in read-only access control
        # site mode:
        return []
    return run_sql("INSERT DELAYED INTO rnkPAGEVIEWS " \
                   " (id_bibrec,id_user,client_host,view_time) " \
                   " VALUES (%s,%s,INET_ATON(%s),NOW())", \
                   (recid, uid, client_ip_address))

def calculate_reading_similarity_list(recid, type="pageviews"):
    """Calculate reading similarity data to use in reading similarity
       boxes (``people who downloaded/viewed this file/page have also
       downloaded/viewed'').  Return list of (recid1, score1), (recid2,
       score2), ... for all recidN that were consulted by the same people
       who have also consulted RECID.  The reading similarity TYPE can be
       either `pageviews' or `downloads', depending whether we want to
       obtain page view similarity or download similarity.
    """
    if CFG_CERN_SITE:
        return []  # CERN hack 2009-11-23 to ease the load
    if type == "downloads":
        tablename = "rnkDOWNLOADS"
    else:  # default
        tablename = "rnkPAGEVIEWS"
    # firstly compute the set of client hosts who consulted recid:
    client_host_list = run_sql("SELECT DISTINCT(client_host)" + \
                               " FROM " + tablename + \
                               " WHERE id_bibrec=%s " + \
                               " AND client_host IS NOT NULL",
                               (recid,))
    # secondly look up all recids that were consulted by these client hosts,
    # and order them by the number of different client hosts reading them:
    res = []
    if client_host_list != ():
        client_host_list = str(database_tuples_to_single_list(client_host_list))
        client_host_list = client_host_list.replace("L", "")
        client_host_list = client_host_list.replace("[", "")
        client_host_list = client_host_list.replace("]", "")
        res = run_sql("SELECT id_bibrec,COUNT(DISTINCT(client_host)) AS c" \
                      " FROM " + tablename + \
                      " WHERE client_host IN (" + client_host_list + ")" + \
                      " AND id_bibrec != %s" \
                      " GROUP BY id_bibrec ORDER BY c DESC LIMIT 10",
                      (recid,))
    return res
gpl-2.0
-7,215,239,612,178,231,000
3,368,249,666,166,398,500
42.717172
80
0.619455
false
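The SQL in calculate_reading_similarity_list above reduces to a two-step co-occurrence count: find the hosts that consulted a record, then rank other records by how many of those hosts also consulted them. A rough in-memory sketch of that idea, with made-up (recid, host) event data rather than Invenio's tables:

from collections import Counter, defaultdict

events = [(10, 'h1'), (10, 'h2'), (11, 'h1'), (12, 'h1'), (12, 'h2'), (12, 'h3')]

def similar_records(recid, events, limit=10):
    hosts = {h for r, h in events if r == recid}
    seen = defaultdict(set)
    for r, h in events:
        if r != recid and h in hosts:
            seen[r].add(h)
    counts = Counter({r: len(hs) for r, hs in seen.items()})
    return counts.most_common(limit)

print(similar_records(10, events))  # [(12, 2), (11, 1)]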
zkota/pyblio-1.2
pybrc.py
2
1564
# Site configuration

from Pyblio import Autoload, Config, version
from Pyblio.TextUI import *

# ==================================================

import string, os

# define autoloaded formats

Autoload.preregister ('format', 'BibTeX', 'Pyblio.Format.BibTeX', '.*\.bib')
Autoload.preregister ('format', 'Ovid', 'Pyblio.Format.Ovid', '.*\.ovid')
Autoload.preregister ('format', 'Medline', 'Pyblio.Format.Medline', '.*\.med')
Autoload.preregister ('format', 'Refer', 'Pyblio.Format.Refer', '.*\.refer')
Autoload.preregister ('format', 'ISIFile', 'Pyblio.Format.isifile', '.*\.isi')

# define styles and outputs

Autoload.preregister ('style', 'Generic', 'Pyblio.Style.Generic')
Autoload.preregister ('style', 'apa4e', 'Pyblio.Style.apa4e')
Autoload.preregister ('style', 'abbrv', 'Pyblio.Style.abbrv')

Autoload.preregister ('output', 'Text', 'Pyblio.Output.text')
Autoload.preregister ('output', 'Raw', 'Pyblio.Output.raw')
Autoload.preregister ('output', 'HTML', 'Pyblio.Output.html')
Autoload.preregister ('output', 'LaTeX', 'Pyblio.Output.LaTeX')
Autoload.preregister ('output', 'Textnum', 'Pyblio.Output.textnum')
Autoload.preregister ('output', 'Textau', 'Pyblio.Output.textau')

# define key formats

Autoload.preregister ('key', 'Default', 'Pyblio.Utils')

# Parse the configuration directory

rootconfig = os.path.join ('Pyblio', 'ConfDir')
if not os.path.isdir (rootconfig):
    rootconfig = os.path.join (version.pybdir, 'Pyblio', 'ConfDir')

if os.path.isdir (rootconfig):
    Config.parse_directory (rootconfig)
gpl-2.0
-2,274,878,692,675,902,700
8,751,038,753,221,359,000
33
80
0.67711
false
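The Autoload.preregister calls in the record above map a filename regex to a module that is imported only when first needed. A hand-rolled, self-contained sketch of that lazy-registry pattern; the registry functions and the sample 'JSON' entry are illustrative, not Pyblio's actual API:

import re, importlib

_formats = []   # (name, module_path, filename_regex)

def preregister(name, module_path, pattern):
    _formats.append((name, module_path, re.compile(pattern)))

def handler_for(filename):
    # Import the handler module lazily, on first match.
    for name, module_path, rx in _formats:
        if rx.match(filename):
            return importlib.import_module(module_path)
    raise LookupError('no format matches %r' % filename)

preregister('JSON', 'json', r'.*\.json$')
print(handler_for('refs.json').dumps({'ok': True}))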
bryceguo/robotframework-selenium2library
demo/package.py
4
1378
#!/usr/bin/env python

import os, sys
from time import localtime
from zipfile import ZipFile, ZIP_DEFLATED

THIS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(THIS_DIR, "..", "src", "Selenium2Library"))
import metadata

FILES = {
    '': ['rundemo.py'],
    'login_tests': ['valid_login.txt', 'invalid_login.txt', 'resource.txt'],
    'demoapp': ['server.py'],
    'demoapp/html': ['index.html', 'welcome.html', 'error.html', 'demo.css']
}

def main():
    cwd = os.getcwd()
    try:
        os.chdir(THIS_DIR)
        name = 'robotframework-selenium2library-%s-demo' % metadata.VERSION
        zipname = '%s.zip' % name
        if os.path.exists(zipname):
            os.remove(zipname)
        zipfile = ZipFile(zipname, 'w', ZIP_DEFLATED)
        for dirname in FILES:
            for filename in FILES[dirname]:
                path = os.path.join('.', dirname.replace('/', os.sep), filename)
                print 'Adding: ', os.path.normpath(path)
                zipfile.write(path, os.path.join(name, path))
        zipfile.close()
        target_path = os.path.join('..', 'dist', zipname)
        if os.path.exists(target_path):
            os.remove(target_path)
        os.rename(zipname, target_path)
        print 'Created: ', os.path.abspath(target_path)
    finally:
        os.chdir(cwd)

if __name__ == '__main__':
    main()
apache-2.0
-1,838,861,047,696,388,600
9,019,610,546,512,262,000
30.318182
80
0.583454
false
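The packaging step in the record above just collects files into a versioned zip, preserving relative paths under a top-level directory. A Python 3 sketch of the same step; the archive name and file paths are hypothetical:

import os
from zipfile import ZipFile, ZIP_DEFLATED

def make_demo_zip(name, files, out='demo.zip'):
    # Each file is stored under <name>/<original relative path>.
    with ZipFile(out, 'w', ZIP_DEFLATED) as zf:
        for path in files:
            zf.write(path, os.path.join(name, path))
    return out

# Example call (the listed files must exist on disk):
# make_demo_zip('mylib-1.0-demo', ['rundemo.py', 'login_tests/resource.txt'])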
mapseed/api
src/sa_api_v2/management/commands/updateDatasetDevCreds.py
3
3486
from __future__ import print_function
from django.core.management.base import BaseCommand
import os
import re
# for manually testing with `./manage.py shell` commandline:
# from ... import models as sa_models
# from ... import forms
from sa_api_v2 import models as sa_models

import logging

# display our logs to console with StreamHandler:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(console_handler)

# The name of our file containing our dev key values,
# located in our project root.
DATASET_ENV_FILE = '.dataset-env'
# The suffix of our dev key variables in our .env file:
DEV_KEY_SUFFIX = '_DEV_KEY'


def parse_env(dict):
    """
    Parses variables from a .env file located in the project
    root directory and loads them into the dictionary.
    """
    try:
        file_path = os.path.join(
            os.path.dirname(__file__), '..', '..', '..', DATASET_ENV_FILE)
        with open(file_path) as f:
            content = f.read()
    except IOError:
        content = ''

    for line in content.splitlines():
        m1 = re.match(r'\A([A-Za-z_0-9]+){}=(.*)\Z'.format(DEV_KEY_SUFFIX), line)
        if m1:
            key, val = m1.group(1), m1.group(2)
            m2 = re.match(r"\A'(.*)'\Z", val)
            if m2:
                val = m2.group(1)
            m3 = re.match(r'\A"(.*)"\Z', val)
            if m3:
                val = re.sub(r'\\(.)', r'\1', m3.group(1))
            logger.info('parsing key from environment file: {}={}'
                        .format(key, val))
            dict.setdefault('{}{}'.format(key, DEV_KEY_SUFFIX), val)


class Command(BaseCommand):
    help = """
    For our dev api, update the api key value of all our datasets to the
    constants defined in our .env file

    This command is idempotent.
    """

    def handle(self, *args, **options):
        logger.info('parsing environment variables...')
        api_key_values = {}
        parse_env(api_key_values)
        logger.info('environment variables: {}'.format(api_key_values))

        logger.info('starting dataset key migration...')
        datasets = sa_models.DataSet.objects.all()
        logger.info('fetching matching...')
        logger.info('')
        for dataset in datasets:
            api_key_value = api_key_values.get('{}{}'.format(
                dataset.display_name.upper(), DEV_KEY_SUFFIX), None)

            # handle case when we have a dataset but no dev key:
            if api_key_value is None:
                logger.error('No matching key found for dataset: {}'
                             .format(dataset.display_name))
                logger.error('perhaps we should create a key for it?\n')
                continue

            # handle case when we have a dataset with no key:
            if dataset.keys is None or len(dataset.keys.all()) < 1:
                logger.error('Skipping dataset because it has no api key: {}\n'
                             .format(dataset.display_name))
                continue

            logger.info('setting key for dataset name: {} to value: {}'
                        .format(dataset.display_name, api_key_value))
            # save the new value to our dataset's key:
            key = dataset.keys.all()[0]
            key.key = api_key_value
            key.save()
            dataset.key = key
            dataset.save()
gpl-3.0
6,838,489,530,408,534,000
2,266,499,656,569,501,200
33.86
79
0.567986
false
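The regex handling in parse_env above can be exercised in isolation: match KEY=VALUE lines with the dev-key suffix, strip single or double quotes, and unescape backslashes inside double quotes. A trimmed, self-contained sketch with made-up sample content:

import re

def parse_env_text(content, suffix='_DEV_KEY'):
    keys = {}
    for line in content.splitlines():
        m1 = re.match(r'\A([A-Za-z_0-9]+)%s=(.*)\Z' % re.escape(suffix), line)
        if not m1:
            continue
        key, val = m1.group(1), m1.group(2)
        m2 = re.match(r"\A'(.*)'\Z", val)
        if m2:
            val = m2.group(1)
        m3 = re.match(r'\A"(.*)"\Z', val)
        if m3:
            val = re.sub(r'\\(.)', r'\1', m3.group(1))  # unescape \x -> x
        keys[key + suffix] = val
    return keys

print(parse_env_text('FOO_DEV_KEY="abc\\"def"\nBAR_DEV_KEY=plain'))
# {'FOO_DEV_KEY': 'abc"def', 'BAR_DEV_KEY': 'plain'}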
britcey/ansible
test/runner/lib/target.py
28
16080
"""Test target identification, iteration and inclusion/exclusion.""" from __future__ import absolute_import, print_function import os import re import errno import itertools import abc from lib.util import ApplicationError MODULE_EXTENSIONS = '.py', '.ps1' def find_target_completion(target_func, prefix): """ :type target_func: () -> collections.Iterable[CompletionTarget] :type prefix: unicode :rtype: list[str] """ try: targets = target_func() prefix = prefix.encode() short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash matches = walk_completion_targets(targets, prefix, short) return matches except Exception as ex: # pylint: disable=locally-disabled, broad-except return [str(ex)] def walk_completion_targets(targets, prefix, short=False): """ :type targets: collections.Iterable[CompletionTarget] :type prefix: str :type short: bool :rtype: tuple[str] """ aliases = set(alias for target in targets for alias in target.aliases) if prefix.endswith('/') and prefix in aliases: aliases.remove(prefix) matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]] if short: offset = len(os.path.dirname(prefix)) if offset: offset += 1 relative_matches = [match[offset:] for match in matches if len(match) > offset] if len(relative_matches) > 1: matches = relative_matches return tuple(sorted(matches)) def walk_internal_targets(targets, includes=None, excludes=None, requires=None): """ :type targets: collections.Iterable[T <= CompletionTarget] :type includes: list[str] :type excludes: list[str] :type requires: list[str] :rtype: tuple[T <= CompletionTarget] """ targets = tuple(targets) include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda t: t.name) if requires: require_targets = set(filter_targets(targets, requires, errors=True, directories=False)) include_targets = [target for target in include_targets if target in require_targets] if excludes: list(filter_targets(targets, excludes, errors=True, include=False, directories=False)) internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False)) return tuple(sorted(internal_targets, key=lambda t: t.name)) def walk_external_targets(targets, includes=None, excludes=None, requires=None): """ :type targets: collections.Iterable[CompletionTarget] :type includes: list[str] :type excludes: list[str] :type requires: list[str] :rtype: tuple[CompletionTarget], tuple[CompletionTarget] """ targets = tuple(targets) if requires: include_targets = list(filter_targets(targets, includes, errors=True, directories=False)) require_targets = set(filter_targets(targets, requires, errors=True, directories=False)) includes = [target.name for target in include_targets if target in require_targets] if includes: include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name) else: include_targets = [] else: include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name) if excludes: exclude_targets = sorted(filter_targets(targets, excludes, errors=True), key=lambda t: t.name) else: exclude_targets = [] previous = None include = [] for target in include_targets: if isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \ and previous.name == target.name: previous.modules = tuple(set(previous.modules) | set(target.modules)) else: include.append(target) previous = target previous = None exclude = [] for target in exclude_targets: if 
isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \ and previous.name == target.name: previous.modules = tuple(set(previous.modules) | set(target.modules)) else: exclude.append(target) previous = target return tuple(include), tuple(exclude) def filter_targets(targets, patterns, include=True, directories=True, errors=True): """ :type targets: collections.Iterable[CompletionTarget] :type patterns: list[str] :type include: bool :type directories: bool :type errors: bool :rtype: collections.Iterable[CompletionTarget] """ unmatched = set(patterns or ()) compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None for target in targets: matched_directories = set() match = False if patterns: for alias in target.aliases: for pattern in patterns: if compiled_patterns[pattern].match(alias): match = True try: unmatched.remove(pattern) except KeyError: pass if alias.endswith('/'): if target.base_path and len(target.base_path) > len(alias): matched_directories.add(target.base_path) else: matched_directories.add(alias) elif include: match = True if not target.base_path: matched_directories.add('.') for alias in target.aliases: if alias.endswith('/'): if target.base_path and len(target.base_path) > len(alias): matched_directories.add(target.base_path) else: matched_directories.add(alias) if match != include: continue if directories and matched_directories: yield DirectoryTarget(sorted(matched_directories, key=len)[0], target.modules) else: yield target if errors: if unmatched: raise TargetPatternsNotMatched(unmatched) def walk_module_targets(): """ :rtype: collections.Iterable[TestTarget] """ path = 'lib/ansible/modules' for target in walk_test_targets(path, path + '/', extensions=MODULE_EXTENSIONS): if not target.module: continue yield target def walk_units_targets(): """ :rtype: collections.Iterable[TestTarget] """ return walk_test_targets(path='test/units', module_path='test/units/modules/', extensions=('.py',), prefix='test_') def walk_compile_targets(): """ :rtype: collections.Iterable[TestTarget] """ return walk_test_targets(module_path='lib/ansible/modules/', extensions=('.py',)) def walk_sanity_targets(): """ :rtype: collections.Iterable[TestTarget] """ return walk_test_targets(module_path='lib/ansible/modules/') def walk_posix_integration_targets(): """ :rtype: collections.Iterable[IntegrationTarget] """ for target in walk_integration_targets(): if 'posix/' in target.aliases: yield target def walk_network_integration_targets(): """ :rtype: collections.Iterable[IntegrationTarget] """ for target in walk_integration_targets(): if 'network/' in target.aliases: yield target def walk_windows_integration_targets(): """ :rtype: collections.Iterable[IntegrationTarget] """ for target in walk_integration_targets(): if 'windows/' in target.aliases: yield target def walk_integration_targets(): """ :rtype: collections.Iterable[IntegrationTarget] """ path = 'test/integration/targets' modules = frozenset(t.module for t in walk_module_targets()) paths = sorted(os.path.join(path, p) for p in os.listdir(path)) prefixes = load_integration_prefixes() for path in paths: yield IntegrationTarget(path, modules, prefixes) def load_integration_prefixes(): """ :rtype: dict[str, str] """ path = 'test/integration' names = sorted(f for f in os.listdir(path) if os.path.splitext(f)[0] == 'target-prefixes') prefixes = {} for name in names: prefix = os.path.splitext(name)[1][1:] with open(os.path.join(path, name), 'r') as prefix_fd: prefixes.update(dict((k, prefix) for 
k in prefix_fd.read().splitlines())) return prefixes def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None): """ :type path: str | None :type module_path: str | None :type extensions: tuple[str] | None :type prefix: str | None :rtype: collections.Iterable[TestTarget] """ for root, _, file_names in os.walk(path or '.', topdown=False): if root.endswith('/__pycache__'): continue if '/.tox/' in root: continue if path is None: root = root[2:] if root.startswith('.'): continue for file_name in file_names: name, ext = os.path.splitext(os.path.basename(file_name)) if name.startswith('.'): continue if extensions and ext not in extensions: continue if prefix and not name.startswith(prefix): continue yield TestTarget(os.path.join(root, file_name), module_path, prefix, path) class CompletionTarget(object): """Command-line argument completion target base class.""" __metaclass__ = abc.ABCMeta def __init__(self): self.name = None self.path = None self.base_path = None self.modules = tuple() self.aliases = tuple() def __eq__(self, other): if isinstance(other, CompletionTarget): return self.__repr__() == other.__repr__() return False def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): return self.name.__lt__(other.name) def __gt__(self, other): return self.name.__gt__(other.name) def __hash__(self): return hash(self.__repr__()) def __repr__(self): if self.modules: return '%s (%s)' % (self.name, ', '.join(self.modules)) return self.name class DirectoryTarget(CompletionTarget): """Directory target.""" def __init__(self, path, modules): """ :type path: str :type modules: tuple[str] """ super(DirectoryTarget, self).__init__() self.name = path self.path = path self.modules = modules class TestTarget(CompletionTarget): """Generic test target.""" def __init__(self, path, module_path, module_prefix, base_path): """ :type path: str :type module_path: str | None :type module_prefix: str | None :type base_path: str """ super(TestTarget, self).__init__() self.name = path self.path = path self.base_path = base_path + '/' if base_path else None name, ext = os.path.splitext(os.path.basename(self.path)) if module_path and path.startswith(module_path) and name != '__init__' and ext in MODULE_EXTENSIONS: self.module = name[len(module_prefix or ''):].lstrip('_') self.modules = self.module, else: self.module = None self.modules = tuple() aliases = [self.path, self.module] parts = self.path.split('/') for i in range(1, len(parts)): alias = '%s/' % '/'.join(parts[:i]) aliases.append(alias) aliases = [a for a in aliases if a] self.aliases = tuple(sorted(aliases)) class IntegrationTarget(CompletionTarget): """Integration test target.""" non_posix = frozenset(( 'network', 'windows', )) categories = frozenset(non_posix | frozenset(( 'posix', 'module', 'needs', 'skip', ))) def __init__(self, path, modules, prefixes): """ :type path: str :type modules: frozenset[str] :type prefixes: dict[str, str] """ super(IntegrationTarget, self).__init__() self.name = os.path.basename(path) self.path = path # script_path and type contents = sorted(os.listdir(path)) runme_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'runme') test_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'test') self.script_path = None if runme_files: self.type = 'script' self.script_path = os.path.join(path, runme_files[0]) elif test_files: self.type = 'special' elif os.path.isdir(os.path.join(path, 'tasks')): self.type = 'role' else: self.type = 'unknown' # static_aliases try: with 
open(os.path.join(path, 'aliases'), 'r') as aliases_file: static_aliases = tuple(aliases_file.read().splitlines()) except IOError as ex: if ex.errno != errno.ENOENT: raise static_aliases = tuple() # modules if self.name in modules: module = self.name elif self.name.startswith('win_') and self.name[4:] in modules: module = self.name[4:] else: module = None self.modules = tuple(sorted(a for a in static_aliases + tuple([module]) if a in modules)) # groups groups = [self.type] groups += [a for a in static_aliases if a not in modules] groups += ['module/%s' % m for m in self.modules] if not self.modules: groups.append('non_module') if 'destructive' not in groups: groups.append('non_destructive') if '_' in self.name: prefix = self.name[:self.name.find('_')] else: prefix = None if prefix in prefixes: group = prefixes[prefix] if group != prefix: group = '%s/%s' % (group, prefix) groups.append(group) if self.name.startswith('win_'): groups.append('windows') if self.name.startswith('connection_'): groups.append('connection') if self.name.startswith('setup_') or self.name.startswith('prepare_'): groups.append('hidden') if self.type not in ('script', 'role'): groups.append('hidden') for group in itertools.islice(groups, 0, len(groups)): if '/' in group: parts = group.split('/') for i in range(1, len(parts)): groups.append('/'.join(parts[:i])) if not any(g in self.non_posix for g in groups): groups.append('posix') # aliases aliases = [self.name] + \ ['%s/' % g for g in groups] + \ ['%s/%s' % (g, self.name) for g in groups if g not in self.categories] if 'hidden/' in aliases: aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')] self.aliases = tuple(sorted(set(aliases))) class TargetPatternsNotMatched(ApplicationError): """One or more targets were not matched when a match was required.""" def __init__(self, patterns): """ :type patterns: set[str] """ self.patterns = sorted(patterns) if len(patterns) > 1: message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns) else: message = 'Target pattern not matched: %s' % self.patterns[0] super(TargetPatternsNotMatched, self).__init__(message)
gpl-3.0
3,855,705,844,107,149,300
-1,937,721,364,432,037,400
29.11236
119
0.582276
false
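The heart of filter_targets in the record above is alias matching: a pattern selects a target when it fully matches any of the target's aliases. A compact sketch of that idea, with targets reduced to plain (name, aliases) tuples and made-up alias data:

import re

targets = [('ping', ('ping', 'module/ping', 'posix/')),
           ('win_ping', ('win_ping', 'windows/'))]

def filter_by_patterns(targets, patterns):
    # Anchor each pattern so it must match a whole alias, as above.
    compiled = [re.compile('^%s$' % p) for p in patterns]
    for name, aliases in targets:
        if any(rx.match(a) for rx in compiled for a in aliases):
            yield name

print(list(filter_by_patterns(targets, ['posix/'])))   # ['ping']
print(list(filter_by_patterns(targets, ['.*ping'])))   # ['ping', 'win_ping']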
frost-nzcr4/djangocms-installer
docs/conf.py
3
8439
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)

import djangocms_installer

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
              'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
              'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django CMS Installer'
copyright = u'2013, Iacopo Spalletti'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djangocms_installer.__version__
# The full version, including alpha/beta/rc tags.
release = djangocms_installer.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'djangocms_installerdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'djangocms_installer.tex', u'django CMS Installer Documentation',
     u'Iacopo Spalletti', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'djangocms_installer', u'django CMS Installer Documentation',
     [u'Iacopo Spalletti'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'djangocms_installer', u'django CMS Installer Documentation',
     u'Iacopo Spalletti', 'djangocms_installer', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
bsd-3-clause
-3,715,108,931,176,378,400
9,026,216,315,746,856,000
31.836576
83
0.707311
false
amerlyq/airy
vim/res/ycm_extra_conf.py
1
5213
# SEE: CACHE/bundle/YouCompleteMe/cpp/ycm/.ycm_extra_conf.py

import os
import ycm_core

# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
# This path will only work on OS X, but extra paths that don't exist are not harmful
'-isystem',
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]

# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = os.path.abspath( '~/aura/pdrm/gerrit/build' )

if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None

SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]

def DirectoryOfThisScript():
  return os.path.dirname( os.path.abspath( __file__ ) )


def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  if not working_directory:
    return list( flags )
  new_flags = []
  make_next_absolute = False
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  for flag in flags:
    new_flag = flag

    if make_next_absolute:
      make_next_absolute = False
      if not flag.startswith( '/' ):
        new_flag = os.path.join( working_directory, flag )

    for path_flag in path_flags:
      if flag == path_flag:
        make_next_absolute = True
        break

      if flag.startswith( path_flag ):
        path = flag[ len( path_flag ): ]
        new_flag = path_flag + os.path.join( working_directory, path )
        break

    if new_flag:
      new_flags.append( new_flag )
  return new_flags


def IsHeaderFile( filename ):
  extension = os.path.splitext( filename )[ 1 ]
  return extension in [ '.h', '.hxx', '.hpp', '.hh' ]


def GetCompilationInfoForFile( filename ):
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile( replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    return None
  return database.GetCompilationInfoForFile( filename )


def FlagsForFile( filename, **kwargs ):
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None

    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )

    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    #try:
    #  final_flags.remove( '-stdlib=libc++' )
    #except ValueError:
    #  pass
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )

  return {
    'flags': final_flags,
    'do_cache': True
  }
mit
1,645,724,454,115,075,800
3,219,526,308,979,947,000
36.235714
115
0.689047
false
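The flag-absolutizing logic in the ycm_extra_conf record above handles both the split form ('-I', 'path') and the joined form ('-Ipath'). A self-contained condensation of that idea follows; the function name and example paths are illustrative, not YCM's API:

import os

def absolutize(flags, wd, path_flags=('-isystem', '-I', '-iquote')):
    out, pending = [], False
    for flag in flags:
        if pending:
            # Previous flag was a bare path flag; this one is its path.
            pending = False
            if not flag.startswith('/'):
                flag = os.path.join(wd, flag)
        elif flag in path_flags:
            pending = True
        else:
            for pf in path_flags:
                if flag.startswith(pf):
                    flag = pf + os.path.join(wd, flag[len(pf):])
                    break
        out.append(flag)
    return out

print(absolutize(['-I', 'include', '-Isrc', '-std=c++11'], '/home/me/proj'))
# ['-I', '/home/me/proj/include', '-I/home/me/proj/src', '-std=c++11']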
rdo-management/ironic
ironic/tests/drivers/test_virtualbox.py
4
18456
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Test class for VirtualBox Driver Modules."""

import mock
from oslo_config import cfg
from pyremotevbox import exception as pyremotevbox_exc
from pyremotevbox import vbox as pyremotevbox_vbox

from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules import virtualbox
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.objects import utils as obj_utils

INFO_DICT = {
    'virtualbox_vmname': 'baremetal1',
    'virtualbox_host': '10.0.2.2',
    'virtualbox_username': 'username',
    'virtualbox_password': 'password',
    'virtualbox_port': 12345,
}

CONF = cfg.CONF


class VirtualBoxMethodsTestCase(db_base.DbTestCase):

    def setUp(self):
        super(VirtualBoxMethodsTestCase, self).setUp()
        driver_info = INFO_DICT.copy()
        mgr_utils.mock_the_extension_manager(driver="fake_vbox")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_vbox',
                                               driver_info=driver_info)

    def test__parse_driver_info(self):
        info = virtualbox._parse_driver_info(self.node)
        self.assertEqual('baremetal1', info['vmname'])
        self.assertEqual('10.0.2.2', info['host'])
        self.assertEqual('username', info['username'])
        self.assertEqual('password', info['password'])
        self.assertEqual(12345, info['port'])

    def test__parse_driver_info_missing_vmname(self):
        del self.node.driver_info['virtualbox_vmname']
        self.assertRaises(exception.MissingParameterValue,
                          virtualbox._parse_driver_info, self.node)

    def test__parse_driver_info_missing_host(self):
        del self.node.driver_info['virtualbox_host']
        self.assertRaises(exception.MissingParameterValue,
                          virtualbox._parse_driver_info, self.node)

    def test__parse_driver_info_invalid_port(self):
        self.node.driver_info['virtualbox_port'] = 'invalid-port'
        self.assertRaises(exception.InvalidParameterValue,
                          virtualbox._parse_driver_info, self.node)

    def test__parse_driver_info_missing_port(self):
        del self.node.driver_info['virtualbox_port']
        info = virtualbox._parse_driver_info(self.node)
        self.assertEqual(18083, info['port'])

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
    def test__run_virtualbox_method(self, host_mock):
        host_object_mock = mock.MagicMock()
        func_mock = mock.MagicMock()
        vm_object_mock = mock.MagicMock(foo=func_mock)
        host_mock.return_value = host_object_mock
        host_object_mock.find_vm.return_value = vm_object_mock
        func_mock.return_value = 'return-value'

        return_value = virtualbox._run_virtualbox_method(
            self.node, 'some-ironic-method', 'foo', 'args', kwarg='kwarg')

        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')
        func_mock.assert_called_once_with('args', kwarg='kwarg')
        self.assertEqual('return-value', return_value)

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
    def test__run_virtualbox_method_get_host_fails(self, host_mock):
        host_mock.side_effect = pyremotevbox_exc.PyRemoteVBoxException

        self.assertRaises(exception.VirtualBoxOperationFailed,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
    def test__run_virtualbox_method_find_vm_fails(self, host_mock):
        host_object_mock = mock.MagicMock()
        host_mock.return_value = host_object_mock
        exc = pyremotevbox_exc.PyRemoteVBoxException
        host_object_mock.find_vm.side_effect = exc

        self.assertRaises(exception.VirtualBoxOperationFailed,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
    def test__run_virtualbox_method_func_fails(self, host_mock):
        host_object_mock = mock.MagicMock()
        host_mock.return_value = host_object_mock
        func_mock = mock.MagicMock()
        vm_object_mock = mock.MagicMock(foo=func_mock)
        host_object_mock.find_vm.return_value = vm_object_mock
        func_mock.side_effect = pyremotevbox_exc.PyRemoteVBoxException

        self.assertRaises(exception.VirtualBoxOperationFailed,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')
        func_mock.assert_called_once_with('args', kwarg='kwarg')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
    def test__run_virtualbox_method_invalid_method(self, host_mock):
        host_object_mock = mock.MagicMock()
        host_mock.return_value = host_object_mock
        vm_object_mock = mock.MagicMock()
        host_object_mock.find_vm.return_value = vm_object_mock
        del vm_object_mock.foo

        self.assertRaises(exception.InvalidParameterValue,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
    def test__run_virtualbox_method_vm_wrong_power_state(self, host_mock):
        host_object_mock = mock.MagicMock()
        host_mock.return_value = host_object_mock
        func_mock = mock.MagicMock()
        vm_object_mock = mock.MagicMock(foo=func_mock)
        host_object_mock.find_vm.return_value = vm_object_mock
        func_mock.side_effect = pyremotevbox_exc.VmInWrongPowerState

        # _run_virtualbox_method() doesn't catch VmInWrongPowerState and
        # lets caller handle it.
        self.assertRaises(pyremotevbox_exc.VmInWrongPowerState,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')
        func_mock.assert_called_once_with('args', kwarg='kwarg')


class VirtualBoxPowerTestCase(db_base.DbTestCase):

    def setUp(self):
        super(VirtualBoxPowerTestCase, self).setUp()
        driver_info = INFO_DICT.copy()
        mgr_utils.mock_the_extension_manager(driver="fake_vbox")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_vbox',
                                               driver_info=driver_info)

    def test_get_properties(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            properties = task.driver.power.get_properties()

        self.assertIn('virtualbox_vmname', properties)
        self.assertIn('virtualbox_host', properties)

    @mock.patch.object(virtualbox, '_parse_driver_info')
    def test_validate(self, parse_info_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.validate(task)
            parse_info_mock.assert_called_once_with(task.node)

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_get_power_state(self, run_method_mock):
        run_method_mock.return_value = 'PoweredOff'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            power_state = task.driver.power.get_power_state(task)
            run_method_mock.assert_called_once_with(task.node,
                                                    'get_power_state',
                                                    'get_power_status')
            self.assertEqual(states.POWER_OFF, power_state)

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_get_power_state_invalid_state(self, run_method_mock):
        run_method_mock.return_value = 'invalid-state'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            power_state = task.driver.power.get_power_state(task)
            run_method_mock.assert_called_once_with(task.node,
                                                    'get_power_state',
                                                    'get_power_status')
            self.assertEqual(states.ERROR, power_state)

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_set_power_state_off(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_OFF)
            run_method_mock.assert_called_once_with(task.node,
                                                    'set_power_state',
                                                    'stop')

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_set_power_state_on(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_ON)
            run_method_mock.assert_called_once_with(task.node,
                                                    'set_power_state',
                                                    'start')

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_set_power_state_reboot(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.REBOOT)
            run_method_mock.assert_any_call(task.node, 'reboot', 'stop')
            run_method_mock.assert_any_call(task.node, 'reboot', 'start')

    def test_set_power_state_invalid_state(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.power.set_power_state,
                              task, 'invalid-state')

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_reboot(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task)
            run_method_mock.assert_any_call(task.node, 'reboot', 'stop')
            run_method_mock.assert_any_call(task.node, 'reboot', 'start')


class VirtualBoxManagementTestCase(db_base.DbTestCase):

    def setUp(self):
        super(VirtualBoxManagementTestCase, self).setUp()
        driver_info = INFO_DICT.copy()
        mgr_utils.mock_the_extension_manager(driver="fake_vbox")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_vbox',
                                               driver_info=driver_info)

    def test_get_properties(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            properties = task.driver.management.get_properties()

        self.assertIn('virtualbox_vmname', properties)
        self.assertIn('virtualbox_host', properties)

    @mock.patch.object(virtualbox, '_parse_driver_info')
    def test_validate(self, parse_info_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.validate(task)
            parse_info_mock.assert_called_once_with(task.node)

    def test_get_supported_boot_devices(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            devices = task.driver.management.get_supported_boot_devices()
            self.assertIn(boot_devices.PXE, devices)
            self.assertIn(boot_devices.DISK, devices)
            self.assertIn(boot_devices.CDROM, devices)

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_get_boot_device_ok(self, run_method_mock):
        run_method_mock.return_value = 'Network'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            ret_val = task.driver.management.get_boot_device(task)
            run_method_mock.assert_called_once_with(task.node,
                                                    'get_boot_device',
                                                    'get_boot_device')
            self.assertEqual(boot_devices.PXE, ret_val['boot_device'])
            self.assertTrue(ret_val['persistent'])

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_get_boot_device_invalid(self, run_method_mock):
        run_method_mock.return_value = 'invalid-boot-device'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            ret_val = task.driver.management.get_boot_device(task)
            self.assertIsNone(ret_val['boot_device'])
            self.assertIsNone(ret_val['persistent'])

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_set_boot_device_ok(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, boot_devices.PXE)
            run_method_mock.assert_called_once_with(task.node,
                                                    'set_boot_device',
                                                    'set_boot_device',
                                                    'Network')

    @mock.patch.object(virtualbox, 'LOG')
    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_set_boot_device_wrong_power_state(self, run_method_mock,
                                               log_mock):
        run_method_mock.side_effect = pyremotevbox_exc.VmInWrongPowerState
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, boot_devices.PXE)
            log_mock.error.assert_called_once_with(mock.ANY, mock.ANY)

    @mock.patch.object(virtualbox, '_run_virtualbox_method')
    def test_set_boot_device_invalid(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.management.set_boot_device,
                              task, 'invalid-boot-device')

    def test_get_sensors_data(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(NotImplementedError,
                              task.driver.management.get_sensors_data,
                              task)
apache-2.0
-8,325,017,368,569,581,000
6,524,796,959,878,082,000
48.347594
76
0.55868
false
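The tests in the record above lean heavily on mock.patch.object: replace one attribute for the duration of a test, then assert on the recorded call. A tiny self-contained illustration of that pattern; the Driver class here is a made-up stand-in, not ironic's driver:

try:
    from unittest import mock  # Python 3
except ImportError:
    import mock  # Python 2 backport

class Driver(object):
    @staticmethod
    def run(node, method):
        raise RuntimeError('would talk to real VirtualBox')

with mock.patch.object(Driver, 'run') as run_mock:
    # Inside the block, Driver.run is a MagicMock that records calls.
    run_mock.return_value = 'PoweredOff'
    assert Driver.run('node-1', 'get_power_status') == 'PoweredOff'
    run_mock.assert_called_once_with('node-1', 'get_power_status')
# Outside the block, the original Driver.run is restored.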
mjirayu/sit_academy
common/test/acceptance/tests/studio/test_studio_rerun.py
122
4166
""" Acceptance tests for Studio related to course reruns. """ import random from bok_choy.promise import EmptyPromise from nose.tools import assert_in from ...pages.studio.index import DashboardPage from ...pages.studio.course_rerun import CourseRerunPage from ...pages.studio.overview import CourseOutlinePage from ...pages.lms.courseware import CoursewarePage from ...fixtures.course import XBlockFixtureDesc from base_studio_test import StudioCourseTest class CourseRerunTest(StudioCourseTest): """ Feature: Courses can be rerun """ __test__ = True SECTION_NAME = 'Rerun Section' SUBSECITON_NAME = 'Rerun Subsection' UNIT_NAME = 'Rerun Unit' COMPONENT_NAME = 'Rerun Component' COMPONENT_CONTENT = 'Test Content' def setUp(self): """ Login as global staff because that's the only way to rerun a course. """ super(CourseRerunTest, self).setUp(is_staff=True) self.dashboard_page = DashboardPage(self.browser) def populate_course_fixture(self, course_fixture): """ Create a sample course with one section, one subsection, one unit, and one component. """ course_fixture.add_children( XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children( XBlockFixtureDesc('sequential', self.SUBSECITON_NAME).add_children( XBlockFixtureDesc('vertical', self.UNIT_NAME).add_children( XBlockFixtureDesc('html', self.COMPONENT_NAME, self.COMPONENT_CONTENT) ) ) ) ) def test_course_rerun(self): """ Scenario: Courses can be rerun Given I have a course with a section, subsesction, vertical, and html component with content 'Test Content' When I visit the course rerun page And I type 'test_rerun' in the course run field And I click Create Rerun And I visit the course listing page And I wait for all courses to finish processing And I click on the course with run 'test_rerun' Then I see a rerun notification on the course outline page And when I click 'Dismiss' on the notification Then I do not see a rerun notification And when I expand the subsection and click on the unit And I click 'View Live Version' Then I see one html component with the content 'Test Content' """ course_info = (self.course_info['org'], self.course_info['number'], self.course_info['run']) self.dashboard_page.visit() self.dashboard_page.create_rerun(self.course_info['display_name']) rerun_page = CourseRerunPage(self.browser, *course_info) rerun_page.wait_for_page() course_run = 'test_rerun_' + str(random.randrange(1000000, 9999999)) rerun_page.course_run = course_run rerun_page.create_rerun() def finished_processing(): self.dashboard_page.visit() return not self.dashboard_page.has_processing_courses EmptyPromise(finished_processing, "Rerun finished processing", try_interval=5, timeout=60).fulfill() assert_in(course_run, self.dashboard_page.course_runs) self.dashboard_page.click_course_run(course_run) outline_page = CourseOutlinePage(self.browser, *course_info) outline_page.wait_for_page() self.assertTrue(outline_page.has_rerun_notification) outline_page.dismiss_rerun_notification() EmptyPromise(lambda: not outline_page.has_rerun_notification, "Rerun notification dismissed").fulfill() subsection = outline_page.section(self.SECTION_NAME).subsection(self.SUBSECITON_NAME) subsection.expand_subsection() unit_page = subsection.unit(self.UNIT_NAME).go_to() unit_page.view_published_version() courseware = CoursewarePage(self.browser, self.course_id) courseware.wait_for_page() self.assertEqual(courseware.num_xblock_components, 1) self.assertEqual(courseware.xblock_component_html_content(), self.COMPONENT_CONTENT)
agpl-3.0
8,861,511,626,370,712,000
5,553,793,106,880,199,000
39.057692
119
0.660106
false
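The EmptyPromise calls in the record above boil down to polling: re-check a predicate on an interval until it holds or a timeout elapses. A generic, self-contained sketch of that behavior; the function name and timings are illustrative, not bok_choy's implementation:

import time

def fulfill(check, description, try_interval=5, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return
        time.sleep(try_interval)
    raise AssertionError('Promise not fulfilled: %s' % description)

# Example (assumes a dashboard page object is in scope):
# fulfill(lambda: not dashboard.has_processing_courses, 'Rerun finished processing')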
Russell-IO/ansible
lib/ansible/modules/network/slxos/slxos_command.py
25
7951
#!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: slxos_command
version_added: "2.6"
author: "Lindsay Hill (@LindsayHill)"
short_description: Run commands on remote devices running Extreme Networks SLX-OS
description:
  - Sends arbitrary commands to an SLX node and returns the results
    read from the device. This module includes an
    argument that will cause the module to wait for a specific condition
    before returning or timing out if the condition is not met.
  - This module does not support running commands in configuration mode.
    Please use M(slxos_config) to configure SLX-OS devices.
notes:
  - Tested against SLX-OS 17s.1.02
  - If a command sent to the device requires answering a prompt, it is possible
    to pass a dict containing I(command), I(answer) and I(prompt). See examples.
options:
  commands:
    description:
      - List of commands to send to the remote SLX-OS device over the
        configured provider. The resulting output from the command
        is returned. If the I(wait_for) argument is provided, the
        module is not returned until the condition is satisfied or
        the number of retries has expired.
    required: true
  wait_for:
    description:
      - List of conditions to evaluate against the output of the
        command. The task will wait for each condition to be true
        before moving forward. If the conditional is not true
        within the configured number of retries, the task fails.
        See examples.
    default: null
  match:
    description:
      - The I(match) argument is used in conjunction with the
        I(wait_for) argument to specify the match policy. Valid
        values are C(all) or C(any). If the value is set to C(all)
        then all conditionals in the wait_for must be satisfied. If
        the value is set to C(any) then only one of the values must be
        satisfied.
    required: false
    default: all
    choices: ['any', 'all']
  retries:
    description:
      - Specifies the number of retries a command should by tried
        before it is considered failed. The command is run on the
        target device every retry and evaluated against the
        I(wait_for) conditions.
    required: false
    default: 10
  interval:
    description:
      - Configures the interval in seconds to wait between retries
        of the command. If the command does not pass the specified
        conditions, the interval indicates how long to wait before
        trying the command again.
    required: false
    default: 1
"""

EXAMPLES = """
tasks:
  - name: run show version on remote devices
    slxos_command:
      commands: show version

  - name: run show version and check to see if output contains SLX
    slxos_command:
      commands: show version
      wait_for: result[0] contains SLX

  - name: run multiple commands on remote nodes
    slxos_command:
      commands:
        - show version
        - show interfaces

  - name: run multiple commands and evaluate the output
    slxos_command:
      commands:
        - show version
        - show interface status
      wait_for:
        - result[0] contains SLX
        - result[1] contains Eth

  - name: run command that requires answering a prompt
    slxos_command:
      commands:
        - command: 'clear sessions'
          prompt: 'This operation will logout all the user sessions. Do you want to continue (yes/no)?:'
          answer: y
"""

RETURN = """
stdout:
  description: The set of responses from the commands
  returned: always apart from low level errors (such as action plugin)
  type: list
  sample: ['...', '...']
stdout_lines:
  description: The value of stdout split into a list
  returned: always apart from low level errors (such as action plugin)
  type: list
  sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
  description: The list of conditionals that have failed
  returned: failed
  type: list
  sample: ['...', '...']
"""
import re
import time

from ansible.module_utils.network.slxos.slxos import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types

__metaclass__ = type


def to_lines(stdout):
    for item in stdout:
        if isinstance(item, string_types):
            item = str(item).split('\n')
        yield item


def parse_commands(module, warnings):
    command = ComplexList(dict(
        command=dict(key=True),
        prompt=dict(),
        answer=dict()
    ), module)
    commands = command(module.params['commands'])
    for item in list(commands):
        configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command'])
        if module.check_mode:
            if configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'):
                module.fail_json(
                    msg='slxos_command does not support running config mode '
                        'commands. Please use slxos_config instead'
                )
            if not item['command'].startswith('show'):
                warnings.append(
                    'only show commands are supported when using check mode, not '
                    'executing `%s`' % item['command']
                )
                commands.remove(item)
    return commands


def main():
    """main entry point for module execution
    """
    argument_spec = dict(
        commands=dict(type='list', required=True),

        wait_for=dict(type='list'),
        match=dict(default='all', choices=['all', 'any']),

        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = {'changed': False}

    warnings = list()
    commands = parse_commands(module, warnings)
    result['warnings'] = warnings

    wait_for = module.params['wait_for'] or list()
    conditionals = [Conditional(c) for c in wait_for]

    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']

    while retries > 0:
        responses = run_commands(module, commands)

        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    conditionals = list()
                    break
                conditionals.remove(item)

        if not conditionals:
            break

        time.sleep(interval)
        retries -= 1

    if conditionals:
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update({
        'changed': False,
        'stdout': responses,
        'stdout_lines': list(to_lines(responses))
    })

    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
-2,550,003,075,689,210,000
481,377,444,762,664,300
31.720165
109
0.646711
false
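The slxos_command record above centers on a retry loop that re-runs the commands until every wait_for conditional passes. Below is a minimal, self-contained sketch of that pattern; fake_run_commands and the lambda conditional are hypothetical stand-ins for the module's run_commands() transport call and its Conditional('result[0] contains SLX') objects, not the real API:

import time

def fake_run_commands(commands):
    # Hypothetical stub: returns canned device output instead of
    # talking to a real SLX-OS switch.
    return ['SLX-OS Operating System Version 17r.1.01']

def wait_for(commands, conditionals, retries=10, interval=1):
    while retries > 0:
        responses = fake_run_commands(commands)
        # Drop each conditional once it is satisfied, mirroring
        # conditionals.remove(item) in the module's main().
        conditionals = [c for c in conditionals if not c(responses)]
        if not conditionals:
            return responses
        time.sleep(interval)
        retries -= 1
    raise RuntimeError('One or more conditional statements have not been satisfied')

print(wait_for(['show version'], [lambda r: 'SLX' in r[0]]))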
niteshch207/universe
scripts/generate-config-reference.py
3
3017
#!/usr/bin/env python3 """This script builds a Markdown file containing configuration references for all packages (and all package versions) contained in the Mesosphere DC/OS Universe repository. It outputs a single file, 'config-reference.md' in the current working directory. Usage: ./generate-config-reference.py [/path/to/universe/repo/packages] """ import json import os import sys def find_config_files(path): config_files = [] for root, dirs, files in os.walk(path): for f in files: if f == 'config.json': config_files.append(os.path.join(root, f)) return config_files def main(path): files = find_config_files(path) outfile = open(os.path.join(os.getcwd(), 'config-reference.md'), 'w') outfile.write("# DC/OS Universe Package Configuration Reference\n\n") for f in files: with open(f, 'r') as config: package_name = f.split('/')[-3] package_version = f.split('/')[-2] outfile.write("## {} version {}\n\n".format(package_name, package_version)) props = json.loads(config.read())['properties'] for key, value in props.items(): if key == "properties": outfile.write("*Errors encountered when processing config properties. Not all properties may be listed here. Please verify the structure of this package and package version.*\n\n") continue outfile.write("### {} configuration properties\n\n".format(key)) outfile.write("| Property | Type | Description | Default Value |\n") outfile.write("|----------|------|-------------|---------------|\n") for _, prop in value.items(): if type(prop) is not dict: continue for key, details in prop.items(): prop = key try: typ = details['type'] except KeyError: typ = "*No type provided.*" try: desc = details['description'] except KeyError: desc = "*No description provided.*" try: default = "`{}`".format(details['default']) if default == "``": default = "*Empty string.*" except KeyError: default = "*No default.*" outfile.write("| {prop} | {typ} | {desc} | {default} |\n".format( prop=prop, desc=desc, typ=typ, default=default)) outfile.write("\n") outfile.close() if __name__ == '__main__': if len(sys.argv) == 2: path = sys.argv[1] else: path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../repo/packages') main(path)
apache-2.0
-2,777,008,921,679,994,000
-8,732,525,129,939,553,000
35.349398
200
0.499834
false
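The generate-config-reference.py record above turns nested 'properties' objects from each package's config.json into Markdown tables. The sketch below feeds the same traversal a minimal, hypothetical config.json fragment and prints the table row it yields; the package structure shown is an assumption for illustration only:

import json

sample = json.loads('''
{
  "properties": {
    "service": {
      "properties": {
        "name": {
          "type": "string",
          "description": "Service name.",
          "default": "my-service"
        }
      }
    }
  }
}
''')

# Mirrors the nested loops in main(): section -> property group -> details.
for section, value in sample['properties'].items():
    print("### {} configuration properties".format(section))
    for _, prop in value.items():
        if type(prop) is not dict:
            continue
        for key, details in prop.items():
            print("| {} | {} | {} | `{}` |".format(
                key, details['type'], details['description'], details['default']))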
kamcpp/tensorflow
tensorflow/contrib/opt/__init__.py
11
1119
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """opt: A module containing optimization routines.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.contrib.opt.python.training.external_optimizer import * from tensorflow.contrib.opt.python.training.moving_average_optimizer import * from tensorflow.contrib.opt.python.training.variable_clipping_optimizer import *
apache-2.0
-445,338,145,482,101,500
5,277,416,913,508,995,000
45.625
80
0.725648
false
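The tensorflow/contrib/opt record above only re-exports three optimizer modules. As a hedged illustration of one of them, here is a TF 1.x graph-mode sketch of MovingAverageOptimizer wrapping plain gradient descent; the learning rate and average_decay values are arbitrary choices for the example, not recommendations:

import tensorflow as tf

x = tf.Variable(5.0, name='x')
loss = tf.square(x)

base = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Keeps an exponential moving average of the variables alongside training.
opt = tf.contrib.opt.MovingAverageOptimizer(base, average_decay=0.999)
train_op = opt.minimize(loss)

# swapping_saver() writes checkpoints with the averaged values swapped in,
# so a model restored from them uses the smoothed weights.
saver = opt.swapping_saver()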
LarsFronius/ansible
lib/ansible/modules/cloud/ovirt/ovirt_external_providers_facts.py
7
5916
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2016 Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ovirt_external_providers_facts short_description: Retrieve facts about one or more oVirt/RHV external providers author: "Ondra Machacek (@machacekondra)" version_added: "2.3" description: - "Retrieve facts about one or more oVirt/RHV external providers." notes: - "This module creates a new top-level C(ovirt_external_providers) fact, which contains a list of external_providers." options: type: description: - "Type of the external provider." choices: ['os_image', 'os_network', 'os_volume', 'foreman'] required: true name: description: - "Name of the external provider, can be used as glob expression." extends_documentation_fragment: ovirt_facts ''' EXAMPLES = ''' # Examples don't contain auth parameter for simplicity, # look at ovirt_auth module to see how to reuse authentication: # Gather facts about all image external providers named C<glance>: - ovirt_external_providers_facts: type: os_image name: glance - debug: var: ovirt_external_providers ''' RETURN = ''' external_host_providers: description: "List of dictionaries of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance at following url: https://ovirt.example.com/ovirt-engine/api/model#types/external_host_provider." returned: "On success and if parameter 'type: foreman' is used." type: list openstack_image_providers: description: "List of dictionaries of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_image_provider." returned: "On success and if parameter 'type: os_image' is used." type: list openstack_volume_providers: description: "List of dictionaries of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_volume_provider." returned: "On success and if parameter 'type: os_volume' is used." type: list openstack_network_providers: description: "List of dictionaries of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_network_provider." returned: "On success and if parameter 'type: os_network' is used." 
type: list ''' import fnmatch import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ovirt import ( check_sdk, create_connection, get_dict_of_struct, ovirt_facts_full_argument_spec, ) def _external_provider_service(provider_type, system_service): if provider_type == 'os_image': return system_service.openstack_image_providers_service() elif provider_type == 'os_network': return system_service.openstack_network_providers_service() elif provider_type == 'os_volume': return system_service.openstack_volume_providers_service() elif provider_type == 'foreman': return system_service.external_host_providers_service() def main(): argument_spec = ovirt_facts_full_argument_spec( name=dict(default=None, required=False), type=dict( default=None, required=True, choices=[ 'os_image', 'os_network', 'os_volume', 'foreman', ], aliases=['provider'], ), ) module = AnsibleModule(argument_spec) check_sdk(module) try: auth = module.params.pop('auth') connection = create_connection(auth) external_providers_service = _external_provider_service( provider_type=module.params.pop('type'), system_service=connection.system_service(), ) if module.params['name']: external_providers = [ e for e in external_providers_service.list() if fnmatch.fnmatch(e.name, module.params['name']) ] else: external_providers = external_providers_service.list() module.exit_json( changed=False, ansible_facts=dict( ovirt_external_providers=[ get_dict_of_struct( struct=c, connection=connection, fetch_nested=module.params.get('fetch_nested'), attributes=module.params.get('nested_attributes'), ) for c in external_providers ], ), ) except Exception as e: module.fail_json(msg=str(e), exception=traceback.format_exc()) finally: connection.close(logout=auth.get('token') is None) if __name__ == '__main__': main()
gpl-3.0
-1,000,988,949,849,582,300
7,231,821,136,216,670,000
36.443038
157
0.658553
false
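The name matching in the ovirt_external_providers_facts record above is ordinary fnmatch globbing over provider names. The sketch below isolates that one step with stand-in provider objects; no oVirt connection or SDK types are involved:

import fnmatch
from collections import namedtuple

# Hypothetical stand-ins for the SDK's provider objects.
Provider = namedtuple('Provider', 'name')
providers = [Provider('glance'), Provider('glance-backup'), Provider('cinder')]

pattern = 'glance*'
matched = [p for p in providers if fnmatch.fnmatch(p.name, pattern)]
print([p.name for p in matched])  # ['glance', 'glance-backup']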
mdanielwork/intellij-community
python/testData/MockSdk2.7/python_stubs/__builtin__.py
19
174731
# encoding: utf-8 # module __builtin__ # from (built-in) # by generator 1.145 from __future__ import print_function """ Built-in functions, exceptions, and other objects. Noteworthy: None is the `nil' object; Ellipsis represents `...' in slices. """ # imports from exceptions import (ArithmeticError, AssertionError, AttributeError, BaseException, BufferError, BytesWarning, DeprecationWarning, EOFError, EnvironmentError, Exception, FloatingPointError, FutureWarning, GeneratorExit, IOError, ImportError, ImportWarning, IndentationError, IndexError, KeyError, KeyboardInterrupt, LookupError, MemoryError, NameError, NotImplementedError, OSError, OverflowError, PendingDeprecationWarning, ReferenceError, RuntimeError, RuntimeWarning, StandardError, StopIteration, SyntaxError, SyntaxWarning, SystemError, SystemExit, TabError, TypeError, UnboundLocalError, UnicodeDecodeError, UnicodeEncodeError, UnicodeError, UnicodeTranslateError, UnicodeWarning, UserWarning, ValueError, Warning, ZeroDivisionError) # Variables with simple values False = False None = object() # real value of type <type 'NoneType'> replaced True = True __debug__ = True # functions def abs(number): # real signature unknown; restored from __doc__ """ abs(number) -> number Return the absolute value of the argument. """ return 0 def all(iterable): # real signature unknown; restored from __doc__ """ all(iterable) -> bool Return True if bool(x) is True for all values x in the iterable. If the iterable is empty, return True. """ return False def any(iterable): # real signature unknown; restored from __doc__ """ any(iterable) -> bool Return True if bool(x) is True for any x in the iterable. If the iterable is empty, return False. """ return False def apply(p_object, args=None, kwargs=None): # real signature unknown; restored from __doc__ """ apply(object[, args[, kwargs]]) -> value Call a callable object with positional arguments taken from the tuple args, and keyword arguments taken from the optional dictionary kwargs. Note that classes are callable, as are instances with a __call__() method. Deprecated since release 2.3. Instead, use the extended call syntax: function(*args, **keywords). """ pass def bin(number): # real signature unknown; restored from __doc__ """ bin(number) -> string Return the binary representation of an integer or long integer. """ return "" def callable(p_object): # real signature unknown; restored from __doc__ """ callable(object) -> bool Return whether the object is callable (i.e., some kind of function). Note that classes are callable, as are instances with a __call__() method. """ return False def chr(i): # real signature unknown; restored from __doc__ """ chr(i) -> character Return a string of one character with ordinal i; 0 <= i < 256. """ return "" def cmp(x, y): # real signature unknown; restored from __doc__ """ cmp(x, y) -> integer Return negative if x<y, zero if x==y, positive if x>y. """ return 0 def coerce(x, y): # real signature unknown; restored from __doc__ """ coerce(x, y) -> (x1, y1) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. If coercion is not possible, raise TypeError. """ pass def compile(source, filename, mode, flags=None, dont_inherit=None): # real signature unknown; restored from __doc__ """ compile(source, filename, mode[, flags[, dont_inherit]]) -> code object Compile the source string (a Python module, statement or expression) into a code object that can be executed by the exec statement or eval(). 
The filename will be used for run-time error messages. The mode must be 'exec' to compile a module, 'single' to compile a single (interactive) statement, or 'eval' to compile an expression. The flags argument, if present, controls which future statements influence the compilation of the code. The dont_inherit argument, if non-zero, stops the compilation inheriting the effects of any future statements in effect in the code calling compile; if absent or zero these statements do influence the compilation, in addition to any features explicitly specified. """ pass def copyright(*args, **kwargs): # real signature unknown """ interactive prompt objects for printing the license text, a list of contributors and the copyright notice. """ pass def credits(*args, **kwargs): # real signature unknown """ interactive prompt objects for printing the license text, a list of contributors and the copyright notice. """ pass def delattr(p_object, name): # real signature unknown; restored from __doc__ """ delattr(object, name) Delete a named attribute on an object; delattr(x, 'y') is equivalent to ``del x.y''. """ pass def dir(p_object=None): # real signature unknown; restored from __doc__ """ dir([object]) -> list of strings If called without an argument, return the names in the current scope. Else, return an alphabetized list of names comprising (some of) the attributes of the given object, and of attributes reachable from it. If the object supplies a method named __dir__, it will be used; otherwise the default dir() logic is used and returns: for a module object: the module's attributes. for a class object: its attributes, and recursively the attributes of its bases. for any other object: its attributes, its class's attributes, and recursively the attributes of its class's base classes. """ return [] def divmod(x, y): # known case of __builtin__.divmod """ divmod(x, y) -> (quotient, remainder) Return the tuple ((x-x%y)/y, x%y). Invariant: div*y + mod == x. """ return (0, 0) def eval(source, globals=None, locals=None): # real signature unknown; restored from __doc__ """ eval(source[, globals[, locals]]) -> value Evaluate the source in the context of globals and locals. The source may be a string representing a Python expression or a code object as returned by compile(). The globals must be a dictionary and locals can be any mapping, defaulting to the current globals and locals. If only globals is given, locals defaults to it. """ pass def execfile(filename, globals=None, locals=None): # real signature unknown; restored from __doc__ """ execfile(filename[, globals[, locals]]) Read and execute a Python script from a file. The globals and locals are dictionaries, defaulting to the current globals and locals. If only globals is given, locals defaults to it. """ pass def exit(*args, **kwargs): # real signature unknown pass def filter(function_or_none, sequence): # known special case of filter """ filter(function or None, sequence) -> list, tuple, or string Return those items of sequence for which function(item) is true. If function is None, return the items that are true. If sequence is a tuple or string, return the same type, else return a list. 
""" pass def format(value, format_spec=None): # real signature unknown; restored from __doc__ """ format(value[, format_spec]) -> string Returns value.__format__(format_spec) format_spec defaults to "" """ return "" def getattr(object, name, default=None): # known special case of getattr """ getattr(object, name[, default]) -> value Get a named attribute from an object; getattr(x, 'y') is equivalent to x.y. When a default argument is given, it is returned when the attribute doesn't exist; without it, an exception is raised in that case. """ pass def globals(): # real signature unknown; restored from __doc__ """ globals() -> dictionary Return the dictionary containing the current scope's global variables. """ return {} def hasattr(p_object, name): # real signature unknown; restored from __doc__ """ hasattr(object, name) -> bool Return whether the object has an attribute with the given name. (This is done by calling getattr(object, name) and catching exceptions.) """ return False def hash(p_object): # real signature unknown; restored from __doc__ """ hash(object) -> integer Return a hash value for the object. Two objects with the same value have the same hash value. The reverse is not necessarily true, but likely. """ return 0 def help(with_a_twist): # real signature unknown; restored from __doc__ """ Define the built-in 'help'. This is a wrapper around pydoc.help (with a twist). """ pass def hex(number): # real signature unknown; restored from __doc__ """ hex(number) -> string Return the hexadecimal representation of an integer or long integer. """ return "" def id(p_object): # real signature unknown; restored from __doc__ """ id(object) -> integer Return the identity of an object. This is guaranteed to be unique among simultaneously existing objects. (Hint: it's the object's memory address.) """ return 0 def input(prompt=None): # real signature unknown; restored from __doc__ """ input([prompt]) -> value Equivalent to eval(raw_input(prompt)). """ pass def intern(string): # real signature unknown; restored from __doc__ """ intern(string) -> string ``Intern'' the given string. This enters the string in the (global) table of interned strings whose purpose is to speed up dictionary lookups. Return the string itself or the previously interned string object with the same value. """ return "" def isinstance(p_object, class_or_type_or_tuple): # real signature unknown; restored from __doc__ """ isinstance(object, class-or-type-or-tuple) -> bool Return whether an object is an instance of a class or of a subclass thereof. With a type as second argument, return whether that is the object's type. The form using a tuple, isinstance(x, (A, B, ...)), is a shortcut for isinstance(x, A) or isinstance(x, B) or ... (etc.). """ return False def issubclass(C, B): # real signature unknown; restored from __doc__ """ issubclass(C, B) -> bool Return whether class C is a subclass (i.e., a derived class) of class B. When using a tuple as the second argument issubclass(X, (A, B, ...)), is a shortcut for issubclass(X, A) or issubclass(X, B) or ... (etc.). """ return False def iter(source, sentinel=None): # known special case of iter """ iter(collection) -> iterator iter(callable, sentinel) -> iterator Get an iterator from an object. In the first form, the argument must supply its own iterator, or be a sequence. In the second form, the callable is called until it returns the sentinel. 
""" pass def len(p_object): # real signature unknown; restored from __doc__ """ len(object) -> integer Return the number of items of a sequence or collection. """ return 0 def license(*args, **kwargs): # real signature unknown """ interactive prompt objects for printing the license text, a list of contributors and the copyright notice. """ pass def locals(): # real signature unknown; restored from __doc__ """ locals() -> dictionary Update and return a dictionary containing the current scope's local variables. """ return {} def map(function, sequence, *sequence_1): # real signature unknown; restored from __doc__ """ map(function, sequence[, sequence, ...]) -> list Return a list of the results of applying the function to the items of the argument sequence(s). If more than one sequence is given, the function is called with an argument list consisting of the corresponding item of each sequence, substituting None for missing values when not all sequences have the same length. If the function is None, return a list of the items of the sequence (or a list of tuples if more than one sequence). """ return [] def max(*args, **kwargs): # known special case of max """ max(iterable[, key=func]) -> value max(a, b, c, ...[, key=func]) -> value With a single iterable argument, return its largest item. With two or more arguments, return the largest argument. """ pass def min(*args, **kwargs): # known special case of min """ min(iterable[, key=func]) -> value min(a, b, c, ...[, key=func]) -> value With a single iterable argument, return its smallest item. With two or more arguments, return the smallest argument. """ pass def next(iterator, default=None): # real signature unknown; restored from __doc__ """ next(iterator[, default]) Return the next item from the iterator. If default is given and the iterator is exhausted, it is returned instead of raising StopIteration. """ pass def oct(number): # real signature unknown; restored from __doc__ """ oct(number) -> string Return the octal representation of an integer or long integer. """ return "" def open(name, mode=None, buffering=None): # real signature unknown; restored from __doc__ """ open(name[, mode[, buffering]]) -> file object Open a file using the file() type, returns a file object. This is the preferred way to open a file. See file.__doc__ for further information. """ return file('/dev/null') def ord(c): # real signature unknown; restored from __doc__ """ ord(c) -> integer Return the integer ordinal of a one-character string. """ return 0 def pow(x, y, z=None): # real signature unknown; restored from __doc__ """ pow(x, y[, z]) -> number With two arguments, equivalent to x**y. With three arguments, equivalent to (x**y) % z, but may be more efficient (e.g. for longs). """ return 0 def print(*args, **kwargs): # known special case of print """ print(value, ..., sep=' ', end='\n', file=sys.stdout) Prints the values to a stream, or to sys.stdout by default. Optional keyword arguments: file: a file-like object (stream); defaults to the current sys.stdout. sep: string inserted between values, default a space. end: string appended after the last value, default a newline. """ pass def quit(*args, **kwargs): # real signature unknown pass def range(start=None, stop=None, step=None): # known special case of range """ range(stop) -> list of integers range(start, stop[, step]) -> list of integers Return a list containing an arithmetic progression of integers. range(i, j) returns [i, i+1, i+2, ..., j-1]; start (!) defaults to 0. 
When step is given, it specifies the increment (or decrement). For example, range(4) returns [0, 1, 2, 3]. The end point is omitted! These are exactly the valid indices for a list of 4 elements. """ pass def raw_input(prompt=None): # real signature unknown; restored from __doc__ """ raw_input([prompt]) -> string Read a string from standard input. The trailing newline is stripped. If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError. On Unix, GNU readline is used if enabled. The prompt string, if given, is printed without a trailing newline before reading. """ return "" def reduce(function, sequence, initial=None): # real signature unknown; restored from __doc__ """ reduce(function, sequence[, initial]) -> value Apply a function of two arguments cumulatively to the items of a sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If initial is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. """ pass def reload(module): # real signature unknown; restored from __doc__ """ reload(module) -> module Reload the module. The module must have been successfully imported before. """ pass def repr(p_object): # real signature unknown; restored from __doc__ """ repr(object) -> string Return the canonical string representation of the object. For most object types, eval(repr(object)) == object. """ return "" def round(number, ndigits=None): # real signature unknown; restored from __doc__ """ round(number[, ndigits]) -> floating point number Round a number to a given precision in decimal digits (default 0 digits). This always returns a floating point number. Precision may be negative. """ return 0.0 def setattr(p_object, name, value): # real signature unknown; restored from __doc__ """ setattr(object, name, value) Set a named attribute on an object; setattr(x, 'y', v) is equivalent to ``x.y = v''. """ pass def sorted(iterable, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__ """ sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list """ pass def sum(sequence, start=None): # real signature unknown; restored from __doc__ """ sum(sequence[, start]) -> value Return the sum of a sequence of numbers (NOT strings) plus the value of parameter 'start' (which defaults to 0). When the sequence is empty, return start. """ pass def unichr(i): # real signature unknown; restored from __doc__ """ unichr(i) -> Unicode character Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff. """ return u"" def vars(p_object=None): # real signature unknown; restored from __doc__ """ vars([object]) -> dictionary Without arguments, equivalent to locals(). With an argument, equivalent to object.__dict__. """ return {} def zip(seq1, seq2, *more_seqs): # known special case of zip """ zip(seq1 [, seq2 [...]]) -> [(seq1[0], seq2[0] ...), (...)] Return a list of tuples, where each tuple contains the i-th element from each of the argument sequences. The returned list is truncated in length to the length of the shortest argument sequence. """ pass def __import__(name, globals={}, locals={}, fromlist=[], level=-1): # real signature unknown; restored from __doc__ """ __import__(name, globals={}, locals={}, fromlist=[], level=-1) -> module Import a module. 
Because this function is meant for use by the Python interpreter and not for general use it is better to use importlib.import_module() to programmatically import a module. The globals argument is only used to determine the context; they are not modified. The locals argument is unused. The fromlist should be a list of names to emulate ``from name import ...'', or an empty list to emulate ``import name''. When importing a module from a package, note that __import__('A.B', ...) returns package A when fromlist is empty, but its submodule B when fromlist is not empty. Level is used to determine whether to perform absolute or relative imports. -1 is the original strategy of attempting both absolute and relative imports, 0 is absolute, a positive number is the number of parent directories to search relative to the current module. """ pass # classes class ___Classobj: '''A mock class representing the old style class base.''' __module__ = '' __class__ = None def __init__(self): pass __dict__ = {} __doc__ = '' class __generator(object): '''A mock class representing the generator function type.''' def __init__(self): self.gi_code = None self.gi_frame = None self.gi_running = 0 def __iter__(self): '''Defined to support iteration over container.''' pass def next(self): '''Return the next item from the container.''' pass def close(self): '''Raises new GeneratorExit exception inside the generator to terminate the iteration.''' pass def send(self, value): '''Resumes the generator and "sends" a value that becomes the result of the current yield-expression.''' pass def throw(self, type, value=None, traceback=None): '''Used to raise an exception inside the generator.''' pass class __function(object): '''A mock class representing function type.''' def __init__(self): self.__name__ = '' self.__doc__ = '' self.__dict__ = '' self.__module__ = '' self.func_defaults = {} self.func_globals = {} self.func_closure = None self.func_code = None self.func_name = '' self.func_doc = '' self.func_dict = '' self.__defaults__ = {} self.__globals__ = {} self.__closure__ = None self.__code__ = None self.__name__ = '' class __method(object): '''A mock class representing method type.''' def __init__(self): self.im_class = None self.im_self = None self.im_func = None self.__func__ = None self.__self__ = None class __namedtuple(tuple): '''A mock base class for named tuples.''' __slots__ = () _fields = () def __new__(cls, *args, **kwargs): 'Create a new instance of the named tuple.' return tuple.__new__(cls, *args) @classmethod def _make(cls, iterable, new=tuple.__new__, len=len): 'Make a new named tuple object from a sequence or iterable.' return new(cls, iterable) def __repr__(self): return '' def _asdict(self): 'Return a new dict which maps field types to their values.' return {} def _replace(self, **kwargs): 'Return a new named tuple object replacing specified fields with new values.' return self def __getnewargs__(self): return tuple(self) class object: """ The most base type """ def __delattr__(self, name): # real signature unknown; restored from __doc__ """ x.__delattr__('name') <==> del x.name """ pass def __format__(self, *args, **kwargs): # real signature unknown """ default object formatter """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self): # known special case of object.__init__ """ x.__init__(...) 
initializes x; see help(type(x)) for signature """ pass @staticmethod # known case of __new__ def __new__(cls, *more): # known special case of object.__new__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __reduce_ex__(self, *args, **kwargs): # real signature unknown """ helper for pickle """ pass def __reduce__(self, *args, **kwargs): # real signature unknown """ helper for pickle """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __setattr__(self, name, value): # real signature unknown; restored from __doc__ """ x.__setattr__('name', value) <==> x.name = value """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ __sizeof__() -> int size of object in memory, in bytes """ return 0 def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass @classmethod # known case def __subclasshook__(cls, subclass): # known special case of object.__subclasshook__ """ Abstract classes can override this to customize issubclass(). This is invoked early on by abc.ABCMeta.__subclasscheck__(). It should return True, False or NotImplemented. If it returns NotImplemented, the normal algorithm is used. Otherwise, it overrides the normal algorithm (and the outcome is cached). """ pass __class__ = None # (!) forward: type, real value is '' __dict__ = {} __doc__ = '' __module__ = '' class basestring(object): """ Type basestring cannot be instantiated; it is the base for str and unicode. """ def __init__(self, *args, **kwargs): # real signature unknown pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass class int(object): """ int(x=0) -> int or long int(x, base=10) -> int or long Convert a number or string to an integer, or return 0 if no arguments are given. If x is floating point, the conversion truncates towards zero. If x is outside the integer range, the function returns a long instead. If x is not a number or if base is given, then x must be a string or Unicode object representing an integer literal in the given base. The literal can be preceded by '+' or '-' and be surrounded by whitespace. The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to interpret the base from the string as an integer literal. >>> int('0b100', base=0) 4 """ def bit_length(self): # real signature unknown; restored from __doc__ """ int.bit_length() -> int Number of bits necessary to represent self in binary. >>> bin(37) '0b100101' >>> (37).bit_length() 6 """ return 0 def conjugate(self, *args, **kwargs): # real signature unknown """ Returns self, the complex conjugate of any int. 
""" pass def __abs__(self): # real signature unknown; restored from __doc__ """ x.__abs__() <==> abs(x) """ pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __and__(self, y): # real signature unknown; restored from __doc__ """ x.__and__(y) <==> x&y """ pass def __cmp__(self, y): # real signature unknown; restored from __doc__ """ x.__cmp__(y) <==> cmp(x,y) """ pass def __coerce__(self, y): # real signature unknown; restored from __doc__ """ x.__coerce__(y) <==> coerce(x, y) """ pass def __divmod__(self, y): # real signature unknown; restored from __doc__ """ x.__divmod__(y) <==> divmod(x, y) """ pass def __div__(self, y): # real signature unknown; restored from __doc__ """ x.__div__(y) <==> x/y """ pass def __float__(self): # real signature unknown; restored from __doc__ """ x.__float__() <==> float(x) """ pass def __floordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__floordiv__(y) <==> x//y """ pass def __format__(self, *args, **kwargs): # real signature unknown pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getnewargs__(self, *args, **kwargs): # real signature unknown pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __hex__(self): # real signature unknown; restored from __doc__ """ x.__hex__() <==> hex(x) """ pass def __index__(self): # real signature unknown; restored from __doc__ """ x[y:z] <==> x[y.__index__():z.__index__()] """ pass def __init__(self, x, base=10): # known special case of int.__init__ """ int(x=0) -> int or long int(x, base=10) -> int or long Convert a number or string to an integer, or return 0 if no arguments are given. If x is floating point, the conversion truncates towards zero. If x is outside the integer range, the function returns a long instead. If x is not a number or if base is given, then x must be a string or Unicode object representing an integer literal in the given base. The literal can be preceded by '+' or '-' and be surrounded by whitespace. The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to interpret the base from the string as an integer literal. >>> int('0b100', base=0) 4 # (copied from class doc) """ pass def __int__(self): # real signature unknown; restored from __doc__ """ x.__int__() <==> int(x) """ pass def __invert__(self): # real signature unknown; restored from __doc__ """ x.__invert__() <==> ~x """ pass def __long__(self): # real signature unknown; restored from __doc__ """ x.__long__() <==> long(x) """ pass def __lshift__(self, y): # real signature unknown; restored from __doc__ """ x.__lshift__(y) <==> x<<y """ pass def __mod__(self, y): # real signature unknown; restored from __doc__ """ x.__mod__(y) <==> x%y """ pass def __mul__(self, y): # real signature unknown; restored from __doc__ """ x.__mul__(y) <==> x*y """ pass def __neg__(self): # real signature unknown; restored from __doc__ """ x.__neg__() <==> -x """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass def __nonzero__(self): # real signature unknown; restored from __doc__ """ x.__nonzero__() <==> x != 0 """ pass def __oct__(self): # real signature unknown; restored from __doc__ """ x.__oct__() <==> oct(x) """ pass def __or__(self, y): # real signature unknown; restored from __doc__ """ x.__or__(y) <==> x|y """ pass def __pos__(self): # real signature unknown; restored from __doc__ """ x.__pos__() <==> +x """ pass def __pow__(self, y, z=None): # real signature unknown; restored from __doc__ """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """ pass def __radd__(self, y): # real signature unknown; restored from __doc__ """ x.__radd__(y) <==> y+x """ pass def __rand__(self, y): # real signature unknown; restored from __doc__ """ x.__rand__(y) <==> y&x """ pass def __rdivmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rdivmod__(y) <==> divmod(y, x) """ pass def __rdiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rdiv__(y) <==> y/x """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rfloordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rfloordiv__(y) <==> y//x """ pass def __rlshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rlshift__(y) <==> y<<x """ pass def __rmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rmod__(y) <==> y%x """ pass def __rmul__(self, y): # real signature unknown; restored from __doc__ """ x.__rmul__(y) <==> y*x """ pass def __ror__(self, y): # real signature unknown; restored from __doc__ """ x.__ror__(y) <==> y|x """ pass def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__ """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """ pass def __rrshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rrshift__(y) <==> y>>x """ pass def __rshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rshift__(y) <==> x>>y """ pass def __rsub__(self, y): # real signature unknown; restored from __doc__ """ x.__rsub__(y) <==> y-x """ pass def __rtruediv__(self, y): # real signature unknown; restored from __doc__ """ x.__rtruediv__(y) <==> y/x """ pass def __rxor__(self, y): # real signature unknown; restored from __doc__ """ x.__rxor__(y) <==> y^x """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass def __sub__(self, y): # real signature unknown; restored from __doc__ """ x.__sub__(y) <==> x-y """ pass def __truediv__(self, y): # real signature unknown; restored from __doc__ """ x.__truediv__(y) <==> x/y """ pass def __trunc__(self, *args, **kwargs): # real signature unknown """ Truncating an Integral returns itself. 
""" pass def __xor__(self, y): # real signature unknown; restored from __doc__ """ x.__xor__(y) <==> x^y """ pass denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the denominator of a rational number in lowest terms""" imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the imaginary part of a complex number""" numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the numerator of a rational number in lowest terms""" real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the real part of a complex number""" class bool(int): """ bool(x) -> bool Returns True when the argument x is true, False otherwise. The builtins True and False are the only two instances of the class bool. The class bool is a subclass of the class int, and cannot be subclassed. """ def __and__(self, y): # real signature unknown; restored from __doc__ """ x.__and__(y) <==> x&y """ pass def __init__(self, x): # real signature unknown; restored from __doc__ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __or__(self, y): # real signature unknown; restored from __doc__ """ x.__or__(y) <==> x|y """ pass def __rand__(self, y): # real signature unknown; restored from __doc__ """ x.__rand__(y) <==> y&x """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __ror__(self, y): # real signature unknown; restored from __doc__ """ x.__ror__(y) <==> y|x """ pass def __rxor__(self, y): # real signature unknown; restored from __doc__ """ x.__rxor__(y) <==> y^x """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass def __xor__(self, y): # real signature unknown; restored from __doc__ """ x.__xor__(y) <==> x^y """ pass class buffer(object): """ buffer(object [, offset[, size]]) Create a new buffer object which references the given object. The buffer will reference a slice of the target object from the start of the object (or at the specified offset). The slice will extend to the end of the target object (or with the specified size). """ def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __cmp__(self, y): # real signature unknown; restored from __doc__ """ x.__cmp__(y) <==> cmp(x,y) """ pass def __delitem__(self, y): # real signature unknown; restored from __doc__ """ x.__delitem__(y) <==> del x[y] """ pass def __delslice__(self, i, j): # real signature unknown; restored from __doc__ """ x.__delslice__(i, j) <==> del x[i:j] Use of negative indices is not supported. """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __getslice__(self, i, j): # real signature unknown; restored from __doc__ """ x.__getslice__(i, j) <==> x[i:j] Use of negative indices is not supported. 
""" pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, p_object, offset=None, size=None): # real signature unknown; restored from __doc__ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __mul__(self, n): # real signature unknown; restored from __doc__ """ x.__mul__(n) <==> x*n """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rmul__(self, n): # real signature unknown; restored from __doc__ """ x.__rmul__(n) <==> n*x """ pass def __setitem__(self, i, y): # real signature unknown; restored from __doc__ """ x.__setitem__(i, y) <==> x[i]=y """ pass def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__ """ x.__setslice__(i, j, y) <==> x[i:j]=y Use of negative indices is not supported. """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass class bytearray(object): """ bytearray(iterable_of_ints) -> bytearray. bytearray(string, encoding[, errors]) -> bytearray. bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray. bytearray(memory_view) -> bytearray. Construct an mutable bytearray object from: - an iterable yielding integers in range(256) - a text string encoded using the specified encoding - a bytes or a bytearray object - any object implementing the buffer API. bytearray(int) -> bytearray. Construct a zero-initialized bytearray of the given length. """ def append(self, p_int): # real signature unknown; restored from __doc__ """ B.append(int) -> None Append a single item to the end of B. """ pass def capitalize(self): # real signature unknown; restored from __doc__ """ B.capitalize() -> copy of B Return a copy of B with only its first character capitalized (ASCII) and the rest lower-cased. """ pass def center(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ B.center(width[, fillchar]) -> copy of B Return B centered in a string of length width. Padding is done using the specified fill character (default is a space). """ pass def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ B.count(sub [,start [,end]]) -> int Return the number of non-overlapping occurrences of subsection sub in bytes B[start:end]. Optional arguments start and end are interpreted as in slice notation. """ return 0 def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__ """ B.decode([encoding[, errors]]) -> unicode object. Decodes B using the codec registered for encoding. encoding defaults to the default encoding. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' as well as any other name registered with codecs.register_error that is able to handle UnicodeDecodeErrors. """ return u"" def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__ """ B.endswith(suffix [,start [,end]]) -> bool Return True if B ends with the specified suffix, False otherwise. With optional start, test B beginning at that position. 
With optional end, stop comparing B at that position. suffix can also be a tuple of strings to try. """ return False def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__ """ B.expandtabs([tabsize]) -> copy of B Return a copy of B where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. """ pass def extend(self, iterable_int): # real signature unknown; restored from __doc__ """ B.extend(iterable int) -> None Append all the elements from the iterator or sequence to the end of B. """ pass def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ B.find(sub [,start [,end]]) -> int Return the lowest index in B where subsection sub is found, such that sub is contained within B[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. """ return 0 @classmethod # known case def fromhex(cls, string): # real signature unknown; restored from __doc__ """ bytearray.fromhex(string) -> bytearray Create a bytearray object from a string of hexadecimal numbers. Spaces between two numbers are accepted. Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef'). """ return bytearray def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ B.index(sub [,start [,end]]) -> int Like B.find() but raise ValueError when the subsection is not found. """ return 0 def insert(self, index, p_int): # real signature unknown; restored from __doc__ """ B.insert(index, int) -> None Insert a single item into the bytearray before the given index. """ pass def isalnum(self): # real signature unknown; restored from __doc__ """ B.isalnum() -> bool Return True if all characters in B are alphanumeric and there is at least one character in B, False otherwise. """ return False def isalpha(self): # real signature unknown; restored from __doc__ """ B.isalpha() -> bool Return True if all characters in B are alphabetic and there is at least one character in B, False otherwise. """ return False def isdigit(self): # real signature unknown; restored from __doc__ """ B.isdigit() -> bool Return True if all characters in B are digits and there is at least one character in B, False otherwise. """ return False def islower(self): # real signature unknown; restored from __doc__ """ B.islower() -> bool Return True if all cased characters in B are lowercase and there is at least one cased character in B, False otherwise. """ return False def isspace(self): # real signature unknown; restored from __doc__ """ B.isspace() -> bool Return True if all characters in B are whitespace and there is at least one character in B, False otherwise. """ return False def istitle(self): # real signature unknown; restored from __doc__ """ B.istitle() -> bool Return True if B is a titlecased string and there is at least one character in B, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return False otherwise. """ return False def isupper(self): # real signature unknown; restored from __doc__ """ B.isupper() -> bool Return True if all cased characters in B are uppercase and there is at least one cased character in B, False otherwise. """ return False def join(self, iterable_of_bytes): # real signature unknown; restored from __doc__ """ B.join(iterable_of_bytes) -> bytes Concatenates any number of bytearray objects, with B in between each pair. 
""" return "" def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ B.ljust(width[, fillchar]) -> copy of B Return B left justified in a string of length width. Padding is done using the specified fill character (default is a space). """ pass def lower(self): # real signature unknown; restored from __doc__ """ B.lower() -> copy of B Return a copy of B with all ASCII characters converted to lowercase. """ pass def lstrip(self, bytes=None): # real signature unknown; restored from __doc__ """ B.lstrip([bytes]) -> bytearray Strip leading bytes contained in the argument. If the argument is omitted, strip leading ASCII whitespace. """ return bytearray def partition(self, sep): # real signature unknown; restored from __doc__ """ B.partition(sep) -> (head, sep, tail) Searches for the separator sep in B, and returns the part before it, the separator itself, and the part after it. If the separator is not found, returns B and two empty bytearray objects. """ pass def pop(self, index=None): # real signature unknown; restored from __doc__ """ B.pop([index]) -> int Remove and return a single item from B. If no index argument is given, will pop the last value. """ return 0 def remove(self, p_int): # real signature unknown; restored from __doc__ """ B.remove(int) -> None Remove the first occurance of a value in B. """ pass def replace(self, old, new, count=None): # real signature unknown; restored from __doc__ """ B.replace(old, new[, count]) -> bytes Return a copy of B with all occurrences of subsection old replaced by new. If the optional argument count is given, only the first count occurrences are replaced. """ return "" def reverse(self): # real signature unknown; restored from __doc__ """ B.reverse() -> None Reverse the order of the values in B in place. """ pass def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ B.rfind(sub [,start [,end]]) -> int Return the highest index in B where subsection sub is found, such that sub is contained within B[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. """ return 0 def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ B.rindex(sub [,start [,end]]) -> int Like B.rfind() but raise ValueError when the subsection is not found. """ return 0 def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ B.rjust(width[, fillchar]) -> copy of B Return B right justified in a string of length width. Padding is done using the specified fill character (default is a space) """ pass def rpartition(self, sep): # real signature unknown; restored from __doc__ """ B.rpartition(sep) -> (head, sep, tail) Searches for the separator sep in B, starting at the end of B, and returns the part before it, the separator itself, and the part after it. If the separator is not found, returns two empty bytearray objects and B. """ pass def rsplit(self, sep, maxsplit=None): # real signature unknown; restored from __doc__ """ B.rsplit(sep[, maxsplit]) -> list of bytearray Return a list of the sections in B, using sep as the delimiter, starting at the end of B and working to the front. If sep is not given, B is split on ASCII whitespace characters (space, tab, return, newline, formfeed, vertical tab). If maxsplit is given, at most maxsplit splits are done. 
""" return [] def rstrip(self, bytes=None): # real signature unknown; restored from __doc__ """ B.rstrip([bytes]) -> bytearray Strip trailing bytes contained in the argument. If the argument is omitted, strip trailing ASCII whitespace. """ return bytearray def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__ """ B.split([sep[, maxsplit]]) -> list of bytearray Return a list of the sections in B, using sep as the delimiter. If sep is not given, B is split on ASCII whitespace characters (space, tab, return, newline, formfeed, vertical tab). If maxsplit is given, at most maxsplit splits are done. """ return [] def splitlines(self, keepends=False): # real signature unknown; restored from __doc__ """ B.splitlines(keepends=False) -> list of lines Return a list of the lines in B, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. """ return [] def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__ """ B.startswith(prefix [,start [,end]]) -> bool Return True if B starts with the specified prefix, False otherwise. With optional start, test B beginning at that position. With optional end, stop comparing B at that position. prefix can also be a tuple of strings to try. """ return False def strip(self, bytes=None): # real signature unknown; restored from __doc__ """ B.strip([bytes]) -> bytearray Strip leading and trailing bytes contained in the argument. If the argument is omitted, strip ASCII whitespace. """ return bytearray def swapcase(self): # real signature unknown; restored from __doc__ """ B.swapcase() -> copy of B Return a copy of B with uppercase ASCII characters converted to lowercase ASCII and vice versa. """ pass def title(self): # real signature unknown; restored from __doc__ """ B.title() -> copy of B Return a titlecased version of B, i.e. ASCII words start with uppercase characters, all remaining cased characters have lowercase. """ pass def translate(self, table, deletechars=None): # real signature unknown; restored from __doc__ """ B.translate(table[, deletechars]) -> bytearray Return a copy of B, where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a bytes object of length 256. """ return bytearray def upper(self): # real signature unknown; restored from __doc__ """ B.upper() -> copy of B Return a copy of B with all ASCII characters converted to uppercase. """ pass def zfill(self, width): # real signature unknown; restored from __doc__ """ B.zfill(width) -> copy of B Pad a numeric string B with zeros on the left, to fill a field of the specified width. B is never truncated. """ pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __alloc__(self): # real signature unknown; restored from __doc__ """ B.__alloc__() -> int Returns the number of bytes actually allocated. 
""" return 0 def __contains__(self, y): # real signature unknown; restored from __doc__ """ x.__contains__(y) <==> y in x """ pass def __delitem__(self, y): # real signature unknown; restored from __doc__ """ x.__delitem__(y) <==> del x[y] """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __iadd__(self, y): # real signature unknown; restored from __doc__ """ x.__iadd__(y) <==> x+=y """ pass def __imul__(self, y): # real signature unknown; restored from __doc__ """ x.__imul__(y) <==> x*=y """ pass def __init__(self, source=None, encoding=None, errors='strict'): # known special case of bytearray.__init__ """ bytearray(iterable_of_ints) -> bytearray. bytearray(string, encoding[, errors]) -> bytearray. bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray. bytearray(memory_view) -> bytearray. Construct an mutable bytearray object from: - an iterable yielding integers in range(256) - a text string encoded using the specified encoding - a bytes or a bytearray object - any object implementing the buffer API. bytearray(int) -> bytearray. Construct a zero-initialized bytearray of the given length. # (copied from class doc) """ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __mul__(self, n): # real signature unknown; restored from __doc__ """ x.__mul__(n) <==> x*n """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __reduce__(self, *args, **kwargs): # real signature unknown """ Return state information for pickling. """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rmul__(self, n): # real signature unknown; restored from __doc__ """ x.__rmul__(n) <==> n*x """ pass def __setitem__(self, i, y): # real signature unknown; restored from __doc__ """ x.__setitem__(i, y) <==> x[i]=y """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ B.__sizeof__() -> int Returns the size of B in memory, in bytes """ return 0 def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass class str(basestring): """ str(object='') -> string Return a nice string representation of the object. If the argument is a string, the return value is the same object. """ def capitalize(self): # real signature unknown; restored from __doc__ """ S.capitalize() -> string Return a copy of the string S with only its first character capitalized. 
""" return "" def center(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ S.center(width[, fillchar]) -> string Return S centered in a string of length width. Padding is done using the specified fill character (default is a space) """ return "" def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.count(sub[, start[, end]]) -> int Return the number of non-overlapping occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation. """ return 0 def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__ """ S.decode([encoding[,errors]]) -> object Decodes S using the codec registered for encoding. encoding defaults to the default encoding. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' as well as any other name registered with codecs.register_error that is able to handle UnicodeDecodeErrors. """ return object() def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__ """ S.encode([encoding[,errors]]) -> object Encodes S using the codec registered for encoding. encoding defaults to the default encoding. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as any other name registered with codecs.register_error that is able to handle UnicodeEncodeErrors. """ return object() def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__ """ S.endswith(suffix[, start[, end]]) -> bool Return True if S ends with the specified suffix, False otherwise. With optional start, test S beginning at that position. With optional end, stop comparing S at that position. suffix can also be a tuple of strings to try. """ return False def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__ """ S.expandtabs([tabsize]) -> string Return a copy of S where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. """ return "" def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.find(sub [,start [,end]]) -> int Return the lowest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. """ return 0 def format(self, *args, **kwargs): # known special case of str.format """ S.format(*args, **kwargs) -> string Return a formatted version of S, using substitutions from args and kwargs. The substitutions are identified by braces ('{' and '}'). """ pass def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.index(sub [,start [,end]]) -> int Like S.find() but raise ValueError when the substring is not found. """ return 0 def isalnum(self): # real signature unknown; restored from __doc__ """ S.isalnum() -> bool Return True if all characters in S are alphanumeric and there is at least one character in S, False otherwise. 
""" return False def isalpha(self): # real signature unknown; restored from __doc__ """ S.isalpha() -> bool Return True if all characters in S are alphabetic and there is at least one character in S, False otherwise. """ return False def isdigit(self): # real signature unknown; restored from __doc__ """ S.isdigit() -> bool Return True if all characters in S are digits and there is at least one character in S, False otherwise. """ return False def islower(self): # real signature unknown; restored from __doc__ """ S.islower() -> bool Return True if all cased characters in S are lowercase and there is at least one cased character in S, False otherwise. """ return False def isspace(self): # real signature unknown; restored from __doc__ """ S.isspace() -> bool Return True if all characters in S are whitespace and there is at least one character in S, False otherwise. """ return False def istitle(self): # real signature unknown; restored from __doc__ """ S.istitle() -> bool Return True if S is a titlecased string and there is at least one character in S, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return False otherwise. """ return False def isupper(self): # real signature unknown; restored from __doc__ """ S.isupper() -> bool Return True if all cased characters in S are uppercase and there is at least one cased character in S, False otherwise. """ return False def join(self, iterable): # real signature unknown; restored from __doc__ """ S.join(iterable) -> string Return a string which is the concatenation of the strings in the iterable. The separator between elements is S. """ return "" def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ S.ljust(width[, fillchar]) -> string Return S left-justified in a string of length width. Padding is done using the specified fill character (default is a space). """ return "" def lower(self): # real signature unknown; restored from __doc__ """ S.lower() -> string Return a copy of the string S converted to lowercase. """ return "" def lstrip(self, chars=None): # real signature unknown; restored from __doc__ """ S.lstrip([chars]) -> string or unicode Return a copy of the string S with leading whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is unicode, S will be converted to unicode before stripping """ return "" def partition(self, sep): # real signature unknown; restored from __doc__ """ S.partition(sep) -> (head, sep, tail) Search for the separator sep in S, and return the part before it, the separator itself, and the part after it. If the separator is not found, return S and two empty strings. """ pass def replace(self, old, new, count=None): # real signature unknown; restored from __doc__ """ S.replace(old, new[, count]) -> string Return a copy of string S with all occurrences of substring old replaced by new. If the optional argument count is given, only the first count occurrences are replaced. """ return "" def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.rfind(sub [,start [,end]]) -> int Return the highest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. 
""" return 0 def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.rindex(sub [,start [,end]]) -> int Like S.rfind() but raise ValueError when the substring is not found. """ return 0 def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ S.rjust(width[, fillchar]) -> string Return S right-justified in a string of length width. Padding is done using the specified fill character (default is a space) """ return "" def rpartition(self, sep): # real signature unknown; restored from __doc__ """ S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return the part before it, the separator itself, and the part after it. If the separator is not found, return two empty strings and S. """ pass def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__ """ S.rsplit([sep [,maxsplit]]) -> list of strings Return a list of the words in the string S, using sep as the delimiter string, starting at the end of the string and working to the front. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or is None, any whitespace string is a separator. """ return [] def rstrip(self, chars=None): # real signature unknown; restored from __doc__ """ S.rstrip([chars]) -> string or unicode Return a copy of the string S with trailing whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is unicode, S will be converted to unicode before stripping """ return "" def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__ """ S.split([sep [,maxsplit]]) -> list of strings Return a list of the words in the string S, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or is None, any whitespace string is a separator and empty strings are removed from the result. """ return [] def splitlines(self, keepends=False): # real signature unknown; restored from __doc__ """ S.splitlines(keepends=False) -> list of strings Return a list of the lines in S, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. """ return [] def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__ """ S.startswith(prefix[, start[, end]]) -> bool Return True if S starts with the specified prefix, False otherwise. With optional start, test S beginning at that position. With optional end, stop comparing S at that position. prefix can also be a tuple of strings to try. """ return False def strip(self, chars=None): # real signature unknown; restored from __doc__ """ S.strip([chars]) -> string or unicode Return a copy of the string S with leading and trailing whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is unicode, S will be converted to unicode before stripping """ return "" def swapcase(self): # real signature unknown; restored from __doc__ """ S.swapcase() -> string Return a copy of the string S with uppercase characters converted to lowercase and vice versa. """ return "" def title(self): # real signature unknown; restored from __doc__ """ S.title() -> string Return a titlecased version of S, i.e. words start with uppercase characters, all remaining cased characters have lowercase. 
""" return "" def translate(self, table, deletechars=None): # real signature unknown; restored from __doc__ """ S.translate(table [,deletechars]) -> string Return a copy of the string S, where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256 or None. If the table argument is None, no translation is applied and the operation simply removes the characters in deletechars. """ return "" def upper(self): # real signature unknown; restored from __doc__ """ S.upper() -> string Return a copy of the string S converted to uppercase. """ return "" def zfill(self, width): # real signature unknown; restored from __doc__ """ S.zfill(width) -> string Pad a numeric string S with zeros on the left, to fill a field of the specified width. The string S is never truncated. """ return "" def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown pass def _formatter_parser(self, *args, **kwargs): # real signature unknown pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __contains__(self, y): # real signature unknown; restored from __doc__ """ x.__contains__(y) <==> y in x """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __format__(self, format_spec): # real signature unknown; restored from __doc__ """ S.__format__(format_spec) -> string Return a formatted version of S as described by format_spec. """ return "" def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __getnewargs__(self, *args, **kwargs): # real signature unknown pass def __getslice__(self, i, j): # real signature unknown; restored from __doc__ """ x.__getslice__(i, j) <==> x[i:j] Use of negative indices is not supported. """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, string=''): # known special case of str.__init__ """ str(object='') -> string Return a nice string representation of the object. If the argument is a string, the return value is the same object. # (copied from class doc) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __mod__(self, y): # real signature unknown; restored from __doc__ """ x.__mod__(y) <==> x%y """ pass def __mul__(self, n): # real signature unknown; restored from __doc__ """ x.__mul__(n) <==> x*n """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rmod__(y) <==> y%x """ pass def __rmul__(self, n): # real signature unknown; restored from __doc__ """ x.__rmul__(n) <==> n*x """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ S.__sizeof__() -> size of S in memory, in bytes """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass bytes = str class classmethod(object): """ classmethod(function) -> method Convert a function to be a class method. A class method receives the class as implicit first argument, just like an instance method receives the instance. To declare a class method, use this idiom: class C: def f(cls, arg1, arg2, ...): ... f = classmethod(f) It can be called either on the class (e.g. C.f()) or on an instance (e.g. C().f()). The instance is ignored except for its class. If a class method is called for a derived class, the derived class object is passed as the implied first argument. Class methods are different than C++ or Java static methods. If you want those, see the staticmethod builtin. """ def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __get__(self, obj, type=None): # real signature unknown; restored from __doc__ """ descr.__get__(obj[, type]) -> value """ pass def __init__(self, function): # real signature unknown; restored from __doc__ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass __func__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default class complex(object): """ complex(real[, imag]) -> complex number Create a complex number from a real part and an optional imaginary part. This is equivalent to (real + imag*1j) where imag defaults to 0. """ def conjugate(self): # real signature unknown; restored from __doc__ """ complex.conjugate() -> complex Return the complex conjugate of its argument. (3-4j).conjugate() == 3+4j. """ return complex def __abs__(self): # real signature unknown; restored from __doc__ """ x.__abs__() <==> abs(x) """ pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __coerce__(self, y): # real signature unknown; restored from __doc__ """ x.__coerce__(y) <==> coerce(x, y) """ pass def __divmod__(self, y): # real signature unknown; restored from __doc__ """ x.__divmod__(y) <==> divmod(x, y) """ pass def __div__(self, y): # real signature unknown; restored from __doc__ """ x.__div__(y) <==> x/y """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __float__(self): # real signature unknown; restored from __doc__ """ x.__float__() <==> float(x) """ pass def __floordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__floordiv__(y) <==> x//y """ pass def __format__(self): # real signature unknown; restored from __doc__ """ complex.__format__() -> str Convert to a string according to format_spec. 
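
        Illustrative doctest (editorial addition, not part of the original
        CPython docstring):

        >>> format(1 + 2j, '')  # an empty format_spec falls back to str()
        '(1+2j)'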
""" return "" def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getnewargs__(self, *args, **kwargs): # real signature unknown pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, real, imag=None): # real signature unknown; restored from __doc__ pass def __int__(self): # real signature unknown; restored from __doc__ """ x.__int__() <==> int(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __long__(self): # real signature unknown; restored from __doc__ """ x.__long__() <==> long(x) """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __mod__(self, y): # real signature unknown; restored from __doc__ """ x.__mod__(y) <==> x%y """ pass def __mul__(self, y): # real signature unknown; restored from __doc__ """ x.__mul__(y) <==> x*y """ pass def __neg__(self): # real signature unknown; restored from __doc__ """ x.__neg__() <==> -x """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __nonzero__(self): # real signature unknown; restored from __doc__ """ x.__nonzero__() <==> x != 0 """ pass def __pos__(self): # real signature unknown; restored from __doc__ """ x.__pos__() <==> +x """ pass def __pow__(self, y, z=None): # real signature unknown; restored from __doc__ """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """ pass def __radd__(self, y): # real signature unknown; restored from __doc__ """ x.__radd__(y) <==> y+x """ pass def __rdivmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rdivmod__(y) <==> divmod(y, x) """ pass def __rdiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rdiv__(y) <==> y/x """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rfloordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rfloordiv__(y) <==> y//x """ pass def __rmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rmod__(y) <==> y%x """ pass def __rmul__(self, y): # real signature unknown; restored from __doc__ """ x.__rmul__(y) <==> y*x """ pass def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__ """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """ pass def __rsub__(self, y): # real signature unknown; restored from __doc__ """ x.__rsub__(y) <==> y-x """ pass def __rtruediv__(self, y): # real signature unknown; restored from __doc__ """ x.__rtruediv__(y) <==> y/x """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass def __sub__(self, y): # real signature unknown; restored from __doc__ """ x.__sub__(y) <==> x-y """ pass def __truediv__(self, y): # real signature unknown; restored from __doc__ """ x.__truediv__(y) <==> x/y """ pass imag = property(lambda self: 0.0) """the imaginary part of a complex number :type: float """ real = property(lambda self: 
0.0) """the real part of a complex number :type: float """ class dict(object): """ dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2) """ def clear(self): # real signature unknown; restored from __doc__ """ D.clear() -> None. Remove all items from D. """ pass def copy(self): # real signature unknown; restored from __doc__ """ D.copy() -> a shallow copy of D """ pass @staticmethod # known case def fromkeys(S, v=None): # real signature unknown; restored from __doc__ """ dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v. v defaults to None. """ pass def get(self, k, d=None): # real signature unknown; restored from __doc__ """ D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None. """ pass def has_key(self, k): # real signature unknown; restored from __doc__ """ D.has_key(k) -> True if D has a key k, else False """ return False def items(self): # real signature unknown; restored from __doc__ """ D.items() -> list of D's (key, value) pairs, as 2-tuples """ return [] def iteritems(self): # real signature unknown; restored from __doc__ """ D.iteritems() -> an iterator over the (key, value) items of D """ pass def iterkeys(self): # real signature unknown; restored from __doc__ """ D.iterkeys() -> an iterator over the keys of D """ pass def itervalues(self): # real signature unknown; restored from __doc__ """ D.itervalues() -> an iterator over the values of D """ pass def keys(self): # real signature unknown; restored from __doc__ """ D.keys() -> list of D's keys """ return [] def pop(self, k, d=None): # real signature unknown; restored from __doc__ """ D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised """ pass def popitem(self): # real signature unknown; restored from __doc__ """ D.popitem() -> (k, v), remove and return some (key, value) pair as a 2-tuple; but raise KeyError if D is empty. """ pass def setdefault(self, k, d=None): # real signature unknown; restored from __doc__ """ D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D """ pass def update(self, E=None, **F): # known special case of dict.update """ D.update([E, ]**F) -> None. Update D from dict/iterable E and F. 
If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k] """ pass def values(self): # real signature unknown; restored from __doc__ """ D.values() -> list of D's values """ return [] def viewitems(self): # real signature unknown; restored from __doc__ """ D.viewitems() -> a set-like object providing a view on D's items """ pass def viewkeys(self): # real signature unknown; restored from __doc__ """ D.viewkeys() -> a set-like object providing a view on D's keys """ pass def viewvalues(self): # real signature unknown; restored from __doc__ """ D.viewvalues() -> an object providing a view on D's values """ pass def __cmp__(self, y): # real signature unknown; restored from __doc__ """ x.__cmp__(y) <==> cmp(x,y) """ pass def __contains__(self, k): # real signature unknown; restored from __doc__ """ D.__contains__(k) -> True if D has a key k, else False """ return False def __delitem__(self, y): # real signature unknown; restored from __doc__ """ x.__delitem__(y) <==> del x[y] """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __init__(self, seq=None, **kwargs): # known special case of dict.__init__ """ dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2) # (copied from class doc) """ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __setitem__(self, i, y): # real signature unknown; restored from __doc__ """ x.__setitem__(i, y) <==> x[i]=y """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ D.__sizeof__() -> size of D in memory, in bytes """ pass __hash__ = None class enumerate(object): """ enumerate(iterable[, start]) -> iterator for index, value of iterable Return an enumerate object. iterable must be another object that supports iteration. 
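
    Illustrative doctest (editorial addition, not part of the original
    CPython docstring):

    >>> list(enumerate(['a', 'b'], 1))
    [(1, 'a'), (2, 'b')]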
The enumerate object yields pairs containing a count (from start, which defaults to zero) and a value yielded by the iterable argument. enumerate is useful for obtaining an indexed list: (0, seq[0]), (1, seq[1]), (2, seq[2]), ... """ def next(self): # real signature unknown; restored from __doc__ """ x.next() -> the next value, or raise StopIteration """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __init__(self, iterable, start=0): # known special case of enumerate.__init__ """ x.__init__(...) initializes x; see help(type(x)) for signature """ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass class file(object): """ file(name[, mode[, buffering]]) -> file object Open a file. The mode can be 'r', 'w' or 'a' for reading (default), writing or appending. The file will be created if it doesn't exist when opened for writing or appending; it will be truncated when opened for writing. Add a 'b' to the mode for binary files. Add a '+' to the mode to allow simultaneous reading and writing. If the buffering argument is given, 0 means unbuffered, 1 means line buffered, and larger numbers specify the buffer size. The preferred way to open a file is with the builtin open() function. Add a 'U' to mode to open the file for input with universal newline support. Any line ending in the input file will be seen as a '\n' in Python. Also, a file so opened gains the attribute 'newlines'; the value for this attribute is one of None (no newline read yet), '\r', '\n', '\r\n' or a tuple containing all the newline types seen. 'U' cannot be combined with 'w' or '+' mode. """ def close(self): # real signature unknown; restored from __doc__ """ close() -> None or (perhaps) an integer. Close the file. Sets data attribute .closed to True. A closed file cannot be used for further I/O operations. close() may be called more than once without error. Some kinds of file objects (for example, opened by popen()) may return an exit status upon closing. """ pass def fileno(self): # real signature unknown; restored from __doc__ """ fileno() -> integer "file descriptor". This is needed for lower-level file interfaces, such os.read(). """ return 0 def flush(self): # real signature unknown; restored from __doc__ """ flush() -> None. Flush the internal I/O buffer. """ pass def isatty(self): # real signature unknown; restored from __doc__ """ isatty() -> true or false. True if the file is connected to a tty device. """ return False def next(self): # real signature unknown; restored from __doc__ """ x.next() -> the next value, or raise StopIteration """ pass def read(self, size=None): # real signature unknown; restored from __doc__ """ read([size]) -> read at most size bytes, returned as a string. If the size argument is negative or omitted, read until EOF is reached. Notice that when in non-blocking mode, less data than what was requested may be returned, even if no size parameter was given. """ pass def readinto(self): # real signature unknown; restored from __doc__ """ readinto() -> Undocumented. Don't use this; it may go away. """ pass def readline(self, size=None): # real signature unknown; restored from __doc__ """ readline([size]) -> next line from the file, as a string. Retain newline. 
A non-negative size argument limits the maximum number of bytes to return (an incomplete line may be returned then). Return an empty string at EOF. """ pass def readlines(self, size=None): # real signature unknown; restored from __doc__ """ readlines([size]) -> list of strings, each a line from the file. Call readline() repeatedly and return a list of the lines so read. The optional size argument, if given, is an approximate bound on the total number of bytes in the lines returned. """ return [] def seek(self, offset, whence=None): # real signature unknown; restored from __doc__ """ seek(offset[, whence]) -> None. Move to new file position. Argument offset is a byte count. Optional argument whence defaults to 0 (offset from start of file, offset should be >= 0); other values are 1 (move relative to current position, positive or negative), and 2 (move relative to end of file, usually negative, although many platforms allow seeking beyond the end of a file). If the file is opened in text mode, only offsets returned by tell() are legal. Use of other offsets causes undefined behavior. Note that not all file objects are seekable. """ pass def tell(self): # real signature unknown; restored from __doc__ """ tell() -> current file position, an integer (may be a long integer). """ pass def truncate(self, size=None): # real signature unknown; restored from __doc__ """ truncate([size]) -> None. Truncate the file to at most size bytes. Size defaults to the current file position, as returned by tell(). """ pass def write(self, p_str): # real signature unknown; restored from __doc__ """ write(str) -> None. Write string str to file. Note that due to buffering, flush() or close() may be needed before the file on disk reflects the data written. """ pass def writelines(self, sequence_of_strings): # real signature unknown; restored from __doc__ """ writelines(sequence_of_strings) -> None. Write the strings to the file. Note that newlines are not added. The sequence can be any iterable object producing strings. This is equivalent to calling write() for each string. """ pass def xreadlines(self): # real signature unknown; restored from __doc__ """ xreadlines() -> returns self. For backward compatibility. File objects now include the performance optimizations previously implemented in the xreadlines module. """ pass def __delattr__(self, name): # real signature unknown; restored from __doc__ """ x.__delattr__('name') <==> del x.name """ pass def __enter__(self): # real signature unknown; restored from __doc__ """ __enter__() -> self. """ return self def __exit__(self, *excinfo): # real signature unknown; restored from __doc__ """ __exit__(*excinfo) -> None. Closes the file. """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __init__(self, name, mode=None, buffering=None): # real signature unknown; restored from __doc__ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __setattr__(self, name, value): # real signature unknown; restored from __doc__ """ x.__setattr__('name', value) <==> x.name = value """ pass closed = property(lambda self: True) """True if the file is closed :type: bool """ encoding = property(lambda self: '') """file encoding :type: string """ errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Unicode error handler""" mode = property(lambda self: '') """file mode ('r', 'U', 'w', 'a', possibly with 'b' or '+' added) :type: string """ name = property(lambda self: '') """file name :type: string """ newlines = property(lambda self: '') """end-of-line convention used in this file :type: string """ softspace = property(lambda self: True) """flag indicating that a space needs to be printed; used by print :type: bool """ class float(object): """ float(x) -> floating point number Convert a string or number to a floating point number, if possible. """ def as_integer_ratio(self): # real signature unknown; restored from __doc__ """ float.as_integer_ratio() -> (int, int) Return a pair of integers, whose ratio is exactly equal to the original float and with a positive denominator. Raise OverflowError on infinities and a ValueError on NaNs. >>> (10.0).as_integer_ratio() (10, 1) >>> (0.0).as_integer_ratio() (0, 1) >>> (-.25).as_integer_ratio() (-1, 4) """ pass def conjugate(self, *args, **kwargs): # real signature unknown """ Return self, the complex conjugate of any float. """ pass @staticmethod # known case def fromhex(string): # real signature unknown; restored from __doc__ """ float.fromhex(string) -> float Create a floating-point number from a hexadecimal string. >>> float.fromhex('0x1.ffffp10') 2047.984375 >>> float.fromhex('-0x1p-1074') -4.9406564584124654e-324 """ return 0.0 def hex(self): # real signature unknown; restored from __doc__ """ float.hex() -> string Return a hexadecimal representation of a floating-point number. >>> (-0.1).hex() '-0x1.999999999999ap-4' >>> 3.14159.hex() '0x1.921f9f01b866ep+1' """ return "" def is_integer(self, *args, **kwargs): # real signature unknown """ Return True if the float is an integer. """ pass def __abs__(self): # real signature unknown; restored from __doc__ """ x.__abs__() <==> abs(x) """ pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __coerce__(self, y): # real signature unknown; restored from __doc__ """ x.__coerce__(y) <==> coerce(x, y) """ pass def __divmod__(self, y): # real signature unknown; restored from __doc__ """ x.__divmod__(y) <==> divmod(x, y) """ pass def __div__(self, y): # real signature unknown; restored from __doc__ """ x.__div__(y) <==> x/y """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __float__(self): # real signature unknown; restored from __doc__ """ x.__float__() <==> float(x) """ pass def __floordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__floordiv__(y) <==> x//y """ pass def __format__(self, format_spec): # real signature unknown; restored from __doc__ """ float.__format__(format_spec) -> string Formats the float according to format_spec. 
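
        Illustrative doctest (editorial addition, not part of the original
        CPython docstring):

        >>> format(2.5, '.3f')
        '2.500'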
""" return "" def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getformat__(self, typestr): # real signature unknown; restored from __doc__ """ float.__getformat__(typestr) -> string You probably don't want to use this function. It exists mainly to be used in Python's test suite. typestr must be 'double' or 'float'. This function returns whichever of 'unknown', 'IEEE, big-endian' or 'IEEE, little-endian' best describes the format of floating point numbers used by the C type named by typestr. """ return "" def __getnewargs__(self, *args, **kwargs): # real signature unknown pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, x): # real signature unknown; restored from __doc__ pass def __int__(self): # real signature unknown; restored from __doc__ """ x.__int__() <==> int(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __long__(self): # real signature unknown; restored from __doc__ """ x.__long__() <==> long(x) """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __mod__(self, y): # real signature unknown; restored from __doc__ """ x.__mod__(y) <==> x%y """ pass def __mul__(self, y): # real signature unknown; restored from __doc__ """ x.__mul__(y) <==> x*y """ pass def __neg__(self): # real signature unknown; restored from __doc__ """ x.__neg__() <==> -x """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __nonzero__(self): # real signature unknown; restored from __doc__ """ x.__nonzero__() <==> x != 0 """ pass def __pos__(self): # real signature unknown; restored from __doc__ """ x.__pos__() <==> +x """ pass def __pow__(self, y, z=None): # real signature unknown; restored from __doc__ """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """ pass def __radd__(self, y): # real signature unknown; restored from __doc__ """ x.__radd__(y) <==> y+x """ pass def __rdivmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rdivmod__(y) <==> divmod(y, x) """ pass def __rdiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rdiv__(y) <==> y/x """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rfloordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rfloordiv__(y) <==> y//x """ pass def __rmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rmod__(y) <==> y%x """ pass def __rmul__(self, y): # real signature unknown; restored from __doc__ """ x.__rmul__(y) <==> y*x """ pass def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__ """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """ pass def __rsub__(self, y): # real signature unknown; restored from __doc__ """ x.__rsub__(y) <==> y-x """ pass def __rtruediv__(self, y): # real signature unknown; restored from __doc__ """ x.__rtruediv__(y) <==> y/x """ pass def __setformat__(self, typestr, fmt): # real signature unknown; restored from __doc__ """ float.__setformat__(typestr, fmt) -> None You probably don't want to use this function. It exists mainly to be used in Python's test suite. typestr must be 'double' or 'float'. fmt must be one of 'unknown', 'IEEE, big-endian' or 'IEEE, little-endian', and in addition can only be one of the latter two if it appears to match the underlying C reality. Override the automatic determination of C-level floating point type. This affects how floats are converted to and from binary strings. """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass def __sub__(self, y): # real signature unknown; restored from __doc__ """ x.__sub__(y) <==> x-y """ pass def __truediv__(self, y): # real signature unknown; restored from __doc__ """ x.__truediv__(y) <==> x/y """ pass def __trunc__(self, *args, **kwargs): # real signature unknown """ Return the Integral closest to x between 0 and x. """ pass imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the imaginary part of a complex number""" real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the real part of a complex number""" class frozenset(object): """ frozenset() -> empty frozenset object frozenset(iterable) -> frozenset object Build an immutable unordered collection of unique elements. """ def copy(self, *args, **kwargs): # real signature unknown """ Return a shallow copy of a set. """ pass def difference(self, *args, **kwargs): # real signature unknown """ Return the difference of two or more sets as a new set. (i.e. all elements that are in this set but not the others.) """ pass def intersection(self, *args, **kwargs): # real signature unknown """ Return the intersection of two or more sets as a new set. (i.e. 
elements that are common to all of the sets.) """ pass def isdisjoint(self, *args, **kwargs): # real signature unknown """ Return True if two sets have a null intersection. """ pass def issubset(self, *args, **kwargs): # real signature unknown """ Report whether another set contains this set. """ pass def issuperset(self, *args, **kwargs): # real signature unknown """ Report whether this set contains another set. """ pass def symmetric_difference(self, *args, **kwargs): # real signature unknown """ Return the symmetric difference of two sets as a new set. (i.e. all elements that are in exactly one of the sets.) """ pass def union(self, *args, **kwargs): # real signature unknown """ Return the union of sets as a new set. (i.e. all elements that are in either set.) """ pass def __and__(self, y): # real signature unknown; restored from __doc__ """ x.__and__(y) <==> x&y """ pass def __cmp__(self, y): # real signature unknown; restored from __doc__ """ x.__cmp__(y) <==> cmp(x,y) """ pass def __contains__(self, y): # real signature unknown; restored from __doc__ """ x.__contains__(y) <==> y in x. """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, seq=()): # known special case of frozenset.__init__ """ x.__init__(...) initializes x; see help(type(x)) for signature """ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __or__(self, y): # real signature unknown; restored from __doc__ """ x.__or__(y) <==> x|y """ pass def __rand__(self, y): # real signature unknown; restored from __doc__ """ x.__rand__(y) <==> y&x """ pass def __reduce__(self, *args, **kwargs): # real signature unknown """ Return state information for pickling. 
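
        Illustrative doctest (editorial addition, not part of the original
        CPython docstring; __reduce__ is the hook that makes pickling work,
        so the round trip below exercises it indirectly):

        >>> import pickle
        >>> pickle.loads(pickle.dumps(frozenset([1, 2]))) == frozenset([1, 2])
        True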
""" pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __ror__(self, y): # real signature unknown; restored from __doc__ """ x.__ror__(y) <==> y|x """ pass def __rsub__(self, y): # real signature unknown; restored from __doc__ """ x.__rsub__(y) <==> y-x """ pass def __rxor__(self, y): # real signature unknown; restored from __doc__ """ x.__rxor__(y) <==> y^x """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ S.__sizeof__() -> size of S in memory, in bytes """ pass def __sub__(self, y): # real signature unknown; restored from __doc__ """ x.__sub__(y) <==> x-y """ pass def __xor__(self, y): # real signature unknown; restored from __doc__ """ x.__xor__(y) <==> x^y """ pass class list(object): """ list() -> new empty list list(iterable) -> new list initialized from iterable's items """ def append(self, p_object): # real signature unknown; restored from __doc__ """ L.append(object) -- append object to end """ pass def count(self, value): # real signature unknown; restored from __doc__ """ L.count(value) -> integer -- return number of occurrences of value """ return 0 def extend(self, iterable): # real signature unknown; restored from __doc__ """ L.extend(iterable) -- extend list by appending elements from the iterable """ pass def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__ """ L.index(value, [start, [stop]]) -> integer -- return first index of value. Raises ValueError if the value is not present. """ return 0 def insert(self, index, p_object): # real signature unknown; restored from __doc__ """ L.insert(index, object) -- insert object before index """ pass def pop(self, index=None): # real signature unknown; restored from __doc__ """ L.pop([index]) -> item -- remove and return item at index (default last). Raises IndexError if list is empty or index is out of range. """ pass def remove(self, value): # real signature unknown; restored from __doc__ """ L.remove(value) -- remove first occurrence of value. Raises ValueError if the value is not present. """ pass def reverse(self): # real signature unknown; restored from __doc__ """ L.reverse() -- reverse *IN PLACE* """ pass def sort(self, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__ """ L.sort(cmp=None, key=None, reverse=False) -- stable sort *IN PLACE*; cmp(x, y) -> -1, 0, 1 """ pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __contains__(self, y): # real signature unknown; restored from __doc__ """ x.__contains__(y) <==> y in x """ pass def __delitem__(self, y): # real signature unknown; restored from __doc__ """ x.__delitem__(y) <==> del x[y] """ pass def __delslice__(self, i, j): # real signature unknown; restored from __doc__ """ x.__delslice__(i, j) <==> del x[i:j] Use of negative indices is not supported. """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __getslice__(self, i, j): # real signature unknown; restored from __doc__ """ x.__getslice__(i, j) <==> x[i:j] Use of negative indices is not supported. 
""" pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __iadd__(self, y): # real signature unknown; restored from __doc__ """ x.__iadd__(y) <==> x+=y """ pass def __imul__(self, y): # real signature unknown; restored from __doc__ """ x.__imul__(y) <==> x*=y """ pass def __init__(self, seq=()): # known special case of list.__init__ """ list() -> new empty list list(iterable) -> new list initialized from iterable's items # (copied from class doc) """ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __mul__(self, n): # real signature unknown; restored from __doc__ """ x.__mul__(n) <==> x*n """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __reversed__(self): # real signature unknown; restored from __doc__ """ L.__reversed__() -- return a reverse iterator over the list """ pass def __rmul__(self, n): # real signature unknown; restored from __doc__ """ x.__rmul__(n) <==> n*x """ pass def __setitem__(self, i, y): # real signature unknown; restored from __doc__ """ x.__setitem__(i, y) <==> x[i]=y """ pass def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__ """ x.__setslice__(i, j, y) <==> x[i:j]=y Use of negative indices is not supported. """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ L.__sizeof__() -- size of L in memory, in bytes """ pass __hash__ = None class long(object): """ long(x=0) -> long long(x, base=10) -> long Convert a number or string to a long integer, or return 0L if no arguments are given. If x is floating point, the conversion truncates towards zero. If x is not a number or if base is given, then x must be a string or Unicode object representing an integer literal in the given base. The literal can be preceded by '+' or '-' and be surrounded by whitespace. The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to interpret the base from the string as an integer literal. >>> int('0b100', base=0) 4L """ def bit_length(self): # real signature unknown; restored from __doc__ """ long.bit_length() -> int or long Number of bits necessary to represent self in binary. >>> bin(37L) '0b100101' >>> (37L).bit_length() 6 """ return 0 def conjugate(self, *args, **kwargs): # real signature unknown """ Returns self, the complex conjugate of any long. 
""" pass def __abs__(self): # real signature unknown; restored from __doc__ """ x.__abs__() <==> abs(x) """ pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __and__(self, y): # real signature unknown; restored from __doc__ """ x.__and__(y) <==> x&y """ pass def __cmp__(self, y): # real signature unknown; restored from __doc__ """ x.__cmp__(y) <==> cmp(x,y) """ pass def __coerce__(self, y): # real signature unknown; restored from __doc__ """ x.__coerce__(y) <==> coerce(x, y) """ pass def __divmod__(self, y): # real signature unknown; restored from __doc__ """ x.__divmod__(y) <==> divmod(x, y) """ pass def __div__(self, y): # real signature unknown; restored from __doc__ """ x.__div__(y) <==> x/y """ pass def __float__(self): # real signature unknown; restored from __doc__ """ x.__float__() <==> float(x) """ pass def __floordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__floordiv__(y) <==> x//y """ pass def __format__(self, *args, **kwargs): # real signature unknown pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getnewargs__(self, *args, **kwargs): # real signature unknown pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __hex__(self): # real signature unknown; restored from __doc__ """ x.__hex__() <==> hex(x) """ pass def __index__(self): # real signature unknown; restored from __doc__ """ x[y:z] <==> x[y.__index__():z.__index__()] """ pass def __init__(self, x=0): # real signature unknown; restored from __doc__ pass def __int__(self): # real signature unknown; restored from __doc__ """ x.__int__() <==> int(x) """ pass def __invert__(self): # real signature unknown; restored from __doc__ """ x.__invert__() <==> ~x """ pass def __long__(self): # real signature unknown; restored from __doc__ """ x.__long__() <==> long(x) """ pass def __lshift__(self, y): # real signature unknown; restored from __doc__ """ x.__lshift__(y) <==> x<<y """ pass def __mod__(self, y): # real signature unknown; restored from __doc__ """ x.__mod__(y) <==> x%y """ pass def __mul__(self, y): # real signature unknown; restored from __doc__ """ x.__mul__(y) <==> x*y """ pass def __neg__(self): # real signature unknown; restored from __doc__ """ x.__neg__() <==> -x """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass def __nonzero__(self): # real signature unknown; restored from __doc__ """ x.__nonzero__() <==> x != 0 """ pass def __oct__(self): # real signature unknown; restored from __doc__ """ x.__oct__() <==> oct(x) """ pass def __or__(self, y): # real signature unknown; restored from __doc__ """ x.__or__(y) <==> x|y """ pass def __pos__(self): # real signature unknown; restored from __doc__ """ x.__pos__() <==> +x """ pass def __pow__(self, y, z=None): # real signature unknown; restored from __doc__ """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """ pass def __radd__(self, y): # real signature unknown; restored from __doc__ """ x.__radd__(y) <==> y+x """ pass def __rand__(self, y): # real signature unknown; restored from __doc__ """ x.__rand__(y) <==> y&x """ pass def __rdivmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rdivmod__(y) <==> divmod(y, x) """ pass def __rdiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rdiv__(y) <==> y/x """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rfloordiv__(self, y): # real signature unknown; restored from __doc__ """ x.__rfloordiv__(y) <==> y//x """ pass def __rlshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rlshift__(y) <==> y<<x """ pass def __rmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rmod__(y) <==> y%x """ pass def __rmul__(self, y): # real signature unknown; restored from __doc__ """ x.__rmul__(y) <==> y*x """ pass def __ror__(self, y): # real signature unknown; restored from __doc__ """ x.__ror__(y) <==> y|x """ pass def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__ """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """ pass def __rrshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rrshift__(y) <==> y>>x """ pass def __rshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rshift__(y) <==> x>>y """ pass def __rsub__(self, y): # real signature unknown; restored from __doc__ """ x.__rsub__(y) <==> y-x """ pass def __rtruediv__(self, y): # real signature unknown; restored from __doc__ """ x.__rtruediv__(y) <==> y/x """ pass def __rxor__(self, y): # real signature unknown; restored from __doc__ """ x.__rxor__(y) <==> y^x """ pass def __sizeof__(self, *args, **kwargs): # real signature unknown """ Returns size in memory, in bytes """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass def __sub__(self, y): # real signature unknown; restored from __doc__ """ x.__sub__(y) <==> x-y """ pass def __truediv__(self, y): # real signature unknown; restored from __doc__ """ x.__truediv__(y) <==> x/y """ pass def __trunc__(self, *args, **kwargs): # real signature unknown """ Truncating an Integral returns itself. 
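
        Illustrative doctest (editorial addition, not part of the original
        CPython docstring):

        >>> import math
        >>> math.trunc(7L)  # math.trunc() delegates to __trunc__()
        7L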
""" pass def __xor__(self, y): # real signature unknown; restored from __doc__ """ x.__xor__(y) <==> x^y """ pass denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the denominator of a rational number in lowest terms""" imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the imaginary part of a complex number""" numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the numerator of a rational number in lowest terms""" real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """the real part of a complex number""" class memoryview(object): """ memoryview(object) Create a new memoryview object which references the given object. """ def tobytes(self, *args, **kwargs): # real signature unknown pass def tolist(self, *args, **kwargs): # real signature unknown pass def __delitem__(self, y): # real signature unknown; restored from __doc__ """ x.__delitem__(y) <==> del x[y] """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __init__(self, p_object): # real signature unknown; restored from __doc__ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __setitem__(self, i, y): # real signature unknown; restored from __doc__ """ x.__setitem__(i, y) <==> x[i]=y """ pass format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default itemsize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default ndim = property(lambda self: object(), lambda self, v: None, lambda self: None) # default readonly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default shape = property(lambda self: object(), lambda self, v: None, lambda self: None) # default strides = property(lambda self: object(), lambda self, v: None, lambda self: None) # default suboffsets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default class property(object): """ property(fget=None, fset=None, fdel=None, doc=None) -> property attribute fget is a function to be used for getting an attribute value, and likewise fset is a function for setting, and fdel a function for del'ing, an attribute. 
Typical use is to define a managed attribute x: class C(object): def getx(self): return self._x def setx(self, value): self._x = value def delx(self): del self._x x = property(getx, setx, delx, "I'm the 'x' property.") Decorators make defining new properties or modifying existing ones easy: class C(object): @property def x(self): "I am the 'x' property." return self._x @x.setter def x(self, value): self._x = value @x.deleter def x(self): del self._x """ def deleter(self, *args, **kwargs): # real signature unknown """ Descriptor to change the deleter on a property. """ pass def getter(self, *args, **kwargs): # real signature unknown """ Descriptor to change the getter on a property. """ pass def setter(self, *args, **kwargs): # real signature unknown """ Descriptor to change the setter on a property. """ pass def __delete__(self, obj): # real signature unknown; restored from __doc__ """ descr.__delete__(obj) """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __get__(self, obj, type=None): # real signature unknown; restored from __doc__ """ descr.__get__(obj[, type]) -> value """ pass def __init__(self, fget=None, fset=None, fdel=None, doc=None): # known special case of property.__init__ """ property(fget=None, fset=None, fdel=None, doc=None) -> property attribute fget is a function to be used for getting an attribute value, and likewise fset is a function for setting, and fdel a function for del'ing, an attribute. Typical use is to define a managed attribute x: class C(object): def getx(self): return self._x def setx(self, value): self._x = value def delx(self): del self._x x = property(getx, setx, delx, "I'm the 'x' property.") Decorators make defining new properties or modifying existing ones easy: class C(object): @property def x(self): "I am the 'x' property." return self._x @x.setter def x(self, value): self._x = value @x.deleter def x(self): del self._x # (copied from class doc) """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __set__(self, obj, value): # real signature unknown; restored from __doc__ """ descr.__set__(obj, value) """ pass fdel = property(lambda self: object(), lambda self, v: None, lambda self: None) # default fget = property(lambda self: object(), lambda self, v: None, lambda self: None) # default fset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default class reversed(object): """ reversed(sequence) -> reverse iterator over values of the sequence Return a reverse iterator """ def next(self): # real signature unknown; restored from __doc__ """ x.next() -> the next value, or raise StopIteration """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __init__(self, sequence): # real signature unknown; restored from __doc__ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __length_hint__(self, *args, **kwargs): # real signature unknown """ Private method returning an estimate of len(list(it)). """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass class set(object): """ set() -> new empty set object set(iterable) -> new set object Build an unordered collection of unique elements. """ def add(self, *args, **kwargs): # real signature unknown """ Add an element to a set. This has no effect if the element is already present. """ pass def clear(self, *args, **kwargs): # real signature unknown """ Remove all elements from this set. """ pass def copy(self, *args, **kwargs): # real signature unknown """ Return a shallow copy of a set. """ pass def difference(self, *args, **kwargs): # real signature unknown """ Return the difference of two or more sets as a new set. (i.e. all elements that are in this set but not the others.) """ pass def difference_update(self, *args, **kwargs): # real signature unknown """ Remove all elements of another set from this set. """ pass def discard(self, *args, **kwargs): # real signature unknown """ Remove an element from a set if it is a member. If the element is not a member, do nothing. """ pass def intersection(self, *args, **kwargs): # real signature unknown """ Return the intersection of two or more sets as a new set. (i.e. elements that are common to all of the sets.) """ pass def intersection_update(self, *args, **kwargs): # real signature unknown """ Update a set with the intersection of itself and another. """ pass def isdisjoint(self, *args, **kwargs): # real signature unknown """ Return True if two sets have a null intersection. """ pass def issubset(self, *args, **kwargs): # real signature unknown """ Report whether another set contains this set. """ pass def issuperset(self, *args, **kwargs): # real signature unknown """ Report whether this set contains another set. """ pass def pop(self, *args, **kwargs): # real signature unknown """ Remove and return an arbitrary set element. Raises KeyError if the set is empty. """ pass def remove(self, *args, **kwargs): # real signature unknown """ Remove an element from a set; it must be a member. If the element is not a member, raise a KeyError. """ pass def symmetric_difference(self, *args, **kwargs): # real signature unknown """ Return the symmetric difference of two sets as a new set. (i.e. all elements that are in exactly one of the sets.) """ pass def symmetric_difference_update(self, *args, **kwargs): # real signature unknown """ Update a set with the symmetric difference of itself and another. """ pass def union(self, *args, **kwargs): # real signature unknown """ Return the union of sets as a new set. (i.e. all elements that are in either set.) """ pass def update(self, *args, **kwargs): # real signature unknown """ Update a set with the union of itself and others. """ pass def __and__(self, y): # real signature unknown; restored from __doc__ """ x.__and__(y) <==> x&y """ pass def __cmp__(self, y): # real signature unknown; restored from __doc__ """ x.__cmp__(y) <==> cmp(x,y) """ pass def __contains__(self, y): # real signature unknown; restored from __doc__ """ x.__contains__(y) <==> y in x. 
""" pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __iand__(self, y): # real signature unknown; restored from __doc__ """ x.__iand__(y) <==> x&=y """ pass def __init__(self, seq=()): # known special case of set.__init__ """ set() -> new empty set object set(iterable) -> new set object Build an unordered collection of unique elements. # (copied from class doc) """ pass def __ior__(self, y): # real signature unknown; restored from __doc__ """ x.__ior__(y) <==> x|=y """ pass def __isub__(self, y): # real signature unknown; restored from __doc__ """ x.__isub__(y) <==> x-=y """ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __ixor__(self, y): # real signature unknown; restored from __doc__ """ x.__ixor__(y) <==> x^=y """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __or__(self, y): # real signature unknown; restored from __doc__ """ x.__or__(y) <==> x|y """ pass def __rand__(self, y): # real signature unknown; restored from __doc__ """ x.__rand__(y) <==> y&x """ pass def __reduce__(self, *args, **kwargs): # real signature unknown """ Return state information for pickling. """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __ror__(self, y): # real signature unknown; restored from __doc__ """ x.__ror__(y) <==> y|x """ pass def __rsub__(self, y): # real signature unknown; restored from __doc__ """ x.__rsub__(y) <==> y-x """ pass def __rxor__(self, y): # real signature unknown; restored from __doc__ """ x.__rxor__(y) <==> y^x """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ S.__sizeof__() -> size of S in memory, in bytes """ pass def __sub__(self, y): # real signature unknown; restored from __doc__ """ x.__sub__(y) <==> x-y """ pass def __xor__(self, y): # real signature unknown; restored from __doc__ """ x.__xor__(y) <==> x^y """ pass __hash__ = None class slice(object): """ slice(stop) slice(start, stop[, step]) Create a slice object. This is used for extended slicing (e.g. a[0:10:2]). """ def indices(self, len): # real signature unknown; restored from __doc__ """ S.indices(len) -> (start, stop, stride) Assuming a sequence of length len, calculate the start and stop indices, and the stride length of the extended slice described by S. Out of bounds indices are clipped in a manner consistent with the handling of normal slices. 
""" pass def __cmp__(self, y): # real signature unknown; restored from __doc__ """ x.__cmp__(y) <==> cmp(x,y) """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, stop): # real signature unknown; restored from __doc__ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __reduce__(self, *args, **kwargs): # real signature unknown """ Return state information for pickling. """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass start = property(lambda self: 0) """:type: int""" step = property(lambda self: 0) """:type: int""" stop = property(lambda self: 0) """:type: int""" class staticmethod(object): """ staticmethod(function) -> method Convert a function to be a static method. A static method does not receive an implicit first argument. To declare a static method, use this idiom: class C: def f(arg1, arg2, ...): ... f = staticmethod(f) It can be called either on the class (e.g. C.f()) or on an instance (e.g. C().f()). The instance is ignored except for its class. Static methods in Python are similar to those found in Java or C++. For a more advanced concept, see the classmethod builtin. """ def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __get__(self, obj, type=None): # real signature unknown; restored from __doc__ """ descr.__get__(obj[, type]) -> value """ pass def __init__(self, function): # real signature unknown; restored from __doc__ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass __func__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default class super(object): """ super(type, obj) -> bound super object; requires isinstance(obj, type) super(type) -> unbound super object super(type, type2) -> bound super object; requires issubclass(type2, type) Typical use to call a cooperative superclass method: class C(B): def meth(self, arg): super(C, self).meth(arg) """ def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __get__(self, obj, type=None): # real signature unknown; restored from __doc__ """ descr.__get__(obj[, type]) -> value """ pass def __init__(self, type1, type2=None): # known special case of super.__init__ """ super(type, obj) -> bound super object; requires isinstance(obj, type) super(type) -> unbound super object super(type, type2) -> bound super object; requires issubclass(type2, type) Typical use to call a cooperative superclass method: class C(B): def meth(self, arg): super(C, self).meth(arg) # (copied from class doc) """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass __self_class__ = property(lambda self: type(object)) """the type of the instance invoking super(); may be None :type: type """ __self__ = property(lambda self: type(object)) """the instance invoking super(); may be None :type: type """ __thisclass__ = property(lambda self: type(object)) """the class invoking super() :type: type """ class tuple(object): """ tuple() -> empty tuple tuple(iterable) -> tuple initialized from iterable's items If the argument is a tuple, the return value is the same object. """ def count(self, value): # real signature unknown; restored from __doc__ """ T.count(value) -> integer -- return number of occurrences of value """ return 0 def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__ """ T.index(value, [start, [stop]]) -> integer -- return first index of value. Raises ValueError if the value is not present. """ return 0 def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __contains__(self, y): # real signature unknown; restored from __doc__ """ x.__contains__(y) <==> y in x """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __getnewargs__(self, *args, **kwargs): # real signature unknown pass def __getslice__(self, i, j): # real signature unknown; restored from __doc__ """ x.__getslice__(i, j) <==> x[i:j] Use of negative indices is not supported. """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, seq=()): # known special case of tuple.__init__ """ tuple() -> empty tuple tuple(iterable) -> tuple initialized from iterable's items If the argument is a tuple, the return value is the same object. # (copied from class doc) """ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __mul__(self, n): # real signature unknown; restored from __doc__ """ x.__mul__(n) <==> x*n """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) 
-> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rmul__(self, n): # real signature unknown; restored from __doc__ """ x.__rmul__(n) <==> n*x """ pass class type(object): """ type(object) -> the object's type type(name, bases, dict) -> a new type """ def mro(self): # real signature unknown; restored from __doc__ """ mro() -> list return a type's method resolution order """ return [] def __call__(self, *more): # real signature unknown; restored from __doc__ """ x.__call__(...) <==> x(...) """ pass def __delattr__(self, name): # real signature unknown; restored from __doc__ """ x.__delattr__('name') <==> del x.name """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(cls, what, bases=None, dict=None): # known special case of type.__init__ """ type(object) -> the object's type type(name, bases, dict) -> a new type # (copied from class doc) """ pass def __instancecheck__(self): # real signature unknown; restored from __doc__ """ __instancecheck__() -> bool check if an object is an instance """ return False def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __setattr__(self, name, value): # real signature unknown; restored from __doc__ """ x.__setattr__('name', value) <==> x.name = value """ pass def __subclasscheck__(self): # real signature unknown; restored from __doc__ """ __subclasscheck__() -> bool check if a class is a subclass """ return False def __subclasses__(self): # real signature unknown; restored from __doc__ """ __subclasses__() -> list of immediate subclasses """ return [] __abstractmethods__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default __bases__ = ( object, ) __base__ = object __basicsize__ = 872 __dictoffset__ = 264 __dict__ = None # (!) real value is '' __flags__ = 2148423147 __itemsize__ = 40 __mro__ = ( None, # (!) forward: type, real value is '' object, ) __name__ = 'type' __weakrefoffset__ = 368 class unicode(basestring): """ unicode(object='') -> unicode object unicode(string[, encoding[, errors]]) -> unicode object Create a new Unicode object from the given encoded string. encoding defaults to the current default string encoding. errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'. 
""" def capitalize(self): # real signature unknown; restored from __doc__ """ S.capitalize() -> unicode Return a capitalized version of S, i.e. make the first character have upper case and the rest lower case. """ return u"" def center(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ S.center(width[, fillchar]) -> unicode Return S centered in a Unicode string of length width. Padding is done using the specified fill character (default is a space) """ return u"" def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.count(sub[, start[, end]]) -> int Return the number of non-overlapping occurrences of substring sub in Unicode string S[start:end]. Optional arguments start and end are interpreted as in slice notation. """ return 0 def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__ """ S.decode([encoding[,errors]]) -> string or unicode Decodes S using the codec registered for encoding. encoding defaults to the default encoding. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' as well as any other name registered with codecs.register_error that is able to handle UnicodeDecodeErrors. """ return "" def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__ """ S.encode([encoding[,errors]]) -> string or unicode Encodes S using the codec registered for encoding. encoding defaults to the default encoding. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as any other name registered with codecs.register_error that can handle UnicodeEncodeErrors. """ return "" def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__ """ S.endswith(suffix[, start[, end]]) -> bool Return True if S ends with the specified suffix, False otherwise. With optional start, test S beginning at that position. With optional end, stop comparing S at that position. suffix can also be a tuple of strings to try. """ return False def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__ """ S.expandtabs([tabsize]) -> unicode Return a copy of S where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed. """ return u"" def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.find(sub [,start [,end]]) -> int Return the lowest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. """ return 0 def format(self, *args, **kwargs): # known special case of unicode.format """ S.format(*args, **kwargs) -> unicode Return a formatted version of S, using substitutions from args and kwargs. The substitutions are identified by braces ('{' and '}'). """ pass def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.index(sub [,start [,end]]) -> int Like S.find() but raise ValueError when the substring is not found. 
""" return 0 def isalnum(self): # real signature unknown; restored from __doc__ """ S.isalnum() -> bool Return True if all characters in S are alphanumeric and there is at least one character in S, False otherwise. """ return False def isalpha(self): # real signature unknown; restored from __doc__ """ S.isalpha() -> bool Return True if all characters in S are alphabetic and there is at least one character in S, False otherwise. """ return False def isdecimal(self): # real signature unknown; restored from __doc__ """ S.isdecimal() -> bool Return True if there are only decimal characters in S, False otherwise. """ return False def isdigit(self): # real signature unknown; restored from __doc__ """ S.isdigit() -> bool Return True if all characters in S are digits and there is at least one character in S, False otherwise. """ return False def islower(self): # real signature unknown; restored from __doc__ """ S.islower() -> bool Return True if all cased characters in S are lowercase and there is at least one cased character in S, False otherwise. """ return False def isnumeric(self): # real signature unknown; restored from __doc__ """ S.isnumeric() -> bool Return True if there are only numeric characters in S, False otherwise. """ return False def isspace(self): # real signature unknown; restored from __doc__ """ S.isspace() -> bool Return True if all characters in S are whitespace and there is at least one character in S, False otherwise. """ return False def istitle(self): # real signature unknown; restored from __doc__ """ S.istitle() -> bool Return True if S is a titlecased string and there is at least one character in S, i.e. upper- and titlecase characters may only follow uncased characters and lowercase characters only cased ones. Return False otherwise. """ return False def isupper(self): # real signature unknown; restored from __doc__ """ S.isupper() -> bool Return True if all cased characters in S are uppercase and there is at least one cased character in S, False otherwise. """ return False def join(self, iterable): # real signature unknown; restored from __doc__ """ S.join(iterable) -> unicode Return a string which is the concatenation of the strings in the iterable. The separator between elements is S. """ return u"" def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ S.ljust(width[, fillchar]) -> int Return S left-justified in a Unicode string of length width. Padding is done using the specified fill character (default is a space). """ return 0 def lower(self): # real signature unknown; restored from __doc__ """ S.lower() -> unicode Return a copy of the string S converted to lowercase. """ return u"" def lstrip(self, chars=None): # real signature unknown; restored from __doc__ """ S.lstrip([chars]) -> unicode Return a copy of the string S with leading whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is a str, it will be converted to unicode before stripping """ return u"" def partition(self, sep): # real signature unknown; restored from __doc__ """ S.partition(sep) -> (head, sep, tail) Search for the separator sep in S, and return the part before it, the separator itself, and the part after it. If the separator is not found, return S and two empty strings. """ pass def replace(self, old, new, count=None): # real signature unknown; restored from __doc__ """ S.replace(old, new[, count]) -> unicode Return a copy of S with all occurrences of substring old replaced by new. 
If the optional argument count is given, only the first count occurrences are replaced. """ return u"" def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.rfind(sub [,start [,end]]) -> int Return the highest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. """ return 0 def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__ """ S.rindex(sub [,start [,end]]) -> int Like S.rfind() but raise ValueError when the substring is not found. """ return 0 def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__ """ S.rjust(width[, fillchar]) -> unicode Return S right-justified in a Unicode string of length width. Padding is done using the specified fill character (default is a space). """ return u"" def rpartition(self, sep): # real signature unknown; restored from __doc__ """ S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return the part before it, the separator itself, and the part after it. If the separator is not found, return two empty strings and S. """ pass def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__ """ S.rsplit([sep [,maxsplit]]) -> list of strings Return a list of the words in S, using sep as the delimiter string, starting at the end of the string and working to the front. If maxsplit is given, at most maxsplit splits are done. If sep is not specified, any whitespace string is a separator. """ return [] def rstrip(self, chars=None): # real signature unknown; restored from __doc__ """ S.rstrip([chars]) -> unicode Return a copy of the string S with trailing whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is a str, it will be converted to unicode before stripping """ return u"" def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__ """ S.split([sep [,maxsplit]]) -> list of strings Return a list of the words in S, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or is None, any whitespace string is a separator and empty strings are removed from the result. """ return [] def splitlines(self, keepends=False): # real signature unknown; restored from __doc__ """ S.splitlines(keepends=False) -> list of strings Return a list of the lines in S, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. """ return [] def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__ """ S.startswith(prefix[, start[, end]]) -> bool Return True if S starts with the specified prefix, False otherwise. With optional start, test S beginning at that position. With optional end, stop comparing S at that position. prefix can also be a tuple of strings to try. """ return False def strip(self, chars=None): # real signature unknown; restored from __doc__ """ S.strip([chars]) -> unicode Return a copy of the string S with leading and trailing whitespace removed. If chars is given and not None, remove characters in chars instead. 
If chars is a str, it will be converted to unicode before stripping """ return u"" def swapcase(self): # real signature unknown; restored from __doc__ """ S.swapcase() -> unicode Return a copy of S with uppercase characters converted to lowercase and vice versa. """ return u"" def title(self): # real signature unknown; restored from __doc__ """ S.title() -> unicode Return a titlecased version of S, i.e. words start with title case characters, all remaining cased characters have lower case. """ return u"" def translate(self, table): # real signature unknown; restored from __doc__ """ S.translate(table) -> unicode Return a copy of the string S, where all characters have been mapped through the given translation table, which must be a mapping of Unicode ordinals to Unicode ordinals, Unicode strings or None. Unmapped characters are left untouched. Characters mapped to None are deleted. """ return u"" def upper(self): # real signature unknown; restored from __doc__ """ S.upper() -> unicode Return a copy of S converted to uppercase. """ return u"" def zfill(self, width): # real signature unknown; restored from __doc__ """ S.zfill(width) -> unicode Pad a numeric string S with zeros on the left, to fill a field of the specified width. The string S is never truncated. """ return u"" def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown pass def _formatter_parser(self, *args, **kwargs): # real signature unknown pass def __add__(self, y): # real signature unknown; restored from __doc__ """ x.__add__(y) <==> x+y """ pass def __contains__(self, y): # real signature unknown; restored from __doc__ """ x.__contains__(y) <==> y in x """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __format__(self, format_spec): # real signature unknown; restored from __doc__ """ S.__format__(format_spec) -> unicode Return a formatted version of S as described by format_spec. """ return u"" def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __getnewargs__(self, *args, **kwargs): # real signature unknown pass def __getslice__(self, i, j): # real signature unknown; restored from __doc__ """ x.__getslice__(i, j) <==> x[i:j] Use of negative indices is not supported. """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, string=u'', encoding=None, errors='strict'): # known special case of unicode.__init__ """ unicode(object='') -> unicode object unicode(string[, encoding[, errors]]) -> unicode object Create a new Unicode object from the given encoded string. encoding defaults to the current default string encoding. errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'. 
# (copied from class doc) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __mod__(self, y): # real signature unknown; restored from __doc__ """ x.__mod__(y) <==> x%y """ pass def __mul__(self, n): # real signature unknown; restored from __doc__ """ x.__mul__(n) <==> x*n """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rmod__(self, y): # real signature unknown; restored from __doc__ """ x.__rmod__(y) <==> y%x """ pass def __rmul__(self, n): # real signature unknown; restored from __doc__ """ x.__rmul__(n) <==> n*x """ pass def __sizeof__(self): # real signature unknown; restored from __doc__ """ S.__sizeof__() -> size of S in memory, in bytes """ pass def __str__(self): # real signature unknown; restored from __doc__ """ x.__str__() <==> str(x) """ pass class xrange(object): """ xrange(stop) -> xrange object xrange(start, stop[, step]) -> xrange object Like range(), but instead of returning a list, returns an object that generates the numbers in the range on demand. For looping, this is slightly faster than range() and more memory efficient. """ def __getattribute__(self, name): # real signature unknown; restored from __doc__ """ x.__getattribute__('name') <==> x.name """ pass def __getitem__(self, y): # real signature unknown; restored from __doc__ """ x.__getitem__(y) <==> x[y] """ pass def __init__(self, stop): # real signature unknown; restored from __doc__ pass def __iter__(self): # real signature unknown; restored from __doc__ """ x.__iter__() <==> iter(x) """ pass def __len__(self): # real signature unknown; restored from __doc__ """ x.__len__() <==> len(x) """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __reduce__(self, *args, **kwargs): # real signature unknown pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __reversed__(self, *args, **kwargs): # real signature unknown """ Returns a reverse iterator. """ pass # variables with complex values Ellipsis = None # (!) real value is '' NotImplemented = None # (!) real value is ''
apache-2.0
-3,999,963,882,647,524,400
-1,852,694,252,454,972,000
32.994358
115
0.559162
false
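The builtins skeleton above documents the property descriptor only through restored signatures, so a compact sketch may help; the Celsius class and its validation threshold below are illustrative, not part of the stub.

# A minimal sketch of the property descriptor described in the skeleton
# above, in decorator form. The Celsius class is hypothetical.
class Celsius(object):
    def __init__(self, degrees=0.0):
        self._degrees = degrees

    @property
    def degrees(self):
        """I am the 'degrees' property."""
        return self._degrees

    @degrees.setter
    def degrees(self, value):
        if value < -273.15:  # illustrative validation done in the setter
            raise ValueError("below absolute zero")
        self._degrees = value

c = Celsius()
c.degrees = 21.5   # routed through the setter
print(c.degrees)   # routed through the getter -> 21.5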
allenlavoie/tensorflow
tensorflow/contrib/autograph/converters/logical_expressions.py
5
4878
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions.

e.g. `a and b -> tf.logical_and(a, b)`. This is not done automatically in TF.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gast

from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer

# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that for loops, this should be done after the loop is converted to
# tf.while_loop so that the expanded conditionals are properly scoped.

# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'


class LogicalExpressionTransformer(transformer.Base):
  """Converts logical expressions to corresponding TF calls."""

  def __init__(self, context):
    super(LogicalExpressionTransformer, self).__init__(context)
    # TODO(mdan): Look into replacing with bitwise operators instead.
    # TODO(mdan): Skip replacing if the function is trivial.
self.op_mapping = { gast.And: 'tf.logical_and', gast.Eq: 'tf.equal', gast.Gt: 'tf.greater', gast.GtE: 'tf.greater_equal', gast.Lt: 'tf.less', gast.LtE: 'tf.less_equal', gast.Not: 'tf.logical_not', gast.NotEq: 'tf.not_equal', gast.Or: 'tf.logical_or', gast.USub: 'tf.negative', gast.Is: 'autograph_utils.dynamic_is', gast.IsNot: 'autograph_utils.dynamic_is_not' } def _expect_simple_symbol(self, operand): if isinstance(operand, gast.Name): return if anno.hasanno(operand, SAFE_BOOLEAN_OPERAND): return raise NotImplementedError( 'only simple local variables are supported in logical and compound ' 'comparison expressions; for example, we support "a or b" but not ' '"a.x or b"; for a workaround, assign the expression to a local ' 'variable and use that instead, for example "tmp = a.x", "tmp or b"') def _matching_func(self, operator): op_type = type(operator) mapped_op = self.op_mapping.get(op_type) if not mapped_op: raise NotImplementedError('operator %s is not yet supported' % op_type) return mapped_op def _as_function(self, func_name, args): template = """ func_name(args) """ replacement = templates.replace_as_expression( template, func_name=parser.parse_expression(func_name), args=args) anno.setanno(replacement, SAFE_BOOLEAN_OPERAND, True) return replacement def visit_Compare(self, node): node = self.generic_visit(node) ops_and_comps = list(zip(node.ops, node.comparators)) left = node.left op_tree = None # Repeated comparisons are converted to conjunctions: # a < b < c -> a < b and b < c while ops_and_comps: op, right = ops_and_comps.pop(0) binary_comparison = self._as_function( self._matching_func(op), (left, right)) if isinstance(left, gast.Name) and isinstance(right, gast.Name): anno.setanno(binary_comparison, SAFE_BOOLEAN_OPERAND, True) if op_tree: self._expect_simple_symbol(right) op_tree = self._as_function('tf.logical_and', (binary_comparison, op_tree)) else: op_tree = binary_comparison left = right assert op_tree is not None return op_tree def visit_UnaryOp(self, node): node = self.generic_visit(node) return self._as_function(self._matching_func(node.op), node.operand) def visit_BoolOp(self, node): node = self.generic_visit(node) node_values = node.values right = node.values.pop() self._expect_simple_symbol(right) while node_values: left = node_values.pop() self._expect_simple_symbol(left) right = self._as_function(self._matching_func(node.op), (left, right)) return right def transform(node, context): return LogicalExpressionTransformer(context).visit(node)
apache-2.0
807,492,755,893,797,100
-1,904,011,213,675,941,600
35.954545
80
0.672202
false
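As a rough illustration of the chained-comparison expansion that visit_Compare performs above (a < b < c becoming a conjunction of pairwise comparisons), here is a plain-Python sketch; expand_chain and its operator list are made-up names, and the real converter emits tf.logical_and AST nodes rather than Python's `and`.

import operator

# Hypothetical model of visit_Compare's rewrite: a chained comparison is
# unrolled into pairwise comparisons joined by a conjunction. The real
# transformer builds tf.logical_and(...) calls instead of using `and`.
def expand_chain(left, ops_and_comps):
    op_tree = None
    for op, right in ops_and_comps:
        pairwise = op(left, right)          # e.g. tf.less(a, b)
        op_tree = pairwise if op_tree is None else (op_tree and pairwise)
        left = right                        # b becomes the new left operand
    return op_tree

# a < b < c  with  a, b, c = 1, 2, 3
print(expand_chain(1, [(operator.lt, 2), (operator.lt, 3)]))  # True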
sangwonl/stage34
webapp/api/handlers/stage.py
1
6612
from django.views import View
from django.conf import settings

from datetime import datetime

from api.helpers.mixins import AuthRequiredMixin
from api.helpers.http.jsend import JSENDSuccess, JSENDError
from api.models.resources import Membership, Stage
from libs.utils.model_ext import model_to_dict
from worker.tasks.deployment import (
    task_provision_stage,
    task_change_stage_status,
    task_delete_stage,
    task_refresh_stage
)

import pytz
import os
import json
import jwt

SERIALIZE_FIELDS = [
    'id', 'title', 'endpoint', 'status', 'repo',
    'default_branch', 'branch', 'created_at'
]


class StageRootHandler(AuthRequiredMixin, View):
    def get(self, request, *args, **kwargs):
        org = Membership.get_org_of_user(request.user)
        if not org:
            return JSENDError(status_code=400, msg='org not found')

        stages_qs = Stage.objects.filter(org=org)
        stages = [model_to_dict(s, fields=SERIALIZE_FIELDS) for s in stages_qs]
        return JSENDSuccess(status_code=200, data=stages)

    def post(self, request, *args, **kwargs):
        json_body = json.loads(request.body)
        title = json_body.get('title')
        repo = json_body.get('repo')
        branch = json_body.get('branch')
        default_branch = json_body.get('default_branch')
        run_on_create = json_body.get('run_on_create', False)

        if not (title and repo and default_branch and branch):
            return JSENDError(status_code=400, msg='invalid stage info')

        org = Membership.get_org_of_user(request.user)
        if not org:
            return JSENDError(status_code=400, msg='org not found')

        stage = Stage.objects.create(
            org=org,
            title=title,
            repo=repo,
            default_branch=default_branch,
            branch=branch
        )

        github_access_key = request.user.jwt_payload.get('access_token')
        task_provision_stage.apply_async(
            args=[github_access_key, stage.id, repo, branch, run_on_create])

        stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
        return JSENDSuccess(status_code=200, data=stage_dict)


class StageDetailHandler(AuthRequiredMixin, View):
    def get_stage(self, org, stage_id):
        try:
            stage = Stage.objects.get(org=org, id=stage_id)
        except Stage.DoesNotExist:
            return None
        return stage

    def get(self, request, stage_id, *args, **kwargs):
        org = Membership.get_org_of_user(request.user)
        if not org:
            return JSENDError(status_code=400, msg='org not found')

        stage = self.get_stage(org, stage_id)
        if not stage:
            return JSENDError(status_code=404, msg='stage not found')

        stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
        return JSENDSuccess(status_code=200, data=stage_dict)

    def put(self, request, stage_id, *args, **kwargs):
        json_body = json.loads(request.body)
        new_status = json_body.get('status')
        if not new_status or new_status not in ('running', 'paused'):
            return JSENDError(status_code=400, msg='invalid stage status')

        org = Membership.get_org_of_user(request.user)
        if not org:
            return JSENDError(status_code=400, msg='org not found')

        stage = self.get_stage(org, stage_id)
        if not stage:
            return JSENDError(status_code=404, msg='stage not found')

        cur_status = stage.status
        if cur_status != new_status:
            github_access_key = request.user.jwt_payload.get('access_token')
            task_change_stage_status.apply_async(
                args=[github_access_key, stage_id, new_status])
            new_status = 'changing'

        stage.title = json_body.get('title', stage.title)
        stage.repo = json_body.get('repo', stage.repo)
        stage.default_branch = json_body.get('default_branch', stage.default_branch)
        stage.branch = json_body.get('branch', stage.branch)
        stage.status = new_status
        stage.save()

        return JSENDSuccess(status_code=204)

    def delete(self, request, stage_id, *args, **kwargs):
        org = Membership.get_org_of_user(request.user)
        if not org:
            return JSENDError(status_code=400, msg='org not found')

        stage = self.get_stage(org, stage_id)
        if not stage:
            return JSENDError(status_code=404, msg='stage not found')

        stage.status = 'deleting'
        stage.save()

        github_access_key = request.user.jwt_payload.get('access_token')
        task_delete_stage.apply_async(args=[github_access_key, stage_id])
        return JSENDSuccess(status_code=204)


class StageLogHandler(AuthRequiredMixin, View):
    def get_log_path(self, stage_id):
        return os.path.join(settings.STAGE_REPO_HOME, stage_id, 'output.log')

    def get(self, request, stage_id, *args, **kwargs):
        org = Membership.get_org_of_user(request.user)
        if not org:
            return JSENDError(status_code=400, msg='org not found')

        log_path = self.get_log_path(stage_id)
        if not os.path.exists(log_path):
            return JSENDError(status_code=404, msg='log file not found')

        log_msgs = []
        with open(log_path, 'rt') as f:
            log_msg = f.read()
            log_msgs = [l for l in log_msg.split('\n') if l]

        ts = os.path.getmtime(log_path)
        tz = pytz.timezone(settings.TIME_ZONE)
        dt = datetime.fromtimestamp(ts, tz=tz)

        log_data = {'log_messages': log_msgs, 'log_time': dt.isoformat()}
        return JSENDSuccess(status_code=200, data=log_data)


class StageRefreshHandler(AuthRequiredMixin, View):
    def get_stage(self, org, stage_id):
        try:
            stage = Stage.objects.get(org=org, id=stage_id)
        except Stage.DoesNotExist:
            return None
        return stage

    def post(self, request, stage_id, *args, **kwargs):
        org = Membership.get_org_of_user(request.user)
        if not org:
            return JSENDError(status_code=400, msg='org not found')

        stage = self.get_stage(org, stage_id)
        if not stage:
            return JSENDError(status_code=404, msg='stage not found')

        github_access_key = request.user.jwt_payload.get('access_token')
        task_refresh_stage.apply_async(args=[github_access_key, stage_id])

        stage.status = 'changing'
        stage.save()

        return JSENDSuccess(status_code=204)
mit
8,468,166,173,420,534,000
-6,769,447,188,306,844,000
33.082474
105
0.628403
false
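The handlers above all answer in a JSEND-style envelope via JSENDSuccess/JSENDError; the helpers below are hypothetical stand-ins for that convention, inferred from the class names, not the actual wrappers from api.helpers.http.jsend (which return Django responses).

import json

# Hypothetical stand-ins for JSENDSuccess / JSENDError, showing the shape
# of the payloads the handlers above would produce.
def jsend_success(data=None):
    return json.dumps({"status": "success", "data": data})

def jsend_error(msg, code=None):
    body = {"status": "error", "message": msg}
    if code is not None:
        body["code"] = code
    return json.dumps(body)

print(jsend_success({"id": 1, "title": "feature-stage", "status": "running"}))
print(jsend_error("org not found", code=400))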
globaltoken/globaltoken
test/functional/test_framework/authproxy.py
1
7759
# Copyright (c) 2011 Jeff Garzik # # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: # # Copyright (c) 2007 Jan-Klaas Kollhof # # This file is part of jsonrpc. # # jsonrpc is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """HTTP proxy for opening RPC connection to globaltokend. AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) - sends protocol 'version', per JSON-RPC 1.1 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal - uses standard Python json lib """ import base64 import decimal import http.client import json import logging import socket import time import urllib.parse HTTP_TIMEOUT = 30 USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") class JSONRPCException(Exception): def __init__(self, rpc_error): try: errmsg = '%(message)s (%(code)i)' % rpc_error except (KeyError, TypeError): errmsg = '' super().__init__(errmsg) self.error = rpc_error def EncodeDecimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") class AuthServiceProxy(): __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests self.__url = urllib.parse.urlparse(service_url) port = 80 if self.__url.port is None else self.__url.port user = None if self.__url.username is None else self.__url.username.encode('utf8') passwd = None if self.__url.password is None else self.__url.password.encode('utf8') authpair = user + b':' + passwd self.__auth_header = b'Basic ' + base64.b64encode(authpair) if connection: # Callables re-use the connection of the original proxy self.__conn = connection elif self.__url.scheme == 'https': self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout) else: self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): # Python internal stuff raise AttributeError if self._service_name is not None: name = "%s.%s" % (self._service_name, name) return AuthServiceProxy(self.__service_url, name, connection=self.__conn) def _request(self, method, path, postdata): ''' Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. 
''' headers = {'Host': self.__url.hostname, 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} try: self.__conn.request(method, path, postdata, headers) return self._get_response() except http.client.BadStatusLine as e: if e.line == "''": # if connection was closed, try again self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset # ConnectionResetError happens on FreeBSD with Python 3.4 self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name, json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) if args and argsn: raise ValueError('Cannot handle both named and positional arguments') return {'version': '1.1', 'method': self._service_name, 'params': args or argsn, 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) response = self._request('POST', self.__url.path, postdata.encode('utf-8')) if response['error'] is not None: raise JSONRPCException(response['error']) elif 'result' not in response: raise JSONRPCException({ 'code': -343, 'message': 'missing JSON-RPC result'}) else: return response['result'] def batch(self, rpc_call_list): postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) log.debug("--> " + postdata) return self._request('POST', self.__url.path, postdata.encode('utf-8')) def _get_response(self): req_start_time = time.time() try: http_response = self.__conn.getresponse() except socket.timeout as e: raise JSONRPCException({ 'code': -344, 'message': '%r RPC took longer than %f seconds. Consider ' 'using larger timeout for calls that take ' 'longer to return.' % (self._service_name, self.__conn.timeout)}) if http_response is None: raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException({ 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}) responsedata = http_response.read().decode('utf8') response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) else: log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) return response def __truediv__(self, relative_uri): return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
mit
6,470,691,205,206,189,000
-2,075,112,503,874,044,200
42.105556
155
0.621601
false
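A short usage sketch for the proxy class defined above; the URL, credentials, and RPC method below are placeholders, and a node must actually be listening on that endpoint for the call to succeed.

# Hedged usage sketch for AuthServiceProxy. Attribute access builds the
# JSON-RPC method name; __call__ sends the request.
from test_framework.authproxy import AuthServiceProxy, JSONRPCException

proxy = AuthServiceProxy("http://user:pass@127.0.0.1:8332")
try:
    info = proxy.getblockchaininfo()   # dispatched via __getattr__/__call__
    print(info["blocks"])
except JSONRPCException as e:
    print("RPC error:", e.error)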
jtomasek/tuskar-ui-1
tuskar_ui/infrastructure/resource_management/resource_classes/workflows.py
1
12384
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import workflows from tuskar_ui import api as tuskar import tuskar_ui.workflows import re from tuskar_ui.infrastructure. \ resource_management.resource_classes.tables import FlavorTemplatesTable from tuskar_ui.infrastructure. \ resource_management.resource_classes.tables import RacksTable class ResourceClassInfoAndFlavorsAction(workflows.Action): name = forms.CharField(max_length=255, label=_("Class Name"), help_text="", required=True) service_type = forms.ChoiceField(label=_('Class Type'), required=True, choices=[('', ''), ('compute', ('Compute')), ('not_compute', ('Non Compute')), ], widget=forms.Select( attrs={'class': 'switchable'}) ) image = forms.ChoiceField(label=_('Provisioning Image'), required=True, choices=[('compute-img', ('overcloud-compute'))], widget=forms.Select( attrs={'class': 'switchable'}) ) def clean(self): cleaned_data = super(ResourceClassInfoAndFlavorsAction, self).clean() name = cleaned_data.get('name') resource_class_id = self.initial.get('resource_class_id', None) try: resource_classes = tuskar.ResourceClass.list(self.request) except Exception: resource_classes = [] msg = _('Unable to get resource class list') exceptions.check_message(["Connection", "refused"], msg) raise for resource_class in resource_classes: if resource_class.name == name and \ resource_class_id != resource_class.id: raise forms.ValidationError( _('The name "%s" is already used by' ' another resource class.') % name ) return cleaned_data class Meta: name = _("Class Settings") help_text = _("From here you can fill the class " "settings and add flavors to class.") class CreateResourceClassInfoAndFlavors(tuskar_ui.workflows.TableStep): table_classes = (FlavorTemplatesTable,) action_class = ResourceClassInfoAndFlavorsAction template_name = 'infrastructure/resource_management/resource_classes/'\ '_resource_class_info_and_flavors_step.html' contributes = ("name", "service_type", "flavors_object_ids", 'max_vms') def contribute(self, data, context): request = self.workflow.request if data: context["flavors_object_ids"] =\ request.POST.getlist("flavors_object_ids") # todo: lsmola django can't parse dictionaruy from POST # this should be rewritten to django formset context["max_vms"] = {} for index, value in request.POST.items(): match = re.match( '^(flavors_object_ids__max_vms__(.*?))$', index) if match: context["max_vms"][match.groups()[1]] = value context.update(data) return context def get_flavors_data(self): try: resource_class_id = self.workflow.context.get("resource_class_id") if resource_class_id: resource_class = tuskar.ResourceClass.get( self.workflow.request, resource_class_id) # TODO(lsmola ugly interface, rewrite) self._tables['flavors'].active_multi_select_values = \ resource_class.flavortemplates_ids all_flavors = resource_class.all_flavors else: 
all_flavors = tuskar.FlavorTemplate.list(
                    self.workflow.request)
        except Exception:
            all_flavors = []
            exceptions.handle(self.workflow.request,
                              _('Unable to retrieve resource flavors list.'))
        return all_flavors


class RacksAction(workflows.Action):
    class Meta:
        name = _("Racks")


class CreateRacks(tuskar_ui.workflows.TableStep):
    table_classes = (RacksTable,)
    action_class = RacksAction
    contributes = ("racks_object_ids",)
    template_name = 'infrastructure/resource_management/'\
                    'resource_classes/_racks_step.html'

    def contribute(self, data, context):
        request = self.workflow.request
        context["racks_object_ids"] =\
            request.POST.getlist("racks_object_ids")
        context.update(data)
        return context

    def get_racks_data(self):
        try:
            resource_class_id = self.workflow.context.get("resource_class_id")
            if resource_class_id:
                resource_class = tuskar.ResourceClass.get(
                    self.workflow.request, resource_class_id)
                # TODO(lsmola): ugly interface, rewrite
                self._tables['racks'].active_multi_select_values = \
                    resource_class.racks_ids
                racks = \
                    resource_class.all_racks
            else:
                racks = \
                    tuskar.Rack.list(self.workflow.request, True)
        except Exception:
            racks = []
            exceptions.handle(self.workflow.request,
                              _('Unable to retrieve racks list.'))
        return racks


class ResourceClassWorkflowMixin:
    # FIXME: active tab conflict.
    # When on a page with tabs, the workflow with more steps is used;
    # there is a conflict of active tabs and it always shows the
    # first tab after an action. So we explicitly specify which
    # tab to redirect to after the action, until the conflict is
    # fixed in Horizon.
    def get_index_url(self):
        """This URL is used as both the success and the failure URL."""
        return "%s?tab=resource_management_tabs__resource_classes_tab" %\
            reverse("horizon:infrastructure:resource_management:index")

    def get_success_url(self):
        return self.get_index_url()

    def get_failure_url(self):
        return self.get_index_url()

    def format_status_message(self, message):
        name = self.context.get('name')
        return message % name

    def _get_flavors(self, request, data):
        flavors = []
        flavor_ids = data.get('flavors_object_ids') or []
        max_vms = data.get('max_vms')
        resource_class_name = data['name']
        for template_id in flavor_ids:
            template = tuskar.FlavorTemplate.get(request, template_id)
            capacities = []
            for c in template.capacities:
                capacities.append({'name': c.name,
                                   'value': str(c.value),
                                   'unit': c.unit})
            # FIXME: tuskar uses a resource-class-name prefix for flavors,
            # e.g.
m1.large; we prepend the resource class name to the template name:
            flavor_name = "%s.%s" % (resource_class_name, template.name)
            flavors.append({'name': flavor_name,
                            'max_vms': max_vms.get(template.id, None),
                            'capacities': capacities})
        return flavors

    def _add_racks(self, request, data, resource_class):
        ids_to_add = data.get('racks_object_ids') or []
        resource_class.set_racks(request, ids_to_add)


class CreateResourceClass(ResourceClassWorkflowMixin,
                          workflows.Workflow):
    default_steps = (CreateResourceClassInfoAndFlavors,
                     CreateRacks)

    slug = "create_resource_class"
    name = _("Create Class")
    finalize_button_name = _("Create Class")
    success_message = _('Created class "%s".')
    failure_message = _('Unable to create class "%s".')

    def _create_resource_class_info(self, request, data):
        try:
            flavors = self._get_flavors(request, data)
            return tuskar.ResourceClass.create(
                request, name=data['name'],
                service_type=data['service_type'],
                flavors=flavors)
        except Exception:
            redirect = self.get_failure_url()
            exceptions.handle(request,
                              _('Unable to create resource class.'),
                              redirect=redirect)
            return None

    def handle(self, request, data):
        resource_class = self._create_resource_class_info(request, data)
        self._add_racks(request, data, resource_class)
        return True


class UpdateResourceClassInfoAndFlavors(CreateResourceClassInfoAndFlavors):
    depends_on = ("resource_class_id",)


class UpdateRacks(CreateRacks):
    depends_on = ("resource_class_id",)


class UpdateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow):
    default_steps = (UpdateResourceClassInfoAndFlavors,
                     UpdateRacks)

    slug = "update_resource_class"
    name = _("Update Class")
    finalize_button_name = _("Update Class")
    success_message = _('Updated class "%s".')
    failure_message = _('Unable to update class "%s".')

    def _update_resource_class_info(self, request, data):
        try:
            flavors = self._get_flavors(request, data)
            return tuskar.ResourceClass.update(
                request, data['resource_class_id'],
                name=data['name'],
                service_type=data['service_type'],
                flavors=flavors)
        except Exception:
            redirect = self.get_failure_url()
            exceptions.handle(request,
                              _('Unable to update resource class.'),
                              redirect=redirect)
            return None

    def handle(self, request, data):
        resource_class = self._update_resource_class_info(request, data)
        self._add_racks(request, data, resource_class)
        return True


class DetailUpdateWorkflow(UpdateResourceClass):
    def get_index_url(self):
        """This URL is used as both the success and the failure URL."""
        url = "horizon:infrastructure:resource_management:resource_classes:"\
              "detail"
        return "%s?tab=resource_class_details__overview" % (
            reverse(url, args=(self.context["resource_class_id"],)))


class UpdateRacksWorkflow(UpdateResourceClass):
    def get_index_url(self):
        """This URL is used as both the success and the failure URL."""
        url = "horizon:infrastructure:resource_management:resource_classes:"\
              "detail"
        return "%s?tab=resource_class_details__racks" % (
            reverse(url, args=(self.context["resource_class_id"],)))


class UpdateFlavorsWorkflow(UpdateResourceClass):
    def get_index_url(self):
        """This URL is used as both the success and the failure URL."""
        url = "horizon:infrastructure:resource_management:resource_classes:"\
              "detail"
        return "%s?tab=resource_class_details__flavors" % (
            reverse(url, args=(self.context["resource_class_id"],)))
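# Illustrative sketch, not part of the original tuskar_ui module: it makes
# the payload built by _get_flavors() above concrete by mirroring its loop
# body with plain stand-in objects. _FakeCapacity and _FakeTemplate are
# hypothetical; the real objects come from tuskar.FlavorTemplate.get().
class _FakeCapacity(object):
    def __init__(self, name, value, unit):
        self.name, self.value, self.unit = name, value, unit


class _FakeTemplate(object):
    def __init__(self, id, name, capacities):
        self.id, self.name, self.capacities = id, name, capacities


def _sketch_flavor_payload():
    template = _FakeTemplate('1', 'large',
                             [_FakeCapacity('cpu', 4, 'count'),
                              _FakeCapacity('memory', 8192, 'MB')])
    # As parsed from a POST key like flavors_object_ids__max_vms__1.
    max_vms = {'1': '10'}
    capacities = [{'name': c.name, 'value': str(c.value), 'unit': c.unit}
                  for c in template.capacities]
    # The resource class name is prepended, so resource class "compute"
    # plus template "large" yields the flavor name "compute.large".
    return {'name': '%s.%s' % ('compute', template.name),
            'max_vms': max_vms.get(template.id, None),
            'capacities': capacities}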
apache-2.0
-8,736,488,812,325,475,000
-287,569,140,465,058,370
37.222222
79
0.566053
false
khushboo9293/postorius
src/postorius/models.py
2
11004
# -*- coding: utf-8 -*- # Copyright (C) 1998-2015 by the Free Software Foundation, Inc. # # This file is part of Postorius. # # Postorius is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # Postorius is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # Postorius. If not, see <http://www.gnu.org/licenses/>. from __future__ import ( absolute_import, division, print_function, unicode_literals) import random import hashlib import logging from datetime import datetime, timedelta from django.conf import settings from django.contrib.auth.models import User from django.core.exceptions import ImproperlyConfigured from django.core.mail import send_mail from django.db.models.signals import post_save from django.core.urlresolvers import reverse from django.dispatch import receiver from django.db import models from django.http import Http404 from django.template import Context from django.template.loader import get_template from mailmanclient import MailmanConnectionError from postorius.utils import get_client from urllib2 import HTTPError logger = logging.getLogger(__name__) @receiver(post_save, sender=User) def create_mailman_user(sender, **kwargs): if kwargs.get('created'): autocreate = False try: autocreate = settings.AUTOCREATE_MAILMAN_USER except AttributeError: pass if autocreate: user = kwargs.get('instance') client = get_client() try: client.create_user(user.email, None, None) except HTTPError: pass class MailmanApiError(Exception): """Raised if the API is not available. """ pass class Mailman404Error(Exception): """Proxy exception. Raised if the API returns 404.""" pass class MailmanRestManager(object): """Manager class to give a model class CRUD access to the API. Returns objects (or lists of objects) retrived from the API. """ def __init__(self, resource_name, resource_name_plural, cls_name=None): self.resource_name = resource_name self.resource_name_plural = resource_name_plural def all(self): try: return getattr(get_client(), self.resource_name_plural) except AttributeError: raise MailmanApiError except MailmanConnectionError, e: raise MailmanApiError(e) def get(self, **kwargs): try: method = getattr(get_client(), 'get_' + self.resource_name) return method(**kwargs) except AttributeError, e: raise MailmanApiError(e) except HTTPError, e: if e.code == 404: raise Mailman404Error('Mailman resource could not be found.') else: raise except MailmanConnectionError, e: raise MailmanApiError(e) def get_or_404(self, **kwargs): """Similar to `self.get` but raises standard Django 404 error. """ try: return self.get(**kwargs) except Mailman404Error: raise Http404 except MailmanConnectionError, e: raise MailmanApiError(e) def create(self, **kwargs): try: method = getattr(get_client(), 'create_' + self.resource_name) return method(**kwargs) except AttributeError, e: raise MailmanApiError(e) except HTTPError, e: if e.code == 409: raise MailmanApiError else: raise except MailmanConnectionError: raise MailmanApiError def delete(self): """Not implemented since the objects returned from the API have a `delete` method of their own. 
""" pass class MailmanListManager(MailmanRestManager): def __init__(self): super(MailmanListManager, self).__init__('list', 'lists') def all(self, only_public=False): try: objects = getattr(get_client(), self.resource_name_plural) except AttributeError: raise MailmanApiError except MailmanConnectionError, e: raise MailmanApiError(e) if only_public: public = [] for obj in objects: if obj.settings.get('advertised', False): public.append(obj) return public else: return objects def by_mail_host(self, mail_host, only_public=False): objects = self.all(only_public) host_objects = [] for obj in objects: if obj.mail_host == mail_host: host_objects.append(obj) return host_objects class MailmanRestModel(object): """Simple REST Model class to make REST API calls Django style. """ MailmanApiError = MailmanApiError DoesNotExist = Mailman404Error def __init__(self, **kwargs): self.kwargs = kwargs def save(self): """Proxy function for `objects.create`. (REST API uses `create`, while Django uses `save`.) """ self.objects.create(**self.kwargs) class Domain(MailmanRestModel): """Domain model class. """ objects = MailmanRestManager('domain', 'domains') class List(MailmanRestModel): """List model class. """ objects = MailmanListManager() class MailmanUser(MailmanRestModel): """MailmanUser model class. """ objects = MailmanRestManager('user', 'users') class Member(MailmanRestModel): """Member model class. """ objects = MailmanRestManager('member', 'members') class AddressConfirmationProfileManager(models.Manager): """ Manager class for AddressConfirmationProfile. """ def create_profile(self, email, user): # Create or update a profile # Guarantee an email bytestr type that can be fed to hashlib. email_str = email if isinstance(email_str, unicode): email_str = email_str.encode('utf-8') activation_key = hashlib.sha1( str(random.random())+email_str).hexdigest() # Make now tz naive (we don't care about the timezone) now = datetime.now().replace(tzinfo=None) # Either update an existing profile record for the given email address try: profile = self.get(email=email) profile.activation_key = activation_key profile.created = now profile.save() # ... or create a new one. except AddressConfirmationProfile.DoesNotExist: profile = self.create(email=email, activation_key=activation_key, user=user, created=now) return profile class AddressConfirmationProfile(models.Model): """ Profile model for temporarily storing an activation key to register an email address. """ email = models.EmailField() activation_key = models.CharField(max_length=40) created = models.DateTimeField() user = models.ForeignKey(User) objects = AddressConfirmationProfileManager() def __unicode__(self): return u'Address Confirmation Profile for {0}'.format(self.email) @property def is_expired(self): """ a profile expires after 1 day by default. This can be configured in the settings. 
>>> EMAIL_CONFIRMATION_EXPIRATION_DELTA = timedelta(days=2)

        """
        expiration_delta = getattr(
            settings, 'EMAIL_CONFIRMATION_EXPIRATION_DELTA',
            timedelta(days=1))
        age = datetime.now().replace(tzinfo=None) - \
            self.created.replace(tzinfo=None)
        return age > expiration_delta

    def _create_host_url(self, request):
        # Create the host url
        protocol = 'https'
        if not request.is_secure():
            protocol = 'http'
        server_name = request.META['SERVER_NAME']
        if server_name[-1] == '/':
            server_name = server_name[:len(server_name) - 1]
        return '{0}://{1}'.format(protocol, server_name)

    def send_confirmation_link(self, request, template_context=None,
                               template_path=None):
        """
        Send out a message containing a link to activate the given address.

        The following settings are recognized:

        >>> EMAIL_CONFIRMATION_TEMPLATE = 'postorius/address_confirmation_message.txt'
        >>> EMAIL_CONFIRMATION_FROM = 'postmaster@list.org'
        >>> EMAIL_CONFIRMATION_SUBJECT = 'Confirmation needed'

        :param request: The HTTP request object.
        :type request: HTTPRequest
        :param template_context: The context used when rendering the template.
            Falls back to host url and activation link.
        :type template_context: django.template.Context
        """
        # Create the host url and the activation link needed for the template.
        host_url = self._create_host_url(request)
        # Get the url string from the url conf.
        url = reverse('address_activation_link',
                      kwargs={'activation_key': self.activation_key})
        activation_link = '{0}{1}'.format(host_url, url)
        # Detect the right template path, either from the param,
        # the setting or the default.
        if not template_path:
            template_path = getattr(settings, 'EMAIL_CONFIRMATION_TEMPLATE',
                                    'postorius/address_confirmation_message.txt')
        # Create a template context (if there is none) containing
        # the activation_link and the host_url.
        if not template_context:
            template_context = Context(
                {'activation_link': activation_link, 'host_url': host_url})
        email_subject = getattr(
            settings, 'EMAIL_CONFIRMATION_SUBJECT', u'Confirmation needed')
        try:
            sender_address = getattr(settings, 'EMAIL_CONFIRMATION_FROM')
        except AttributeError:
            # settings.EMAIL_CONFIRMATION_FROM is not defined; fall back to
            # settings.DEFAULT_FROM_EMAIL as mentioned in the Django docs.
            # If that also fails, raise an `ImproperlyConfigured` error.
            try:
                sender_address = getattr(settings, 'DEFAULT_FROM_EMAIL')
            except AttributeError:
                raise ImproperlyConfigured
        send_mail(email_subject,
                  get_template(template_path).render(template_context),
                  sender_address, [self.email])
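# Illustrative sketch, not part of the original Postorius module: a minimal
# view-level walk through the confirmation flow defined above, assuming a
# Django request with an authenticated user. The function name is
# hypothetical; only create_profile() and send_confirmation_link() come
# from this module.
def _sketch_add_address(request, email):
    # Creates the profile, or refreshes an existing one with a new
    # activation key and timestamp.
    profile = AddressConfirmationProfile.objects.create_profile(
        email, request.user)
    # Emails a link pointing at the 'address_activation_link' URL; the
    # template, subject and sender fall back to the settings documented
    # in send_confirmation_link().
    profile.send_confirmation_link(request)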
gpl-3.0
3,223,909,458,813,279,700
-4,506,301,360,420,963,000
32.858462
90
0.620229
false
chaso137/wot-xvm
src/xpm/xpm/mods/lib/tlslite/utils/pem.py
116
3587
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.

from .compat import *
import binascii

#This code is shared with tackpy (somewhat), so I'd rather make minimal
#changes, and preserve the use of a2b_base64 throughout.

def dePem(s, name):
    """Decode a PEM string into a bytearray of its payload.

    The input must contain an appropriate PEM prefix and postfix
    based on the input name string, e.g. for name="CERTIFICATE":

    -----BEGIN CERTIFICATE-----
    MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
    ...
    KoZIhvcNAQEFBQADAwA5kw==
    -----END CERTIFICATE-----

    The first such PEM block in the input will be found, and its payload
    will be base64 decoded and returned.
    """
    prefix = "-----BEGIN %s-----" % name
    postfix = "-----END %s-----" % name
    start = s.find(prefix)
    if start == -1:
        raise SyntaxError("Missing PEM prefix")
    end = s.find(postfix, start+len(prefix))
    if end == -1:
        raise SyntaxError("Missing PEM postfix")
    s = s[start+len(prefix) : end]
    retBytes = a2b_base64(s) # May raise SyntaxError
    return retBytes

def dePemList(s, name):
    """Decode a sequence of PEM blocks into a list of bytearrays.

    The input must contain any number of PEM blocks, each with the appropriate
    PEM prefix and postfix based on the input name string, e.g. for
    name="TACK BREAK SIG".  Arbitrary text can appear between, before, and
    after the PEM blocks.  For example:

    " Created by TACK.py 0.9.3
    Created at 2012-02-01T00:30:10Z
    -----BEGIN TACK BREAK SIG-----
    ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
    YMEBdw69PUP8JB4AdqA3K6Ap0Fgd9SSTOECeAKOUAym8zcYaXUwpk0+WuPYa7Zmm
    SkbOlK4ywqt+amhWbg9txSGUwFO5tWUHT3QrnRlE/e3PeNFXLx5Bckg=
    -----END TACK BREAK SIG-----
    Created by TACK.py 0.9.3
    Created at 2012-02-01T00:30:11Z
    -----BEGIN TACK BREAK SIG-----
    ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
    YMEBdw69PUP8JB4AdqA3K6BVCWfcjN36lx6JwxmZQncS6sww7DecFO/qjSePCxwM
    +kdDqX/9/183nmjx6bf0ewhPXkA0nVXsDYZaydN8rJU1GaMlnjcIYxY=
    -----END TACK BREAK SIG----- "

    All such PEM blocks will be found, decoded, and returned in an ordered
    list of bytearrays, which may have zero elements if no PEM blocks are
    found.
    """
    bList = []
    prefix = "-----BEGIN %s-----" % name
    postfix = "-----END %s-----" % name
    while 1:
        start = s.find(prefix)
        if start == -1:
            return bList
        end = s.find(postfix, start+len(prefix))
        if end == -1:
            raise SyntaxError("Missing PEM postfix")
        s2 = s[start+len(prefix) : end]
        retBytes = a2b_base64(s2) # May raise SyntaxError
        bList.append(retBytes)
        s = s[end+len(postfix) : ]

def pem(b, name):
    """Encode a payload bytearray into a PEM string.

    The input will be base64 encoded, then wrapped in a PEM prefix/postfix
    based on the name string, e.g. for name="CERTIFICATE":

    -----BEGIN CERTIFICATE-----
    MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
    ...
    KoZIhvcNAQEFBQADAwA5kw==
    -----END CERTIFICATE-----
    """
    s1 = b2a_base64(b)[:-1] # remove terminating \n
    s2 = ""
    while s1:
        s2 += s1[:64] + "\n"
        s1 = s1[64:]
    s = ("-----BEGIN %s-----\n" % name) + s2 + \
        ("-----END %s-----\n" % name)
    return s

def pemSniff(inStr, name):
    """Return True if the input contains a PEM block with the given name."""
    searchStr = "-----BEGIN %s-----" % name
    return searchStr in inStr
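# Illustrative round trip through the helpers above, not part of the
# original tlslite module; it assumes the compat helpers accept bytearrays
# and produce str-typed PEM text, as on Python 2. Never called at import.
def _pemRoundTripSketch():
    payload = bytearray(b'hello pem')
    pemStr = pem(payload, "TEST BLOCK")
    # The sniffer only checks for the BEGIN line.
    assert pemSniff(pemStr, "TEST BLOCK")
    # dePem() recovers the first block's payload ...
    assert dePem(pemStr, "TEST BLOCK") == payload
    # ... and dePemList() returns every block, here a single-element list.
    assert dePemList(pemStr, "TEST BLOCK") == [payload]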
gpl-3.0
3,775,556,770,676,750,300
-7,594,037,409,240,106,000
35.602041
78
0.643156
false
kuszmaul/PerconaFT
src/tests/ipm.py
92
1263
#!/usr/local/bin/python2.6

import sys
import os
import pexpect
import getpass

#
# remote_cmd: drive a Sentry IPM power strip over ssh
#

nameaddr = 'admn@192.168.1.254'
passwd = 'admn'

def IPM_cmd(cmds):
    # password handling: accept a new host key if prompted, then log in
    ssh_newkey = 'Are you sure you want to continue connecting'
    p = pexpect.spawn('ssh %s' % nameaddr, timeout=60)
    i = p.expect([ssh_newkey, 'Password:', pexpect.EOF])
    if i == 0:
        p.sendline('yes')
        i = p.expect([ssh_newkey, 'Password:', pexpect.EOF])
    if i == 1:
        p.sendline(passwd)
    elif i == 2:
        print "Got EOF: unexpected host-key prompt or connection timeout"

    # run command(s) at the Sentry prompt
    i = p.expect('Sentry:')
    for cmd in cmds:
        if i == 0:
            p.sendline(cmd)
        else:
            print 'p.expect saw', p.before
        i = p.expect('Sentry:')
        print p.before

    # close session
    p.sendline('quit')
    p.expect(pexpect.EOF)
    return 0

def IPM_power_on():
    IPM_cmd(['on all'])

def IPM_power_off():
    IPM_cmd(['off all'])

def main(argv):
#    passwd = getpass.getpass('password for %s:' % (nameaddr))

    # guard against a missing argument instead of raising IndexError
    if len(argv) < 2:
        print 'usage: %s on|off|<Sentry command> ...' % argv[0]
        return 1

    if argv[1] == 'on':
        IPM_power_on()
    elif argv[1] == 'off':
        IPM_power_off()
    else:
        IPM_cmd(argv[1:])

    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))
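# Illustrative invocations, not part of the original script. Anything
# other than 'on'/'off' is passed through verbatim to the Sentry CLI;
# 'status all' below is a plausible command shown for illustration only,
# so check the power strip's documentation for the exact command set.
#
#     ./ipm.py on              # power every outlet on
#     ./ipm.py off             # power every outlet off
#     ./ipm.py 'status all'    # run an arbitrary Sentry command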
agpl-3.0
5,764,531,581,110,229,000
8,708,050,042,540,633,000
19.704918
63
0.562945
false
vladimir-ipatov/ganeti
test/py/ganeti.impexpd_unittest.py
9
8733
#!/usr/bin/python # # Copyright (C) 2010 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Script for testing ganeti.impexpd""" import os import sys import re import unittest import socket from ganeti import constants from ganeti import objects from ganeti import compat from ganeti import utils from ganeti import errors from ganeti import impexpd import testutils class CmdBuilderConfig(objects.ConfigObject): __slots__ = [ "bind", "key", "cert", "ca", "host", "port", "ipv4", "ipv6", "compress", "magic", "connect_timeout", "connect_retries", "cmd_prefix", "cmd_suffix", ] def CheckCmdWord(cmd, word): wre = re.compile(r"\b%s\b" % re.escape(word)) return compat.any(wre.search(i) for i in cmd) class TestCommandBuilder(unittest.TestCase): def test(self): for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]: if mode == constants.IEM_IMPORT: comprcmd = "gunzip" elif mode == constants.IEM_EXPORT: comprcmd = "gzip" for compress in [constants.IEC_NONE, constants.IEC_GZIP]: for magic in [None, 10 * "-", "HelloWorld", "J9plh4nFo2", "24A02A81-2264-4B51-A882-A2AB9D85B420"]: opts = CmdBuilderConfig(magic=magic, compress=compress) builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) magic_cmd = builder._GetMagicCommand() dd_cmd = builder._GetDdCommand() if magic: self.assert_(("M=%s" % magic) in magic_cmd) self.assert_(("M=%s" % magic) in dd_cmd) else: self.assertFalse(magic_cmd) for host in ["localhost", "198.51.100.4", "192.0.2.99"]: for port in [0, 1, 1234, 7856, 45452]: for cmd_prefix in [None, "PrefixCommandGoesHere|", "dd if=/dev/hda bs=1048576 |"]: for cmd_suffix in [None, "< /some/file/name", "| dd of=/dev/null"]: opts = CmdBuilderConfig(host=host, port=port, compress=compress, cmd_prefix=cmd_prefix, cmd_suffix=cmd_suffix) builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) # Check complete command cmd = builder.GetCommand() self.assert_(isinstance(cmd, list)) if compress == constants.IEC_GZIP: self.assert_(CheckCmdWord(cmd, comprcmd)) if cmd_prefix is not None: self.assert_(compat.any(cmd_prefix in i for i in cmd)) if cmd_suffix is not None: self.assert_(compat.any(cmd_suffix in i for i in cmd)) # Check socat command socat_cmd = builder._GetSocatCommand() if mode == constants.IEM_IMPORT: ssl_addr = socat_cmd[-2].split(",") self.assert_(("OPENSSL-LISTEN:%s" % port) in ssl_addr) elif mode == constants.IEM_EXPORT: ssl_addr = socat_cmd[-1].split(",") self.assert_(("OPENSSL:%s:%s" % (host, port)) in ssl_addr) self.assert_("verify=1" in ssl_addr) def testIPv6(self): for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]: opts = CmdBuilderConfig(host="localhost", port=6789, ipv4=False, ipv6=False) builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) cmd = builder._GetSocatCommand() self.assert_(compat.all("pf=" not in i for i in cmd)) # IPv4 opts = CmdBuilderConfig(host="localhost", port=6789, ipv4=True, ipv6=False) builder = 
impexpd.CommandBuilder(mode, opts, 1, 2, 3) cmd = builder._GetSocatCommand() self.assert_(compat.any(",pf=ipv4" in i for i in cmd)) # IPv6 opts = CmdBuilderConfig(host="localhost", port=6789, ipv4=False, ipv6=True) builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) cmd = builder._GetSocatCommand() self.assert_(compat.any(",pf=ipv6" in i for i in cmd)) # IPv4 and IPv6 opts = CmdBuilderConfig(host="localhost", port=6789, ipv4=True, ipv6=True) builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) self.assertRaises(AssertionError, builder._GetSocatCommand) def testCommaError(self): opts = CmdBuilderConfig(host="localhost", port=1234, ca="/some/path/with,a/,comma") for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]: builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) self.assertRaises(errors.GenericError, builder.GetCommand) def testOptionLengthError(self): testopts = [ CmdBuilderConfig(bind="0.0.0.0" + ("A" * impexpd.SOCAT_OPTION_MAXLEN), port=1234, ca="/tmp/ca"), CmdBuilderConfig(host="localhost", port=1234, ca="/tmp/ca" + ("B" * impexpd.SOCAT_OPTION_MAXLEN)), CmdBuilderConfig(host="localhost", port=1234, key="/tmp/key" + ("B" * impexpd.SOCAT_OPTION_MAXLEN)), ] for opts in testopts: for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]: builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) self.assertRaises(errors.GenericError, builder.GetCommand) opts.host = "localhost" + ("A" * impexpd.SOCAT_OPTION_MAXLEN) builder = impexpd.CommandBuilder(constants.IEM_EXPORT, opts, 1, 2, 3) self.assertRaises(errors.GenericError, builder.GetCommand) def testModeError(self): mode = "foobarbaz" assert mode not in [constants.IEM_IMPORT, constants.IEM_EXPORT] opts = CmdBuilderConfig(host="localhost", port=1234) builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3) self.assertRaises(errors.GenericError, builder.GetCommand) class TestVerifyListening(unittest.TestCase): def test(self): self.assertEqual(impexpd._VerifyListening(socket.AF_INET, "192.0.2.7", 1234), ("192.0.2.7", 1234)) self.assertEqual(impexpd._VerifyListening(socket.AF_INET6, "::1", 9876), ("::1", 9876)) self.assertEqual(impexpd._VerifyListening(socket.AF_INET6, "[::1]", 4563), ("::1", 4563)) self.assertEqual(impexpd._VerifyListening(socket.AF_INET6, "[2001:db8::1:4563]", 4563), ("2001:db8::1:4563", 4563)) def testError(self): for family in [socket.AF_UNIX, socket.AF_INET, socket.AF_INET6]: self.assertRaises(errors.GenericError, impexpd._VerifyListening, family, "", 1234) self.assertRaises(errors.GenericError, impexpd._VerifyListening, family, "192", 999) for family in [socket.AF_UNIX, socket.AF_INET6]: self.assertRaises(errors.GenericError, impexpd._VerifyListening, family, "192.0.2.7", 1234) self.assertRaises(errors.GenericError, impexpd._VerifyListening, family, "[2001:db8::1", 1234) self.assertRaises(errors.GenericError, impexpd._VerifyListening, family, "2001:db8::1]", 1234) for family in [socket.AF_UNIX, socket.AF_INET]: self.assertRaises(errors.GenericError, impexpd._VerifyListening, family, "::1", 1234) class TestCalcThroughput(unittest.TestCase): def test(self): self.assertEqual(impexpd._CalcThroughput([]), None) self.assertEqual(impexpd._CalcThroughput([(0, 0)]), None) samples = [ (0.0, 0.0), (10.0, 100.0), ] self.assertAlmostEqual(impexpd._CalcThroughput(samples), 10.0, 3) samples = [ (5.0, 7.0), (10.0, 100.0), (16.0, 181.0), ] self.assertAlmostEqual(impexpd._CalcThroughput(samples), 15.818, 3) if __name__ == "__main__": testutils.GanetiTestProgram()
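# Illustrative note, not part of the original test module: the expected
# values in TestCalcThroughput are consistent with a simple end-to-end
# rate between the first and last (time, bytes) samples. This is an
# inference from the test data above, not ganeti's actual implementation.
def _naive_throughput(samples):
  if len(samples) < 2:
    return None
  (t0, b0) = samples[0]
  (t1, b1) = samples[-1]
  return (b1 - b0) / (t1 - t0)

# (100 - 0) / (10 - 0) == 10.0
assert _naive_throughput([(0.0, 0.0), (10.0, 100.0)]) == 10.0
# (181 - 7) / (16 - 5) == 15.8181..., matching the 15.818 golden value
assert abs(_naive_throughput([(5.0, 7.0), (10.0, 100.0), (16.0, 181.0)])
           - 15.818) < 1e-3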
gpl-2.0
-6,355,962,625,890,815,000
1,980,844,527,093,096,400
35.086777
80
0.592351
false
iansf/sky_engine
sky/tools/webkitpy/layout_tests/port/linux.py
7
6967
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import logging
import re

from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.breakpad.dump_reader_multipart import DumpReaderLinux
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.port import config


_log = logging.getLogger(__name__)


class LinuxPort(base.Port):
    port_name = 'linux'

    SUPPORTED_VERSIONS = ('x86', 'x86_64')

    FALLBACK_PATHS = { 'x86_64': [ 'linux' ] }
    FALLBACK_PATHS['x86'] = ['linux-x86'] + FALLBACK_PATHS['x86_64']

    DEFAULT_BUILD_DIRECTORIES = ('out',)

    BUILD_REQUIREMENTS_URL = 'https://code.google.com/p/chromium/wiki/LinuxBuildInstructions'

    @classmethod
    def _determine_driver_path_statically(cls, host, options):
        config_object = config.Config(host.executive, host.filesystem)
        build_directory = getattr(options, 'build_directory', None)
        finder = WebKitFinder(host.filesystem)
        webkit_base = finder.webkit_base()
        chromium_base = finder.chromium_base()
        driver_name = cls.SKY_SHELL_NAME
        if hasattr(options, 'configuration') and options.configuration:
            configuration = options.configuration
        else:
            configuration = config_object.default_configuration()
        return cls._static_build_path(host.filesystem, build_directory,
                                      chromium_base, configuration,
                                      [driver_name])

    @staticmethod
    def _determine_architecture(filesystem, executive, driver_path):
        file_output = ''
        if filesystem.isfile(driver_path):
            # The --dereference flag tells file to follow symlinks
            file_output = executive.run_command(
                ['file', '--brief', '--dereference', driver_path],
                return_stderr=True)

        if re.match(r'ELF 32-bit LSB\s+executable', file_output):
            return 'x86'
        if re.match(r'ELF 64-bit LSB\s+executable', file_output):
            return 'x86_64'
        if file_output:
            _log.warning('Could not determine architecture from "file" output: %s' % file_output)

        # We don't know what the architecture is; default to 'x86_64' because
        # maybe we're rebaselining and the binary doesn't actually exist,
        # or something else weird is going on.
It's okay to do this because # if we actually try to use the binary, check_build() should fail. return 'x86_64' @classmethod def determine_full_port_name(cls, host, options, port_name): if port_name.endswith('linux'): return port_name + '-' + cls._determine_architecture(host.filesystem, host.executive, cls._determine_driver_path_statically(host, options)) return port_name def __init__(self, host, port_name, **kwargs): super(LinuxPort, self).__init__(host, port_name, **kwargs) (base, arch) = port_name.rsplit('-', 1) assert base == 'linux' assert arch in self.SUPPORTED_VERSIONS assert port_name in ('linux', 'linux-x86', 'linux-x86_64') self._version = 'lucid' # We only support lucid right now. self._architecture = arch if not self.get_option('disable_breakpad'): self._dump_reader = DumpReaderLinux(host, self._build_path()) def default_baseline_search_path(self): port_names = self.FALLBACK_PATHS[self._architecture] return map(self._webkit_baseline_path, port_names) def _modules_to_search_for_symbols(self): return [self._build_path('libffmpegsumo.so')] def check_build(self, needs_http, printer): result = super(LinuxPort, self).check_build(needs_http, printer) if result: _log.error('For complete Linux build requirements, please see:') _log.error('') _log.error(' http://code.google.com/p/chromium/wiki/LinuxBuildInstructions') return result def look_for_new_crash_logs(self, crashed_processes, start_time): if self.get_option('disable_breakpad'): return None return self._dump_reader.look_for_new_crash_logs(crashed_processes, start_time) def clobber_old_port_specific_results(self): if not self.get_option('disable_breakpad'): self._dump_reader.clobber_old_results() def operating_system(self): return 'linux' # # PROTECTED METHODS # def _check_apache_install(self): result = self._check_file_exists(self.path_to_apache(), "apache2") result = self._check_file_exists(self.path_to_apache_config_file(), "apache2 config file") and result if not result: _log.error(' Please install using: "sudo apt-get install apache2 libapache2-mod-php5"') _log.error('') return result def _wdiff_missing_message(self): return 'wdiff is not installed; please install using "sudo apt-get install wdiff"' def path_to_apache(self): # The Apache binary path can vary depending on OS and distribution # See http://wiki.apache.org/httpd/DistrosDefaultLayout for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]: if self._filesystem.exists(path): return path _log.error("Could not find apache. Not installed or unknown path.") return None def _path_to_driver(self, configuration=None): binary_name = self.driver_name() return self._build_path_with_configuration(configuration, binary_name)
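# Illustrative sketch, not part of the original port module: how the full
# port name built by determine_full_port_name() and the baseline fallback
# list fit together, using only the class constants defined above. Never
# called at import.
def _sketch_fallback_lookup():
    # determine_full_port_name() appends '-' plus the detected architecture.
    port_name = 'linux' + '-' + 'x86_64'
    arch = port_name.rsplit('-', 1)[1]
    # x86_64 falls back straight to the shared 'linux' baselines ...
    assert LinuxPort.FALLBACK_PATHS[arch] == ['linux']
    # ... while x86 checks its own directory first.
    assert LinuxPort.FALLBACK_PATHS['x86'] == ['linux-x86', 'linux']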
bsd-3-clause
307,872,624,214,801,300
8,689,510,732,490,704,000
42.81761
151
0.681211
false
rgommers/statsmodels
statsmodels/duration/tests/survival_r_results.py
34
11980
import numpy as np coef_20_1_bre = np.array([-0.9185611]) se_20_1_bre = np.array([0.4706831]) time_20_1_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,1.1,1.2,1.3,1.4,1.5]) hazard_20_1_bre = np.array([0,0,0.04139181,0.1755379,0.3121216,0.3121216,0.4263121,0.6196358,0.6196358,0.6196358,0.909556,1.31083,1.31083]) coef_20_1_et_bre = np.array([-0.8907007]) se_20_1_et_bre = np.array([0.4683384]) time_20_1_et_bre = np.array([0]) hazard_20_1_et_bre = np.array([0]) coef_20_1_st_bre = np.array([-0.5766809]) se_20_1_st_bre = np.array([0.4418918]) time_20_1_st_bre = np.array([0]) hazard_20_1_st_bre = np.array([0]) coef_20_1_et_st_bre = np.array([-0.5785683]) se_20_1_et_st_bre = np.array([0.4388437]) time_20_1_et_st_bre = np.array([0]) hazard_20_1_et_st_bre = np.array([0]) coef_20_1_efr = np.array([-0.9975319]) se_20_1_efr = np.array([0.4792421]) time_20_1_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,1.1,1.2,1.3,1.4,1.5]) hazard_20_1_efr = np.array([0,0,0.03934634,0.1663316,0.2986427,0.2986427,0.4119189,0.6077373,0.6077373,0.6077373,0.8933041,1.285732,1.285732]) coef_20_1_et_efr = np.array([-0.9679541]) se_20_1_et_efr = np.array([0.4766406]) time_20_1_et_efr = np.array([0]) hazard_20_1_et_efr = np.array([0]) coef_20_1_st_efr = np.array([-0.6345294]) se_20_1_st_efr = np.array([0.4455952]) time_20_1_st_efr = np.array([0]) hazard_20_1_st_efr = np.array([0]) coef_20_1_et_st_efr = np.array([-0.6355622]) se_20_1_et_st_efr = np.array([0.4423104]) time_20_1_et_st_efr = np.array([0]) hazard_20_1_et_st_efr = np.array([0]) coef_50_1_bre = np.array([-0.6761247]) se_50_1_bre = np.array([0.25133]) time_50_1_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.5,1.6,1.7,1.8,1.9,2.4,2.8]) hazard_50_1_bre = np.array([0,0.04895521,0.08457461,0.2073863,0.2382473,0.2793018,0.3271622,0.3842953,0.3842953,0.5310807,0.6360276,0.7648251,0.7648251,0.9294298,0.9294298,0.9294298,1.206438,1.555569,1.555569]) coef_50_1_et_bre = np.array([-0.6492871]) se_50_1_et_bre = np.array([0.2542493]) time_50_1_et_bre = np.array([0]) hazard_50_1_et_bre = np.array([0]) coef_50_1_st_bre = np.array([-0.7051135]) se_50_1_st_bre = np.array([0.2852093]) time_50_1_st_bre = np.array([0]) hazard_50_1_st_bre = np.array([0]) coef_50_1_et_st_bre = np.array([-0.8672546]) se_50_1_et_st_bre = np.array([0.3443235]) time_50_1_et_st_bre = np.array([0]) hazard_50_1_et_st_bre = np.array([0]) coef_50_1_efr = np.array([-0.7119322]) se_50_1_efr = np.array([0.2533563]) time_50_1_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.5,1.6,1.7,1.8,1.9,2.4,2.8]) hazard_50_1_efr = np.array([0,0.04773902,0.08238731,0.2022993,0.2327053,0.2736316,0.3215519,0.3787123,0.3787123,0.526184,0.6323073,0.7627338,0.7627338,0.9288858,0.9288858,0.9288858,1.206835,1.556054,1.556054]) coef_50_1_et_efr = np.array([-0.7103063]) se_50_1_et_efr = np.array([0.2598129]) time_50_1_et_efr = np.array([0]) hazard_50_1_et_efr = np.array([0]) coef_50_1_st_efr = np.array([-0.7417904]) se_50_1_st_efr = np.array([0.2846437]) time_50_1_st_efr = np.array([0]) hazard_50_1_st_efr = np.array([0]) coef_50_1_et_st_efr = np.array([-0.9276112]) se_50_1_et_st_efr = np.array([0.3462638]) time_50_1_et_st_efr = np.array([0]) hazard_50_1_et_st_efr = np.array([0]) coef_50_2_bre = np.array([-0.5935189,0.5035724]) se_50_2_bre = np.array([0.2172841,0.2399933]) time_50_2_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.9,2.7,2.9]) hazard_50_2_bre = 
np.array([0.02695812,0.09162381,0.1309537,0.1768423,0.2033353,0.2033353,0.3083449,0.3547287,0.4076453,0.4761318,0.5579718,0.7610905,0.918962,0.918962,1.136173,1.605757,2.457676,2.457676]) coef_50_2_et_bre = np.array([-0.4001465,0.4415933]) se_50_2_et_bre = np.array([0.1992302,0.2525949]) time_50_2_et_bre = np.array([0]) hazard_50_2_et_bre = np.array([0]) coef_50_2_st_bre = np.array([-0.6574891,0.4416079]) se_50_2_st_bre = np.array([0.2753398,0.269458]) time_50_2_st_bre = np.array([0]) hazard_50_2_st_bre = np.array([0]) coef_50_2_et_st_bre = np.array([-0.3607069,0.2731982]) se_50_2_et_st_bre = np.array([0.255415,0.306942]) time_50_2_et_st_bre = np.array([0]) hazard_50_2_et_st_bre = np.array([0]) coef_50_2_efr = np.array([-0.6107485,0.5309737]) se_50_2_efr = np.array([0.2177713,0.2440535]) time_50_2_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.9,2.7,2.9]) hazard_50_2_efr = np.array([0.02610571,0.08933637,0.1279094,0.1731699,0.19933,0.19933,0.303598,0.3497025,0.4023939,0.4706978,0.5519237,0.7545023,0.9129989,0.9129989,1.13186,1.60574,2.472615,2.472615]) coef_50_2_et_efr = np.array([-0.4092002,0.4871344]) se_50_2_et_efr = np.array([0.1968905,0.2608527]) time_50_2_et_efr = np.array([0]) hazard_50_2_et_efr = np.array([0]) coef_50_2_st_efr = np.array([-0.6631286,0.4663285]) se_50_2_st_efr = np.array([0.2748224,0.273603]) time_50_2_st_efr = np.array([0]) hazard_50_2_st_efr = np.array([0]) coef_50_2_et_st_efr = np.array([-0.3656059,0.2943912]) se_50_2_et_st_efr = np.array([0.2540752,0.3124632]) time_50_2_et_st_efr = np.array([0]) hazard_50_2_et_st_efr = np.array([0]) coef_100_5_bre = np.array([-0.529776,-0.2916374,-0.1205425,0.3493476,0.6034305]) se_100_5_bre = np.array([0.1789305,0.1482505,0.1347422,0.1528205,0.1647927]) time_100_5_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.5,2.8,3.2,3.3]) hazard_100_5_bre = np.array([0.02558588,0.05608812,0.1087773,0.1451098,0.1896703,0.2235791,0.3127521,0.3355107,0.439452,0.504983,0.5431706,0.5841462,0.5841462,0.5841462,0.6916466,0.7540191,0.8298704,1.027876,1.170335,1.379306,1.648758,1.943177,1.943177,1.943177,4.727101]) coef_100_5_et_bre = np.array([-0.4000784,-0.1790941,-0.1378969,0.3288529,0.533246]) se_100_5_et_bre = np.array([0.1745655,0.1513545,0.1393968,0.1487803,0.1686992]) time_100_5_et_bre = np.array([0]) hazard_100_5_et_bre = np.array([0]) coef_100_5_st_bre = np.array([-0.53019,-0.3225739,-0.1241568,0.3246598,0.6196859]) se_100_5_st_bre = np.array([0.1954581,0.1602811,0.1470644,0.17121,0.1784115]) time_100_5_st_bre = np.array([0]) hazard_100_5_st_bre = np.array([0]) coef_100_5_et_st_bre = np.array([-0.3977171,-0.2166136,-0.1387623,0.3251726,0.5664705]) se_100_5_et_st_bre = np.array([0.1951054,0.1707925,0.1501968,0.1699932,0.1843428]) time_100_5_et_st_bre = np.array([0]) hazard_100_5_et_st_bre = np.array([0]) coef_100_5_efr = np.array([-0.5641909,-0.3233021,-0.1234858,0.3712328,0.6421963]) se_100_5_efr = np.array([0.1804027,0.1496253,0.1338531,0.1529832,0.1670848]) time_100_5_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.5,2.8,3.2,3.3]) hazard_100_5_efr = np.array([0.02393412,0.05276399,0.1028432,0.1383859,0.1823461,0.2158107,0.3037825,0.3264864,0.4306648,0.4964367,0.5348595,0.5760305,0.5760305,0.5760305,0.6842238,0.7468135,0.8228841,1.023195,1.166635,1.379361,1.652898,1.950119,1.950119,1.950119,4.910635]) coef_100_5_et_efr = np.array([-0.4338666,-0.2140139,-0.1397387,0.3535993,0.5768645]) se_100_5_et_efr = 
np.array([0.1756485,0.1527244,0.138298,0.1488427,0.1716654]) time_100_5_et_efr = np.array([0]) hazard_100_5_et_efr = np.array([0]) coef_100_5_st_efr = np.array([-0.5530876,-0.3331652,-0.128381,0.3503472,0.6397813]) se_100_5_st_efr = np.array([0.1969338,0.1614976,0.1464088,0.171299,0.1800787]) time_100_5_st_efr = np.array([0]) hazard_100_5_st_efr = np.array([0]) coef_100_5_et_st_efr = np.array([-0.421153,-0.2350069,-0.1433638,0.3538863,0.5934568]) se_100_5_et_st_efr = np.array([0.1961729,0.1724719,0.1492979,0.170464,0.1861849]) time_100_5_et_st_efr = np.array([0]) hazard_100_5_et_st_efr = np.array([0]) coef_1000_10_bre = np.array([-0.4699279,-0.464557,-0.308411,-0.2158298,-0.09048563,0.09359662,0.112588,0.3343705,0.3480601,0.5634985]) se_1000_10_bre = np.array([0.04722914,0.04785291,0.04503528,0.04586872,0.04429793,0.0446141,0.04139944,0.04464292,0.04559903,0.04864393]) time_1000_10_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.2,2.3,2.4,2.5,2.6,2.7,2.8,2.9,3,3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9,4,4.1,4.2,4.3,4.4,4.6,4.8,4.9,5,5.1,5.2,5.7,5.8,5.9,6.9]) hazard_1000_10_bre = np.array([0.01610374,0.04853538,0.08984849,0.1311329,0.168397,0.2230488,0.2755388,0.3312606,0.3668702,0.4146558,0.477935,0.5290705,0.5831775,0.6503129,0.7113068,0.7830385,0.8361717,0.8910061,0.9615944,1.024011,1.113399,1.165349,1.239827,1.352902,1.409548,1.53197,1.601843,1.682158,1.714907,1.751564,1.790898,1.790898,1.83393,1.83393,1.936055,1.992303,2.050778,2.118776,2.263056,2.504999,2.739343,2.895514,3.090349,3.090349,3.391772,3.728142,4.152769,4.152769,4.152769,4.725957,4.725957,5.69653,5.69653,5.69653]) coef_1000_10_et_bre = np.array([-0.410889,-0.3929442,-0.2975845,-0.1851533,-0.0918359,0.1011997,0.106735,0.2899179,0.3220672,0.5069589]) se_1000_10_et_bre = np.array([0.04696754,0.04732169,0.04537707,0.04605371,0.04365232,0.04450021,0.04252475,0.04482007,0.04562374,0.04859727]) time_1000_10_et_bre = np.array([0]) hazard_1000_10_et_bre = np.array([0]) coef_1000_10_st_bre = np.array([-0.471015,-0.4766859,-0.3070839,-0.2091938,-0.09190845,0.0964942,0.1138269,0.3307131,0.3543551,0.562492]) se_1000_10_st_bre = np.array([0.04814778,0.04841938,0.04572291,0.04641227,0.04502525,0.04517603,0.04203737,0.04524356,0.04635037,0.04920866]) time_1000_10_st_bre = np.array([0]) hazard_1000_10_st_bre = np.array([0]) coef_1000_10_et_st_bre = np.array([-0.4165849,-0.4073504,-0.2980959,-0.1765194,-0.09152798,0.1013213,0.1009838,0.2859668,0.3247608,0.5044448]) se_1000_10_et_st_bre = np.array([0.04809818,0.04809499,0.0460829,0.04679922,0.0445294,0.04514045,0.04339298,0.04580591,0.04652447,0.04920744]) time_1000_10_et_st_bre = np.array([0]) hazard_1000_10_et_st_bre = np.array([0]) coef_1000_10_efr = np.array([-0.4894399,-0.4839746,-0.3227769,-0.2261293,-0.09318482,0.09767154,0.1173205,0.3493732,0.3640146,0.5879749]) se_1000_10_efr = np.array([0.0474181,0.04811855,0.04507655,0.04603044,0.04440409,0.04478202,0.04136728,0.04473343,0.045768,0.04891375]) time_1000_10_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.2,2.3,2.4,2.5,2.6,2.7,2.8,2.9,3,3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9,4,4.1,4.2,4.3,4.4,4.6,4.8,4.9,5,5.1,5.2,5.7,5.8,5.9,6.9]) hazard_1000_10_efr = 
np.array([0.01549698,0.04680035,0.08682564,0.1269429,0.1632388,0.2167291,0.2682311,0.3231316,0.3582936,0.4054892,0.4681098,0.5188697,0.5727059,0.639571,0.7003012,0.7718979,0.825053,0.880063,0.950935,1.013828,1.103903,1.156314,1.231707,1.346235,1.40359,1.527475,1.598231,1.6795,1.712779,1.750227,1.790455,1.790455,1.834455,1.834455,1.938997,1.996804,2.056859,2.126816,2.275217,2.524027,2.76669,2.929268,3.13247,3.13247,3.448515,3.80143,4.249649,4.249649,4.249649,4.851365,4.851365,5.877307,5.877307,5.877307]) coef_1000_10_et_efr = np.array([-0.4373066,-0.4131901,-0.3177637,-0.1978493,-0.09679451,0.1092037,0.1136069,0.3088907,0.3442007,0.5394121]) se_1000_10_et_efr = np.array([0.04716041,0.04755342,0.04546713,0.04627802,0.04376583,0.04474868,0.04259991,0.04491564,0.04589027,0.04890847]) time_1000_10_et_efr = np.array([0]) hazard_1000_10_et_efr = np.array([0]) coef_1000_10_st_efr = np.array([-0.4911117,-0.4960756,-0.3226152,-0.220949,-0.09478141,0.1015735,0.1195524,0.3446977,0.3695904,0.5878576]) se_1000_10_st_efr = np.array([0.04833676,0.04868554,0.04578407,0.04661755,0.04518267,0.04537135,0.04202183,0.04531266,0.0464931,0.04949831]) time_1000_10_st_efr = np.array([0]) hazard_1000_10_st_efr = np.array([0]) coef_1000_10_et_st_efr = np.array([-0.444355,-0.4283278,-0.3198815,-0.1901781,-0.09727039,0.1106191,0.1092104,0.3034778,0.3451699,0.5382381]) se_1000_10_et_st_efr = np.array([0.04830664,0.04833619,0.04617371,0.04706401,0.04472699,0.0454208,0.04350539,0.04588588,0.04675675,0.04950987]) time_1000_10_et_st_efr = np.array([0]) hazard_1000_10_et_st_efr = np.array([0])
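# Illustrative note, not part of the original results module. The naming
# convention is inferred from the variables above: coef_/se_ hold fitted
# coefficients and standard errors, time_/hazard_ the baseline cumulative
# hazard grid; the numeric parts are sample size and covariate count
# (e.g. 20_1, 1000_10); 'bre'/'efr' mark Breslow vs. Efron tie handling,
# 'et' entry times and 'st' stratification. A consuming test would compare
# fitted values against these golden numbers, roughly like the commented
# sketch below; the PHReg call signature is an assumption, not code from
# the statsmodels test suite.
#
#     import numpy.testing as npt
#     # model = PHReg(time, exog, status, ties='breslow')
#     # result = model.fit()
#     # npt.assert_allclose(result.params, coef_20_1_bre, rtol=1e-5)
#     # npt.assert_allclose(result.bse, se_20_1_bre, rtol=1e-5)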
bsd-3-clause
5,424,702,658,527,676,000
-8,412,588,448,535,925,000
36.320872
532
0.678798
false
forman/dectree
examples/intertidal_flat_classif/intertidal_flat_classif.py
1
12362
from numba import jit, jitclass, float64 import numpy as np @jit(nopython=True) def _B1_LT_085(x): # B1.LT_085: lt(0.85) if 0.0 == 0.0: return 1.0 if x < 0.85 else 0.0 x1 = 0.85 - 0.0 x2 = 0.85 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B1_GT_1(x): # B1.GT_1: gt(1.0) if 0.0 == 0.0: return 1.0 if x > 1.0 else 0.0 x1 = 1.0 - 0.0 x2 = 1.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B2_GT_0(x): # B2.GT_0: gt(0.0) if 0.0 == 0.0: return 1.0 if x > 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B3_LT_005(x): # B3.LT_005: lt(0.05) if 0.0 == 0.0: return 1.0 if x < 0.05 else 0.0 x1 = 0.05 - 0.0 x2 = 0.05 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B3_LT_01(x): # B3.LT_01: lt(0.1) if 0.0 == 0.0: return 1.0 if x < 0.1 else 0.0 x1 = 0.1 - 0.0 x2 = 0.1 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B3_LT_015(x): # B3.LT_015: lt(0.15) if 0.0 == 0.0: return 1.0 if x < 0.15 else 0.0 x1 = 0.15 - 0.0 x2 = 0.15 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B3_LT_02(x): # B3.LT_02: lt(0.2) if 0.0 == 0.0: return 1.0 if x < 0.2 else 0.0 x1 = 0.2 - 0.0 x2 = 0.2 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B4_NODATA(x): # B4.NODATA: eq(0.0) if 0.0 == 0.0: return 1.0 if x == 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 x3 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) if x <= x3: return 1.0 - (x - x2) / (x3 - x2) return 0.0 @jit(nopython=True) def _B5_LT_01(x): # B5.LT_01: lt(0.1) if 0.0 == 0.0: return 1.0 if x < 0.1 else 0.0 x1 = 0.1 - 0.0 x2 = 0.1 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B7_LT_05(x): # B7.LT_05: lt(0.5) if 0.0 == 0.0: return 1.0 if x < 0.5 else 0.0 x1 = 0.5 - 0.0 x2 = 0.5 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B8_GT_0(x): # B8.GT_0: gt(0.0) if 0.0 == 0.0: return 1.0 if x > 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_LT_009(x): # B8.LT_009: lt(0.09) if 0.0 == 0.0: return 1.0 if x < 0.09 else 0.0 x1 = 0.09 - 0.0 x2 = 0.09 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B8_GT_033(x): # B8.GT_033: gt(0.33) if 0.0 == 0.0: return 1.0 if x > 0.33 else 0.0 x1 = 0.33 - 0.0 x2 = 0.33 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_GT_035(x): # B8.GT_035: gt(0.35) if 0.0 == 0.0: return 1.0 if x > 0.35 else 0.0 x1 = 0.35 - 0.0 x2 = 0.35 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_GT_04(x): # B8.GT_04: gt(0.4) if 0.0 == 0.0: return 1.0 if x > 0.4 else 0.0 x1 = 0.4 - 0.0 x2 = 0.4 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B8_GT_045(x): # B8.GT_045: gt(0.45) if 0.0 == 0.0: return 1.0 if x > 0.45 else 0.0 x1 = 0.45 - 0.0 x2 = 0.45 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def 
_B8_LT_085(x): # B8.LT_085: lt(0.85) if 0.0 == 0.0: return 1.0 if x < 0.85 else 0.0 x1 = 0.85 - 0.0 x2 = 0.85 + 0.0 if x <= x1: return 1.0 if x <= x2: return 1.0 - (x - x1) / (x2 - x1) return 0.0 @jit(nopython=True) def _B16_GT_0(x): # B16.GT_0: gt(0.0) if 0.0 == 0.0: return 1.0 if x > 0.0 else 0.0 x1 = 0.0 - 0.0 x2 = 0.0 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _B19_GT_015(x): # B19.GT_015: gt(0.15) if 0.0 == 0.0: return 1.0 if x > 0.15 else 0.0 x1 = 0.15 - 0.0 x2 = 0.15 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _BSum_GT_011(x): # BSum.GT_011: gt(0.11) if 0.0 == 0.0: return 1.0 if x > 0.11 else 0.0 x1 = 0.11 - 0.0 x2 = 0.11 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _BSum_GT_013(x): # BSum.GT_013: gt(0.13) if 0.0 == 0.0: return 1.0 if x > 0.13 else 0.0 x1 = 0.13 - 0.0 x2 = 0.13 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _BSum_GT_016(x): # BSum.GT_016: gt(0.16) if 0.0 == 0.0: return 1.0 if x > 0.16 else 0.0 x1 = 0.16 - 0.0 x2 = 0.16 + 0.0 if x <= x1: return 0.0 if x <= x2: return (x - x1) / (x2 - x1) return 1.0 @jit(nopython=True) def _Class_FALSE(x): # Class.FALSE: false() return 0.0 @jit(nopython=True) def _Class_TRUE(x): # Class.TRUE: true() return 1.0 _InputsSpec = [ ("b1", float64[:]), ("b2", float64[:]), ("b3", float64[:]), ("b4", float64[:]), ("b5", float64[:]), ("b6", float64[:]), ("b7", float64[:]), ("b8", float64[:]), ("b12", float64[:]), ("b13", float64[:]), ("b14", float64[:]), ("b15", float64[:]), ("b16", float64[:]), ("b19", float64[:]), ("b100", float64[:]), ("bsum", float64[:]), ] @jitclass(_InputsSpec) class Inputs: def __init__(self, size: int): self.b1 = np.zeros(size, dtype=np.float64) self.b2 = np.zeros(size, dtype=np.float64) self.b3 = np.zeros(size, dtype=np.float64) self.b4 = np.zeros(size, dtype=np.float64) self.b5 = np.zeros(size, dtype=np.float64) self.b6 = np.zeros(size, dtype=np.float64) self.b7 = np.zeros(size, dtype=np.float64) self.b8 = np.zeros(size, dtype=np.float64) self.b12 = np.zeros(size, dtype=np.float64) self.b13 = np.zeros(size, dtype=np.float64) self.b14 = np.zeros(size, dtype=np.float64) self.b15 = np.zeros(size, dtype=np.float64) self.b16 = np.zeros(size, dtype=np.float64) self.b19 = np.zeros(size, dtype=np.float64) self.b100 = np.zeros(size, dtype=np.float64) self.bsum = np.zeros(size, dtype=np.float64) _OutputsSpec = [ ("nodata", float64[:]), ("Wasser", float64[:]), ("Schill", float64[:]), ("Muschel", float64[:]), ("dense2", float64[:]), ("dense1", float64[:]), ("Strand", float64[:]), ("Sand", float64[:]), ("Misch", float64[:]), ("Misch2", float64[:]), ("Schlick", float64[:]), ("schlick_t", float64[:]), ("Wasser2", float64[:]), ] @jitclass(_OutputsSpec) class Outputs: def __init__(self, size: int): self.nodata = np.zeros(size, dtype=np.float64) self.Wasser = np.zeros(size, dtype=np.float64) self.Schill = np.zeros(size, dtype=np.float64) self.Muschel = np.zeros(size, dtype=np.float64) self.dense2 = np.zeros(size, dtype=np.float64) self.dense1 = np.zeros(size, dtype=np.float64) self.Strand = np.zeros(size, dtype=np.float64) self.Sand = np.zeros(size, dtype=np.float64) self.Misch = np.zeros(size, dtype=np.float64) self.Misch2 = np.zeros(size, dtype=np.float64) self.Schlick = np.zeros(size, dtype=np.float64) self.schlick_t = np.zeros(size, dtype=np.float64) self.Wasser2 = np.zeros(size, dtype=np.float64) 
@jit(nopython=True) def apply_rules(inputs: Inputs, outputs: Outputs): for i in range(len(outputs.nodata)): t0 = 1.0 # if b4 is NODATA: t1 = min(t0, _B4_NODATA(inputs.b4[i])) # nodata = TRUE outputs.nodata[i] = t1 # else: t1 = min(t0, 1.0 - t1) # if (b8 is GT_033 and b1 is LT_085) or b8 is LT_009: t2 = min(t1, max(min(_B8_GT_033(inputs.b8[i]), _B1_LT_085(inputs.b1[i])), _B8_LT_009(inputs.b8[i]))) # if b5 is LT_01: t3 = min(t2, _B5_LT_01(inputs.b5[i])) # Wasser = TRUE outputs.Wasser[i] = t3 # else: t3 = min(t2, 1.0 - t3) # if (b19 is GT_015 and (b8 is GT_04 and b8 is LT_085) and b7 is LT_05) or (b8 is GT_04 and bsum is GT_011) or (b8 is GT_035 and bsum is GT_016): t4 = min(t3, max(max(min(min(_B19_GT_015(inputs.b19[i]), min(_B8_GT_04(inputs.b8[i]), _B8_LT_085(inputs.b8[i]))), _B7_LT_05(inputs.b7[i])), min(_B8_GT_04(inputs.b8[i]), _BSum_GT_011(inputs.bsum[i]))), min(_B8_GT_035(inputs.b8[i]), _BSum_GT_016(inputs.bsum[i])))) # if bsum is GT_013: t5 = min(t4, _BSum_GT_013(inputs.bsum[i])) # Schill = TRUE outputs.Schill[i] = t5 # else: t5 = min(t4, 1.0 - t5) # Muschel = TRUE outputs.Muschel[i] = t5 # else: t4 = min(t3, 1.0 - t4) # if b8 is GT_045: t5 = min(t4, _B8_GT_045(inputs.b8[i])) # dense2 = TRUE outputs.dense2[i] = t5 # else: t5 = min(t4, 1.0 - t5) # dense1 = TRUE outputs.dense1[i] = t5 # else: t2 = min(t1, 1.0 - t2) # if b1 is GT_1: t3 = min(t2, _B1_GT_1(inputs.b1[i])) # Strand = TRUE outputs.Strand[i] = t3 # else: t3 = min(t2, 1.0 - t3) # if b3 is LT_005: t4 = min(t3, _B3_LT_005(inputs.b3[i])) # Sand = TRUE outputs.Sand[i] = t4 # else: t4 = min(t3, 1.0 - t4) # if b3 is LT_01 and b8 is GT_0: t5 = min(t4, min(_B3_LT_01(inputs.b3[i]), _B8_GT_0(inputs.b8[i]))) # Misch = TRUE outputs.Misch[i] = t5 # else: t5 = min(t4, 1.0 - t5) # if b3 is LT_015 and b8 is GT_0: t6 = min(t5, min(_B3_LT_015(inputs.b3[i]), _B8_GT_0(inputs.b8[i]))) # Misch2 = TRUE outputs.Misch2[i] = t6 # else: t6 = min(t5, 1.0 - t6) # if b3 is LT_02 and b2 is GT_0 and b8 is GT_0: t7 = min(t6, min(min(_B3_LT_02(inputs.b3[i]), _B2_GT_0(inputs.b2[i])), _B8_GT_0(inputs.b8[i]))) # Schlick = TRUE outputs.Schlick[i] = t7 # else: t7 = min(t6, 1.0 - t7) # if b16 is GT_0 and b8 is GT_0: t8 = min(t7, min(_B16_GT_0(inputs.b16[i]), _B8_GT_0(inputs.b8[i]))) # schlick_t = TRUE outputs.schlick_t[i] = t8 # else: t8 = min(t7, 1.0 - t8) # Wasser2 = TRUE outputs.Wasser2[i] = t8
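# Illustrative usage, not part of the generated module: classify a single
# pixel by filling the spectral inputs, running the fuzzy rules, and
# reading the class memberships. The band values are made up. Never
# called at import.
def _sketch_classify_one_pixel():
    inputs = Inputs(1)
    inputs.b4[0] = 0.2    # non-zero, so the NODATA branch is skipped
    inputs.b8[0] = 0.5    # B8.GT_033 fires ...
    inputs.b1[0] = 0.3    # ... together with B1.LT_085
    inputs.b5[0] = 0.05   # B5.LT_01 fires, selecting the 'Wasser' leaf
    outputs = Outputs(1)
    apply_rules(inputs, outputs)
    # With zero fuzziness the memberships are crisp 0/1 values.
    assert outputs.Wasser[0] == 1.0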
mit
-3,180,541,234,268,725,000
-1,778,537,509,760,328,400
24.647303
270
0.44928
false
blacklin/kbengine
kbe/src/lib/python/Lib/pickle.py
67
55641
"""Create portable serialized representations of Python objects. See module copyreg for a mechanism for registering custom picklers. See module pickletools source for extensive comments. Classes: Pickler Unpickler Functions: dump(object, file) dumps(object) -> string load(file) -> object loads(string) -> object Misc variables: __version__ format_version compatible_formats """ from types import FunctionType from copyreg import dispatch_table from copyreg import _extension_registry, _inverted_registry, _extension_cache from itertools import islice import sys from sys import maxsize from struct import pack, unpack import re import io import codecs import _compat_pickle __all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler", "Unpickler", "dump", "dumps", "load", "loads"] # Shortcut for use in isinstance testing bytes_types = (bytes, bytearray) # These are purely informational; no code uses these. format_version = "4.0" # File format version we write compatible_formats = ["1.0", # Original protocol 0 "1.1", # Protocol 0 with INST added "1.2", # Original protocol 1 "1.3", # Protocol 1 with BINFLOAT added "2.0", # Protocol 2 "3.0", # Protocol 3 "4.0", # Protocol 4 ] # Old format versions we can read # This is the highest protocol number we know how to read. HIGHEST_PROTOCOL = 4 # The protocol we write by default. May be less than HIGHEST_PROTOCOL. # We intentionally write a protocol that Python 2.x cannot read; # there are too many issues with that. DEFAULT_PROTOCOL = 3 class PickleError(Exception): """A common base class for the other pickling exceptions.""" pass class PicklingError(PickleError): """This exception is raised when an unpicklable object is passed to the dump() method. """ pass class UnpicklingError(PickleError): """This exception is raised when there is a problem unpickling an object, such as a security violation. Note that other exceptions may also be raised during unpickling, including (but not necessarily limited to) AttributeError, EOFError, ImportError, and IndexError. """ pass # An instance of _Stop is raised by Unpickler.load_stop() in response to # the STOP opcode, passing the object that is the result of unpickling. class _Stop(Exception): def __init__(self, value): self.value = value # Jython has PyStringMap; it's a dict subclass with string keys try: from org.python.core import PyStringMap except ImportError: PyStringMap = None # Pickle opcodes. See pickletools.py for extensive docs. The listing # here is in kind-of alphabetical order of 1-character pickle code. # pickletools groups them by purpose. MARK = b'(' # push special markobject on stack STOP = b'.' 
# every pickle ends with STOP POP = b'0' # discard topmost stack item POP_MARK = b'1' # discard stack top through topmost markobject DUP = b'2' # duplicate top stack item FLOAT = b'F' # push float object; decimal string argument INT = b'I' # push integer or bool; decimal string argument BININT = b'J' # push four-byte signed int BININT1 = b'K' # push 1-byte unsigned int LONG = b'L' # push long; decimal string argument BININT2 = b'M' # push 2-byte unsigned int NONE = b'N' # push None PERSID = b'P' # push persistent object; id is taken from string arg BINPERSID = b'Q' # " " " ; " " " " stack REDUCE = b'R' # apply callable to argtuple, both on stack STRING = b'S' # push string; NL-terminated string argument BINSTRING = b'T' # push string; counted binary string argument SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument BINUNICODE = b'X' # " " " ; counted UTF-8 string argument APPEND = b'a' # append stack top to list below it BUILD = b'b' # call __setstate__ or __dict__.update() GLOBAL = b'c' # push self.find_class(modname, name); 2 string args DICT = b'd' # build a dict from stack items EMPTY_DICT = b'}' # push empty dict APPENDS = b'e' # extend list on stack by topmost stack slice GET = b'g' # push item from memo on stack; index is string arg BINGET = b'h' # " " " " " " ; " " 1-byte arg INST = b'i' # build & push class instance LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg LIST = b'l' # build list from topmost stack items EMPTY_LIST = b']' # push empty list OBJ = b'o' # build & push class instance PUT = b'p' # store stack top in memo; index is string arg BINPUT = b'q' # " " " " " ; " " 1-byte arg LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg SETITEM = b's' # add key+value pair to dict TUPLE = b't' # build tuple from topmost stack items EMPTY_TUPLE = b')' # push empty tuple SETITEMS = b'u' # modify dict by adding topmost key+value pairs BINFLOAT = b'G' # push float; arg is 8-byte float encoding TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py # Protocol 2 PROTO = b'\x80' # identify pickle protocol NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple EXT1 = b'\x82' # push object from extension registry; 1-byte index EXT2 = b'\x83' # ditto, but 2-byte index EXT4 = b'\x84' # ditto, but 4-byte index TUPLE1 = b'\x85' # build 1-tuple from stack top TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items NEWTRUE = b'\x88' # push True NEWFALSE = b'\x89' # push False LONG1 = b'\x8a' # push long from < 256 bytes LONG4 = b'\x8b' # push really big long _tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] # Protocol 3 (Python 3.x) BINBYTES = b'B' # push bytes; counted binary string argument SHORT_BINBYTES = b'C' # " " ; " " " " < 256 bytes # Protocol 4 SHORT_BINUNICODE = b'\x8c' # push short string; UTF-8 length < 256 bytes BINUNICODE8 = b'\x8d' # push very long string BINBYTES8 = b'\x8e' # push very long bytes string EMPTY_SET = b'\x8f' # push empty set on the stack ADDITEMS = b'\x90' # modify set by adding topmost stack items FROZENSET = b'\x91' # build frozenset from topmost stack items NEWOBJ_EX = b'\x92' # like NEWOBJ but work with keyword only arguments STACK_GLOBAL = b'\x93' # same as GLOBAL but using names on the stacks MEMOIZE = b'\x94' # store top of the stack in memo FRAME = b'\x95' # indicate the beginning of a new frame 
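# Editor's note (added): as a concrete example of how these opcodes compose,
# pickle.dumps(1, protocol=2) produces b'\x80\x02K\x01.' -- PROTO 2
# (b'\x80\x02'), then BININT1 with argument 1 (b'K\x01'), then STOP (b'.').
# pickletools.dis() prints such a decoding for any pickle stream.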
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)]) class _Framer: _FRAME_SIZE_TARGET = 64 * 1024 def __init__(self, file_write): self.file_write = file_write self.current_frame = None def start_framing(self): self.current_frame = io.BytesIO() def end_framing(self): if self.current_frame and self.current_frame.tell() > 0: self.commit_frame(force=True) self.current_frame = None def commit_frame(self, force=False): if self.current_frame: f = self.current_frame if f.tell() >= self._FRAME_SIZE_TARGET or force: with f.getbuffer() as data: n = len(data) write = self.file_write write(FRAME) write(pack("<Q", n)) write(data) f.seek(0) f.truncate() def write(self, data): if self.current_frame: return self.current_frame.write(data) else: return self.file_write(data) class _Unframer: def __init__(self, file_read, file_readline, file_tell=None): self.file_read = file_read self.file_readline = file_readline self.current_frame = None def read(self, n): if self.current_frame: data = self.current_frame.read(n) if not data and n != 0: self.current_frame = None return self.file_read(n) if len(data) < n: raise UnpicklingError( "pickle exhausted before end of frame") return data else: return self.file_read(n) def readline(self): if self.current_frame: data = self.current_frame.readline() if not data: self.current_frame = None return self.file_readline() if data[-1] != b'\n': raise UnpicklingError( "pickle exhausted before end of frame") return data else: return self.file_readline() def load_frame(self, frame_size): if self.current_frame and self.current_frame.read() != b'': raise UnpicklingError( "beginning of a new frame before end of current frame") self.current_frame = io.BytesIO(self.file_read(frame_size)) # Tools used for pickling. def _getattribute(obj, name, allow_qualname=False): dotted_path = name.split(".") if not allow_qualname and len(dotted_path) > 1: raise AttributeError("Can't get qualified attribute {!r} on {!r}; " + "use protocols >= 4 to enable support" .format(name, obj)) for subpath in dotted_path: if subpath == '<locals>': raise AttributeError("Can't get local attribute {!r} on {!r}" .format(name, obj)) try: obj = getattr(obj, subpath) except AttributeError: raise AttributeError("Can't get attribute {!r} on {!r}" .format(name, obj)) return obj def whichmodule(obj, name, allow_qualname=False): """Find the module an object belong to.""" module_name = getattr(obj, '__module__', None) if module_name is not None: return module_name for module_name, module in sys.modules.items(): if module_name == '__main__' or module is None: continue try: if _getattribute(module, name, allow_qualname) is obj: return module_name except AttributeError: pass return '__main__' def encode_long(x): r"""Encode a long to a two's complement little-endian binary string. Note that 0 is a special case, returning an empty string, to save a byte in the LONG1 pickling context. >>> encode_long(0) b'' >>> encode_long(255) b'\xff\x00' >>> encode_long(32767) b'\xff\x7f' >>> encode_long(-256) b'\x00\xff' >>> encode_long(-32768) b'\x00\x80' >>> encode_long(-128) b'\x80' >>> encode_long(127) b'\x7f' >>> """ if x == 0: return b'' nbytes = (x.bit_length() >> 3) + 1 result = x.to_bytes(nbytes, byteorder='little', signed=True) if x < 0 and nbytes > 1: if result[-1] == 0xff and (result[-2] & 0x80) != 0: result = result[:-1] return result def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. 
>>> decode_long(b'') 0 >>> decode_long(b"\xff\x00") 255 >>> decode_long(b"\xff\x7f") 32767 >>> decode_long(b"\x00\xff") -256 >>> decode_long(b"\x00\x80") -32768 >>> decode_long(b"\x80") -128 >>> decode_long(b"\x7f") 127 """ return int.from_bytes(data, byteorder='little', signed=True) # Pickling machinery class _Pickler: def __init__(self, file, protocol=None, *, fix_imports=True): """This takes a binary file for writing a pickle data stream. The optional *protocol* argument tells the pickler to use the given protocol; supported protocols are 0, 1, 2, 3 and 4. The default protocol is 3; a backward-incompatible protocol designed for Python 3. Specifying a negative protocol version selects the highest protocol version supported. The higher the protocol used, the more recent the version of Python needed to read the pickle produced. The *file* argument must have a write() method that accepts a single bytes argument. It can thus be a file object opened for binary writing, a io.BytesIO instance, or any other custom object that meets this interface. If *fix_imports* is True and *protocol* is less than 3, pickle will try to map the new Python 3 names to the old module names used in Python 2, so that the pickle data stream is readable with Python 2. """ if protocol is None: protocol = DEFAULT_PROTOCOL if protocol < 0: protocol = HIGHEST_PROTOCOL elif not 0 <= protocol <= HIGHEST_PROTOCOL: raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL) try: self._file_write = file.write except AttributeError: raise TypeError("file must have a 'write' attribute") self.framer = _Framer(self._file_write) self.write = self.framer.write self.memo = {} self.proto = int(protocol) self.bin = protocol >= 1 self.fast = 0 self.fix_imports = fix_imports and protocol < 3 def clear_memo(self): """Clears the pickler's "memo". The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is useful when re-using picklers. """ self.memo.clear() def dump(self, obj): """Write a pickled representation of obj to the open file.""" # Check whether Pickler was initialized correctly. This is # only needed to mimic the behavior of _pickle.Pickler.dump(). if not hasattr(self, "_file_write"): raise PicklingError("Pickler.__init__() was not called by " "%s.__init__()" % (self.__class__.__name__,)) if self.proto >= 2: self.write(PROTO + pack("<B", self.proto)) if self.proto >= 4: self.framer.start_framing() self.save(obj) self.write(STOP) self.framer.end_framing() def memoize(self, obj): """Store an object in the memo.""" # The Pickler memo is a dictionary mapping object ids to 2-tuples # that contain the Unpickler memo key and the object being memoized. # The memo key is written to the pickle and will become # the key in the Unpickler's memo. The object is stored in the # Pickler memo so that transient objects are kept alive during # pickling. # The use of the Unpickler memo length as the memo key is just a # convention. The only requirement is that the memo values be unique. # But there appears no advantage to any other scheme, and this # scheme allows the Unpickler memo to be implemented as a plain (but # growable) array, indexed by memo key. if self.fast: return assert id(obj) not in self.memo idx = len(self.memo) self.write(self.put(idx)) self.memo[id(obj)] = idx, obj # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i. 
def put(self, idx): if self.proto >= 4: return MEMOIZE elif self.bin: if idx < 256: return BINPUT + pack("<B", idx) else: return LONG_BINPUT + pack("<I", idx) else: return PUT + repr(idx).encode("ascii") + b'\n' # Return a GET (BINGET, LONG_BINGET) opcode string, with argument i. def get(self, i): if self.bin: if i < 256: return BINGET + pack("<B", i) else: return LONG_BINGET + pack("<I", i) return GET + repr(i).encode("ascii") + b'\n' def save(self, obj, save_persistent_id=True): self.framer.commit_frame() # Check for persistent id (defined by a subclass) pid = self.persistent_id(obj) if pid is not None and save_persistent_id: self.save_pers(pid) return # Check the memo x = self.memo.get(id(obj)) if x is not None: self.write(self.get(x[0])) return # Check the type dispatch table t = type(obj) f = self.dispatch.get(t) if f is not None: f(self, obj) # Call unbound method with explicit self return # Check private dispatch table if any, or else copyreg.dispatch_table reduce = getattr(self, 'dispatch_table', dispatch_table).get(t) if reduce is not None: rv = reduce(obj) else: # Check for a class with a custom metaclass; treat as regular class try: issc = issubclass(t, type) except TypeError: # t is not a class (old Boost; see SF #502085) issc = False if issc: self.save_global(obj) return # Check for a __reduce_ex__ method, fall back to __reduce__ reduce = getattr(obj, "__reduce_ex__", None) if reduce is not None: rv = reduce(self.proto) else: reduce = getattr(obj, "__reduce__", None) if reduce is not None: rv = reduce() else: raise PicklingError("Can't pickle %r object: %r" % (t.__name__, obj)) # Check for string returned by reduce(), meaning "save as global" if isinstance(rv, str): self.save_global(obj, rv) return # Assert that reduce() returned a tuple if not isinstance(rv, tuple): raise PicklingError("%s must return string or tuple" % reduce) # Assert that it returned an appropriately sized tuple l = len(rv) if not (2 <= l <= 5): raise PicklingError("Tuple returned by %s must have " "two to five elements" % reduce) # Save the reduce() output and finally memoize the object self.save_reduce(obj=obj, *rv) def persistent_id(self, obj): # This exists so a subclass can override it return None def save_pers(self, pid): # Save a persistent id reference if self.bin: self.save(pid, save_persistent_id=False) self.write(BINPERSID) else: self.write(PERSID + str(pid).encode("ascii") + b'\n') def save_reduce(self, func, args, state=None, listitems=None, dictitems=None, obj=None): # This API is called by some subclasses if not isinstance(args, tuple): raise PicklingError("args from save_reduce() must be a tuple") if not callable(func): raise PicklingError("func from save_reduce() must be callable") save = self.save write = self.write func_name = getattr(func, "__name__", "") if self.proto >= 4 and func_name == "__newobj_ex__": cls, args, kwargs = args if not hasattr(cls, "__new__"): raise PicklingError("args[0] from {} args has no __new__" .format(func_name)) if obj is not None and cls is not obj.__class__: raise PicklingError("args[0] from {} args has the wrong class" .format(func_name)) save(cls) save(args) save(kwargs) write(NEWOBJ_EX) elif self.proto >= 2 and func_name == "__newobj__": # A __reduce__ implementation can direct protocol 2 or newer to # use the more efficient NEWOBJ opcode, while still # allowing protocol 0 and 1 to work normally. For this to # work, the function returned by __reduce__ should be # called __newobj__, and its first argument should be a # class. 
The implementation for __newobj__ # should be as follows, although pickle has no way to # verify this: # # def __newobj__(cls, *args): # return cls.__new__(cls, *args) # # Protocols 0 and 1 will pickle a reference to __newobj__, # while protocol 2 (and above) will pickle a reference to # cls, the remaining args tuple, and the NEWOBJ code, # which calls cls.__new__(cls, *args) at unpickling time # (see load_newobj below). If __reduce__ returns a # three-tuple, the state from the third tuple item will be # pickled regardless of the protocol, calling __setstate__ # at unpickling time (see load_build below). # # Note that no standard __newobj__ implementation exists; # you have to provide your own. This is to enforce # compatibility with Python 2.2 (pickles written using # protocol 0 or 1 in Python 2.3 should be unpicklable by # Python 2.2). cls = args[0] if not hasattr(cls, "__new__"): raise PicklingError( "args[0] from __newobj__ args has no __new__") if obj is not None and cls is not obj.__class__: raise PicklingError( "args[0] from __newobj__ args has the wrong class") args = args[1:] save(cls) save(args) write(NEWOBJ) else: save(func) save(args) write(REDUCE) if obj is not None: # If the object is already in the memo, this means it is # recursive. In this case, throw away everything we put on the # stack, and fetch the object back from the memo. if id(obj) in self.memo: write(POP + self.get(self.memo[id(obj)][0])) else: self.memoize(obj) # More new special cases (that work with older protocols as # well): when __reduce__ returns a tuple with 4 or 5 items, # the 4th and 5th item should be iterators that provide list # items and dict items (as (key, value) tuples), or None. if listitems is not None: self._batch_appends(listitems) if dictitems is not None: self._batch_setitems(dictitems) if state is not None: save(state) write(BUILD) # Methods below this point are dispatched through the dispatch table dispatch = {} def save_none(self, obj): self.write(NONE) dispatch[type(None)] = save_none def save_bool(self, obj): if self.proto >= 2: self.write(NEWTRUE if obj else NEWFALSE) else: self.write(TRUE if obj else FALSE) dispatch[bool] = save_bool def save_long(self, obj): if self.bin: # If the int is small enough to fit in a signed 4-byte 2's-comp # format, we can store it more efficiently than the general # case. 
# First one- and two-byte unsigned ints: if obj >= 0: if obj <= 0xff: self.write(BININT1 + pack("<B", obj)) return if obj <= 0xffff: self.write(BININT2 + pack("<H", obj)) return # Next check for 4-byte signed ints: if -0x80000000 <= obj <= 0x7fffffff: self.write(BININT + pack("<i", obj)) return if self.proto >= 2: encoded = encode_long(obj) n = len(encoded) if n < 256: self.write(LONG1 + pack("<B", n) + encoded) else: self.write(LONG4 + pack("<i", n) + encoded) return self.write(LONG + repr(obj).encode("ascii") + b'L\n') dispatch[int] = save_long def save_float(self, obj): if self.bin: self.write(BINFLOAT + pack('>d', obj)) else: self.write(FLOAT + repr(obj).encode("ascii") + b'\n') dispatch[float] = save_float def save_bytes(self, obj): if self.proto < 3: if not obj: # bytes object is empty self.save_reduce(bytes, (), obj=obj) else: self.save_reduce(codecs.encode, (str(obj, 'latin1'), 'latin1'), obj=obj) return n = len(obj) if n <= 0xff: self.write(SHORT_BINBYTES + pack("<B", n) + obj) elif n > 0xffffffff and self.proto >= 4: self.write(BINBYTES8 + pack("<Q", n) + obj) else: self.write(BINBYTES + pack("<I", n) + obj) self.memoize(obj) dispatch[bytes] = save_bytes def save_str(self, obj): if self.bin: encoded = obj.encode('utf-8', 'surrogatepass') n = len(encoded) if n <= 0xff and self.proto >= 4: self.write(SHORT_BINUNICODE + pack("<B", n) + encoded) elif n > 0xffffffff and self.proto >= 4: self.write(BINUNICODE8 + pack("<Q", n) + encoded) else: self.write(BINUNICODE + pack("<I", n) + encoded) else: obj = obj.replace("\\", "\\u005c") obj = obj.replace("\n", "\\u000a") self.write(UNICODE + obj.encode('raw-unicode-escape') + b'\n') self.memoize(obj) dispatch[str] = save_str def save_tuple(self, obj): if not obj: # tuple is empty if self.bin: self.write(EMPTY_TUPLE) else: self.write(MARK + TUPLE) return n = len(obj) save = self.save memo = self.memo if n <= 3 and self.proto >= 2: for element in obj: save(element) # Subtle. Same as in the big comment below. if id(obj) in memo: get = self.get(memo[id(obj)][0]) self.write(POP * n + get) else: self.write(_tuplesize2code[n]) self.memoize(obj) return # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple # has more than 3 elements. write = self.write write(MARK) for element in obj: save(element) if id(obj) in memo: # Subtle. d was not in memo when we entered save_tuple(), so # the process of saving the tuple's elements must have saved # the tuple itself: the tuple is recursive. The proper action # now is to throw away everything we put on the stack, and # simply GET the tuple (it's already constructed). This check # could have been done in the "for element" loop instead, but # recursive tuples are a rare thing. get = self.get(memo[id(obj)][0]) if self.bin: write(POP_MARK + get) else: # proto 0 -- POP_MARK not available write(POP * (n+1) + get) return # No recursion. 
write(TUPLE) self.memoize(obj) dispatch[tuple] = save_tuple def save_list(self, obj): if self.bin: self.write(EMPTY_LIST) else: # proto 0 -- can't use EMPTY_LIST self.write(MARK + LIST) self.memoize(obj) self._batch_appends(obj) dispatch[list] = save_list _BATCHSIZE = 1000 def _batch_appends(self, items): # Helper to batch up APPENDS sequences save = self.save write = self.write if not self.bin: for x in items: save(x) write(APPEND) return it = iter(items) while True: tmp = list(islice(it, self._BATCHSIZE)) n = len(tmp) if n > 1: write(MARK) for x in tmp: save(x) write(APPENDS) elif n: save(tmp[0]) write(APPEND) # else tmp is empty, and we're done if n < self._BATCHSIZE: return def save_dict(self, obj): if self.bin: self.write(EMPTY_DICT) else: # proto 0 -- can't use EMPTY_DICT self.write(MARK + DICT) self.memoize(obj) self._batch_setitems(obj.items()) dispatch[dict] = save_dict if PyStringMap is not None: dispatch[PyStringMap] = save_dict def _batch_setitems(self, items): # Helper to batch up SETITEMS sequences; proto >= 1 only save = self.save write = self.write if not self.bin: for k, v in items: save(k) save(v) write(SETITEM) return it = iter(items) while True: tmp = list(islice(it, self._BATCHSIZE)) n = len(tmp) if n > 1: write(MARK) for k, v in tmp: save(k) save(v) write(SETITEMS) elif n: k, v = tmp[0] save(k) save(v) write(SETITEM) # else tmp is empty, and we're done if n < self._BATCHSIZE: return def save_set(self, obj): save = self.save write = self.write if self.proto < 4: self.save_reduce(set, (list(obj),), obj=obj) return write(EMPTY_SET) self.memoize(obj) it = iter(obj) while True: batch = list(islice(it, self._BATCHSIZE)) n = len(batch) if n > 0: write(MARK) for item in batch: save(item) write(ADDITEMS) if n < self._BATCHSIZE: return dispatch[set] = save_set def save_frozenset(self, obj): save = self.save write = self.write if self.proto < 4: self.save_reduce(frozenset, (list(obj),), obj=obj) return write(MARK) for item in obj: save(item) if id(obj) in self.memo: # If the object is already in the memo, this means it is # recursive. In this case, throw away everything we put on the # stack, and fetch the object back from the memo. write(POP_MARK + self.get(self.memo[id(obj)][0])) return write(FROZENSET) self.memoize(obj) dispatch[frozenset] = save_frozenset def save_global(self, obj, name=None): write = self.write memo = self.memo if name is None and self.proto >= 4: name = getattr(obj, '__qualname__', None) if name is None: name = obj.__name__ module_name = whichmodule(obj, name, allow_qualname=self.proto >= 4) try: __import__(module_name, level=0) module = sys.modules[module_name] obj2 = _getattribute(module, name, allow_qualname=self.proto >= 4) except (ImportError, KeyError, AttributeError): raise PicklingError( "Can't pickle %r: it's not found as %s.%s" % (obj, module_name, name)) else: if obj2 is not obj: raise PicklingError( "Can't pickle %r: it's not the same object as %s.%s" % (obj, module_name, name)) if self.proto >= 2: code = _extension_registry.get((module_name, name)) if code: assert code > 0 if code <= 0xff: write(EXT1 + pack("<B", code)) elif code <= 0xffff: write(EXT2 + pack("<H", code)) else: write(EXT4 + pack("<i", code)) return # Non-ASCII identifiers are supported only with protocols >= 3. 
        if self.proto >= 4:
            self.save(module_name)
            self.save(name)
            write(STACK_GLOBAL)
        elif self.proto >= 3:
            write(GLOBAL + bytes(module_name, "utf-8") + b'\n' +
                  bytes(name, "utf-8") + b'\n')
        else:
            if self.fix_imports:
                r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
                r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
                if (module_name, name) in r_name_mapping:
                    module_name, name = r_name_mapping[(module_name, name)]
                if module_name in r_import_mapping:
                    module_name = r_import_mapping[module_name]
            try:
                write(GLOBAL + bytes(module_name, "ascii") + b'\n' +
                      bytes(name, "ascii") + b'\n')
            except UnicodeEncodeError:
                raise PicklingError(
                    "can't pickle global identifier '%s.%s' using "
                    "pickle protocol %i" % (module, name, self.proto))

        self.memoize(obj)

    def save_type(self, obj):
        if obj is type(None):
            return self.save_reduce(type, (None,), obj=obj)
        elif obj is type(NotImplemented):
            return self.save_reduce(type, (NotImplemented,), obj=obj)
        elif obj is type(...):
            return self.save_reduce(type, (...,), obj=obj)
        return self.save_global(obj)

    dispatch[FunctionType] = save_global
    dispatch[type] = save_type


# Unpickling machinery

class _Unpickler:

    def __init__(self, file, *, fix_imports=True,
                 encoding="ASCII", errors="strict"):
        """This takes a binary file for reading a pickle data stream.

        The protocol version of the pickle is detected automatically, so
        no proto argument is needed.

        The argument *file* must have two methods, a read() method that
        takes an integer argument, and a readline() method that requires
        no arguments.  Both methods should return bytes.  Thus *file* can
        be a binary file object opened for reading, an io.BytesIO object,
        or any other custom object that meets this interface.

        Optional keyword arguments are *fix_imports*, *encoding* and
        *errors*, which are used to control compatibility support for
        pickle streams generated by Python 2.  If *fix_imports* is True,
        pickle will try to map the old Python 2 names to the new names
        used in Python 3.  The *encoding* and *errors* tell pickle how
        to decode 8-bit string instances pickled by Python 2; these
        default to 'ASCII' and 'strict', respectively.  *encoding* can be
        'bytes' to read these 8-bit string instances as bytes objects.
        """
        self._file_readline = file.readline
        self._file_read = file.read
        self.memo = {}
        self.encoding = encoding
        self.errors = errors
        self.proto = 0
        self.fix_imports = fix_imports

    def load(self):
        """Read a pickled object representation from the open file.

        Return the reconstituted object hierarchy specified in the file.
        """
        # Check whether Unpickler was initialized correctly. This is
        # only needed to mimic the behavior of _pickle.Unpickler.load().
if not hasattr(self, "_file_read"): raise UnpicklingError("Unpickler.__init__() was not called by " "%s.__init__()" % (self.__class__.__name__,)) self._unframer = _Unframer(self._file_read, self._file_readline) self.read = self._unframer.read self.readline = self._unframer.readline self.mark = object() # any new unique object self.stack = [] self.append = self.stack.append self.proto = 0 read = self.read dispatch = self.dispatch try: while True: key = read(1) if not key: raise EOFError assert isinstance(key, bytes_types) dispatch[key[0]](self) except _Stop as stopinst: return stopinst.value # Return largest index k such that self.stack[k] is self.mark. # If the stack doesn't contain a mark, eventually raises IndexError. # This could be sped by maintaining another stack, of indices at which # the mark appears. For that matter, the latter stack would suffice, # and we wouldn't need to push mark objects on self.stack at all. # Doing so is probably a good thing, though, since if the pickle is # corrupt (or hostile) we may get a clue from finding self.mark embedded # in unpickled objects. def marker(self): stack = self.stack mark = self.mark k = len(stack)-1 while stack[k] is not mark: k = k-1 return k def persistent_load(self, pid): raise UnpicklingError("unsupported persistent id encountered") dispatch = {} def load_proto(self): proto = self.read(1)[0] if not 0 <= proto <= HIGHEST_PROTOCOL: raise ValueError("unsupported pickle protocol: %d" % proto) self.proto = proto dispatch[PROTO[0]] = load_proto def load_frame(self): frame_size, = unpack('<Q', self.read(8)) if frame_size > sys.maxsize: raise ValueError("frame size > sys.maxsize: %d" % frame_size) self._unframer.load_frame(frame_size) dispatch[FRAME[0]] = load_frame def load_persid(self): pid = self.readline()[:-1].decode("ascii") self.append(self.persistent_load(pid)) dispatch[PERSID[0]] = load_persid def load_binpersid(self): pid = self.stack.pop() self.append(self.persistent_load(pid)) dispatch[BINPERSID[0]] = load_binpersid def load_none(self): self.append(None) dispatch[NONE[0]] = load_none def load_false(self): self.append(False) dispatch[NEWFALSE[0]] = load_false def load_true(self): self.append(True) dispatch[NEWTRUE[0]] = load_true def load_int(self): data = self.readline() if data == FALSE[1:]: val = False elif data == TRUE[1:]: val = True else: val = int(data, 0) self.append(val) dispatch[INT[0]] = load_int def load_binint(self): self.append(unpack('<i', self.read(4))[0]) dispatch[BININT[0]] = load_binint def load_binint1(self): self.append(self.read(1)[0]) dispatch[BININT1[0]] = load_binint1 def load_binint2(self): self.append(unpack('<H', self.read(2))[0]) dispatch[BININT2[0]] = load_binint2 def load_long(self): val = self.readline()[:-1] if val and val[-1] == b'L'[0]: val = val[:-1] self.append(int(val, 0)) dispatch[LONG[0]] = load_long def load_long1(self): n = self.read(1)[0] data = self.read(n) self.append(decode_long(data)) dispatch[LONG1[0]] = load_long1 def load_long4(self): n, = unpack('<i', self.read(4)) if n < 0: # Corrupt or hostile pickle -- we never write one like this raise UnpicklingError("LONG pickle has negative byte count") data = self.read(n) self.append(decode_long(data)) dispatch[LONG4[0]] = load_long4 def load_float(self): self.append(float(self.readline()[:-1])) dispatch[FLOAT[0]] = load_float def load_binfloat(self): self.append(unpack('>d', self.read(8))[0]) dispatch[BINFLOAT[0]] = load_binfloat def _decode_string(self, value): # Used to allow strings from Python 2 to be decoded either as # bytes or 
Unicode strings. This should be used only with the # STRING, BINSTRING and SHORT_BINSTRING opcodes. if self.encoding == "bytes": return value else: return value.decode(self.encoding, self.errors) def load_string(self): data = self.readline()[:-1] # Strip outermost quotes if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'': data = data[1:-1] else: raise UnpicklingError("the STRING opcode argument must be quoted") self.append(self._decode_string(codecs.escape_decode(data)[0])) dispatch[STRING[0]] = load_string def load_binstring(self): # Deprecated BINSTRING uses signed 32-bit length len, = unpack('<i', self.read(4)) if len < 0: raise UnpicklingError("BINSTRING pickle has negative byte count") data = self.read(len) self.append(self._decode_string(data)) dispatch[BINSTRING[0]] = load_binstring def load_binbytes(self): len, = unpack('<I', self.read(4)) if len > maxsize: raise UnpicklingError("BINBYTES exceeds system's maximum size " "of %d bytes" % maxsize) self.append(self.read(len)) dispatch[BINBYTES[0]] = load_binbytes def load_unicode(self): self.append(str(self.readline()[:-1], 'raw-unicode-escape')) dispatch[UNICODE[0]] = load_unicode def load_binunicode(self): len, = unpack('<I', self.read(4)) if len > maxsize: raise UnpicklingError("BINUNICODE exceeds system's maximum size " "of %d bytes" % maxsize) self.append(str(self.read(len), 'utf-8', 'surrogatepass')) dispatch[BINUNICODE[0]] = load_binunicode def load_binunicode8(self): len, = unpack('<Q', self.read(8)) if len > maxsize: raise UnpicklingError("BINUNICODE8 exceeds system's maximum size " "of %d bytes" % maxsize) self.append(str(self.read(len), 'utf-8', 'surrogatepass')) dispatch[BINUNICODE8[0]] = load_binunicode8 def load_short_binstring(self): len = self.read(1)[0] data = self.read(len) self.append(self._decode_string(data)) dispatch[SHORT_BINSTRING[0]] = load_short_binstring def load_short_binbytes(self): len = self.read(1)[0] self.append(self.read(len)) dispatch[SHORT_BINBYTES[0]] = load_short_binbytes def load_short_binunicode(self): len = self.read(1)[0] self.append(str(self.read(len), 'utf-8', 'surrogatepass')) dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode def load_tuple(self): k = self.marker() self.stack[k:] = [tuple(self.stack[k+1:])] dispatch[TUPLE[0]] = load_tuple def load_empty_tuple(self): self.append(()) dispatch[EMPTY_TUPLE[0]] = load_empty_tuple def load_tuple1(self): self.stack[-1] = (self.stack[-1],) dispatch[TUPLE1[0]] = load_tuple1 def load_tuple2(self): self.stack[-2:] = [(self.stack[-2], self.stack[-1])] dispatch[TUPLE2[0]] = load_tuple2 def load_tuple3(self): self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])] dispatch[TUPLE3[0]] = load_tuple3 def load_empty_list(self): self.append([]) dispatch[EMPTY_LIST[0]] = load_empty_list def load_empty_dictionary(self): self.append({}) dispatch[EMPTY_DICT[0]] = load_empty_dictionary def load_empty_set(self): self.append(set()) dispatch[EMPTY_SET[0]] = load_empty_set def load_frozenset(self): k = self.marker() self.stack[k:] = [frozenset(self.stack[k+1:])] dispatch[FROZENSET[0]] = load_frozenset def load_list(self): k = self.marker() self.stack[k:] = [self.stack[k+1:]] dispatch[LIST[0]] = load_list def load_dict(self): k = self.marker() items = self.stack[k+1:] d = {items[i]: items[i+1] for i in range(0, len(items), 2)} self.stack[k:] = [d] dispatch[DICT[0]] = load_dict # INST and OBJ differ only in how they get a class object. 
It's not # only sensible to do the rest in a common routine, the two routines # previously diverged and grew different bugs. # klass is the class to instantiate, and k points to the topmost mark # object, following which are the arguments for klass.__init__. def _instantiate(self, klass, k): args = tuple(self.stack[k+1:]) del self.stack[k:] if (args or not isinstance(klass, type) or hasattr(klass, "__getinitargs__")): try: value = klass(*args) except TypeError as err: raise TypeError("in constructor for %s: %s" % (klass.__name__, str(err)), sys.exc_info()[2]) else: value = klass.__new__(klass) self.append(value) def load_inst(self): module = self.readline()[:-1].decode("ascii") name = self.readline()[:-1].decode("ascii") klass = self.find_class(module, name) self._instantiate(klass, self.marker()) dispatch[INST[0]] = load_inst def load_obj(self): # Stack is ... markobject classobject arg1 arg2 ... k = self.marker() klass = self.stack.pop(k+1) self._instantiate(klass, k) dispatch[OBJ[0]] = load_obj def load_newobj(self): args = self.stack.pop() cls = self.stack.pop() obj = cls.__new__(cls, *args) self.append(obj) dispatch[NEWOBJ[0]] = load_newobj def load_newobj_ex(self): kwargs = self.stack.pop() args = self.stack.pop() cls = self.stack.pop() obj = cls.__new__(cls, *args, **kwargs) self.append(obj) dispatch[NEWOBJ_EX[0]] = load_newobj_ex def load_global(self): module = self.readline()[:-1].decode("utf-8") name = self.readline()[:-1].decode("utf-8") klass = self.find_class(module, name) self.append(klass) dispatch[GLOBAL[0]] = load_global def load_stack_global(self): name = self.stack.pop() module = self.stack.pop() if type(name) is not str or type(module) is not str: raise UnpicklingError("STACK_GLOBAL requires str") self.append(self.find_class(module, name)) dispatch[STACK_GLOBAL[0]] = load_stack_global def load_ext1(self): code = self.read(1)[0] self.get_extension(code) dispatch[EXT1[0]] = load_ext1 def load_ext2(self): code, = unpack('<H', self.read(2)) self.get_extension(code) dispatch[EXT2[0]] = load_ext2 def load_ext4(self): code, = unpack('<i', self.read(4)) self.get_extension(code) dispatch[EXT4[0]] = load_ext4 def get_extension(self, code): nil = [] obj = _extension_cache.get(code, nil) if obj is not nil: self.append(obj) return key = _inverted_registry.get(code) if not key: if code <= 0: # note that 0 is forbidden # Corrupt or hostile pickle. raise UnpicklingError("EXT specifies code <= 0") raise ValueError("unregistered extension code %d" % code) obj = self.find_class(*key) _extension_cache[code] = obj self.append(obj) def find_class(self, module, name): # Subclasses may override this. 
if self.proto < 3 and self.fix_imports: if (module, name) in _compat_pickle.NAME_MAPPING: module, name = _compat_pickle.NAME_MAPPING[(module, name)] if module in _compat_pickle.IMPORT_MAPPING: module = _compat_pickle.IMPORT_MAPPING[module] __import__(module, level=0) return _getattribute(sys.modules[module], name, allow_qualname=self.proto >= 4) def load_reduce(self): stack = self.stack args = stack.pop() func = stack[-1] try: value = func(*args) except: print(sys.exc_info()) print(func, args) raise stack[-1] = value dispatch[REDUCE[0]] = load_reduce def load_pop(self): del self.stack[-1] dispatch[POP[0]] = load_pop def load_pop_mark(self): k = self.marker() del self.stack[k:] dispatch[POP_MARK[0]] = load_pop_mark def load_dup(self): self.append(self.stack[-1]) dispatch[DUP[0]] = load_dup def load_get(self): i = int(self.readline()[:-1]) self.append(self.memo[i]) dispatch[GET[0]] = load_get def load_binget(self): i = self.read(1)[0] self.append(self.memo[i]) dispatch[BINGET[0]] = load_binget def load_long_binget(self): i, = unpack('<I', self.read(4)) self.append(self.memo[i]) dispatch[LONG_BINGET[0]] = load_long_binget def load_put(self): i = int(self.readline()[:-1]) if i < 0: raise ValueError("negative PUT argument") self.memo[i] = self.stack[-1] dispatch[PUT[0]] = load_put def load_binput(self): i = self.read(1)[0] if i < 0: raise ValueError("negative BINPUT argument") self.memo[i] = self.stack[-1] dispatch[BINPUT[0]] = load_binput def load_long_binput(self): i, = unpack('<I', self.read(4)) if i > maxsize: raise ValueError("negative LONG_BINPUT argument") self.memo[i] = self.stack[-1] dispatch[LONG_BINPUT[0]] = load_long_binput def load_memoize(self): memo = self.memo memo[len(memo)] = self.stack[-1] dispatch[MEMOIZE[0]] = load_memoize def load_append(self): stack = self.stack value = stack.pop() list = stack[-1] list.append(value) dispatch[APPEND[0]] = load_append def load_appends(self): stack = self.stack mark = self.marker() list_obj = stack[mark - 1] items = stack[mark + 1:] if isinstance(list_obj, list): list_obj.extend(items) else: append = list_obj.append for item in items: append(item) del stack[mark:] dispatch[APPENDS[0]] = load_appends def load_setitem(self): stack = self.stack value = stack.pop() key = stack.pop() dict = stack[-1] dict[key] = value dispatch[SETITEM[0]] = load_setitem def load_setitems(self): stack = self.stack mark = self.marker() dict = stack[mark - 1] for i in range(mark + 1, len(stack), 2): dict[stack[i]] = stack[i + 1] del stack[mark:] dispatch[SETITEMS[0]] = load_setitems def load_additems(self): stack = self.stack mark = self.marker() set_obj = stack[mark - 1] items = stack[mark + 1:] if isinstance(set_obj, set): set_obj.update(items) else: add = set_obj.add for item in items: add(item) del stack[mark:] dispatch[ADDITEMS[0]] = load_additems def load_build(self): stack = self.stack state = stack.pop() inst = stack[-1] setstate = getattr(inst, "__setstate__", None) if setstate is not None: setstate(state) return slotstate = None if isinstance(state, tuple) and len(state) == 2: state, slotstate = state if state: inst_dict = inst.__dict__ intern = sys.intern for k, v in state.items(): if type(k) is str: inst_dict[intern(k)] = v else: inst_dict[k] = v if slotstate: for k, v in slotstate.items(): setattr(inst, k, v) dispatch[BUILD[0]] = load_build def load_mark(self): self.append(self.mark) dispatch[MARK[0]] = load_mark def load_stop(self): value = self.stack.pop() raise _Stop(value) dispatch[STOP[0]] = load_stop # Shorthands def _dump(obj, file, 
protocol=None, *, fix_imports=True): _Pickler(file, protocol, fix_imports=fix_imports).dump(obj) def _dumps(obj, protocol=None, *, fix_imports=True): f = io.BytesIO() _Pickler(f, protocol, fix_imports=fix_imports).dump(obj) res = f.getvalue() assert isinstance(res, bytes_types) return res def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict"): return _Unpickler(file, fix_imports=fix_imports, encoding=encoding, errors=errors).load() def _loads(s, *, fix_imports=True, encoding="ASCII", errors="strict"): if isinstance(s, str): raise TypeError("Can't load pickle from unicode string") file = io.BytesIO(s) return _Unpickler(file, fix_imports=fix_imports, encoding=encoding, errors=errors).load() # Use the faster _pickle if possible try: from _pickle import ( PickleError, PicklingError, UnpicklingError, Pickler, Unpickler, dump, dumps, load, loads ) except ImportError: Pickler, Unpickler = _Pickler, _Unpickler dump, dumps, load, loads = _dump, _dumps, _load, _loads # Doctest def _test(): import doctest return doctest.testmod() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser( description='display contents of the pickle files') parser.add_argument( 'pickle_file', type=argparse.FileType('br'), nargs='*', help='the pickle file') parser.add_argument( '-t', '--test', action='store_true', help='run self-test suite') parser.add_argument( '-v', action='store_true', help='run verbosely; only affects self-test run') args = parser.parse_args() if args.test: _test() else: if not args.pickle_file: parser.print_help() else: import pprint for f in args.pickle_file: obj = load(f) pprint.pprint(obj)
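
# --- Editor-added usage sketch (not part of the original module) ---
# A minimal round trip through the shorthands defined above, wrapped in a
# function so nothing executes at import time.
def _roundtrip_demo():
    buf = io.BytesIO()
    _dump({"a": [1, 2.0, None]}, buf)          # serialize to a binary stream
    buf.seek(0)
    assert _load(buf) == {"a": [1, 2.0, None]}  # deserialize and compare
    assert _loads(_dumps("x")) == "x"           # in-memory bytes round trip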
lgpl-3.0
3,342,829,208,354,894,000
-3,633,344,518,016,829,400
33.797373
80
0.552344
false
rajashreer7/autotest-client-tests
linux-tools/libpwquality/libpwquality.py
4
1206
#!/bin/python

import os, subprocess
import logging

from autotest.client import test
from autotest.client.shared import error


class libpwquality(test.test):

    """
    Autotest module for testing basic
    functionality of libpwquality

    @author Tejaswini Sambamurthy <tejaswin.linux.vnet.ibm.com>
    """
    version = 1
    nfail = 0
    path = ''

    def initialize(self):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        logging.info('\n Test initialized successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            os.environ["LTPBIN"] = "%s/shared" % (test_path)
            ret_val = subprocess.call(test_path + '/libpwquality' +
                                      '/libpwquality.sh', shell=True)
            if ret_val != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
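
# --- Editor-added illustration (not part of the original test) ---
# run_once() relies on subprocess.call returning the child's exit status;
# any nonzero status bumps the failure counter. For example:
#
#     ret_val = subprocess.call("exit 3", shell=True)
#     # ret_val == 3, so the test would record a failure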
gpl-2.0
2,183,773,138,056,051,200
-1,138,478,227,677,722,500
24.659574
99
0.569652
false
pfouque/deezer-python
deezer/tests/test_resources.py
1
9053
# -*- coding: utf-8 -*- import json import unittest from types import GeneratorType import deezer from mock import patch from .mocked_methods import fake_urlopen class TestResources(unittest.TestCase): def setUp(self): self.patcher = patch('deezer.client.urlopen', fake_urlopen) self.patcher.start() def tearDown(self): self.patcher.stop() def test_resource_dict(self): """ Test that resource can be converted to dict """ client = deezer.Client() response = fake_urlopen(client.object_url('track', 3135556)) resp_str = response.read().decode('utf-8') response.close() data = json.loads(resp_str) resource = deezer.resources.Resource(client, data) self.assertEqual(resource.asdict(), data) def test_resource_relation(self): """ Test passing parent object when using get_relation """ client = deezer.Client() album = client.get_album(302127) tracks = album.get_tracks() self.assertTrue(tracks[0].album is album) def test_album_attributes(self): """ Test album resource """ client = deezer.Client() album = client.get_album(302127) self.assertTrue(hasattr(album, 'title')) self.assertEqual(repr(album), '<Album: Discovery>') artist = album.get_artist() self.assertIsInstance(artist, deezer.resources.Artist) self.assertEqual(repr(artist), '<Artist: Daft Punk>') def test_album_tracks(self): """ Test tracks method of album resource """ client = deezer.Client() album = client.get_album(302127) tracks = album.get_tracks() self.assertIsInstance(tracks, list) track = tracks[0] self.assertIsInstance(track, deezer.resources.Track) self.assertEqual(repr(track), '<Track: One More Time>') self.assertEqual(type(album.iter_tracks()), GeneratorType) track = list(album.iter_tracks())[0] self.assertIsInstance(track, deezer.resources.Track) def test_artist_attributes(self): """ Test artist resource """ client = deezer.Client() artist = client.get_artist(27) self.assertTrue(hasattr(artist, 'name')) self.assertIsInstance(artist, deezer.resources.Artist) self.assertEqual(repr(artist), '<Artist: Daft Punk>') def test_artist_albums(self): """ Test albums method of artist resource """ client = deezer.Client() artist = client.get_artist(27) albums = artist.get_albums() self.assertIsInstance(albums, list) album = albums[0] self.assertIsInstance(album, deezer.resources.Album) self.assertEqual(repr(album), '<Album: Human After All (Remixes) (Remixes)>') self.assertEqual(type(artist.iter_albums()), GeneratorType) def test_artist_top(self): """ Test top method of artist resource """ client = deezer.Client() artist = client.get_artist(27) tracks = artist.get_top() self.assertIsInstance(tracks, list) track = tracks[0] self.assertIsInstance(track, deezer.resources.Track) self.assertEqual(repr(track), '<Track: Get Lucky (Radio Edit)>') def test_artist_radio(self): """ Test radio method of artist resource """ client = deezer.Client() artist = client.get_artist(27) tracks = artist.get_radio() self.assertIsInstance(tracks, list) track = tracks[0] self.assertIsInstance(track, deezer.resources.Track) self.assertEqual(repr(track), '<Track: Lose Yourself to Dance>') def test_artist_related(self): """ Test related method of artist resource """ client = deezer.Client() artist = client.get_artist(27) artists = artist.get_related() self.assertIsInstance(artists, list) artist = artists[0] self.assertIsInstance(artist, deezer.resources.Artist) self.assertEqual(repr(artist), '<Artist: Justice>') self.assertEqual(type(artist.iter_related()), GeneratorType) def test_track_attributes(self): """ Test track resource """ client = deezer.Client() track = 
client.get_track(3135556) artist = track.get_artist() album = track.get_album() self.assertTrue(hasattr(track, 'title')) self.assertIsInstance(track, deezer.resources.Track) self.assertIsInstance(artist, deezer.resources.Artist) self.assertIsInstance(album, deezer.resources.Album) self.assertEqual(repr(track), '<Track: Harder Better Faster Stronger>') self.assertEqual(repr(artist), '<Artist: Daft Punk>') self.assertEqual(repr(album), '<Album: Discovery>') def test_radio_attributes(self): """ Test radio resource """ client = deezer.Client() radio = client.get_radio(23261) self.assertTrue(hasattr(radio, 'title')) self.assertIsInstance(radio, deezer.resources.Radio) self.assertEqual(repr(radio), '<Radio: Telegraph Classical>') def test_radio_tracks(self): """ Test tracks method of radio resource """ client = deezer.Client() radio = client.get_radio(23261) tracks = radio.get_tracks() self.assertIsInstance(tracks, list) track = tracks[2] self.assertIsInstance(track, deezer.resources.Track) self.assertEqual(repr(track), '<Track: Schumann: Kinderszenen, Op.15 - 11. Fürchtenmachen>') self.assertEqual(type(radio.iter_tracks()), GeneratorType) def test_genre_attributes(self): """ Test genre resource """ client = deezer.Client() genre = client.get_genre(106) self.assertTrue(hasattr(genre, 'name')) self.assertIsInstance(genre, deezer.resources.Genre) self.assertEqual(repr(genre), '<Genre: Electro>') def test_genre_artists(self): """ Test artists method of genre resource """ client = deezer.Client() genre = client.get_genre(106) artists = genre.get_artists() self.assertIsInstance(artists, list) artist = artists[0] self.assertIsInstance(artist, deezer.resources.Artist) self.assertEqual(repr(artist), '<Artist: Calvin Harris>') self.assertEqual(type(genre.iter_artists()), GeneratorType) def test_genre_radios(self): """ Test radios method of genre resource """ client = deezer.Client() genre = client.get_genre(106) radios = genre.get_radios() self.assertIsInstance(radios, list) radio = radios[0] self.assertIsInstance(radio, deezer.resources.Radio) self.assertEqual(repr(radio), '<Radio: Techno/House>') self.assertEqual(type(genre.iter_radios()), GeneratorType) def test_chart_tracks(self): """ Test tracks method of chart resource """ client = deezer.Client() chart = client.get_chart() tracks = chart.get_tracks() self.assertIsInstance(tracks, list) track = tracks[0] self.assertIsInstance(track, deezer.resources.Track) self.assertEqual(repr(track), '<Track: Starboy>') self.assertEqual(type(chart.iter_tracks()), GeneratorType) def test_chart_artists(self): """ Test artists method of chart resource """ client = deezer.Client() chart = client.get_chart() artists = chart.get_artists() self.assertIsInstance(artists, list) artist = artists[0] self.assertIsInstance(artist, deezer.resources.Artist) self.assertEqual(repr(artist), '<Artist: Pnl>') self.assertEqual(type(chart.iter_artists()), GeneratorType) def test_chart_albums(self): """ Test albums method of chart resource """ client = deezer.Client() chart = client.get_chart() albums = chart.get_albums() self.assertIsInstance(albums, list) album = albums[0] self.assertIsInstance(album, deezer.resources.Album) self.assertEqual(repr(album), "<Album: Where Is l'album de Gradur>") self.assertEqual(type(chart.iter_albums()), GeneratorType) def test_chart_playlists(self): """ Test playlists method of chart resource """ client = deezer.Client() chart = client.get_chart() playlists = chart.get_playlists() self.assertIsInstance(playlists, list) playlist = playlists[0] 
self.assertIsInstance(playlist, deezer.resources.Playlist) self.assertEqual(repr(playlist), "<Playlist: Top France>") self.assertEqual(type(chart.iter_playlists()), GeneratorType)
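
# --- Editor-added usage sketch (not part of the test suite) ---
# Mirrors the client calls exercised above; the IDs come from the mocked
# fixtures, so running this for real would hit the live Deezer API instead.
if __name__ == '__main__':
    client = deezer.Client()
    album = client.get_album(302127)       # <Album: Discovery>
    for track in album.iter_tracks():      # generator of Track resources
        print(track.title)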
mit
-214,660,163,311,572,350
9,096,429,597,451,903,000
34.498039
100
0.610252
false
gsnedders/presto-testo
wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/demo/broadcast/broadcast_server.py
4
2351
###############################################################################
##
##  Copyright 2011 Tavendo GmbH
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
###############################################################################

import sys

from twisted.internet import reactor
from twisted.python import log

from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS


class BroadcastServerProtocol(WebSocketServerProtocol):

    def onOpen(self):
        self.factory.register(self)

    def onMessage(self, msg, binary):
        if not binary:
            self.factory.broadcast("received message '%s' from %s" % (msg, self.peerstr))

    def connectionLost(self, reason):
        WebSocketServerProtocol.connectionLost(self, reason)
        self.factory.unregister(self)


class BroadcastServerFactory(WebSocketServerFactory):

    protocol = BroadcastServerProtocol

    def __init__(self, url):
        WebSocketServerFactory.__init__(self, url)
        self.clients = []
        self.tickcount = 0
        self.tick()

    def tick(self):
        self.tickcount += 1
        self.broadcast("tick %d" % self.tickcount)
        reactor.callLater(1, self.tick)

    def register(self, client):
        if not client in self.clients:
            print "registered client " + client.peerstr
            self.clients.append(client)

    def unregister(self, client):
        if client in self.clients:
            print "unregistered client " + client.peerstr
            self.clients.remove(client)

    def broadcast(self, msg):
        print "broadcasting message '%s' .." % msg
        for c in self.clients:
            print "send to " + c.peerstr
            c.sendMessage(msg)


if __name__ == '__main__':
    log.startLogging(sys.stdout)
    factory = BroadcastServerFactory("ws://localhost:9000")
    listenWS(factory)
    reactor.run()
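
# --- Editor-added client sketch for the broadcast server above ---
# Assumption: WebSocketClientProtocol and connectWS are available in this
# era's Autobahn 0.x API (they were the client-side counterparts of the
# server classes used above); this is a sketch, not repository code.
#
# from twisted.internet import reactor
# from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
#
# class BroadcastClientProtocol(WebSocketClientProtocol):
#
#     def onMessage(self, msg, binary):
#         # Every connected client receives the server's tick broadcasts.
#         print msg
#
# if __name__ == '__main__':
#     factory = WebSocketClientFactory("ws://localhost:9000")
#     factory.protocol = BroadcastClientProtocol
#     connectWS(factory)
#     reactor.run()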
bsd-3-clause
-1,031,752,574,369,483,900
-7,905,111,028,880,033,000
29.934211
88
0.642705
false
isabellemao/Hello-World
python/Junior2015CCCJ4.py
1
1278
# Problem J4: Arrival Time

departure_time = input()
# The time of departure, split into a list.
split_departure = list(departure_time)

# Split the list.
departure_hour = split_departure[0:2]
departure_minute = split_departure[3:5]

# Change the split lists to integers.
departure_hour = int("".join(departure_hour))
departure_minute = int("".join(departure_minute))

# The start and end of the rush hours.
rh_start_1 = 7
rh_end_1 = 10
rh_start_2 = 15
rh_end_2 = 19

# Set the current time.
hour = departure_hour
minute = departure_minute

# For the 120 minutes it usually takes Fiona to commute.
for counter in range(1, 121):
    # If it's currently rush hour, each driving minute costs two wall-clock
    # minutes; otherwise the clock advances normally.
    if rh_start_1 <= hour < rh_end_1 or rh_start_2 <= hour < rh_end_2:
        minute += 2
    else:
        minute += 1
    if minute >= 60:
        # Carry into the next hour; subtracting 60 (rather than resetting
        # to 0) avoids silently dropping a minute when minute reaches 61.
        minute -= 60
        hour += 1
        if hour == 24:
            hour = 0

# Add leading zeroes if required.
if hour < 10:
    hour = "0" + str(hour)
else:
    hour = str(hour)
if minute < 10:
    minute = "0" + str(minute)
else:
    minute = str(minute)

# Make a valid output.
output = "".join((hour, ":", minute))
print(output)
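
# --- Editor-added sanity check (not part of the original solution) ---
# Carrying with subtraction matches divmod arithmetic on minutes:
total = 59 + 2                       # departing at :59 during rush hour
carry, minute_check = divmod(total, 60)
print(carry, minute_check)           # -> 1 1  (one hour carried, one minute)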
apache-2.0
6,126,909,433,431,679,000
8,487,452,318,307,785,000
22.666667
88
0.640063
false
JosephCastro/selenium
py/selenium/webdriver/support/wait.py
81
4070
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import time

from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException

POLL_FREQUENCY = 0.5  # How long to sleep in between calls to the method
IGNORED_EXCEPTIONS = (NoSuchElementException,)  # exceptions ignored during calls to the method


class WebDriverWait(object):
    def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
        """Constructor, takes a WebDriver instance and timeout in seconds.

           :Args:
            - driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
            - timeout - Number of seconds before timing out
            - poll_frequency - sleep interval between calls
              By default, it is 0.5 second.
            - ignored_exceptions - iterable structure of exception classes ignored during calls.
              By default, it contains NoSuchElementException only.

           Example:
            from selenium.webdriver.support.ui import WebDriverWait \n
            element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id("someId")) \n
            is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\ \n
                        until_not(lambda x: x.find_element_by_id("someId").is_displayed())
        """
        self._driver = driver
        self._timeout = timeout
        self._poll = poll_frequency
        # avoid the divide by zero
        if self._poll == 0:
            self._poll = POLL_FREQUENCY
        exceptions = list(IGNORED_EXCEPTIONS)
        if ignored_exceptions is not None:
            try:
                exceptions.extend(iter(ignored_exceptions))
            except TypeError:  # ignored_exceptions is not iterable
                exceptions.append(ignored_exceptions)
        self._ignored_exceptions = tuple(exceptions)

    def __repr__(self):
        return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
            type(self), self._driver.session_id)

    def until(self, method, message=''):
        """Calls the method provided with the driver as an argument until the \
        return value is not False."""
        screen = None
        stacktrace = None

        end_time = time.time() + self._timeout
        while True:
            try:
                value = method(self._driver)
                if value:
                    return value
            except self._ignored_exceptions as exc:
                screen = getattr(exc, 'screen', None)
                stacktrace = getattr(exc, 'stacktrace', None)
            time.sleep(self._poll)
            if time.time() > end_time:
                break
        raise TimeoutException(message, screen, stacktrace)

    def until_not(self, method, message=''):
        """Calls the method provided with the driver as an argument until the \
        return value is False."""
        end_time = time.time() + self._timeout
        while True:
            try:
                value = method(self._driver)
                if not value:
                    return value
            except self._ignored_exceptions:
                return True
            time.sleep(self._poll)
            if time.time() > end_time:
                break
        raise TimeoutException(message)
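
# --- Editor-added usage sketch (not part of the module) ---
# The same wait expressed with the expected_conditions helpers; assumes a
# live WebDriver session is passed in by the caller.
def _example_wait(driver):
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.common.by import By
    return WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, "someId")))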
apache-2.0
-6,290,646,403,034,270,000
-3,850,453,644,238,676,000
41.395833
98
0.626536
false
FUSED-Wind/fusedwind
src/fusedwind/lib/cubicspline.py
2
2234
import numpy as np
from scipy.linalg import solve_banded

from fusedwind.lib.utilities import _checkIfFloat


class NaturalCubicSpline(object):
    """class implementation of utilities.cubic_with_deriv"""

    def __init__(self, xp, yp):

        if np.any(np.diff(xp) < 0):
            raise TypeError('xp must be in ascending order')

        # n = len(x)
        self.m = len(xp)

        xk = xp[1:-1]
        yk = yp[1:-1]
        xkp = xp[2:]
        ykp = yp[2:]
        xkm = xp[:-2]
        ykm = yp[:-2]

        b = (ykp - yk)/(xkp - xk) - (yk - ykm)/(xk - xkm)
        l = (xk - xkm)/6.0
        d = (xkp - xkm)/3.0
        u = (xkp - xk)/6.0
        # u[0] = 0.0  # non-existent entries
        # l[-1] = 0.0

        # solve for second derivatives
        fpp = solve_banded((1, 1), np.matrix([u, d, l]), b)
        self.fpp = np.concatenate([[0.0], fpp, [0.0]])  # natural spline

        self.xp = xp
        self.yp = yp

    def __call__(self, x, deriv=False):

        x, n = _checkIfFloat(x)

        y = np.zeros(n)
        dydx = np.zeros(n)
        dydxp = np.zeros((n, self.m))
        dydyp = np.zeros((n, self.m))

        # find location in vector
        for i in range(n):
            if x[i] < self.xp[0]:
                j = 0
            elif x[i] > self.xp[-1]:
                j = self.m - 2
            else:
                for j in range(self.m-1):
                    if self.xp[j+1] > x[i]:
                        break

            x1 = self.xp[j]
            y1 = self.yp[j]
            x2 = self.xp[j+1]
            y2 = self.yp[j+1]

            A = (x2 - x[i])/(x2 - x1)
            B = 1 - A
            C = 1.0/6*(A**3 - A)*(x2 - x1)**2
            D = 1.0/6*(B**3 - B)*(x2 - x1)**2

            y[i] = A * y1 + B * y2 + C * self.fpp[j] + D * self.fpp[j+1]

            dAdx = -1.0/(x2 - x1)
            dBdx = -dAdx
            dCdx = 1.0/6 * (3 * A**2 - 1) * dAdx * (x2 - x1)**2
            dDdx = 1.0/6 * (3 * B**2 - 1) * dBdx * (x2 - x1)**2
            dydx[i] = dAdx * y1 + dBdx * y2 + dCdx * self.fpp[j] + dDdx * self.fpp[j+1]

        if n == 1:
            y = y[0]
            dydx = dydx[0]

        if deriv:
            return y, dydx
        else:
            return y
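
# --- Editor-added usage sketch (not part of the module) ---
# Interpolate sin(x) at a made-up query point; knots must be ascending.
def _spline_demo():
    xp = np.linspace(0.0, np.pi, 10)
    yp = np.sin(xp)
    spline = NaturalCubicSpline(xp, yp)
    y = spline(1.0)                     # interpolated value
    y, dydx = spline(1.0, deriv=True)   # value and first derivative
    return y, dydx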
apache-2.0
7,200,985,568,287,435,000
2,635,374,988,322,809,000
25.915663
87
0.405998
false
googleapis/python-pubsublite
google/cloud/pubsublite_v1/types/__init__.py
1
4702
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from .admin import ( CreateReservationRequest, CreateSubscriptionRequest, CreateTopicRequest, DeleteReservationRequest, DeleteSubscriptionRequest, DeleteTopicRequest, GetReservationRequest, GetSubscriptionRequest, GetTopicPartitionsRequest, GetTopicRequest, ListReservationsRequest, ListReservationsResponse, ListReservationTopicsRequest, ListReservationTopicsResponse, ListSubscriptionsRequest, ListSubscriptionsResponse, ListTopicsRequest, ListTopicsResponse, ListTopicSubscriptionsRequest, ListTopicSubscriptionsResponse, OperationMetadata, SeekSubscriptionRequest, SeekSubscriptionResponse, TopicPartitions, UpdateReservationRequest, UpdateSubscriptionRequest, UpdateTopicRequest, ) from .common import ( AttributeValues, Cursor, PubSubMessage, Reservation, SequencedMessage, Subscription, TimeTarget, Topic, ) from .cursor import ( CommitCursorRequest, CommitCursorResponse, InitialCommitCursorRequest, InitialCommitCursorResponse, ListPartitionCursorsRequest, ListPartitionCursorsResponse, PartitionCursor, SequencedCommitCursorRequest, SequencedCommitCursorResponse, StreamingCommitCursorRequest, StreamingCommitCursorResponse, ) from .publisher import ( InitialPublishRequest, InitialPublishResponse, MessagePublishRequest, MessagePublishResponse, PublishRequest, PublishResponse, ) from .subscriber import ( FlowControlRequest, InitialPartitionAssignmentRequest, InitialSubscribeRequest, InitialSubscribeResponse, MessageResponse, PartitionAssignment, PartitionAssignmentAck, PartitionAssignmentRequest, SeekRequest, SeekResponse, SubscribeRequest, SubscribeResponse, ) from .topic_stats import ( ComputeHeadCursorRequest, ComputeHeadCursorResponse, ComputeMessageStatsRequest, ComputeMessageStatsResponse, ComputeTimeCursorRequest, ComputeTimeCursorResponse, ) __all__ = ( "CreateReservationRequest", "CreateSubscriptionRequest", "CreateTopicRequest", "DeleteReservationRequest", "DeleteSubscriptionRequest", "DeleteTopicRequest", "GetReservationRequest", "GetSubscriptionRequest", "GetTopicPartitionsRequest", "GetTopicRequest", "ListReservationsRequest", "ListReservationsResponse", "ListReservationTopicsRequest", "ListReservationTopicsResponse", "ListSubscriptionsRequest", "ListSubscriptionsResponse", "ListTopicsRequest", "ListTopicsResponse", "ListTopicSubscriptionsRequest", "ListTopicSubscriptionsResponse", "OperationMetadata", "SeekSubscriptionRequest", "SeekSubscriptionResponse", "TopicPartitions", "UpdateReservationRequest", "UpdateSubscriptionRequest", "UpdateTopicRequest", "AttributeValues", "Cursor", "PubSubMessage", "Reservation", "SequencedMessage", "Subscription", "TimeTarget", "Topic", "CommitCursorRequest", "CommitCursorResponse", "InitialCommitCursorRequest", "InitialCommitCursorResponse", "ListPartitionCursorsRequest", "ListPartitionCursorsResponse", "PartitionCursor", "SequencedCommitCursorRequest", "SequencedCommitCursorResponse", "StreamingCommitCursorRequest", "StreamingCommitCursorResponse", 
"InitialPublishRequest", "InitialPublishResponse", "MessagePublishRequest", "MessagePublishResponse", "PublishRequest", "PublishResponse", "FlowControlRequest", "InitialPartitionAssignmentRequest", "InitialSubscribeRequest", "InitialSubscribeResponse", "MessageResponse", "PartitionAssignment", "PartitionAssignmentAck", "PartitionAssignmentRequest", "SeekRequest", "SeekResponse", "SubscribeRequest", "SubscribeResponse", "ComputeHeadCursorRequest", "ComputeHeadCursorResponse", "ComputeMessageStatsRequest", "ComputeMessageStatsResponse", "ComputeTimeCursorRequest", "ComputeTimeCursorResponse", )
apache-2.0
-7,967,435,988,339,191,000
-2,349,843,363,383,726,000
26.658824
74
0.746278
false
jounex/hue
desktop/core/ext-py/Mako-0.8.1/test/test_tgplugin.py
36
1260
import unittest from mako.ext.turbogears import TGPlugin from test.util import flatten_result, result_lines from test import TemplateTest, template_base tl = TGPlugin(options=dict(directories=[template_base]), extension='html') class TestTGPlugin(TemplateTest): def test_basic(self): t = tl.load_template('/index.html') assert result_lines(t.render()) == [ "this is index" ] def test_subdir(self): t = tl.load_template('/subdir/index.html') assert result_lines(t.render()) == [ "this is sub index", "this is include 2" ] assert tl.load_template('/subdir/index.html').module_id == '_subdir_index_html' def test_basic_dot(self): t = tl.load_template('index') assert result_lines(t.render()) == [ "this is index" ] def test_subdir_dot(self): t = tl.load_template('subdir.index') assert result_lines(t.render()) == [ "this is sub index", "this is include 2" ] assert tl.load_template('subdir.index').module_id == '_subdir_index_html' def test_string(self): t = tl.load_template('foo', "hello world") assert t.render() == "hello world"
apache-2.0
-3,687,753,926,524,521,500
9,127,114,865,547,539,000
29
87
0.588095
false
gisce/OCB
openerp/report/render/rml2pdf/__init__.py
75
1135
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from trml2pdf import parseString, parseNode #.apidoc title: RML to PDF engine # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-6,240,189,122,880,952,000
-4,869,202,976,873,692,000
41.037037
79
0.618502
false
robosafe/testbench_vRAL_hydro
bert2_simulator/sim_step_monitors/assertion_monitor_manager.py
1
2830
#!/usr/bin/env python
"""
Assertion Monitor Manager
Created by David Western, June 2015.
"""

import imp
import sys

import rospkg
import rospy
from std_msgs.msg import UInt64
from std_srvs.srv import Empty


class AMM:

    def __init__(self, AM_list_file, trace_label):

        # Read list of assertion monitors to run (from file?):
        rospack = rospkg.RosPack()
        path = rospack.get_path('bert2_simulator')
        path = path + '/sim_step_monitors/'
        print("--- Assertion monitors to run:")
        self.AM_names = [line.rstrip('\n') for line in open(path + AM_list_file)]
        print(self.AM_names)

        # Instantiate assertion monitors:
        self.AMs = []  # Initialise empty list of AMs.
        for idx, class_name in enumerate(self.AM_names):
            print(class_name)
            print(path + class_name + '.py')
            module = imp.load_source(class_name, path + class_name + '.py')
            # module = __import__(path+class_name)
            # N.B. These two lines imply that we require the AM to be defined
            # in a file with the same name as the class.
            class_ = getattr(module, class_name)
            self.AMs.append(class_(trace_label))

            # Check AM has the mandatory attributes:
            mand_attrs = ['step']
            for attr in mand_attrs:
                if not hasattr(self.AMs[idx], attr):
                    rospy.logerr("Assertion monitor specification '%s' does not "
                                 "define the attribute '%s', which is required by "
                                 "AMM (assertion_monitor_manager.py). Does %s "
                                 "inherit from an assertion monitor base class?",
                                 type(self.AMs[idx]).__name__, attr,
                                 type(self.AMs[idx]).__name__)

        # Get service
        self.unpause_gazebo = rospy.ServiceProxy('gazebo/unpause_physics', Empty)

        # Subscriber to triggers, which come on each sim step:
        rospy.Subscriber("AM_trigger", UInt64, self.trigger_AMs)

    def trigger_AMs(self, data):

        iteration = data.data
        sim_time = rospy.get_time()

        # Step all assertion monitors:
        for idx, AM in enumerate(self.AMs):
            AM.step(iteration, sim_time)

        # Release gazebo now we've finished the checks for this step:
        # print("unpausing")
        # self.unpause_gazebo()
        # Problem: This line prevents Gazebo's pause button from working
        # (unless you get a lucky click).


if __name__ == '__main__':
    try:
        if len(sys.argv) < 3:
            print("usage: rosrun [package_name] assertion_monitor_manager.py AM_list_file.txt report_file_name")
        else:
            rospy.init_node('AMM')
            AMMInst = AMM(sys.argv[1], sys.argv[2])
            rospy.spin()
    except rospy.ROSInterruptException:  # to stop the code when pressing Ctrl+C
        pass
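Given the contract the manager enforces above (a class defined in a file of the same name under sim_step_monitors/, a constructor taking the trace label, and a step method), a skeletal monitor could look like this; the class name and body are illustrative, not part of the package:

# Hypothetical monitor skeleton, saved as ExampleMonitor.py in
# sim_step_monitors/ and listed in the AM_list_file.
class ExampleMonitor(object):

    def __init__(self, trace_label):
        self.trace_label = trace_label  # label used for the report/trace

    def step(self, iteration, sim_time):
        # Called once per simulation step by AMM.trigger_AMs;
        # assertion checks against the current sim state go here.
        pass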
gpl-3.0
-1,607,268,812,167,874,800
8,136,559,815,108,588,000
31.906977
109
0.602473
false
ContinuumIO/chaco
examples/demo/xray_plot.py
3
5751
""" Implementation of a plot using a custom overlay and tool """ from __future__ import with_statement import numpy from traits.api import HasTraits, Instance, Enum from traitsui.api import View, Item from enable.api import ComponentEditor from chaco.api import Plot, ArrayPlotData, AbstractOverlay from enable.api import BaseTool from enable.markers import DOT_MARKER, DotMarker class BoxSelectTool(BaseTool): """ Tool for selecting all points within a box There are 2 states for this tool, normal and selecting. While the left mouse button is down the metadata on the datasources will be updated with the current selected bounds. Note that the tool does not actually store the selected point, but the bounds of the box. """ event_state = Enum("normal", "selecting") def normal_left_down(self, event): self.event_state = "selecting" self.selecting_mouse_move(event) def selecting_left_up(self, event): self.event_state = "normal" def selecting_mouse_move(self, event): x1, y1 = self.map_to_data(event.x - 25, event.y - 25) x2, y2 = self.map_to_data(event.x + 25, event.y + 25) index_datasource = self.component.index index_datasource.metadata['selections'] = (x1, x2) value_datasource = self.component.value value_datasource.metadata['selections'] = (y1, y2) self.component.request_redraw() def map_to_data(self, x, y): """ Returns the data space coordinates of the given x and y. Takes into account orientation of the plot and the axis setting. """ plot = self.component if plot.orientation == "h": index = plot.x_mapper.map_data(x) value = plot.y_mapper.map_data(y) else: index = plot.y_mapper.map_data(y) value = plot.x_mapper.map_data(x) return index, value class XRayOverlay(AbstractOverlay): """ Overlay which draws scatter markers on top of plot data points. This overlay should be combined with a tool which updates the datasources metadata with selection bounds. """ marker = DotMarker() def overlay(self, component, gc, view_bounds=None, mode='normal'): x_range = self._get_selection_index_screen_range() y_range = self._get_selection_value_screen_range() if len(x_range) == 0: return x1, x2 = x_range y1, y2 = y_range with gc: gc.set_alpha(0.8) gc.set_fill_color((1.0, 1.0, 1.0)) gc.rect(x1, y1, x2 - x1, y2 - y1) gc.draw_path() pts = self._get_selected_points() if len(pts) == 0: return screen_pts = self.component.map_screen(pts) if hasattr(gc, 'draw_marker_at_points'): gc.draw_marker_at_points(screen_pts, 3, DOT_MARKER) else: gc.save_state() for sx, sy in screen_pts: gc.translate_ctm(sx, sy) gc.begin_path() self.marker.add_to_path(gc, 3) gc.draw_path(self.marker.draw_mode) gc.translate_ctm(-sx, -sy) gc.restore_state() def _get_selected_points(self): """ gets all the points within the bounds defined in the datasources metadata """ index_datasource = self.component.index index_selection = index_datasource.metadata['selections'] index = index_datasource.get_data() value_datasource = self.component.value value_selection = value_datasource.metadata['selections'] value = value_datasource.get_data() x_indices = numpy.where((index > index_selection[0]) & (index < index_selection[-1])) y_indices = numpy.where((value > value_selection[0]) & (value < value_selection[-1])) indices = list(set(x_indices[0]) & set(y_indices[0])) sel_index = index[indices] sel_value = value[indices] return zip(sel_index, sel_value) def _get_selection_index_screen_range(self): """ maps the selected bounds which were set by the tool into screen space. 
The screen space points can be used for drawing the overlay """ index_datasource = self.component.index index_mapper = self.component.index_mapper index_selection = index_datasource.metadata['selections'] return tuple(index_mapper.map_screen(numpy.array(index_selection))) def _get_selection_value_screen_range(self): """ maps the selected bounds which were set by the tool into screen space. The screen space points can be used for drawing the overlay """ value_datasource = self.component.value value_mapper = self.component.value_mapper value_selection = value_datasource.metadata['selections'] return tuple(value_mapper.map_screen(numpy.array(value_selection))) class PlotExample(HasTraits): plot = Instance(Plot) traits_view = View(Item('plot', editor=ComponentEditor()), width=600, height=600) def __init__(self, index, value, *args, **kw): super(PlotExample, self).__init__(*args, **kw) plot_data = ArrayPlotData(index=index) plot_data.set_data('value', value) self.plot = Plot(plot_data) line = self.plot.plot(('index', 'value'))[0] line.overlays.append(XRayOverlay(line)) line.tools.append(BoxSelectTool(line)) index = numpy.arange(0, 25, 0.25) value = numpy.sin(index) + numpy.arange(0, 10, 0.1) example = PlotExample(index, value) example.configure_traits()
bsd-3-clause
7,195,397,922,641,287,000
8,556,214,570,269,092,000
32.242775
78
0.614328
false
xuweiliang/Codelibrary
nova/db/sqlalchemy/migrate_repo/versions/302_pgsql_add_instance_system_metadata_index.py
43
1322
# Copyright 2015 Huawei.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_db.sqlalchemy import utils


INDEX_COLUMNS = ['instance_uuid']
# use the same index name as MySQL
INDEX_NAME = 'instance_uuid'
SYS_META_TABLE_NAME = 'instance_system_metadata'


def upgrade(migrate_engine):
    """Add instance_system_metadata indexes missing on PostgreSQL and other DB.
    """
    # This index was already added by migration 216 for MySQL
    if migrate_engine.name != 'mysql':
        # Adds index for PostgreSQL and other DB
        if not utils.index_exists(migrate_engine, SYS_META_TABLE_NAME,
                                  INDEX_NAME):
            utils.add_index(migrate_engine, SYS_META_TABLE_NAME, INDEX_NAME,
                            INDEX_COLUMNS)
apache-2.0
-4,391,403,006,927,339,000
6,895,892,914,627,141,000
35.722222
79
0.683812
false
0x7E/ubuntu-tweak
ubuntutweak/settings/ccm/Utils.py
5
12852
# -*- coding: UTF-8 -*- # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # Authors: Quinn Storm (quinn@beryl-project.org) # Patrick Niklaus (marex@opencompositing.org) # Guillaume Seguin (guillaume@segu.in) # Christopher Williams (christopherw@verizon.net) # Copyright (C) 2007 Quinn Storm import os import weakref from gi.repository import GObject, Gtk, Gdk, Pango from Constants import * from cgi import escape as protect_pango_markup import operator import itertools import locale import gettext locale.setlocale(locale.LC_ALL, "") gettext.bindtextdomain("ccsm", DataDir + "/locale") gettext.textdomain("ccsm") _ = gettext.gettext IconTheme = Gtk.IconTheme.get_default() #TODO #if not IconDir in IconTheme.get_search_path(): # IconTheme.prepend_search_path(IconDir) def gtk_process_events (): while Gtk.events_pending (): Gtk.main_iteration () def getScreens(): screens = [] display = Gdk.Display.get_default() nScreens = display.get_n_screens() for i in range(nScreens): screens.append(i) return screens def getDefaultScreen(): display = Gdk.Display.get_default() return display.get_default_screen().get_number() def protect_markup_dict (dict_): return dict((k, protect_pango_markup (v)) for (k, v) in dict_.items()) class Image (Gtk.Image): def __init__ (self, name = None, type = ImageNone, size = 32, useMissingImage = False): GObject.GObject.__init__ (self) if not name: return if useMissingImage: self.set_from_stock (Gtk.STOCK_MISSING_IMAGE, Gtk.IconSize.LARGE_TOOLBAR) return try: if type in (ImagePlugin, ImageCategory, ImageThemed): pixbuf = None if type == ImagePlugin: name = "plugin-" + name try: pixbuf = IconTheme.load_icon (name, size, 0) except GObject.GError: pixbuf = IconTheme.load_icon ("plugin-unknown", size, 0) elif type == ImageCategory: name = "plugins-" + name try: pixbuf = IconTheme.load_icon (name, size, 0) except GObject.GError: pixbuf = IconTheme.load_icon ("plugins-unknown", size, 0) else: pixbuf = IconTheme.load_icon (name, size, 0) self.set_from_pixbuf (pixbuf) elif type == ImageStock: self.set_from_stock (name, size) except GObject.GError as e: self.set_from_stock (Gtk.STOCK_MISSING_IMAGE, Gtk.IconSize.BUTTON) class ActionImage (Gtk.Alignment): map = { "keyboard" : "input-keyboard", "button" : "input-mouse", "edges" : "video-display", "bell" : "audio-x-generic" } def __init__ (self, action): GObject.GObject.__init__ (self, 0, 0.5) self.set_padding (0, 0, 0, 10) if action in self.map: action = self.map[action] self.add (Image (name = action, type = ImageThemed, size = 22)) class SizedButton (Gtk.Button): minWidth = -1 minHeight = -1 def __init__ (self, minWidth = -1, minHeight = -1): super (SizedButton, self).__init__ () self.minWidth = minWidth self.minHeight = minHeight self.connect ("size-request", self.adjust_size) def adjust_size (self, widget, requisition): width, height = requisition.width, requisition.height newWidth = max (width, 
self.minWidth) newHeight = max (height, self.minHeight) self.set_size_request (newWidth, newHeight) class PrettyButton (Gtk.Button): __gsignals__ = { 'draw': 'override', } _old_toplevel = None def __init__ (self): super (PrettyButton, self).__init__ () self.states = { "focus" : False, "pointer" : False } self.set_size_request (200, -1) self.set_relief (Gtk.ReliefStyle.NONE) self.connect ("focus-in-event", self.update_state_in, "focus") self.connect ("focus-out-event", self.update_state_out, "focus") self.connect ("hierarchy-changed", self.hierarchy_changed) def hierarchy_changed (self, widget, old_toplevel): if old_toplevel == self._old_toplevel: return if not old_toplevel and self.state != Gtk.StateType.NORMAL: self.set_state(Gtk.StateType.PRELIGHT) self.set_state(Gtk.StateType.NORMAL) self._old_toplevel = old_toplevel def update_state_in (self, *args): state = args[-1] self.set_state (Gtk.StateType.PRELIGHT) self.states[state] = True def update_state_out (self, *args): state = args[-1] self.states[state] = False if True in self.states.values (): self.set_state (Gtk.StateType.PRELIGHT) else: self.set_state (Gtk.StateType.NORMAL) def do_expose_event (self, event): has_focus = self.flags () & Gtk.HAS_FOCUS if has_focus: self.unset_flags (Gtk.HAS_FOCUS) ret = super (PrettyButton, self).do_expose_event (self, event) if has_focus: self.set_flags (Gtk.HAS_FOCUS) return ret class Label(Gtk.Label): def __init__(self, value = "", wrap = 160): GObject.GObject.__init__(self, value) self.props.xalign = 0 self.props.wrap_mode = Pango.WrapMode.WORD self.set_line_wrap(True) self.set_size_request(wrap, -1) class NotFoundBox(Gtk.Alignment): def __init__(self, value=""): GObject.GObject.__init__(self, 0.5, 0.5, 0.0, 0.0) box = Gtk.HBox() self.Warning = Gtk.Label() self.Markup = _("<span size=\"large\"><b>No matches found.</b> </span><span>\n\n Your filter \"<b>%s</b>\" does not match any items.</span>") value = protect_pango_markup(value) self.Warning.set_markup(self.Markup % value) image = Image("face-surprise", ImageThemed, 48) box.pack_start(image, False, False, 0) box.pack_start(self.Warning, True, True, 15) self.add(box) def update(self, value): value = protect_pango_markup(value) self.Warning.set_markup(self.Markup % value) class IdleSettingsParser: def __init__(self, context, main): def FilterPlugin (p): return not p.Initialized and p.Enabled self.Context = context self.Main = main self.PluginList = [p for p in self.Context.Plugins.items() if FilterPlugin(p[1])] nCategories = len (main.MainPage.RightWidget._boxes) self.CategoryLoadIconsList = list(range(3, nCategories)) # Skip the first 3 print('Loading icons...') GObject.timeout_add (150, self.Wait) def Wait(self): if not self.PluginList: return False if len (self.CategoryLoadIconsList) == 0: # If we're done loading icons GObject.idle_add (self.ParseSettings) else: GObject.idle_add (self.LoadCategoryIcons) return False def ParseSettings(self): name, plugin = self.PluginList[0] if not plugin.Initialized: plugin.Update () self.Main.RefreshPage(plugin) self.PluginList.remove (self.PluginList[0]) GObject.timeout_add (200, self.Wait) return False def LoadCategoryIcons(self): from ccm.Widgets import PluginButton catIndex = self.CategoryLoadIconsList[0] pluginWindow = self.Main.MainPage.RightWidget categoryBox = pluginWindow._boxes[catIndex] for (pluginIndex, plugin) in \ enumerate (categoryBox.get_unfiltered_plugins()): categoryBox._buttons[pluginIndex] = PluginButton (plugin) categoryBox.rebuild_table (categoryBox._current_cols, True) 
        pluginWindow.connect_buttons (categoryBox)

        self.CategoryLoadIconsList.remove (self.CategoryLoadIconsList[0])
        GObject.timeout_add (150, self.Wait)

        return False

# Updates all registered settings when they are changed through CompizConfig
class Updater:

    def __init__ (self):
        self.VisibleSettings = {}
        self.Plugins = []
        self.Block = 0

    def SetContext (self, context):
        self.Context = context
        GObject.timeout_add (2000, self.Update)

    def Append (self, widget):
        reference = weakref.ref(widget)
        setting = widget.Setting
        self.VisibleSettings.setdefault((setting.Plugin.Name, setting.Name), []).append(reference)

    def AppendPlugin (self, plugin):
        self.Plugins.append (plugin)

    def Remove (self, widget):
        setting = widget.Setting
        l = self.VisibleSettings.get((setting.Plugin.Name, setting.Name))
        if not l:
            return
        for i, ref in enumerate(list(l)):
            if ref() is widget:
                l.remove(ref)
                break

    def UpdatePlugins(self):
        for plugin in self.Plugins:
            plugin.Read()

    def UpdateSetting (self, setting):
        widgets = self.VisibleSettings.get((setting.Plugin.Name, setting.Name))
        if not widgets:
            return
        for reference in widgets:
            widget = reference()
            if widget is not None:
                widget.Read()

    def Update (self):
        if self.Block > 0:
            return True

        if self.Context.ProcessEvents():
            changed = self.Context.ChangedSettings
            if [s for s in changed if s.Plugin.Name == "core" and s.Name == "active_plugins"]:
                self.UpdatePlugins()

            for setting in list(changed):
                widgets = self.VisibleSettings.get((setting.Plugin.Name, setting.Name))
                if widgets:
                    for reference in widgets:
                        widget = reference()
                        if widget is not None:
                            widget.Read()
                            if widget.List:
                                widget.ListWidget.Read()
                changed.remove(setting)

            self.Context.ChangedSettings = changed

        return True

GlobalUpdater = Updater ()

class PluginSetting:

    def __init__ (self, plugin, widget, handler):
        self.Widget = widget
        self.Plugin = plugin
        self.Handler = handler
        GlobalUpdater.AppendPlugin (self)

    def Read (self):
        widget = self.Widget
        widget.handler_block(self.Handler)
        widget.set_active (self.Plugin.Enabled)
        widget.set_sensitive (self.Plugin.Context.AutoSort)
        widget.handler_unblock(self.Handler)

class PureVirtualError(Exception):
    pass

def SettingKeyFunc(value):
    return value.Plugin.Ranking[value.Name]

def CategoryKeyFunc(category):
    if 'General' == category:
        return ''
    else:
        return category or 'zzzzzzzz'

def GroupIndexKeyFunc(item):
    return item[1][0]

FirstItemKeyFunc = operator.itemgetter(0)

EnumSettingKeyFunc = operator.itemgetter(1)

PluginKeyFunc = operator.attrgetter('ShortDesc')

def HasOnlyType (settings, stype):
    return settings and not [s for s in settings if s.Type != stype]

def GetSettings(group, types=None):

    def TypeFilter (settings, types):
        for setting in settings:
            if setting.Type in types:
                yield setting

    if types:
        screen = TypeFilter(iter(group.Screen.values()), types)
    else:
        screen = iter(group.Screen.values())

    return screen

# Support python 2.4
try:
    any
    all
except NameError:
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

    def all(iterable):
        for element in iterable:
            if not element:
                return False
        return True
gpl-2.0
-160,937,647,168,256,350
8,923,224,704,775,566,000
29.968675
149
0.591892
false
YongseopKim/crosswalk-test-suite
webapi/tct-csp-w3c-tests/csp-py/csp_media-src_none_audio_blocked_int.py
30
3064
def main(request, response):
    import simplejson as json
    f = file('config.json')
    source = f.read()
    s = json.JSONDecoder().decode(source)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
    _CSP = "media-src 'none'; script-src 'self' 'unsafe-inline'"
    response.headers.set("Content-Security-Policy", _CSP)
    response.headers.set("X-Content-Security-Policy", _CSP)
    response.headers.set("X-WebKit-CSP", _CSP)
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of works must retain the original copyright notice, this list
  of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
  be used to endorse or promote products derived from this work without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

Authors:
        Zhang, Zhiqiang <zhiqiang.zhang@intel.com>

-->

<html>
  <head>
    <title>CSP Test: csp_media-src_none_audio_blocked_int</title>
    <link rel="author" title="Intel" href="http://www.intel.com"/>
    <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
    <meta name="flags" content=""/>
    <meta name="assert" content="media-src 'none'; script-src 'self' 'unsafe-inline'"/>
    <meta charset="utf-8"/>
    <script src="../resources/testharness.js"></script>
    <script src="../resources/testharnessreport.js"></script>
  </head>
  <body>
    <div id="log"></div>
    <audio id="m"></audio>
    <script>
        var t = async_test(document.title);
        var m = document.getElementById("m");
        m.src = "support/khronos/red-green.theora.ogv";

        window.setTimeout(function() {
            t.step(function() {
                assert_true(m.currentSrc == "",
                            "audio.currentSrc should be empty after setting src attribute");
            });
            t.done();
        }, 0);
    </script>
  </body>
</html> """
bsd-3-clause
5,415,151,669,881,461,000
8,307,420,846,024,547,000
42.15493
84
0.685379
false
reingart/rad2py
assignments/program10A.py
16
3944
#!/usr/bin/env python
# coding:utf-8

"PSP Program 6A - Linear Regression Prediction Interval"

__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0"

from math import sqrt, pi

# reuse previous programs
from program1A import mean
from program5A import simpson_rule_integrate, gamma


def double_sided_student_t_probability(t, n):
    "Calculate the p-value using a double sided student t distribution"
    # create the function for n degrees of freedom:
    k = gamma(n + 1, 2) / (sqrt(n * pi) * gamma(n, 2))
    f_t_dist = lambda u: k * (1 + (u ** 2) / float(n)) ** (- (n + 1) / 2.0)
    # integrate a finite area from the origin to t
    p_aux = simpson_rule_integrate(f_t_dist, 0, t)
    # return the area of the two tails of the distribution (symmetrical)
    return (0.5 - p_aux) * 2


def double_sided_student_t_value(p, n):
    "Calculate the t-value using a double sided student t distribution"
    # replaces table lookup, thanks to http://statpages.org/pdfs.html
    v = dv = 0.5
    t = 0
    while dv > 0.000001:
        t = 1 / v - 1
        dv = dv / 2
        if double_sided_student_t_probability(t, n) > p:
            v = v - dv
        else:
            v = v + dv
    return t


def variance(x_values, y_values, b0, b1):
    "Calculate the mean square deviation of the linear regression line"
    # take the variance from the regression line instead of the data average
    sum_aux = sum([(y - b0 - b1 * x) ** 2 for x, y in zip(x_values, y_values)])
    n = float(len(x_values))
    return (1 / (n - 2.0)) * sum_aux


def prediction_interval(x_values, y_values, x_k, alpha):
    """Calculate the linear regression parameters for a set of n values
       then calculate the upper and lower prediction interval
    """
    # calculate aux variables
    x_avg = mean(x_values)
    y_avg = mean(y_values)
    n = len(x_values)
    sum_xy = sum([(x_values[i] * y_values[i]) for i in range(n)])
    sum_x2 = sum([(x_values[i] ** 2) for i in range(n)])

    # calculate regression coefficients
    b1 = (sum_xy - (n * x_avg * y_avg)) / (sum_x2 - n * (x_avg ** 2))
    b0 = y_avg - b1 * x_avg

    # calculate the t-value for the given alpha p-value
    t = double_sided_student_t_value(1 - alpha, n - 2)

    # calculate the standard deviation
    sigma = sqrt(variance(x_values, y_values, b0, b1))

    # calculate the range
    sum_xi_xavg = sum([(x - x_avg) ** 2 for x in x_values], 0.0)
    aux = 1 + (1 / float(n)) + ((x_k - x_avg) ** 2) / sum_xi_xavg
    p_range = t * sigma * sqrt(aux)

    # combine the range with the x_k projection:
    return b0, b1, p_range, x_k + p_range, x_k - p_range, t


def test_student_t_integration():
    # test student t values
    assert round(double_sided_student_t_probability(t=1.8595, n=8), 4) == 0.1
    assert round(double_sided_student_t_value(p=0.1, n=8), 4) == 1.8595


if __name__ == "__main__":
    test_student_t_integration()
    # Table D8 "Size Estimating regression data"
    est_loc = [130, 650, 99, 150, 128, 302, 95, 945, 368, 961]
    act_new_chg_loc = [186, 699, 132, 272, 291, 331, 199, 1890, 788, 1601]
    projection = 644.429
    # 70 percent
    b0, b1, p_range, upi, lpi, t = prediction_interval(
        est_loc, act_new_chg_loc, projection, alpha=0.7)
    print "70% Prediction interval: ", b0, b1, p_range, upi, lpi, t
    assert round(t, 3) == 1.108
    assert round(b0, 2) == -22.55
    assert round(b1, 4) == 1.7279
    assert round(p_range, 3) == 236.563
    assert round(upi, 2) == 880.99
    assert round(lpi, 2) == 407.87
    # 90 percent
    b0, b1, p_range, upi, lpi, t = prediction_interval(
        est_loc, act_new_chg_loc, projection, alpha=0.9)
    print "90% Prediction interval: ", b0, b1, p_range, upi, lpi, t
    assert round(t, 2) == 1.86
    assert round(p_range, 2) == 396.97
    assert round(upi, 2) == 1041.4
    assert round(lpi, 2) == 247.46
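For reference, the half-width computed by prediction_interval above is the standard regression prediction interval, which in the notation of the code reads:

\[
\text{p\_range} = t \,\sigma\, \sqrt{1 + \frac{1}{n} + \frac{(x_k - \bar{x})^2}{\sum_{i}(x_i - \bar{x})^2}}
\]

where t is the two-sided Student t value at probability 1 - alpha with n - 2 degrees of freedom, sigma is the standard deviation about the regression line, and the returned interval is x_k plus or minus p_range.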
gpl-3.0
-7,750,535,023,869,851,000
774,744,729,114,931,100
33.578947
79
0.608067
false
JeremyRubin/bitcoin
test/functional/test_framework/siphash.py
91
2014
#!/usr/bin/env python3 # Copyright (c) 2016-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Specialized SipHash-2-4 implementations. This implements SipHash-2-4 for 256-bit integers. """ def rotl64(n, b): return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b def siphash_round(v0, v1, v2, v3): v0 = (v0 + v1) & ((1 << 64) - 1) v1 = rotl64(v1, 13) v1 ^= v0 v0 = rotl64(v0, 32) v2 = (v2 + v3) & ((1 << 64) - 1) v3 = rotl64(v3, 16) v3 ^= v2 v0 = (v0 + v3) & ((1 << 64) - 1) v3 = rotl64(v3, 21) v3 ^= v0 v2 = (v2 + v1) & ((1 << 64) - 1) v1 = rotl64(v1, 17) v1 ^= v2 v2 = rotl64(v2, 32) return (v0, v1, v2, v3) def siphash256(k0, k1, h): n0 = h & ((1 << 64) - 1) n1 = (h >> 64) & ((1 << 64) - 1) n2 = (h >> 128) & ((1 << 64) - 1) n3 = (h >> 192) & ((1 << 64) - 1) v0 = 0x736f6d6570736575 ^ k0 v1 = 0x646f72616e646f6d ^ k1 v2 = 0x6c7967656e657261 ^ k0 v3 = 0x7465646279746573 ^ k1 ^ n0 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= n0 v3 ^= n1 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= n1 v3 ^= n2 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= n2 v3 ^= n3 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= n3 v3 ^= 0x2000000000000000 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= 0x2000000000000000 v2 ^= 0xFF v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) return v0 ^ v1 ^ v2 ^ v3
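A quick sketch of driving the hash above; the key halves and the message value are arbitrary illustrative numbers, not official test vectors:

# Illustrative call; k0/k1 and h are arbitrary values.
k0 = 0x0706050403020100
k1 = 0x0f0e0d0c0b0a0908
h = 0xdeadbeef  # any 256-bit integer, e.g. a block hash as an int
digest = siphash256(k0, k1, h)  # 64-bit result as a Python int
print(hex(digest))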
mit
3,939,022,929,350,397,000
1,458,524,691,053,043,700
30.968254
69
0.525819
false
m-sanders/wagtail
wagtail/wagtailimages/migrations/0001_initial.py
31
3250
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import wagtail.wagtailimages.models import taggit.managers from django.conf import settings import wagtail.wagtailadmin.taggable class Migration(migrations.Migration): dependencies = [ ('taggit', '__latest__'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Filter', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')), ('spec', models.CharField(db_index=True, max_length=255)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')), ('title', models.CharField(verbose_name='Title', max_length=255)), ('file', models.ImageField(width_field='width', upload_to=wagtail.wagtailimages.models.get_upload_to, verbose_name='File', height_field='height')), ('width', models.IntegerField(editable=False)), ('height', models.IntegerField(editable=False)), ('created_at', models.DateTimeField(auto_now_add=True)), ('focal_point_x', models.PositiveIntegerField(editable=False, null=True)), ('focal_point_y', models.PositiveIntegerField(editable=False, null=True)), ('focal_point_width', models.PositiveIntegerField(editable=False, null=True)), ('focal_point_height', models.PositiveIntegerField(editable=False, null=True)), ('tags', taggit.managers.TaggableManager(verbose_name='Tags', blank=True, help_text=None, to='taggit.Tag', through='taggit.TaggedItem')), ('uploaded_by_user', models.ForeignKey(editable=False, blank=True, null=True, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, bases=(models.Model, wagtail.wagtailadmin.taggable.TagSearchable), ), migrations.CreateModel( name='Rendition', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')), ('file', models.ImageField(width_field='width', upload_to='images', height_field='height')), ('width', models.IntegerField(editable=False)), ('height', models.IntegerField(editable=False)), ('focal_point_key', models.CharField(editable=False, max_length=255, null=True)), ('filter', models.ForeignKey(related_name='+', to='wagtailimages.Filter')), ('image', models.ForeignKey(related_name='renditions', to='wagtailimages.Image')), ], options={ }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='rendition', unique_together=set([('image', 'filter', 'focal_point_key')]), ), ]
bsd-3-clause
8,441,832,068,934,010,000
-886,989,295,198,516,700
46.101449
163
0.585231
false
Huskerboy/startbootstrap-freelancer
freelancer_env/Lib/re.py
36
15501
# # Secret Labs' Regular Expression Engine # # re-compatible interface for the sre matching engine # # Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. # # This version of the SRE library can be redistributed under CNRI's # Python 1.6 license. For any other use, please contact Secret Labs # AB (info@pythonware.com). # # Portions of this engine have been developed in cooperation with # CNRI. Hewlett-Packard provided funding for 1.6 integration and # other compatibility work. # r"""Support for regular expressions (RE). This module provides regular expression matching operations similar to those found in Perl. It supports both 8-bit and Unicode strings; both the pattern and the strings being processed can contain null bytes and characters outside the US ASCII range. Regular expressions can contain both special and ordinary characters. Most ordinary characters, like "A", "a", or "0", are the simplest regular expressions; they simply match themselves. You can concatenate ordinary characters, so last matches the string 'last'. The special characters are: "." Matches any character except a newline. "^" Matches the start of the string. "$" Matches the end of the string or just before the newline at the end of the string. "*" Matches 0 or more (greedy) repetitions of the preceding RE. Greedy means that it will match as many repetitions as possible. "+" Matches 1 or more (greedy) repetitions of the preceding RE. "?" Matches 0 or 1 (greedy) of the preceding RE. *?,+?,?? Non-greedy versions of the previous three special characters. {m,n} Matches from m to n repetitions of the preceding RE. {m,n}? Non-greedy version of the above. "\\" Either escapes special characters or signals a special sequence. [] Indicates a set of characters. A "^" as the first character indicates a complementing set. "|" A|B, creates an RE that will match either A or B. (...) Matches the RE inside the parentheses. The contents can be retrieved or matched later in the string. (?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below). (?:...) Non-grouping version of regular parentheses. (?P<name>...) The substring matched by the group is accessible by name. (?P=name) Matches the text matched earlier by the group named name. (?#...) A comment; ignored. (?=...) Matches if ... matches next, but doesn't consume the string. (?!...) Matches if ... doesn't match next. (?<=...) Matches if preceded by ... (must be fixed length). (?<!...) Matches if not preceded by ... (must be fixed length). (?(id/name)yes|no) Matches yes pattern if the group with id/name matched, the (optional) no pattern otherwise. The special sequences consist of "\\" and a character from the list below. If the ordinary character is not on the list, then the resulting RE will match the second character. \number Matches the contents of the group of the same number. \A Matches only at the start of the string. \Z Matches only at the end of the string. \b Matches the empty string, but only at the start or end of a word. \B Matches the empty string, but not at the start or end of a word. \d Matches any decimal digit; equivalent to the set [0-9] in bytes patterns or string patterns with the ASCII flag. In string patterns without the ASCII flag, it will match the whole range of Unicode digits. \D Matches any non-digit character; equivalent to [^\d]. \s Matches any whitespace character; equivalent to [ \t\n\r\f\v] in bytes patterns or string patterns with the ASCII flag. 
In string patterns without the ASCII flag, it will match the whole range of Unicode whitespace characters. \S Matches any non-whitespace character; equivalent to [^\s]. \w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_] in bytes patterns or string patterns with the ASCII flag. In string patterns without the ASCII flag, it will match the range of Unicode alphanumeric characters (letters plus digits plus underscore). With LOCALE, it will match the set [0-9_] plus characters defined as letters for the current locale. \W Matches the complement of \w. \\ Matches a literal backslash. This module exports the following functions: match Match a regular expression pattern to the beginning of a string. fullmatch Match a regular expression pattern to all of a string. search Search a string for the presence of a pattern. sub Substitute occurrences of a pattern found in a string. subn Same as sub, but also return the number of substitutions made. split Split a string by the occurrences of a pattern. findall Find all occurrences of a pattern in a string. finditer Return an iterator yielding a match object for each match. compile Compile a pattern into a RegexObject. purge Clear the regular expression cache. escape Backslash all non-alphanumerics in a string. Some of the functions in this module takes flags as optional parameters: A ASCII For string patterns, make \w, \W, \b, \B, \d, \D match the corresponding ASCII character categories (rather than the whole Unicode categories, which is the default). For bytes patterns, this flag is the only available behaviour and needn't be specified. I IGNORECASE Perform case-insensitive matching. L LOCALE Make \w, \W, \b, \B, dependent on the current locale. M MULTILINE "^" matches the beginning of lines (after a newline) as well as the string. "$" matches the end of lines (before a newline) as well as the end of the string. S DOTALL "." matches any character at all, including the newline. X VERBOSE Ignore whitespace and comments for nicer looking RE's. U UNICODE For compatibility only. Ignored for string patterns (it is the default), and forbidden for bytes patterns. This module also defines an exception 'error'. 
""" import sys import sre_compile import sre_parse try: import _locale except ImportError: _locale = None # public symbols __all__ = [ "match", "fullmatch", "search", "sub", "subn", "split", "findall", "finditer", "compile", "purge", "template", "escape", "error", "A", "I", "L", "M", "S", "X", "U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE", "UNICODE", ] __version__ = "2.2.1" # flags A = ASCII = sre_compile.SRE_FLAG_ASCII # assume ascii "locale" I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale" M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments # sre extensions (experimental, don't rely on these) T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation # sre exception error = sre_compile.error # -------------------------------------------------------------------- # public interface def match(pattern, string, flags=0): """Try to apply the pattern at the start of the string, returning a match object, or None if no match was found.""" return _compile(pattern, flags).match(string) def fullmatch(pattern, string, flags=0): """Try to apply the pattern to all of the string, returning a match object, or None if no match was found.""" return _compile(pattern, flags).fullmatch(string) def search(pattern, string, flags=0): """Scan through string looking for a match to the pattern, returning a match object, or None if no match was found.""" return _compile(pattern, flags).search(string) def sub(pattern, repl, string, count=0, flags=0): """Return the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in string by the replacement repl. repl can be either a string or a callable; if a string, backslash escapes in it are processed. If it is a callable, it's passed the match object and must return a replacement string to be used.""" return _compile(pattern, flags).sub(repl, string, count) def subn(pattern, repl, string, count=0, flags=0): """Return a 2-tuple containing (new_string, number). new_string is the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in the source string by the replacement repl. number is the number of substitutions that were made. repl can be either a string or a callable; if a string, backslash escapes in it are processed. If it is a callable, it's passed the match object and must return a replacement string to be used.""" return _compile(pattern, flags).subn(repl, string, count) def split(pattern, string, maxsplit=0, flags=0): """Split the source string by the occurrences of the pattern, returning a list containing the resulting substrings. If capturing parentheses are used in pattern, then the text of all groups in the pattern are also returned as part of the resulting list. If maxsplit is nonzero, at most maxsplit splits occur, and the remainder of the string is returned as the final element of the list.""" return _compile(pattern, flags).split(string, maxsplit) def findall(pattern, string, flags=0): """Return a list of all non-overlapping matches in the string. 
If one or more capturing groups are present in the pattern, return a list of groups; this will be a list of tuples if the pattern has more than one group. Empty matches are included in the result.""" return _compile(pattern, flags).findall(string) def finditer(pattern, string, flags=0): """Return an iterator over all non-overlapping matches in the string. For each match, the iterator returns a match object. Empty matches are included in the result.""" return _compile(pattern, flags).finditer(string) def compile(pattern, flags=0): "Compile a regular expression pattern, returning a pattern object." return _compile(pattern, flags) def purge(): "Clear the regular expression caches" _cache.clear() _cache_repl.clear() def template(pattern, flags=0): "Compile a template pattern, returning a pattern object" return _compile(pattern, flags|T) _alphanum_str = frozenset( "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890") _alphanum_bytes = frozenset( b"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890") def escape(pattern): """ Escape all the characters in pattern except ASCII letters, numbers and '_'. """ if isinstance(pattern, str): alphanum = _alphanum_str s = list(pattern) for i, c in enumerate(pattern): if c not in alphanum: if c == "\000": s[i] = "\\000" else: s[i] = "\\" + c return "".join(s) else: alphanum = _alphanum_bytes s = [] esc = ord(b"\\") for c in pattern: if c in alphanum: s.append(c) else: if c == 0: s.extend(b"\\000") else: s.append(esc) s.append(c) return bytes(s) # -------------------------------------------------------------------- # internals _cache = {} _cache_repl = {} _pattern_type = type(sre_compile.compile("", 0)) _MAXCACHE = 512 def _compile(pattern, flags): # internal: compile pattern try: p, loc = _cache[type(pattern), pattern, flags] if loc is None or loc == _locale.setlocale(_locale.LC_CTYPE): return p except KeyError: pass if isinstance(pattern, _pattern_type): if flags: raise ValueError( "cannot process flags argument with a compiled pattern") return pattern if not sre_compile.isstring(pattern): raise TypeError("first argument must be string or compiled pattern") p = sre_compile.compile(pattern, flags) if not (flags & DEBUG): if len(_cache) >= _MAXCACHE: _cache.clear() if p.flags & LOCALE: if not _locale: return p loc = _locale.setlocale(_locale.LC_CTYPE) else: loc = None _cache[type(pattern), pattern, flags] = p, loc return p def _compile_repl(repl, pattern): # internal: compile replacement pattern try: return _cache_repl[repl, pattern] except KeyError: pass p = sre_parse.parse_template(repl, pattern) if len(_cache_repl) >= _MAXCACHE: _cache_repl.clear() _cache_repl[repl, pattern] = p return p def _expand(pattern, match, template): # internal: match.expand implementation hook template = sre_parse.parse_template(template, pattern) return sre_parse.expand_template(template, match) def _subx(pattern, template): # internal: pattern.sub/subn implementation helper template = _compile_repl(template, pattern) if not template[0] and len(template[1]) == 1: # literal replacement return template[1][0] def filter(match, template=template): return sre_parse.expand_template(template, match) return filter # register myself for pickling import copyreg def _pickle(p): return _compile, (p.pattern, p.flags) copyreg.pickle(_pattern_type, _pickle, _compile) # -------------------------------------------------------------------- # experimental stuff (see python-dev discussions for details) class Scanner: def __init__(self, lexicon, flags=0): from 
sre_constants import BRANCH, SUBPATTERN self.lexicon = lexicon # combine phrases into a compound pattern p = [] s = sre_parse.Pattern() s.flags = flags for phrase, action in lexicon: gid = s.opengroup() p.append(sre_parse.SubPattern(s, [ (SUBPATTERN, (gid, sre_parse.parse(phrase, flags))), ])) s.closegroup(gid, p[-1]) p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) self.scanner = sre_compile.compile(p) def scan(self, string): result = [] append = result.append match = self.scanner.scanner(string).match i = 0 while True: m = match() if not m: break j = m.end() if i == j: break action = self.lexicon[m.lastindex-1][1] if callable(action): self.match = m action = action(self, m.group()) if action is not None: append(action) i = j return result, string[i:]
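The experimental Scanner class at the bottom tokenizes a string by combining the lexicon patterns into one branch and dispatching on which group matched; a small sketch, with made-up token tags:

# Illustrative tokenizer built on the Scanner class above.
scanner = Scanner([
    (r"[0-9]+", lambda s, tok: ("INT", int(tok))),
    (r"[a-z]+", lambda s, tok: ("IDENT", tok)),
    (r"\s+", None),  # a None action drops the match (skips whitespace)
])
tokens, remainder = scanner.scan("abc 123 def")
# tokens    -> [('IDENT', 'abc'), ('INT', 123), ('IDENT', 'def')]
# remainder -> '' (the unmatched tail of the input)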
mit
-6,698,869,415,976,645,000
-387,973,767,523,678,340
39.792105
79
0.638991
false
pyKun/rally
rally/plugins/openstack/scenarios/ceilometer/samples.py
3
1169
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally import consts from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils from rally.task.scenarios import base from rally.task import validation class CeilometerSamples(ceiloutils.CeilometerScenario): """Benchmark scenarios for Ceilometer Samples API.""" @validation.required_services(consts.Service.CEILOMETER) @validation.required_openstack(users=True) @base.scenario() def list_samples(self): """Fetch all samples. This scenario fetches list of all samples. """ self._list_samples()
apache-2.0
-2,962,008,990,012,300,300
-3,780,410,160,866,043,000
35.53125
78
0.729683
false
eayunstack/eayunstack-upgrade
ansible/library/keystone_v2_endpoint.py
1
9178
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on Jimmy Tang's implementation

DOCUMENTATION = """
---
module: keystone_v2_endpoint
short_description:
  - Manage OpenStack Identity (keystone) v2 endpoint.
description:
  - Manage OpenStack Identity (keystone) v2 endpoints.
options:
  token:
    description:
      - The token to be used in case the password is not specified
    required: true
    default: None
  endpoint:
    description:
      - The keystone url for authentication
    required: true
  service_name:
    description:
      - Name of the service.
    required: true
    default: None
  region_name:
    description:
      - Name of the region.
    required: true
    default: None
  service_type:
    description:
      - Type of service.
    required: true
    default: None
  endpoint_dict:
    description:
      - Dict of endpoint urls to add to keystone for a service
    required: true
    default: None
    type: dict
  state:
    description:
      - Ensure the endpoint is either present or absent.
      - It always ensures the endpoint is updated to the latest url.
    required: False
    default: 'present'
requirements: [ python-keystoneclient ]
"""

EXAMPLES = """
# Create an endpoint
- keystone_v2_endpoint:
    region_name: "RegionOne"
    service_name: "glance"
    service_type: "image"
    endpoint: "http://127.0.0.1:5000/v2.0/"
    token: "ChangeMe"
    endpoint_dict:
      publicurl: "http://127.0.0.1:9292"
      adminurl: "http://127.0.0.1:9292"
      internalurl: "http://127.0.0.1:9292"
"""

try:
    from keystoneclient.v2_0 import client
except ImportError:
    keystoneclient_found = False
else:
    keystoneclient_found = True


class ManageKeystoneV2Endpoint(object):
    def __init__(self, module):
        """Manage Keystone via Ansible."""
        self.state_change = False
        self.keystone = None

        # Load AnsibleModule
        self.module = module

    @staticmethod
    def _facts(facts):
        """Return a dict for our Ansible facts.

        :param facts: ``dict``  Dict with data to return
        """
        return {'keystone_facts': facts}

    def failure(self, error, rc, msg):
        """Return a Failure when running an Ansible command.

        :param error: ``str``  Error that occurred.
        :param rc: ``int``  Return code while executing an Ansible command.
        :param msg: ``str``  Message to report.
        """
        self.module.fail_json(msg=msg, rc=rc, err=error)

    def _authenticate(self):
        """Return a keystone client object."""
        endpoint = self.module.params.get('endpoint')
        token = self.module.params.get('token')

        if token is None:
            self.failure(
                error='Missing Auth Token',
                rc=2,
                msg='Auth token is required!'
            )
        if token:
            self.keystone = client.Client(
                endpoint=endpoint,
                token=token
            )

    def _get_service(self, name, srv_type=None):
        for entry in self.keystone.services.list():
            if srv_type is not None:
                if entry.type == srv_type and name == entry.name:
                    return entry
            elif entry.name == name:
                return entry
        else:
            return None

    def _get_endpoint(self, region, service_id):
        """Get the endpoint matching a complete definition.

        Returns the endpoint details for an endpoint
        matching region and service id.

        :param service_id: service to which the endpoint belongs
        :param region: geographic location of the endpoint
        """
        for entry in self.keystone.endpoints.list():
            check = [
                entry.region == region,
                entry.service_id == service_id,
            ]
            if all(check):
                return entry
        else:
            return None

    def _compare_endpoint_info(self, endpoint, endpoint_dict):
        """Compare an existing endpoint with the module parameters.

        Return True if the public, admin and internal urls are all the same.

        :param endpoint: existing endpoint
        :param endpoint_dict: endpoint info passed in
        """
        check = [
            endpoint.adminurl == endpoint_dict.get('adminurl'),
            endpoint.publicurl == endpoint_dict.get('publicurl'),
            endpoint.internalurl == endpoint_dict.get('internalurl')
        ]
        if all(check):
            return True
        else:
            return False

    def ensure_endpoint(self):
        """Ensure the deletion/modification/addition of an endpoint
        within Keystone.

        Exits the module, returning the endpoint facts on a
        successful run.
        """
        self._authenticate()
        service_name = self.module.params.get('service_name')
        service_type = self.module.params.get('service_type')
        region = self.module.params.get('region_name')
        endpoint_dict = self.module.params.get('endpoint_dict')
        state = self.module.params.get('state')

        endpoint_dict = {
            'adminurl': endpoint_dict.get('adminurl', ''),
            'publicurl': endpoint_dict.get('publicurl', ''),
            'internalurl': endpoint_dict.get('internalurl', '')
        }

        service = self._get_service(name=service_name, srv_type=service_type)
        if service is None:
            self.failure(
                error='service [ %s ] was not found.' % service_name,
                rc=2,
                msg='Service was not found, does it exist?'
            )

        existed_endpoint = self._get_endpoint(
            region=region,
            service_id=service.id,
        )

        delete_existed = False
        if state == 'present':
            '''
            Create the endpoint if it does not exist; otherwise, when the
            urls differ, create a new endpoint and then delete the existing
            one matching the service type, name and region.
            '''
            if existed_endpoint:
                if not self._compare_endpoint_info(existed_endpoint,
                                                   endpoint_dict):
                    delete_existed = True
                else:
                    endpoint = existed_endpoint

            if (not existed_endpoint or
                    delete_existed):
                self.state_change = True
                endpoint = self.keystone.endpoints.create(
                    region=region,
                    service_id=service.id,
                    **endpoint_dict
                )

        elif state == 'absent':
            if existed_endpoint is not None:
                self.state_change = True
                delete_existed = True

        if delete_existed:
            result = self.keystone.endpoints.delete(existed_endpoint.id)
            if result[0].status_code != 204:
                self.module.fail_json(
                    msg='Failed to delete endpoint %s' % existed_endpoint.id
                )

        if state != 'absent':
            facts = self._facts(endpoint.to_dict())
        else:
            facts = self._facts({})

        self.module.exit_json(
            changed=self.state_change,
            ansible_facts=facts
        )


# TODO(evrardjp): Deprecate state=update in Q.
def main():
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(
                required=True
            ),
            endpoint=dict(
                required=True,
            ),
            region_name=dict(
                required=True
            ),
            service_name=dict(
                required=True
            ),
            service_type=dict(
                required=True
            ),
            endpoint_dict=dict(
                required=True,
                type='dict'
            ),
            state=dict(
                choices=['present', 'absent'],
                required=False,
                default='present'
            )
        ),
        supports_check_mode=False,
    )

    km = ManageKeystoneV2Endpoint(module=module)
    if not keystoneclient_found:
        km.failure(
            error='python-keystoneclient is missing',
            rc=2,
            msg='keystone client was not importable, is it installed?'
        )

    km.ensure_endpoint()


# import module snippets
from ansible.module_utils.basic import *  # NOQA


if __name__ == '__main__':
    main()
apache-2.0
-1,006,895,568,885,644,700
-6,458,205,326,859,398,000
28.322684
78
0.552953
false
lexus42/2015cd_midterm2
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_skipping.py
744
5173
import unittest

from .support import LoggingResult


class Test_TestSkipping(unittest.TestCase):

    def test_skipping(self):
        class Foo(unittest.TestCase):
            def test_skip_me(self):
                self.skipTest("skip")
        events = []
        result = LoggingResult(events)
        test = Foo("test_skip_me")
        test.run(result)
        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
        self.assertEqual(result.skipped, [(test, "skip")])

        # Try letting setUp skip the test now.
        class Foo(unittest.TestCase):
            def setUp(self):
                self.skipTest("testing")
            def test_nothing(self): pass
        events = []
        result = LoggingResult(events)
        test = Foo("test_nothing")
        test.run(result)
        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(result.testsRun, 1)

    def test_skipping_decorators(self):
        op_table = ((unittest.skipUnless, False, True),
                    (unittest.skipIf, True, False))
        for deco, do_skip, dont_skip in op_table:
            class Foo(unittest.TestCase):
                @deco(do_skip, "testing")
                def test_skip(self): pass

                @deco(dont_skip, "testing")
                def test_dont_skip(self): pass
            test_do_skip = Foo("test_skip")
            test_dont_skip = Foo("test_dont_skip")
            suite = unittest.TestSuite([test_do_skip, test_dont_skip])
            events = []
            result = LoggingResult(events)
            suite.run(result)
            self.assertEqual(len(result.skipped), 1)
            expected = ['startTest', 'addSkip', 'stopTest',
                        'startTest', 'addSuccess', 'stopTest']
            self.assertEqual(events, expected)
            self.assertEqual(result.testsRun, 2)
            self.assertEqual(result.skipped, [(test_do_skip, "testing")])
            self.assertTrue(result.wasSuccessful())

    def test_skip_class(self):
        @unittest.skip("testing")
        class Foo(unittest.TestCase):
            def test_1(self):
                record.append(1)
        record = []
        result = unittest.TestResult()
        test = Foo("test_1")
        suite = unittest.TestSuite([test])
        suite.run(result)
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(record, [])

    def test_skip_non_unittest_class(self):
        @unittest.skip("testing")
        class Mixin:
            def test_1(self):
                record.append(1)
        class Foo(Mixin, unittest.TestCase):
            pass
        record = []
        result = unittest.TestResult()
        test = Foo("test_1")
        suite = unittest.TestSuite([test])
        suite.run(result)
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(record, [])

    def test_expected_failure(self):
        class Foo(unittest.TestCase):
            @unittest.expectedFailure
            def test_die(self):
                self.fail("help me!")
        events = []
        result = LoggingResult(events)
        test = Foo("test_die")
        test.run(result)
        self.assertEqual(events,
                         ['startTest', 'addExpectedFailure', 'stopTest'])
        self.assertEqual(result.expectedFailures[0][0], test)
        self.assertTrue(result.wasSuccessful())

    def test_unexpected_success(self):
        class Foo(unittest.TestCase):
            @unittest.expectedFailure
            def test_die(self):
                pass
        events = []
        result = LoggingResult(events)
        test = Foo("test_die")
        test.run(result)
        self.assertEqual(events,
                         ['startTest', 'addUnexpectedSuccess', 'stopTest'])
        self.assertFalse(result.failures)
        self.assertEqual(result.unexpectedSuccesses, [test])
        self.assertTrue(result.wasSuccessful())

    def test_skip_doesnt_run_setup(self):
        class Foo(unittest.TestCase):
            wasSetUp = False
            wasTornDown = False
            def setUp(self):
                Foo.wasSetUp = True
            def tearDown(self):
                Foo.wasTornDown = True
            @unittest.skip('testing')
            def test_1(self):
                pass

        result = unittest.TestResult()
        test = Foo("test_1")
        suite = unittest.TestSuite([test])
        suite.run(result)
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertFalse(Foo.wasSetUp)
        self.assertFalse(Foo.wasTornDown)

    def test_decorated_skip(self):
        def decorator(func):
            def inner(*a):
                return func(*a)
            return inner

        class Foo(unittest.TestCase):
            @decorator
            @unittest.skip('testing')
            def test_1(self):
                pass

        result = unittest.TestResult()
        test = Foo("test_1")
        suite = unittest.TestSuite([test])
        suite.run(result)
        self.assertEqual(result.skipped, [(test, "testing")])
agpl-3.0
-403,330,753,159,972,540
2,516,320,678,062,899,700
33.718121
75
0.553064
false
gaddman/ansible
lib/ansible/modules/storage/ibm/ibm_sa_domain.py
9
4173
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: ibm_sa_domain
short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
version_added: "2.8"

description:
    - "This module can be used to add domains to or remove them from IBM Spectrum Accelerate Family storage systems."

options:
    domain:
        description:
            - Name of the domain to be managed.
        required: true
    state:
        description:
            - The desired state of the domain.
        required: false
        default: "present"
        choices: [ "present", "absent" ]
    ldap_id:
        description:
            - ldap id to add to the domain.
        required: false
    size:
        description:
            - Size of the domain.
        required: false
    hard_capacity:
        description:
            - Hard capacity of the domain.
        required: false
    soft_capacity:
        description:
            - Soft capacity of the domain.
        required: false
    max_cgs:
        description:
            - Number of max cgs.
        required: false
    max_dms:
        description:
            - Number of max dms.
        required: false
    max_mirrors:
        description:
            - Number of max_mirrors.
        required: false
    max_pools:
        description:
            - Number of max_pools.
        required: false
    max_volumes:
        description:
            - Number of max_volumes.
        required: false
    perf_class:
        description:
            - Add the domain to a performance class.
        required: false

extends_documentation_fragment:
    - ibm_storage

author:
    - Tzur Eliyahu (@tzure)
'''

EXAMPLES = '''
- name: Define new domain.
  ibm_sa_domain:
    domain: domain_name
    size: domain_size
    state: present
    username: admin
    password: secret
    endpoints: hostdev-system

- name: Delete domain.
  ibm_sa_domain:
    domain: domain_name
    state: absent
    username: admin
    password: secret
    endpoints: hostdev-system
'''
RETURN = '''
msg:
    description: module return status.
    returned: as needed
    type: string
    sample: "domain 'domain_name' created successfully."
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ibm_sa_utils import execute_pyxcli_command, \
    connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed


def main():
    argument_spec = spectrum_accelerate_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            domain=dict(required=True),
            size=dict(),
            max_dms=dict(),
            max_cgs=dict(),
            ldap_id=dict(),
            max_mirrors=dict(),
            max_pools=dict(),
            max_volumes=dict(),
            perf_class=dict(),
            hard_capacity=dict(),
            soft_capacity=dict()
        )
    )

    module = AnsibleModule(argument_spec)

    is_pyxcli_installed(module)

    xcli_client = connect_ssl(module)
    domain = xcli_client.cmd.domain_list(
        domain=module.params['domain']).as_single_element
    state = module.params['state']

    state_changed = False
    msg = 'Domain \'{0}\''.format(module.params['domain'])
    if state == 'present' and not domain:
        state_changed = execute_pyxcli_command(
            module, 'domain_create', xcli_client)
        msg += " created successfully."
    elif state == 'absent' and domain:
        state_changed = execute_pyxcli_command(
            module, 'domain_delete', xcli_client)
        msg += " deleted successfully."
    else:
        msg += " state unchanged."

    module.exit_json(changed=state_changed, msg=msg)


if __name__ == '__main__':
    main()
gpl-3.0
4,745,609,699,029,251,000
-5,805,028,564,326,112,000
24.919255
118
0.592859
false
shakamunyi/nova
nova/tests/unit/db/test_migrations.py
1
32632
# Copyright 2010-2011 OpenStack Foundation # Copyright 2012-2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. There are "opportunistic" tests which allows testing against all 3 databases (sqlite in memory, mysql, pg) in a properly configured unit test environment. For the opportunistic testing you need to set up db's named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and u/p combo to run the tests. For postgres on Ubuntu this can be done with the following commands:: | sudo -u postgres psql | postgres=# create user openstack_citest with createdb login password | 'openstack_citest'; | postgres=# create database openstack_citest with owner openstack_citest; """ import glob import logging import os from migrate.versioning import repository import mock from oslo.db.sqlalchemy import test_base from oslo.db.sqlalchemy import test_migrations from oslo.db.sqlalchemy import utils as oslodbutils import sqlalchemy from sqlalchemy.engine import reflection import sqlalchemy.exc from sqlalchemy.sql import null from nova.db import migration from nova.db.sqlalchemy import migrate_repo from nova.db.sqlalchemy import migration as sa_migration from nova.db.sqlalchemy import utils as db_utils from nova import exception from nova import test LOG = logging.getLogger(__name__) class NovaMigrationsCheckers(test_migrations.WalkVersionsMixin): """Test sqlalchemy-migrate migrations.""" TIMEOUT_SCALING_FACTOR = 2 snake_walk = True downgrade = True @property def INIT_VERSION(self): return migration.db_initial_version() @property def REPOSITORY(self): return repository.Repository( os.path.abspath(os.path.dirname(migrate_repo.__file__))) @property def migration_api(self): return sa_migration.versioning_api @property def migrate_engine(self): return self.engine def setUp(self): super(NovaMigrationsCheckers, self).setUp() # NOTE(viktors): We should reduce log output because it causes issues, # when we run tests with testr migrate_log = logging.getLogger('migrate') old_level = migrate_log.level migrate_log.setLevel(logging.WARN) self.addCleanup(migrate_log.setLevel, old_level) def assertColumnExists(self, engine, table_name, column): self.assertTrue(oslodbutils.column_exists(engine, table_name, column)) def assertColumnNotExists(self, engine, table_name, column): self.assertFalse(oslodbutils.column_exists(engine, table_name, column)) def assertTableNotExists(self, engine, table): self.assertRaises(sqlalchemy.exc.NoSuchTableError, oslodbutils.get_table, engine, table) def assertIndexExists(self, engine, table_name, index): self.assertTrue(oslodbutils.index_exists(engine, table_name, index)) def assertIndexNotExists(self, engine, table_name, index): self.assertFalse(oslodbutils.index_exists(engine, table_name, index)) def assertIndexMembers(self, engine, table, index, members): # NOTE(johannes): Order of columns can matter. 
Most SQL databases # can use the leading columns for optimizing queries that don't # include all of the covered columns. self.assertIndexExists(engine, table, index) t = oslodbutils.get_table(engine, table) index_columns = None for idx in t.indexes: if idx.name == index: index_columns = [c.name for c in idx.columns] break self.assertEqual(members, index_columns) def _skippable_migrations(self): special = [ 216, # Havana 272, # NOOP migration due to revert ] havana_placeholders = range(217, 227) icehouse_placeholders = range(235, 244) juno_placeholders = range(255, 265) return (special + havana_placeholders + icehouse_placeholders + juno_placeholders) def migrate_up(self, version, with_data=False): if with_data: check = getattr(self, "_check_%03d" % version, None) if version not in self._skippable_migrations(): self.assertIsNotNone(check, ('DB Migration %i does not have a ' 'test. Please add one!') % version) super(NovaMigrationsCheckers, self).migrate_up(version, with_data) def test_walk_versions(self): self.walk_versions(self.snake_walk, self.downgrade) def _check_227(self, engine, data): table = oslodbutils.get_table(engine, 'project_user_quotas') # Insert fake_quotas with the longest resource name. fake_quotas = {'id': 5, 'project_id': 'fake_project', 'user_id': 'fake_user', 'resource': 'injected_file_content_bytes', 'hard_limit': 10} table.insert().execute(fake_quotas) # Check we can get the longest resource name. quota = table.select(table.c.id == 5).execute().first() self.assertEqual(quota['resource'], 'injected_file_content_bytes') def _check_228(self, engine, data): self.assertColumnExists(engine, 'compute_nodes', 'metrics') compute_nodes = oslodbutils.get_table(engine, 'compute_nodes') self.assertIsInstance(compute_nodes.c.metrics.type, sqlalchemy.types.Text) def _post_downgrade_228(self, engine): self.assertColumnNotExists(engine, 'compute_nodes', 'metrics') def _check_229(self, engine, data): self.assertColumnExists(engine, 'compute_nodes', 'extra_resources') compute_nodes = oslodbutils.get_table(engine, 'compute_nodes') self.assertIsInstance(compute_nodes.c.extra_resources.type, sqlalchemy.types.Text) def _post_downgrade_229(self, engine): self.assertColumnNotExists(engine, 'compute_nodes', 'extra_resources') def _check_230(self, engine, data): for table_name in ['instance_actions_events', 'shadow_instance_actions_events']: self.assertColumnExists(engine, table_name, 'host') self.assertColumnExists(engine, table_name, 'details') action_events = oslodbutils.get_table(engine, 'instance_actions_events') self.assertIsInstance(action_events.c.host.type, sqlalchemy.types.String) self.assertIsInstance(action_events.c.details.type, sqlalchemy.types.Text) def _post_downgrade_230(self, engine): for table_name in ['instance_actions_events', 'shadow_instance_actions_events']: self.assertColumnNotExists(engine, table_name, 'host') self.assertColumnNotExists(engine, table_name, 'details') def _check_231(self, engine, data): self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid') instances = oslodbutils.get_table(engine, 'instances') self.assertIsInstance(instances.c.ephemeral_key_uuid.type, sqlalchemy.types.String) self.assertTrue(db_utils.check_shadow_table(engine, 'instances')) def _post_downgrade_231(self, engine): self.assertColumnNotExists(engine, 'instances', 'ephemeral_key_uuid') self.assertTrue(db_utils.check_shadow_table(engine, 'instances')) def _check_232(self, engine, data): table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions', 
'instance_actions_events', 'instance_faults', 'migrations'] for table_name in table_names: self.assertTableNotExists(engine, 'dump_' + table_name) def _check_233(self, engine, data): self.assertColumnExists(engine, 'compute_nodes', 'stats') compute_nodes = oslodbutils.get_table(engine, 'compute_nodes') self.assertIsInstance(compute_nodes.c.stats.type, sqlalchemy.types.Text) self.assertRaises(sqlalchemy.exc.NoSuchTableError, oslodbutils.get_table, engine, 'compute_node_stats') def _post_downgrade_233(self, engine): self.assertColumnNotExists(engine, 'compute_nodes', 'stats') # confirm compute_node_stats exists oslodbutils.get_table(engine, 'compute_node_stats') def _check_234(self, engine, data): self.assertIndexMembers(engine, 'reservations', 'reservations_deleted_expire_idx', ['deleted', 'expire']) def _check_244(self, engine, data): volume_usage_cache = oslodbutils.get_table( engine, 'volume_usage_cache') self.assertEqual(64, volume_usage_cache.c.user_id.type.length) def _post_downgrade_244(self, engine): volume_usage_cache = oslodbutils.get_table( engine, 'volume_usage_cache') self.assertEqual(36, volume_usage_cache.c.user_id.type.length) def _pre_upgrade_245(self, engine): # create a fake network networks = oslodbutils.get_table(engine, 'networks') fake_network = {'id': 1} networks.insert().execute(fake_network) def _check_245(self, engine, data): networks = oslodbutils.get_table(engine, 'networks') network = networks.select(networks.c.id == 1).execute().first() # mtu should default to None self.assertIsNone(network.mtu) # dhcp_server should default to None self.assertIsNone(network.dhcp_server) # enable dhcp should default to true self.assertTrue(network.enable_dhcp) # share address should default to false self.assertFalse(network.share_address) def _post_downgrade_245(self, engine): self.assertColumnNotExists(engine, 'networks', 'mtu') self.assertColumnNotExists(engine, 'networks', 'dhcp_server') self.assertColumnNotExists(engine, 'networks', 'enable_dhcp') self.assertColumnNotExists(engine, 'networks', 'share_address') def _check_246(self, engine, data): pci_devices = oslodbutils.get_table(engine, 'pci_devices') self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys if fk.parent.name == 'compute_node_id'])) def _post_downgrade_246(self, engine): pci_devices = oslodbutils.get_table(engine, 'pci_devices') self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys if fk.parent.name == 'compute_node_id'])) def _check_247(self, engine, data): quota_usages = oslodbutils.get_table(engine, 'quota_usages') self.assertFalse(quota_usages.c.resource.nullable) pci_devices = oslodbutils.get_table(engine, 'pci_devices') self.assertTrue(pci_devices.c.deleted.nullable) self.assertFalse(pci_devices.c.product_id.nullable) self.assertFalse(pci_devices.c.vendor_id.nullable) self.assertFalse(pci_devices.c.dev_type.nullable) def _post_downgrade_247(self, engine): quota_usages = oslodbutils.get_table(engine, 'quota_usages') self.assertTrue(quota_usages.c.resource.nullable) pci_devices = oslodbutils.get_table(engine, 'pci_devices') self.assertFalse(pci_devices.c.deleted.nullable) self.assertTrue(pci_devices.c.product_id.nullable) self.assertTrue(pci_devices.c.vendor_id.nullable) self.assertTrue(pci_devices.c.dev_type.nullable) def _check_248(self, engine, data): self.assertIndexMembers(engine, 'reservations', 'reservations_deleted_expire_idx', ['deleted', 'expire']) def _post_downgrade_248(self, engine): reservations = oslodbutils.get_table(engine, 'reservations') index_names = [idx.name for idx 
in reservations.indexes] self.assertNotIn('reservations_deleted_expire_idx', index_names) def _check_249(self, engine, data): # Assert that only one index exists that covers columns # instance_uuid and device_name bdm = oslodbutils.get_table(engine, 'block_device_mapping') self.assertEqual(1, len([i for i in bdm.indexes if [c.name for c in i.columns] == ['instance_uuid', 'device_name']])) def _post_downgrade_249(self, engine): # The duplicate index is not created on downgrade, so this # asserts that only one index exists that covers columns # instance_uuid and device_name bdm = oslodbutils.get_table(engine, 'block_device_mapping') self.assertEqual(1, len([i for i in bdm.indexes if [c.name for c in i.columns] == ['instance_uuid', 'device_name']])) def _check_250(self, engine, data): self.assertTableNotExists(engine, 'instance_group_metadata') self.assertTableNotExists(engine, 'shadow_instance_group_metadata') def _post_downgrade_250(self, engine): oslodbutils.get_table(engine, 'instance_group_metadata') oslodbutils.get_table(engine, 'shadow_instance_group_metadata') def _check_251(self, engine, data): self.assertColumnExists(engine, 'compute_nodes', 'numa_topology') self.assertColumnExists(engine, 'shadow_compute_nodes', 'numa_topology') compute_nodes = oslodbutils.get_table(engine, 'compute_nodes') shadow_compute_nodes = oslodbutils.get_table(engine, 'shadow_compute_nodes') self.assertIsInstance(compute_nodes.c.numa_topology.type, sqlalchemy.types.Text) self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type, sqlalchemy.types.Text) def _post_downgrade_251(self, engine): self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology') self.assertColumnNotExists(engine, 'shadow_compute_nodes', 'numa_topology') def _check_252(self, engine, data): oslodbutils.get_table(engine, 'instance_extra') oslodbutils.get_table(engine, 'shadow_instance_extra') self.assertIndexMembers(engine, 'instance_extra', 'instance_extra_idx', ['instance_uuid']) def _post_downgrade_252(self, engine): self.assertTableNotExists(engine, 'instance_extra') self.assertTableNotExists(engine, 'shadow_instance_extra') def _check_253(self, engine, data): self.assertColumnExists(engine, 'instance_extra', 'pci_requests') self.assertColumnExists( engine, 'shadow_instance_extra', 'pci_requests') instance_extra = oslodbutils.get_table(engine, 'instance_extra') shadow_instance_extra = oslodbutils.get_table(engine, 'shadow_instance_extra') self.assertIsInstance(instance_extra.c.pci_requests.type, sqlalchemy.types.Text) self.assertIsInstance(shadow_instance_extra.c.pci_requests.type, sqlalchemy.types.Text) def _post_downgrade_253(self, engine): self.assertColumnNotExists(engine, 'instance_extra', 'pci_requests') self.assertColumnNotExists(engine, 'shadow_instance_extra', 'pci_requests') def _check_254(self, engine, data): self.assertColumnExists(engine, 'pci_devices', 'request_id') self.assertColumnExists( engine, 'shadow_pci_devices', 'request_id') pci_devices = oslodbutils.get_table(engine, 'pci_devices') shadow_pci_devices = oslodbutils.get_table( engine, 'shadow_pci_devices') self.assertIsInstance(pci_devices.c.request_id.type, sqlalchemy.types.String) self.assertIsInstance(shadow_pci_devices.c.request_id.type, sqlalchemy.types.String) def _post_downgrade_254(self, engine): self.assertColumnNotExists(engine, 'pci_devices', 'request_id') self.assertColumnNotExists( engine, 'shadow_pci_devices', 'request_id') def _check_265(self, engine, data): # Assert that only one index exists that covers columns # host and deleted 
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertEqual(1, len([i for i in instances.indexes
                                 if [c.name for c in i.columns][:2] ==
                                 ['host', 'deleted']]))
        # and only one index covers host column
        iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
        self.assertEqual(1, len([i for i in iscsi_targets.indexes
                                 if [c.name for c in i.columns][:1] ==
                                 ['host']]))

    def _post_downgrade_265(self, engine):
        # The duplicated index is not created on downgrade, so this
        # asserts that only one index exists that covers columns
        # host and deleted
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertEqual(1, len([i for i in instances.indexes
                                 if [c.name for c in i.columns][:2] ==
                                 ['host', 'deleted']]))
        # and only one index covers host column
        iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
        self.assertEqual(1, len([i for i in iscsi_targets.indexes
                                 if [c.name for c in i.columns][:1] ==
                                 ['host']]))

    def _check_266(self, engine, data):
        self.assertColumnExists(engine, 'tags', 'resource_id')
        self.assertColumnExists(engine, 'tags', 'tag')

        table = oslodbutils.get_table(engine, 'tags')
        self.assertIsInstance(table.c.resource_id.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(table.c.tag.type,
                              sqlalchemy.types.String)

    def _post_downgrade_266(self, engine):
        self.assertTableNotExists(engine, 'tags')

    def _pre_upgrade_267(self, engine):
        # Create a fixed_ips row with a null instance_uuid (if not already
        # there) to make sure that's not deleted.
        fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
        fake_fixed_ip = {'id': 1}
        fixed_ips.insert().execute(fake_fixed_ip)

        # Create an instance record with a valid (non-null) UUID so we make
        # sure we don't do something stupid and delete valid records.
        instances = oslodbutils.get_table(engine, 'instances')
        fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
        instances.insert().execute(fake_instance)

        # Add a null instance_uuid entry for the volumes table
        # since it doesn't have a foreign key back to the instances table.
        volumes = oslodbutils.get_table(engine, 'volumes')
        fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
        volumes.insert().execute(fake_volume)

    def _check_267(self, engine, data):
        # Make sure fixed_ips.instance_uuid is still nullable and the
        # NULL entry created in _pre_upgrade_267 survived the migration.
        fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
        self.assertTrue(fixed_ips.c.instance_uuid.nullable)
        fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
        self.assertIsNone(fixed_ip.instance_uuid)

        # Make sure instances.uuid is now non-nullable and the unique
        # constraint exists.
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertFalse(instances.c.uuid.nullable)

        inspector = reflection.Inspector.from_engine(engine)
        constraints = inspector.get_unique_constraints('instances')
        constraint_names = [constraint['name'] for constraint in constraints]
        self.assertIn('uniq_instances0uuid', constraint_names)

        # Make sure the instances record with the valid uuid is still there.
        instance = instances.select(instances.c.id == 1).execute().first()
        self.assertIsNotNone(instance)

        # Check that the null entry in the volumes table is still there since
        # we skipped tables that don't have FK's back to the instances table.
        volumes = oslodbutils.get_table(engine, 'volumes')
        self.assertTrue(volumes.c.instance_uuid.nullable)
        volume = volumes.select(
            volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
        ).execute().first()
        self.assertIsNone(volume.instance_uuid)

    def _post_downgrade_267(self, engine):
        # Make sure the UC is gone and the column is nullable again.
instances = oslodbutils.get_table(engine, 'instances') self.assertTrue(instances.c.uuid.nullable) inspector = reflection.Inspector.from_engine(engine) constraints = inspector.get_unique_constraints('instances') constraint_names = [constraint['name'] for constraint in constraints] self.assertNotIn('uniq_instances0uuid', constraint_names) def test_migration_267(self): # This is separate from test_walk_versions so we can test the case # where there are non-null instance_uuid entries in the database which # cause the 267 migration to fail. engine = self.migrate_engine self.migration_api.version_control( engine, self.REPOSITORY, self.INIT_VERSION) self.migration_api.upgrade(engine, self.REPOSITORY, 266) # Create a consoles record with a null instance_uuid so # we can test that the upgrade fails if that entry is found. # NOTE(mriedem): We use the consoles table since that's the only table # created in the 216 migration with a ForeignKey created on the # instance_uuid table for sqlite. consoles = oslodbutils.get_table(engine, 'consoles') fake_console = {'id': 1} consoles.insert().execute(fake_console) # NOTE(mriedem): We handle the 267 migration where we expect to # hit a ValidationError on the consoles table to have # a null instance_uuid entry ex = self.assertRaises(exception.ValidationError, self.migration_api.upgrade, engine, self.REPOSITORY, 267) self.assertIn("There are 1 records in the " "'consoles' table where the uuid or " "instance_uuid column is NULL.", ex.kwargs['detail']) # Remove the consoles entry with the null instance_uuid column. rows = consoles.delete().where( consoles.c['instance_uuid'] == null()).execute().rowcount self.assertEqual(1, rows) # Now run the 267 upgrade again. self.migration_api.upgrade(engine, self.REPOSITORY, 267) # Make sure the consoles entry with the null instance_uuid # was deleted. 
console = consoles.select(consoles.c.id == 1).execute().first() self.assertIsNone(console) def _check_268(self, engine, data): # We can only assert that the col exists, not the unique constraint # as the engine is running sqlite self.assertColumnExists(engine, 'compute_nodes', 'host') self.assertColumnExists(engine, 'shadow_compute_nodes', 'host') compute_nodes = oslodbutils.get_table(engine, 'compute_nodes') shadow_compute_nodes = oslodbutils.get_table( engine, 'shadow_compute_nodes') self.assertIsInstance(compute_nodes.c.host.type, sqlalchemy.types.String) self.assertIsInstance(shadow_compute_nodes.c.host.type, sqlalchemy.types.String) def _post_downgrade_268(self, engine): self.assertColumnNotExists(engine, 'compute_nodes', 'host') self.assertColumnNotExists(engine, 'shadow_compute_nodes', 'host') def _check_269(self, engine, data): self.assertColumnExists(engine, 'pci_devices', 'numa_node') self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node') pci_devices = oslodbutils.get_table(engine, 'pci_devices') shadow_pci_devices = oslodbutils.get_table( engine, 'shadow_pci_devices') self.assertIsInstance(pci_devices.c.numa_node.type, sqlalchemy.types.Integer) self.assertTrue(pci_devices.c.numa_node.nullable) self.assertIsInstance(shadow_pci_devices.c.numa_node.type, sqlalchemy.types.Integer) self.assertTrue(shadow_pci_devices.c.numa_node.nullable) def _post_downgrade_269(self, engine): self.assertColumnNotExists(engine, 'pci_devices', 'numa_node') self.assertColumnNotExists(engine, 'shadow_pci_devices', 'numa_node') def _check_270(self, engine, data): self.assertColumnExists(engine, 'instance_extra', 'flavor') self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor') instance_extra = oslodbutils.get_table(engine, 'instance_extra') shadow_instance_extra = oslodbutils.get_table( engine, 'shadow_instance_extra') self.assertIsInstance(instance_extra.c.flavor.type, sqlalchemy.types.Text) self.assertIsInstance(shadow_instance_extra.c.flavor.type, sqlalchemy.types.Text) def _post_downgrade_270(self, engine): self.assertColumnNotExists(engine, 'instance_extra', 'flavor') self.assertColumnNotExists(engine, 'shadow_instance_extra', 'flavor') def _check_271(self, engine, data): self.assertIndexMembers(engine, 'block_device_mapping', 'snapshot_id', ['snapshot_id']) self.assertIndexMembers(engine, 'block_device_mapping', 'volume_id', ['volume_id']) self.assertIndexMembers(engine, 'dns_domains', 'dns_domains_project_id_idx', ['project_id']) self.assertIndexMembers(engine, 'fixed_ips', 'network_id', ['network_id']) self.assertIndexMembers(engine, 'fixed_ips', 'fixed_ips_instance_uuid_fkey', ['instance_uuid']) self.assertIndexMembers(engine, 'fixed_ips', 'fixed_ips_virtual_interface_id_fkey', ['virtual_interface_id']) self.assertIndexMembers(engine, 'floating_ips', 'fixed_ip_id', ['fixed_ip_id']) self.assertIndexMembers(engine, 'iscsi_targets', 'iscsi_targets_volume_id_fkey', ['volume_id']) self.assertIndexMembers(engine, 'virtual_interfaces', 'virtual_interfaces_network_id_idx', ['network_id']) self.assertIndexMembers(engine, 'virtual_interfaces', 'virtual_interfaces_instance_uuid_fkey', ['instance_uuid']) # Removed on MySQL, never existed on other databases self.assertIndexNotExists(engine, 'dns_domains', 'project_id') self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id') def _post_downgrade_271(self, engine): self.assertIndexNotExists(engine, 'dns_domains', 'dns_domains_project_id_idx') self.assertIndexNotExists(engine, 'virtual_interfaces', 
'virtual_interfaces_network_id_idx') if engine.name == 'mysql': self.assertIndexMembers(engine, 'dns_domains', 'project_id', ['project_id']) self.assertIndexMembers(engine, 'virtual_interfaces', 'network_id', ['network_id']) # Rest of indexes will still exist on MySQL return # Never existed on non-MySQL databases, so shouldn't exist now self.assertIndexNotExists(engine, 'dns_domains', 'project_id') self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id') for table_name, index_name in [ ('block_device_mapping', 'snapshot_id'), ('block_device_mapping', 'volume_id'), ('dns_domains', 'dns_domains_project_id_idx'), ('fixed_ips', 'network_id'), ('fixed_ips', 'fixed_ips_instance_uuid_fkey'), ('fixed_ips', 'fixed_ips_virtual_interface_id_fkey'), ('floating_ips', 'fixed_ip_id'), ('iscsi_targets', 'iscsi_targets_volume_id_fkey'), ('virtual_interfaces', 'virtual_interfaces_network_id_idx'), ('virtual_interfaces', 'virtual_interfaces_instance_uuid_fkey')]: self.assertIndexNotExists(engine, table_name, index_name) class TestNovaMigrationsSQLite(NovaMigrationsCheckers, test.TestCase, test_base.DbTestCase): pass class TestNovaMigrationsMySQL(NovaMigrationsCheckers, test.TestCase, test_base.MySQLOpportunisticTestCase): def test_innodb_tables(self): with mock.patch.object(sa_migration, 'get_engine', return_value=self.migrate_engine): sa_migration.db_sync() total = self.migrate_engine.execute( "SELECT count(*) " "FROM information_schema.TABLES " "WHERE TABLE_SCHEMA = '%(database)s'" % {'database': self.migrate_engine.url.database}) self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") noninnodb = self.migrate_engine.execute( "SELECT count(*) " "FROM information_schema.TABLES " "WHERE TABLE_SCHEMA='%(database)s' " "AND ENGINE != 'InnoDB' " "AND TABLE_NAME != 'migrate_version'" % {'database': self.migrate_engine.url.database}) count = noninnodb.scalar() self.assertEqual(count, 0, "%d non InnoDB tables created" % count) class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers, test.TestCase, test_base.PostgreSQLOpportunisticTestCase): pass class ProjectTestCase(test.NoDBTestCase): def test_all_migrations_have_downgrade(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../') py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy", "migrate_repo", "versions", "*.py") missing_downgrade = [] for path in glob.iglob(py_glob): has_upgrade = False has_downgrade = False with open(path, "r") as f: for line in f: if 'def upgrade(' in line: has_upgrade = True if 'def downgrade(' in line: has_downgrade = True if has_upgrade and not has_downgrade: fname = os.path.basename(path) missing_downgrade.append(fname) helpful_msg = ("The following migrations are missing a downgrade:" "\n\t%s" % '\n\t'.join(sorted(missing_downgrade))) self.assertFalse(missing_downgrade, helpful_msg)
apache-2.0
4,211,910,510,658,621,400
863,957,067,329,378,700
44.009655
79
0.610199
false
jotes/ansible
v2/test/parsing/test_mod_args.py
109
4913
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.parsing.mod_args import ModuleArgsParser from ansible.errors import AnsibleParserError from ansible.compat.tests import unittest class TestModArgsDwim(unittest.TestCase): # TODO: add tests that construct ModuleArgsParser with a task reference # TODO: verify the AnsibleError raised on failure knows the task # and the task knows the line numbers def setUp(self): pass def _debug(self, mod, args, to): print("RETURNED module = {0}".format(mod)) print(" args = {0}".format(args)) print(" to = {0}".format(to)) def tearDown(self): pass def test_basic_shell(self): m = ModuleArgsParser(dict(shell='echo hi')) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'command') self.assertEqual(args, dict( _raw_params = 'echo hi', _uses_shell = True, )) self.assertIsNone(to) def test_basic_command(self): m = ModuleArgsParser(dict(command='echo hi')) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'command') self.assertEqual(args, dict( _raw_params = 'echo hi', )) self.assertIsNone(to) def test_shell_with_modifiers(self): m = ModuleArgsParser(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'command') self.assertEqual(args, dict( creates = '/tmp/baz', removes = '/tmp/bleep', _raw_params = '/bin/foo', _uses_shell = True, )) self.assertIsNone(to) def test_normal_usage(self): m = ModuleArgsParser(dict(copy='src=a dest=b')) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_complex_args(self): m = ModuleArgsParser(dict(copy=dict(src='a', dest='b'))) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_action_with_complex(self): m = ModuleArgsParser(dict(action=dict(module='copy', src='a', dest='b'))) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_action_with_complex_and_complex_args(self): m = ModuleArgsParser(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_local_action_string(self): m = ModuleArgsParser(dict(local_action='copy src=a dest=b')) mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIs(to, 'localhost') def test_multiple_actions(self): m = 
ModuleArgsParser(dict(action='shell echo hi', local_action='shell echo hi')) self.assertRaises(AnsibleParserError, m.parse) m = ModuleArgsParser(dict(action='shell echo hi', shell='echo hi')) self.assertRaises(AnsibleParserError, m.parse) m = ModuleArgsParser(dict(local_action='shell echo hi', shell='echo hi')) self.assertRaises(AnsibleParserError, m.parse) m = ModuleArgsParser(dict(ping='data=hi', shell='echo hi')) self.assertRaises(AnsibleParserError, m.parse)
gpl-3.0
-5,773,518,214,471,836,000
-8,615,915,931,975,510,000
36.792308
92
0.596784
false
ryuunosukeyoshi/PartnerPoi-Bot
lib/youtube_dl/extractor/tvigle.py
48
4229
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, parse_age_limit, ) class TvigleIE(InfoExtractor): IE_NAME = 'tvigle' IE_DESC = 'Интернет-телевидение Tvigle.ru' _VALID_URL = r'https?://(?:www\.)?(?:tvigle\.ru/(?:[^/]+/)+(?P<display_id>[^/]+)/$|cloud\.tvigle\.ru/video/(?P<id>\d+))' _GEO_BYPASS = False _GEO_COUNTRIES = ['RU'] _TESTS = [ { 'url': 'http://www.tvigle.ru/video/sokrat/', 'md5': '36514aed3657d4f70b4b2cef8eb520cd', 'info_dict': { 'id': '1848932', 'display_id': 'sokrat', 'ext': 'flv', 'title': 'Сократ', 'description': 'md5:d6b92ffb7217b4b8ebad2e7665253c17', 'duration': 6586, 'age_limit': 12, }, 'skip': 'georestricted', }, { 'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/', 'md5': 'e7efe5350dd5011d0de6550b53c3ba7b', 'info_dict': { 'id': '5142516', 'ext': 'flv', 'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком', 'description': 'md5:027f7dc872948f14c96d19b4178428a4', 'duration': 186.080, 'age_limit': 0, }, 'skip': 'georestricted', }, { 'url': 'https://cloud.tvigle.ru/video/5267604/', 'only_matching': True, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') if not video_id: webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( (r'<div[^>]+class=["\']player["\'][^>]+id=["\'](\d+)', r'var\s+cloudId\s*=\s*["\'](\d+)', r'class="video-preview current_playing" id="(\d+)"'), webpage, 'video id') video_data = self._download_json( 'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id) item = video_data['playlist']['items'][0] videos = item.get('videos') error_message = item.get('errorMessage') if not videos and error_message: if item.get('isGeoBlocked') is True: self.raise_geo_restricted( msg=error_message, countries=self._GEO_COUNTRIES) else: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error_message), expected=True) title = item['title'] description = item.get('description') thumbnail = item.get('thumbnail') duration = float_or_none(item.get('durationMilliseconds'), 1000) age_limit = parse_age_limit(item.get('ageRestrictions')) formats = [] for vcodec, fmts in item['videos'].items(): if vcodec == 'hls': continue for format_id, video_url in fmts.items(): if format_id == 'm3u8': continue height = self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None) formats.append({ 'url': video_url, 'format_id': '%s-%s' % (vcodec, format_id), 'vcodec': vcodec, 'height': int_or_none(height), 'filesize': int_or_none(item.get('video_files_size', {}).get(vcodec, {}).get(format_id)), }) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'age_limit': age_limit, 'formats': formats, }
gpl-3.0
-3,705,279,150,720,343,600
-2,540,057,534,261,051,000
33.92437
129
0.488932
false
ukanga/SickRage
sickbeard/show_queue.py
3
30406
# coding=utf-8 # Author: Nic Wolfe <nic@wolfeden.ca> # URL: https://sickrage.github.io # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from collections import namedtuple import os import traceback from imdb import _exceptions as imdb_exceptions from libtrakt import TraktAPI import sickbeard from sickbeard import generic_queue, logger, name_cache, notifiers, ui from sickbeard.blackandwhitelist import BlackAndWhiteList from sickbeard.common import WANTED from sickbeard.helpers import chmodAsParent, get_showname_from_indexer, makeDir, sortable_name from sickbeard.tv import TVShow from sickrage.helper.common import sanitize_filename from sickrage.helper.encoding import ek from sickrage.helper.exceptions import CantRefreshShowException, CantRemoveShowException, CantUpdateShowException, \ EpisodeDeletedException, MultipleShowObjectsException, ShowDirectoryNotFoundException from sickrage.show.Show import Show import six class ShowQueue(generic_queue.GenericQueue): def __init__(self): super(ShowQueue, self).__init__() self.queue_name = 'SHOWQUEUE' def _is_in_queue(self, show, actions): if not show: return False return show.indexerid in (x.show.indexerid if x.show else 0 for x in self.queue if x.action_id in actions) def _is_being_somethinged(self, show, actions): return self.currentItem is not None and show == self.currentItem.show and self.currentItem.action_id in actions # def is_in_add_queue(self, show): # return self._isInQueue(show, (ShowQueueActions.ADD,)) def is_in_update_queue(self, show): return self._is_in_queue(show, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE)) def is_in_refresh_queue(self, show): return self._is_in_queue(show, (ShowQueueActions.REFRESH,)) def is_in_rename_queue(self, show): return self._is_in_queue(show, (ShowQueueActions.RENAME,)) def is_in_remove_queue(self, show): return self._is_in_queue(show, (ShowQueueActions.REMOVE,)) def is_in_subtitle_queue(self, show): return self._is_in_queue(show, (ShowQueueActions.SUBTITLE,)) def is_being_added(self, show): return self._is_being_somethinged(show, (ShowQueueActions.ADD,)) def is_being_updated(self, show): return self._is_being_somethinged(show, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE)) def is_being_refreshed(self, show): return self._is_being_somethinged(show, (ShowQueueActions.REFRESH,)) def is_being_renamed(self, show): return self._is_being_somethinged(show, (ShowQueueActions.RENAME,)) def is_being_removed(self, show): return self._is_being_somethinged(show, (ShowQueueActions.REMOVE,)) def is_being_subtitled(self, show): return self._is_being_somethinged(show, (ShowQueueActions.SUBTITLE,)) @property def loading_show_list(self): return {x for x in self.queue + [self.currentItem] if x and x.is_loading} def update_show(self, show, force=False): if self.is_being_added(show): raise CantUpdateShowException( '{0} is still being added, wait until it is finished 
before you update.'.format(show.name) ) if self.is_being_updated(show): raise CantUpdateShowException( '{0} is already being updated by Post-processor or manually started, can\'t update again until it\'s done.'.format(show.name) ) if self.is_in_update_queue(show): raise CantUpdateShowException( '{0} is in process of being updated by Post-processor or manually started, can\'t update again until it\'s done.'.format(show.name) ) queue_item_obj = QueueItemUpdate(show, force=force) self.add_item(queue_item_obj) return queue_item_obj def refresh_show(self, show, force=False): if self.is_being_refreshed(show) and not force: raise CantRefreshShowException('This show is already being refreshed, not refreshing again.') if (self.is_being_updated(show) or self.is_in_update_queue(show)) and not force: logger.log( 'A refresh was attempted but there is already an update queued or in progress. Updates do a refresh at the end so I\'m skipping this request.', logger.DEBUG) return if show.paused and not force: logger.log('Skipping show [{0}] because it is paused.'.format(show.name), logger.DEBUG) return logger.log('Queueing show refresh for {0}'.format(show.name), logger.DEBUG) queue_item_obj = QueueItemRefresh(show, force=force) self.add_item(queue_item_obj) return queue_item_obj def rename_show_episodes(self, show, force=False): queue_item_obj = QueueItemRename(show) self.add_item(queue_item_obj) return queue_item_obj def download_subtitles(self, show, force=False): queue_item_obj = QueueItemSubtitle(show) self.add_item(queue_item_obj) return queue_item_obj def add_show(self, # pylint: disable=too-many-arguments, too-many-locals indexer, indexer_id, showDir, default_status=None, quality=None, season_folders=None, lang=None, subtitles=None, subtitles_sr_metadata=None, anime=None, scene=None, paused=None, blacklist=None, whitelist=None, default_status_after=None, root_dir=None): if lang is None: lang = sickbeard.INDEXER_DEFAULT_LANGUAGE if default_status_after is None: default_status_after = sickbeard.STATUS_DEFAULT_AFTER queue_item_obj = QueueItemAdd(indexer, indexer_id, showDir, default_status, quality, season_folders, lang, subtitles, subtitles_sr_metadata, anime, scene, paused, blacklist, whitelist, default_status_after, root_dir) self.add_item(queue_item_obj) return queue_item_obj def remove_show(self, show, full=False): if not show: raise CantRemoveShowException('Failed removing show: Show does not exist') if not hasattr(show, 'indexerid'): raise CantRemoveShowException('Failed removing show: Show does not have an indexer id') if self._is_in_queue(show, (ShowQueueActions.REMOVE,)): raise CantRemoveShowException('{0} is already queued to be removed'.format(show.name)) # remove other queued actions for this show. 
for item in self.queue: if item and item.show and item != self.currentItem and show.indexerid == item.show.indexerid: self.queue.remove(item) queue_item_obj = QueueItemRemove(show=show, full=full) self.add_item(queue_item_obj) return queue_item_obj class ShowQueueActions(object): # pylint: disable=too-few-public-methods def __init__(self): pass REFRESH = 1 ADD = 2 UPDATE = 3 FORCEUPDATE = 4 RENAME = 5 SUBTITLE = 6 REMOVE = 7 names = { REFRESH: 'Refresh', ADD: 'Add', UPDATE: 'Update', FORCEUPDATE: 'Force Update', RENAME: 'Rename', SUBTITLE: 'Subtitle', REMOVE: 'Remove Show' } class ShowQueueItem(generic_queue.QueueItem): """ Represents an item in the queue waiting to be executed Can be either: - show being added (may or may not be associated with a show object) - show being refreshed - show being updated - show being force updated - show being subtitled """ def __init__(self, action_id, show): super(ShowQueueItem, self).__init__(ShowQueueActions.names[action_id], action_id) self.show = show def is_in_queue(self): return self in sickbeard.showQueueScheduler.action.queue + [ sickbeard.showQueueScheduler.action.currentItem] @property def show_name(self): return self.show.name if self.show else 'UNSET' @property def is_loading(self): # pylint: disable=no-self-use return False class QueueItemAdd(ShowQueueItem): # pylint: disable=too-many-instance-attributes def __init__(self, # pylint: disable=too-many-arguments, too-many-locals indexer, indexer_id, showDir, default_status, quality, season_folders, lang, subtitles, subtitles_sr_metadata, anime, scene, paused, blacklist, whitelist, default_status_after, root_dir): super(QueueItemAdd, self).__init__(ShowQueueActions.ADD, None) if isinstance(showDir, bytes): self.showDir = showDir.decode('utf-8') else: self.showDir = showDir self.indexer = indexer self.indexer_id = indexer_id self.default_status = default_status self.quality = quality self.season_folders = season_folders self.lang = lang self.subtitles = subtitles self.subtitles_sr_metadata = subtitles_sr_metadata self.anime = anime self.scene = scene self.paused = paused self.blacklist = blacklist self.whitelist = whitelist self.default_status_after = default_status_after self.root_dir = root_dir self.show = None # Process add show in priority self.priority = generic_queue.QueuePriorities.HIGH @property def show_name(self): """ Returns the show name if there is a show object created, if not returns the dir that the show is being added to. """ return self.show.name if self.show else self.showDir.rsplit(os.sep)[-1] @property def is_loading(self): """ Returns True if we've gotten far enough to have a show object, or False if we still only know the folder name. 
""" return self.show not in sickbeard.showList or not self.show @property def info(self): info = namedtuple('LoadingShowInfo', 'id name sort_name network quality') if self.show: return info(id=self.show.indexerid, name=self.show.name, sort_name=self.show.sort_name, network=self.show.network, quality=self.show.quality) return info(id=self.show_name, name=self.show_name, sort_name=sortable_name(self.show_name), network=_('Loading'), quality=0) def run(self): # pylint: disable=too-many-branches, too-many-statements, too-many-return-statements super(QueueItemAdd, self).run() if self.showDir: try: assert isinstance(self.showDir, six.text_type) except AssertionError: logger.log(traceback.format_exc(), logger.WARNING) self._finish_early() return logger.log('Starting to add show {0}'.format('by ShowDir: {0}'.format(self.showDir) if self.showDir else 'by Indexer Id: {0}'.format(self.indexer_id))) # make sure the Indexer IDs are valid try: lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy() lINDEXER_API_PARMS['language'] = self.lang or sickbeard.INDEXER_DEFAULT_LANGUAGE logger.log('{0}: {1!r}'.format(sickbeard.indexerApi(self.indexer).name, lINDEXER_API_PARMS)) t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS) s = t[self.indexer_id] # Let's try to create the show Dir if it's not provided. This way we force the show dir to build build using the # Indexers provided series name if self.root_dir and not self.showDir: show_name = get_showname_from_indexer(self.indexer, self.indexer_id, self.lang) if not show_name: logger.log('Unable to get a show {0}, can\'t add the show'.format(self.showDir)) self._finish_early() return self.showDir = ek(os.path.join, self.root_dir, sanitize_filename(show_name)) dir_exists = makeDir(self.showDir) if not dir_exists: logger.log('Unable to create the folder {0}, can\'t add the show'.format(self.showDir)) self._finish_early() return chmodAsParent(self.showDir) # this usually only happens if they have an NFO in their show dir which gave us a Indexer ID that has no proper english version of the show if getattr(s, 'seriesname', None) is None: error_string = 'Show in {0} has no name on {1}, probably searched with the wrong language. Delete .nfo and add manually in the correct language.'.format( self.showDir, sickbeard.indexerApi(self.indexer).name) logger.log(error_string, logger.WARNING) ui.notifications.error('Unable to add show', error_string) self._finish_early() return # if the show has no episodes/seasons if not s: error_string = 'Show {0} is on {1} but contains no season/episode data.'.format( s[b'seriesname'], sickbeard.indexerApi(self.indexer).name) logger.log(error_string) ui.notifications.error('Unable to add show', error_string) self._finish_early() return except Exception as error: error_string = 'Unable to look up the show in {0} on {1} using ID {2}, not using the NFO. 
Delete .nfo and try adding manually again.'.format( self.showDir, sickbeard.indexerApi(self.indexer).name, self.indexer_id) logger.log('{0}: {1}'.format(error_string, error), logger.ERROR) ui.notifications.error( 'Unable to add show', error_string) if sickbeard.USE_TRAKT: trakt_id = sickbeard.indexerApi(self.indexer).config[b'trakt_id'] trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT) title = self.showDir.split('/')[-1] data = { 'shows': [ { 'title': title, 'ids': {} } ] } if trakt_id == 'tvdb_id': data[b'shows'][0][b'ids'][b'tvdb'] = self.indexer_id else: data[b'shows'][0][b'ids'][b'tvrage'] = self.indexer_id trakt_api.traktRequest('sync/watchlist/remove', data, method='POST') self._finish_early() return try: try: newShow = TVShow(self.indexer, self.indexer_id, self.lang) except MultipleShowObjectsException as error: # If we have the show in our list, but the location is wrong, lets fix it and refresh! existing_show = Show.find(sickbeard.showList, self.indexer_id) if existing_show and not ek(os.path.isdir, existing_show._location): # pylint: disable=protected-access newShow = existing_show else: raise error newShow.loadFromIndexer() self.show = newShow # set up initial values self.show.location = self.showDir self.show.subtitles = self.subtitles if self.subtitles is not None else sickbeard.SUBTITLES_DEFAULT self.show.subtitles_sr_metadata = self.subtitles_sr_metadata self.show.quality = self.quality if self.quality else sickbeard.QUALITY_DEFAULT self.show.season_folders = self.season_folders if self.season_folders is not None else sickbeard.SEASON_FOLDERS_DEFAULT self.show.anime = self.anime if self.anime is not None else sickbeard.ANIME_DEFAULT self.show.scene = self.scene if self.scene is not None else sickbeard.SCENE_DEFAULT self.show.paused = self.paused if self.paused is not None else False # set up default new/missing episode status logger.log('Setting all episodes to the specified default status: {0}' .format(self.show.default_ep_status)) self.show.default_ep_status = self.default_status if self.show.anime: self.show.release_groups = BlackAndWhiteList(self.show.indexerid) if self.blacklist: self.show.release_groups.set_black_keywords(self.blacklist) if self.whitelist: self.show.release_groups.set_white_keywords(self.whitelist) # # be smartish about this # if self.show.genre and 'talk show' in self.show.genre.lower(): # self.show.air_by_date = 1 # if self.show.genre and 'documentary' in self.show.genre.lower(): # self.show.air_by_date = 0 # if self.show.classification and 'sports' in self.show.classification.lower(): # self.show.sports = 1 except sickbeard.indexer_exception as error: error_string = 'Unable to add {0} due to an error with {1}'.format( self.show.name if self.show else 'show', sickbeard.indexerApi(self.indexer).name) logger.log('{0}: {1}'.format(error_string, error), logger.ERROR) ui.notifications.error('Unable to add show', error_string) self._finish_early() return except MultipleShowObjectsException: error_string = 'The show in {0} is already in your show list, skipping'.format(self.showDir) logger.log(error_string, logger.WARNING) ui.notifications.error('Show skipped', error_string) self._finish_early() return except Exception as error: logger.log('Error trying to add show: {0}'.format(error), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) self._finish_early() raise logger.log('Retrieving show info from IMDb', logger.DEBUG) try: self.show.loadIMDbInfo() except imdb_exceptions.IMDbError as error: logger.log(' Something wrong on IMDb 
api: {0}'.format(error), logger.WARNING) except Exception as error: logger.log('Error loading IMDb info: {0}'.format(error), logger.ERROR) try: self.show.saveToDB() except Exception as error: logger.log('Error saving the show to the database: {0}'.format(error), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) self._finish_early() raise # add it to the show list if not Show.find(sickbeard.showList, self.indexer_id): sickbeard.showList.append(self.show) try: self.show.loadEpisodesFromIndexer() except Exception as error: logger.log( 'Error with {0}, not creating episode list: {1}'.format (sickbeard.indexerApi(self.show.indexer).name, error), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) # update internal name cache name_cache.buildNameCache(self.show) try: self.show.loadEpisodesFromDir() except Exception as error: logger.log('Error searching dir for episodes: {0}'.format(error), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) # if they set default ep status to WANTED then run the backlog to search for episodes # FIXME: This needs to be a backlog queue item!!! if self.show.default_ep_status == WANTED: logger.log('Launching backlog for this show since its episodes are WANTED') sickbeard.backlogSearchScheduler.action.searchBacklog([self.show]) self.show.writeMetadata() self.show.updateMetadata() self.show.populateCache() self.show.flushEpisodes() if sickbeard.USE_TRAKT: # if there are specific episodes that need to be added by trakt sickbeard.traktCheckerScheduler.action.manageNewShow(self.show) # add show to trakt.tv library if sickbeard.TRAKT_SYNC: sickbeard.traktCheckerScheduler.action.addShowToTraktLibrary(self.show) if sickbeard.TRAKT_SYNC_WATCHLIST: logger.log('update watchlist') notifiers.trakt_notifier.update_watchlist(show_obj=self.show) # Load XEM data to DB for show sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer, force=True) # check if show has XEM mapping so we can determin if searches should go by scene numbering or indexer numbering. if not self.scene and sickbeard.scene_numbering.get_xem_numbering_for_show(self.show.indexerid, self.show.indexer): self.show.scene = 1 # After initial add, set to default_status_after. 
self.show.default_ep_status = self.default_status_after super(QueueItemAdd, self).finish() self.finish() def _finish_early(self): if self.show is not None: sickbeard.showQueueScheduler.action.remove_show(self.show) super(QueueItemAdd, self).finish() self.finish() class QueueItemRefresh(ShowQueueItem): def __init__(self, show=None, force=False): super(QueueItemRefresh, self).__init__(ShowQueueActions.REFRESH, show) # do refreshes first because they're quick self.priority = generic_queue.QueuePriorities.HIGH # force refresh certain items self.force = force def run(self): super(QueueItemRefresh, self).run() logger.log('Performing refresh on {0}'.format(self.show.name)) self.show.refreshDir() self.show.writeMetadata() if self.force: self.show.updateMetadata() self.show.populateCache() # Load XEM data to DB for show sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer) super(QueueItemRefresh, self).finish() self.finish() class QueueItemRename(ShowQueueItem): def __init__(self, show=None): super(QueueItemRename, self).__init__(ShowQueueActions.RENAME, show) def run(self): super(QueueItemRename, self).run() logger.log('Performing rename on {0}'.format(self.show.name)) try: self.show.location except ShowDirectoryNotFoundException: logger.log('Can\'t perform rename on {0} when the show dir is missing.'.format(self.show.name), logger.WARNING) super(QueueItemRename, self).finish() self.finish() return ep_obj_rename_list = [] ep_obj_list = self.show.getAllEpisodes(has_location=True) for cur_ep_obj in ep_obj_list: # Only want to rename if we have a location if cur_ep_obj.location: if cur_ep_obj.relatedEps: # do we have one of multi-episodes in the rename list already have_already = False for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]: if cur_related_ep in ep_obj_rename_list: have_already = True break if not have_already: ep_obj_rename_list.append(cur_ep_obj) else: ep_obj_rename_list.append(cur_ep_obj) for cur_ep_obj in ep_obj_rename_list: cur_ep_obj.rename() super(QueueItemRename, self).finish() self.finish() class QueueItemSubtitle(ShowQueueItem): def __init__(self, show=None): super(QueueItemSubtitle, self).__init__(ShowQueueActions.SUBTITLE, show) def run(self): super(QueueItemSubtitle, self).run() logger.log('Downloading subtitles for {0} '.format(self.show.name)) self.show.download_subtitles() super(QueueItemSubtitle, self).finish() self.finish() class QueueItemUpdate(ShowQueueItem): def __init__(self, show=None, force=False): action = ShowQueueActions.FORCEUPDATE if force else ShowQueueActions.UPDATE super(QueueItemUpdate, self).__init__(action, show) self.force = force self.priority = generic_queue.QueuePriorities.HIGH def run(self): # pylint: disable=too-many-branches, too-many-statements super(QueueItemUpdate, self).run() logger.log('Beginning update of {0}'.format(self.show.name), logger.DEBUG) logger.log('Retrieving show info from {0}'.format(sickbeard.indexerApi(self.show.indexer).name), logger.DEBUG) try: self.show.loadFromIndexer(cache=not self.force) except sickbeard.indexer_error as error: logger.log('Unable to contact {0}, aborting: {1}'.format (sickbeard.indexerApi(self.show.indexer).name, error), logger.WARNING) super(QueueItemUpdate, self).finish() self.finish() return except sickbeard.indexer_attributenotfound as error: logger.log('Data retrieved from {0} was incomplete, aborting: {1}'.format (sickbeard.indexerApi(self.show.indexer).name, error), logger.ERROR) super(QueueItemUpdate, self).finish() self.finish() return logger.log('Retrieving show 
info from IMDb', logger.DEBUG) try: self.show.loadIMDbInfo() except imdb_exceptions.IMDbError as error: logger.log('Something wrong on IMDb api: {0}'.format(error), logger.WARNING) except Exception as error: logger.log('Error loading IMDb info: {0}'.format(error), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) # have to save show before reading episodes from db try: self.show.saveToDB() except Exception as error: logger.log('Error saving show info to the database: {0}'.format(error), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) # get episode list from DB logger.log('Loading all episodes from the database', logger.DEBUG) DBEpList = self.show.loadEpisodesFromDB() # get episode list from TVDB logger.log('Loading all episodes from {0}'.format(sickbeard.indexerApi(self.show.indexer).name), logger.DEBUG) try: IndexerEpList = self.show.loadEpisodesFromIndexer(cache=not self.force) except sickbeard.indexer_exception as error: logger.log('Unable to get info from {0}, the show info will not be refreshed: {1}'.format (sickbeard.indexerApi(self.show.indexer).name, error), logger.ERROR) IndexerEpList = None if not IndexerEpList: logger.log('No data returned from {0}, unable to update this show.'.format (sickbeard.indexerApi(self.show.indexer).name), logger.ERROR) else: # for each ep we found on the Indexer delete it from the DB list for curSeason in IndexerEpList: for curEpisode in IndexerEpList[curSeason]: curEp = self.show.getEpisode(curSeason, curEpisode) curEp.saveToDB() if curSeason in DBEpList and curEpisode in DBEpList[curSeason]: del DBEpList[curSeason][curEpisode] # remaining episodes in the DB list are not on the indexer, just delete them from the DB for curSeason in DBEpList: for curEpisode in DBEpList[curSeason]: logger.log('Permanently deleting episode {0:02d}E{1:02d} from the database'.format (curSeason, curEpisode), logger.INFO) curEp = self.show.getEpisode(curSeason, curEpisode) try: curEp.deleteEpisode() except EpisodeDeletedException: pass # save show again, in case episodes have changed try: self.show.saveToDB() except Exception as error: logger.log('Error saving show info to the database: {0}'.format(error), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) logger.log('Finished update of {0}'.format(self.show.name), logger.DEBUG) sickbeard.showQueueScheduler.action.refresh_show(self.show, self.force) super(QueueItemUpdate, self).finish() self.finish() class QueueItemRemove(ShowQueueItem): def __init__(self, show=None, full=False): super(QueueItemRemove, self).__init__(ShowQueueActions.REMOVE, show) # lets make sure this happens before any other high priority actions self.priority = generic_queue.QueuePriorities.HIGH ** 2 self.full = full def run(self): super(QueueItemRemove, self).run() logger.log('Removing {0}'.format(self.show.name)) self.show.deleteShow(full=self.full) if sickbeard.USE_TRAKT: try: sickbeard.traktCheckerScheduler.action.removeShowFromTraktLibrary(self.show) except Exception as error: logger.log('Unable to delete show from Trakt: {0}. Error: {1}'.format(self.show.name, error), logger.WARNING) super(QueueItemRemove, self).finish() self.finish()
gpl-3.0
-7,236,000,492,247,421,000
5,811,499,451,522,144,000
39.595461
169
0.621029
false
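The show_queue.py record above is organized around prioritized queue items: refreshes are given HIGH priority so they run ahead of slower updates, and removals outrank everything else. A minimal, self-contained sketch of that pattern (all names here are hypothetical stand-ins, not sickbeard's API):

import heapq
import itertools

class QueueItem(object):
    PRIORITY_NORMAL, PRIORITY_HIGH = 20, 10  # lower number sorts first

    def __init__(self, action, priority=PRIORITY_NORMAL):
        self.action = action
        self.priority = priority

    def run(self):
        print('running: %s' % self.action)

class ActionQueue(object):
    def __init__(self):
        self._heap = []
        self._tiebreak = itertools.count()  # preserves FIFO order within a priority

    def put(self, item):
        heapq.heappush(self._heap, (item.priority, next(self._tiebreak), item))

    def drain(self):
        while self._heap:
            item = heapq.heappop(self._heap)[2]
            item.run()

q = ActionQueue()
q.put(QueueItem('update show'))
q.put(QueueItem('refresh show', priority=QueueItem.PRIORITY_HIGH))
q.drain()  # the refresh runs first, mirroring "do refreshes first because they're quick"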
Kivvix/stage-LPC
compareSrc/searchSDSSdata.py
1
4221
#!/usr/bin/env python # -*- coding: utf-8 -*- import time import os import glob from config import * import data.calexp import data.src ## @def attributs # @brief attributs which we select in SDSS DB and src fits file attributs = 'objid,run,camcol,field,ra,dec,u,g,r,i,z' ## Calexp treatment ## def coordCalexp( fitsNum , calexpFits , first=True ): coordMin, coordMax = data.calexp.coord( calexpFits , first ) if ( first ): return coordMin else: return coordMax def savCalexp( coordMin , coordMax , fitsNum ): global attributs , PATH_OUTPUT calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum ) data.calexp.write( calexpLines , attributs , fitsNum , PATH_OUTPUT , True ) def calexp( fitsNum , calexpFits , first=True ): """ find and write calexp data (id,ra,dec,mag) :param fitsNum: number of fits file (``rrrrrr-bc-ffff``) :param calexpFits: name of calexp fits file :param first: take all the picture or less 128 first pixels :type fitsNum: string :type calexpFits: string :type first: boolean """ global attributs , PATH_OUTPUT coordMin, coordMax = data.calexp.coord( calexpFits , first ) calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum ) data.calexp.write( calexpLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first ) ## Src treatment ## def src( fitsNum , srcFits , first=True ): """ find and write src data (id,ra,dec,mag) :param fitsNum: number of fits file (``rrrrrr-bc-ffff``) :param srcFits: name of src fits file :param first: take all the picture or less 128 first pixels :type fitsNum: string :type srcFits: string :type first: boolean """ global attributs , PATH_OUTPUT srcCoord,srcMag = data.src.coord( srcFits , fitsNum , first ) srcLines = data.src.map( srcCoord , srcMag ) data.src.write( srcLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first ) def analyCol( runNum , c ): """ function threaded calling research of data :param runNum_c: tupe with run number and column of the CCD (1-6) :type runNum_c: tuple of string """ global b , PATH_DATA , PWD print " " + str(c) + " ", # data of each pair of fits files first = True for fits in glob.glob( c + "/" + b + "/calexp/calexp*.fits" ): fitsNum = fits[18:32] ## @def calexpFits # @brief path and name of calexp fits file calexpFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/calexp/calexp-" + fitsNum + ".fits" ## @def srcFits # @brief path and name of src fits file #srcFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/src/src-" + fitsNum + ".fits" #calexp( fitsNum , calexpFits , first ) if ( first ): coordMin = coordCalexp( fitsNum , calexpFits , first ) else: coordMax = coordCalexp( fitsNum , calexpFits , first ) #src( fitsNum , srcFits , first ) first = False savCalexp( coordMin , coordMax , "%06d" % int(runNum) + "-" + b + c ) def analyRun( runNum ): global b , PWD , PATH_DATA , PATH_OUTPUT , attributs print "run : " + str(runNum ) + " : ", os.chdir( PATH_DATA + "/" + runNum ) columns = glob.glob( "*" ) for c in columns : analyCol( runNum , c ) if __name__ == '__main__': os.chdir( PATH_DATA ) runs = glob.glob( "*" ) #runs = ( 7158, 7112, 5924, 5566, 6421, 7057, 6430, 4895, 5895, 6474, 6383, 7038, 5642, 6409, 6513, 6501, 6552, 2650, 6559, 6355, 7177, 7121, 3465, 7170, 7051, 6283, 6458, 5853, 6484, 5765, 2708, 5786, 4253, 6934, 6508, 2662, 6518, 6584, 4188, 6976, 7202, 7173, 4153, 5820, 2649, 7140, 6330, 3388, 7117, 6504, 6314, 4128, 6596, 6564, 5807, 6367, 6373, 5622, 5882, 7034, 7136, 6577, 6600, 2768, 3437, 4927, 6414, 3434, 5813, 7084, 4858, 7124, 6982, 4917, 4192, 5898, 6479, 4868, 7106, 
7195, 5744, 3360, 4198, 6963, 6533, 4933, 5603, 3384, 7155, 5619, 4207, 4849, 5582, 7024, 1755, 5709, 5781, 5770, 7145, 5754, 5646, 5800, 5759, 6287, 6568, 7054, 4203, 5776, 6433, 4247, 5823, 5052, 3325, 5836, 5590, 6580, 7161, 2728, 4145, 5633, 6461, 6555, 6955, 4874, 5792, 5918, 6425, 6377, 4263, 5878, 6441, 6447, 7080, 5905, 5713, 6618, 6537, 5637, 6402, 6530, 7047, 6524, 7101, 6293 ) for r in runs : analyRun( r ) print " " time.sleep(60)
mit
7,105,068,246,496,585,000
-9,076,688,849,895,981,000
33.040323
875
0.644871
false
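searchSDSSdata.py identifies each calexp/src pair by a fits number of the form rrrrrr-bc-ffff, sliced out of the path with fits[18:32]. A more explicit way to recover the same fields, assuming the calexp-<run>-<band+camcol>-<field>.fits layout implied by the glob pattern (the exact naming is an assumption, not verified against a real SDSS data tree):

import re

def parse_fits_num(filename):
    """Return (run, band+camcol, field), e.g. ('000250', 'r1', '0042')."""
    m = re.search(r'calexp-(\d{6})-([a-z]\d)-(\d{4})\.fits$', filename)
    if m is None:
        raise ValueError('unexpected calexp filename: %r' % filename)
    return m.groups()

run, bc, field = parse_fits_num('calexp-000250-r1-0042.fits')
print('run=%s bc=%s field=%s' % (run, bc, field))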
xaled/wunderous-analytics
wunderous/drive.py
1
5688
import os import sys import httplib2 from oauth2client.file import Storage from apiclient import discovery from oauth2client.client import OAuth2WebServerFlow from wunderous.config import config OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive' SHEETS_OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.readonly https://www.googleapis.com/auth/drive.file https://www.googleapis.com/auth/spreadsheets https://www.googleapis.com/auth/spreadsheets.readonly' REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob' CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'credentials.json') SHEETS_CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'sheets_credentials.json') # CONFIG_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "wunderous.config.json") sheet_service = None drive_service = None # def load_configs(): # client_secret = config['client_secret'] # client_id = config['client_id'] # return client_id, client_secret def init_drive_service(): global drive_service if drive_service: return drive_service storage = Storage(CREDS_FILE) credentials = storage.get() if credentials is None: # Run through the OAuth flow and retrieve credentials # client_id, client_secret = load_configs() flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], OAUTH_SCOPE, REDIRECT_URI) authorize_url = flow.step1_get_authorize_url() print('Go to the following link in your browser: ' + authorize_url) code = input('Enter verification code: ').strip() credentials = flow.step2_exchange(code) storage.put(credentials) # Create an httplib2.Http object and authorize it with our credentials http = httplib2.Http() http = credentials.authorize(http) drive_service = discovery.build('drive', 'v2', http=http) return drive_service def init_sheet_service(): global sheet_service if sheet_service: return sheet_service storage = Storage(SHEETS_CREDS_FILE) credentials = storage.get() if credentials is None: # Run through the OAuth flow and retrieve credentials # client_id, client_secret = load_configs() flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], OAUTH_SCOPE, REDIRECT_URI) authorize_url = flow.step1_get_authorize_url() print('Go to the following link in your browser: ' + authorize_url) code = input('Enter verification code: ').strip() credentials = flow.step2_exchange(code) storage.put(credentials) # Create an httplib2.Http object and authorize it with our credentials http = httplib2.Http() http = credentials.authorize(http) sheet_service = discovery.build('sheets', 'v4', http=http) return sheet_service def list_files(service): page_token = None while True: param = {} if page_token: param['pageToken'] = page_token files = service.files().list(**param).execute() for item in files['items']: yield item page_token = files.get('nextPageToken') if not page_token: break def _download_file(drive_service, download_url, outfile): resp, content = drive_service._http.request(download_url) if resp.status == 200: with open(outfile, 'wb') as f: f.write(content) print("OK") return else: raise Exception("ERROR downloading %s, response code is not 200!" 
% outfile) def download_file(outfile, fileid): drive_service = init_drive_service() for item in list_files(drive_service): if fileid == item.get('id'): if 'downloadUrl' in item: _download_file(drive_service, item['downloadUrl'], outfile) return else: raise Exception("No download link is found for file: %s" % item['title']) raise Exception("No file with id: %s is found " % fileid) def get_sheet_metadata(spreadsheet_id): sheet_service = init_sheet_service() sheet_metadata = sheet_service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute() return sheet_metadata def get_sheet_values(spreadsheet_id, range_): sheet_service = init_sheet_service() request = sheet_service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_, valueRenderOption='FORMATTED_VALUE', dateTimeRenderOption='SERIAL_NUMBER') response = request.execute() return response def get_sheet_value(spreadsheet_id, range_): response = get_sheet_values(spreadsheet_id, range_) try: return response['values'][0][0] except: return '' def update_sheet_values(spreadsheet_id, range_, values): sheet_service = init_sheet_service() body = {'values': values} result = sheet_service.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=range_, body=body, valueInputOption='USER_ENTERED').execute() return result.get('updatedCells') def append_sheet_values(spreadsheet_id, range_, values): sheet_service = init_sheet_service() body = {'values': values} result = sheet_service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range=range_, body=body, valueInputOption='USER_ENTERED').execute() return result.get('updates').get('updatedCells')
mit
-3,405,282,358,351,890,000
-4,180,583,433,248,965,600
37.174497
249
0.651371
false
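drive.py wraps the Google Sheets v4 values API behind small get/update/append helpers. A usage sketch, with a placeholder spreadsheet id, assuming the OAuth client settings and credential files the module expects are already in place:

from wunderous.drive import get_sheet_value, update_sheet_values

SPREADSHEET_ID = 'your-spreadsheet-id-here'  # placeholder, not a real id

# read a single cell (the helper returns '' if the cell is unset)
print(get_sheet_value(SPREADSHEET_ID, 'Sheet1!A1'))

# write a 2x2 block starting at A1; values is a list of rows
update_sheet_values(SPREADSHEET_ID, 'Sheet1!A1:B2',
                    [['name', 'count'], ['widgets', 42]])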
timesking/sublime-evernote
lib/pygments/lexers/templates.py
9
56066
# -*- coding: utf-8 -*- """ pygments.lexers.templates ~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for various template engines' markup. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexers.web import \ PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer, LassoLexer from pygments.lexers.agile import PythonLexer, PerlLexer from pygments.lexers.compiled import JavaLexer from pygments.lexers.jvm import TeaLangLexer from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \ include, using, this from pygments.token import Error, Punctuation, \ Text, Comment, Operator, Keyword, Name, String, Number, Other, Token from pygments.util import html_doctype_matches, looks_like_xml __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer', 'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer', 'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer', 'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer', 'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer', 'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer', 'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer', 'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer', 'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer', 'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer', 'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer', 'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer', 'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer', 'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer'] class ErbLexer(Lexer): """ Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating) lexer. Just highlights ruby code between the preprocessor directives, other data is left untouched by the lexer. All options are also forwarded to the `RubyLexer`. """ name = 'ERB' aliases = ['erb'] mimetypes = ['application/x-ruby-templating'] _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M) def __init__(self, **options): from pygments.lexers.agile import RubyLexer self.ruby_lexer = RubyLexer(**options) Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): """ Since ERB doesn't allow "<%" and other tags inside of ruby blocks we have to use a split approach here that fails for that too. 
""" tokens = self._block_re.split(text) tokens.reverse() state = idx = 0 try: while True: # text if state == 0: val = tokens.pop() yield idx, Other, val idx += len(val) state = 1 # block starts elif state == 1: tag = tokens.pop() # literals if tag in ('<%%', '%%>'): yield idx, Other, tag idx += 3 state = 0 # comment elif tag == '<%#': yield idx, Comment.Preproc, tag val = tokens.pop() yield idx + 3, Comment, val idx += 3 + len(val) state = 2 # blocks or output elif tag in ('<%', '<%=', '<%-'): yield idx, Comment.Preproc, tag idx += len(tag) data = tokens.pop() r_idx = 0 for r_idx, r_token, r_value in \ self.ruby_lexer.get_tokens_unprocessed(data): yield r_idx + idx, r_token, r_value idx += len(data) state = 2 elif tag in ('%>', '-%>'): yield idx, Error, tag idx += len(tag) state = 0 # % raw ruby statements else: yield idx, Comment.Preproc, tag[0] r_idx = 0 for r_idx, r_token, r_value in \ self.ruby_lexer.get_tokens_unprocessed(tag[1:]): yield idx + 1 + r_idx, r_token, r_value idx += len(tag) state = 0 # block ends elif state == 2: tag = tokens.pop() if tag not in ('%>', '-%>'): yield idx, Other, tag else: yield idx, Comment.Preproc, tag idx += len(tag) state = 0 except IndexError: return def analyse_text(text): if '<%' in text and '%>' in text: return 0.4 class SmartyLexer(RegexLexer): """ Generic `Smarty <http://smarty.php.net/>`_ template lexer. Just highlights smarty code between the preprocessor directives, other data is left untouched by the lexer. """ name = 'Smarty' aliases = ['smarty'] filenames = ['*.tpl'] mimetypes = ['application/x-smarty'] flags = re.MULTILINE | re.DOTALL tokens = { 'root': [ (r'[^{]+', Other), (r'(\{)(\*.*?\*)(\})', bygroups(Comment.Preproc, Comment, Comment.Preproc)), (r'(\{php\})(.*?)(\{/php\})', bygroups(Comment.Preproc, using(PhpLexer, startinline=True), Comment.Preproc)), (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)', bygroups(Comment.Preproc, Name.Function, Text), 'smarty'), (r'\{', Comment.Preproc, 'smarty') ], 'smarty': [ (r'\s+', Text), (r'\}', Comment.Preproc, '#pop'), (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable), (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable), (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator), (r'(true|false|null)\b', Keyword.Constant), (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" r"0[xX][0-9a-fA-F]+[Ll]?", Number), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute) ] } def analyse_text(text): rv = 0.0 if re.search('\{if\s+.*?\}.*?\{/if\}', text): rv += 0.15 if re.search('\{include\s+file=.*?\}', text): rv += 0.15 if re.search('\{foreach\s+.*?\}.*?\{/foreach\}', text): rv += 0.15 if re.search('\{\$.*?\}', text): rv += 0.01 return rv class VelocityLexer(RegexLexer): """ Generic `Velocity <http://velocity.apache.org/>`_ template lexer. Just highlights velocity directives and variable references, other data is left untouched by the lexer. 
""" name = 'Velocity' aliases = ['velocity'] filenames = ['*.vm','*.fhtml'] flags = re.MULTILINE | re.DOTALL identifier = r'[a-zA-Z_][a-zA-Z0-9_]*' tokens = { 'root': [ (r'[^{#$]+', Other), (r'(#)(\*.*?\*)(#)', bygroups(Comment.Preproc, Comment, Comment.Preproc)), (r'(##)(.*?$)', bygroups(Comment.Preproc, Comment)), (r'(#\{?)(' + identifier + r')(\}?)(\s?\()', bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation), 'directiveparams'), (r'(#\{?)(' + identifier + r')(\}|\b)', bygroups(Comment.Preproc, Name.Function, Comment.Preproc)), (r'\$\{?', Punctuation, 'variable') ], 'variable': [ (identifier, Name.Variable), (r'\(', Punctuation, 'funcparams'), (r'(\.)(' + identifier + r')', bygroups(Punctuation, Name.Variable), '#push'), (r'\}', Punctuation, '#pop'), (r'', Other, '#pop') ], 'directiveparams': [ (r'(&&|\|\||==?|!=?|[-<>+*%&\|\^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b', Operator), (r'\[', Operator, 'rangeoperator'), (r'\b' + identifier + r'\b', Name.Function), include('funcparams') ], 'rangeoperator': [ (r'\.\.', Operator), include('funcparams'), (r'\]', Operator, '#pop') ], 'funcparams': [ (r'\$\{?', Punctuation, 'variable'), (r'\s+', Text), (r',', Punctuation), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), (r"0[xX][0-9a-fA-F]+[Ll]?", Number), (r"\b[0-9]+\b", Number), (r'(true|false|null)\b', Keyword.Constant), (r'\(', Punctuation, '#push'), (r'\)', Punctuation, '#pop'), (r'\[', Punctuation, '#push'), (r'\]', Punctuation, '#pop'), ] } def analyse_text(text): rv = 0.0 if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text): rv += 0.25 if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text): rv += 0.15 if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text): rv += 0.15 if re.search(r'\$\{?[a-zA-Z_][a-zA-Z0-9_]*(\([^)]*\))?' r'(\.[a-zA-Z0-9_]+(\([^)]*\))?)*\}?', text): rv += 0.01 return rv class VelocityHtmlLexer(DelegatingLexer): """ Subclass of the `VelocityLexer` that highlights unlexer data with the `HtmlLexer`. """ name = 'HTML+Velocity' aliases = ['html+velocity'] alias_filenames = ['*.html','*.fhtml'] mimetypes = ['text/html+velocity'] def __init__(self, **options): super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer, **options) class VelocityXmlLexer(DelegatingLexer): """ Subclass of the `VelocityLexer` that highlights unlexer data with the `XmlLexer`. """ name = 'XML+Velocity' aliases = ['xml+velocity'] alias_filenames = ['*.xml','*.vm'] mimetypes = ['application/xml+velocity'] def __init__(self, **options): super(VelocityXmlLexer, self).__init__(XmlLexer, VelocityLexer, **options) def analyse_text(text): rv = VelocityLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.5 return rv class DjangoLexer(RegexLexer): """ Generic `django <http://www.djangoproject.com/documentation/templates/>`_ and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer. It just highlights django/jinja code between the preprocessor directives, other data is left untouched by the lexer. 
""" name = 'Django/Jinja' aliases = ['django', 'jinja'] mimetypes = ['application/x-django-templating', 'application/x-jinja'] flags = re.M | re.S tokens = { 'root': [ (r'[^{]+', Other), (r'\{\{', Comment.Preproc, 'var'), # jinja/django comments (r'\{[*#].*?[*#]\}', Comment), # django comments (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)' r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})', bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, Comment, Comment.Preproc, Text, Keyword, Text, Comment.Preproc)), # raw jinja blocks (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)' r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})', bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, Text, Comment.Preproc, Text, Keyword, Text, Comment.Preproc)), # filter blocks (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function), 'block'), (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Comment.Preproc, Text, Keyword), 'block'), (r'\{', Other) ], 'varnames': [ (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Text, Name.Function)), (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Keyword, Text, Keyword, Text, Name.Function)), (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo), (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|' r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b', Keyword), (r'(loop|block|super|forloop)\b', Name.Builtin), (r'[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable), (r'\.[a-zA-Z0-9_]+', Name.Variable), (r':?"(\\\\|\\"|[^"])*"', String.Double), (r":?'(\\\\|\\'|[^'])*'", String.Single), (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator), (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" r"0[xX][0-9a-fA-F]+[Ll]?", Number), ], 'var': [ (r'\s+', Text), (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'), include('varnames') ], 'block': [ (r'\s+', Text), (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'), include('varnames'), (r'.', Punctuation) ] } def analyse_text(text): rv = 0.0 if re.search(r'\{%\s*(block|extends)', text) is not None: rv += 0.4 if re.search(r'\{%\s*if\s*.*?%\}', text) is not None: rv += 0.1 if re.search(r'\{\{.*?\}\}', text) is not None: rv += 0.1 return rv class MyghtyLexer(RegexLexer): """ Generic `myghty templates`_ lexer. Code that isn't Myghty markup is yielded as `Token.Other`. .. versionadded:: 0.6 .. _myghty templates: http://www.myghty.org/ """ name = 'Myghty' aliases = ['myghty'] filenames = ['*.myt', 'autodelegate'] mimetypes = ['application/x-myghty'] tokens = { 'root': [ (r'\s+', Text), (r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)', bygroups(Name.Tag, Text, Name.Function, Name.Tag, using(this), Name.Tag)), (r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)', bygroups(Name.Tag, Name.Function, Name.Tag, using(PythonLexer), Name.Tag)), (r'(<&[^|])(.*?)(,.*?)?(&>)', bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)), (r'(<&\|)(.*?)(,.*?)?(&>)(?s)', bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)), (r'</&>', Name.Tag), (r'(<%!?)(.*?)(%>)(?s)', bygroups(Name.Tag, using(PythonLexer), Name.Tag)), (r'(?<=^)#[^\n]*(\n|\Z)', Comment), (r'(?<=^)(%)([^\n]*)(\n|\Z)', bygroups(Name.Tag, using(PythonLexer), Other)), (r"""(?sx) (.+?) 
# anything, followed by: (?: (?<=\n)(?=[%#]) | # an eval or comment line (?=</?[%&]) | # a substitution or block or # call start or end # - don't consume (\\\n) | # an escaped newline \Z # end of string )""", bygroups(Other, Operator)), ] } class MyghtyHtmlLexer(DelegatingLexer): """ Subclass of the `MyghtyLexer` that highlights unlexer data with the `HtmlLexer`. .. versionadded:: 0.6 """ name = 'HTML+Myghty' aliases = ['html+myghty'] mimetypes = ['text/html+myghty'] def __init__(self, **options): super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer, **options) class MyghtyXmlLexer(DelegatingLexer): """ Subclass of the `MyghtyLexer` that highlights unlexer data with the `XmlLexer`. .. versionadded:: 0.6 """ name = 'XML+Myghty' aliases = ['xml+myghty'] mimetypes = ['application/xml+myghty'] def __init__(self, **options): super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer, **options) class MyghtyJavascriptLexer(DelegatingLexer): """ Subclass of the `MyghtyLexer` that highlights unlexer data with the `JavascriptLexer`. .. versionadded:: 0.6 """ name = 'JavaScript+Myghty' aliases = ['js+myghty', 'javascript+myghty'] mimetypes = ['application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy'] def __init__(self, **options): super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer, MyghtyLexer, **options) class MyghtyCssLexer(DelegatingLexer): """ Subclass of the `MyghtyLexer` that highlights unlexer data with the `CssLexer`. .. versionadded:: 0.6 """ name = 'CSS+Myghty' aliases = ['css+myghty'] mimetypes = ['text/css+myghty'] def __init__(self, **options): super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer, **options) class MasonLexer(RegexLexer): """ Generic `mason templates`_ lexer. Stolen from Myghty lexer. Code that isn't Mason markup is HTML. .. _mason templates: http://www.masonhq.com/ .. versionadded:: 1.4 """ name = 'Mason' aliases = ['mason'] filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'] mimetypes = ['application/x-mason'] tokens = { 'root': [ (r'\s+', Text), (r'(<%doc>)(.*?)(</%doc>)(?s)', bygroups(Name.Tag, Comment.Multiline, Name.Tag)), (r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)', bygroups(Name.Tag, Text, Name.Function, Name.Tag, using(this), Name.Tag)), (r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)', bygroups(Name.Tag, Name.Function, Name.Tag, using(PerlLexer), Name.Tag)), (r'(<&[^|])(.*?)(,.*?)?(&>)(?s)', bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)), (r'(<&\|)(.*?)(,.*?)?(&>)(?s)', bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)), (r'</&>', Name.Tag), (r'(<%!?)(.*?)(%>)(?s)', bygroups(Name.Tag, using(PerlLexer), Name.Tag)), (r'(?<=^)#[^\n]*(\n|\Z)', Comment), (r'(?<=^)(%)([^\n]*)(\n|\Z)', bygroups(Name.Tag, using(PerlLexer), Other)), (r"""(?sx) (.+?) # anything, followed by: (?: (?<=\n)(?=[%#]) | # an eval or comment line (?=</?[%&]) | # a substitution or block or # call start or end # - don't consume (\\\n) | # an escaped newline \Z # end of string )""", bygroups(using(HtmlLexer), Operator)), ] } def analyse_text(text): rv = 0.0 if re.search('<&', text) is not None: rv = 1.0 return rv class MakoLexer(RegexLexer): """ Generic `mako templates`_ lexer. Code that isn't Mako markup is yielded as `Token.Other`. .. versionadded:: 0.7 .. 
_mako templates: http://www.makotemplates.org/ """ name = 'Mako' aliases = ['mako'] filenames = ['*.mao'] mimetypes = ['application/x-mako'] tokens = { 'root': [ (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)', bygroups(Text, Comment.Preproc, Keyword, Other)), (r'(\s*)(%)([^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, using(PythonLexer), Other)), (r'(\s*)(##[^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, Other)), (r'(?s)<%doc>.*?</%doc>', Comment.Preproc), (r'(<%)([\w\.\:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'), (r'(</%)([\w\.\:]+)(>)', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'), (r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(\$\{)(.*?)(\})', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'''(?sx) (.+?) # anything, followed by: (?: (?<=\n)(?=%|\#\#) | # an eval or comment line (?=\#\*) | # multiline comment (?=</?%) | # a python block # call start or end (?=\$\{) | # a substitution (?<=\n)(?=\s*%) | # - don't consume (\\\n) | # an escaped newline \Z # end of string ) ''', bygroups(Other, Operator)), (r'\s+', Text), ], 'ondeftags': [ (r'<%', Comment.Preproc), (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin), include('tag'), ], 'tag': [ (r'((?:\w+)\s*=)(\s*)(".*?")', bygroups(Name.Attribute, Text, String)), (r'/?\s*>', Comment.Preproc, '#pop'), (r'\s+', Text), ], 'attr': [ ('".*?"', String, '#pop'), ("'.*?'", String, '#pop'), (r'[^\s>]+', String, '#pop'), ], } class MakoHtmlLexer(DelegatingLexer): """ Subclass of the `MakoLexer` that highlights unlexed data with the `HtmlLexer`. .. versionadded:: 0.7 """ name = 'HTML+Mako' aliases = ['html+mako'] mimetypes = ['text/html+mako'] def __init__(self, **options): super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, **options) class MakoXmlLexer(DelegatingLexer): """ Subclass of the `MakoLexer` that highlights unlexer data with the `XmlLexer`. .. versionadded:: 0.7 """ name = 'XML+Mako' aliases = ['xml+mako'] mimetypes = ['application/xml+mako'] def __init__(self, **options): super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, **options) class MakoJavascriptLexer(DelegatingLexer): """ Subclass of the `MakoLexer` that highlights unlexer data with the `JavascriptLexer`. .. versionadded:: 0.7 """ name = 'JavaScript+Mako' aliases = ['js+mako', 'javascript+mako'] mimetypes = ['application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako'] def __init__(self, **options): super(MakoJavascriptLexer, self).__init__(JavascriptLexer, MakoLexer, **options) class MakoCssLexer(DelegatingLexer): """ Subclass of the `MakoLexer` that highlights unlexer data with the `CssLexer`. .. versionadded:: 0.7 """ name = 'CSS+Mako' aliases = ['css+mako'] mimetypes = ['text/css+mako'] def __init__(self, **options): super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, **options) # Genshi and Cheetah lexers courtesy of Matt Good. class CheetahPythonLexer(Lexer): """ Lexer for handling Cheetah's special $ tokens in Python syntax. """ def get_tokens_unprocessed(self, text): pylexer = PythonLexer(**self.options) for pos, type_, value in pylexer.get_tokens_unprocessed(text): if type_ == Token.Error and value == '$': type_ = Comment.Preproc yield pos, type_, value class CheetahLexer(RegexLexer): """ Generic `cheetah templates`_ lexer. Code that isn't Cheetah markup is yielded as `Token.Other`. This also works for `spitfire templates`_ which use the same syntax. .. 
_cheetah templates: http://www.cheetahtemplate.org/ .. _spitfire templates: http://code.google.com/p/spitfire/ """ name = 'Cheetah' aliases = ['cheetah', 'spitfire'] filenames = ['*.tmpl', '*.spt'] mimetypes = ['application/x-cheetah', 'application/x-spitfire'] tokens = { 'root': [ (r'(##[^\n]*)$', (bygroups(Comment))), (r'#[*](.|\n)*?[*]#', Comment), (r'#end[^#\n]*(?:#|$)', Comment.Preproc), (r'#slurp$', Comment.Preproc), (r'(#[a-zA-Z]+)([^#\n]*)(#|$)', (bygroups(Comment.Preproc, using(CheetahPythonLexer), Comment.Preproc))), # TODO support other Python syntax like $foo['bar'] (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])', bygroups(Comment.Preproc, using(CheetahPythonLexer))), (r'(\$\{!?)(.*?)(\})(?s)', bygroups(Comment.Preproc, using(CheetahPythonLexer), Comment.Preproc)), (r'''(?sx) (.+?) # anything, followed by: (?: (?=[#][#a-zA-Z]*) | # an eval comment (?=\$[a-zA-Z_{]) | # a substitution \Z # end of string ) ''', Other), (r'\s+', Text), ], } class CheetahHtmlLexer(DelegatingLexer): """ Subclass of the `CheetahLexer` that highlights unlexer data with the `HtmlLexer`. """ name = 'HTML+Cheetah' aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah'] mimetypes = ['text/html+cheetah', 'text/html+spitfire'] def __init__(self, **options): super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer, **options) class CheetahXmlLexer(DelegatingLexer): """ Subclass of the `CheetahLexer` that highlights unlexer data with the `XmlLexer`. """ name = 'XML+Cheetah' aliases = ['xml+cheetah', 'xml+spitfire'] mimetypes = ['application/xml+cheetah', 'application/xml+spitfire'] def __init__(self, **options): super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer, **options) class CheetahJavascriptLexer(DelegatingLexer): """ Subclass of the `CheetahLexer` that highlights unlexer data with the `JavascriptLexer`. """ name = 'JavaScript+Cheetah' aliases = ['js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'] mimetypes = ['application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire'] def __init__(self, **options): super(CheetahJavascriptLexer, self).__init__(JavascriptLexer, CheetahLexer, **options) class GenshiTextLexer(RegexLexer): """ A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text templates. """ name = 'Genshi Text' aliases = ['genshitext'] mimetypes = ['application/x-genshi-text', 'text/x-genshi'] tokens = { 'root': [ (r'[^#\$\s]+', Other), (r'^(\s*)(##.*)$', bygroups(Text, Comment)), (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'), include('variable'), (r'[#\$\s]', Other), ], 'directive': [ (r'\n', Text, '#pop'), (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'), (r'(choose|when|with)([^\S\n]+)(.*)', bygroups(Keyword, Text, using(PythonLexer)), '#pop'), (r'(choose|otherwise)\b', Keyword, '#pop'), (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'), ], 'variable': [ (r'(?<!\$)(\$\{)(.+?)(\})', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)', Name.Variable), ] } class GenshiMarkupLexer(RegexLexer): """ Base lexer for Genshi markup, used by `HtmlGenshiLexer` and `GenshiLexer`. 
""" flags = re.DOTALL tokens = { 'root': [ (r'[^<\$]+', Other), (r'(<\?python)(.*?)(\?>)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), # yield style and script blocks as Other (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other), (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'), (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'), include('variable'), (r'[<\$]', Other), ], 'pytag': [ (r'\s+', Text), (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'), (r'/?\s*>', Name.Tag, '#pop'), ], 'pyattr': [ ('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'), ("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'), (r'[^\s>]+', String, '#pop'), ], 'tag': [ (r'\s+', Text), (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'), (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'), (r'/?\s*>', Name.Tag, '#pop'), ], 'attr': [ ('"', String, 'attr-dstring'), ("'", String, 'attr-sstring'), (r'[^\s>]*', String, '#pop') ], 'attr-dstring': [ ('"', String, '#pop'), include('strings'), ("'", String) ], 'attr-sstring': [ ("'", String, '#pop'), include('strings'), ("'", String) ], 'strings': [ ('[^"\'$]+', String), include('variable') ], 'variable': [ (r'(?<!\$)(\$\{)(.+?)(\})', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)', Name.Variable), ] } class HtmlGenshiLexer(DelegatingLexer): """ A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and `kid <http://kid-templating.org/>`_ kid HTML templates. """ name = 'HTML+Genshi' aliases = ['html+genshi', 'html+kid'] alias_filenames = ['*.html', '*.htm', '*.xhtml'] mimetypes = ['text/html+genshi'] def __init__(self, **options): super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer, **options) def analyse_text(text): rv = 0.0 if re.search('\$\{.*?\}', text) is not None: rv += 0.2 if re.search('py:(.*?)=["\']', text) is not None: rv += 0.2 return rv + HtmlLexer.analyse_text(text) - 0.01 class GenshiLexer(DelegatingLexer): """ A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and `kid <http://kid-templating.org/>`_ kid XML templates. """ name = 'Genshi' aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid'] filenames = ['*.kid'] alias_filenames = ['*.xml'] mimetypes = ['application/x-genshi', 'application/x-kid'] def __init__(self, **options): super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer, **options) def analyse_text(text): rv = 0.0 if re.search('\$\{.*?\}', text) is not None: rv += 0.2 if re.search('py:(.*?)=["\']', text) is not None: rv += 0.2 return rv + XmlLexer.analyse_text(text) - 0.01 class JavascriptGenshiLexer(DelegatingLexer): """ A lexer that highlights javascript code in genshi text templates. """ name = 'JavaScript+Genshi Text' aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'] alias_filenames = ['*.js'] mimetypes = ['application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi'] def __init__(self, **options): super(JavascriptGenshiLexer, self).__init__(JavascriptLexer, GenshiTextLexer, **options) def analyse_text(text): return GenshiLexer.analyse_text(text) - 0.05 class CssGenshiLexer(DelegatingLexer): """ A lexer that highlights CSS definitions in genshi text templates. 
""" name = 'CSS+Genshi Text' aliases = ['css+genshitext', 'css+genshi'] alias_filenames = ['*.css'] mimetypes = ['text/css+genshi'] def __init__(self, **options): super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer, **options) def analyse_text(text): return GenshiLexer.analyse_text(text) - 0.05 class RhtmlLexer(DelegatingLexer): """ Subclass of the ERB lexer that highlights the unlexed data with the html lexer. Nested Javascript and CSS is highlighted too. """ name = 'RHTML' aliases = ['rhtml', 'html+erb', 'html+ruby'] filenames = ['*.rhtml'] alias_filenames = ['*.html', '*.htm', '*.xhtml'] mimetypes = ['text/html+ruby'] def __init__(self, **options): super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options) def analyse_text(text): rv = ErbLexer.analyse_text(text) - 0.01 if html_doctype_matches(text): # one more than the XmlErbLexer returns rv += 0.5 return rv class XmlErbLexer(DelegatingLexer): """ Subclass of `ErbLexer` which highlights data outside preprocessor directives with the `XmlLexer`. """ name = 'XML+Ruby' aliases = ['xml+erb', 'xml+ruby'] alias_filenames = ['*.xml'] mimetypes = ['application/xml+ruby'] def __init__(self, **options): super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options) def analyse_text(text): rv = ErbLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.4 return rv class CssErbLexer(DelegatingLexer): """ Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`. """ name = 'CSS+Ruby' aliases = ['css+erb', 'css+ruby'] alias_filenames = ['*.css'] mimetypes = ['text/css+ruby'] def __init__(self, **options): super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options) def analyse_text(text): return ErbLexer.analyse_text(text) - 0.05 class JavascriptErbLexer(DelegatingLexer): """ Subclass of `ErbLexer` which highlights unlexed data with the `JavascriptLexer`. """ name = 'JavaScript+Ruby' aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'] alias_filenames = ['*.js'] mimetypes = ['application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby'] def __init__(self, **options): super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer, **options) def analyse_text(text): return ErbLexer.analyse_text(text) - 0.05 class HtmlPhpLexer(DelegatingLexer): """ Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`. Nested Javascript and CSS is highlighted too. """ name = 'HTML+PHP' aliases = ['html+php'] filenames = ['*.phtml'] alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml', '*.php[345]'] mimetypes = ['application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5'] def __init__(self, **options): super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options) def analyse_text(text): rv = PhpLexer.analyse_text(text) - 0.01 if html_doctype_matches(text): rv += 0.5 return rv class XmlPhpLexer(DelegatingLexer): """ Subclass of `PhpLexer` that higlights unhandled data with the `XmlLexer`. """ name = 'XML+PHP' aliases = ['xml+php'] alias_filenames = ['*.xml', '*.php', '*.php[345]'] mimetypes = ['application/xml+php'] def __init__(self, **options): super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options) def analyse_text(text): rv = PhpLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.4 return rv class CssPhpLexer(DelegatingLexer): """ Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`. 
""" name = 'CSS+PHP' aliases = ['css+php'] alias_filenames = ['*.css'] mimetypes = ['text/css+php'] def __init__(self, **options): super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options) def analyse_text(text): return PhpLexer.analyse_text(text) - 0.05 class JavascriptPhpLexer(DelegatingLexer): """ Subclass of `PhpLexer` which highlights unmatched data with the `JavascriptLexer`. """ name = 'JavaScript+PHP' aliases = ['js+php', 'javascript+php'] alias_filenames = ['*.js'] mimetypes = ['application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php'] def __init__(self, **options): super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer, **options) def analyse_text(text): return PhpLexer.analyse_text(text) class HtmlSmartyLexer(DelegatingLexer): """ Subclass of the `SmartyLexer` that highighlights unlexed data with the `HtmlLexer`. Nested Javascript and CSS is highlighted too. """ name = 'HTML+Smarty' aliases = ['html+smarty'] alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl'] mimetypes = ['text/html+smarty'] def __init__(self, **options): super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options) def analyse_text(text): rv = SmartyLexer.analyse_text(text) - 0.01 if html_doctype_matches(text): rv += 0.5 return rv class XmlSmartyLexer(DelegatingLexer): """ Subclass of the `SmartyLexer` that highlights unlexed data with the `XmlLexer`. """ name = 'XML+Smarty' aliases = ['xml+smarty'] alias_filenames = ['*.xml', '*.tpl'] mimetypes = ['application/xml+smarty'] def __init__(self, **options): super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options) def analyse_text(text): rv = SmartyLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.4 return rv class CssSmartyLexer(DelegatingLexer): """ Subclass of the `SmartyLexer` that highlights unlexed data with the `CssLexer`. """ name = 'CSS+Smarty' aliases = ['css+smarty'] alias_filenames = ['*.css', '*.tpl'] mimetypes = ['text/css+smarty'] def __init__(self, **options): super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options) def analyse_text(text): return SmartyLexer.analyse_text(text) - 0.05 class JavascriptSmartyLexer(DelegatingLexer): """ Subclass of the `SmartyLexer` that highlights unlexed data with the `JavascriptLexer`. """ name = 'JavaScript+Smarty' aliases = ['js+smarty', 'javascript+smarty'] alias_filenames = ['*.js', '*.tpl'] mimetypes = ['application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty'] def __init__(self, **options): super(JavascriptSmartyLexer, self).__init__(JavascriptLexer, SmartyLexer, **options) def analyse_text(text): return SmartyLexer.analyse_text(text) - 0.05 class HtmlDjangoLexer(DelegatingLexer): """ Subclass of the `DjangoLexer` that highighlights unlexed data with the `HtmlLexer`. Nested Javascript and CSS is highlighted too. """ name = 'HTML+Django/Jinja' aliases = ['html+django', 'html+jinja', 'htmldjango'] alias_filenames = ['*.html', '*.htm', '*.xhtml'] mimetypes = ['text/html+django', 'text/html+jinja'] def __init__(self, **options): super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer, **options) def analyse_text(text): rv = DjangoLexer.analyse_text(text) - 0.01 if html_doctype_matches(text): rv += 0.5 return rv class XmlDjangoLexer(DelegatingLexer): """ Subclass of the `DjangoLexer` that highlights unlexed data with the `XmlLexer`. 
""" name = 'XML+Django/Jinja' aliases = ['xml+django', 'xml+jinja'] alias_filenames = ['*.xml'] mimetypes = ['application/xml+django', 'application/xml+jinja'] def __init__(self, **options): super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options) def analyse_text(text): rv = DjangoLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.4 return rv class CssDjangoLexer(DelegatingLexer): """ Subclass of the `DjangoLexer` that highlights unlexed data with the `CssLexer`. """ name = 'CSS+Django/Jinja' aliases = ['css+django', 'css+jinja'] alias_filenames = ['*.css'] mimetypes = ['text/css+django', 'text/css+jinja'] def __init__(self, **options): super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options) def analyse_text(text): return DjangoLexer.analyse_text(text) - 0.05 class JavascriptDjangoLexer(DelegatingLexer): """ Subclass of the `DjangoLexer` that highlights unlexed data with the `JavascriptLexer`. """ name = 'JavaScript+Django/Jinja' aliases = ['js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'] alias_filenames = ['*.js'] mimetypes = ['application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja'] def __init__(self, **options): super(JavascriptDjangoLexer, self).__init__(JavascriptLexer, DjangoLexer, **options) def analyse_text(text): return DjangoLexer.analyse_text(text) - 0.05 class JspRootLexer(RegexLexer): """ Base for the `JspLexer`. Yields `Token.Other` for area outside of JSP tags. .. versionadded:: 0.7 """ tokens = { 'root': [ (r'<%\S?', Keyword, 'sec'), # FIXME: I want to make these keywords but still parse attributes. (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>', Keyword), (r'[^<]+', Other), (r'<', Other), ], 'sec': [ (r'%>', Keyword, '#pop'), # note: '\w\W' != '.' without DOTALL. (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)), ], } class JspLexer(DelegatingLexer): """ Lexer for Java Server Pages. .. versionadded:: 0.7 """ name = 'Java Server Page' aliases = ['jsp'] filenames = ['*.jsp'] mimetypes = ['application/x-jsp'] def __init__(self, **options): super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options) def analyse_text(text): rv = JavaLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.4 if '<%' in text and '%>' in text: rv += 0.1 return rv class EvoqueLexer(RegexLexer): """ For files using the Evoque templating system. .. versionadded:: 1.1 """ name = 'Evoque' aliases = ['evoque'] filenames = ['*.evoque'] mimetypes = ['application/x-evoque'] flags = re.DOTALL tokens = { 'root': [ (r'[^#$]+', Other), (r'#\[', Comment.Multiline, 'comment'), (r'\$\$', Other), # svn keywords (r'\$\w+:[^$\n]*\$', Comment.Multiline), # directives: begin, end (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})', bygroups(Punctuation, Name.Builtin, Punctuation, None, String, Punctuation)), # directives: evoque, overlay # see doc for handling first name arg: /directives/evoque/ #+ minor inconsistency: the "name" in e.g. $overlay{name=site_base} # should be using(PythonLexer), not passed out as String (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?' 
r'(.*?)((?(4)%)\})', bygroups(Punctuation, Name.Builtin, Punctuation, None, String, using(PythonLexer), Punctuation)), # directives: if, for, prefer, test (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})', bygroups(Punctuation, Name.Builtin, Punctuation, None, using(PythonLexer), Punctuation)), # directive clauses (no {} expression) (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)), # expressions (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})', bygroups(Punctuation, None, using(PythonLexer), Name.Builtin, None, None, Punctuation)), (r'#', Other), ], 'comment': [ (r'[^\]#]', Comment.Multiline), (r'#\[', Comment.Multiline, '#push'), (r'\]#', Comment.Multiline, '#pop'), (r'[\]#]', Comment.Multiline) ], } class EvoqueHtmlLexer(DelegatingLexer): """ Subclass of the `EvoqueLexer` that highlights unlexed data with the `HtmlLexer`. .. versionadded:: 1.1 """ name = 'HTML+Evoque' aliases = ['html+evoque'] filenames = ['*.html'] mimetypes = ['text/html+evoque'] def __init__(self, **options): super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer, **options) class EvoqueXmlLexer(DelegatingLexer): """ Subclass of the `EvoqueLexer` that highlights unlexed data with the `XmlLexer`. .. versionadded:: 1.1 """ name = 'XML+Evoque' aliases = ['xml+evoque'] filenames = ['*.xml'] mimetypes = ['application/xml+evoque'] def __init__(self, **options): super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer, **options) class ColdfusionLexer(RegexLexer): """ Coldfusion statements """ name = 'cfstatement' aliases = ['cfs'] filenames = [] mimetypes = [] flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ (r'//.*', Comment), (r'\+\+|--', Operator), (r'[-+*/^&=!]', Operator), (r'<=|>=|<|>', Operator), (r'mod\b', Operator), (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator), (r'\|\||&&', Operator), (r'"', String.Double, 'string'), # There is a special rule for allowing html in single quoted # strings, evidently. 
(r"'.*?'", String.Single), (r'\d+', Number), (r'(if|else|len|var|case|default|break|switch)\b', Keyword), (r'([A-Za-z_$][A-Za-z0-9_.]*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), (r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable), (r'[()\[\]{};:,.\\]', Punctuation), (r'\s+', Text), ], 'string': [ (r'""', String.Double), (r'#.+?#', String.Interp), (r'[^"#]+', String.Double), (r'#', String.Double), (r'"', String.Double, '#pop'), ], } class ColdfusionMarkupLexer(RegexLexer): """ Coldfusion markup only """ name = 'Coldfusion' aliases = ['cf'] filenames = [] mimetypes = [] tokens = { 'root': [ (r'[^<]+', Other), include('tags'), (r'<[^<>]*', Other), ], 'tags': [ (r'(?s)<!---.*?--->', Comment.Multiline), (r'(?s)<!--.*?-->', Comment), (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'), (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)', bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)), # negative lookbehind is for strings with embedded > (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|' r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|' r'mailpart|mail|header|content|zip|image|lock|argument|try|' r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)', bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)), ], 'cfoutput': [ (r'[^#<]+', Other), (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer), Punctuation)), #(r'<cfoutput.*?>', Name.Builtin, '#push'), (r'</cfoutput.*?>', Name.Builtin, '#pop'), include('tags'), (r'(?s)<[^<>]*', Other), (r'#', Other), ], } class ColdfusionHtmlLexer(DelegatingLexer): """ Coldfusion markup in html """ name = 'Coldfusion HTML' aliases = ['cfm'] filenames = ['*.cfm', '*.cfml', '*.cfc'] mimetypes = ['application/x-coldfusion'] def __init__(self, **options): super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer, **options) class SspLexer(DelegatingLexer): """ Lexer for Scalate Server Pages. .. versionadded:: 1.4 """ name = 'Scalate Server Page' aliases = ['ssp'] filenames = ['*.ssp'] mimetypes = ['application/x-ssp'] def __init__(self, **options): super(SspLexer, self).__init__(XmlLexer, JspRootLexer, **options) def analyse_text(text): rv = 0.0 if re.search('val \w+\s*:', text): rv += 0.6 if looks_like_xml(text): rv += 0.2 if '<%' in text and '%>' in text: rv += 0.1 return rv class TeaTemplateRootLexer(RegexLexer): """ Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of code blocks. .. versionadded:: 1.5 """ tokens = { 'root': [ (r'<%\S?', Keyword, 'sec'), (r'[^<]+', Other), (r'<', Other), ], 'sec': [ (r'%>', Keyword, '#pop'), # note: '\w\W' != '.' without DOTALL. (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)), ], } class TeaTemplateLexer(DelegatingLexer): """ Lexer for `Tea Templates <http://teatrove.org/>`_. .. versionadded:: 1.5 """ name = 'Tea' aliases = ['tea'] filenames = ['*.tea'] mimetypes = ['text/x-tea'] def __init__(self, **options): super(TeaTemplateLexer, self).__init__(XmlLexer, TeaTemplateRootLexer, **options) def analyse_text(text): rv = TeaLangLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.4 if '<%' in text and '%>' in text: rv += 0.1 return rv class LassoHtmlLexer(DelegatingLexer): """ Subclass of the `LassoLexer` which highlights unhandled data with the `HtmlLexer`. Nested JavaScript and CSS is also highlighted. .. 
versionadded:: 1.6 """ name = 'HTML+Lasso' aliases = ['html+lasso'] alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]', '*.incl', '*.inc', '*.las'] mimetypes = ['text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]'] def __init__(self, **options): super(LassoHtmlLexer, self).__init__(HtmlLexer, LassoLexer, **options) def analyse_text(text): rv = LassoLexer.analyse_text(text) - 0.01 if re.search(r'<\w+>', text, re.I): rv += 0.2 if html_doctype_matches(text): rv += 0.5 return rv class LassoXmlLexer(DelegatingLexer): """ Subclass of the `LassoLexer` which highlights unhandled data with the `XmlLexer`. .. versionadded:: 1.6 """ name = 'XML+Lasso' aliases = ['xml+lasso'] alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]', '*.incl', '*.inc', '*.las'] mimetypes = ['application/xml+lasso'] def __init__(self, **options): super(LassoXmlLexer, self).__init__(XmlLexer, LassoLexer, **options) def analyse_text(text): rv = LassoLexer.analyse_text(text) - 0.01 if looks_like_xml(text): rv += 0.4 return rv class LassoCssLexer(DelegatingLexer): """ Subclass of the `LassoLexer` which highlights unhandled data with the `CssLexer`. .. versionadded:: 1.6 """ name = 'CSS+Lasso' aliases = ['css+lasso'] alias_filenames = ['*.css'] mimetypes = ['text/css+lasso'] def __init__(self, **options): options['requiredelimiters'] = True super(LassoCssLexer, self).__init__(CssLexer, LassoLexer, **options) def analyse_text(text): rv = LassoLexer.analyse_text(text) - 0.05 if re.search(r'\w+:.+?;', text): rv += 0.1 if 'padding:' in text: rv += 0.1 return rv class LassoJavascriptLexer(DelegatingLexer): """ Subclass of the `LassoLexer` which highlights unhandled data with the `JavascriptLexer`. .. versionadded:: 1.6 """ name = 'JavaScript+Lasso' aliases = ['js+lasso', 'javascript+lasso'] alias_filenames = ['*.js'] mimetypes = ['application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso'] def __init__(self, **options): options['requiredelimiters'] = True super(LassoJavascriptLexer, self).__init__(JavascriptLexer, LassoLexer, **options) def analyse_text(text): rv = LassoLexer.analyse_text(text) - 0.05 if 'function' in text: rv += 0.2 return rv
mit
-7,983,586,365,008,860,000
1,942,721,672,226,538,500
31.147936
84
0.491938
false
DiptoDas8/Biponi
lib/python2.7/site-packages/django/template/backends/django.py
44
3018
# Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import

import warnings

from django.conf import settings
from django.template.context import Context, RequestContext, make_context
from django.template.engine import Engine, _dirs_undefined
from django.utils.deprecation import RemovedInDjango110Warning

from .base import BaseEngine


class DjangoTemplates(BaseEngine):

    app_dirname = 'templates'

    def __init__(self, params):
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        options.setdefault('debug', settings.DEBUG)
        options.setdefault('file_charset', settings.FILE_CHARSET)
        super(DjangoTemplates, self).__init__(params)
        self.engine = Engine(self.dirs, self.app_dirs, **options)

    def from_string(self, template_code):
        return Template(self.engine.from_string(template_code))

    def get_template(self, template_name, dirs=_dirs_undefined):
        return Template(self.engine.get_template(template_name, dirs))


class Template(object):

    def __init__(self, template):
        self.template = template

    @property
    def origin(self):
        # TODO: define the Origin API. For now simply forwarding to the
        # underlying Template preserves backwards-compatibility.
        return self.template.origin

    def render(self, context=None, request=None):
        # A deprecation path is required here to cover the following usage:
        # >>> from django.template import Context
        # >>> from django.template.loader import get_template
        # >>> template = get_template('hello.html')
        # >>> template.render(Context({'name': 'world'}))
        # In Django 1.7 get_template() returned a django.template.Template.
        # In Django 1.8 it returns a django.template.backends.django.Template.
        # In Django 1.10 the isinstance checks should be removed. If passing a
        # Context or a RequestContext works by accident, it won't be an issue
        # per se, but it won't be officially supported either.
        if isinstance(context, RequestContext):
            if request is not None and request is not context.request:
                raise ValueError(
                    "render() was called with a RequestContext and a request "
                    "argument which refer to different requests. Make sure "
                    "that the context argument is a dict or at least that "
                    "the two arguments refer to the same request.")
            warnings.warn(
                "render() must be called with a dict, not a RequestContext.",
                RemovedInDjango110Warning, stacklevel=2)

        elif isinstance(context, Context):
            warnings.warn(
                "render() must be called with a dict, not a Context.",
                RemovedInDjango110Warning, stacklevel=2)

        else:
            context = make_context(context, request)

        return self.template.render(context)
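# Usage sketch (a hedged example, not part of Django itself; assumes Django 1.8
# settings are already configured, and the 'NAME'/'DIRS'/'APP_DIRS' values
# below are illustrative):
#
#   engine = DjangoTemplates({
#       'NAME': 'django', 'DIRS': [], 'APP_DIRS': False, 'OPTIONS': {},
#   })
#   template = engine.from_string('Hello {{ name }}!')
#   template.render({'name': 'world'})  # -> u'Hello world!'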
mit
2,213,672,082,939,122,000
-3,740,147,476,646,998,000
39.783784
78
0.656064
false
ex1usive-m4d/TemplateDocx
controllers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/encodings/__init__.py
60
5638
""" Standard "encodings" Package Standard Python encoding modules are stored in this package directory. Codec modules must have names corresponding to normalized encoding names as defined in the normalize_encoding() function below, e.g. 'utf-8' must be implemented by the module 'utf_8.py'. Each codec module must export the following interface: * getregentry() -> codecs.CodecInfo object The getregentry() API must a CodecInfo object with encoder, decoder, incrementalencoder, incrementaldecoder, streamwriter and streamreader atttributes which adhere to the Python Codec Interface Standard. In addition, a module may optionally also define the following APIs which are then used by the package's codec search function: * getaliases() -> sequence of encoding name strings to use as aliases Alias names returned by getaliases() must be normalized encoding names as defined by normalize_encoding(). Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """#" import codecs from encodings import aliases import __builtin__ _cache = {} _unknown = '--unknown--' _import_tail = ['*'] _norm_encoding_map = (' . ' '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ ' ' abcdefghijklmnopqrstuvwxyz ' ' ' ' ' ' ') _aliases = aliases.aliases class CodecRegistryError(LookupError, SystemError): pass def normalize_encoding(encoding): """ Normalize an encoding name. Normalization works as follows: all non-alphanumeric characters except the dot used for Python package names are collapsed and replaced with a single underscore, e.g. ' -;#' becomes '_'. Leading and trailing underscores are removed. Note that encoding names should be ASCII only; if they do use non-ASCII characters, these must be Latin-1 compatible. """ # Make sure we have an 8-bit string, because .translate() works # differently for Unicode strings. if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode): # Note that .encode('latin-1') does *not* use the codec # registry, so this call doesn't recurse. (See unicodeobject.c # PyUnicode_AsEncodedString() for details) encoding = encoding.encode('latin-1') return '_'.join(encoding.translate(_norm_encoding_map).split()) def search_function(encoding): # Cache lookup entry = _cache.get(encoding, _unknown) if entry is not _unknown: return entry # Import the module: # # First try to find an alias for the normalized encoding # name and lookup the module using the aliased name, then try to # lookup the module using the standard import scheme, i.e. first # try in the encodings package, then at top-level. # norm_encoding = normalize_encoding(encoding) aliased_encoding = _aliases.get(norm_encoding) or \ _aliases.get(norm_encoding.replace('.', '_')) if aliased_encoding is not None: modnames = [aliased_encoding, norm_encoding] else: modnames = [norm_encoding] for modname in modnames: if not modname or '.' in modname: continue try: # Import is absolute to prevent the possibly malicious import of a # module with side-effects that is not in the 'encodings' package. mod = __import__('encodings.' 
+ modname, fromlist=_import_tail, level=0) except ImportError: pass else: break else: mod = None try: getregentry = mod.getregentry except AttributeError: # Not a codec module mod = None if mod is None: # Cache misses _cache[encoding] = None return None # Now ask the module for the registry entry entry = getregentry() if not isinstance(entry, codecs.CodecInfo): if not 4 <= len(entry) <= 7: raise CodecRegistryError,\ 'module "%s" (%s) failed to register' % \ (mod.__name__, mod.__file__) if not callable(entry[0]) or \ not callable(entry[1]) or \ (entry[2] is not None and not callable(entry[2])) or \ (entry[3] is not None and not callable(entry[3])) or \ (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \ (len(entry) > 5 and entry[5] is not None and not callable(entry[5])): raise CodecRegistryError,\ 'incompatible codecs in module "%s" (%s)' % \ (mod.__name__, mod.__file__) if len(entry)<7 or entry[6] is None: entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) entry = codecs.CodecInfo(*entry) # Cache the codec registry entry _cache[encoding] = entry # Register its aliases (without overwriting previously registered # aliases) try: codecaliases = mod.getaliases() except AttributeError: pass else: for alias in codecaliases: if not _aliases.has_key(alias): _aliases[alias] = modname # Return the registry entry return entry # Register the search_function in the Python codec registry codecs.register(search_function)
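# Usage sketch (hedged; standard CPython 2.x behaviour — the lookup below is
# routed through search_function() the first time an encoding is used):
#
#   import codecs
#   info = codecs.lookup('UTF-8')      # imports and caches encodings.utf_8
#   assert info.name == 'utf-8'
#   normalize_encoding('  UTF-8  ')    # -> 'UTF_8'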
bsd-3-clause
6,564,381,847,502,046,000
-1,880,841,409,702,328,000
34.910828
84
0.600745
false
LuckDragon82/demo
boilerplate/external/babel/tests/core.py
61
1726
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.

import doctest
import os
import unittest

from babel import core
from babel.core import default_locale


class DefaultLocaleTest(unittest.TestCase):

    def setUp(self):
        self._old_locale_settings = self._current_locale_settings()

    def tearDown(self):
        self._set_locale_settings(self._old_locale_settings)

    def _current_locale_settings(self):
        settings = {}
        for name in ('LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG'):
            # use .get() so that variables absent from the environment don't
            # raise KeyError during setUp
            settings[name] = os.environ.get(name)
        return settings

    def _set_locale_settings(self, settings):
        for name, value in settings.items():
            if value is not None:
                os.environ[name] = value
            else:
                os.environ.pop(name, None)

    def test_ignore_invalid_locales_in_lc_ctype(self):
        # This is a regression test specifically for a bad LC_CTYPE setting on
        # MacOS X 10.6 (#200)
        os.environ['LC_CTYPE'] = 'UTF-8'
        # must not throw an exception
        default_locale('LC_CTYPE')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocTestSuite(core))
    suite.addTest(unittest.makeSuite(DefaultLocaleTest))
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='suite')
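# Usage sketch for the function under test (hedged; the value returned depends
# on the process environment, 'fr_FR.UTF-8' below is illustrative):
#
#   os.environ['LC_MESSAGES'] = 'fr_FR.UTF-8'
#   default_locale('LC_MESSAGES')  # -> 'fr_FR'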
lgpl-3.0
-461,203,631,313,478,300
-3,869,352,477,485,033,000
30.566038
78
0.651796
false
lnielsen/invenio
invenio/legacy/pdfchecker/arxiv.py
3
16346
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""
ArXiv Pdf Checker Task

Checks arxiv records for missing pdfs and downloads them from arXiv
"""

import os
import time
import re
from datetime import datetime
from tempfile import NamedTemporaryFile
from xml.dom import minidom
import socket

from invenio.legacy.bibdocfile.cli import bibupload_ffts
from invenio.legacy.docextract.task import store_last_updated, \
    fetch_last_updated
from invenio.utils.shell import split_cli_ids_arg
from invenio.legacy.dbquery import run_sql
from invenio.legacy.bibsched.bibtask import task_low_level_submission
from invenio.legacy.refextract.api import record_has_fulltext, \
    check_record_for_refextract
from invenio.legacy.bibsched.bibtask import task_init, \
    write_message, \
    task_update_progress, \
    task_get_option, \
    task_set_option, \
    task_sleep_now_if_required
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.config import CFG_VERSION, \
    CFG_TMPSHAREDDIR, \
    CFG_TMPDIR, \
    CFG_ARXIV_URL_PATTERN
# Help message is the usage() print out of how to use Refextract
from invenio.legacy.docextract.record import get_record
from invenio.legacy.bibdocfile.api import BibRecDocs, \
    calculate_md5
from invenio.legacy.oaiharvest import utils as oai_harvest_daemon
from invenio.utils.filedownload import (download_external_url,
                                        InvenioFileDownloadError)


NAME = 'arxiv-pdf-checker'
ARXIV_VERSION_PATTERN = re.compile(ur'v\d$', re.UNICODE)

STATUS_OK = 'ok'
STATUS_MISSING = 'missing'


class PdfNotAvailable(Exception):
    pass


class FoundExistingPdf(Exception):
    pass


class AlreadyHarvested(Exception):
    def __init__(self, status):
        Exception.__init__(self)
        self.status = status


def build_arxiv_url(arxiv_id, version):
    return CFG_ARXIV_URL_PATTERN % (arxiv_id, version)


def extract_arxiv_ids_from_recid(recid):
    """Extract arxiv # for given recid

    We get them from the record which has this format:
    037__ $9arXiv$arXiv:1010.1111
    """
    record = get_record(recid)
    for report_number_field in record.get('037', []):
        try:
            source = report_number_field.get_subfield_values('9')[0]
        except IndexError:
            continue
        else:
            if source != 'arXiv':
                continue

        try:
            report_number = report_number_field.get_subfield_values('a')[0]
        except IndexError:
            continue
        else:
            # Extract arxiv id
            if report_number.startswith('arXiv'):
                report_number = report_number.split(':')[1]
            if ARXIV_VERSION_PATTERN.search(report_number):
                report_number = report_number[:-2]
            yield report_number


def cb_parse_option(key, value, opts, args):
    """Parse command line options"""
    if args:
        # There should be no standalone arguments
        raise StandardError("Error: Unrecognised argument '%s'." % args[0])

    if key in ('-i', '--id'):
        recids = task_get_option('recids')
        if not recids:
            recids = set()
            task_set_option('recids', recids)
        recids.update(split_cli_ids_arg(value))

    return True


def store_arxiv_pdf_status(recid, status, version):
    """Store pdf harvesting status in the database"""
    valid_status = (STATUS_OK, STATUS_MISSING)
    if status not in valid_status:
        raise ValueError('invalid status %s' % status)

    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    run_sql("""REPLACE INTO bibARXIVPDF (id_bibrec, status,
                                         date_harvested, version)
               VALUES (%s, %s, %s, %s)""",
            (recid, status, now, version))


def fetch_arxiv_pdf_status(recid):
    """Fetch from the database the harvest status of given recid"""
    ret = run_sql("""SELECT status, version FROM bibARXIVPDF
                     WHERE id_bibrec = %s""", [recid])
    return ret and ret[0] or (None, None)


def download_one(recid, version):
    """Download given version of the PDF from arxiv"""
    write_message('fetching %s' % recid)
    for count, arxiv_id in enumerate(extract_arxiv_ids_from_recid(recid)):
        if count != 0:
            write_message("Warning: %s has multiple arxiv #" % recid)
            continue

        url_for_pdf = build_arxiv_url(arxiv_id, version)
        filename_arxiv_id = arxiv_id.replace('/', '_')
        temp_file = NamedTemporaryFile(prefix="arxiv-pdf-checker",
                                       dir=CFG_TMPSHAREDDIR,
                                       suffix="%s.pdf" % filename_arxiv_id)
        write_message('downloading pdf from %s' % url_for_pdf)
        path = download_external_url(url_for_pdf,
                                     temp_file.name,
                                     content_type='pdf')

        # Check if it is not an html not found page
        filesize = os.path.getsize(path)
        if filesize < 25000:
            f = open(path)
            try:
                for line in f:
                    if 'PDF unavailable' in line:
                        raise PdfNotAvailable()
            finally:
                f.close()

        docs = BibRecDocs(recid)
        bibdocfiles = docs.list_latest_files(doctype="arXiv")

        needs_update = False
        try:
            bibdocfile = bibdocfiles[0]
        except IndexError:
            bibdocfile = None
            needs_update = True
        else:
            existing_md5 = calculate_md5(bibdocfile.fullpath)
            new_md5 = calculate_md5(path.encode('utf-8'))
            if new_md5 != existing_md5:
                write_message('md5 differs updating')
                needs_update = True
            else:
                write_message('md5 matches existing pdf, skipping')

        if needs_update:
            if bibdocfiles:
                write_message('adding as new version')
                docs.add_new_version(path, docname=bibdocfile.name)
            else:
                write_message('adding as new file')
                docs.add_new_file(path,
                                  doctype="arXiv",
                                  docname="arXiv:%s" % filename_arxiv_id)
        else:
            raise FoundExistingPdf()


def oai_harvest_query(arxiv_id, prefix='arXivRaw', verb='GetRecord',
                      max_retries=5, repositories=[]):
    """Wrapper of oai_harvest_daemon.oai_harvest_get that handles retries"""
    if not len(repositories):
        from invenio.modules.oaiharvester.models import OaiHARVEST
        repository = OaiHARVEST.query.filter(
            OaiHARVEST.name == 'arxiv'
        ).first().todict()
    else:
        repository = repositories[0]
    harvestpath = os.path.join(CFG_TMPDIR, "arxiv-pdf-checker-oai-")

    def get():
        return oai_harvest_daemon.oai_harvest_get(
            prefix=prefix,
            baseurl=repository['baseurl'],
            harvestpath=harvestpath,
            verb=verb,
            identifier='oai:arXiv.org:%s' % arxiv_id)

    responses = None
    for retry_count in range(1, max_retries + 1):
        try:
            responses = get()
        except (socket.timeout, socket.error):
            write_message('socket error, arxiv is down?')
        else:
            if not responses:
                write_message('no responses from oai server')
            break

        if retry_count <= 2:
            write_message('sleeping for 10s')
            time.sleep(10)
        else:
            write_message('sleeping for 30 minutes')
            time.sleep(1800)

    if responses is None:
        raise Exception('arXiv is down')

    return responses


def fetch_arxiv_version(recid):
    """Query arxiv and extract the version of the pdf from the response"""
    for count, arxiv_id in enumerate(extract_arxiv_ids_from_recid(recid)):
        if count != 0:
            write_message("Warning: %s has multiple arxiv #" % recid)
            continue

        responses = oai_harvest_query(arxiv_id)
        if not responses:
            return None

        # The response is roughly in this format
        # <OAI-PMH>
        #   <GetRecord>
        #     <metadata>
        #       <version version="v1">
        #         <date>Mon, 15 Apr 2013 19:33:21 GMT</date>
        #         <size>609kb</size>
        #         <source_type>D</source_type>
        #       </version>
        #       <version version="v2">
        #         <date>Mon, 25 Apr 2013 19:33:21 GMT</date>
        #         <size>620kb</size>
        #         <source_type>D</source_type>
        #       </version>
        #     </metadata>
        #   </GetRecord>
        # </OAI-PMH>

        # We pass one arxiv id, we are assuming a single response file
        tree = minidom.parse(responses[0])
        version_tags = tree.getElementsByTagName('version')
        if version_tags:
            version = version_tags[-1].getAttribute('version')
        else:
            version = 'v1'

        # We have to remove the responses files manually
        # For some reason the response is written to disk instead of
        # being a string
        for file_path in responses:
            os.unlink(file_path)

        return int(version[1:])


def process_one(recid):
    """Checks given recid for updated pdfs on arxiv"""
    write_message('checking %s' % recid)

    # Last version we have harvested
    harvest_status, harvest_version = fetch_arxiv_pdf_status(recid)

    # Fetch arxiv version
    arxiv_version = fetch_arxiv_version(recid)
    if not arxiv_version:
        msg = 'version information unavailable'
        write_message(msg)
        raise PdfNotAvailable(msg)

    write_message('harvested_version %s' % harvest_version)
    write_message('arxiv_version %s' % arxiv_version)

    if record_has_fulltext(recid) and harvest_version == arxiv_version:
        write_message('our version matches arxiv')
        raise AlreadyHarvested(status=harvest_status)

    # We already tried to harvest this record but failed
    if harvest_status == STATUS_MISSING and harvest_version == arxiv_version:
        raise PdfNotAvailable()

    updated = False

    try:
        download_one(recid, arxiv_version)
    except PdfNotAvailable:
        store_arxiv_pdf_status(recid, STATUS_MISSING, arxiv_version)
        raise
    except FoundExistingPdf:
        store_arxiv_pdf_status(recid, STATUS_OK, arxiv_version)
        raise
    else:
        store_arxiv_pdf_status(recid, STATUS_OK, arxiv_version)
        updated = True

    return updated


def submit_fixmarc_task(recids):
    """Submit a task that synchronizes the 8564 tags

    This should be done right after changing the files attached to a record"""
    field = [{'doctype': 'FIX-MARC'}]
    ffts = {}
    for recid in recids:
        ffts[recid] = field
    bibupload_ffts(ffts, append=False, interactive=False)


def submit_refextract_task(recids):
    """Submit a refextract task if needed"""
    # First filter out recids we cannot safely extract references from
    # (mostly because they have been curated)
    recids = [recid for recid in recids if check_record_for_refextract(recid)]

    if recids:
        recids_str = ','.join(str(recid) for recid in recids)
        task_low_level_submission('refextract', NAME, '-i', recids_str)


def fetch_updated_arxiv_records(date):
    """Fetch all the arxiv records modified since the last run"""

    def check_arxiv(recid):
        """Returns True for arxiv papers"""
        for report_number in get_fieldvalues(recid, '037__9'):
            if report_number == 'arXiv':
                return True
        return False

    # Fetch all records inserted since last run
    sql = "SELECT `id`, `modification_date` FROM `bibrec` " \
          "WHERE `modification_date` >= %s " \
          "ORDER BY `modification_date`"
    records = run_sql(sql, [date.isoformat()])
    records = [(r, mod_date) for r, mod_date in records if check_arxiv(r)]

    # Show all records for debugging purposes
    if task_get_option('verbose') >= 9:
        write_message('recids:', verbose=9)
        for recid, mod_date in records:
            write_message("* %s, %s" % (recid, mod_date), verbose=9)

    task_update_progress("Done fetching %s arxiv record ids" % len(records))
    return records


def task_run_core(name=NAME):
    """Entry point for the arxiv-pdf-checker task"""
    # First gather recids to process
    recids = task_get_option('recids')
    if recids:
        start_date = None
        recids = [(recid, None) for recid in recids]
    else:
        start_date = datetime.now()
        dummy, last_date = fetch_last_updated(name)
        recids = fetch_updated_arxiv_records(last_date)

    updated_recids = set()

    try:
        for count, (recid, dummy) in enumerate(recids):
            if count % 50 == 0:
                msg = 'Done %s of %s' % (count, len(recids))
                write_message(msg)
                task_update_progress(msg)

            # BibTask sleep
            task_sleep_now_if_required(can_stop_too=True)

            write_message('processing %s' % recid, verbose=9)
            try:
                if process_one(recid):
                    updated_recids.add(recid)
                time.sleep(6)
            except AlreadyHarvested:
                write_message('already harvested successfully')
                time.sleep(6)
            except FoundExistingPdf:
                write_message('pdf already attached (matching md5)')
                time.sleep(6)
            except PdfNotAvailable:
                write_message("no pdf available")
                time.sleep(20)
            except InvenioFileDownloadError, e:
                write_message("failed to download: %s" % e)
                time.sleep(20)
    finally:
        # We want to process updated records even in case we are interrupted
        msg = 'Updated %s records' % len(updated_recids)
        write_message(msg)
        task_update_progress(msg)
        write_message(repr(updated_recids))

        # For all updated records, we want to sync the 8564 tags
        # and reextract references
        if updated_recids:
            submit_fixmarc_task(updated_recids)
            submit_refextract_task(updated_recids)

    # Store last run date of the daemon
    # not if it ran on specific recids from the command line with --id
    # but only if it ran on the modified records
    if start_date:
        store_last_updated(0, start_date, name)

    return True


def main():
    """Constructs the refextract bibtask."""
    # Build and submit the task
    task_init(authorization_action='runarxivpdfchecker',
              authorization_msg="Arxiv Pdf Checker Task Submission",
              description="""Daemon that checks if we have the latest version of arxiv PDFs""",
              # get the global help_message variable imported from refextract.py
              help_specific_usage="""
Scheduled (daemon) options:
  -i, --id       Record id to check.

Examples:
   (run a daemon job)
      arxiv-pdf-checker
""",
              version="Invenio v%s" % CFG_VERSION,
              specific_params=("i:", ["id="]),
              task_submit_elaborate_specific_parameter_fnc=cb_parse_option,
              task_run_fnc=task_run_core)
gpl-2.0
8,429,368,975,728,366,000
-5,141,327,201,293,246,000
33.412632
89
0.592071
false
Red-M/CloudBot-legacy
util/http.py
7
3042
# convenience wrapper for urllib2 & friends

import cookielib
import json
import urllib
import urllib2
import urlparse

from urllib import quote, quote_plus as _quote_plus

from lxml import etree, html
from bs4 import BeautifulSoup

# used in plugins that import this
from urllib2 import URLError, HTTPError

ua_cloudbot = 'Cloudbot/DEV http://github.com/CloudDev/CloudBot'

ua_firefox = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:17.0) Gecko/17.0' \
             ' Firefox/17.0'
ua_old_firefox = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; ' \
                 'rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6'
ua_internetexplorer = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
ua_chrome = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, ' \
            'like Gecko) Chrome/22.0.1229.79 Safari/537.4'

jar = cookielib.CookieJar()


def get(*args, **kwargs):
    return open(*args, **kwargs).read()


def get_url(*args, **kwargs):
    return open(*args, **kwargs).geturl()


def get_html(*args, **kwargs):
    return html.fromstring(get(*args, **kwargs))


def get_soup(*args, **kwargs):
    return BeautifulSoup(get(*args, **kwargs), 'lxml')


def get_xml(*args, **kwargs):
    return etree.fromstring(get(*args, **kwargs))


def get_json(*args, **kwargs):
    return json.loads(get(*args, **kwargs))


def open(url, query_params=None, user_agent=None, post_data=None,
         referer=None, get_method=None, cookies=False, timeout=None,
         headers=None, **kwargs):
    if query_params is None:
        query_params = {}

    if user_agent is None:
        user_agent = ua_cloudbot

    query_params.update(kwargs)

    url = prepare_url(url, query_params)

    request = urllib2.Request(url, post_data)

    if get_method is not None:
        request.get_method = lambda: get_method

    if headers is not None:
        for header_key, header_value in headers.iteritems():
            request.add_header(header_key, header_value)

    request.add_header('User-Agent', user_agent)

    if referer is not None:
        request.add_header('Referer', referer)

    if cookies:
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    else:
        opener = urllib2.build_opener()

    if timeout:
        return opener.open(request, timeout=timeout)
    else:
        return opener.open(request)


def prepare_url(url, queries):
    if queries:
        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

        query = dict(urlparse.parse_qsl(query))
        query.update(queries)
        query = urllib.urlencode(dict((to_utf8(key), to_utf8(value))
                                      for key, value in query.iteritems()))

        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

    return url


def to_utf8(s):
    if isinstance(s, unicode):
        return s.encode('utf8', 'ignore')
    else:
        return str(s)


def quote_plus(s):
    return _quote_plus(to_utf8(s))


def unescape(s):
    if not s.strip():
        return s
    return html.fromstring(s).text_content()
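# Usage sketch (hedged; the URLs below are illustrative only):
#
#   data = get_json('http://example.com/api', q='test', user_agent=ua_firefox)
#   page = get_soup('http://example.com', cookies=True)
#   text = unescape('&lt;b&gt;hi&lt;/b&gt;')  # -> '<b>hi</b>'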
gpl-3.0
-396,989,083,267,970,750
-8,228,522,322,625,774,000
24.563025
93
0.643984
false
dennisobrien/bokeh
examples/embed/autoload_static.py
5
2050
from jinja2 import Template
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler

from bokeh.embed import autoload_static
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.sampledata.iris import flowers
from bokeh.util.browser import view

template = Template("""
<!doctype html>

<html lang="en">
<head>
  <meta charset="utf-8">
</head>

<body>
  <div>
    The plot embedded below is a standalone plot that was embedded using
    <code>autoload_static</code>. For more information see the
    <a target="_blank" href="https://bokeh.pydata.org/en/latest/docs/user_guide/embed.html#static-data">
    documentation</a>.
  </div>
  {{ script|safe }}
</body>
</html>
""")


class IndexHandler(RequestHandler):
    def initialize(self, script):
        self.script = script

    def get(self):
        self.write(template.render(script=self.script))


# Normally, you might save the .js files to some location on disk, and serve
# them from there. Here we use this request handler, just to make the example
# completely self-contained.
class JSHandler(RequestHandler):
    def initialize(self, js):
        self.js = js

    def get(self):
        self.write(self.js)


def make_plot():
    colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}
    colors = [colormap[x] for x in flowers['species']]

    p = figure(title="Iris Morphology")
    p.xaxis.axis_label = 'Petal Length'
    p.yaxis.axis_label = 'Petal Width'

    p.circle(flowers["petal_length"], flowers["petal_width"],
             color=colors, fill_alpha=0.2, size=10)

    return p


if __name__ == '__main__':
    print('Opening Tornado app with embedded Bokeh plot on http://localhost:8080/')

    js, script = autoload_static(make_plot(), CDN, "embed.js")

    app = Application([
        (r"/", IndexHandler, dict(script=script)),
        (r"/embed.js", JSHandler, dict(js=js))
    ])
    app.listen(8080)

    io_loop = IOLoop.current()
    io_loop.add_callback(view, "http://localhost:8080/")
    io_loop.start()
bsd-3-clause
5,661,425,573,062,970,000
-6,509,743,826,084,400,000
27.472222
104
0.673659
false
aaxelb/SHARE
share/harvesters/org_elife.py
3
3365
import time
import logging

import requests
from django.conf import settings
from furl import furl
from lxml import etree

from share.harvest import BaseHarvester

logger = logging.getLogger(__name__)


class ELifeHarvester(BaseHarvester):
    VERSION = 1

    BASE_DATA_URL = 'https://raw.githubusercontent.com/elifesciences/elife-article-xml/master/{}'
    BASE_URL = 'https://api.github.com/repos/elifesciences/elife-article-xml/commits{}'

    def request(self, *args, **kwargs):
        if settings.GITHUB_API_KEY:
            kwargs.setdefault('headers', {})['Authorization'] = 'token {}'.format(settings.GITHUB_API_KEY)

        while True:
            response = self.requests.get(*args, **kwargs)

            if int(response.headers.get('X-RateLimit-Remaining', 0)) == 0:
                reset = int(response.headers.get('X-RateLimit-Reset', time.time())) - time.time()
                logger.warning('Hit GitHub ratelimit sleeping for %s seconds', reset)
                time.sleep(reset)

            if response.status_code != 403:
                response.raise_for_status()
                return response

    def do_harvest(self, start_date, end_date):
        end_date = end_date.date()
        start_date = start_date.date()

        logger.info("The data for each record must be requested individually - this may take a while... ")

        for sha in self.fetch_commits(start_date, end_date):
            for file_name in self.fetch_file_names(sha):
                if not file_name.endswith('.xml'):
                    continue
                record = self.fetch_xml(file_name)
                # skip files that failed to download; only parsed records
                # can be serialized and yielded
                if record is None:
                    continue
                doc = etree.tostring(record)
                doc_id = record.xpath('//article-id[@*]')[0].text
                yield (doc_id, doc)

    def fetch_commits(self, start_date, end_date):
        page = -1
        url = self.BASE_URL.format('?')

        while True:
            page += 1
            response = self.request(furl(url).set(query_params={
                'since': start_date.isoformat(),
                'until': end_date.isoformat(),
                'page': page,
                'per_page': 100
            }).url)

            commits = response.json()
            for commit in commits:
                if commit.get('sha'):
                    yield commit['sha']

            if len(commits) != 100:
                break

    def fetch_file_names(self, sha):
        page = -1
        url = self.BASE_URL.format('/{}'.format(sha))

        while True:
            page += 1
            response = self.request(furl(url).set(query_params={
                'page': page,
                'per_page': 100
            }).url)

            files = response.json()['files']
            for f in files:
                yield f['filename']

            if len(files) != 100:
                break

    def fetch_xml(self, file_name):
        file_url = furl(self.BASE_DATA_URL.format(file_name))
        # Not using self.requests when getting the file contents because the eLife rate limit (1, 60) does not apply
        resp = requests.get(file_url.url)
        if resp.status_code == 404:
            logger.warning('Could not download file %s', file_name)
            return None
        resp.raise_for_status()
        xml = etree.XML(resp.content)
        return xml
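# Usage sketch (hedged; harvesters are normally driven by the SHARE framework,
# so the constructor argument and the file path below are illustrative):
#
#   harvester = ELifeHarvester(app_config)   # app_config supplied by SHARE
#   tree = harvester.fetch_xml('articles/elife-00003-v1.xml')
#   if tree is not None:
#       print(etree.tostring(tree)[:60])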
apache-2.0
1,997,469,616,869,356,500
-5,520,436,306,934,119,000
31.355769
116
0.547994
false
angryrancor/kivy
examples/audio/main.py
40
2207
'''
Audio example
=============

This example plays sounds of different formats. You should see a grid of
buttons labelled with filenames. Clicking on the buttons will play, or
restart, each sound. Not all sound formats will play on all platforms.

All the sounds are from the http://woolyss.com/chipmusic-samples.php
"THE FREESOUND PROJECT", Under Creative Commons Sampling Plus 1.0 License.
'''

import kivy
kivy.require('1.0.8')

from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.core.audio import SoundLoader
from kivy.properties import StringProperty, ObjectProperty, NumericProperty
from glob import glob
from os.path import dirname, join, basename


class AudioButton(Button):

    filename = StringProperty(None)
    sound = ObjectProperty(None, allownone=True)
    volume = NumericProperty(1.0)

    def on_press(self):
        if self.sound is None:
            self.sound = SoundLoader.load(self.filename)
        # stop the sound if it's currently playing
        if self.sound.status != 'stop':
            self.sound.stop()
        self.sound.volume = self.volume
        self.sound.play()

    def release_audio(self):
        if self.sound:
            self.sound.stop()
            self.sound.unload()
            self.sound = None

    def set_volume(self, volume):
        self.volume = volume
        if self.sound:
            self.sound.volume = volume


class AudioBackground(BoxLayout):
    pass


class AudioApp(App):

    def build(self):
        root = AudioBackground(spacing=5)

        for fn in glob(join(dirname(__file__), '*.wav')):
            btn = AudioButton(
                text=basename(fn[:-4]).replace('_', ' '), filename=fn,
                size_hint=(None, None), halign='center',
                size=(128, 128), text_size=(118, None))
            root.ids.sl.add_widget(btn)

        return root

    def release_audio(self):
        for audiobutton in self.root.ids.sl.children:
            audiobutton.release_audio()

    def set_volume(self, value):
        for audiobutton in self.root.ids.sl.children:
            audiobutton.set_volume(value)


if __name__ == '__main__':
    AudioApp().run()
mit
-3,061,187,901,394,042,000
5,910,607,356,567,684,000
26.5875
75
0.639783
false
lattwood/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
121
10730
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A class to help start/stop the lighttpd server used by layout tests."""

import logging
import os
import time

from webkitpy.layout_tests.servers import http_server_base

_log = logging.getLogger(__name__)


class Lighttpd(http_server_base.HttpServerBase):

    def __init__(self, port_obj, output_dir, background=False, port=None,
                 root=None, run_background=None, additional_dirs=None,
                 layout_tests_dir=None, number_of_servers=None):
        """Args:
          output_dir: the absolute path to the layout test result directory
        """
        # Webkit tests
        http_server_base.HttpServerBase.__init__(self, port_obj, number_of_servers)
        self._name = 'lighttpd'
        self._output_dir = output_dir
        self._port = port
        self._root = root
        self._run_background = run_background
        self._additional_dirs = additional_dirs
        self._layout_tests_dir = layout_tests_dir

        self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)

        if self._port:
            self._port = int(self._port)

        if not self._layout_tests_dir:
            self._layout_tests_dir = self._port_obj.layout_tests_dir()

        self._webkit_tests = os.path.join(self._layout_tests_dir, 'http', 'tests')
        self._js_test_resource = os.path.join(self._layout_tests_dir, 'fast', 'js', 'resources')
        self._media_resource = os.path.join(self._layout_tests_dir, 'media')

        # Self generated certificate for SSL server (for client cert get
        # <base-path>\chrome\test\data\ssl\certs\root_ca_cert.crt)
        self._pem_file = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'httpd2.pem')

        # One mapping where we can get to everything
        self.VIRTUALCONFIG = []

        if self._webkit_tests:
            self.VIRTUALCONFIG.extend(
                # Three mappings (one with SSL) for LayoutTests http tests
                [{'port': 8000, 'docroot': self._webkit_tests},
                 {'port': 8080, 'docroot': self._webkit_tests},
                 {'port': 8443, 'docroot': self._webkit_tests,
                  'sslcert': self._pem_file}])

    def _prepare_config(self):
        base_conf_file = self._port_obj.path_from_webkit_base('Tools',
            'Scripts', 'webkitpy', 'layout_tests', 'servers', 'lighttpd.conf')
        out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf')
        time_str = time.strftime("%d%b%Y-%H%M%S")
        access_file_name = "access.log-" + time_str + ".txt"
        access_log = os.path.join(self._output_dir, access_file_name)
        log_file_name = "error.log-" + time_str + ".txt"
        error_log = os.path.join(self._output_dir, log_file_name)

        # Write out the config
        base_conf = self._filesystem.read_text_file(base_conf_file)

        # FIXME: This should be re-worked so that this block can
        # use with open() instead of a manual file.close() call.
        f = self._filesystem.open_text_file_for_writing(out_conf_file)
        f.write(base_conf)

        # Write out our cgi handlers. Run perl through env so that it
        # processes the #! line and runs perl with the proper command
        # line arguments. Emulate apache's mod_asis with a cat cgi handler.
        f.write(('cgi.assign = ( ".cgi"  => "/usr/bin/env",\n'
                 '               ".pl"   => "/usr/bin/env",\n'
                 '               ".asis" => "/bin/cat",\n'
                 '               ".php"  => "%s" )\n\n') %
                self._port_obj._path_to_lighttpd_php())

        # Setup log files
        f.write(('server.errorlog = "%s"\n'
                 'accesslog.filename = "%s"\n\n') % (error_log, access_log))

        # Setup upload folders. Upload folder is to hold temporary upload files
        # and also POST data. This is used to support XHR layout tests that
        # does POST.
        f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir))

        # Setup a link to where the js test templates are stored
        f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') %
                (self._js_test_resource))

        if self._additional_dirs:
            for alias, path in self._additional_dirs.iteritems():
                f.write(('alias.url += ( "%s" => "%s" )\n\n') % (alias, path))

        # Setup a link to where the media resources are stored.
        f.write(('alias.url += ( "/media-resources" => "%s" )\n\n') %
                (self._media_resource))

        # dump out of virtual host config at the bottom.
        if self._root:
            if self._port:
                # Have both port and root dir.
                mappings = [{'port': self._port, 'docroot': self._root}]
            else:
                # Have only a root dir - set the ports as for LayoutTests.
                # This is used in ui_tests to run http tests against a browser.

                # default set of ports as for LayoutTests but with a
                # specified root.
                mappings = [{'port': 8000, 'docroot': self._root},
                            {'port': 8080, 'docroot': self._root},
                            {'port': 8443, 'docroot': self._root,
                             'sslcert': self._pem_file}]
        else:
            mappings = self.VIRTUALCONFIG

        for mapping in mappings:
            ssl_setup = ''
            if 'sslcert' in mapping:
                ssl_setup = ('  ssl.engine = "enable"\n'
                             '  ssl.pemfile = "%s"\n' % mapping['sslcert'])

            f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n'
                     '  server.document-root = "%s"\n' +
                     ssl_setup +
                     '}\n\n') % (mapping['port'], mapping['docroot']))
        f.close()

        executable = self._port_obj._path_to_lighttpd()
        module_path = self._port_obj._path_to_lighttpd_modules()
        start_cmd = [executable,
                     # Newly written config file
                     '-f', os.path.join(self._output_dir, 'lighttpd.conf'),
                     # Where it can find its module dynamic libraries
                     '-m', module_path]

        if not self._run_background:
            start_cmd.append(# Don't background
                             '-D')

        # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the
        # bug that mod_alias.so loads it from the hard coded path.
        if self._port_obj.host.platform.is_mac():
            tmp_module_path = '/tmp/lighttpd/lib'
            if not self._filesystem.exists(tmp_module_path):
                self._filesystem.maybe_make_directory(tmp_module_path)
            lib_file = 'liblightcomp.dylib'
            self._filesystem.copyfile(self._filesystem.join(module_path, lib_file),
                                      self._filesystem.join(tmp_module_path, lib_file))

        self._start_cmd = start_cmd
        self._env = self._port_obj.setup_environ_for_server('lighttpd')
        self._mappings = mappings

    def _remove_stale_logs(self):
        # Sometimes logs are open in other processes but they should clear eventually.
        for log_prefix in ('access.log-', 'error.log-'):
            try:
                self._remove_log_files(self._output_dir, log_prefix)
            except OSError, e:
                _log.warning('Failed to remove old %s %s files' % (self._name, log_prefix))

    def _spawn_process(self):
        _log.debug('Starting %s server, cmd="%s"' % (self._name, self._start_cmd))
        process = self._executive.popen(self._start_cmd, env=self._env, shell=False, stderr=self._executive.PIPE)
        pid = process.pid
        self._filesystem.write_text_file(self._pid_file, str(pid))
        return pid

    def _stop_running_server(self):
        # FIXME: It would be nice if we had a cleaner way of killing this process.
        # Currently we throw away the process object created in _spawn_process,
        # since there doesn't appear to be any way to kill the server any more
        # cleanly using it than just killing the pid, and we need to support
        # killing a pid directly anyway for run-webkit-httpd and run-webkit-websocketserver.
        self._wait_for_action(self._check_and_kill)
        if self._filesystem.exists(self._pid_file):
            self._filesystem.remove(self._pid_file)

    def _check_and_kill(self):
        if self._executive.check_running_pid(self._pid):
            host = self._port_obj.host
            if host.platform.is_win() and not host.platform.is_cygwin():
                # FIXME: https://bugs.webkit.org/show_bug.cgi?id=106838
                # We need to kill all of the child processes as well as the
                # parent, so we can't use executive.kill_process().
                #
                # If this is actually working, we should figure out a clean API.
                self._executive.run_command(["taskkill.exe", "/f", "/t", "/pid", self._pid], error_handler=self._executive.ignore_error)
            else:
                self._executive.kill_process(self._pid)
            return False
        return True
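# Shape of the mapping dicts consumed by _prepare_config() (a hedged sketch;
# the docroot path below is illustrative):
#
#   {'port': 8443,
#    'docroot': '/path/to/LayoutTests/http/tests',
#    'sslcert': '/path/to/httpd2.pem'}   # 'sslcert' only for the SSL port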
bsd-3-clause
7,696,553,028,892,861,000
-7,379,401,141,149,554,000
46.061404
136
0.590867
false
jiangzhuo/kbengine
kbe/res/scripts/common/Lib/test/test_email/test_utils.py
87
5422
import datetime
from email import utils
import test.support
import time
import unittest
import sys
import os.path


class DateTimeTests(unittest.TestCase):

    datestring = 'Sun, 23 Sep 2001 20:10:55'
    dateargs = (2001, 9, 23, 20, 10, 55)
    offsetstring = ' -0700'
    utcoffset = datetime.timedelta(hours=-7)
    tz = datetime.timezone(utcoffset)
    naive_dt = datetime.datetime(*dateargs)
    aware_dt = datetime.datetime(*dateargs, tzinfo=tz)

    def test_naive_datetime(self):
        self.assertEqual(utils.format_datetime(self.naive_dt),
                         self.datestring + ' -0000')

    def test_aware_datetime(self):
        self.assertEqual(utils.format_datetime(self.aware_dt),
                         self.datestring + self.offsetstring)

    def test_usegmt(self):
        utc_dt = datetime.datetime(*self.dateargs,
                                   tzinfo=datetime.timezone.utc)
        self.assertEqual(utils.format_datetime(utc_dt, usegmt=True),
                         self.datestring + ' GMT')

    def test_usegmt_with_naive_datetime_raises(self):
        with self.assertRaises(ValueError):
            utils.format_datetime(self.naive_dt, usegmt=True)

    def test_usegmt_with_non_utc_datetime_raises(self):
        with self.assertRaises(ValueError):
            utils.format_datetime(self.aware_dt, usegmt=True)

    def test_parsedate_to_datetime(self):
        self.assertEqual(
            utils.parsedate_to_datetime(self.datestring + self.offsetstring),
            self.aware_dt)

    def test_parsedate_to_datetime_naive(self):
        self.assertEqual(
            utils.parsedate_to_datetime(self.datestring + ' -0000'),
            self.naive_dt)


class LocaltimeTests(unittest.TestCase):

    def test_localtime_is_tz_aware_daylight_true(self):
        test.support.patch(self, time, 'daylight', True)
        t = utils.localtime()
        self.assertIsNotNone(t.tzinfo)

    def test_localtime_is_tz_aware_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t = utils.localtime()
        self.assertIsNotNone(t.tzinfo)

    def test_localtime_daylight_true_dst_false(self):
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=-1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    def test_localtime_daylight_false_dst_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=-1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    def test_localtime_daylight_true_dst_true(self):
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    def test_localtime_daylight_false_dst_true(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)

    @test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
    def test_localtime_epoch_utc_daylight_true(self):
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(1990, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        t2 = t0 - datetime.timedelta(hours=5)
        t2 = t2.replace(tzinfo=datetime.timezone(datetime.timedelta(hours=-5)))
        self.assertEqual(t1, t2)

    @test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
    def test_localtime_epoch_utc_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(1990, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        t2 = t0 - datetime.timedelta(hours=5)
        t2 = t2.replace(tzinfo=datetime.timezone(datetime.timedelta(hours=-5)))
        self.assertEqual(t1, t2)

    def test_localtime_epoch_notz_daylight_true(self):
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(1990, 1, 1)
        t1 = utils.localtime(t0)
        t2 = utils.localtime(t0.replace(tzinfo=None))
        self.assertEqual(t1, t2)

    def test_localtime_epoch_notz_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(1990, 1, 1)
        t1 = utils.localtime(t0)
        t2 = utils.localtime(t0.replace(tzinfo=None))
        self.assertEqual(t1, t2)

    # XXX: Need a more robust test for Olson's tzdata
    @unittest.skipIf(sys.platform.startswith('win'),
                     "Windows does not use Olson's TZ database")
    @unittest.skipUnless(os.path.exists('/usr/share/zoneinfo') or
                         os.path.exists('/usr/lib/zoneinfo'),
                         "Can't find the Olson's TZ database")
    @test.support.run_with_tz('Europe/Kiev')
    def test_variable_tzname(self):
        t0 = datetime.datetime(1984, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        self.assertEqual(t1.tzname(), 'MSK')
        t0 = datetime.datetime(1994, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        self.assertEqual(t1.tzname(), 'EET')


if __name__ == '__main__':
    unittest.main()
lgpl-3.0
-2,549,205,622,114,614,000
-2,541,072,912,509,192,000
37.728571
81
0.627444
false