Dataset columns: rem (string, lengths 0-322k), add (string, lengths 0-2.05M), context (string, lengths 8-228k). Each row below is a removed snippet, its replacement, and the surrounding (often truncated) context.
("type", "B"),
("mode", "I"), ("__pad0", "4x"),
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
("links", "I"),)
("nlink", "I"), ("uid", "I"), ("gid", "I"), ("rdev", "Q"), ("atime", "I"), ("atimens", "I"), ("mtime", "I"), ("mtimens", "I"), ("ctime", "I"), ("ctimens", "I"), ("__pad1", "68x"),) def get_st_times(self): return dict(( ("st_" + a, self[a] + self[a + "ns"] / (10 ** 9)) for a in (b + "time" for b in "amc"))) assert Ino...
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
assert BootRecord.size <= 256
assert BootRecord.size <= 256, "This will clobber the root directory entry"
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0)
def __init__(self, path): self.f = open(path, "r+b")
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0) br = BootRecord.from_fileobj(self.f) assert br["ident"].rstrip("\0") == "clfs", repr(br["ident"]) assert br["version"] == 1 self.cluster_size = br["clrsize"] self.mast...
self.filesystem_cluster_count = \ self.master_region_cluster_count + \
@property def filesystem_cluster_count(self): return self.master_region_cluster_count + \
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0) br = BootRecord.from_fileobj(self.f) assert br["ident"].rstrip("\0") == "clfs", repr(br["ident"]) assert br["version"] == 1 self.cluster_size = br["clrsize"] self.mast...
if inode_struct["type"] != TYPE_DIRECTORY:
if not S_ISDIR(inode_struct["mode"]):
def read_directory(self, inode): inode_struct = self.get_inode_struct(inode) if inode_struct["type"] != TYPE_DIRECTORY: raise ClfsError(ENOTDIR) offset = 0 while offset < inode_struct["size"]: dirent = DirEntry.unpack(self.read_inode_data( inode, offset, DirEntry.size)) if dirent["name"].rstrip("\0"): yield dirent offs...
for dirent in self.read_directory(cur_dirent["inode"]):
for dirent in self.read_directory(cur_dirent["ino"]):
def get_dir_entry(self, path): for name in path.split("/"): if not name: cur_dirent = self.get_root_dir_entry() else: # pdb.set_trace() for dirent in self.read_directory(cur_dirent["inode"]): if dirent["name"].rstrip("\0") == name: cur_dirent = dirent break else: raise ClfsError(ENOENT) return cur_dirent
def iter_allocation_table(self): self.seek_cluster(self.master_region_cluster_count) for index in xrange(self.data_region_cluster_count): yield struct.unpack("I", self.f.read(4))[0] def claim_free_cluster(self): self.seek_cluster(self.master_region_cluster_count) for index in xrange(self.data_region_cluster_count): cl...
dirent_for_path = get_dir_entry
def get_dir_entry(self, path): for name in path.split("/"): if not name: cur_dirent = self.get_root_dir_entry() else: # pdb.set_trace() for dirent in self.read_directory(cur_dirent["inode"]): if dirent["name"].rstrip("\0") == name: cur_dirent = dirent break else: raise ClfsError(ENOENT) return cur_dirent
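The get_dir_entry rows above walk a path one component at a time: the empty component produced by the leading "/" seeds the walk at the root entry, each later name is compared against NUL-padded directory entries, and the for/else raises ENOENT when nothing matches. A minimal sketch of the same walk over plain nested dicts (the NUL-padding and inode plumbing are omitted here):

from errno import ENOENT

def lookup(root, path):
    # root: nested {"name": {...children...}} dicts standing in for
    # directory entries; illustrative only, not the real on-disk format
    cur = root
    for name in path.split("/"):
        if not name:          # leading "/" -> start the walk at the root
            cur = root
        else:
            for child_name, child in cur.items():
                if child_name == name:
                    cur = child
                    break
            else:             # for/else: loop finished without a break
                raise OSError(ENOENT, "No such file or directory", path)
    return cur

tree = {"usr": {"bin": {"python": {}}}}
print(lookup(tree, "/usr/bin"))  # {'python': {}}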
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode)
def write_inode_data(self, ino, offset, buffer): inode_struct = self.get_inode_struct(ino)
#def write(self, path, buf, offset):
inode,
ino,
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode) data_offset = inode_struct.size write_size, new_size = self.write_to_chain( inode, inode_struct["size"] + data_offset, offset + data_offset, buffer) assert write_size == len(buffer), write_size expected_size = data_offset + m...
inode, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(inode)["size"] == new_size - data_offset
ino, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(ino)["size"] == new_size - data_offset
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode) data_offset = inode_struct.size write_size, new_size = self.write_to_chain( inode, inode_struct["size"] + data_offset, offset + data_offset, buffer) assert write_size == len(buffer), write_size expected_size = data_offset + m...
def create_node(self, path, type):
def create_node(self, path, mode): """Create and allocate a new inode, updating relevant structures elsewhere"""
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]...
parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster...
create_rootdir = bool( (not node_basename) and (node_dirname == parent_dirname == "/")) if create_rootdir: assert S_ISDIR(mode) new_inode = Inode(size=0, uid=0, gid=0, rdev=0, mode=mode) sec, nsec = time_as_posix_spec(time()) for field_name in ("atime", "mtime", "ctime"): new_inode[field_name] = sec for field_name in ...
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]...
assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_...
if create_rootdir: self.seek_root_dirent() self.f.write(new_dirent.pack()) else: assert new_dirent.size == self.write_inode_data( ino=parent_ino, offset=self.get_inode_struct(parent_ino)["size"], buffer=new_dirent.pack(),) def generate_bootrecord(device_size):
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]...
gtk.STOCK_NEW, gtk.RESPONSE_OK,
gtk.STOCK_OK, gtk.RESPONSE_OK,
def add_share(self, button): namelbl = gtk.Label("Share name:")
subprocess.check_call(["xdg-open", url])
try: subprocess.check_call(["xdg-open", url]) except subprocess.CalledProcessError as exc: print exc else: return
def browse_peer_by_url(self, url): """Open the given peer URL with the most natural file manager for the current platform that we can find""" import os, subprocess if os.name == "nt": # this is how it's done programmatically? except that it won't invoke # the default file manager (explorer) on winders #ShellExecute(Non...
subprocess.check_call(["nautilus", url])
try: subprocess.check_call(["nautilus", url]) except subprocess.CalledProcessError as exc: print exc else: return
def browse_peer_by_url(self, url): """Open the given peer URL with the most natural file manager for the current platform that we can find""" import os, subprocess if os.name == "nt": # this is how it's done programmatically? except that it won't invoke # the default file manager (explorer) on winders #ShellExecute(Non...
cluster_read_size = min(read_size, self.cluster_size - read_offset)
cluster_read_size = min(read_size, self.cluster_size - read_offset, chain_size)
def read_from_chain(self, first_cluster, chain_size, read_offset, read_size): if chain_size <= 0: return "" #assert read_offset + read_size <= chain_size, (read_offset, read_size, chain_size) if read_offset > self.cluster_size: return self.read_from_chain( self.next_cluster(first_cluster), chain_size - self.cluster_siz...
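The read_from_chain fix above adds chain_size as a third clamp, so a read can never run past the logical end of the cluster chain. A self-contained sketch of that clamping over an in-memory list of fixed-size clusters (CLUSTER_SIZE and the flat list are stand-ins for the real on-disk layout):

# Read `size` bytes at `offset` from a chain of fixed-size clusters,
# clamping each step to the cluster boundary and to the chain's tail.
CLUSTER_SIZE = 8

def read_chain(clusters, chain_size, offset, size):
    out = []
    while size > 0 and offset < chain_size:
        idx, within = divmod(offset, CLUSTER_SIZE)
        # clamp to: bytes requested, bytes left in this cluster,
        # and bytes left in the chain as a whole
        n = min(size, CLUSTER_SIZE - within, chain_size - offset)
        out.append(clusters[idx][within:within + n])
        offset += n
        size -= n
    return b"".join(out)

clusters = [b"AAAAAAAA", b"BBBBBBBB", b"CCCC\0\0\0\0"]
print(read_chain(clusters, chain_size=20, offset=6, size=10))  # b'AABBBBBBBB'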
elif a.name in nopub_actions:
if a.name in nopub_actions:
def trans_include(repo_uri, fargs, transaction=None): basedirs = [] timestamp_files = [] error_occurred = False opts, pargs = getopt.getopt(fargs, "d:T:") for opt, arg in opts: if opt == "-d": basedirs.append(arg) elif opt == "-T": timestamp_files.append(arg) if transaction == None: try: trans_id = os.environ["PKG_TR...
self.pkgsend_bulk(durl, self.example_pkg10)
self.pkgsend_bulk(durl, self.dup_lines_pkg10)
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("search 'example_pkg:set:pkg.fmri:'")
self.pkg("search 'dup_lines:set:pkg.fmri:'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("search -o pkg.shortfmri '*6*'")
self.pkg("search -o pkg.shortfmri 'a'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("install example_pkg") self.pkg("search -l 'example_pkg:set:pkg.fmri:'")
self.pkg("install dup_lines") self.pkg("search -l 'dup_lines:set:pkg.fmri:'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("search -l -o pkg.shortfmri,action.key '*6*'") expected_number_of_lines = 9 if "pkg.fmri" in self.output: expected_number_of_lines += 1 self.debug("Expected number of lines:%s" % expected_number_of_lines) self.assertEqual(len(self.output.splitlines()), expected_number_of_lines)
self.pkg("search -l -o pkg.shortfmri,action.key 'a'") self.assertEqual(len(self.output.splitlines()), 4)
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
("show_on_expected_fail", 'f',
("showonexpectedfail", 'f',
def run(self): # nuke everything print("deleting " + dist_dir) shutil.rmtree(dist_dir, True) print("deleting " + build_dir) shutil.rmtree(build_dir, True) print("deleting " + root_dir) shutil.rmtree(root_dir, True) print("deleting " + pkgs_dir) shutil.rmtree(pkgs_dir, True) print("deleting " + extern_dir) shutil.rmtree...
self.show_on_expected_fail = 0
self.showonexpectedfail = 0
def initialize_options(self): self.only = "" self.baselinefile = "" self.verbosemode = 0 self.parseable = 0 self.genbaseline = 0 self.timing = 0 self.coverage = 0 self.stoponerr = 0 self.debugoutput = 0 self.show_on_expected_fail = 0 self.startattest = "" self.archivedir = ""
remote_publishers=True)
remote_prefix=True)
def main_func(): global cache_dir, download_start, xport, xport_cfg all_timestamps = False all_versions = False keep_compressed = False list_newest = False recursive = False src_uri = None target = None incoming_dir = None src_pub = None targ_pub = None temp_root = misc.config_temp_root() gettext.install("pkg", "/usr...
dest_xport, dest_xport_cfg, remote_publishers=True)
dest_xport, dest_xport_cfg, remote_prefix=True)
def main_func(): global cache_dir, download_start, xport, xport_cfg all_timestamps = False all_versions = False keep_compressed = False list_newest = False recursive = False src_uri = None target = None incoming_dir = None src_pub = None targ_pub = None temp_root = misc.config_temp_root() gettext.install("pkg", "/usr...
dest_xport, dest_xport_cfg, remote_publishers=True)
dest_xport, dest_xport_cfg, remote_prefix=True)
def get_basename(pfmri): open_time = pfmri.get_timestamp() return "%d_%s" % \ (calendar.timegm(open_time.utctimetuple()), urllib.quote(str(pfmri), ""))
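get_basename builds a stable, filesystem-safe name for a package by prefixing the epoch seconds of its publication timestamp and percent-encoding the FMRI text. A re-creation with a plain datetime standing in for pfmri.get_timestamp(); note the original is Python 2 (urllib.quote), spelled urllib.parse.quote in Python 3:

import calendar
import urllib.parse
from datetime import datetime

def basename_for(name, open_time):
    # epoch seconds from a UTC datetime, then a fully-quoted name
    # ("" as the safe set means even "/" gets escaped)
    return "%d_%s" % (calendar.timegm(open_time.utctimetuple()),
                      urllib.parse.quote(name, ""))

print(basename_for("pkg://example/foo@1.0", datetime(2010, 1, 2, 3, 4, 5)))
# 1262401445_pkg%3A%2F%2Fexample%2Ffoo%401.0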
publisher_info = self._get_getpublisherinfo(pub,
publisher_info = self._get_publisherinfo(pub,
def get_publisherinfo(self, pub, ccancel=None): """Given a publisher pub, return the publisher/0 information in a StringIO object."""
if not_these_pkgs: newpkgs = set(pkgdict[name] for name in pkgdict.keys() if name not in not_these_pkgs ) else: newpkgs = set(pkgdict.values())
def main_func(): global file_repo global def_branch global def_repo global def_vers global extra_entire_contents global just_these_pkgs global not_these_pkgs global nopublish global publish_all global print_pkg_names global reference_uris global show_debug global wos_path global not_these_consolidations global curpkg ...
pub_name = "opensolaris.org"
def get_smf_packages(server_url, manifest_locations, filter): """ Performs a search against server_url looking for packages which contain SMF manifests, returning a list of those pfmris """ dir = os.getcwd() tracker = pkg.client.progress.QuietProgressTracker() image_dir = tempfile.mkdtemp("", "pkg_importer_smfsearch."...
is_zone, facets=pkg.facet.Facets(), force=False, prefix=pub_name,
is_zone, facets=pkg.facet.Facets(), force=False,
def get_smf_packages(server_url, manifest_locations, filter): """ Performs a search against server_url looking for packages which contain SMF manifests, returning a list of those pfmris """ dir = os.getcwd() tracker = pkg.client.progress.QuietProgressTracker() image_dir = tempfile.mkdtemp("", "pkg_importer_smfsearch."...
self.write(cursor, user, id, {db_field: value}, context=context)
self.write(cursor, user, id, { db_field: datetime.datetime.combine(value, datetime.time()), }, context=context)
def set_function_fields(self, cursor, user, id, name, value, arg, context=None): request_obj = self.pool.get('res.request') req_ref_obj = self.pool.get('res.request.reference')
date = datetime(
date = datetime.datetime(
def add_minutes(self, cursor, user, company, date, minutes, context=None): minutes = int(round(minutes)) minutes = date.minute + minutes
date = datetime(
date = datetime.datetime(
def add_hours(self, cursor, user, company, date, hours, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
date += timedelta(days= -date.weekday() + intfloor(days))
date += datetime.timedelta(days= -date.weekday() + intfloor(days))
def add_days(self, cursor, user, company, date, days, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
date += timedelta(days= 7 * intfloor(weeks))
date += datetime.timedelta(days= 7 * intfloor(weeks))
def add_weeks(self, cursor, user, company, date, weeks, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
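The add_minutes/add_hours/add_days/add_weeks rows all fix one bug: the module was imported as "import datetime", so the bare names datetime(...) and timedelta(...) do not exist and every use must be qualified. A small illustration of the two import styles:

# Style 1: import the module; qualify every name.
import datetime
d1 = datetime.datetime(2011, 1, 3) + datetime.timedelta(days=7)

# Style 2: import the names; use them bare.
from datetime import datetime, timedelta
d2 = datetime(2011, 1, 3) + timedelta(days=7)

assert d1 == d2
# Mixing the styles (import datetime, then calling datetime(...))
# raises TypeError: 'module' object is not callable.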
cursor = Transation().cursor
cursor = Transaction().cursor
def get_function_fields(self, ids, names): ''' Function to compute function fields
self.assertRaises(Exception, test_view('project_plan'))
test_view('project_plan')
def test0005views(self): ''' Test views. ''' self.assertRaises(Exception, test_view('project_plan'))
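This row repairs a classic unittest mistake: assertRaises(Exception, test_view('project_plan')) evaluates test_view('project_plan') first, so any exception propagates before assertRaises runs, and on success assertRaises is handed the return value rather than a callable. The fix here simply calls the function directly; the general pattern is to pass the callable and its arguments separately:

import unittest

def boom(x):
    raise ValueError(x)

class Demo(unittest.TestCase):
    def test_wrong(self):
        # WRONG: boom("x") runs first and its ValueError propagates,
        # so the test errors out before assertRaises ever sees it.
        self.assertRaises(ValueError, boom("x"))

    def test_right(self):
        # RIGHT: pass the callable and its args separately...
        self.assertRaises(ValueError, boom, "x")
        # ...or (Python 2.7+/3) use it as a context manager.
        with self.assertRaises(ValueError):
            boom("x")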
db_field: datetime.datetime.combine(value, datetime.time()),
db_field: value \ and datetime.datetime.combine(value, datetime.time()) \ or False,
def set_function_fields(self, cursor, user, ids, name, value, context=None): request_obj = self.pool.get('res.request') req_ref_obj = self.pool.get('res.request.reference')
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.jobStart(), self.jobEnd(), self.getTime(), self.getCost(), self.getState()))
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
def log_job_closed(self): log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.jobStart(), self.jobEnd(), self.getTime(), self.getCost(), self.getState()))
self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
if self.__spool is not None: self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
def cleanUp(self): """cleans up the task, i.e. removes the task's spool directory""" self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
return self.__fsm.getState().getName()
try: return self.__fsm.getState().getName() except: return "Terminated"
def getState(self): return self.__fsm.getState().getName()
log.debug("Event '%s' not found." % event)
log.error("Event '%s' not found." % event)
def do_Event(self, event, reqCtxt): log.debug("JOB '%s' run in state '%s' event '%s'" % (self.ticket(), self.__fsm.getState().getName(), event)) if hasattr(self.__fsm, event): log.debug("Run event '%s'" % event) getattr(self.__fsm, event)(self, reqCtxt) else: log.debug("Event '%s' not found." % event) raise CommandFai...
log.debug("=========>do_Event '%s' run in state '%s' even [%s]" %
log.debug("=========>do_Event '%s' run in state '%s' event [%s]" %
def do_EventByMap(self, eventkey, reqCtxt): eventMap = { "Pending:Reserved" : 1, "Pending:Confirmed" : "confirm", "Running:Stage-In" : "runJob_StageIn", "Running:Instance-Starting" : "", "Running:Executing" : "runJob_Execute", "Running:Stage-Out" : "runJob_StageOut", "Running:Instance-Stopping" : "", "...
log.debug("FAILED: %s." % e) else: log.debug("Event '%s' not found." % event)
log.error("FAILED: %s." % e) else: log.error("Event '%s' not found." % event)
def do_EventByMap(self, eventkey, reqCtxt): eventMap = { "Pending:Reserved" : 1, "Pending:Confirmed" : "confirm", "Running:Stage-In" : "runJob_StageIn", "Running:Instance-Starting" : "", "Running:Executing" : "runJob_Execute", "Running:Stage-Out" : "runJob_StageOut", "Running:Instance-Stopping" : "", "...
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
log.debug("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState())) return True
def log_job_closed(self): log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
if (values[0] > bins[0]):
if (values[0] >= bins[0]):
def inline_as_py(values, bins=10, range=None): # define bins, size N if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if not np.iterable(bins): if range is None: range = (values.min(), values.max()) mn, mx = [mi+0.0 for mi in range] if mn == ...
while (bins[lb+1] < values[rb]):
while (bins[lb+1] <= values[rb]):
def inline_as_py(values, bins=10, range=None): # define bins, size N if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if not np.iterable(bins): if range is None: range = (values.min(), values.max()) mn, mx = [mi+0.0 for mi in range] if mn == ...
if bins[-1] == values[rb]: count[-1] += 1
def inline_as_py(values, bins=10, range=None): # define bins, size N if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if not np.iterable(bins): if range is None: range = (values.min(), values.max()) mn, mx = [mi+0.0 for mi in range] if mn == ...
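The three inline_as_py rows adjust bin-edge comparisons so each value lands in exactly one bin. The convention they converge on is the one numpy.histogram documents: every bin is half-open [a, b) except the last, which is closed [a, b], which is why the scan needs a special case for values equal to the top edge:

import numpy as np

values = np.array([0.0, 0.5, 1.0, 1.0])
counts, edges = np.histogram(values, bins=2, range=(0.0, 1.0))
print(edges)   # [0.  0.5 1. ]
print(counts)  # [1 3]: 0.0 falls in [0.0, 0.5); 0.5 and both 1.0s land
               # in the last bin [0.5, 1.0], closed at the top edge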
outputs = project_dirs[project]
outputs = project_dirs[project] + '/outputs'
def make_today_dir(project='tuning_change'): outputs = project_dirs[project] today = outputs + '/' + time.strftime('%y%m%d') if not os.path.exists(today): os.mkdir(today) return today
def ExpectStanza(stanza, name): if stanza.tagName != name: raise UnexpectedXml(stanza) def ExpectIq(stanza, type, name): ExpectStanza(stanza, 'iq') if (stanza.getAttribute('type') != type or stanza.firstChild.tagName != name): raise UnexpectedXml(stanza) def GetStanzaId(stanza): return stanza.getAttribute('id') def ...
self.stanzas.append(stanza.toxml()) def testBasic(self): parser = xmppserver.StanzaParser(self) parser.FeedString('<foo') self.assertEqual(len(self.stanzas), 0) parser.FeedString('/><bar></bar>') self.assertEqual(self.stanzas[0], '<foo/>') self.assertEqual(self.stanzas[1], '<bar/>') def testStream(self): parser = xmp...
def FeedStanza(self, stanza): """Inspects the given stanza and changes the handshake state if needed.
return AddrString(self._addr) def collect_incoming_data(self, data): self._parser.FeedString(data) def found_terminator(self): asynchat.async_chat.found_terminator(self) def handle_close(self): print "Closing connection to %s" % self self._connections.discard(self) del self._socket_map[self.fileno()] def FeedS...
username_domain = username auth_string = base64.b64encode('\0%s\0bar' % username_domain) auth_xml = xmppserver.ParseXml('<auth>%s</auth>'% auth_string) handshake_task.FeedStanza(auth_xml) self.assertEqual(self.data_received, 3) stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>') stream_xml.setAttri...
def __str__(self): if self._jid: return str(self._jid) else: return AddrString(self._addr)
xmpp_server = xmppserver.XmppServer(socket_map, ('127.0.0.1', 5222)) asyncore.loop(30.0, False, socket_map) """ def __init__(self, socket_map, addr): asyncore.dispatcher.__init__(self, None, socket_map) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.set_reuse_addr() self.bind(addr) self.listen(5) self._so...
self.assertEqual(len(socket_map), 0) xmpp_server = FakeXmppServer(socket_map, ('', 0)) self.assertEqual(len(socket_map), 1) xmpp_server.handle_accept() self.assertEqual(len(socket_map), 2) if __name__ == '__main__': unittest.main()
def SendNotification(connections): """Sends a notification to all connections in the given sequence.""" for connection in connections: print 'Sending notification to %s' % connection connection.SendNotification()
def percentile(N, percent, key=lambda x:x): """ Find the percentile of a list of values.
def __init__(self): self.site='UNDEFINED' self.times=[]
@parameter N - is a list of values. Note N MUST BE already sorted. @parameter percent - a float value from 0.0 to 1.0. @parameter key - optional key function to compute value from each element of N.
class desktopui_PageCyclerTests(test.test): version = 1 results = {}
def percentile(N, percent, key=lambda x:x): """ Find the percentile of a list of values. @parameter N - is a list of values. Note N MUST BE already sorted. @parameter percent - a float value from 0.0 to 1.0. @parameter key - optional key function to compute value from each element of N. @return - the percentile of th...
@return - the percentile of the values """ if not N: return None k = (len(N)-1) * percent f = math.floor(k) c = math.ceil(k) if f == c: return key(N[int(k)]) d0 = key(N[int(f)]) * (k-f) d1 = key(N[int(c)]) * (c-k) return d0+d1
def run_page_cycler(self, gtest_filter = ''): assert(gtest_filter != ''), gtest_filter+' cannot be empty!' cmd = ('CR_SOURCE_ROOT=/home/chronos/chromium/src /home/chronos/' 'chromium/src/x86-generic_out/Release/page_cycler_tests' ' --gtest_filter=')+gtest_filter xcmd = site_ui.xcommand(cmd) logging.debug('Running: '+g...
def percentile(N, percent, key=lambda x:x): """ Find the percentile of a list of values. @parameter N - is a list of values. Note N MUST BE already sorted. @parameter percent - a float value from 0.0 to 1.0. @parameter key - optional key function to compute value from each element of N. @return - the percentile of th...
def mean(numbers): assert(len(numbers) != 0), 'list should not be empty!' return sum(numbers)/len(numbers) class PageCyclerResultsParser: def parse_file(self, outfile = 'out.txt'): output = open(outfile).read() return self.parse_results(output) def parse_results(self, output = ''): median = functools.partial(percen...
def run_once(self): testNames=['PageCyclerTest.Alexa_usFile', 'PageCyclerTest.MozFile', 'PageCyclerTest.Intl1File', 'PageCyclerTest.Intl2File', 'PageCyclerTest.DhtmlFile', 'PageCyclerTest.Moz2File', 'PageCyclerTest.BloatFile', 'PageCyclerTest.DomFile', 'PageCyclerTest.MorejsFile', 'PageCyclerTest.MorejsnpFile'] for t...
def mean(numbers): assert(len(numbers) != 0), 'list should not be empty!' return sum(numbers)/len(numbers)
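percentile above interpolates linearly between the two nearest ranks, and the results parser derives a median from it with functools.partial. A quick usage check; note the interpolation weights (lower neighbor scaled by c-k, upper by k-f, summing to 1). With percent=0.5 on an even-length list both weights are 0.5, so a transposed pair of weights, as the flattened context above appears to show, would go unnoticed by the median:

import functools, math

def percentile(N, percent, key=lambda x: x):
    # N must already be sorted; percent is a float in [0.0, 1.0]
    if not N:
        return None
    k = (len(N) - 1) * percent
    f, c = math.floor(k), math.ceil(k)
    if f == c:
        return key(N[int(k)])
    return key(N[int(f)]) * (c - k) + key(N[int(c)]) * (k - f)

median = functools.partial(percentile, percent=0.5)
print(median([1, 3, 7]))       # 3   (exact middle rank)
print(median([1, 3, 7, 100]))  # 5.0 (halfway between 3 and 7)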
if match: content = callback + '(' + content + ')'
if match: content = callback + '(' + content.decode('utf-8') + ')'
def get(self): extURL = cgi.escape(self.request.get('extURL')) extMethod = cgi.escape(self.request.get('extMethod')) queryString = cgi.escape(self.request.query_string) queryDict = dict(cgi.parse_qsl(queryString)) callback = cgi.escape(self.request.get('_callback')) if queryString: error = 1 method = urlfetch.POST if...
entry += " '%s', '%s', '%s'))\n" % ( moduleName, 'deprecated', 'deprecated' )
if kind == "Library": entry += " '%s', '%s', '%s'), '%s')\n" % ( moduleName, 'deprecated', 'deprecated', moduleName.upper()) else: entry += " '%s', '%s', '%s'))\n" % ( moduleName, 'deprecated', 'deprecated')
def _appendToProjectsPy(self, moduleName, branchLocation, destination, template):
self.addPluginPath(os.path.join("/usr","lib", "python%d.%d" % (major, minor), "dist-packages", "openwns", "wrowser", "playgroundPlugins"))
self.addPluginPath(os.path.join("/usr","local","lib", "python%d.%d" % (major, minor), "dist-packages", "openwns", "wrowser", "playgroundPlugins"))
def __init__(self): """ Initialization of members. No other functionality. """ usage = "" usage += "The list below shows global available options.\n"
self.field, self.form, self),
self.field, self.form, self.content),
def update(self): value = zope.component.queryMultiAdapter( (self.context, self.request, self.widget, self.field, self.form, self), interfaces.IValue, name='message') if value is not None: self.message = value.get() else: self.message = self.createMessage()
iface = zope.interface.interface.InterfaceClass( 'IGeneratedForObject_%i' %hash(spec)) zope.interface.alsoProvides(spec, iface) spec = iface
ifaceName = 'IGeneratedForObject_%i' %hash(spec) existingInterfaces = [ i for i in zope.interface.directlyProvidedBy(spec) if i.__name__ == ifaceName ] if len(existingInterfaces) > 0: spec = existingInterfaces[0] else: iface = zope.interface.interface.InterfaceClass(ifaceName) zope.interface.alsoProvides(spec, ifa...
def getSpecification(spec, force=False): """Get the specification of the given object. If the given object is already a specification acceptable to the component architecture, it is simply returned. This is true for classes and specification objects (which includes interfaces). In case of instances, an interface is g...
return self.data.get(self.field.__name__, self.field.missing_value)
value = self.data.get(self.field.__name__, _marker) if value is _marker: raise AttributeError return value
def get(self): """See z3c.form.interfaces.IDataManager""" return self.data.get(self.field.__name__, self.field.missing_value)
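The replacement get() distinguishes "key absent" from "key present but None" with a module-level sentinel, raising AttributeError instead of silently substituting field.missing_value. A generic sketch of the sentinel pattern (names here are illustrative, not z3c.form's API):

_marker = object()  # unique sentinel: cannot collide with stored values

class AttributeField(object):
    def __init__(self, data, name):
        self.data, self.name = data, name

    def get(self):
        # .get() with a default of None could not tell "absent" from
        # "stored None"; the sentinel keeps the two cases distinct.
        value = self.data.get(self.name, _marker)
        if value is _marker:
            raise AttributeError(self.name)
        return value

print(AttributeField({"title": None}, "title").get())  # None: present
try:
    AttributeField({}, "x").get()
except AttributeError:
    print("absent key raises, as the data manager intends")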
baseuri+"
baseuri+prefix,
def _construct_ids(self, element, prefix, baseuri, skip_fragments=[], find_definitions = False): find_definitions_recursive = find_definitions counters = defaultdict(int) if isinstance(element, CompoundStructure): # Find concept definitions if isinstance(element, Paragraf): # check whether the first paragraph contains a text ...
sets = [{'label':'Naive set 1' 'predicate',TEMP['naive1'], 'data',rs1},
sets = [{'label':'Naive set 1', 'predicate':TEMP['naive1'], 'data':rs1},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
'predicate', TEMP['naive2'], 'data',rs2},
'predicate':TEMP['naive2'], 'data':rs2},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
'predicate', TEMP['naive3'], 'data',rs3},
'predicate':TEMP['naive3'], 'data':rs3},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
'predicate', TEMP['naive4'], 'data',rs4}]
'predicate':TEMP['naive4'], 'data':rs4}]
def prep_annotation_file(self,basefile): print "prep_annotation_file"
pass
def prep_annotation_file(self,basefile): print "prep_annotation_file"
re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\. |N|)?\d+( s\.\d+|))
re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\.[_ ]|N|)?\d+([_ ]s\.\d+|))
def __str__(self): return repr(self.value)
level = self.getFormData("level", None) if level is not None: if level=="top": query += ' AND dc_identifier:"http://purl.org/anzsrc/seo/ else: query += ' AND skos_broader:"%s"' % level
def __getSolrData(self): prefix = self.getSearchTerms() if prefix: query = '%(prefix)s OR %(prefix)s*' % { "prefix" : prefix } else: query = "*:*" req = SearchRequest(query) req.addParam("fq", 'item_type:"object"') req.addParam("fq", 'repository_type:"SEO"') req.setParam("fl", "score") req.setParam("sort", "score desc...
def __init__(self, bundleDir, ipswDir, outDir, verbose): self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose def fileWithSuffix(self, filePath, suffix): if filePath.lower().endswith('.dmg'): filePath = filePath[:-4] suffix = suffix + '.dmg' return path.join(self.outDir, path....
def __init__(self, bundleDir, ipswDir, outDir, verbose, x_opt): self.x_opt = x_opt self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose def fileWithSuffix(self, filePath, suffix): if filePath.lower().endswith('.dmg'): filePath = filePath[:-4] suffix = suffix + '.dmg' return pat...
def __init__(self, bundleDir, ipswDir, outDir, verbose): self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose
parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") par...
parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") par...
def main(): parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="O...
kpatches = diff_kernel(sys.argv[3], sys.argv[4]) ibss = ibss_default_patches(sys.argv[1], sys.argv[2]) ibss_add_kpf(ibss, sys.argv[5]) ibss_add_kpatches(ibss, kpatches)
if len(sys.argv) < 6: print "Usage: ibss_patcher ibss_decrypted_orig ibss_out kernelcache_decrypted_orig kernelcache_decrypted_patched ibss_patchproc.bin" exit(1) kpatches = diff_kernel(sys.argv[3], sys.argv[4]) ibss = ibss_default_patches(sys.argv[1], sys.argv[2]) ibss_add_kpf(ibss, sys.argv[5]) ibss_add_kpatches(ibss...
def byte_search(image, bytes): for i in range(0, len(image) - len(bytes), 2): if image[i:i+len(bytes)] == bytes: return i return -1
leftOfBox = xPt[ind-1]
leftOfBox = self.xPts[ind-1]
def value(self): "preserve volume during the binning" totalVol = 0 for ind,xPt,yPt in zip(range(self.numPts), self.xPts, self.yPts): #get leftOfBox if ind == 0: if self.leftSide < xPt: leftOfBox = self.leftSide else: leftOfBox = xPt else: leftOfBox = xPt[ind-1] #get rightOfBox if ind == (self.numPts-1): if self.rightSi...
rightOfBox=xPt[ind+1]
rightOfBox = self.xPts[ind+1]
def value(self): "preserve volume during the binning" totalVol = 0 for ind,xPt,yPt in zip(range(self.numPts), self.xPts, self.yPts): #get leftOfBox if ind == 0: if self.leftSide < xPt: leftOfBox = self.leftSide else: leftOfBox = xPt else: leftOfBox = xPt[ind-1] #get rightOfBox if ind == (self.numPts-1): if self.rightSi...
self.value = 0.0
def __init__(self, leftSide=None, spacing=None, numXPointsToBin=None): self.numXPointsToBin = numXPointsToBin self.value = 0.0 self.xPts = [] self.yPts = [] self.leftSide = leftSide self.spacing = spacing
if xPt >= currentBin.rightSide:
while xPt >= currentBin.rightSide:
def __init__(self, xDataP, yDataP, newBins=None, binValue = 'countPoints', format = 'columns'): xData = np.sort(xDataP) sortedInds = np.argsort(xDataP) yData = yDataP[sortedInds] self.bins=[] if type(newBins) is type(1): # this algorithm is for data that has already been binned and we're going over the bins to rebin i...
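The if-to-while change matters when a data point lies several empty bins ahead of the current one: a single if advances at most one bin, a while loop catches up. A toy version of the advancing scan (sorted points, contiguous bin edges):

def assign(points, edges):
    # edges define contiguous bins [edges[i], edges[i+1]); points sorted
    counts = [0] * (len(edges) - 1)
    b = 0
    for x in points:
        # 'while', not 'if': a point may lie several empty bins ahead
        while b + 1 < len(edges) - 1 and x >= edges[b + 1]:
            b += 1
        counts[b] += 1
    return counts

print(assign([0.1, 3.7], [0, 1, 2, 3, 4]))  # [1, 0, 0, 1]
# with 'if' instead of 'while', 3.7 would stop in bin [1, 2)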
axisGrp.create_dataset('bin centers', data = NdArray(bcs.datatype(), bcs))
axisGrp.create_dataset('bin centers', data = bcs)
def onAxis(self, axesGrp, axis, index): #index: index of this axis in the axis array #we need to index that so that axis can be loaded #sequentially. mapper = axis._mapper type = types[mapper.__class__]
if isinstance(unit, int) or isinstance(unit, float):
if isinstance(unit, int) or isinstance(unit, float) or isinstance(unit, long):
def onUnit(self, unit): if isinstance(unit, int) or isinstance(unit, float): return unit return unit.tostring()
dst = WithProperties('public_html/binaries/' + remote_name + '.bz2', 'got_revision')
dst = WithProperties('public_html/binaries/' + remote_name, 'got_revision')
def addUploadBinariesStep(factory, binaries): for (local_name, remote_name) in binaries.items(): dst = WithProperties('public_html/binaries/' + remote_name + '.bz2', 'got_revision') factory.addStep(FileUpload(slavesrc=local_name, masterdest=dst, mode=0755))
return pretty_date(dt)
return dt
def get_date(s): dt = datetime.strptime(s, '%Y-%m-%d %H:%M:%S') dt -= timedelta(seconds=time.timezone) # sqlite seems to save at GMT... ata :P return pretty_date(dt) # found this online
self.conn = cherrypy.thread_data.db self.c = self.conn.cursor()
def get_conn(self): return cherrypy.thread_data.db def get_cur(self): return self.get_conn().cursor() def fetchvar(self, query, args=()): c = self.get_cur() c.execute(query, args) return c.fetchone()[0]
def connect(self, thread_index): cherrypy.thread_data.db = sqlite3.connect('minitwit.sqlite') self.conn = cherrypy.thread_data.db self.c = self.conn.cursor()
self.c.execute(query, args) return self.c.fetchone()
c = self.get_cur() c.execute(query, args) return c.fetchone()
def fetchone(self, query, args=()): self.c.execute(query, args) return self.c.fetchone()
self.c.execute(query, args) return self.c.fetchalll()
c = self.get_cur() c.execute(query, args) return c.fetchall()
def fetchall(self, query, args=()): self.c.execute(query, args) return self.c.fetchalll()
self.c.execute(query, args) self.conn.commit()
c = self.get_cur() c.execute(query, args) self.get_conn().commit()
def query(self, query, args=()): self.c.execute(query, args) self.conn.commit()
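The minitwit rows stop caching a connection and cursor on self, which CherryPy's worker threads would share even though sqlite3 objects may only be used from their creating thread, and instead pull the per-thread connection from cherrypy.thread_data on every call. A sketch of the usual recipe, assuming CherryPy 3.x's start_thread channel:

import sqlite3
import cherrypy

def connect(thread_index):
    # one sqlite connection per server thread; sqlite3 objects must
    # only be used from the thread that created them
    cherrypy.thread_data.db = sqlite3.connect('minitwit.sqlite')

# run connect() once in each worker thread as it starts
cherrypy.engine.subscribe('start_thread', connect)

class DB(object):
    def fetchone(self, query, args=()):
        c = cherrypy.thread_data.db.cursor()  # fresh cursor per call
        c.execute(query, args)
        return c.fetchone()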
conn = cherrypy.thread_data.db c = conn.cursor() c.execute("select rowid from users where username = ? and password = ?", (username, md5sum(password))) logged_in = c.fetchone()
logged_in = db.fetchone("select rowid from users where username = ? and password = ?", (username, md5sum(password)))
def login(self, username='', password='', redirect='/'): message = None if len(username) > 0 and len(password) > 0: conn = cherrypy.thread_data.db c = conn.cursor() c.execute("select rowid from users where username = ? and password = ?", (username, md5sum(password))) logged_in = c.fetchone() if logged_in is not None: c...
conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select rowid, username from users where rowid = ?', (rowid,)) r = c.fetchone()
r = db.fetchone('select rowid, username from users where rowid = ?', (rowid,))
def get_logged_in(self): try: rowid = cherrypy.session.get('logged_in') conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select rowid, username from users where rowid = ?', (rowid,)) r = c.fetchone() return {'id': r[0], 'username': r[1]} except: return None
def default(self, id=None, text=None):
def default(self, id=None, text=None, last_update=None):
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
conn = cherrypy.thread_data.db c = conn.cursor()
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
c.execute('select rowid, text, date from posts where rowid = ?', (id,)) r = c.fetchone()
r = db.fetchone('select rowid, text, date from posts where rowid = ?', (id,))
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
c.execute('insert into posts values (?, ?, datetime("now"))', (logged_in['id'], text)) conn.commit()
db.query('insert into posts values (?, ?, datetime("now"))', (logged_in['id'], text))
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
try: c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') return json.dumps([{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()]) except: raise cherrypy.HTTPError(404)
if last_update is not None: last_update = int(last_update) / 1000 new_count = db.fetchvar('select count(*) from posts where strftime("%s", date) - ? > 0', (last_update,)) if int(new_count) == 0: return '[]' posts = db.fetchall('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid o...
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()]
posts = db.fetchall('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': pretty_date(get_date(r[2])), 'username': r[3]} for r in posts]
def index(self): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r ...
c.execute('insert into users values (?, ?)', username, md5sum(password)) conn.commit()
db.query('insert into users values (?, ?)', (username, md5sum(password)))
def register(self, username='', password='', conf_password=''): message = None if len(username) > 0 and len(password) > 0 and password == conf_password: c.execute('insert into users values (?, ?)', username, md5sum(password)) conn.commit() raise cherrypy.HTTPRedirect('/session/login') elif password != conf_password: me...
conn = cherrypy.thread_data.db c = conn.cursor() c.execute("drop table if exists users") c.execute("drop table if exists posts") c.execute("create table users (username text, password text)") c.execute("create unique index username on users (username)") c.execute("create table posts (user int, text text, date text)") c...
db.query("drop table if exists users") db.query("drop table if exists posts") db.query("create table users (username text, password text)") db.query("create unique index username on users (username)") db.query("create table posts (user int, text text, date text)") db.query("create index user on posts (user)") db.query(...
def install(self): conn = cherrypy.thread_data.db c = conn.cursor() c.execute("drop table if exists users") c.execute("drop table if exists posts") c.execute("create table users (username text, password text)") c.execute("create unique index username on users (username)") c.execute("create table posts (user int, text t...
reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList]))
reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList)))
def getRequirements(self, jobNum): reqs = Module.getRequirements(self, jobNum) if self.dataSplitter != None: reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList])) return reqs
return self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList] != []
return self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList) != []
def canSubmit(self, jobNum): if self.checkSE and (self.dataSplitter != None): return self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList] != [] return True
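Both rows swap dict indexing for .get(), so a job whose split info has no SEList entry yields None instead of raising KeyError. Note the follow-on comparison: None != [] is True, so a missing list counts as "no storage restriction", while only an explicitly empty list blocks submission:

info = {"files": ["a.root"]}          # no "SEList" key here

# indexing raises; .get() degrades to None
try:
    info["SEList"]
except KeyError:
    pass
se_list = info.get("SEList")          # None

# the canSubmit-style check: only an explicitly empty list blocks
print(se_list != [])                  # True  (None passes the check)
print([] != [])                       # False (empty list blocks)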